diff --git a/.travis.yml b/.travis.yml index 33c0f098fd..7150d0c7f8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,9 @@ matrix: # The pypy tests are slow, so we list them first - python: pypy3.5 - language: generic - env: USE_PYPY_NIGHTLY=1 + env: PYPY_NIGHTLY_BRANCH=py3.5 + - language: generic + env: PYPY_NIGHTLY_BRANCH=py3.6 - python: 3.5.0 - python: 3.5.2 - python: 3.6 @@ -26,6 +28,15 @@ matrix: - python: 3.8-dev dist: xenial sudo: required + # - os: osx + # language: generic + # env: MACPYTHON=3.5.4 + # - os: osx + # language: generic + # env: MACPYTHON=3.6.6 + # - os: osx + # language: generic + # env: MACPYTHON=3.7.0 script: - ci/travis.sh diff --git a/Jenkinsfile b/Jenkinsfile index d94d4df1e2..10c14ae662 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ def configs = [ [ label: 'sierra', - pyversions: ['python3.5', 'python3.6'], + pyversions: ['python3.5', 'python3.6', 'python3.7'], ], ] @@ -52,6 +52,7 @@ def build(pyversion, label) { export PATH="/usr/local/bin:\${PATH}" export PATH="/Library/Frameworks/Python.framework/Versions/3.5/bin:\${PATH}" export PATH="/Library/Frameworks/Python.framework/Versions/3.6/bin:\${PATH}" + export PATH="/Library/Frameworks/Python.framework/Versions/3.7/bin:\${PATH}" # Workaround for https://github.com/pypa/pip/issues/5345 # See also: diff --git a/appveyor.yml b/appveyor.yml index bedd5cf75b..5317d989c7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -8,6 +8,8 @@ environment: - PYTHON: "C:\\Python35-x64" - PYTHON: "C:\\Python36" - PYTHON: "C:\\Python36-x64" + - PYTHON: "C:\\Python37" + - PYTHON: "C:\\Python37-x64" build_script: - "git --no-pager log -n2" diff --git a/ci/travis.sh b/ci/travis.sh index 3e0fde36d2..0b90aae129 100755 --- a/ci/travis.sh +++ b/ci/travis.sh @@ -6,8 +6,20 @@ YAPF_VERSION=0.20.1 git rev-parse HEAD -if [ "$USE_PYPY_NIGHTLY" = "1" ]; then - curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/py3.5/pypy-c-jit-latest-linux64.tar.bz2 +if [ "$TRAVIS_OS_NAME" = "osx" ]; then + curl 
-Lo macpython.pkg https://www.python.org/ftp/python/${MACPYTHON}/python-${MACPYTHON}-macosx10.6.pkg + sudo installer -pkg macpython.pkg -target / + ls /Library/Frameworks/Python.framework/Versions/*/bin/ + PYTHON_EXE=/Library/Frameworks/Python.framework/Versions/*/bin/python3 + # The pip in older MacPython releases doesn't support a new enough TLS + curl https://bootstrap.pypa.io/get-pip.py | sudo $PYTHON_EXE + sudo $PYTHON_EXE -m pip install virtualenv + $PYTHON_EXE -m virtualenv testenv + source testenv/bin/activate +fi + +if [ "$PYPY_NIGHTLY_BRANCH" != "" ]; then + curl -fLo pypy.tar.bz2 http://buildbot.pypy.org/nightly/${PYPY_NIGHTLY_BRANCH}/pypy-c-jit-latest-linux64.tar.bz2 if [ ! -s pypy.tar.bz2 ]; then # We know: # - curl succeeded (200 response code; -f means "exit with error if diff --git a/docs/source/history.rst b/docs/source/history.rst index faf112ee3c..836f8bf2cf 100644 --- a/docs/source/history.rst +++ b/docs/source/history.rst @@ -5,6 +5,34 @@ Release history .. towncrier release notes start +Trio 0.6.0 (2018-08-13) +----------------------- + +Features +~~~~~~~~ + +- Add :func:`trio.hazmat.WaitForSingleObject` async function to await Windows + handles. (`#233 `__) +- The `sniffio `__ library can now + detect when Trio is running. (`#572 + `__) + + +Bugfixes +~~~~~~~~ + +- Make trio.socket._SocketType.connect *always* close the socket on + cancellation (`#247 `__) +- Fix a memory leak in :class:`trio.CapacityLimiter`, that could occurr when + ``acquire`` or ``acquire_on_behalf_of`` was cancelled. (`#548 + `__) +- Some version of MacOS have a buggy ``getaddrinfo`` that was causing spurious + test failures; we now detect those systems and skip the relevant test when + found. (`#580 `__) +- Prevent crashes when used with Sentry (raven-python). 
(`#599 + `__) + + Trio 0.5.0 (2018-07-20) ----------------------- diff --git a/docs/source/index.rst b/docs/source/index.rst index adc555efac..285c1f9c8f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -80,6 +80,7 @@ Vital statistics: design.rst history.rst contributing.rst + releasing.rst code-of-conduct.rst ==================== diff --git a/docs/source/reference-hazmat.rst b/docs/source/reference-hazmat.rst index 094f5901cc..efee0ba528 100644 --- a/docs/source/reference-hazmat.rst +++ b/docs/source/reference-hazmat.rst @@ -253,6 +253,19 @@ anything real. See `#26 Windows-specific API -------------------- +.. function:: WaitForSingleObject(handle) + :async: + + Async and cancellable variant of `WaitForSingleObject + `__. + Windows only. + + :arg handle: + A Win32 object handle, as a Python integer. + :raises OSError: + If the handle is invalid, e.g. when it is already closed. + + TODO: these are implemented, but are currently more of a sketch than anything real. See `#26 `__ and `#52 diff --git a/docs/source/releasing.rst b/docs/source/releasing.rst new file mode 100644 index 0000000000..27b44ae367 --- /dev/null +++ b/docs/source/releasing.rst @@ -0,0 +1,67 @@ +.. 
_releasing: + +Preparing a release +------------------- + +Things to do for releasing: + +* announce intent to release on gitter + +* check for open issues / pull requests that really should be in the release + + + come back when these are done + + + … or ignore them and do another release next week + +* check for deprecations "long enough ago" (two months or two releases, whichever is longer) + + + remove affected code + +* Do the actual release changeset + + + update version number + + - increment as per Semantic Versioning rules + + - remove ``+dev`` tag from version number + + + Run ``towncrier`` + + - review history change + + - ``git rm`` changes + + + commit + +* push to your personal repository, "release" branch + +* create pull request to ``python-trio/trio``'s "release" branch + +* announce PR on gitter + + + wait for feedback + + + fix problems, if any + +* verify that all checks succeeded + +* acknowledge the release PR + + + or rather, somebody else should do that + +* tag with vVERSION + +* push to PyPI + + + ``python3 setup.py sdist bdist_wheel upload`` + +* announce on gitter + +* update version number + + + add ``+dev`` tag to the end + +* prepare pull request from "release" back to "master" + + + acknowledge it + diff --git a/docs/source/tutorial.rst b/docs/source/tutorial.rst index 33a1d2a6d8..f45c8bb2b3 100644 --- a/docs/source/tutorial.rst +++ b/docs/source/tutorial.rst @@ -374,7 +374,7 @@ with :func:`time.sleep`. ``async/await`` is useless! Well, not really. Trio has one more trick up its sleeve, that makes async functions more powerful than regular functions: it can run -multiple async function *at the same time*. Here's an example: +multiple async functions *at the same time*. Here's an example: .. 
_tutorial-example-tasks-intro: diff --git a/setup.py b/setup.py index 485c743ab0..a3e364a1d5 100644 --- a/setup.py +++ b/setup.py @@ -79,6 +79,7 @@ "async_generator >= 1.9", "idna", "outcome", + "sniffio", # PEP 508 style, but: # https://bitbucket.org/pypa/wheel/issues/181/bdist_wheel-silently-discards-pep-508 #"cffi; os_name == 'nt'", # "cffi is required on windows" diff --git a/trio/__init__.py b/trio/__init__.py index 7ea2df21eb..5f84226570 100644 --- a/trio/__init__.py +++ b/trio/__init__.py @@ -1,3 +1,6 @@ +"""Trio - Pythonic async I/O for humans and snake people. +""" + # General layout: # # trio/_core/... is the self-contained core library. It does various diff --git a/trio/_core/__init__.py b/trio/_core/__init__.py index 25779eef04..976975ae48 100644 --- a/trio/_core/__init__.py +++ b/trio/_core/__init__.py @@ -1,3 +1,10 @@ +""" +This namespace represents the core functionality that has to be built-in +and deal with private internal data structures. Things in this namespace +are publicly available in either trio, trio.hazmat, or trio.testing. +""" + + # Needs to be defined early so it can be imported: def _public(fn): # Used to mark methods on _Runner and on IOManager implementations that diff --git a/trio/_core/_io_windows.py b/trio/_core/_io_windows.py index 90a79c032d..885ddccf6f 100644 --- a/trio/_core/_io_windows.py +++ b/trio/_core/_io_windows.py @@ -22,6 +22,7 @@ INVALID_HANDLE_VALUE, raise_winerror, ErrorCodes, + _handle, ) # There's a lot to be said about the overall design of a Windows event @@ -96,19 +97,6 @@ def _check(success): return success -def _handle(obj): - # For now, represent handles as either cffi HANDLEs or as ints. If you - # try to pass in a file descriptor instead, it's not going to work - # out. (For that msvcrt.get_osfhandle does the trick, but I don't know if - # we'll actually need that for anything...) For sockets this doesn't - # matter, Python never allocates an fd. 
So let's wait until we actually - # encounter the problem before worrying about it. - if type(obj) is int: - return ffi.cast("HANDLE", obj) - else: - return obj - - @attr.s(frozen=True) class _WindowsStatistics: tasks_waiting_overlapped = attr.ib() diff --git a/trio/_core/_ki.py b/trio/_core/_ki.py index 72646e6004..83ffb28d11 100644 --- a/trio/_core/_ki.py +++ b/trio/_core/_ki.py @@ -77,12 +77,10 @@ # for any Python program that's written to catch and ignore # KeyboardInterrupt.) - -# We use this class object as a unique key into the frame locals dictionary, -# which in particular is guaranteed not to clash with any possible real local -# name (I bet this will confuse some debugger at some point though...): -class LOCALS_KEY_KI_PROTECTION_ENABLED: - pass +# We use this special string as a unique key into the frame locals dictionary. +# The @ ensures it is not a valid identifier and can't clash with any possible +# real local name. See: https://github.com/python-trio/trio/issues/469 +LOCALS_KEY_KI_PROTECTION_ENABLED = '@TRIO_KI_PROTECTION_ENABLED' # NB: according to the signal.signal docs, 'frame' can be None on entry to diff --git a/trio/_core/_run.py b/trio/_core/_run.py index 3228e2d2de..6c8651ceb8 100644 --- a/trio/_core/_run.py +++ b/trio/_core/_run.py @@ -1,11 +1,11 @@ import functools -import inspect import logging import os import random import select import threading from collections import deque +import collections.abc from contextlib import contextmanager, closing import outcome @@ -13,6 +13,8 @@ from math import inf from time import monotonic +from sniffio import current_async_library_cvar + import attr from async_generator import ( async_generator, yield_, asynccontextmanager, isasyncgen @@ -745,7 +747,7 @@ def spawn_impl(self, async_fn, args, nursery, name, *, system_task=False): def _return_value_looks_like_wrong_library(value): # Returned by legacy @asyncio.coroutine functions, which includes # a surprising proportion of asyncio builtins. 
- if inspect.isgenerator(value): + if isinstance(value, collections.abc.Generator): return True # The protocol for detecting an asyncio Future-like object if getattr(value, "_asyncio_future_blocking", None) is not None: @@ -764,7 +766,7 @@ def _return_value_looks_like_wrong_library(value): coro = async_fn(*args) except TypeError: # Give good error for: nursery.start_soon(trio.sleep(1)) - if inspect.iscoroutine(async_fn): + if isinstance(async_fn, collections.abc.Coroutine): raise TypeError( "trio was expecting an async function, but instead it got " "a coroutine object {async_fn!r}\n" @@ -797,7 +799,7 @@ def _return_value_looks_like_wrong_library(value): # for things like functools.partial objects wrapping an async # function. So we have to just call it and then check whether the # result is a coroutine object. - if not inspect.iscoroutine(coro): + if not isinstance(coro, collections.abc.Coroutine): # Give good error for: nursery.start_soon(func_returning_future) if _return_value_looks_like_wrong_library(coro): raise TypeError( @@ -874,8 +876,14 @@ def task_exited(self, task, result): task._cancel_stack[-1]._remove_task(task) self.tasks.remove(task) if task._parent_nursery is None: - # the init task should be the last task to exit - assert not self.tasks + # the init task should be the last task to exit. If not, then + # something is very wrong. Probably it hit some unexpected error, + # in which case we re-raise the error (which will later get + # converted to a TrioInternalError, but at least we'll get a + # traceback). Otherwise, raise a new error. 
+ if self.tasks: # pragma: no cover + result.unwrap() + raise TrioInternalError else: task._parent_nursery._child_finished(task, result) if task is self.main_task: @@ -1235,11 +1243,13 @@ def run( clock = SystemClock() instruments = list(instruments) io_manager = TheIOManager() + system_context = copy_context() + system_context.run(current_async_library_cvar.set, "trio") runner = Runner( clock=clock, instruments=instruments, io_manager=io_manager, - system_context=copy_context(), + system_context=system_context, ) GLOBAL_RUN_CONTEXT.runner = runner locals()[LOCALS_KEY_KI_PROTECTION_ENABLED] = True diff --git a/trio/_core/_windows_cffi.py b/trio/_core/_windows_cffi.py index 3e124e6b61..16dd9a232b 100644 --- a/trio/_core/_windows_cffi.py +++ b/trio/_core/_windows_cffi.py @@ -35,6 +35,8 @@ typedef OVERLAPPED WSAOVERLAPPED; typedef LPOVERLAPPED LPWSAOVERLAPPED; +typedef PVOID LPSECURITY_ATTRIBUTES; +typedef PVOID LPCSTR; typedef struct _OVERLAPPED_ENTRY { ULONG_PTR lpCompletionKey; @@ -80,6 +82,34 @@ _In_opt_ void* HandlerRoutine, _In_ BOOL Add ); + +HANDLE CreateEventA( + LPSECURITY_ATTRIBUTES lpEventAttributes, + BOOL bManualReset, + BOOL bInitialState, + LPCSTR lpName +); + +BOOL SetEvent( + HANDLE hEvent +); + +BOOL ResetEvent( + HANDLE hEvent +); + +DWORD WaitForSingleObject( + HANDLE hHandle, + DWORD dwMilliseconds +); + +DWORD WaitForMultipleObjects( + DWORD nCount, + HANDLE *lpHandles, + BOOL bWaitAll, + DWORD dwMilliseconds +); + """ # cribbed from pywincffi @@ -104,6 +134,19 @@ INVALID_HANDLE_VALUE = ffi.cast("HANDLE", -1) +def _handle(obj): + # For now, represent handles as either cffi HANDLEs or as ints. If you + # try to pass in a file descriptor instead, it's not going to work + # out. (For that msvcrt.get_osfhandle does the trick, but I don't know if + # we'll actually need that for anything...) For sockets this doesn't + # matter, Python never allocates an fd. So let's wait until we actually + # encounter the problem before worrying about it. 
+ if type(obj) is int: + return ffi.cast("HANDLE", obj) + else: + return obj + + def raise_winerror(winerror=None, *, filename=None, filename2=None): if winerror is None: winerror, msg = ffi.getwinerror() @@ -116,6 +159,10 @@ def raise_winerror(winerror=None, *, filename=None, filename2=None): class ErrorCodes(enum.IntEnum): STATUS_TIMEOUT = 0x102 + WAIT_TIMEOUT = 0x102 + WAIT_ABANDONED = 0x80 + WAIT_OBJECT_0 = 0x00 # object is signaled + WAIT_FAILED = 0xFFFFFFFF ERROR_IO_PENDING = 997 ERROR_OPERATION_ABORTED = 995 ERROR_ABANDONED_WAIT_0 = 735 diff --git a/trio/_core/tests/test_run.py b/trio/_core/tests/test_run.py index a646206808..49ffb76baf 100644 --- a/trio/_core/tests/test_run.py +++ b/trio/_core/tests/test_run.py @@ -10,6 +10,7 @@ import attr import outcome +import sniffio import pytest from async_generator import async_generator @@ -1894,3 +1895,16 @@ def test_Cancelled_init(): # private constructor should not raise _core.Cancelled._init() + + +def test_sniffio_integration(): + with pytest.raises(sniffio.AsyncLibraryNotFoundError): + sniffio.current_async_library() + + async def check_inside_trio(): + assert sniffio.current_async_library() == "trio" + + _core.run(check_inside_trio) + + with pytest.raises(sniffio.AsyncLibraryNotFoundError): + sniffio.current_async_library() diff --git a/trio/_core/tests/test_windows.py b/trio/_core/tests/test_windows.py index 11fbed45f2..c0ec5cce64 100644 --- a/trio/_core/tests/test_windows.py +++ b/trio/_core/tests/test_windows.py @@ -1,4 +1,5 @@ import os + import pytest on_windows = (os.name == "nt") diff --git a/trio/_highlevel_open_tcp_listeners.py b/trio/_highlevel_open_tcp_listeners.py index 7b7aebb19d..832028f9d2 100644 --- a/trio/_highlevel_open_tcp_listeners.py +++ b/trio/_highlevel_open_tcp_listeners.py @@ -168,8 +168,8 @@ async def serve_tcp( listeners = await nursery.start(serve_tcp, handler, 0) client_stream = await open_stream_to_socket_listener(listeners[0]) - # Then send and receive data on 'client', for 
example: - await client.send_all(b"GET / HTTP/1.0\\r\\n\\r\\n") + # Then send and receive data on 'client_stream', for example: + await client_stream.send_all(b"GET / HTTP/1.0\\r\\n\\r\\n") This avoids several common pitfalls: diff --git a/trio/_socket.py b/trio/_socket.py index eba29366b3..dc46faa452 100644 --- a/trio/_socket.py +++ b/trio/_socket.py @@ -654,59 +654,59 @@ async def connect(self, address): # off, then the socket becomes writable as a completion # notification. This means it isn't really cancellable... we close the # socket if cancelled, to avoid confusion. - address = await self._resolve_remote_address(address) - async with _try_sync(): - # An interesting puzzle: can a non-blocking connect() return EINTR - # (= raise InterruptedError)? PEP 475 specifically left this as - # the one place where it lets an InterruptedError escape instead - # of automatically retrying. This is based on the idea that EINTR - # from connect means that the connection was already started, and - # will continue in the background. For a blocking connect, this - # sort of makes sense: if it returns EINTR then the connection - # attempt is continuing in the background, and on many system you - # can't then call connect() again because there is already a - # connect happening. See: - # - # http://www.madore.org/~david/computers/connect-intr.html - # - # For a non-blocking connect, it doesn't make as much sense -- - # surely the interrupt didn't happen after we successfully - # initiated the connect and are just waiting for it to complete, - # because a non-blocking connect does not wait! 
And the spec - # describes the interaction between EINTR/blocking connect, but - # doesn't have anything useful to say about non-blocking connect: - # - # http://pubs.opengroup.org/onlinepubs/007904975/functions/connect.html - # - # So we have a conundrum: if EINTR means that the connect() hasn't - # happened (like it does for essentially every other syscall), - # then InterruptedError should be caught and retried. If EINTR - # means that the connect() has successfully started, then - # InterruptedError should be caught and ignored. Which should we - # do? - # - # In practice, the resolution is probably that non-blocking - # connect simply never returns EINTR, so the question of how to - # handle it is moot. Someone spelunked MacOS/FreeBSD and - # confirmed this is true there: - # - # https://stackoverflow.com/questions/14134440/eintr-and-non-blocking-calls - # - # and exarkun seems to think it's true in general of non-blocking - # calls: - # - # https://twistedmatrix.com/pipermail/twisted-python/2010-September/022864.html - # (and indeed, AFAICT twisted doesn't try to handle - # InterruptedError). - # - # So we don't try to catch InterruptedError. This way if it - # happens, someone will hopefully tell us, and then hopefully we - # can investigate their system to figure out what its semantics - # are. - return self._sock.connect(address) - # It raised BlockingIOError, meaning that it's started the - # connection attempt. We wait for it to complete: try: + address = await self._resolve_remote_address(address) + async with _try_sync(): + # An interesting puzzle: can a non-blocking connect() return EINTR + # (= raise InterruptedError)? PEP 475 specifically left this as + # the one place where it lets an InterruptedError escape instead + # of automatically retrying. This is based on the idea that EINTR + # from connect means that the connection was already started, and + # will continue in the background. 
For a blocking connect, this + # sort of makes sense: if it returns EINTR then the connection + # attempt is continuing in the background, and on many system you + # can't then call connect() again because there is already a + # connect happening. See: + # + # http://www.madore.org/~david/computers/connect-intr.html + # + # For a non-blocking connect, it doesn't make as much sense -- + # surely the interrupt didn't happen after we successfully + # initiated the connect and are just waiting for it to complete, + # because a non-blocking connect does not wait! And the spec + # describes the interaction between EINTR/blocking connect, but + # doesn't have anything useful to say about non-blocking connect: + # + # http://pubs.opengroup.org/onlinepubs/007904975/functions/connect.html + # + # So we have a conundrum: if EINTR means that the connect() hasn't + # happened (like it does for essentially every other syscall), + # then InterruptedError should be caught and retried. If EINTR + # means that the connect() has successfully started, then + # InterruptedError should be caught and ignored. Which should we + # do? + # + # In practice, the resolution is probably that non-blocking + # connect simply never returns EINTR, so the question of how to + # handle it is moot. Someone spelunked MacOS/FreeBSD and + # confirmed this is true there: + # + # https://stackoverflow.com/questions/14134440/eintr-and-non-blocking-calls + # + # and exarkun seems to think it's true in general of non-blocking + # calls: + # + # https://twistedmatrix.com/pipermail/twisted-python/2010-September/022864.html + # (and indeed, AFAICT twisted doesn't try to handle + # InterruptedError). + # + # So we don't try to catch InterruptedError. This way if it + # happens, someone will hopefully tell us, and then hopefully we + # can investigate their system to figure out what its semantics + # are. 
+ return self._sock.connect(address) + # It raised BlockingIOError, meaning that it's started the + # connection attempt. We wait for it to complete: await _core.wait_socket_writable(self._sock) except _core.Cancelled: # We can't really cancel a connect, and the socket is in an diff --git a/trio/_ssl.py b/trio/_ssl.py index 80f56714f1..74bf0628e8 100644 --- a/trio/_ssl.py +++ b/trio/_ssl.py @@ -839,7 +839,9 @@ async def wait_send_all_might_not_block(self): class SSLListener(Listener): """A :class:`~trio.abc.Listener` for SSL/TLS-encrypted servers. - :class:`SSLListener` allows you to wrap + :class:`SSLListener` wraps around another Listener, and converts + all incoming connections to encrypted connections by wrapping them + in a :class:`SSLStream`. Args: transport_listener (~trio.abc.Listener): The listener whose incoming diff --git a/trio/_sync.py b/trio/_sync.py index fe806dd34a..a99f75a3a7 100644 --- a/trio/_sync.py +++ b/trio/_sync.py @@ -288,7 +288,11 @@ async def acquire_on_behalf_of(self, borrower): except _core.WouldBlock: task = _core.current_task() self._pending_borrowers[task] = borrower - await self._lot.park() + try: + await self._lot.park() + except _core.Cancelled: + self._pending_borrowers.pop(task) + raise except: await _core.cancel_shielded_checkpoint() raise diff --git a/trio/_version.py b/trio/_version.py index f6370253c9..ae10048ba9 100644 --- a/trio/_version.py +++ b/trio/_version.py @@ -1,3 +1,3 @@ # This file is imported from __init__.py and exec'd from setup.py -__version__ = "0.5.0" +__version__ = "0.6.0" diff --git a/trio/_wait_for_object.py b/trio/_wait_for_object.py new file mode 100644 index 0000000000..8fd2e28abc --- /dev/null +++ b/trio/_wait_for_object.py @@ -0,0 +1,70 @@ +from . import _timeouts +from . 
import _core +from ._threads import run_sync_in_worker_thread +from ._core._windows_cffi import ffi, kernel32, ErrorCodes, raise_winerror, _handle + +__all__ = ["WaitForSingleObject"] + + +class StubLimiter: + def release_on_behalf_of(self, x): + pass + + async def acquire_on_behalf_of(self, x): + pass + + +async def WaitForSingleObject(obj): + """Async and cancellable variant of WaitForSingleObject. Windows only. + + Args: + handle: A Win32 handle, as a Python integer. + + Raises: + OSError: If the handle is invalid, e.g. when it is already closed. + + """ + # Allow ints or whatever we can convert to a win handle + handle = _handle(obj) + + # Quick check; we might not even need to spawn a thread. The zero + # means a zero timeout; this call never blocks. We also exit here + # if the handle is already closed for some reason. + retcode = kernel32.WaitForSingleObject(handle, 0) + if retcode == ErrorCodes.WAIT_FAILED: + raise_winerror() + elif retcode != ErrorCodes.WAIT_TIMEOUT: + return + + # Wait for a thread that waits for two handles: the handle plus a handle + # that we can use to cancel the thread. + cancel_handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL) + try: + await run_sync_in_worker_thread( + WaitForMultipleObjects_sync, + handle, + cancel_handle, + cancellable=True, + limiter=StubLimiter(), + ) + finally: + # Clean up our cancel handle. In case we get here because this task was + # cancelled, we also want to set the cancel_handle to stop the thread. + kernel32.SetEvent(cancel_handle) + kernel32.CloseHandle(cancel_handle) + + +def WaitForMultipleObjects_sync(*handles): + """Wait for any of the given Windows handles to be signaled. 
+ + """ + n = len(handles) + handle_arr = ffi.new("HANDLE[{}]".format(n)) + for i in range(n): + handle_arr[i] = handles[i] + timeout = 0xffffffff # INFINITE + retcode = kernel32.WaitForMultipleObjects( + n, handle_arr, False, timeout + ) # blocking + if retcode == ErrorCodes.WAIT_FAILED: + raise_winerror() diff --git a/trio/hazmat.py b/trio/hazmat.py index e5a417ceb8..4aebcc1baf 100644 --- a/trio/hazmat.py +++ b/trio/hazmat.py @@ -1,7 +1,15 @@ -# These are all re-exported from trio._core. See comments in trio/__init__.py -# for details. To make static analysis easier, this lists all possible -# symbols, and then we prune some below if they aren't available on this -# system. +""" +This namespace represents low-level functionality not intended for daily use, +but useful for extending Trio's functionality. +""" + +import sys + +# This is the union of a subset of trio/_core/ and some things from trio/*.py. +# See comments in trio/__init__.py for details. To make static analysis easier, +# this lists all possible symbols from trio._core, and then we prune those that +# aren't available on this system. After that we add some symbols from trio/*.py. + __all__ = [ "cancel_shielded_checkpoint", "Abort", @@ -56,3 +64,8 @@ # who knows. remove_from_all = __all__.remove remove_from_all(_sym) + +# Import bits from trio/*.py +if sys.platform.startswith("win"): + from ._wait_for_object import WaitForSingleObject + __all__ += ["WaitForSingleObject"] diff --git a/trio/tests/test_socket.py b/trio/tests/test_socket.py index fdcb8dfa6b..f61de0c453 100644 --- a/trio/tests/test_socket.py +++ b/trio/tests/test_socket.py @@ -377,6 +377,18 @@ async def test_SocketType_simple_server(address, socket_type): assert await client.recv(1) == b"x" +# On some MacOS systems, getaddrinfo likes to return V4-mapped addresses even +# when we *don't* pass AI_V4MAPPED. 
+# https://github.com/python-trio/trio/issues/580 +def gai_without_v4mapped_is_buggy(): # pragma: no cover + try: + stdlib_socket.getaddrinfo("1.2.3.4", 0, family=stdlib_socket.AF_INET6) + except stdlib_socket.gaierror: + return False + else: + return True + + # Direct thorough tests of the implicit resolver helpers async def test_SocketType_resolve(): # For some reason the stdlib special-cases "" to pass NULL to getaddrinfo @@ -412,20 +424,26 @@ async def s6res(*args): assert await s6res(("1::2", 80, 1)) == ("1::2", 80, 1, 0) assert await s6res(("1::2", 80, 1, 2)) == ("1::2", 80, 1, 2) - # V4 mapped addresses resolved if V6ONLY if False + # V4 mapped addresses resolved if V6ONLY is False sock6.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, False) assert await s6res(("1.2.3.4", "http")) == ("::ffff:1.2.3.4", 80, 0, 0) - # But not if it's true - sock6.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, True) - with pytest.raises(tsocket.gaierror) as excinfo: - await s6res(("1.2.3.4", 80)) - # Windows, MacOS - expected_errnos = {tsocket.EAI_NONAME} - # Linux - if hasattr(tsocket, "EAI_ADDRFAMILY"): - expected_errnos.add(tsocket.EAI_ADDRFAMILY) - assert excinfo.value.errno in expected_errnos + # Check the special case, because why not + await s4res(("", 123)) == ("255.255.255.255", 123) + await s6res(("", 123)) == ("::ffff:255.255.255.255", 123) + + # But not if it's true (at least on systems where getaddrinfo works + # correctly) + if not gai_without_v4mapped_is_buggy(): # pragma: no branch + sock6.setsockopt(tsocket.IPPROTO_IPV6, tsocket.IPV6_V6ONLY, True) + with pytest.raises(tsocket.gaierror) as excinfo: + await s6res(("1.2.3.4", 80)) + # Windows, MacOS + expected_errnos = {tsocket.EAI_NONAME} + # Linux + if hasattr(tsocket, "EAI_ADDRFAMILY"): + expected_errnos.add(tsocket.EAI_ADDRFAMILY) + assert excinfo.value.errno in expected_errnos # A family where we know nothing about the addresses, so should just # pass them through. 
This should work on Linux, which is enough to @@ -452,11 +470,6 @@ async def s6res(*args): with pytest.raises(ValueError): await s6res(("1.2.3.4", 80, 0, 0, 0)) - # The special case, because why not - await s4res(("", 123)) == ("255.255.255.255", 123) - with pytest.raises(tsocket.gaierror): - await s6res(("", 123)) - async def test_deprecated_resolver_methods(recwarn): with tsocket.socket() as sock: @@ -614,6 +627,22 @@ def connect(self, *args, **kwargs): await sock.connect(("127.0.0.1", 2)) +async def test_resolve_remote_address_exception_closes_socket(): + # Here we are testing issue 247, any cancellation will leave the socket closed + with _core.open_cancel_scope() as cancel_scope: + with tsocket.socket() as sock: + + async def _resolve_remote_address(self, *args, **kwargs): + cancel_scope.cancel() + await _core.checkpoint() + + sock._resolve_remote_address = _resolve_remote_address + with assert_checkpoints(): + with pytest.raises(_core.Cancelled): + await sock.connect('') + assert sock.fileno() == -1 + + async def test_send_recv_variants(): a, b = tsocket.socketpair() with a, b: diff --git a/trio/tests/test_sync.py b/trio/tests/test_sync.py index bded1b0544..64aa3235a8 100644 --- a/trio/tests/test_sync.py +++ b/trio/tests/test_sync.py @@ -147,6 +147,21 @@ async def test_CapacityLimiter_change_total_tokens(): assert c.statistics().tasks_waiting == 0 +# regression test for issue #548 +async def test_CapacityLimiter_memleak_548(): + limiter = CapacityLimiter(total_tokens=1) + await limiter.acquire() + + async with _core.open_nursery() as n: + n.start_soon(limiter.acquire) + await wait_all_tasks_blocked() # give it a chance to run the task + n.cancel_scope.cancel() + + # if this is 1, the acquire call (despite being killed) is still there in the task, and will + # leak memory all the while the limiter is active + assert len(limiter._pending_borrowers) == 0 + + async def test_Semaphore(): with pytest.raises(TypeError): Semaphore(1.0) diff --git 
# trio/tests/test_wait_for_object.py
"""Tests for ``trio.hazmat.WaitForSingleObject`` and the underlying
``WaitForMultipleObjects_sync`` helper.

These tests are Windows-only (see ``pytestmark`` below); the Windows-specific
imports are guarded so the module can still be collected on other platforms.
"""

import os
import time

import pytest

on_windows = (os.name == "nt")
# Mark all the tests in this file as being windows-only
pytestmark = pytest.mark.skipif(not on_windows, reason="windows only")

from .._core.tests.tutil import slow
from .. import _core
from .. import _timeouts
if on_windows:
    from .._core._windows_cffi import ffi, kernel32
    from .._wait_for_object import WaitForSingleObject, \
        WaitForMultipleObjects_sync, run_sync_in_worker_thread


async def test_WaitForMultipleObjects_sync():
    """Exercise the sync wait when handles are set/closed *before* waiting.

    Note that closing the handle (not signaling) will cause the *initiation*
    of a wait to return immediately. But closing a handle that is already
    being waited on will not stop whatever is waiting for it.
    """
    # One handle
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.SetEvent(handle1)
    WaitForMultipleObjects_sync(handle1)
    kernel32.CloseHandle(handle1)
    print('test_WaitForMultipleObjects_sync one OK')

    # Two handles, signal first
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.SetEvent(handle1)
    WaitForMultipleObjects_sync(handle1, handle2)
    kernel32.CloseHandle(handle1)
    kernel32.CloseHandle(handle2)
    print('test_WaitForMultipleObjects_sync set first OK')

    # Two handles, signal second
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.SetEvent(handle2)
    WaitForMultipleObjects_sync(handle1, handle2)
    kernel32.CloseHandle(handle1)
    kernel32.CloseHandle(handle2)
    print('test_WaitForMultipleObjects_sync set second OK')

    # Two handles, close first: initiating a wait on a closed handle
    # fails immediately with OSError.
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.CloseHandle(handle1)
    with pytest.raises(OSError):
        WaitForMultipleObjects_sync(handle1, handle2)
    kernel32.CloseHandle(handle2)
    print('test_WaitForMultipleObjects_sync close first OK')

    # Two handles, close second
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.CloseHandle(handle2)
    with pytest.raises(OSError):
        WaitForMultipleObjects_sync(handle1, handle2)
    kernel32.CloseHandle(handle1)
    print('test_WaitForMultipleObjects_sync close second OK')


@slow
async def test_WaitForMultipleObjects_sync_slow():
    """Sync-wait for handles in a worker thread while the main task signals
    them after a short delay, and check the elapsed time is plausible.
    """
    TIMEOUT = 0.3

    # One handle
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    t0 = _core.current_time()
    async with _core.open_nursery() as nursery:
        nursery.start_soon(
            run_sync_in_worker_thread, WaitForMultipleObjects_sync, handle1
        )
        await _timeouts.sleep(TIMEOUT)
        # If the line below were removed, the thread above would be stuck,
        # and Trio won't exit this scope
        kernel32.SetEvent(handle1)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    kernel32.CloseHandle(handle1)
    print('test_WaitForMultipleObjects_sync_slow one OK')

    # Two handles, signal first
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    t0 = _core.current_time()
    async with _core.open_nursery() as nursery:
        nursery.start_soon(
            run_sync_in_worker_thread, WaitForMultipleObjects_sync, handle1,
            handle2
        )
        await _timeouts.sleep(TIMEOUT)
        kernel32.SetEvent(handle1)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    kernel32.CloseHandle(handle1)
    kernel32.CloseHandle(handle2)
    print('test_WaitForMultipleObjects_sync_slow thread-set first OK')

    # Two handles, signal second
    handle1 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle2 = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    t0 = _core.current_time()
    async with _core.open_nursery() as nursery:
        nursery.start_soon(
            run_sync_in_worker_thread, WaitForMultipleObjects_sync, handle1,
            handle2
        )
        await _timeouts.sleep(TIMEOUT)
        kernel32.SetEvent(handle2)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    kernel32.CloseHandle(handle1)
    kernel32.CloseHandle(handle2)
    print('test_WaitForMultipleObjects_sync_slow thread-set second OK')


async def test_WaitForSingleObject():
    """Exercise the async wait when handles are set/closed *before* waiting."""
    # Test already set
    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.SetEvent(handle)
    await WaitForSingleObject(handle)  # should return at once
    kernel32.CloseHandle(handle)
    print('test_WaitForSingleObject already set OK')

    # Test already set, passing the handle as a plain int
    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle_int = int(ffi.cast("intptr_t", handle))
    kernel32.SetEvent(handle)
    await WaitForSingleObject(handle_int)  # should return at once
    kernel32.CloseHandle(handle)
    print('test_WaitForSingleObject already set OK')

    # Test already closed
    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    kernel32.CloseHandle(handle)
    with pytest.raises(OSError):
        await WaitForSingleObject(handle)  # should return at once
    print('test_WaitForSingleObject already closed OK')

    # Not a handle
    with pytest.raises(TypeError):
        await WaitForSingleObject("not a handle")  # Wrong type
    # with pytest.raises(OSError):
    #     await WaitForSingleObject(99)  # If you're unlucky, it actually IS a handle :(
    print('test_WaitForSingleObject not a handle OK')


@slow
async def test_WaitForSingleObject_slow():
    """Set the handle from another task after a delay, and cancel a wait,
    checking the elapsed time against the timeout in each case.
    """
    # Set the timeout used in the tests. We test the waiting time against
    # the timeout with a certain margin.
    TIMEOUT = 0.3

    async def signal_soon_async(handle):
        await _timeouts.sleep(TIMEOUT)
        kernel32.SetEvent(handle)

    # Test handle is SET after TIMEOUT in separate coroutine

    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    t0 = _core.current_time()

    async with _core.open_nursery() as nursery:
        nursery.start_soon(WaitForSingleObject, handle)
        nursery.start_soon(signal_soon_async, handle)

    kernel32.CloseHandle(handle)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    print('test_WaitForSingleObject_slow set from task OK')

    # Test handle is SET after TIMEOUT in separate coroutine, as int

    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    handle_int = int(ffi.cast("intptr_t", handle))
    t0 = _core.current_time()

    async with _core.open_nursery() as nursery:
        nursery.start_soon(WaitForSingleObject, handle_int)
        nursery.start_soon(signal_soon_async, handle)

    kernel32.CloseHandle(handle)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    print('test_WaitForSingleObject_slow set from task as int OK')

    # Test handle is CLOSED after 1 sec - NOPE, not tested; see the note in
    # test_WaitForMultipleObjects_sync: closing an already-waited-on handle
    # does not wake the waiter.

    # Test cancellation

    handle = kernel32.CreateEventA(ffi.NULL, True, False, ffi.NULL)
    t0 = _core.current_time()

    with _timeouts.move_on_after(TIMEOUT):
        await WaitForSingleObject(handle)

    kernel32.CloseHandle(handle)
    t1 = _core.current_time()
    assert TIMEOUT <= (t1 - t0) < 2.0 * TIMEOUT
    print('test_WaitForSingleObject_slow cancellation OK')