diff --git a/LICENSE b/LICENSE index b2f25c4e4b8974..b26c65a2410529 100644 --- a/LICENSE +++ b/LICENSE @@ -644,9 +644,6 @@ The externally maintained libraries used by Node.js are: - libuv, located at deps/uv, is licensed as follows: """ - libuv is licensed for use as follows: - - ==== Copyright (c) 2015-present libuv project contributors. Permission is hereby granted, free of charge, to any person obtaining a copy @@ -666,8 +663,6 @@ The externally maintained libraries used by Node.js are: LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ==== - This license applies to parts of libuv originating from the https://github.com/joyent/libuv repository: @@ -704,12 +699,6 @@ The externally maintained libraries used by Node.js are: - inet_pton and inet_ntop implementations, contained in src/inet.c, are copyright the Internet Systems Consortium, Inc., and licensed under the ISC license. - - - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three - clause BSD license. - - - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB. - Three clause BSD license. """ - llhttp, located at deps/llhttp, is licensed as follows: diff --git a/deps/uv/.mailmap b/deps/uv/.mailmap index b23377c6151008..bf12432495de03 100644 --- a/deps/uv/.mailmap +++ b/deps/uv/.mailmap @@ -29,6 +29,7 @@ Keno Fischer Keno Fischer Leith Bade Leonard Hecker +Lewis Russell Maciej Małecki Marc Schlaich Michael @@ -60,5 +61,7 @@ gengjiawen jBarz jBarz ptlomholt +theanarkh <2923878201@qq.com> tjarlama <59913901+tjarlama@users.noreply.github.com> +ywave620 <60539365+ywave620@users.noreply.github.com> zlargon diff --git a/deps/uv/.readthedocs.yaml b/deps/uv/.readthedocs.yaml index e53b9f3e84be0a..c1c9ab238cd900 100644 --- a/deps/uv/.readthedocs.yaml +++ b/deps/uv/.readthedocs.yaml @@ -5,7 +5,10 @@ sphinx: configuration: null fail_on_warning: false +build: + tools: + python: "3.9" + python: - version: 3.8 install: - requirements: docs/requirements.txt diff --git a/deps/uv/AUTHORS b/deps/uv/AUTHORS index e03100eab9d360..b6860c6620f43e 100644 --- a/deps/uv/AUTHORS +++ b/deps/uv/AUTHORS @@ -517,3 +517,28 @@ chucksilvers Sergey Fedorov theanarkh <2923878201@qq.com> Samuel Cabrero +自发对称破缺 <429839446@qq.com> +Luan Devecchi +Steven Schveighoffer +number201724 +Daniel +Christian Clason +ywave620 +jensbjorgensen +daomingq +Qix +Edward Humes <29870961+aurxenon@users.noreply.github.com> +Tim Besard +Sergey Rubanov +Stefan Stojanovic +Zvicii +dundargoc <33953936+dundargoc@users.noreply.github.com> +Jack·Boos·Yu <47264268+JackBoosY@users.noreply.github.com> +panran <310762957@qq.com> +Tamás Bálint Misius +Bruno Passeri +Jason Zhang +Lewis Russell +sivadeilra +cui fliter +Mohammed Keyvanzadeh diff --git a/deps/uv/CMakeLists.txt b/deps/uv/CMakeLists.txt index 7f466826c50337..93733dd0478343 100644 --- a/deps/uv/CMakeLists.txt +++ b/deps/uv/CMakeLists.txt @@ -1,8 +1,13 @@ cmake_minimum_required(VERSION 3.4) -project(libuv LANGUAGES C) -cmake_policy(SET CMP0057 NEW) # Enable IN_LIST operator -cmake_policy(SET CMP0064 NEW) # Support if (TEST) operator +if(POLICY CMP0091) + cmake_policy(SET CMP0091 NEW) # Enable MSVC_RUNTIME_LIBRARY setting +endif() +if(POLICY CMP0092) + cmake_policy(SET CMP0092 NEW) # disable /W3 warning, if possible +endif() + +project(libuv LANGUAGES C) list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake") @@ -17,9 +22,13 @@ set(CMAKE_C_STANDARD_REQUIRED ON) set(CMAKE_C_EXTENSIONS 
ON) set(CMAKE_C_STANDARD 90) +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +option(LIBUV_BUILD_SHARED "Build shared lib" ON) + cmake_dependent_option(LIBUV_BUILD_TESTS "Build the unit tests when BUILD_TESTING is enabled and we are the root project" ON - "BUILD_TESTING;CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR" OFF) + "BUILD_TESTING;LIBUV_BUILD_SHARED;CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR" OFF) cmake_dependent_option(LIBUV_BUILD_BENCH "Build the benchmarks when building unit tests and we are the root project" ON "LIBUV_BUILD_TESTS" OFF) @@ -27,28 +36,61 @@ cmake_dependent_option(LIBUV_BUILD_BENCH # Qemu Build option(QEMU "build for qemu" OFF) if(QEMU) - add_definitions(-D__QEMU__=1) + list(APPEND uv_defines __QEMU__=1) endif() +# Note: these are mutually exclusive. option(ASAN "Enable AddressSanitizer (ASan)" OFF) +option(MSAN "Enable MemorySanitizer (MSan)" OFF) option(TSAN "Enable ThreadSanitizer (TSan)" OFF) +option(UBSAN "Enable UndefinedBehaviorSanitizer (UBSan)" OFF) -if((ASAN OR TSAN) AND NOT (CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang")) - message(SEND_ERROR "Sanitizer support requires clang or gcc. Try again with -DCMAKE_C_COMPILER.") +if(MSAN AND NOT CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang") + message(SEND_ERROR "MemorySanitizer requires clang. Try again with -DCMAKE_C_COMPILER=clang") endif() if(ASAN) - add_definitions(-D__ASAN__=1) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=address") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address") + list(APPEND uv_defines __ASAN__=1) + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=address") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address") + elseif(MSVC) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=address") + else() + message(SEND_ERROR "AddressSanitizer support requires clang, gcc, or msvc. Try again with -DCMAKE_C_COMPILER.") + endif() +endif() + +if(MSAN) + list(APPEND uv_defines __MSAN__=1) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=memory") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=memory") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=memory") endif() if(TSAN) - add_definitions(-D__TSAN__=1) - set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") - set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") + list(APPEND uv_defines __TSAN__=1) + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=thread") + else() + message(SEND_ERROR "ThreadSanitizer support requires clang or gcc. 
Try again with -DCMAKE_C_COMPILER.") + endif() +endif() + +if(UBSAN) + list(APPEND uv_defines __UBSAN__=1) + if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|GNU|Clang") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") + set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=undefined") + elseif(MSVC) + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /fsanitize=undefined") + else() + message(SEND_ERROR "UndefinedBehaviorSanitizer support requires clang, gcc, or msvc. Try again with -DCMAKE_C_COMPILER.") + endif() endif() # Compiler check @@ -126,6 +168,7 @@ set(uv_sources src/random.c src/strscpy.c src/strtok.c + src/thread-common.c src/threadpool.c src/timer.c src/uv-common.c @@ -140,7 +183,10 @@ if(WIN32) advapi32 iphlpapi userenv - ws2_32) + ws2_32 + dbghelp + ole32 + uuid) list(APPEND uv_sources src/win/async.c src/win/core.c @@ -216,15 +262,11 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Android") list(APPEND uv_defines _GNU_SOURCE) list(APPEND uv_libraries dl) list(APPEND uv_sources - src/unix/linux-core.c - src/unix/linux-inotify.c - src/unix/linux-syscalls.c + src/unix/linux.c src/unix/procfs-exepath.c - src/unix/pthread-fixes.c src/unix/random-getentropy.c src/unix/random-getrandom.c - src/unix/random-sysctl-linux.c - src/unix/epoll.c) + src/unix/random-sysctl-linux.c) endif() if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "Android|Linux") @@ -270,22 +312,14 @@ if(CMAKE_SYSTEM_NAME STREQUAL "GNU") src/unix/hurd.c) endif() -if(CMAKE_SYSTEM_NAME STREQUAL "kFreeBSD") - list(APPEND uv_defines _GNU_SOURCE) - list(APPEND uv_libraries dl freebsd-glue) -endif() - if(CMAKE_SYSTEM_NAME STREQUAL "Linux") list(APPEND uv_defines _GNU_SOURCE _POSIX_C_SOURCE=200112) list(APPEND uv_libraries dl rt) list(APPEND uv_sources - src/unix/linux-core.c - src/unix/linux-inotify.c - src/unix/linux-syscalls.c + src/unix/linux.c src/unix/procfs-exepath.c src/unix/random-getrandom.c - src/unix/random-sysctl-linux.c - src/unix/epoll.c) + src/unix/random-sysctl-linux.c) endif() if(CMAKE_SYSTEM_NAME STREQUAL "NetBSD") @@ -316,7 +350,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS390") list(APPEND uv_defines _XOPEN_SOURCE=600) list(APPEND uv_defines _XOPEN_SOURCE_EXTENDED) list(APPEND uv_sources - src/unix/pthread-fixes.c src/unix/os390.c src/unix/os390-syscalls.c src/unix/os390-proctitle.c) @@ -354,6 +387,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS400") endif() if(CMAKE_SYSTEM_NAME STREQUAL "SunOS") + if(CMAKE_SYSTEM_VERSION STREQUAL "5.10") + list(APPEND uv_defines SUNOS_NO_IFADDRS) + list(APPEND uv_libraries rt) + endif() list(APPEND uv_defines __EXTENSIONS__ _XOPEN_SOURCE=500 _REENTRANT) list(APPEND uv_libraries kstat nsl sendfile socket) list(APPEND uv_sources @@ -388,25 +425,42 @@ if(APPLE OR CMAKE_SYSTEM_NAME MATCHES "DragonFly|FreeBSD|Linux|NetBSD|OpenBSD") list(APPEND uv_test_libraries util) endif() -add_library(uv SHARED ${uv_sources}) -target_compile_definitions(uv - INTERFACE - USING_UV_SHARED=1 - PRIVATE - BUILDING_UV_SHARED=1 - ${uv_defines}) -target_compile_options(uv PRIVATE ${uv_cflags}) -target_include_directories(uv - PUBLIC - $ - $ - PRIVATE - $) -if(CMAKE_SYSTEM_NAME STREQUAL "OS390") - target_include_directories(uv PUBLIC $) - set_target_properties(uv PROPERTIES LINKER_LANGUAGE CXX) +if(CYGWIN OR MSYS) + list(APPEND uv_defines _GNU_SOURCE) + list(APPEND uv_sources + src/unix/cygwin.c + src/unix/bsd-ifaddrs.c + src/unix/no-fsevents.c + src/unix/no-proctitle.c + 
src/unix/posix-hrtime.c + src/unix/posix-poll.c + src/unix/procfs-exepath.c + src/unix/sysinfo-loadavg.c + src/unix/sysinfo-memory.c) +endif() + +if(LIBUV_BUILD_SHARED) + add_library(uv SHARED ${uv_sources}) + target_compile_definitions(uv + INTERFACE + USING_UV_SHARED=1 + PRIVATE + BUILDING_UV_SHARED=1 + ${uv_defines}) + target_compile_options(uv PRIVATE ${uv_cflags}) + target_include_directories(uv + PUBLIC + $ + $ + PRIVATE + $) + if(CMAKE_SYSTEM_NAME STREQUAL "OS390") + target_include_directories(uv PUBLIC $) + set_target_properties(uv PROPERTIES LINKER_LANGUAGE CXX) + endif() + target_link_libraries(uv ${uv_libraries}) + set_target_properties(uv PROPERTIES OUTPUT_NAME "uv") endif() -target_link_libraries(uv ${uv_libraries}) add_library(uv_a STATIC ${uv_sources}) target_compile_definitions(uv_a PRIVATE ${uv_defines}) @@ -422,6 +476,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL "OS390") set_target_properties(uv_a PROPERTIES LINKER_LANGUAGE CXX) endif() target_link_libraries(uv_a ${uv_libraries}) +set_target_properties(uv_a PROPERTIES OUTPUT_NAME "uv") +if(MSVC) + set_target_properties(uv_a PROPERTIES PREFIX "lib") +endif() if(LIBUV_BUILD_TESTS) # Small hack: use ${uv_test_sources} now to get the runner skeleton, @@ -584,6 +642,7 @@ if(LIBUV_BUILD_TESTS) test/test-tcp-rst.c test/test-tcp-shutdown-after-write.c test/test-tcp-try-write.c + test/test-tcp-write-in-a-row.c test/test-tcp-try-write-error.c test/test-tcp-unexpected-read.c test/test-tcp-write-after-connect.c @@ -592,6 +651,7 @@ if(LIBUV_BUILD_TESTS) test/test-tcp-write-to-half-open-connection.c test/test-tcp-writealot.c test/test-test-macros.c + test/test-thread-affinity.c test/test-thread-equal.c test/test-thread.c test/test-threadpool-cancel.c @@ -624,6 +684,7 @@ if(LIBUV_BUILD_TESTS) test/test-udp-sendmmsg-error.c test/test-udp-send-unreachable.c test/test-udp-try-send.c + test/test-udp-recv-in-a-row.c test/test-uname.c test/test-walk-handles.c test/test-watcher-cross-stop.c) @@ -667,27 +728,36 @@ string(REPLACE ";" " " LIBS "${LIBS}") file(STRINGS configure.ac configure_ac REGEX ^AC_INIT) string(REGEX MATCH "([0-9]+)[.][0-9]+[.][0-9]+" PACKAGE_VERSION "${configure_ac}") set(UV_VERSION_MAJOR "${CMAKE_MATCH_1}") -# The version in the filename is mirroring the behaviour of autotools. 
-set_target_properties(uv PROPERTIES - VERSION ${UV_VERSION_MAJOR}.0.0 - SOVERSION ${UV_VERSION_MAJOR}) + set(includedir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}) set(libdir ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) set(prefix ${CMAKE_INSTALL_PREFIX}) -configure_file(libuv.pc.in libuv.pc @ONLY) configure_file(libuv-static.pc.in libuv-static.pc @ONLY) install(DIRECTORY include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_DOCDIR}) -install(FILES ${PROJECT_BINARY_DIR}/libuv.pc ${PROJECT_BINARY_DIR}/libuv-static.pc +install(FILES LICENSE-extra DESTINATION ${CMAKE_INSTALL_DOCDIR}) +install(FILES ${PROJECT_BINARY_DIR}/libuv-static.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) -install(TARGETS uv EXPORT libuvConfig - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) install(TARGETS uv_a EXPORT libuvConfig ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) -install(EXPORT libuvConfig DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv) +install(EXPORT libuvConfig + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libuv + NAMESPACE libuv::) + +if(LIBUV_BUILD_SHARED) + # The version in the filename is mirroring the behaviour of autotools. + set_target_properties(uv PROPERTIES + VERSION ${UV_VERSION_MAJOR}.0.0 + SOVERSION ${UV_VERSION_MAJOR}) + configure_file(libuv.pc.in libuv.pc @ONLY) + install(FILES ${PROJECT_BINARY_DIR}/libuv.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) + install(TARGETS uv EXPORT libuvConfig + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) +endif() if(MSVC) set(CMAKE_DEBUG_POSTFIX d) diff --git a/deps/uv/ChangeLog b/deps/uv/ChangeLog index cfbfed4fa6d6df..559c1442205ffc 100644 --- a/deps/uv/ChangeLog +++ b/deps/uv/ChangeLog @@ -1,4 +1,297 @@ -2022.07.12, Version 1.44.2 (Stable) +2023.05.19, Version 1.45.0 (Stable) + +Changes since version 1.44.2: + +* win: remove stdint-msvc2008.h (Ben Noordhuis) + +* android: remove pthread-fixes.c (Ben Noordhuis) + +* build: enable MSVC_RUNTIME_LIBRARY setting (自发对称破缺) + +* unix: switch to c11 atomics (Ben Noordhuis) + +* unix: don't accept() connections in a loop (Ben Noordhuis) + +* win: fix off-by-1 buffer overrun in uv_exepath() (Ben Noordhuis) + +* build: switch ci from macos-10.15 to macos-11 (Ben Noordhuis) + +* win: fix thread race in uv_cwd() and uv_chdir() (Ben Noordhuis) + +* unix,win: remove UV_HANDLE_SHUTTING flag (Santiago Gimeno) + +* win: support Windows 11 in uv_os_uname() (Luan Devecchi) + +* unix: fix uv_getrusage() ru_maxrss reporting (Ben Noordhuis) + +* doc: add note about offset -1 in uv_fs_read/write (Steven Schveighoffer) + +* test: fix musl libc.a dlerror() test expectation (Ben Noordhuis) + +* kqueue: DRY file descriptor deletion logic (Ben Noordhuis) + +* linux: teach uv_get_constrained_memory() cgroupsv2 (Ben Noordhuis) + +* build: upgrade qemu-user-static package (Ben Noordhuis) + +* linux: move epoll.c back into linux-core.c (Ben Noordhuis) + +* unix: remove pre-macos 10.8 compatibility hack (Ben Noordhuis) + +* unix,win: fix memory leak in uv_fs_scandir() (Ben Noordhuis) + +* build: restore qemu download logic (Ben Noordhuis) + +* win: fix uv__pipe_accept memory leak (number201724) + +* doc: update LINKS.md (Daniel) + +* unix: simplify atomic op in uv_tty_reset_mode() (Ben Noordhuis) + +* build: add LIBUV_BUILD_SHARED cmake option (Christian Clason) + +* linux: remove unused or 
obsolete syscall wrappers (Ben Noordhuis) + +* linux: merge files back into single file (Ben Noordhuis) + +* stream: process more than one write req per loop tick (ywave620) + +* unix,win: give thread pool threads an 8 MB stack (Ben Noordhuis) + +* build: add MemorySanitizer (MSAN) support (Ben Noordhuis) + +* doc: add uv_poll_cb status==UV_EBADF note (jensbjorgensen) + +* build: support AddressSanitizer on MSVC (Jameson Nash) + +* win,pipe: improve method of obtaining pid for ipc (number201724) + +* thread: add support for affinity (daomingq) + +* include: map ENODATA error code (Ben Noordhuis) + +* build: remove bashism from autogen.sh (Santiago Gimeno) + +* win,tcp,udp: remove "active streams" optimization (Saúl Ibarra Corretgé) + +* win: drop code checking for Windows XP / Server 2k3 (Saúl Ibarra Corretgé) + +* unix,win: fix 'sprintf' is deprecated warning (twosee) + +* doc: mention close_cb can be NULL (Qix) + +* win: optimize udp receive performance (ywave620) + +* win: fix an incompatible types warning (twosee) + +* doc: document 0 return value for free/total memory (Ben Noordhuis) + +* darwin: use hw.cpufrequency again for frequency info (Jameson Nash) + +* win,test: change format of TEST_PIPENAME's (Santiago Gimeno) + +* win,pipe: fixes in uv_pipe_connect() (Santiago Gimeno) + +* misc: fix return value of memory functions (theanarkh) + +* src: add new metrics APIs (Trevor Norris) + +* thread: add uv_thread_getcpu() (daomingq) + +* build: don't use ifaddrs.h on solaris 10 (Edward Humes) + +* unix,win: add uv_get_available_memory() (Tim Besard) + +* test: fix -Wunused-but-set-variable warnings (Ben Noordhuis) + +* doc: bump min supported linux and freebsd versions (Ben Noordhuis) + +* Add Socket Runtime to the LINKS.md (Sergey Rubanov) + +* unix: drop kfreebsd support (Ben Noordhuis) + +* win: fix fstat for pipes and character files (Stefan Stojanovic) + +* win: fix -Wunused-variable warning (Ben Noordhuis) + +* win: fix -Wunused-function warning (Ben Noordhuis) + +* build: drop qemu-alpha from ci matrix (Ben Noordhuis) + +* win: move child_stdio_buffer out of uv_process_t (Santiago Gimeno) + +* test: fix some unreachable code warnings (Santiago Gimeno) + +* linux: simplify uv_uptime() (Ben Noordhuis) + +* test: unflake fs_event_watch_dir test (Ben Noordhuis) + +* darwin: remove unused fsevents symbol lookups (Ben Noordhuis) + +* build: add define guard around UV_EXTERN (Zvicii) + +* build: add UndefinedBehaviorSanitizer support (Ben Noordhuis) + +* build: enable platform_output test on qemu (Ben Noordhuis) + +* linux: handle cpu hotplugging in uv_cpu_info() (Ben Noordhuis) + +* build: remove unnecessary policy setting (dundargoc) + +* docs: add vcpkg instruction step (Jack·Boos·Yu) + +* win,fs: fix readlink errno for a non-symlink file (Darshan Sen) + +* misc: extend getpw to take uid as an argument (Jameson Nash) + +* unix,win: use static_assert when available (Ben Noordhuis) + +* docs: delete code Makefile (Jameson Nash) + +* docs: add CI for docs PRs (Jameson Nash) + +* docs: update Sphinx version on RTD (Jameson Nash) + +* doc: clean up license file (Ben Noordhuis) + +* test: fix some warnings when compiling tests (panran) + +* build,win: add mingw-w64 CI configuration (Jameson Nash) + +* build: add CI for distcheck (Jameson Nash) + +* unix: remove busy loop from uv_async_send (Jameson Nash) + +* doc: document uv_fs_cb type (Tamás Bálint Misius) + +* build: Improve build by cmake for Cygwin (erw7) + +* build: add libuv:: namespace to libuvConfig.cmake (AJ Heller) + +* test: fix 
ThreadSanitizer thread leak warning (Ben Noordhuis) + +* test: fix ThreadSanitizer data race warning (Ben Noordhuis) + +* test: fix ThreadSanitizer data race warning (Ben Noordhuis) + +* test: fix ThreadSanitizer data race warning (Ben Noordhuis) + +* test: cond-skip fork_threadpool_queue_work_simple (Ben Noordhuis) + +* test: cond-skip signal_multiple_loops (Ben Noordhuis) + +* test: cond-skip tcp_writealot (Ben Noordhuis) + +* build: promote tsan ci to must-pass (Ben Noordhuis) + +* build: add CI for OpenBSD and FreeBSD (James McCoy) + +* build,test: fix distcheck errors (Jameson Nash) + +* test: remove bad tty window size assumption (Ben Noordhuis) + +* darwin,process: feed kevent the signal to reap children (Jameson Nash) + +* unix: abort on clock_gettime() error (Ben Noordhuis) + +* test: remove timing-sensitive check (Ben Noordhuis) + +* unix: DRY and fix tcp bind error path (Jameson Nash) + +* macos: fix fsevents thread race conditions (Ben Noordhuis) + +* win: fix leak in uv_chdir (Trevor Norris) + +* test: make valgrind happy (Trevor Norris) + +* barrier: wait for prior out before next in (Jameson Nash) + +* test: fix visual studio 2015 build error (Ben Noordhuis) + +* linux: fix ceph copy error truncating readonly files (Bruno Passeri) + +* test: silence more valgrind warnings (Trevor Norris) + +* doc: add entries to LINKS.md (Trevor Norris) + +* win,unix: change execution order of timers (Trevor Norris) + +* doc: add trevnorris to maintainers (Trevor Norris) + +* linux: remove epoll_pwait() emulation code path (Ben Noordhuis) + +* linux: replace unsafe macro with inline function (Ben Noordhuis) + +* linux: remove arm oabi support (Ben Noordhuis) + +* unix,sunos: SO_REUSEPORT not valid on all sockets (Stacey Marshall) + +* doc: consistent single backquote in misc.rst (Jason Zhang) + +* src: switch to use C11 atomics where available (Trevor Norris) + +* test: don't use static buffer for formatting (Ben Noordhuis) + +* linux: introduce io_uring support (Ben Noordhuis) + +* linux: fix academic valgrind warning (Ben Noordhuis) + +* test: disable signal test under ASan and MSan (Ben Noordhuis) + +* linux: add IORING_OP_OPENAT support (Ben Noordhuis) + +* linux: add IORING_OP_CLOSE support (Ben Noordhuis) + +* linux: remove bug workaround for obsolete kernels (Ben Noordhuis) + +* doc: update active maintainers list (Ben Noordhuis) + +* test: add ASSERT_OK (Trevor Norris) + +* src: fix events/events_waiting metrics counter (Trevor Norris) + +* unix,win: add uv_clock_gettime() (Ben Noordhuis) + +* build: remove freebsd and openbsd buildbots (Ben Noordhuis) + +* win: fix race condition in uv__init_console() (sivadeilra) + +* linux: fix logic bug in sqe ring space check (Ben Noordhuis) + +* linux: use io_uring to batch epoll_ctl calls (Ben Noordhuis) + +* macos: update minimum supported version (Santiago Gimeno) + +* docs: fix some typos (cui fliter) + +* unix: use memcpy() instead of type punning (Ben Noordhuis) + +* test: add additional assert (Mohammed Keyvanzadeh) + +* build: export compile_commands.json (Lewis Russell) + +* win,process: write minidumps when sending SIGQUIT (Elliot Saba) + +* unix: constrained_memory should return UINT64_MAX (Tim Besard) + +* unix: handle CQ overflow in iou ring (Santiago Gimeno) + +* unix: remove clang compiler warning pragmas (Ben Noordhuis) + +* win: fix mingw build (gengjiawen) + +* test: fix -Wbool-compare compiler warning (Ben Noordhuis) + +* win: define MiniDumpWithAvxXStateContext always (Santiago Gimeno) + +* freebsd: hard-code UV_ENODATA 
definition (Santiago Gimeno) + +* linux: work around EOWNERDEAD io_uring kernel bug (Ben Noordhuis) + +* linux: fix WRITEV with lots of bufs using io_uring (Santiago Gimeno) + + +2022.07.12, Version 1.44.2 (Stable), 0c1fa696aa502eb749c2c4735005f41ba00a27b8 Changes since version 1.44.1: diff --git a/deps/uv/LICENSE b/deps/uv/LICENSE index eb126dab3aab52..6566365d4f2380 100644 --- a/deps/uv/LICENSE +++ b/deps/uv/LICENSE @@ -1,6 +1,3 @@ -libuv is licensed for use as follows: - -==== Copyright (c) 2015-present libuv project contributors. Permission is hereby granted, free of charge, to any person obtaining a copy @@ -20,47 +17,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -==== - -This license applies to parts of libuv originating from the -https://github.com/joyent/libuv repository: - -==== - -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - -==== - -This license applies to all parts of libuv that are not externally -maintained libraries. - -The externally maintained libraries used by libuv are: - - - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license. - - - inet_pton and inet_ntop implementations, contained in src/inet.c, are - copyright the Internet Systems Consortium, Inc., and licensed under the ISC - license. - - - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. Three - clause BSD license. - - - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB. - Three clause BSD license. diff --git a/deps/uv/LICENSE-extra b/deps/uv/LICENSE-extra new file mode 100644 index 00000000000000..7d8ee65fce626e --- /dev/null +++ b/deps/uv/LICENSE-extra @@ -0,0 +1,36 @@ +This license applies to parts of libuv originating from the +https://github.com/joyent/libuv repository: + +==== + +Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to +deal in the Software without restriction, including without limitation the +rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +IN THE SOFTWARE. + +==== + +This license applies to all parts of libuv that are not externally +maintained libraries. + +The externally maintained libraries used by libuv are: + + - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license. + + - inet_pton and inet_ntop implementations, contained in src/inet.c, are + copyright the Internet Systems Consortium, Inc., and licensed under the ISC + license. diff --git a/deps/uv/LINKS.md b/deps/uv/LINKS.md index b8204e56e160d2..c2d79f8bb7ad3d 100644 --- a/deps/uv/LINKS.md +++ b/deps/uv/LINKS.md @@ -1,8 +1,11 @@ ### Apps / VM +* [AliceO2](https://github.com/AliceO2Group/AliceO2): The framework and detector specific code for the reconstruction, calibration and simulation for the ALICE experiment at CERN. +* [Beam](https://github.com/BeamMW/beam): A scalable, confidential cryptocurrency based on the Mimblewimble protocol. * [BIND 9](https://bind.isc.org/): DNS software system including an authoritative server, a recursive resolver and related utilities. * [cjdns](https://github.com/cjdelisle/cjdns): Encrypted self-configuring network/VPN routing engine * [clearskies_core](https://github.com/larroy/clearskies_core): Clearskies file synchronization program. (C++11) * [CMake](https://cmake.org) open-source, cross-platform family of tools designed to build, test and package software +* [Cocos-Engine](https://github.com/cocos/cocos-engine): The runtime framework for Cocos Creator editor. * [Coherence](https://github.com/liesware/coherence/): Cryptographic server for modern web apps. * [DPS-For-IoT](https://github.com/intel/dps-for-iot/wiki): Fully distributed publish/subscribe protocol. * [HashLink](https://github.com/HaxeFoundation/hashlink): Haxe run-time with libuv support included. @@ -10,7 +13,7 @@ * [H2O](https://github.com/h2o/h2o): An optimized HTTP server with support for HTTP/1.x and HTTP/2. * [Igropyr](https://github.com/guenchi/Igropyr): a async Scheme http server base on libuv. 
* [Julia](http://julialang.org/): Scientific computing programming language -* [Kestrel](https://github.com/aspnet/AspNetCore/tree/master/src/Servers/Kestrel): web server (C# + libuv + [ASP.NET Core](http://github.com/aspnet)) +* [Kestrel](https://github.com/dotnet/aspnetcore/tree/main/src/Servers/Kestrel): web server (C# + libuv + [ASP.NET Core](http://github.com/aspnet)) * [Knot DNS Resolver](https://www.knot-resolver.cz/): A minimalistic DNS caching resolver * [Lever](http://leverlanguage.com): runtime, libuv at the 0.9.0 release * [libnode](https://github.com/plenluno/libnode): C++ implementation of Node.js @@ -30,8 +33,10 @@ * [phastlight](https://github.com/phastlight/phastlight): Command line tool and web server written in PHP 5.3+ inspired by Node.js * [pilight](https://www.pilight.org/): home automation ("domotica") * [pixie](https://github.com/pixie-lang/pixie): clojure-inspired lisp with a tracing JIT +* [Pixie-io](https://github.com/pixie-io/pixie): Open-source observability tool for Kubernetes applications. * [potion](https://github.com/perl11/potion)/[p2](https://github.com/perl11/p2): runtime * [racer](https://libraries.io/rubygems/racer): Ruby web server written as an C extension +* [Socket Runtime](https://sockets.sh): A runtime for creating native cross-platform software on mobile and desktop using HTML, CSS, and JavaScript * [spider-gazelle](https://github.com/cotag/spider-gazelle): Ruby web server using libuv bindings * [Suave](http://suave.io/): A simple web development F# library providing a lightweight web server and a set of combinators to manipulate route flow and task composition * [Swish](https://github.com/becls/swish/): Concurrency engine with Erlang-like concepts. Includes a web server. @@ -39,6 +44,7 @@ * [Urbit](http://urbit.org): runtime * [uv_callback](https://github.com/litesync/uv_callback) libuv thread communication * [uvloop](https://github.com/MagicStack/uvloop): Ultra fast implementation of python's asyncio event loop on top of libuv +* [WPILib](https://github.com/wpilibsuite/allwpilib): Libraries for creating robot programs for the roboRIO. * [Wren CLI](https://github.com/wren-lang/wren-cli): For io, process, scheduler and timer modules ### Other @@ -59,6 +65,7 @@ * [lluv](https://github.com/moteus/lua-lluv) * C++11 * [uvpp](https://github.com/larroy/uvpp) - Not complete, exposes very few aspects of `libuv` + * [nsuv](https://github.com/nodesource/nsuv) - Template wrapper focused on enforcing compile-time type safety when propagating data * C++17 * [uvw](https://github.com/skypjack/uvw) - Header-only, event based, tiny and easy to use *libuv* wrapper in modern C++. 
* Python diff --git a/deps/uv/MAINTAINERS.md b/deps/uv/MAINTAINERS.md index 477901f8680f9a..ff8be88b7b7cd5 100644 --- a/deps/uv/MAINTAINERS.md +++ b/deps/uv/MAINTAINERS.md @@ -4,12 +4,9 @@ libuv is currently managed by the following individuals: * **Ben Noordhuis** ([@bnoordhuis](https://github.com/bnoordhuis)) - GPG key: D77B 1E34 243F BAF0 5F8E 9CC3 4F55 C8C8 46AB 89B9 (pubkey-bnoordhuis) -* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus)) * **Colin Ihrig** ([@cjihrig](https://github.com/cjihrig)) - GPG key: 94AE 3667 5C46 4D64 BAFA 68DD 7434 390B DBE9 B9C5 (pubkey-cjihrig) - GPG key: 5735 3E0D BDAA A7E8 39B6 6A1A FF47 D5E4 AD8B 4FDC (pubkey-cjihrig-kb) -* **Fedor Indutny** ([@indutny](https://github.com/indutny)) - - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny) * **Jameson Nash** ([@vtjnash](https://github.com/vtjnash)) - GPG key: AEAD 0A4B 6867 6775 1A0E 4AEF 34A2 5FB1 2824 6514 (pubkey-vtjnash) - GPG key: CFBB 9CA9 A5BE AFD7 0E2B 3C5A 79A6 7C55 A367 9C8B (pubkey2022-vtjnash) @@ -22,11 +19,16 @@ libuv is currently managed by the following individuals: - GPG key: 612F 0EAD 9401 6223 79DF 4402 F28C 3C8D A33C 03BE (pubkey-santigimeno) * **Saúl Ibarra Corretgé** ([@saghul](https://github.com/saghul)) - GPG key: FDF5 1936 4458 319F A823 3DC9 410E 5553 AE9B C059 (pubkey-saghul) +* **Trevor Norris** ([@trevnorris](https://github.com/trevnorris)) + - GPG key: AEFC 279A 0C93 0676 7E58 29A1 251C A676 820D C7F3 (pubkey-trevnorris) ## Project Maintainers emeriti * **Anna Henningsen** ([@addaleax](https://github.com/addaleax)) * **Bartosz Sosnowski** ([@bzoz](https://github.com/bzoz)) +* **Bert Belder** ([@piscisaureus](https://github.com/piscisaureus)) +* **Fedor Indutny** ([@indutny](https://github.com/indutny)) + - GPG key: AF2E EA41 EC34 47BF DD86 FED9 D706 3CCE 19B7 E890 (pubkey-indutny) * **Imran Iqbal** ([@imran-iq](https://github.com/imran-iq)) * **John Barboza** ([@jbarz](https://github.com/jbarz)) diff --git a/deps/uv/Makefile.am b/deps/uv/Makefile.am index 0c6d96598aed47..1dca3dd1f8a82e 100644 --- a/deps/uv/Makefile.am +++ b/deps/uv/Makefile.am @@ -38,6 +38,7 @@ libuv_la_SOURCES = src/fs-poll.c \ src/random.c \ src/strscpy.c \ src/strscpy.h \ + src/thread-common.c \ src/threadpool.c \ src/timer.c \ src/uv-data-getter-setters.c \ @@ -96,7 +97,6 @@ else # WINNT uvinclude_HEADERS += include/uv/unix.h AM_CPPFLAGS += -I$(top_srcdir)/src/unix libuv_la_SOURCES += src/unix/async.c \ - src/unix/atomic-ops.h \ src/unix/core.c \ src/unix/dl.c \ src/unix/fs.c \ @@ -110,7 +110,6 @@ libuv_la_SOURCES += src/unix/async.c \ src/unix/process.c \ src/unix/random-devurandom.c \ src/unix/signal.c \ - src/unix/spinlock.h \ src/unix/stream.c \ src/unix/tcp.c \ src/unix/thread.c \ @@ -122,11 +121,13 @@ endif # WINNT EXTRA_DIST = test/fixtures/empty_file \ test/fixtures/load_error.node \ test/fixtures/lorem_ipsum.txt \ + test/fixtures/one_file/one_file \ include \ docs \ img \ CONTRIBUTING.md \ LICENSE \ + LICENSE-extra \ README.md @@ -278,11 +279,13 @@ test_run_tests_SOURCES = test/blackhole-server.c \ test/test-tcp-writealot.c \ test/test-tcp-write-fail.c \ test/test-tcp-try-write.c \ + test/test-tcp-write-in-a-row.c \ test/test-tcp-try-write-error.c \ test/test-tcp-write-queue-order.c \ test/test-test-macros.c \ test/test-thread-equal.c \ test/test-thread.c \ + test/test-thread-affinity.c \ test/test-threadpool-cancel.c \ test/test-threadpool.c \ test/test-timer-again.c \ @@ -313,6 +316,7 @@ test_run_tests_SOURCES = test/blackhole-server.c \ 
test/test-udp-sendmmsg-error.c \ test/test-udp-send-unreachable.c \ test/test-udp-try-send.c \ + test/test-udp-recv-in-a-row.c \ test/test-uname.c \ test/test-walk-handles.c \ test/test-watcher-cross-stop.c @@ -393,7 +397,6 @@ endif if ANDROID libuv_la_CFLAGS += -D_GNU_SOURCE -libuv_la_SOURCES += src/unix/pthread-fixes.c endif if CYGWIN @@ -467,22 +470,14 @@ libuv_la_SOURCES += src/unix/bsd-ifaddrs.c \ src/unix/hurd.c endif -if KFREEBSD -libuv_la_CFLAGS += -D_GNU_SOURCE -endif - if LINUX uvinclude_HEADERS += include/uv/linux.h libuv_la_CFLAGS += -D_GNU_SOURCE -libuv_la_SOURCES += src/unix/linux-core.c \ - src/unix/linux-inotify.c \ - src/unix/linux-syscalls.c \ - src/unix/linux-syscalls.h \ +libuv_la_SOURCES += src/unix/linux.c \ src/unix/procfs-exepath.c \ src/unix/proctitle.c \ src/unix/random-getrandom.c \ - src/unix/random-sysctl-linux.c \ - src/unix/epoll.c + src/unix/random-sysctl-linux.c test_run_tests_LDFLAGS += -lutil endif @@ -546,8 +541,7 @@ libuv_la_CFLAGS += -D_UNIX03_THREADS \ -qXPLINK \ -qFLOAT=IEEE libuv_la_LDFLAGS += -qXPLINK -libuv_la_SOURCES += src/unix/pthread-fixes.c \ - src/unix/os390.c \ +libuv_la_SOURCES += src/unix/os390.c \ src/unix/os390-syscalls.c \ src/unix/proctitle.c endif diff --git a/deps/uv/README.md b/deps/uv/README.md index 06486febc28199..09e9bf10b6dc31 100644 --- a/deps/uv/README.md +++ b/deps/uv/README.md @@ -43,8 +43,11 @@ The ABI/API changes can be tracked [here](http://abi-laboratory.pro/tracker/time ## Licensing -libuv is licensed under the MIT license. Check the [LICENSE file](LICENSE). -The documentation is licensed under the CC BY 4.0 license. Check the [LICENSE-docs file](LICENSE-docs). +libuv is licensed under the MIT license. Check the [LICENSE](LICENSE) and +[LICENSE-extra](LICENSE-extra) files. + +The documentation is licensed under the CC BY 4.0 license. Check the +[LICENSE-docs file](LICENSE-docs). ## Community @@ -220,6 +223,15 @@ Make sure that you specify the architecture you wish to build for in the "ARCHS" flag. You can specify more than one by delimiting with a space (e.g. "x86_64 i386"). +### Install with vcpkg + +```bash +$ git clone https://github.com/microsoft/vcpkg.git +$ ./bootstrap-vcpkg.bat # for powershell +$ ./bootstrap-vcpkg.sh # for bash +$ ./vcpkg install libuv +``` + ### Running tests Some tests are timing sensitive. 
Relaxing test timeouts may be necessary diff --git a/deps/uv/SUPPORTED_PLATFORMS.md b/deps/uv/SUPPORTED_PLATFORMS.md index 0c1dd4e29fa8e0..8a435d2592e47f 100644 --- a/deps/uv/SUPPORTED_PLATFORMS.md +++ b/deps/uv/SUPPORTED_PLATFORMS.md @@ -2,10 +2,10 @@ | System | Support type | Supported versions | Notes | |---|---|---|---| -| GNU/Linux | Tier 1 | Linux >= 2.6.32 with glibc >= 2.12 | | -| macOS | Tier 1 | macOS >= 10.15 | Current and previous macOS release | +| GNU/Linux | Tier 1 | Linux >= 3.10 with glibc >= 2.17 | | +| macOS | Tier 1 | macOS >= 11 | Currently supported macOS releases | | Windows | Tier 1 | >= Windows 8 | VS 2015 and later are supported | -| FreeBSD | Tier 1 | >= 10 | | +| FreeBSD | Tier 2 | >= 12 | | | AIX | Tier 2 | >= 6 | Maintainers: @libuv/aix | | IBM i | Tier 2 | >= IBM i 7.2 | Maintainers: @libuv/ibmi | | z/OS | Tier 2 | >= V2R2 | Maintainers: @libuv/zos | diff --git a/deps/uv/autogen.sh b/deps/uv/autogen.sh index bfd8f3e6df0867..cf82cc634d4f95 100755 --- a/deps/uv/autogen.sh +++ b/deps/uv/autogen.sh @@ -17,7 +17,7 @@ set -eu cd `dirname "$0"` -if [ "${1:-dev}" == "release" ]; then +if [ "${1:-dev}" = "release" ]; then export LIBUV_RELEASE=true else export LIBUV_RELEASE=false diff --git a/deps/uv/cmake-toolchains/cross-mingw32.cmake b/deps/uv/cmake-toolchains/cross-mingw32.cmake new file mode 100644 index 00000000000000..3fe1dd69ec565d --- /dev/null +++ b/deps/uv/cmake-toolchains/cross-mingw32.cmake @@ -0,0 +1,17 @@ +if(NOT HOST_ARCH) + message(SEND_ERROR "-DHOST_ARCH required to be specified") +endif() + +list(APPEND CMAKE_TRY_COMPILE_PLATFORM_VARIABLES + HOST_ARCH + ) + +SET(CMAKE_SYSTEM_NAME Windows) +set(COMPILER_PREFIX "${HOST_ARCH}-w64-mingw32") +find_program(CMAKE_RC_COMPILER NAMES ${COMPILER_PREFIX}-windres) +find_program(CMAKE_C_COMPILER NAMES ${COMPILER_PREFIX}-gcc) +find_program(CMAKE_CXX_COMPILER NAMES ${COMPILER_PREFIX}-g++) + +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) diff --git a/deps/uv/configure.ac b/deps/uv/configure.ac index 82d1640c8e3afd..143ade35719316 100644 --- a/deps/uv/configure.ac +++ b/deps/uv/configure.ac @@ -13,7 +13,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
AC_PREREQ(2.57) -AC_INIT([libuv], [1.44.2], [https://github.com/libuv/libuv/issues]) +AC_INIT([libuv], [1.45.0], [https://github.com/libuv/libuv/issues]) AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/libuv-extra-automake-flags.m4]) m4_include([m4/as_case.m4]) @@ -61,8 +61,7 @@ AM_CONDITIONAL([ANDROID], [AS_CASE([$host_os],[linux-android*],[true], [false]) AM_CONDITIONAL([CYGWIN], [AS_CASE([$host_os],[cygwin*], [true], [false])]) AM_CONDITIONAL([DARWIN], [AS_CASE([$host_os],[darwin*], [true], [false])]) AM_CONDITIONAL([DRAGONFLY],[AS_CASE([$host_os],[dragonfly*], [true], [false])]) -AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[*freebsd*], [true], [false])]) -AM_CONDITIONAL([KFREEBSD], [AS_CASE([$host_os],[kfreebsd*], [true], [false])]) +AM_CONDITIONAL([FREEBSD], [AS_CASE([$host_os],[freebsd*], [true], [false])]) AM_CONDITIONAL([HAIKU], [AS_CASE([$host_os],[haiku], [true], [false])]) AM_CONDITIONAL([HURD], [AS_CASE([$host_os],[gnu*], [true], [false])]) AM_CONDITIONAL([LINUX], [AS_CASE([$host_os],[linux*], [true], [false])]) @@ -74,12 +73,12 @@ AM_CONDITIONAL([OS400], [AS_CASE([$host_os],[os400], [true], [false]) AM_CONDITIONAL([SUNOS], [AS_CASE([$host_os],[solaris*], [true], [false])]) AM_CONDITIONAL([WINNT], [AS_CASE([$host_os],[mingw*], [true], [false])]) AS_CASE([$host_os],[mingw*], [ - LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32" + LIBS="$LIBS -lws2_32 -lpsapi -liphlpapi -lshell32 -luserenv -luser32 -ldbghelp -lole32 -luuid" ]) -AS_CASE([$host_os], [netbsd*], [AC_CHECK_LIB([kvm], [kvm_open])]) -AS_CASE([$host_os], [kfreebsd*], [ - LIBS="$LIBS -lfreebsd-glue" +AS_CASE([$host_os], [solaris2.10], [ + CFLAGS="$CFLAGS -DSUNOS_NO_IFADDRS" ]) +AS_CASE([$host_os], [netbsd*], [AC_CHECK_LIB([kvm], [kvm_open])]) AS_CASE([$host_os], [haiku], [ LIBS="$LIBS -lnetwork" ]) @@ -88,4 +87,5 @@ AC_CONFIG_FILES([Makefile libuv.pc]) AC_CONFIG_LINKS([test/fixtures/empty_file:test/fixtures/empty_file]) AC_CONFIG_LINKS([test/fixtures/load_error.node:test/fixtures/load_error.node]) AC_CONFIG_LINKS([test/fixtures/lorem_ipsum.txt:test/fixtures/lorem_ipsum.txt]) +AC_CONFIG_LINKS([test/fixtures/one_file/one_file:test/fixtures/one_file/one_file]) AC_OUTPUT diff --git a/deps/uv/docs/requirements.txt b/deps/uv/docs/requirements.txt index 8386e0178fabac..b037de46595389 100644 --- a/deps/uv/docs/requirements.txt +++ b/deps/uv/docs/requirements.txt @@ -1,42 +1,27 @@ # primary -Sphinx==3.5.4 +sphinx==6.1.3 # dependencies -alabaster==0.7.12 -appdirs==1.4.3 -Babel==2.9.0 -CacheControl==0.12.6 -certifi==2019.11.28 -chardet==3.0.4 -colorama==0.4.3 -contextlib2==0.6.0 -distlib==0.3.0 -distro==1.4.0 -docutils==0.16 -html5lib==1.0.1 -idna==2.8 -imagesize==1.2.0 -ipaddr==2.2.0 -Jinja2==2.11.3 -lockfile==0.12.2 -MarkupSafe==1.1.1 -msgpack==0.6.2 -packaging==20.3 -pep517==0.8.2 -progress==1.5 -Pygments==2.8.1 -pyparsing==2.4.6 -pytoml==0.1.21 -pytz==2021.1 -requests==2.22.0 -retrying==1.3.3 -six==1.14.0 -snowballstemmer==2.1.0 -sphinxcontrib-applehelp==1.0.2 +alabaster==0.7.13 +Babel==2.11.0 +certifi==2022.12.7 +charset-normalizer==3.0.1 +docutils==0.19 +idna==3.4 +imagesize==1.4.1 +importlib-metadata==6.0.0 +Jinja2==3.1.2 +MarkupSafe==2.1.2 +packaging==23.0 +Pygments==2.14.0 +pytz==2022.7.1 +requests==2.28.2 +snowballstemmer==2.2.0 +sphinxcontrib-applehelp==1.0.3 sphinxcontrib-devhelp==1.0.2 -sphinxcontrib-htmlhelp==1.0.3 +sphinxcontrib-htmlhelp==2.0.0 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.3 -sphinxcontrib-serializinghtml==1.1.4 -urllib3==1.25.8 -webencodings==0.5.1 
+sphinxcontrib-serializinghtml==1.1.5 +urllib3==1.26.14 +zipp==3.11.0 diff --git a/deps/uv/docs/src/design.rst b/deps/uv/docs/src/design.rst index 0f5580c7e95211..5a20595c3b42ce 100644 --- a/deps/uv/docs/src/design.rst +++ b/deps/uv/docs/src/design.rst @@ -60,16 +60,15 @@ stages of a loop iteration: :align: center -#. The loop concept of 'now' is updated. The event loop caches the current time at the start of - the event loop tick in order to reduce the number of time-related system calls. +#. The loop concept of 'now' is initially set. + +#. Due timers are run if the loop was run with ``UV_RUN_DEFAULT``. All active timers scheduled + for a time before the loop's concept of *now* get their callbacks called. #. If the loop is *alive* an iteration is started, otherwise the loop will exit immediately. So, when is a loop considered to be *alive*? If a loop has active and ref'd handles, active requests or closing handles it's considered to be *alive*. -#. Due timers are run. All active timers scheduled for a time before the loop's concept of *now* - get their callbacks called. - #. Pending callbacks are called. All I/O callbacks are called right after polling for I/O, for the most part. There are cases, however, in which calling such a callback is deferred for the next loop iteration. If the previous iteration deferred any I/O callback it will be run at this point. @@ -101,9 +100,11 @@ stages of a loop iteration: #. Close callbacks are called. If a handle was closed by calling :c:func:`uv_close` it will get the close callback called. -#. Special case in case the loop was run with ``UV_RUN_ONCE``, as it implies forward progress. - It's possible that no I/O callbacks were fired after blocking for I/O, but some time has passed - so there might be timers which are due, those timers get their callbacks called. +#. The loop concept of 'now' is updated. + +#. Due timers are run. Note that 'now' is not updated again until the next loop iteration. + So if a timer became due while other timers were being processed, it won't be run until + the following event loop iteration. #. Iteration ends. If the loop was run with ``UV_RUN_NOWAIT`` or ``UV_RUN_ONCE`` modes the iteration ends and :c:func:`uv_run` will return. If the loop was run with ``UV_RUN_DEFAULT`` diff --git a/deps/uv/docs/src/fs.rst b/deps/uv/docs/src/fs.rst index 0bf2abed5e128a..891ee74c19d912 100644 --- a/deps/uv/docs/src/fs.rst +++ b/deps/uv/docs/src/fs.rst @@ -12,6 +12,12 @@ otherwise it will be performed asynchronously. All file operations are run on the threadpool. See :ref:`threadpool` for information on the threadpool size. +Starting with libuv v1.45.0, some file operations on Linux are handed off to +`io_uring ` when possible. Apart from +a (sometimes significant) increase in throughput there should be no change in +observable behavior. Libuv reverts to using its threadpool when the necessary +kernel features are unavailable or unsuitable. + .. note:: On Windows `uv_fs_*` functions use utf-8 encoding. @@ -24,7 +30,8 @@ Data types .. c:type:: uv_timespec_t - Portable equivalent of ``struct timespec``. + Y2K38-unsafe data type for storing times with nanosecond resolution. + Will be replaced with :c:type:`uv_timespec64_t` in libuv v2.0. :: @@ -160,6 +167,10 @@ Data types size_t nentries; } uv_dir_t; +.. c:type:: void (*uv_fs_cb)(uv_fs_t* req) + + Callback called when a request is completed asynchronously. + Public members ^^^^^^^^^^^^^^ @@ -218,7 +229,8 @@ API .. 
c:function:: int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) - Equivalent to :man:`preadv(2)`. + Equivalent to :man:`preadv(2)`. If the `offset` argument is `-1`, then + the current file offset is used and updated. .. warning:: On Windows, under non-MSVC environments (e.g. when GCC or Clang is used @@ -231,7 +243,8 @@ API .. c:function:: int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, const uv_buf_t bufs[], unsigned int nbufs, int64_t offset, uv_fs_cb cb) - Equivalent to :man:`pwritev(2)`. + Equivalent to :man:`pwritev(2)`. If the `offset` argument is `-1`, then + the current file offset is used and updated. .. warning:: On Windows, under non-MSVC environments (e.g. when GCC or Clang is used @@ -463,10 +476,6 @@ API The background story and some more details on these issues can be checked `here `_. - .. note:: - This function is not implemented on Windows XP and Windows Server 2003. - On these systems, UV_ENOSYS is returned. - .. versionadded:: 1.8.0 .. c:function:: int uv_fs_chown(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_uid_t uid, uv_gid_t gid, uv_fs_cb cb) diff --git a/deps/uv/docs/src/handle.rst b/deps/uv/docs/src/handle.rst index 0edb7d7adf23ed..e91d6e8fb2d906 100644 --- a/deps/uv/docs/src/handle.rst +++ b/deps/uv/docs/src/handle.rst @@ -153,6 +153,9 @@ API In-progress requests, like uv_connect_t or uv_write_t, are cancelled and have their callbacks called asynchronously with status=UV_ECANCELED. + `close_cb` can be `NULL` in cases where no cleanup or deallocation is + necessary. + .. c:function:: void uv_ref(uv_handle_t* handle) Reference the given handle. References are idempotent, that is, if a handle diff --git a/deps/uv/docs/src/metrics.rst b/deps/uv/docs/src/metrics.rst index 696c620d192f36..0141d03286b199 100644 --- a/deps/uv/docs/src/metrics.rst +++ b/deps/uv/docs/src/metrics.rst @@ -4,8 +4,46 @@ Metrics operations ====================== -libuv provides a metrics API to track the amount of time the event loop has -spent idle in the kernel's event provider. +libuv provides a metrics API to track various internal operations of the event +loop. + + +Data types +---------- + +.. c:type:: uv_metrics_t + + The struct that contains event loop metrics. It is recommended to retrieve + these metrics in a :c:type:`uv_prepare_cb` in order to make sure there are + no inconsistencies with the metrics counters. + + :: + + typedef struct { + uint64_t loop_count; + uint64_t events; + uint64_t events_waiting; + /* private */ + uint64_t* reserved[13]; + } uv_metrics_t; + + +Public members +^^^^^^^^^^^^^^ + +.. c:member:: uint64_t uv_metrics_t.loop_count + + Number of event loop iterations. + +.. c:member:: uint64_t uv_metrics_t.events + + Number of events that have been processed by the event handler. + +.. c:member:: uint64_t uv_metrics_t.events_waiting + + Number of events that were waiting to be processed when the event provider + was called. + API --- @@ -25,3 +63,9 @@ API :c:type:`UV_METRICS_IDLE_TIME`. .. versionadded:: 1.39.0 + +.. c:function:: int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) + + Copy the current set of event loop metrics to the ``metrics`` pointer. + + .. versionadded:: 1.45.0 diff --git a/deps/uv/docs/src/misc.rst b/deps/uv/docs/src/misc.rst index bae44814f193ed..8c3a00e934c97e 100644 --- a/deps/uv/docs/src/misc.rst +++ b/deps/uv/docs/src/misc.rst @@ -73,7 +73,8 @@ Data types .. c:type:: uv_timeval_t - Data type for storing times. 
+ Y2K38-unsafe data type for storing times with microsecond resolution. + Will be replaced with :c:type:`uv_timeval64_t` in libuv v2.0. :: @@ -84,7 +85,7 @@ Data types .. c:type:: uv_timeval64_t - Alternative data type for storing times. + Y2K38-safe data type for storing times with microsecond resolution. :: @@ -93,6 +94,28 @@ Data types int32_t tv_usec; } uv_timeval64_t; +.. c:type:: uv_timespec64_t + + Y2K38-safe data type for storing times with nanosecond resolution. + + :: + + typedef struct { + int64_t tv_sec; + int32_t tv_nsec; + } uv_timespec64_t; + +.. c:enum:: uv_clock_id + + Clock source for :c:func:`uv_clock_gettime`. + + :: + + typedef enum { + UV_CLOCK_MONOTONIC, + UV_CLOCK_REALTIME + } uv_clock_id; + .. c:type:: uv_rusage_t Data type for resource usage results. @@ -119,7 +142,10 @@ Data types } uv_rusage_t; Members marked with `(X)` are unsupported on Windows. - See :man:`getrusage(2)` for supported fields on Unix + See :man:`getrusage(2)` for supported fields on UNIX-like platforms. + + The maximum resident set size is reported in kilobytes, the unit most + platforms use natively. .. c:type:: uv_cpu_info_t @@ -211,7 +237,7 @@ API type of the stdio streams. For :man:`isatty(3)` equivalent functionality use this function and test - for ``UV_TTY``. + for `UV_TTY`. .. c:function:: int uv_replace_allocator(uv_malloc_func malloc_func, uv_realloc_func realloc_func, uv_calloc_func calloc_func, uv_free_func free_func) @@ -225,8 +251,8 @@ API after all resources have been freed and thus libuv doesn't reference any allocated memory chunk. - On success, it returns 0, if any of the function pointers is NULL it - returns UV_EINVAL. + On success, it returns 0, if any of the function pointers is `NULL` it + returns `UV_EINVAL`. .. warning:: There is no protection against changing the allocator multiple times. If the user changes it they are responsible for making @@ -362,6 +388,13 @@ API Frees the `cpu_infos` array previously allocated with :c:func:`uv_cpu_info`. +.. c:function:: int uv_cpumask_size(void) + + Returns the maximum size of the mask used for process/thread affinities, + or `UV_ENOTSUP` if affinities are not supported on the current platform. + + .. versionadded:: 1.45.0 + .. c:function:: int uv_interface_addresses(uv_interface_address_t** addresses, int* count) Gets address information about the network interfaces on the system. An @@ -541,18 +574,21 @@ API .. c:function:: uint64_t uv_get_free_memory(void) - Gets the amount of free memory available in the system, as reported by the kernel (in bytes). + Gets the amount of free memory available in the system, as reported by + the kernel (in bytes). Returns 0 when unknown. .. c:function:: uint64_t uv_get_total_memory(void) Gets the total amount of physical memory in the system (in bytes). + Returns 0 when unknown. .. c:function:: uint64_t uv_get_constrained_memory(void) - Gets the amount of memory available to the process (in bytes) based on + Gets the total amount of memory available to the process (in bytes) based on limits imposed by the OS. If there is no such constraint, or the constraint - is unknown, `0` is returned. Note that it is not unusual for this value to - be less than or greater than :c:func:`uv_get_total_memory`. + is unknown, `0` is returned. If there is a constraining mechanism, but there + is no constraint set, `UINT64_MAX` is returned. Note that it is not unusual + for this value to be less than or greater than :c:func:`uv_get_total_memory`. .. 
note:: This function currently only returns a non-zero value on Linux, based @@ -560,9 +596,23 @@ API .. versionadded:: 1.29.0 +.. c:function:: uint64_t uv_get_available_memory(void) + + Gets the amount of free memory that is still available to the process (in bytes). + This differs from :c:func:`uv_get_free_memory` in that it takes into account any + limits imposed by the OS. If there is no such constraint, or the constraint + is unknown, the amount returned will be identical to :c:func:`uv_get_free_memory`. + + .. note:: + This function currently only returns a value that is different from + what :c:func:`uv_get_free_memory` reports on Linux, based + on cgroups if it is present. + + .. versionadded:: 1.45.0 + .. c:function:: uint64_t uv_hrtime(void) - Returns the current high-resolution real time. This is expressed in + Returns the current high-resolution timestamp. This is expressed in nanoseconds. It is relative to an arbitrary time in the past. It is not related to the time of day and therefore not subject to clock drift. The primary use is for measuring performance between intervals. @@ -571,6 +621,19 @@ API Not every platform can support nanosecond resolution; however, this value will always be in nanoseconds. +.. c:function:: int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) + + Obtain the current system time from a high-resolution real-time or monotonic + clock source. + + The real-time clock counts from the UNIX epoch (1970-01-01) and is subject + to time adjustments; it can jump back in time. + + The monotonic clock counts from an arbitrary point in the past and never + jumps back in time. + + .. versionadded:: 1.45.0 + .. c:function:: void uv_print_all_handles(uv_loop_t* loop, FILE* stream) Prints all handles associated with the given `loop` to the given `stream`. diff --git a/deps/uv/docs/src/poll.rst b/deps/uv/docs/src/poll.rst index 93a101ec686c53..f501089279d55e 100644 --- a/deps/uv/docs/src/poll.rst +++ b/deps/uv/docs/src/poll.rst @@ -101,7 +101,9 @@ API with one of the `UV_E*` error codes (see :ref:`errors`). The user should not close the socket while the handle is active. If the user does that anyway, the callback *may* be called reporting an error status, but this is - **not** guaranteed. + **not** guaranteed. If `status == UV_EBADF` polling is discontinued for the + file handle and no further events will be reported. The user should + then call :c:func:`uv_close` on the handle. .. note:: Calling :c:func:`uv_poll_start` on a handle that is already active is diff --git a/deps/uv/docs/src/static/loop_iteration.png b/deps/uv/docs/src/static/loop_iteration.png index e769cf338b4456..1545f84a8dcd1c 100644 Binary files a/deps/uv/docs/src/static/loop_iteration.png and b/deps/uv/docs/src/static/loop_iteration.png differ diff --git a/deps/uv/docs/src/threading.rst b/deps/uv/docs/src/threading.rst index 7ca1d4b7a58b96..d379677a2113bd 100644 --- a/deps/uv/docs/src/threading.rst +++ b/deps/uv/docs/src/threading.rst @@ -88,6 +88,46 @@ Threads .. versionadded:: 1.26.0 +.. c:function:: int uv_thread_setaffinity(uv_thread_t* tid, char* cpumask, char* oldmask, size_t mask_size) + + Sets the specified thread's affinity to cpumask, which is specified in + bytes. Optionally returning the previous affinity setting in oldmask. + On Unix, uses :man:`pthread_getaffinity_np(3)` to get the affinity setting + and maps the cpu_set_t to bytes in oldmask. Then maps the bytes in cpumask + to a cpu_set_t and uses :man:`pthread_setaffinity_np(3)`. 
On Windows, maps + the bytes in cpumask to a bitmask and uses SetThreadAffinityMask() which + returns the previous affinity setting. + + The mask_size specifies the number of entries (bytes) in cpumask / oldmask, + and must be greater-than-or-equal-to :c:func:`uv_cpumask_size`. + + .. note:: + Thread affinity setting is not atomic on Windows. Unsupported on macOS. + + .. versionadded:: 1.45.0 + +.. c:function:: int uv_thread_getaffinity(uv_thread_t* tid, char* cpumask, size_t mask_size) + + Gets the specified thread's affinity setting. On Unix, this maps the + cpu_set_t returned by :man:`pthread_getaffinity_np(3)` to bytes in cpumask. + + The mask_size specifies the number of entries (bytes) in cpumask, + and must be greater-than-or-equal-to :c:func:`uv_cpumask_size`. + + .. note:: + Thread affinity getting is not atomic on Windows. Unsupported on macOS. + + .. versionadded:: 1.45.0 + +.. c:function:: int uv_thread_getcpu(void) + + Gets the CPU number on which the calling thread is running. + + .. note:: + Currently only implemented on Windows, Linux and FreeBSD. + + .. versionadded:: 1.45.0 + .. c:function:: uv_thread_t uv_thread_self(void) .. c:function:: int uv_thread_join(uv_thread_t *tid) .. c:function:: int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) diff --git a/deps/uv/docs/src/threadpool.rst b/deps/uv/docs/src/threadpool.rst index cf6cdc1be0f2c4..7cfa797314ca48 100644 --- a/deps/uv/docs/src/threadpool.rst +++ b/deps/uv/docs/src/threadpool.rst @@ -14,6 +14,9 @@ is 1024). .. versionchanged:: 1.30.0 the maximum UV_THREADPOOL_SIZE allowed was increased from 128 to 1024. +.. versionchanged:: 1.45.0 threads now have an 8 MB stack instead of the + (sometimes too low) platform default. + The threadpool is global and shared across all event loops. When a particular function makes use of the threadpool (i.e. when using :c:func:`uv_queue_work`) libuv preallocates and initializes the maximum number of threads allowed by diff --git a/deps/uv/docs/src/udp.rst b/deps/uv/docs/src/udp.rst index 009767d55dc74e..d7da95edd506e2 100644 --- a/deps/uv/docs/src/udp.rst +++ b/deps/uv/docs/src/udp.rst @@ -56,7 +56,7 @@ Data types /* * Indicates if IP_RECVERR/IPV6_RECVERR will be set when binding the handle. * This sets IP_RECVERR for IPv4 and IPV6_RECVERR for IPv6 UDP sockets on - * Linux. This stops the Linux kernel from supressing some ICMP error messages + * Linux. This stops the Linux kernel from suppressing some ICMP error messages * and enables full ICMP error reporting for faster failover. * This flag is no-op on platforms other than Linux. */ diff --git a/deps/uv/include/uv.h b/deps/uv/include/uv.h index ee1c94ccd38991..f3d70231ffa719 100644 --- a/deps/uv/include/uv.h +++ b/deps/uv/include/uv.h @@ -31,6 +31,7 @@ extern "C" { #error "Define either BUILDING_UV_SHARED or USING_UV_SHARED, not both." #endif +#ifndef UV_EXTERN #ifdef _WIN32 /* Windows - set up dll import/export decorators. 
*/ # if defined(BUILDING_UV_SHARED) @@ -50,17 +51,13 @@ extern "C" { #else # define UV_EXTERN /* nothing */ #endif +#endif /* UV_EXTERN */ #include "uv/errno.h" #include "uv/version.h" #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #if defined(_WIN32) # include "uv/win.h" @@ -152,6 +149,7 @@ extern "C" { XX(EFTYPE, "inappropriate file type or format") \ XX(EILSEQ, "illegal byte sequence") \ XX(ESOCKTNOSUPPORT, "socket type not supported") \ + XX(ENODATA, "no data available") \ #define UV_HANDLE_TYPE_MAP(XX) \ XX(ASYNC, async) \ @@ -247,9 +245,12 @@ typedef struct uv_cpu_info_s uv_cpu_info_t; typedef struct uv_interface_address_s uv_interface_address_t; typedef struct uv_dirent_s uv_dirent_t; typedef struct uv_passwd_s uv_passwd_t; +typedef struct uv_group_s uv_group_t; typedef struct uv_utsname_s uv_utsname_t; typedef struct uv_statfs_s uv_statfs_t; +typedef struct uv_metrics_s uv_metrics_t; + typedef enum { UV_LOOP_BLOCK_SIGNAL = 0, UV_METRICS_IDLE_TIME @@ -344,11 +345,32 @@ typedef void (*uv_random_cb)(uv_random_t* req, void* buf, size_t buflen); +typedef enum { + UV_CLOCK_MONOTONIC, + UV_CLOCK_REALTIME +} uv_clock_id; + +/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */ typedef struct { long tv_sec; long tv_nsec; } uv_timespec_t; +typedef struct { + int64_t tv_sec; + int32_t tv_nsec; +} uv_timespec64_t; + +/* XXX(bnoordhuis) not 2038-proof, https://github.com/libuv/libuv/issues/3864 */ +typedef struct { + long tv_sec; + long tv_usec; +} uv_timeval_t; + +typedef struct { + int64_t tv_sec; + int32_t tv_usec; +} uv_timeval64_t; typedef struct { uint64_t st_dev; @@ -1139,6 +1161,12 @@ struct uv_passwd_s { char* homedir; }; +struct uv_group_s { + char* groupname; + unsigned long gid; + char** members; +}; + struct uv_utsname_s { char sysname[256]; char release[256]; @@ -1184,16 +1212,6 @@ UV_EXTERN int uv_uptime(double* uptime); UV_EXTERN uv_os_fd_t uv_get_osfhandle(int fd); UV_EXTERN int uv_open_osfhandle(uv_os_fd_t os_fd); -typedef struct { - long tv_sec; - long tv_usec; -} uv_timeval_t; - -typedef struct { - int64_t tv_sec; - int32_t tv_usec; -} uv_timeval64_t; - typedef struct { uv_timeval_t ru_utime; /* user CPU time used */ uv_timeval_t ru_stime; /* system CPU time used */ @@ -1219,6 +1237,9 @@ UV_EXTERN int uv_os_homedir(char* buffer, size_t* size); UV_EXTERN int uv_os_tmpdir(char* buffer, size_t* size); UV_EXTERN int uv_os_get_passwd(uv_passwd_t* pwd); UV_EXTERN void uv_os_free_passwd(uv_passwd_t* pwd); +UV_EXTERN int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid); +UV_EXTERN int uv_os_get_group(uv_group_t* grp, uv_uid_t gid); +UV_EXTERN void uv_os_free_group(uv_group_t* grp); UV_EXTERN uv_pid_t uv_os_getpid(void); UV_EXTERN uv_pid_t uv_os_getppid(void); @@ -1245,6 +1266,7 @@ UV_EXTERN int uv_os_setpriority(uv_pid_t pid, int priority); UV_EXTERN unsigned int uv_available_parallelism(void); UV_EXTERN int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count); UV_EXTERN void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count); +UV_EXTERN int uv_cpumask_size(void); UV_EXTERN int uv_interface_addresses(uv_interface_address_t** addresses, int* count); @@ -1277,6 +1299,15 @@ UV_EXTERN int uv_os_gethostname(char* buffer, size_t* size); UV_EXTERN int uv_os_uname(uv_utsname_t* buffer); +struct uv_metrics_s { + uint64_t loop_count; + uint64_t events; + uint64_t events_waiting; + /* private */ + uint64_t* reserved[13]; +}; + +UV_EXTERN int uv_metrics_info(uv_loop_t* loop, 
uv_metrics_t* metrics); UV_EXTERN uint64_t uv_metrics_idle_time(uv_loop_t* loop); typedef enum { @@ -1710,7 +1741,9 @@ UV_EXTERN int uv_chdir(const char* dir); UV_EXTERN uint64_t uv_get_free_memory(void); UV_EXTERN uint64_t uv_get_total_memory(void); UV_EXTERN uint64_t uv_get_constrained_memory(void); +UV_EXTERN uint64_t uv_get_available_memory(void); +UV_EXTERN int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts); UV_EXTERN uint64_t uv_hrtime(void); UV_EXTERN void uv_sleep(unsigned int msec); @@ -1787,6 +1820,14 @@ UV_EXTERN int uv_thread_create_ex(uv_thread_t* tid, const uv_thread_options_t* params, uv_thread_cb entry, void* arg); +UV_EXTERN int uv_thread_setaffinity(uv_thread_t* tid, + char* cpumask, + char* oldmask, + size_t mask_size); +UV_EXTERN int uv_thread_getaffinity(uv_thread_t* tid, + char* cpumask, + size_t mask_size); +UV_EXTERN int uv_thread_getcpu(void); UV_EXTERN uv_thread_t uv_thread_self(void); UV_EXTERN int uv_thread_join(uv_thread_t *tid); UV_EXTERN int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2); diff --git a/deps/uv/include/uv/errno.h b/deps/uv/include/uv/errno.h index 71906b3f5e65e6..648e493d0e7f0f 100644 --- a/deps/uv/include/uv/errno.h +++ b/deps/uv/include/uv/errno.h @@ -413,7 +413,6 @@ #elif defined(__APPLE__) || \ defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__NetBSD__) || \ defined(__OpenBSD__) # define UV__EHOSTDOWN (-64) @@ -457,4 +456,16 @@ # define UV__ESOCKTNOSUPPORT (-4025) #endif +/* FreeBSD defines ENODATA in /usr/include/c++/v1/errno.h which is only visible + * if C++ is being used. Define it directly to avoid problems when integrating + * libuv in a C++ project. + */ +#if defined(ENODATA) && !defined(_WIN32) +# define UV__ENODATA UV__ERR(ENODATA) +#elif defined(__FreeBSD__) +# define UV__ENODATA (-9919) +#else +# define UV__ENODATA (-4024) +#endif + #endif /* UV_ERRNO_H_ */ diff --git a/deps/uv/include/uv/stdint-msvc2008.h b/deps/uv/include/uv/stdint-msvc2008.h deleted file mode 100644 index d02608a5972642..00000000000000 --- a/deps/uv/include/uv/stdint-msvc2008.h +++ /dev/null @@ -1,247 +0,0 @@ -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - -#ifndef _MSC_VER // [ -#error "Use this header only with Microsoft Visual C++ compilers!" -#endif // _MSC_VER ] - -#ifndef _MSC_STDINT_H_ // [ -#define _MSC_STDINT_H_ - -#if _MSC_VER > 1000 -#pragma once -#endif - -#include - -// For Visual Studio 6 in C++ mode and for many Visual Studio versions when -// compiling for ARM we should wrap include with 'extern "C++" {}' -// or compiler give many errors like this: -// error C2733: second C linkage of overloaded function 'wmemchr' not allowed -#ifdef __cplusplus -extern "C" { -#endif -# include -#ifdef __cplusplus -} -#endif - -// Define _W64 macros to mark types changing their size, like intptr_t. -#ifndef _W64 -# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 -# define _W64 __w64 -# else -# define _W64 -# endif -#endif - - -// 7.18.1 Integer types - -// 7.18.1.1 Exact-width integer types - -// Visual Studio 6 and Embedded Visual C++ 4 doesn't -// realize that, e.g. char has the same size as __int8 -// so we give up on __intX for them. -#if (_MSC_VER < 1300) - typedef signed char int8_t; - typedef signed short int16_t; - typedef signed int int32_t; - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; -#else - typedef signed __int8 int8_t; - typedef signed __int16 int16_t; - typedef signed __int32 int32_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; -#endif -typedef signed __int64 int64_t; -typedef unsigned __int64 uint64_t; - - -// 7.18.1.2 Minimum-width integer types -typedef int8_t int_least8_t; -typedef int16_t int_least16_t; -typedef int32_t int_least32_t; -typedef int64_t int_least64_t; -typedef uint8_t uint_least8_t; -typedef uint16_t uint_least16_t; -typedef uint32_t uint_least32_t; -typedef uint64_t uint_least64_t; - -// 7.18.1.3 Fastest minimum-width integer types -typedef int8_t int_fast8_t; -typedef int16_t int_fast16_t; -typedef int32_t int_fast32_t; -typedef int64_t int_fast64_t; -typedef uint8_t uint_fast8_t; -typedef uint16_t uint_fast16_t; -typedef uint32_t uint_fast32_t; -typedef uint64_t uint_fast64_t; - -// 7.18.1.4 Integer types capable of holding object pointers -#ifdef _WIN64 // [ - typedef signed __int64 intptr_t; - typedef unsigned __int64 uintptr_t; -#else // _WIN64 ][ - typedef _W64 signed int intptr_t; - typedef _W64 unsigned int uintptr_t; -#endif // _WIN64 ] - -// 7.18.1.5 Greatest-width integer types -typedef int64_t intmax_t; -typedef uint64_t uintmax_t; - - -// 7.18.2 Limits of specified-width integer types - -#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 - -// 7.18.2.1 Limits of exact-width integer types -#define INT8_MIN ((int8_t)_I8_MIN) -#define INT8_MAX _I8_MAX -#define INT16_MIN ((int16_t)_I16_MIN) -#define INT16_MAX _I16_MAX -#define INT32_MIN ((int32_t)_I32_MIN) -#define INT32_MAX _I32_MAX 
-#define INT64_MIN ((int64_t)_I64_MIN) -#define INT64_MAX _I64_MAX -#define UINT8_MAX _UI8_MAX -#define UINT16_MAX _UI16_MAX -#define UINT32_MAX _UI32_MAX -#define UINT64_MAX _UI64_MAX - -// 7.18.2.2 Limits of minimum-width integer types -#define INT_LEAST8_MIN INT8_MIN -#define INT_LEAST8_MAX INT8_MAX -#define INT_LEAST16_MIN INT16_MIN -#define INT_LEAST16_MAX INT16_MAX -#define INT_LEAST32_MIN INT32_MIN -#define INT_LEAST32_MAX INT32_MAX -#define INT_LEAST64_MIN INT64_MIN -#define INT_LEAST64_MAX INT64_MAX -#define UINT_LEAST8_MAX UINT8_MAX -#define UINT_LEAST16_MAX UINT16_MAX -#define UINT_LEAST32_MAX UINT32_MAX -#define UINT_LEAST64_MAX UINT64_MAX - -// 7.18.2.3 Limits of fastest minimum-width integer types -#define INT_FAST8_MIN INT8_MIN -#define INT_FAST8_MAX INT8_MAX -#define INT_FAST16_MIN INT16_MIN -#define INT_FAST16_MAX INT16_MAX -#define INT_FAST32_MIN INT32_MIN -#define INT_FAST32_MAX INT32_MAX -#define INT_FAST64_MIN INT64_MIN -#define INT_FAST64_MAX INT64_MAX -#define UINT_FAST8_MAX UINT8_MAX -#define UINT_FAST16_MAX UINT16_MAX -#define UINT_FAST32_MAX UINT32_MAX -#define UINT_FAST64_MAX UINT64_MAX - -// 7.18.2.4 Limits of integer types capable of holding object pointers -#ifdef _WIN64 // [ -# define INTPTR_MIN INT64_MIN -# define INTPTR_MAX INT64_MAX -# define UINTPTR_MAX UINT64_MAX -#else // _WIN64 ][ -# define INTPTR_MIN INT32_MIN -# define INTPTR_MAX INT32_MAX -# define UINTPTR_MAX UINT32_MAX -#endif // _WIN64 ] - -// 7.18.2.5 Limits of greatest-width integer types -#define INTMAX_MIN INT64_MIN -#define INTMAX_MAX INT64_MAX -#define UINTMAX_MAX UINT64_MAX - -// 7.18.3 Limits of other integer types - -#ifdef _WIN64 // [ -# define PTRDIFF_MIN _I64_MIN -# define PTRDIFF_MAX _I64_MAX -#else // _WIN64 ][ -# define PTRDIFF_MIN _I32_MIN -# define PTRDIFF_MAX _I32_MAX -#endif // _WIN64 ] - -#define SIG_ATOMIC_MIN INT_MIN -#define SIG_ATOMIC_MAX INT_MAX - -#ifndef SIZE_MAX // [ -# ifdef _WIN64 // [ -# define SIZE_MAX _UI64_MAX -# else // _WIN64 ][ -# define SIZE_MAX _UI32_MAX -# endif // _WIN64 ] -#endif // SIZE_MAX ] - -// WCHAR_MIN and WCHAR_MAX are also defined in -#ifndef WCHAR_MIN // [ -# define WCHAR_MIN 0 -#endif // WCHAR_MIN ] -#ifndef WCHAR_MAX // [ -# define WCHAR_MAX _UI16_MAX -#endif // WCHAR_MAX ] - -#define WINT_MIN 0 -#define WINT_MAX _UI16_MAX - -#endif // __STDC_LIMIT_MACROS ] - - -// 7.18.4 Limits of other integer types - -#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 - -// 7.18.4.1 Macros for minimum-width integer constants - -#define INT8_C(val) val##i8 -#define INT16_C(val) val##i16 -#define INT32_C(val) val##i32 -#define INT64_C(val) val##i64 - -#define UINT8_C(val) val##ui8 -#define UINT16_C(val) val##ui16 -#define UINT32_C(val) val##ui32 -#define UINT64_C(val) val##ui64 - -// 7.18.4.2 Macros for greatest-width integer constants -#define INTMAX_C INT64_C -#define UINTMAX_C UINT64_C - -#endif // __STDC_CONSTANT_MACROS ] - - -#endif // _MSC_STDINT_H_ ] diff --git a/deps/uv/include/uv/unix.h b/deps/uv/include/uv/unix.h index ea37d78768654e..0b1fdac2b25a6d 100644 --- a/deps/uv/include/uv/unix.h +++ b/deps/uv/include/uv/unix.h @@ -59,7 +59,6 @@ # include "uv/darwin.h" #elif defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__OpenBSD__) || \ defined(__NetBSD__) # include "uv/bsd.h" diff --git a/deps/uv/include/uv/version.h b/deps/uv/include/uv/version.h index 9c9d292f695038..febad1ef1c141c 100644 --- a/deps/uv/include/uv/version.h +++ b/deps/uv/include/uv/version.h @@ 
-31,8 +31,8 @@ */ #define UV_VERSION_MAJOR 1 -#define UV_VERSION_MINOR 44 -#define UV_VERSION_PATCH 2 +#define UV_VERSION_MINOR 45 +#define UV_VERSION_PATCH 0 #define UV_VERSION_IS_RELEASE 1 #define UV_VERSION_SUFFIX "" diff --git a/deps/uv/include/uv/win.h b/deps/uv/include/uv/win.h index 155c4355022176..92a95fa15f1aec 100644 --- a/deps/uv/include/uv/win.h +++ b/deps/uv/include/uv/win.h @@ -59,12 +59,7 @@ typedef struct pollfd { #include #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #include "uv/tree.h" #include "uv/threadpool.h" @@ -75,6 +70,11 @@ typedef struct pollfd { # define S_IFLNK 0xA000 #endif +// Define missing in Windows Kit Include\{VERSION}\ucrt\sys\stat.h +#if defined(_CRT_INTERNAL_NONSTDC_NAMES) && _CRT_INTERNAL_NONSTDC_NAMES && !defined(S_IFIFO) +# define S_IFIFO _S_IFIFO +#endif + /* Additional signals supported by uv_signal and or uv_kill. The CRT defines * the following signals already: * @@ -91,6 +91,7 @@ typedef struct pollfd { * variants (Linux and Darwin) */ #define SIGHUP 1 +#define SIGQUIT 3 #define SIGKILL 9 #define SIGWINCH 28 @@ -223,7 +224,7 @@ typedef struct _AFD_POLL_INFO { AFD_POLL_HANDLE_INFO Handles[1]; } AFD_POLL_INFO, *PAFD_POLL_INFO; -#define UV_MSAFD_PROVIDER_COUNT 3 +#define UV_MSAFD_PROVIDER_COUNT 4 /** @@ -274,11 +275,12 @@ typedef struct { } uv_rwlock_t; typedef struct { - unsigned int n; - unsigned int count; + unsigned threshold; + unsigned in; uv_mutex_t mutex; - uv_sem_t turnstile1; - uv_sem_t turnstile2; + /* TODO: in v2 make this a uv_cond_t, without unused_ */ + CONDITION_VARIABLE cond; + unsigned out; } uv_barrier_t; typedef struct { @@ -348,9 +350,9 @@ typedef struct { uv_idle_t* next_idle_handle; \ /* This handle holds the peer sockets for the fast variant of uv_poll_t */ \ SOCKET poll_peer_sockets[UV_MSAFD_PROVIDER_COUNT]; \ - /* Counter to keep track of active tcp streams */ \ + /* No longer used. */ \ unsigned int active_tcp_streams; \ - /* Counter to keep track of active udp streams */ \ + /* No longer used. */ \ unsigned int active_udp_streams; \ /* Counter to started timer */ \ uint64_t timer_counter; \ @@ -382,6 +384,7 @@ typedef struct { ULONG_PTR result; /* overlapped.Internal is reused to hold the result */\ HANDLE pipeHandle; \ DWORD duplex_flags; \ + WCHAR* name; \ } connect; \ } u; \ struct uv_req_s* next_req; @@ -497,7 +500,7 @@ typedef struct { struct { uv_pipe_connection_fields } conn; \ } pipe; -/* TODO: put the parser states in an union - TTY handles are always half-duplex +/* TODO: put the parser states in a union - TTY handles are always half-duplex * so read-state can safely overlap write-state. */ #define UV_TTY_PRIVATE_FIELDS \ HANDLE handle; \ @@ -605,7 +608,7 @@ typedef struct { struct uv_process_exit_s { \ UV_REQ_FIELDS \ } exit_req; \ - BYTE* child_stdio_buffer; \ + void* unused; /* TODO: retained for ABI compat; remove this in v2.x. */ \ int exit_signal; \ HANDLE wait_handle; \ HANDLE process_handle; \ diff --git a/deps/uv/libuv-static.pc.in b/deps/uv/libuv-static.pc.in index ea625482d5ebd4..639058c8e083aa 100644 --- a/deps/uv/libuv-static.pc.in +++ b/deps/uv/libuv-static.pc.in @@ -8,5 +8,5 @@ Version: @PACKAGE_VERSION@ Description: multi-platform support library with a focus on asynchronous I/O. 
URL: http://libuv.org/ -Libs: -L${libdir} -luv_a @LIBS@ +Libs: -L${libdir} -l:libuv.a @LIBS@ Cflags: -I${includedir} diff --git a/deps/uv/libuv.pc.in b/deps/uv/libuv.pc.in index 1d7b86f751764c..0f569146697451 100644 --- a/deps/uv/libuv.pc.in +++ b/deps/uv/libuv.pc.in @@ -2,6 +2,7 @@ prefix=@prefix@ exec_prefix=${prefix} libdir=@libdir@ includedir=@includedir@ +LIBUV_STATIC=-L${libdir} -l:libuv.a @LIBS@ Name: libuv Version: @PACKAGE_VERSION@ diff --git a/deps/uv/src/inet.c b/deps/uv/src/inet.c index ddabf22fa52a0d..cd77496846e90e 100644 --- a/deps/uv/src/inet.c +++ b/deps/uv/src/inet.c @@ -17,12 +17,7 @@ #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #include "uv.h" #include "uv-common.h" @@ -135,7 +130,7 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) { tp += strlen(tp); break; } - tp += sprintf(tp, "%x", words[i]); + tp += snprintf(tp, sizeof tmp - (tp - tmp), "%x", words[i]); } /* Was it a trailing run of 0x00's? */ if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words)) diff --git a/deps/uv/src/thread-common.c b/deps/uv/src/thread-common.c new file mode 100644 index 00000000000000..c67c0a7dd7279a --- /dev/null +++ b/deps/uv/src/thread-common.c @@ -0,0 +1,175 @@ +/* Copyright libuv project contributors. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "uv.h" +#include "uv-common.h" + +#include +#ifndef _WIN32 +#include +#endif + +#if defined(PTHREAD_BARRIER_SERIAL_THREAD) +STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t)); +#endif + +/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */ +#if defined(_AIX) || \ + defined(__OpenBSD__) || \ + !defined(PTHREAD_BARRIER_SERIAL_THREAD) +int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { + int rc; +#ifdef _WIN32 + uv_barrier_t* b; + b = barrier; + + if (barrier == NULL || count == 0) + return UV_EINVAL; +#else + struct _uv_barrier* b; + + if (barrier == NULL || count == 0) + return UV_EINVAL; + + b = uv__malloc(sizeof(*b)); + if (b == NULL) + return UV_ENOMEM; +#endif + + b->in = 0; + b->out = 0; + b->threshold = count; + + rc = uv_mutex_init(&b->mutex); + if (rc != 0) + goto error2; + + /* TODO(vjnash): remove these uv_cond_t casts in v2. 
*/ + rc = uv_cond_init((uv_cond_t*) &b->cond); + if (rc != 0) + goto error; + +#ifndef _WIN32 + barrier->b = b; +#endif + return 0; + +error: + uv_mutex_destroy(&b->mutex); +error2: +#ifndef _WIN32 + uv__free(b); +#endif + return rc; +} + + +int uv_barrier_wait(uv_barrier_t* barrier) { + int last; +#ifdef _WIN32 + uv_barrier_t* b; + b = barrier; +#else + struct _uv_barrier* b; + + if (barrier == NULL || barrier->b == NULL) + return UV_EINVAL; + + b = barrier->b; +#endif + + uv_mutex_lock(&b->mutex); + + while (b->out != 0) + uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex); + + if (++b->in == b->threshold) { + b->in = 0; + b->out = b->threshold; + uv_cond_broadcast((uv_cond_t*) &b->cond); + } else { + do + uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex); + while (b->in != 0); + } + + last = (--b->out == 0); + if (last) + uv_cond_broadcast((uv_cond_t*) &b->cond); + + uv_mutex_unlock(&b->mutex); + return last; +} + + +void uv_barrier_destroy(uv_barrier_t* barrier) { +#ifdef _WIN32 + uv_barrier_t* b; + b = barrier; +#else + struct _uv_barrier* b; + b = barrier->b; +#endif + + uv_mutex_lock(&b->mutex); + + assert(b->in == 0); + while (b->out != 0) + uv_cond_wait((uv_cond_t*) &b->cond, &b->mutex); + + if (b->in != 0) + abort(); + + uv_mutex_unlock(&b->mutex); + uv_mutex_destroy(&b->mutex); + uv_cond_destroy((uv_cond_t*) &b->cond); + +#ifndef _WIN32 + uv__free(barrier->b); + barrier->b = NULL; +#endif +} + +#else + +int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { + return UV__ERR(pthread_barrier_init(barrier, NULL, count)); +} + + +int uv_barrier_wait(uv_barrier_t* barrier) { + int rc; + + rc = pthread_barrier_wait(barrier); + if (rc != 0) + if (rc != PTHREAD_BARRIER_SERIAL_THREAD) + abort(); + + return rc == PTHREAD_BARRIER_SERIAL_THREAD; +} + + +void uv_barrier_destroy(uv_barrier_t* barrier) { + if (pthread_barrier_destroy(barrier)) + abort(); +} + +#endif diff --git a/deps/uv/src/threadpool.c b/deps/uv/src/threadpool.c index e804c7c4b6f03c..51962bf0021574 100644 --- a/deps/uv/src/threadpool.c +++ b/deps/uv/src/threadpool.c @@ -191,6 +191,7 @@ void uv__threadpool_cleanup(void) { static void init_threads(void) { + uv_thread_options_t config; unsigned int i; const char* val; uv_sem_t sem; @@ -226,8 +227,11 @@ static void init_threads(void) { if (uv_sem_init(&sem, 0)) abort(); + config.flags = UV_THREAD_HAS_STACK_SIZE; + config.stack_size = 8u << 20; /* 8 MB */ + for (i = 0; i < nthreads; i++) - if (uv_thread_create(threads + i, worker, &sem)) + if (uv_thread_create_ex(threads + i, &config, worker, &sem)) abort(); for (i = 0; i < nthreads; i++) @@ -271,9 +275,13 @@ void uv__work_submit(uv_loop_t* loop, } +/* TODO(bnoordhuis) teach libuv how to cancel file operations + * that go through io_uring instead of the thread pool. + */ static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) { int cancelled; + uv_once(&once, init_once); /* Ensure |mutex| is initialized. */ uv_mutex_lock(&mutex); uv_mutex_lock(&w->loop->wq_mutex); @@ -303,12 +311,15 @@ void uv__work_done(uv_async_t* handle) { QUEUE* q; QUEUE wq; int err; + int nevents; loop = container_of(handle, uv_loop_t, wq_async); uv_mutex_lock(&loop->wq_mutex); QUEUE_MOVE(&loop->wq, &wq); uv_mutex_unlock(&loop->wq_mutex); + nevents = 0; + while (!QUEUE_EMPTY(&wq)) { q = QUEUE_HEAD(&wq); QUEUE_REMOVE(q); @@ -316,6 +327,20 @@ void uv__work_done(uv_async_t* handle) { w = container_of(q, struct uv__work, wq); err = (w->work == uv__cancelled) ? 
UV_ECANCELED : 0; w->done(w, err); + nevents++; + } + + /* This check accomplishes 2 things: + * 1. Even if the queue was empty, the call to uv__work_done() should count + * as an event. Which will have been added by the event loop when + * calling this callback. + * 2. Prevents accidental wrap around in case nevents == 0 events == 0. + */ + if (nevents > 1) { + /* Subtract 1 to counter the call to uv__work_done(). */ + uv__metrics_inc_events(loop, nevents - 1); + if (uv__get_internal_fields(loop)->current_timeout == 0) + uv__metrics_inc_events_waiting(loop, nevents - 1); } } diff --git a/deps/uv/src/unix/aix.c b/deps/uv/src/unix/aix.c index 6a013d43e3ae4b..f1afbed49ec8d2 100644 --- a/deps/uv/src/unix/aix.c +++ b/deps/uv/src/unix/aix.c @@ -131,6 +131,7 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) { void uv__io_poll(uv_loop_t* loop, int timeout) { + uv__loop_internal_fields_t* lfields; struct pollfd events[1024]; struct pollfd pqry; struct pollfd* pe; @@ -154,6 +155,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { return; } + lfields = uv__get_internal_fields(loop); + while (!QUEUE_EMPTY(&loop->watcher_queue)) { q = QUEUE_HEAD(&loop->watcher_queue); QUEUE_REMOVE(q); @@ -217,7 +220,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { base = loop->time; count = 48; /* Benchmarks suggest this gives the best throughput. */ - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -232,6 +235,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { if (timeout != 0) uv__metrics_set_provider_entry_time(loop); + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. 
+ */ + lfields->current_timeout = timeout; + nfds = pollset_poll(loop->backend_fd, events, ARRAY_SIZE(events), @@ -321,9 +330,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { nevents++; } + uv__metrics_inc_events(loop, nevents); if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); } if (have_signals != 0) { @@ -389,6 +400,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + void uv_loadavg(double avg[3]) { perfstat_cpu_total_t ps_total; int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1); @@ -425,7 +441,7 @@ static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) { static int uv__path_is_a_directory(char* filename) { struct stat statbuf; - if (stat(filename, &statbuf) < 0) + if (uv__stat(filename, &statbuf) < 0) return -1; /* failed: not a directory, assume it is a file */ if (statbuf.st_type == VDIR) diff --git a/deps/uv/src/unix/async.c b/deps/uv/src/unix/async.c index e1805c323795e5..5751b6d02be9e6 100644 --- a/deps/uv/src/unix/async.c +++ b/deps/uv/src/unix/async.c @@ -24,9 +24,9 @@ #include "uv.h" #include "internal.h" -#include "atomic-ops.h" #include +#include #include /* snprintf() */ #include #include @@ -40,6 +40,7 @@ static void uv__async_send(uv_loop_t* loop); static int uv__async_start(uv_loop_t* loop); +static void uv__cpu_relax(void); int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { @@ -52,6 +53,7 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC); handle->async_cb = async_cb; handle->pending = 0; + handle->u.fd = 0; /* This will be used as a busy flag. */ QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue); uv__handle_start(handle); @@ -61,46 +63,54 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) { int uv_async_send(uv_async_t* handle) { + _Atomic int* pending; + _Atomic int* busy; + + pending = (_Atomic int*) &handle->pending; + busy = (_Atomic int*) &handle->u.fd; + /* Do a cheap read first. */ - if (ACCESS_ONCE(int, handle->pending) != 0) + if (atomic_load_explicit(pending, memory_order_relaxed) != 0) return 0; - /* Tell the other thread we're busy with the handle. */ - if (cmpxchgi(&handle->pending, 0, 1) != 0) - return 0; + /* Set the loop to busy. */ + atomic_fetch_add(busy, 1); /* Wake up the other thread's event loop. */ - uv__async_send(handle->loop); + if (atomic_exchange(pending, 1) == 0) + uv__async_send(handle->loop); - /* Tell the other thread we're done. */ - if (cmpxchgi(&handle->pending, 1, 2) != 1) - abort(); + /* Set the loop to not-busy. */ + atomic_fetch_add(busy, -1); return 0; } -/* Only call this from the event loop thread. */ -static int uv__async_spin(uv_async_t* handle) { +/* Wait for the busy flag to clear before closing. + * Only call this from the event loop thread. */ +static void uv__async_spin(uv_async_t* handle) { + _Atomic int* pending; + _Atomic int* busy; int i; - int rc; + + pending = (_Atomic int*) &handle->pending; + busy = (_Atomic int*) &handle->u.fd; + + /* Set the pending flag first, so no new events will be added by other + * threads after this function returns. */ + atomic_store(pending, 1); for (;;) { - /* 997 is not completely chosen at random. It's a prime number, acyclical - * by nature, and should therefore hopefully dampen sympathetic resonance. + /* 997 is not completely chosen at random. 
It's a prime number, acyclic by + * nature, and should therefore hopefully dampen sympathetic resonance. */ for (i = 0; i < 997; i++) { - /* rc=0 -- handle is not pending. - * rc=1 -- handle is pending, other thread is still working with it. - * rc=2 -- handle is pending, other thread is done. - */ - rc = cmpxchgi(&handle->pending, 2, 0); - - if (rc != 1) - return rc; + if (atomic_load(busy) == 0) + return; /* Other thread is busy with this handle, spin until it's done. */ - cpu_relax(); + uv__cpu_relax(); } /* Yield the CPU. We may have preempted the other thread while it's @@ -125,6 +135,7 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { QUEUE queue; QUEUE* q; uv_async_t* h; + _Atomic int *pending; assert(w == &loop->async_io_watcher); @@ -154,8 +165,10 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { QUEUE_REMOVE(q); QUEUE_INSERT_TAIL(&loop->async_handles, q); - if (0 == uv__async_spin(h)) - continue; /* Not pending. */ + /* Atomically fetch and clear pending flag */ + pending = (_Atomic int*) &h->pending; + if (atomic_exchange(pending, 0) == 0) + continue; if (h->async_cb == NULL) continue; @@ -227,20 +240,68 @@ static int uv__async_start(uv_loop_t* loop) { } +void uv__async_stop(uv_loop_t* loop) { + QUEUE queue; + QUEUE* q; + uv_async_t* h; + + if (loop->async_io_watcher.fd == -1) + return; + + /* Make sure no other thread is accessing the async handle fd after the loop + * cleanup. + */ + QUEUE_MOVE(&loop->async_handles, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_async_t, queue); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&loop->async_handles, q); + + uv__async_spin(h); + } + + if (loop->async_wfd != -1) { + if (loop->async_wfd != loop->async_io_watcher.fd) + uv__close(loop->async_wfd); + loop->async_wfd = -1; + } + + uv__io_stop(loop, &loop->async_io_watcher, POLLIN); + uv__close(loop->async_io_watcher.fd); + loop->async_io_watcher.fd = -1; +} + + int uv__async_fork(uv_loop_t* loop) { + QUEUE queue; + QUEUE* q; + uv_async_t* h; + if (loop->async_io_watcher.fd == -1) /* never started */ return 0; - uv__async_stop(loop); - - return uv__async_start(loop); -} + QUEUE_MOVE(&loop->async_handles, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_async_t, queue); + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&loop->async_handles, q); -void uv__async_stop(uv_loop_t* loop) { - if (loop->async_io_watcher.fd == -1) - return; + /* The state of any thread that set pending is now likely corrupt in this + * child because the user called fork, so just clear these flags and move + * on. Calling most libc functions after `fork` is declared to be undefined + * behavior anyways, unless async-signal-safe, for multithreaded programs + * like libuv, and nothing interesting in pthreads is async-signal-safe. + */ + h->pending = 0; + /* This is the busy flag, and we just abruptly lost all other threads. */ + h->u.fd = 0; + } + /* Recreate these, since they still exist, but belong to the wrong pid now. */ if (loop->async_wfd != -1) { if (loop->async_wfd != loop->async_io_watcher.fd) uv__close(loop->async_wfd); @@ -250,4 +311,19 @@ void uv__async_stop(uv_loop_t* loop) { uv__io_stop(loop, &loop->async_io_watcher, POLLIN); uv__close(loop->async_io_watcher.fd); loop->async_io_watcher.fd = -1; + + return uv__async_start(loop); +} + + +static void uv__cpu_relax(void) { +#if defined(__i386__) || defined(__x86_64__) + __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. 
PAUSE */ +#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) + __asm__ __volatile__ ("yield" ::: "memory"); +#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__) + __asm volatile ("" : : : "memory"); +#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__)) + __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory"); +#endif } diff --git a/deps/uv/src/unix/atomic-ops.h b/deps/uv/src/unix/atomic-ops.h deleted file mode 100644 index 58043c42fbd7aa..00000000000000 --- a/deps/uv/src/unix/atomic-ops.h +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright (c) 2013, Ben Noordhuis - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef UV_ATOMIC_OPS_H_ -#define UV_ATOMIC_OPS_H_ - -#include "internal.h" /* UV_UNUSED */ - -#if defined(__SUNPRO_C) || defined(__SUNPRO_CC) -#include -#endif - -UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)); -UV_UNUSED(static void cpu_relax(void)); - -/* Prefer hand-rolled assembly over the gcc builtins because the latter also - * issue full memory barriers. - */ -UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) { -#if defined(__i386__) || defined(__x86_64__) - int out; - __asm__ __volatile__ ("lock; cmpxchg %2, %1;" - : "=a" (out), "+m" (*(volatile int*) ptr) - : "r" (newval), "0" (oldval) - : "memory"); - return out; -#elif defined(__MVS__) - /* Use hand-rolled assembly because codegen from builtin __plo_CSST results in - * a runtime bug. - */ - __asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :); - return oldval; -#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) - return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval); -#else - return __sync_val_compare_and_swap(ptr, oldval, newval); -#endif -} - -UV_UNUSED(static void cpu_relax(void)) { -#if defined(__i386__) || defined(__x86_64__) - __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. 
PAUSE */ -#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) - __asm__ __volatile__ ("yield" ::: "memory"); -#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__) - __asm volatile ("" : : : "memory"); -#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__)) - __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory"); -#endif -} - -#endif /* UV_ATOMIC_OPS_H_ */ diff --git a/deps/uv/src/unix/core.c b/deps/uv/src/unix/core.c index 54c769f37f2331..9c0b3f99b80aa5 100644 --- a/deps/uv/src/unix/core.c +++ b/deps/uv/src/unix/core.c @@ -41,12 +41,13 @@ #include /* writev */ #include /* getrusage */ #include +#include #include #include +#include /* clock_gettime */ #ifdef __sun # include -# include # include #endif @@ -66,13 +67,14 @@ extern char** environ; #if defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__NetBSD__) || \ defined(__OpenBSD__) # include # include # include +# include # if defined(__FreeBSD__) +# include # define uv__accept4 accept4 # endif # if defined(__NetBSD__) @@ -107,6 +109,35 @@ STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base)); STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len)); +/* https://github.com/libuv/libuv/issues/1674 */ +int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) { + struct timespec t; + int r; + + if (ts == NULL) + return UV_EFAULT; + + switch (clock_id) { + default: + return UV_EINVAL; + case UV_CLOCK_MONOTONIC: + r = clock_gettime(CLOCK_MONOTONIC, &t); + break; + case UV_CLOCK_REALTIME: + r = clock_gettime(CLOCK_REALTIME, &t); + break; + } + + if (r) + return UV__ERR(errno); + + ts->tv_sec = t.tv_sec; + ts->tv_nsec = t.tv_nsec; + + return 0; +} + + uint64_t uv_hrtime(void) { return uv__hrtime(UV_CLOCK_PRECISE); } @@ -232,10 +263,10 @@ int uv__getiovmax(void) { #if defined(IOV_MAX) return IOV_MAX; #elif defined(_SC_IOV_MAX) - static int iovmax_cached = -1; + static _Atomic int iovmax_cached = -1; int iovmax; - iovmax = uv__load_relaxed(&iovmax_cached); + iovmax = atomic_load_explicit(&iovmax_cached, memory_order_relaxed); if (iovmax != -1) return iovmax; @@ -247,7 +278,7 @@ int uv__getiovmax(void) { if (iovmax == -1) iovmax = 1; - uv__store_relaxed(&iovmax_cached, iovmax); + atomic_store_explicit(&iovmax_cached, iovmax, memory_order_relaxed); return iovmax; #else @@ -360,6 +391,7 @@ static int uv__backend_timeout(const uv_loop_t* loop) { (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) && QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles) && + (loop->flags & UV_LOOP_REAP_CHILDREN) == 0 && loop->closing_handles == NULL) return uv__next_timeout(loop); return 0; @@ -388,10 +420,17 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) { if (!r) uv__update_time(loop); - while (r != 0 && loop->stop_flag == 0) { - uv__update_time(loop); + /* Maintain backwards compatibility by processing timers before entering the + * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed + * once, which should be done after polling in order to maintain proper + * execution order of the conceptual event loop. 
*/ + if (mode == UV_RUN_DEFAULT) { + if (r) + uv__update_time(loop); uv__run_timers(loop); + } + while (r != 0 && loop->stop_flag == 0) { can_sleep = QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles); @@ -403,6 +442,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) { if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT) timeout = uv__backend_timeout(loop); + uv__metrics_inc_loop_count(loop); + uv__io_poll(loop, timeout); /* Process immediate callbacks (e.g. write_cb) a small fixed number of @@ -420,18 +461,8 @@ int uv_run(uv_loop_t* loop, uv_run_mode mode) { uv__run_check(loop); uv__run_closing_handles(loop); - if (mode == UV_RUN_ONCE) { - /* UV_RUN_ONCE implies forward progress: at least one callback must have - * been invoked when it returns. uv__io_poll() can return without doing - * I/O (meaning: no callbacks) when its timeout expires - which means we - * have pending timers that satisfy the forward progress constraint. - * - * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from - * the check. - */ - uv__update_time(loop); - uv__run_timers(loop); - } + uv__update_time(loop); + uv__run_timers(loop); r = uv__loop_alive(loop); if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) @@ -867,11 +898,6 @@ void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) { w->fd = fd; w->events = 0; w->pevents = 0; - -#if defined(UV_HAVE_KQUEUE) - w->rcount = 0; - w->wcount = 0; -#endif /* defined(UV_HAVE_KQUEUE) */ } @@ -991,6 +1017,15 @@ int uv_getrusage(uv_rusage_t* rusage) { rusage->ru_nivcsw = usage.ru_nivcsw; #endif + /* Most platforms report ru_maxrss in kilobytes; macOS and Solaris are + * the outliers because of course they are. + */ +#if defined(__APPLE__) && !TARGET_OS_IPHONE + rusage->ru_maxrss /= 1024; /* macOS reports bytes. */ +#elif defined(__sun) + rusage->ru_maxrss /= getpagesize() / 1024; /* Solaris reports pages. */ +#endif + return 0; } @@ -1090,8 +1125,8 @@ int uv_os_homedir(char* buffer, size_t* size) { if (r != UV_ENOENT) return r; - /* HOME is not set, so call uv__getpwuid_r() */ - r = uv__getpwuid_r(&pwd); + /* HOME is not set, so call uv_os_get_passwd() */ + r = uv_os_get_passwd(&pwd); if (r != 0) { return r; @@ -1164,11 +1199,10 @@ int uv_os_tmpdir(char* buffer, size_t* size) { } -int uv__getpwuid_r(uv_passwd_t* pwd) { +static int uv__getpwuid_r(uv_passwd_t *pwd, uid_t uid) { struct passwd pw; struct passwd* result; char* buf; - uid_t uid; size_t bufsize; size_t name_size; size_t homedir_size; @@ -1178,8 +1212,6 @@ int uv__getpwuid_r(uv_passwd_t* pwd) { if (pwd == NULL) return UV_EINVAL; - uid = geteuid(); - /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would get the suggested size, but it * is frequently 1024 or 4096, so we can just use that directly. The pwent * will not usually be large. */ @@ -1238,24 +1270,93 @@ int uv__getpwuid_r(uv_passwd_t* pwd) { } -void uv_os_free_passwd(uv_passwd_t* pwd) { - if (pwd == NULL) - return; +int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) { + struct group gp; + struct group* result; + char* buf; + char* gr_mem; + size_t bufsize; + size_t name_size; + long members; + size_t mem_size; + int r; - /* - The memory for name, shell, and homedir are allocated in a single - uv__malloc() call. The base of the pointer is stored in pwd->username, so - that is the field that needs to be freed. 
- */ - uv__free(pwd->username); - pwd->username = NULL; - pwd->shell = NULL; - pwd->homedir = NULL; + if (grp == NULL) + return UV_EINVAL; + + /* Calling sysconf(_SC_GETGR_R_SIZE_MAX) would get the suggested size, but it + * is frequently 1024 or 4096, so we can just use that directly. The pwent + * will not usually be large. */ + for (bufsize = 2000;; bufsize *= 2) { + buf = uv__malloc(bufsize); + + if (buf == NULL) + return UV_ENOMEM; + + do + r = getgrgid_r(gid, &gp, buf, bufsize, &result); + while (r == EINTR); + + if (r != 0 || result == NULL) + uv__free(buf); + + if (r != ERANGE) + break; + } + + if (r != 0) + return UV__ERR(r); + + if (result == NULL) + return UV_ENOENT; + + /* Allocate memory for the groupname and members. */ + name_size = strlen(gp.gr_name) + 1; + members = 0; + mem_size = sizeof(char*); + for (r = 0; gp.gr_mem[r] != NULL; r++) { + mem_size += strlen(gp.gr_mem[r]) + 1 + sizeof(char*); + members++; + } + + gr_mem = uv__malloc(name_size + mem_size); + if (gr_mem == NULL) { + uv__free(buf); + return UV_ENOMEM; + } + + /* Copy the members */ + grp->members = (char**) gr_mem; + grp->members[members] = NULL; + gr_mem = (char*) &grp->members[members + 1]; + for (r = 0; r < members; r++) { + grp->members[r] = gr_mem; + strcpy(gr_mem, gp.gr_mem[r]); + gr_mem += strlen(gr_mem) + 1; + } + assert(gr_mem == (char*)grp->members + mem_size); + + /* Copy the groupname */ + grp->groupname = gr_mem; + memcpy(grp->groupname, gp.gr_name, name_size); + gr_mem += name_size; + + /* Copy the gid */ + grp->gid = gp.gr_gid; + + uv__free(buf); + + return 0; } int uv_os_get_passwd(uv_passwd_t* pwd) { - return uv__getpwuid_r(pwd); + return uv__getpwuid_r(pwd, geteuid()); +} + + +int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) { + return uv__getpwuid_r(pwd, uid); } @@ -1416,6 +1517,13 @@ uv_pid_t uv_os_getppid(void) { return getppid(); } +int uv_cpumask_size(void) { +#if UV__CPU_AFFINITY_SUPPORTED + return CPU_SETSIZE; +#else + return UV_ENOTSUP; +#endif +} int uv_os_getpriority(uv_pid_t pid, int* priority) { int r; diff --git a/deps/uv/src/unix/cygwin.c b/deps/uv/src/unix/cygwin.c index 169958d55f2ed0..4e5413963d6acf 100644 --- a/deps/uv/src/unix/cygwin.c +++ b/deps/uv/src/unix/cygwin.c @@ -51,3 +51,7 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { uint64_t uv_get_constrained_memory(void) { return 0; /* Memory constraints are unknown. 
*/ } + +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} diff --git a/deps/uv/src/unix/darwin-stub.h b/deps/uv/src/unix/darwin-stub.h index 433e3efa73079e..b93cf67c596285 100644 --- a/deps/uv/src/unix/darwin-stub.h +++ b/deps/uv/src/unix/darwin-stub.h @@ -27,7 +27,6 @@ struct CFArrayCallBacks; struct CFRunLoopSourceContext; struct FSEventStreamContext; -struct CFRange; typedef double CFAbsoluteTime; typedef double CFTimeInterval; @@ -43,23 +42,13 @@ typedef unsigned CFStringEncoding; typedef void* CFAllocatorRef; typedef void* CFArrayRef; typedef void* CFBundleRef; -typedef void* CFDataRef; typedef void* CFDictionaryRef; -typedef void* CFMutableDictionaryRef; -typedef struct CFRange CFRange; typedef void* CFRunLoopRef; typedef void* CFRunLoopSourceRef; typedef void* CFStringRef; typedef void* CFTypeRef; typedef void* FSEventStreamRef; -typedef uint32_t IOOptionBits; -typedef unsigned int io_iterator_t; -typedef unsigned int io_object_t; -typedef unsigned int io_service_t; -typedef unsigned int io_registry_entry_t; - - typedef void (*FSEventStreamCallback)(const FSEventStreamRef, void*, size_t, @@ -80,11 +69,6 @@ struct FSEventStreamContext { void* pad[3]; }; -struct CFRange { - CFIndex location; - CFIndex length; -}; - static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100; static const OSStatus noErr = 0; diff --git a/deps/uv/src/unix/darwin.c b/deps/uv/src/unix/darwin.c index 62f04d315423d5..90790d701c4327 100644 --- a/deps/uv/src/unix/darwin.c +++ b/deps/uv/src/unix/darwin.c @@ -33,13 +33,10 @@ #include #include /* sysconf */ -#include "darwin-stub.h" - static uv_once_t once = UV_ONCE_INIT; static uint64_t (*time_func)(void); static mach_timebase_info_data_t timebase; -typedef unsigned char UInt8; int uv__platform_loop_init(uv_loop_t* loop) { loop->cf_state = NULL; @@ -110,7 +107,7 @@ uint64_t uv_get_free_memory(void) { if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&info, &count) != KERN_SUCCESS) { - return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. 
*/ + return 0; } return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE); @@ -123,7 +120,7 @@ uint64_t uv_get_total_memory(void) { size_t size = sizeof(info); if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info; } @@ -134,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + void uv_loadavg(double avg[3]) { struct loadavg info; size_t size = sizeof(info); @@ -183,159 +185,17 @@ int uv_uptime(double* uptime) { return 0; } -static int uv__get_cpu_speed(uint64_t* speed) { - /* IOKit */ - void (*pIOObjectRelease)(io_object_t); - kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*); - CFMutableDictionaryRef (*pIOServiceMatching)(const char*); - kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t, - CFMutableDictionaryRef, - io_iterator_t*); - io_service_t (*pIOIteratorNext)(io_iterator_t); - CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t, - CFStringRef, - CFAllocatorRef, - IOOptionBits); - - /* CoreFoundation */ - CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef, - const char*, - CFStringEncoding); - CFStringEncoding (*pCFStringGetSystemEncoding)(void); - UInt8 *(*pCFDataGetBytePtr)(CFDataRef); - CFIndex (*pCFDataGetLength)(CFDataRef); - void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*); - void (*pCFRelease)(CFTypeRef); - - void* core_foundation_handle; - void* iokit_handle; - int err; - - kern_return_t kr; - mach_port_t mach_port; - io_iterator_t it; - io_object_t service; - - mach_port = 0; - - err = UV_ENOENT; - core_foundation_handle = dlopen("/System/Library/Frameworks/" - "CoreFoundation.framework/" - "CoreFoundation", - RTLD_LAZY | RTLD_LOCAL); - iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/" - "IOKit", - RTLD_LAZY | RTLD_LOCAL); - - if (core_foundation_handle == NULL || iokit_handle == NULL) - goto out; - -#define V(handle, symbol) \ - do { \ - *(void **)(&p ## symbol) = dlsym((handle), #symbol); \ - if (p ## symbol == NULL) \ - goto out; \ - } \ - while (0) - V(iokit_handle, IOMasterPort); - V(iokit_handle, IOServiceMatching); - V(iokit_handle, IOServiceGetMatchingServices); - V(iokit_handle, IOIteratorNext); - V(iokit_handle, IOObjectRelease); - V(iokit_handle, IORegistryEntryCreateCFProperty); - V(core_foundation_handle, CFStringCreateWithCString); - V(core_foundation_handle, CFStringGetSystemEncoding); - V(core_foundation_handle, CFDataGetBytePtr); - V(core_foundation_handle, CFDataGetLength); - V(core_foundation_handle, CFDataGetBytes); - V(core_foundation_handle, CFRelease); -#undef V - -#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8) - - kr = pIOMasterPort(MACH_PORT_NULL, &mach_port); - assert(kr == KERN_SUCCESS); - CFMutableDictionaryRef classes_to_match - = pIOServiceMatching("IOPlatformDevice"); - kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it); - assert(kr == KERN_SUCCESS); - service = pIOIteratorNext(it); - - CFStringRef device_type_str = S("device_type"); - CFStringRef clock_frequency_str = S("clock-frequency"); - - while (service != 0) { - CFDataRef data; - data = pIORegistryEntryCreateCFProperty(service, - device_type_str, - NULL, - 0); - if (data) { - const UInt8* raw = pCFDataGetBytePtr(data); - if (strncmp((char*)raw, "cpu", 3) == 0 || - strncmp((char*)raw, "processor", 9) == 0) { - CFDataRef freq_ref; - freq_ref = pIORegistryEntryCreateCFProperty(service, - clock_frequency_str, - NULL, - 0); - if (freq_ref) { 
- const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref); - CFIndex len = pCFDataGetLength(freq_ref); - if (len == 8) - memcpy(speed, freq_ref_ptr, 8); - else if (len == 4) { - uint32_t v; - memcpy(&v, freq_ref_ptr, 4); - *speed = v; - } else { - *speed = 0; - } - - pCFRelease(freq_ref); - pCFRelease(data); - break; - } - } - pCFRelease(data); - } - - service = pIOIteratorNext(it); - } - - pIOObjectRelease(it); - - err = 0; - - if (device_type_str != NULL) - pCFRelease(device_type_str); - if (clock_frequency_str != NULL) - pCFRelease(clock_frequency_str); - -out: - if (core_foundation_handle != NULL) - dlclose(core_foundation_handle); - - if (iokit_handle != NULL) - dlclose(iokit_handle); - - mach_port_deallocate(mach_task_self(), mach_port); - - return err; -} - int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK), multiplier = ((uint64_t)1000L / ticks); char model[512]; + uint64_t cpuspeed; size_t size; unsigned int i; natural_t numcpus; mach_msg_type_number_t msg_type; processor_cpu_load_info_data_t *info; uv_cpu_info_t* cpu_info; - uint64_t cpuspeed; - int err; size = sizeof(model); if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) && @@ -343,9 +203,13 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { return UV__ERR(errno); } - err = uv__get_cpu_speed(&cpuspeed); - if (err < 0) - return err; + cpuspeed = 0; + size = sizeof(cpuspeed); + sysctlbyname("hw.cpufrequency", &cpuspeed, &size, NULL, 0); + if (cpuspeed == 0) + /* If sysctl hw.cputype == CPU_TYPE_ARM64, the correct value is unavailable + * from Apple, but we can hard-code it here to a plausible value. */ + cpuspeed = 2400000000; if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus, (processor_info_array_t*)&info, diff --git a/deps/uv/src/unix/epoll.c b/deps/uv/src/unix/epoll.c deleted file mode 100644 index 97348e254b4556..00000000000000 --- a/deps/uv/src/unix/epoll.c +++ /dev/null @@ -1,422 +0,0 @@ -/* Copyright libuv contributors. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#include "uv.h" -#include "internal.h" -#include -#include - -int uv__epoll_init(uv_loop_t* loop) { - int fd; - fd = epoll_create1(O_CLOEXEC); - - /* epoll_create1() can fail either because it's not implemented (old kernel) - * or because it doesn't understand the O_CLOEXEC flag. 
- */ - if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) { - fd = epoll_create(256); - - if (fd != -1) - uv__cloexec(fd, 1); - } - - loop->backend_fd = fd; - if (fd == -1) - return UV__ERR(errno); - - return 0; -} - - -void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { - struct epoll_event* events; - struct epoll_event dummy; - uintptr_t i; - uintptr_t nfds; - - assert(loop->watchers != NULL); - assert(fd >= 0); - - events = (struct epoll_event*) loop->watchers[loop->nwatchers]; - nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1]; - if (events != NULL) - /* Invalidate events with same file descriptor */ - for (i = 0; i < nfds; i++) - if (events[i].data.fd == fd) - events[i].data.fd = -1; - - /* Remove the file descriptor from the epoll. - * This avoids a problem where the same file description remains open - * in another process, causing repeated junk epoll events. - * - * We pass in a dummy epoll_event, to work around a bug in old kernels. - */ - if (loop->backend_fd >= 0) { - /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that - * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. - */ - memset(&dummy, 0, sizeof(dummy)); - epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy); - } -} - - -int uv__io_check_fd(uv_loop_t* loop, int fd) { - struct epoll_event e; - int rc; - - memset(&e, 0, sizeof(e)); - e.events = POLLIN; - e.data.fd = -1; - - rc = 0; - if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e)) - if (errno != EEXIST) - rc = UV__ERR(errno); - - if (rc == 0) - if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e)) - abort(); - - return rc; -} - - -void uv__io_poll(uv_loop_t* loop, int timeout) { - /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes - * effectively infinite on 32 bits architectures. To avoid blocking - * indefinitely, we cap the timeout and poll again if necessary. - * - * Note that "30 minutes" is a simplification because it depends on - * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200, - * that being the largest value I have seen in the wild (and only once.) - */ - static const int max_safe_timeout = 1789569; - static int no_epoll_pwait_cached; - static int no_epoll_wait_cached; - int no_epoll_pwait; - int no_epoll_wait; - struct epoll_event events[1024]; - struct epoll_event* pe; - struct epoll_event e; - int real_timeout; - QUEUE* q; - uv__io_t* w; - sigset_t sigset; - uint64_t sigmask; - uint64_t base; - int have_signals; - int nevents; - int count; - int nfds; - int fd; - int op; - int i; - int user_timeout; - int reset_timeout; - - if (loop->nfds == 0) { - assert(QUEUE_EMPTY(&loop->watcher_queue)); - return; - } - - memset(&e, 0, sizeof(e)); - - while (!QUEUE_EMPTY(&loop->watcher_queue)) { - q = QUEUE_HEAD(&loop->watcher_queue); - QUEUE_REMOVE(q); - QUEUE_INIT(q); - - w = QUEUE_DATA(q, uv__io_t, watcher_queue); - assert(w->pevents != 0); - assert(w->fd >= 0); - assert(w->fd < (int) loop->nwatchers); - - e.events = w->pevents; - e.data.fd = w->fd; - - if (w->events == 0) - op = EPOLL_CTL_ADD; - else - op = EPOLL_CTL_MOD; - - /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching - * events, skip the syscall and squelch the events after epoll_wait(). - */ - if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) { - if (errno != EEXIST) - abort(); - - assert(op == EPOLL_CTL_ADD); - - /* We've reactivated a file descriptor that's been watched before. 
*/ - if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e)) - abort(); - } - - w->events = w->pevents; - } - - sigmask = 0; - if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { - sigemptyset(&sigset); - sigaddset(&sigset, SIGPROF); - sigmask |= 1 << (SIGPROF - 1); - } - - assert(timeout >= -1); - base = loop->time; - count = 48; /* Benchmarks suggest this gives the best throughput. */ - real_timeout = timeout; - - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { - reset_timeout = 1; - user_timeout = timeout; - timeout = 0; - } else { - reset_timeout = 0; - user_timeout = 0; - } - - /* You could argue there is a dependency between these two but - * ultimately we don't care about their ordering with respect - * to one another. Worst case, we make a few system calls that - * could have been avoided because another thread already knows - * they fail with ENOSYS. Hardly the end of the world. - */ - no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached); - no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached); - - for (;;) { - /* Only need to set the provider_entry_time if timeout != 0. The function - * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. - */ - if (timeout != 0) - uv__metrics_set_provider_entry_time(loop); - - /* See the comment for max_safe_timeout for an explanation of why - * this is necessary. Executive summary: kernel bug workaround. - */ - if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) - timeout = max_safe_timeout; - - if (sigmask != 0 && no_epoll_pwait != 0) - if (pthread_sigmask(SIG_BLOCK, &sigset, NULL)) - abort(); - - if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) { - nfds = epoll_pwait(loop->backend_fd, - events, - ARRAY_SIZE(events), - timeout, - &sigset); - if (nfds == -1 && errno == ENOSYS) { - uv__store_relaxed(&no_epoll_pwait_cached, 1); - no_epoll_pwait = 1; - } - } else { - nfds = epoll_wait(loop->backend_fd, - events, - ARRAY_SIZE(events), - timeout); - if (nfds == -1 && errno == ENOSYS) { - uv__store_relaxed(&no_epoll_wait_cached, 1); - no_epoll_wait = 1; - } - } - - if (sigmask != 0 && no_epoll_pwait != 0) - if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL)) - abort(); - - /* Update loop->time unconditionally. It's tempting to skip the update when - * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the - * operating system didn't reschedule our process while in the syscall. - */ - SAVE_ERRNO(uv__update_time(loop)); - - if (nfds == 0) { - assert(timeout != -1); - - if (reset_timeout != 0) { - timeout = user_timeout; - reset_timeout = 0; - } - - if (timeout == -1) - continue; - - if (timeout == 0) - return; - - /* We may have been inside the system call for longer than |timeout| - * milliseconds so we need to update the timestamp to avoid drift. - */ - goto update_timeout; - } - - if (nfds == -1) { - if (errno == ENOSYS) { - /* epoll_wait() or epoll_pwait() failed, try the other system call. */ - assert(no_epoll_wait == 0 || no_epoll_pwait == 0); - continue; - } - - if (errno != EINTR) - abort(); - - if (reset_timeout != 0) { - timeout = user_timeout; - reset_timeout = 0; - } - - if (timeout == -1) - continue; - - if (timeout == 0) - return; - - /* Interrupted by a signal. Update timeout and poll again. */ - goto update_timeout; - } - - have_signals = 0; - nevents = 0; - - { - /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. 
*/ - union { - struct epoll_event* events; - uv__io_t* watchers; - } x; - - x.events = events; - assert(loop->watchers != NULL); - loop->watchers[loop->nwatchers] = x.watchers; - loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds; - } - - for (i = 0; i < nfds; i++) { - pe = events + i; - fd = pe->data.fd; - - /* Skip invalidated events, see uv__platform_invalidate_fd */ - if (fd == -1) - continue; - - assert(fd >= 0); - assert((unsigned) fd < loop->nwatchers); - - w = loop->watchers[fd]; - - if (w == NULL) { - /* File descriptor that we've stopped watching, disarm it. - * - * Ignore all errors because we may be racing with another thread - * when the file descriptor is closed. - */ - epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe); - continue; - } - - /* Give users only events they're interested in. Prevents spurious - * callbacks when previous callback invocation in this loop has stopped - * the current watcher. Also, filters out events that users has not - * requested us to watch. - */ - pe->events &= w->pevents | POLLERR | POLLHUP; - - /* Work around an epoll quirk where it sometimes reports just the - * EPOLLERR or EPOLLHUP event. In order to force the event loop to - * move forward, we merge in the read/write events that the watcher - * is interested in; uv__read() and uv__write() will then deal with - * the error or hangup in the usual fashion. - * - * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user - * reads the available data, calls uv_read_stop(), then sometime later - * calls uv_read_start() again. By then, libuv has forgotten about the - * hangup and the kernel won't report EPOLLIN again because there's - * nothing left to read. If anything, libuv is to blame here. The - * current hack is just a quick bandaid; to properly fix it, libuv - * needs to remember the error/hangup event. We should get that for - * free when we switch over to edge-triggered I/O. - */ - if (pe->events == POLLERR || pe->events == POLLHUP) - pe->events |= - w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI); - - if (pe->events != 0) { - /* Run signal watchers last. This also affects child process watchers - * because those are implemented in terms of signal watchers. - */ - if (w == &loop->signal_io_watcher) { - have_signals = 1; - } else { - uv__metrics_update_idle_time(loop); - w->cb(loop, w, pe->events); - } - - nevents++; - } - } - - if (reset_timeout != 0) { - timeout = user_timeout; - reset_timeout = 0; - } - - if (have_signals != 0) { - uv__metrics_update_idle_time(loop); - loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); - } - - loop->watchers[loop->nwatchers] = NULL; - loop->watchers[loop->nwatchers + 1] = NULL; - - if (have_signals != 0) - return; /* Event loop should cycle now so don't poll again. */ - - if (nevents != 0) { - if (nfds == ARRAY_SIZE(events) && --count != 0) { - /* Poll for more events but don't block this time. 
*/ - timeout = 0; - continue; - } - return; - } - - if (timeout == 0) - return; - - if (timeout == -1) - continue; - -update_timeout: - assert(timeout > 0); - - real_timeout -= (loop->time - base); - if (real_timeout <= 0) - return; - - timeout = real_timeout; - } -} - diff --git a/deps/uv/src/unix/freebsd.c b/deps/uv/src/unix/freebsd.c index 658ff262d3738e..191bc8bc213ffd 100644 --- a/deps/uv/src/unix/freebsd.c +++ b/deps/uv/src/unix/freebsd.c @@ -91,7 +91,7 @@ uint64_t uv_get_free_memory(void) { size_t size = sizeof(freecount); if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) freecount * sysconf(_SC_PAGESIZE); @@ -105,7 +105,7 @@ uint64_t uv_get_total_memory(void) { size_t size = sizeof(info); if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info; } @@ -116,6 +116,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + void uv_loadavg(double avg[3]) { struct loadavg info; size_t size = sizeof(info); @@ -264,30 +269,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { } -int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) { -#if __FreeBSD__ >= 11 && !defined(__DragonFly__) - return sendmmsg(fd, - (struct mmsghdr*) mmsg, - vlen, - 0 /* flags */); -#else - return errno = ENOSYS, -1; -#endif -} - - -int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) { -#if __FreeBSD__ >= 11 && !defined(__DragonFly__) - return recvmmsg(fd, - (struct mmsghdr*) mmsg, - vlen, - 0 /* flags */, - NULL /* timeout */); -#else - return errno = ENOSYS, -1; -#endif -} - ssize_t uv__fs_copy_file_range(int fd_in, off_t* off_in, diff --git a/deps/uv/src/unix/fs.c b/deps/uv/src/unix/fs.c index 933c9c0dc2da40..5f9aae373b9fd2 100644 --- a/deps/uv/src/unix/fs.c +++ b/deps/uv/src/unix/fs.c @@ -48,7 +48,6 @@ #if defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__OpenBSD__) || \ defined(__NetBSD__) # define HAVE_PREADV 1 @@ -57,10 +56,11 @@ #endif #if defined(__linux__) -# include "sys/utsname.h" +# include +# include #endif -#if defined(__linux__) || defined(__sun) +#if defined(__sun) # include # include #endif @@ -79,7 +79,6 @@ #if defined(__APPLE__) || \ defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__OpenBSD__) || \ defined(__NetBSD__) # include @@ -256,7 +255,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) { #elif defined(__APPLE__) \ || defined(__DragonFly__) \ || defined(__FreeBSD__) \ - || defined(__FreeBSD_kernel__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) \ || defined(__sun) @@ -311,7 +309,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) { static uv_once_t once = UV_ONCE_INIT; int r; #ifdef O_CLOEXEC - static int no_cloexec_support; + static _Atomic int no_cloexec_support; #endif static const char pattern[] = "XXXXXX"; static const size_t pattern_size = sizeof(pattern) - 1; @@ -336,7 +334,8 @@ static int uv__fs_mkstemp(uv_fs_t* req) { uv_once(&once, uv__mkostemp_initonce); #ifdef O_CLOEXEC - if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) { + if (atomic_load_explicit(&no_cloexec_support, memory_order_relaxed) == 0 && + uv__mkostemp != NULL) { r = uv__mkostemp(path, O_CLOEXEC); if (r >= 0) @@ -349,7 +348,7 @@ static int uv__fs_mkstemp(uv_fs_t* req) { /* We set the static variable so that next calls don't even try to use mkostemp. 
*/ - uv__store_relaxed(&no_cloexec_support, 1); + atomic_store_explicit(&no_cloexec_support, 1, memory_order_relaxed); } #endif /* O_CLOEXEC */ @@ -459,7 +458,7 @@ static ssize_t uv__fs_preadv(uv_file fd, static ssize_t uv__fs_read(uv_fs_t* req) { #if defined(__linux__) - static int no_preadv; + static _Atomic int no_preadv; #endif unsigned int iovmax; ssize_t result; @@ -483,19 +482,19 @@ static ssize_t uv__fs_read(uv_fs_t* req) { result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off); #else # if defined(__linux__) - if (uv__load_relaxed(&no_preadv)) retry: + if (atomic_load_explicit(&no_preadv, memory_order_relaxed)) retry: # endif { result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off); } # if defined(__linux__) else { - result = uv__preadv(req->file, - (struct iovec*)req->bufs, - req->nbufs, - req->off); + result = preadv(req->file, + (struct iovec*) req->bufs, + req->nbufs, + req->off); if (result == -1 && errno == ENOSYS) { - uv__store_relaxed(&no_preadv, 1); + atomic_store_explicit(&no_preadv, 1, memory_order_relaxed); goto retry; } } @@ -516,7 +515,7 @@ static ssize_t uv__fs_read(uv_fs_t* req) { if (result == -1 && errno == EOPNOTSUPP) { struct stat buf; ssize_t rc; - rc = fstat(req->file, &buf); + rc = uv__fstat(req->file, &buf); if (rc == 0 && S_ISDIR(buf.st_mode)) { errno = EISDIR; } @@ -527,19 +526,12 @@ static ssize_t uv__fs_read(uv_fs_t* req) { } -#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8) -#define UV_CONST_DIRENT uv__dirent_t -#else -#define UV_CONST_DIRENT const uv__dirent_t -#endif - - -static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) { +static int uv__fs_scandir_filter(const uv__dirent_t* dent) { return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0; } -static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) { +static int uv__fs_scandir_sort(const uv__dirent_t** a, const uv__dirent_t** b) { return strcmp((*a)->d_name, (*b)->d_name); } @@ -715,7 +707,7 @@ static ssize_t uv__fs_readlink(uv_fs_t* req) { /* We may not have a real PATH_MAX. Read size of link. */ struct stat st; int ret; - ret = lstat(req->path, &st); + ret = uv__lstat(req->path, &st); if (ret != 0) return -1; if (!S_ISLNK(st.st_mode)) { @@ -908,14 +900,14 @@ static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) { #ifdef __linux__ static unsigned uv__kernel_version(void) { - static unsigned cached_version; + static _Atomic unsigned cached_version; struct utsname u; unsigned version; unsigned major; unsigned minor; unsigned patch; - version = uv__load_relaxed(&cached_version); + version = atomic_load_explicit(&cached_version, memory_order_relaxed); if (version != 0) return version; @@ -926,7 +918,7 @@ static unsigned uv__kernel_version(void) { return 0; version = major * 65536 + minor * 256 + patch; - uv__store_relaxed(&cached_version, version); + atomic_store_explicit(&cached_version, version, memory_order_relaxed); return version; } @@ -968,10 +960,10 @@ static int uv__is_cifs_or_smb(int fd) { static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off, int out_fd, size_t len) { - static int no_copy_file_range_support; + static _Atomic int no_copy_file_range_support; ssize_t r; - if (uv__load_relaxed(&no_copy_file_range_support)) { + if (atomic_load_explicit(&no_copy_file_range_support, memory_order_relaxed)) { errno = ENOSYS; return -1; } @@ -990,7 +982,7 @@ static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off, errno = ENOSYS; /* Use fallback. 
*/ break; case ENOSYS: - uv__store_relaxed(&no_copy_file_range_support, 1); + atomic_store_explicit(&no_copy_file_range_support, 1, memory_order_relaxed); break; case EPERM: /* It's been reported that CIFS spuriously fails. @@ -1061,10 +1053,7 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) { return -1; } -#elif defined(__APPLE__) || \ - defined(__DragonFly__) || \ - defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) +#elif defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) { off_t len; ssize_t r; @@ -1088,15 +1077,6 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) { #endif len = 0; r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0); -#elif defined(__FreeBSD_kernel__) - len = 0; - r = bsd_sendfile(in_fd, - out_fd, - req->off, - req->bufsml[0].len, - NULL, - &len, - 0); #else /* The darwin sendfile takes len as an input for the length to send, * so make sure to initialize it with the caller's value. */ @@ -1148,7 +1128,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) { #elif defined(__APPLE__) \ || defined(__DragonFly__) \ || defined(__FreeBSD__) \ - || defined(__FreeBSD_kernel__) \ || defined(__NetBSD__) \ || defined(__OpenBSD__) struct timeval tv[2]; @@ -1190,7 +1169,6 @@ static ssize_t uv__fs_lutime(uv_fs_t* req) { #elif defined(__APPLE__) || \ defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__NetBSD__) struct timeval tv[2]; tv[0] = uv__fs_to_timeval(req->atime); @@ -1241,10 +1219,10 @@ static ssize_t uv__fs_write(uv_fs_t* req) { } # if defined(__linux__) else { - r = uv__pwritev(req->file, - (struct iovec*) req->bufs, - req->nbufs, - req->off); + r = pwritev(req->file, + (struct iovec*) req->bufs, + req->nbufs, + req->off); if (r == -1 && errno == ENOSYS) { no_pwritev = 1; goto retry; @@ -1288,7 +1266,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { return srcfd; /* Get the source file's mode. */ - if (fstat(srcfd, &src_statsbuf)) { + if (uv__fstat(srcfd, &src_statsbuf)) { err = UV__ERR(errno); goto out; } @@ -1316,7 +1294,7 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { destination are not the same file. If they are the same, bail out early. */ if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) { /* Get the destination file's mode. */ - if (fstat(dstfd, &dst_statsbuf)) { + if (uv__fstat(dstfd, &dst_statsbuf)) { err = UV__ERR(errno); goto out; } @@ -1330,7 +1308,19 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) { /* Truncate the file in case the destination already existed. */ if (ftruncate(dstfd, 0) != 0) { err = UV__ERR(errno); - goto out; + + /* ftruncate() on ceph-fuse fails with EACCES when the file is created + * with read only permissions. Since ftruncate() on a newly created + * file is a meaningless operation anyway, detect that condition + * and squelch the error. + */ + if (err != UV_EACCES) + goto out; + + if (dst_statsbuf.st_size > 0) + goto out; + + err = 0; } } @@ -1514,14 +1504,14 @@ static int uv__fs_statx(int fd, uv_stat_t* buf) { STATIC_ASSERT(UV_ENOSYS != -1); #ifdef __linux__ - static int no_statx; + static _Atomic int no_statx; struct uv__statx statxbuf; int dirfd; int flags; int mode; int rc; - if (uv__load_relaxed(&no_statx)) + if (atomic_load_explicit(&no_statx, memory_order_relaxed)) return UV_ENOSYS; dirfd = AT_FDCWD; @@ -1555,30 +1545,11 @@ static int uv__fs_statx(int fd, * implemented, rc might return 1 with 0 set as the error code in which * case we return ENOSYS. 
*/ - uv__store_relaxed(&no_statx, 1); + atomic_store_explicit(&no_statx, 1, memory_order_relaxed); return UV_ENOSYS; } - buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor); - buf->st_mode = statxbuf.stx_mode; - buf->st_nlink = statxbuf.stx_nlink; - buf->st_uid = statxbuf.stx_uid; - buf->st_gid = statxbuf.stx_gid; - buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor); - buf->st_ino = statxbuf.stx_ino; - buf->st_size = statxbuf.stx_size; - buf->st_blksize = statxbuf.stx_blksize; - buf->st_blocks = statxbuf.stx_blocks; - buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec; - buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec; - buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec; - buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec; - buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec; - buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec; - buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec; - buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec; - buf->st_flags = 0; - buf->st_gen = 0; + uv__statx_to_stat(&statxbuf, buf); return 0; #else @@ -1595,7 +1566,7 @@ static int uv__fs_stat(const char *path, uv_stat_t *buf) { if (ret != UV_ENOSYS) return ret; - ret = stat(path, &pbuf); + ret = uv__stat(path, &pbuf); if (ret == 0) uv__to_stat(&pbuf, buf); @@ -1611,7 +1582,7 @@ static int uv__fs_lstat(const char *path, uv_stat_t *buf) { if (ret != UV_ENOSYS) return ret; - ret = lstat(path, &pbuf); + ret = uv__lstat(path, &pbuf); if (ret == 0) uv__to_stat(&pbuf, buf); @@ -1627,7 +1598,7 @@ static int uv__fs_fstat(int fd, uv_stat_t *buf) { if (ret != UV_ENOSYS) return ret; - ret = fstat(fd, &pbuf); + ret = uv__fstat(fd, &pbuf); if (ret == 0) uv__to_stat(&pbuf, buf); @@ -1822,6 +1793,9 @@ int uv_fs_chown(uv_loop_t* loop, int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { INIT(CLOSE); req->file = file; + if (cb != NULL) + if (uv__iou_fs_close(loop, req)) + return 0; POST; } @@ -1869,6 +1843,9 @@ int uv_fs_lchown(uv_loop_t* loop, int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { INIT(FDATASYNC); req->file = file; + if (cb != NULL) + if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* IORING_FSYNC_DATASYNC */ 1)) + return 0; POST; } @@ -1876,6 +1853,9 @@ int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { INIT(FSTAT); req->file = file; + if (cb != NULL) + if (uv__iou_fs_statx(loop, req, /* is_fstat */ 1, /* is_lstat */ 0)) + return 0; POST; } @@ -1883,6 +1863,9 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) { INIT(FSYNC); req->file = file; + if (cb != NULL) + if (uv__iou_fs_fsync_or_fdatasync(loop, req, /* no flags */ 0)) + return 0; POST; } @@ -1929,6 +1912,9 @@ int uv_fs_lutime(uv_loop_t* loop, int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { INIT(LSTAT); PATH; + if (cb != NULL) + if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 1)) + return 0; POST; } @@ -1990,6 +1976,9 @@ int uv_fs_open(uv_loop_t* loop, PATH; req->flags = flags; req->mode = mode; + if (cb != NULL) + if (uv__iou_fs_open(loop, req)) + return 0; POST; } @@ -2018,6 +2007,11 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); req->off = off; + + if (cb != NULL) + if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 1)) + return 0; + POST; } @@ -2125,6 +2119,9 @@ int 
uv_fs_sendfile(uv_loop_t* loop, int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) { INIT(STAT); PATH; + if (cb != NULL) + if (uv__iou_fs_statx(loop, req, /* is_fstat */ 0, /* is_lstat */ 0)) + return 0; POST; } @@ -2188,6 +2185,11 @@ int uv_fs_write(uv_loop_t* loop, memcpy(req->bufs, bufs, nbufs * sizeof(*bufs)); req->off = off; + + if (cb != NULL) + if (uv__iou_fs_read_or_write(loop, req, /* is_read */ 0)) + return 0; + POST; } @@ -2196,7 +2198,7 @@ void uv_fs_req_cleanup(uv_fs_t* req) { if (req == NULL) return; - /* Only necessary for asychronous requests, i.e., requests with a callback. + /* Only necessary for asynchronous requests, i.e., requests with a callback. * Synchronous ones don't copy their arguments and have req->path and * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory. diff --git a/deps/uv/src/unix/fsevents.c b/deps/uv/src/unix/fsevents.c index bf4f1f6a5180ab..0535b4547aa961 100644 --- a/deps/uv/src/unix/fsevents.c +++ b/deps/uv/src/unix/fsevents.c @@ -132,7 +132,6 @@ static void (*pCFRunLoopWakeUp)(CFRunLoopRef); static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)( CFAllocatorRef, const char*); -static CFStringEncoding (*pCFStringGetSystemEncoding)(void); static CFStringRef (*pkCFRunLoopDefaultMode); static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, FSEventStreamCallback, @@ -141,7 +140,6 @@ static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef, FSEventStreamEventId, CFTimeInterval, FSEventStreamCreateFlags); -static void (*pFSEventStreamFlushSync)(FSEventStreamRef); static void (*pFSEventStreamInvalidate)(FSEventStreamRef); static void (*pFSEventStreamRelease)(FSEventStreamRef); static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef, @@ -331,8 +329,9 @@ static void uv__fsevents_event_cb(const FSEventStreamRef streamRef, /* Runs in CF thread */ -static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { - uv__cf_loop_state_t* state; +static int uv__fsevents_create_stream(uv__cf_loop_state_t* state, + uv_loop_t* loop, + CFArrayRef paths) { FSEventStreamContext ctx; FSEventStreamRef ref; CFAbsoluteTime latency; @@ -373,10 +372,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { flags); assert(ref != NULL); - state = loop->cf_state; - pFSEventStreamScheduleWithRunLoop(ref, - state->loop, - *pkCFRunLoopDefaultMode); + pFSEventStreamScheduleWithRunLoop(ref, state->loop, *pkCFRunLoopDefaultMode); if (!pFSEventStreamStart(ref)) { pFSEventStreamInvalidate(ref); pFSEventStreamRelease(ref); @@ -389,11 +385,7 @@ static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) { /* Runs in CF thread */ -static void uv__fsevents_destroy_stream(uv_loop_t* loop) { - uv__cf_loop_state_t* state; - - state = loop->cf_state; - +static void uv__fsevents_destroy_stream(uv__cf_loop_state_t* state) { if (state->fsevent_stream == NULL) return; @@ -408,9 +400,9 @@ static void uv__fsevents_destroy_stream(uv_loop_t* loop) { /* Runs in CF thread, when there're new fsevent handles to add to stream */ -static void uv__fsevents_reschedule(uv_fs_event_t* handle, +static void uv__fsevents_reschedule(uv__cf_loop_state_t* state, + uv_loop_t* loop, uv__cf_loop_signal_type_t type) { - uv__cf_loop_state_t* state; QUEUE* q; uv_fs_event_t* curr; CFArrayRef cf_paths; @@ -419,7 +411,6 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle, int err; unsigned int path_count; - state = 
handle->loop->cf_state; paths = NULL; cf_paths = NULL; err = 0; @@ -438,7 +429,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle, uv_mutex_unlock(&state->fsevent_mutex); /* Destroy previous FSEventStream */ - uv__fsevents_destroy_stream(handle->loop); + uv__fsevents_destroy_stream(state); /* Any failure below will be a memory failure */ err = UV_ENOMEM; @@ -478,7 +469,7 @@ static void uv__fsevents_reschedule(uv_fs_event_t* handle, err = UV_ENOMEM; goto final; } - err = uv__fsevents_create_stream(handle->loop, cf_paths); + err = uv__fsevents_create_stream(state, loop, cf_paths); } final: @@ -563,10 +554,8 @@ static int uv__fsevents_global_init(void) { V(core_foundation_handle, CFRunLoopStop); V(core_foundation_handle, CFRunLoopWakeUp); V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation); - V(core_foundation_handle, CFStringGetSystemEncoding); V(core_foundation_handle, kCFRunLoopDefaultMode); V(core_services_handle, FSEventStreamCreate); - V(core_services_handle, FSEventStreamFlushSync); V(core_services_handle, FSEventStreamInvalidate); V(core_services_handle, FSEventStreamRelease); V(core_services_handle, FSEventStreamScheduleWithRunLoop); @@ -767,7 +756,7 @@ static void uv__cf_loop_cb(void* arg) { if (s->handle == NULL) pCFRunLoopStop(state->loop); else - uv__fsevents_reschedule(s->handle, s->type); + uv__fsevents_reschedule(state, loop, s->type); uv__free(s); } diff --git a/deps/uv/src/unix/haiku.c b/deps/uv/src/unix/haiku.c index cf17d836b4c7e8..31284b66dc3e96 100644 --- a/deps/uv/src/unix/haiku.c +++ b/deps/uv/src/unix/haiku.c @@ -84,6 +84,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + int uv_resident_set_memory(size_t* rss) { area_info area; ssize_t cookie; diff --git a/deps/uv/src/unix/hurd.c b/deps/uv/src/unix/hurd.c index d19ea6347906e3..63c878123f13ac 100644 --- a/deps/uv/src/unix/hurd.c +++ b/deps/uv/src/unix/hurd.c @@ -165,3 +165,8 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { uint64_t uv_get_constrained_memory(void) { return 0; /* Memory constraints are unknown. */ } + + +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} diff --git a/deps/uv/src/unix/ibmi.c b/deps/uv/src/unix/ibmi.c index 8c6ae636329e5b..837bba6e2fef7b 100644 --- a/deps/uv/src/unix/ibmi.c +++ b/deps/uv/src/unix/ibmi.c @@ -249,6 +249,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + void uv_loadavg(double avg[3]) { SSTS0200 rcvr; diff --git a/deps/uv/src/unix/internal.h b/deps/uv/src/unix/internal.h index cee35c2106aed2..6c5822e6a0d2a3 100644 --- a/deps/uv/src/unix/internal.h +++ b/deps/uv/src/unix/internal.h @@ -26,21 +26,34 @@ #include #include /* _POSIX_PATH_MAX, PATH_MAX */ +#include #include /* abort */ #include /* strrchr */ #include /* O_CLOEXEC and O_NONBLOCK, if supported. 
*/ #include #include #include +#include +#include + +#define uv__msan_unpoison(p, n) \ + do { \ + (void) (p); \ + (void) (n); \ + } while (0) + +#if defined(__has_feature) +# if __has_feature(memory_sanitizer) +# include +# undef uv__msan_unpoison +# define uv__msan_unpoison __msan_unpoison +# endif +#endif #if defined(__STRICT_ANSI__) # define inline __inline #endif -#if defined(__linux__) -# include "linux-syscalls.h" -#endif /* __linux__ */ - #if defined(__MVS__) # include "os390-syscalls.h" #endif /* __MVS__ */ @@ -79,13 +92,11 @@ # define UV__PATH_MAX 8192 #endif -#if defined(__ANDROID__) -int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset); -# ifdef pthread_sigmask -# undef pthread_sigmask -# endif -# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset) -#endif +union uv__sockaddr { + struct sockaddr_in6 in6; + struct sockaddr_in in; + struct sockaddr addr; +}; #define ACCESS_ONCE(type, var) \ (*(volatile type*) &(var)) @@ -166,12 +177,42 @@ struct uv__stream_queued_fds_s { int fds[1]; }; +#ifdef __linux__ +struct uv__statx_timestamp { + int64_t tv_sec; + uint32_t tv_nsec; + int32_t unused0; +}; + +struct uv__statx { + uint32_t stx_mask; + uint32_t stx_blksize; + uint64_t stx_attributes; + uint32_t stx_nlink; + uint32_t stx_uid; + uint32_t stx_gid; + uint16_t stx_mode; + uint16_t unused0; + uint64_t stx_ino; + uint64_t stx_size; + uint64_t stx_blocks; + uint64_t stx_attributes_mask; + struct uv__statx_timestamp stx_atime; + struct uv__statx_timestamp stx_btime; + struct uv__statx_timestamp stx_ctime; + struct uv__statx_timestamp stx_mtime; + uint32_t stx_rdev_major; + uint32_t stx_rdev_minor; + uint32_t stx_dev_major; + uint32_t stx_dev_minor; + uint64_t unused1[14]; +}; +#endif /* __linux__ */ #if defined(_AIX) || \ defined(__APPLE__) || \ defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__linux__) || \ defined(__OpenBSD__) || \ defined(__NetBSD__) @@ -258,10 +299,10 @@ int uv__signal_loop_fork(uv_loop_t* loop); /* platform specific */ uint64_t uv__hrtime(uv_clocktype_t type); int uv__kqueue_init(uv_loop_t* loop); -int uv__epoll_init(uv_loop_t* loop); int uv__platform_loop_init(uv_loop_t* loop); void uv__platform_loop_delete(uv_loop_t* loop); void uv__platform_invalidate_fd(uv_loop_t* loop, int fd); +int uv__process_init(uv_loop_t* loop); /* various */ void uv__async_close(uv_async_t* handle); @@ -278,7 +319,6 @@ size_t uv__thread_stack_size(void); void uv__udp_close(uv_udp_t* handle); void uv__udp_finish_close(uv_udp_t* handle); FILE* uv__open_file(const char* path); -int uv__getpwuid_r(uv_passwd_t* pwd); int uv__search_path(const char* prog, char* buf, size_t* buflen); void uv__wait_children(uv_loop_t* loop); @@ -289,6 +329,28 @@ int uv__random_getentropy(void* buf, size_t buflen); int uv__random_readpath(const char* path, void* buf, size_t buflen); int uv__random_sysctl(void* buf, size_t buflen); +/* io_uring */ +#ifdef __linux__ +int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req); +int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, + uv_fs_t* req, + uint32_t fsync_flags); +int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req); +int uv__iou_fs_read_or_write(uv_loop_t* loop, + uv_fs_t* req, + int is_read); +int uv__iou_fs_statx(uv_loop_t* loop, + uv_fs_t* req, + int is_fstat, + int is_lstat); +#else +#define uv__iou_fs_close(loop, req) 0 +#define uv__iou_fs_fsync_or_fdatasync(loop, req, fsync_flags) 0 +#define uv__iou_fs_open(loop, req) 0 +#define uv__iou_fs_read_or_write(loop, req, is_read) 
0 +#define uv__iou_fs_statx(loop, req, is_fstat, is_lstat) 0 +#endif + #if defined(__APPLE__) int uv___stream_fd(const uv_stream_t* handle); #define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle))) @@ -322,8 +384,51 @@ UV_UNUSED(static char* uv__basename_r(const char* path)) { return s + 1; } +UV_UNUSED(static int uv__fstat(int fd, struct stat* s)) { + int rc; + + rc = fstat(fd, s); + if (rc >= 0) + uv__msan_unpoison(s, sizeof(*s)); + + return rc; +} + +UV_UNUSED(static int uv__lstat(const char* path, struct stat* s)) { + int rc; + + rc = lstat(path, s); + if (rc >= 0) + uv__msan_unpoison(s, sizeof(*s)); + + return rc; +} + +UV_UNUSED(static int uv__stat(const char* path, struct stat* s)) { + int rc; + + rc = stat(path, s); + if (rc >= 0) + uv__msan_unpoison(s, sizeof(*s)); + + return rc; +} + #if defined(__linux__) -int uv__inotify_fork(uv_loop_t* loop, void* old_watchers); +ssize_t +uv__fs_copy_file_range(int fd_in, + off_t* off_in, + int fd_out, + off_t* off_out, + size_t len, + unsigned int flags); +int uv__statx(int dirfd, + const char* path, + int flags, + unsigned int mask, + struct uv__statx* statxbuf); +void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf); +ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags); #endif typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*); @@ -333,22 +438,6 @@ int uv__getsockpeername(const uv_handle_t* handle, struct sockaddr* name, int* namelen); -#if defined(__linux__) || \ - defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ - defined(__DragonFly__) -#define HAVE_MMSG 1 -struct uv__mmsghdr { - struct msghdr msg_hdr; - unsigned int msg_len; -}; - -int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen); -int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen); -#else -#define HAVE_MMSG 0 -#endif - #if defined(__sun) #if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L size_t strnlen(const char* s, size_t maxlen); @@ -365,5 +454,10 @@ uv__fs_copy_file_range(int fd_in, unsigned int flags); #endif +#if defined(__linux__) || (defined(__FreeBSD__) && __FreeBSD_version >= 1301000) +#define UV__CPU_AFFINITY_SUPPORTED 1 +#else +#define UV__CPU_AFFINITY_SUPPORTED 0 +#endif #endif /* UV_UNIX_INTERNAL_H_ */ diff --git a/deps/uv/src/unix/kqueue.c b/deps/uv/src/unix/kqueue.c index 5dac76ae753c6c..82916d659332b8 100644 --- a/deps/uv/src/unix/kqueue.c +++ b/deps/uv/src/unix/kqueue.c @@ -60,7 +60,7 @@ int uv__kqueue_init(uv_loop_t* loop) { #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 -static int uv__has_forked_with_cfrunloop; +static _Atomic int uv__has_forked_with_cfrunloop; #endif int uv__io_fork(uv_loop_t* loop) { @@ -82,7 +82,9 @@ int uv__io_fork(uv_loop_t* loop) { process. So we sidestep the issue by pretending like we never started it in the first place. 
*/ - uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1); + atomic_store_explicit(&uv__has_forked_with_cfrunloop, + 1, + memory_order_relaxed); uv__free(loop->cf_state); loop->cf_state = NULL; } @@ -109,7 +111,23 @@ int uv__io_check_fd(uv_loop_t* loop, int fd) { } +static void uv__kqueue_delete(int kqfd, const struct kevent *ev) { + struct kevent change; + + EV_SET(&change, ev->ident, ev->filter, EV_DELETE, 0, 0, 0); + + if (0 == kevent(kqfd, &change, 1, NULL, 0, NULL)) + return; + + if (errno == EBADF || errno == ENOENT) + return; + + abort(); +} + + void uv__io_poll(uv_loop_t* loop, int timeout) { + uv__loop_internal_fields_t* lfields; struct kevent events[1024]; struct kevent* ev; struct timespec spec; @@ -138,6 +156,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { return; } + lfields = uv__get_internal_fields(loop); nevents = 0; while (!QUEUE_EMPTY(&loop->watcher_queue)) { @@ -205,7 +224,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { base = loop->time; count = 48; /* Benchmarks suggest this gives the best throughput. */ - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -228,6 +247,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { if (pset != NULL) pthread_sigmask(SIG_BLOCK, pset, NULL); + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. + */ + lfields->current_timeout = timeout; + nfds = kevent(loop->backend_fd, events, nevents, @@ -235,6 +260,9 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { ARRAY_SIZE(events), timeout == -1 ? NULL : &spec); + if (nfds == -1) + assert(errno == EINTR); + if (pset != NULL) pthread_sigmask(SIG_UNBLOCK, pset, NULL); @@ -242,36 +270,26 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the * operating system didn't reschedule our process while in the syscall. */ - SAVE_ERRNO(uv__update_time(loop)); - - if (nfds == 0) { - if (reset_timeout != 0) { - timeout = user_timeout; - reset_timeout = 0; - if (timeout == -1) - continue; - if (timeout > 0) - goto update_timeout; + uv__update_time(loop); + + if (nfds == 0 || nfds == -1) { + /* If kqueue is empty or interrupted, we might still have children ready + * to reap immediately. */ + if (loop->flags & UV_LOOP_REAP_CHILDREN) { + loop->flags &= ~UV_LOOP_REAP_CHILDREN; + uv__wait_children(loop); + assert((reset_timeout == 0 ? timeout : user_timeout) == 0); + return; /* Equivalent to fall-through behavior. */ } - assert(timeout != -1); - return; - } - - if (nfds == -1) { - if (errno != EINTR) - abort(); - if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; - } - - if (timeout == 0) + } else if (nfds == 0) { + /* Reached the user timeout value. */ + assert(timeout != -1); return; - - if (timeout == -1) - continue; + } /* Interrupted by a signal. Update timeout and poll again. */ goto update_timeout; @@ -307,15 +325,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { w = loop->watchers[fd]; if (w == NULL) { - /* File descriptor that we've stopped watching, disarm it. - * TODO: batch up. 
*/ - struct kevent events[1]; - - EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); - if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) - if (errno != EBADF && errno != ENOENT) - abort(); - + /* File descriptor that we've stopped watching, disarm it. */ + uv__kqueue_delete(loop->backend_fd, ev); continue; } @@ -331,47 +342,27 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { revents = 0; if (ev->filter == EVFILT_READ) { - if (w->pevents & POLLIN) { + if (w->pevents & POLLIN) revents |= POLLIN; - w->rcount = ev->data; - } else { - /* TODO batch up */ - struct kevent events[1]; - EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); - if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) - if (errno != ENOENT) - abort(); - } + else + uv__kqueue_delete(loop->backend_fd, ev); + if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP)) revents |= UV__POLLRDHUP; } if (ev->filter == EV_OOBAND) { - if (w->pevents & UV__POLLPRI) { + if (w->pevents & UV__POLLPRI) revents |= UV__POLLPRI; - w->rcount = ev->data; - } else { - /* TODO batch up */ - struct kevent events[1]; - EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); - if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) - if (errno != ENOENT) - abort(); - } + else + uv__kqueue_delete(loop->backend_fd, ev); } if (ev->filter == EVFILT_WRITE) { - if (w->pevents & POLLOUT) { + if (w->pevents & POLLOUT) revents |= POLLOUT; - w->wcount = ev->data; - } else { - /* TODO batch up */ - struct kevent events[1]; - EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0); - if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL)) - if (errno != ENOENT) - abort(); - } + else + uv__kqueue_delete(loop->backend_fd, ev); } if (ev->flags & EV_ERROR) @@ -398,9 +389,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { uv__wait_children(loop); } + uv__metrics_inc_events(loop, nevents); if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); } if (have_signals != 0) { @@ -423,13 +416,13 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { return; } +update_timeout: if (timeout == 0) return; if (timeout == -1) continue; -update_timeout: assert(timeout > 0); diff = loop->time - base; @@ -541,13 +534,14 @@ int uv_fs_event_start(uv_fs_event_t* handle, handle->realpath_len = 0; handle->cf_flags = flags; - if (fstat(fd, &statbuf)) + if (uv__fstat(fd, &statbuf)) goto fallback; /* FSEvents works only with directories */ if (!(statbuf.st_mode & S_IFDIR)) goto fallback; - if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) { + if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop, + memory_order_relaxed)) { int r; /* The fallback fd is no longer needed */ uv__close_nocheckstdio(fd); @@ -582,7 +576,8 @@ int uv_fs_event_stop(uv_fs_event_t* handle) { uv__handle_stop(handle); #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 - if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) + if (0 == atomic_load_explicit(&uv__has_forked_with_cfrunloop, + memory_order_relaxed)) if (handle->cf_cb != NULL) r = uv__fsevents_close(handle); #endif diff --git a/deps/uv/src/unix/linux-core.c b/deps/uv/src/unix/linux-core.c deleted file mode 100644 index 23a7dafec814f6..00000000000000 --- a/deps/uv/src/unix/linux-core.c +++ /dev/null @@ -1,834 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. 
- * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their - * EPOLL* counterparts. We use the POLL* variants in this file because that - * is what libuv uses elsewhere. - */ - -#include "uv.h" -#include "internal.h" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define HAVE_IFADDRS_H 1 - -# if defined(__ANDROID_API__) && __ANDROID_API__ < 24 -# undef HAVE_IFADDRS_H -#endif - -#ifdef __UCLIBC__ -# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32 -# undef HAVE_IFADDRS_H -# endif -#endif - -#ifdef HAVE_IFADDRS_H -# include -# include -# include -# include -#endif /* HAVE_IFADDRS_H */ - -/* Available from 2.6.32 onwards. */ -#ifndef CLOCK_MONOTONIC_COARSE -# define CLOCK_MONOTONIC_COARSE 6 -#endif - -/* This is rather annoying: CLOCK_BOOTTIME lives in but we can't - * include that file because it conflicts with . We'll just have to - * define it ourselves. - */ -#ifndef CLOCK_BOOTTIME -# define CLOCK_BOOTTIME 7 -#endif - -static int read_models(unsigned int numcpus, uv_cpu_info_t* ci); -static int read_times(FILE* statfile_fp, - unsigned int numcpus, - uv_cpu_info_t* ci); -static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci); -static uint64_t read_cpufreq(unsigned int cpunum); - -int uv__platform_loop_init(uv_loop_t* loop) { - - loop->inotify_fd = -1; - loop->inotify_watchers = NULL; - - return uv__epoll_init(loop); -} - - -int uv__io_fork(uv_loop_t* loop) { - int err; - void* old_watchers; - - old_watchers = loop->inotify_watchers; - - uv__close(loop->backend_fd); - loop->backend_fd = -1; - uv__platform_loop_delete(loop); - - err = uv__platform_loop_init(loop); - if (err) - return err; - - return uv__inotify_fork(loop, old_watchers); -} - - -void uv__platform_loop_delete(uv_loop_t* loop) { - if (loop->inotify_fd == -1) return; - uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); - uv__close(loop->inotify_fd); - loop->inotify_fd = -1; -} - - - -uint64_t uv__hrtime(uv_clocktype_t type) { - static clock_t fast_clock_id = -1; - struct timespec t; - clock_t clock_id; - - /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has - * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is - * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may - * decide to make a costly system call. 
- */ - /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE - * when it has microsecond granularity or better (unlikely). - */ - clock_id = CLOCK_MONOTONIC; - if (type != UV_CLOCK_FAST) - goto done; - - clock_id = uv__load_relaxed(&fast_clock_id); - if (clock_id != -1) - goto done; - - clock_id = CLOCK_MONOTONIC; - if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t)) - if (t.tv_nsec <= 1 * 1000 * 1000) - clock_id = CLOCK_MONOTONIC_COARSE; - - uv__store_relaxed(&fast_clock_id, clock_id); - -done: - - if (clock_gettime(clock_id, &t)) - return 0; /* Not really possible. */ - - return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec; -} - - -int uv_resident_set_memory(size_t* rss) { - char buf[1024]; - const char* s; - ssize_t n; - long val; - int fd; - int i; - - do - fd = open("/proc/self/stat", O_RDONLY); - while (fd == -1 && errno == EINTR); - - if (fd == -1) - return UV__ERR(errno); - - do - n = read(fd, buf, sizeof(buf) - 1); - while (n == -1 && errno == EINTR); - - uv__close(fd); - if (n == -1) - return UV__ERR(errno); - buf[n] = '\0'; - - s = strchr(buf, ' '); - if (s == NULL) - goto err; - - s += 1; - if (*s != '(') - goto err; - - s = strchr(s, ')'); - if (s == NULL) - goto err; - - for (i = 1; i <= 22; i++) { - s = strchr(s + 1, ' '); - if (s == NULL) - goto err; - } - - errno = 0; - val = strtol(s, NULL, 10); - if (errno != 0) - goto err; - if (val < 0) - goto err; - - *rss = val * getpagesize(); - return 0; - -err: - return UV_EINVAL; -} - -int uv_uptime(double* uptime) { - static volatile int no_clock_boottime; - char buf[128]; - struct timespec now; - int r; - - /* Try /proc/uptime first, then fallback to clock_gettime(). */ - - if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf))) - if (1 == sscanf(buf, "%lf", uptime)) - return 0; - - /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available - * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system - * is suspended. - */ - if (no_clock_boottime) { - retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now); - } - else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) { - no_clock_boottime = 1; - goto retry_clock_gettime; - } - - if (r) - return UV__ERR(errno); - - *uptime = now.tv_sec; - return 0; -} - - -static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) { - unsigned int num; - char buf[1024]; - - if (!fgets(buf, sizeof(buf), statfile_fp)) - return UV_EIO; - - num = 0; - while (fgets(buf, sizeof(buf), statfile_fp)) { - if (strncmp(buf, "cpu", 3)) - break; - num++; - } - - if (num == 0) - return UV_EIO; - - *numcpus = num; - return 0; -} - - -int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) { - unsigned int numcpus; - uv_cpu_info_t* ci; - int err; - FILE* statfile_fp; - - *cpu_infos = NULL; - *count = 0; - - statfile_fp = uv__open_file("/proc/stat"); - if (statfile_fp == NULL) - return UV__ERR(errno); - - err = uv__cpu_num(statfile_fp, &numcpus); - if (err < 0) - goto out; - - err = UV_ENOMEM; - ci = uv__calloc(numcpus, sizeof(*ci)); - if (ci == NULL) - goto out; - - err = read_models(numcpus, ci); - if (err == 0) - err = read_times(statfile_fp, numcpus, ci); - - if (err) { - uv_free_cpu_info(ci, numcpus); - goto out; - } - - /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo. - * We don't check for errors here. Worst case, the field is left zero. 
- */ - if (ci[0].speed == 0) - read_speeds(numcpus, ci); - - *cpu_infos = ci; - *count = numcpus; - err = 0; - -out: - - if (fclose(statfile_fp)) - if (errno != EINTR && errno != EINPROGRESS) - abort(); - - return err; -} - - -static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) { - unsigned int num; - - for (num = 0; num < numcpus; num++) - ci[num].speed = read_cpufreq(num) / 1000; -} - - -/* Also reads the CPU frequency on ppc and x86. The other architectures only - * have a BogoMIPS field, which may not be very accurate. - * - * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup. - */ -static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) { -#if defined(__PPC__) - static const char model_marker[] = "cpu\t\t: "; - static const char speed_marker[] = "clock\t\t: "; -#else - static const char model_marker[] = "model name\t: "; - static const char speed_marker[] = "cpu MHz\t\t: "; -#endif - const char* inferred_model; - unsigned int model_idx; - unsigned int speed_idx; - unsigned int part_idx; - char buf[1024]; - char* model; - FILE* fp; - int model_id; - - /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */ - (void) &model_marker; - (void) &speed_marker; - (void) &speed_idx; - (void) &part_idx; - (void) &model; - (void) &buf; - (void) &fp; - (void) &model_id; - - model_idx = 0; - speed_idx = 0; - part_idx = 0; - -#if defined(__arm__) || \ - defined(__i386__) || \ - defined(__mips__) || \ - defined(__aarch64__) || \ - defined(__PPC__) || \ - defined(__x86_64__) - fp = uv__open_file("/proc/cpuinfo"); - if (fp == NULL) - return UV__ERR(errno); - - while (fgets(buf, sizeof(buf), fp)) { - if (model_idx < numcpus) { - if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { - model = buf + sizeof(model_marker) - 1; - model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */ - if (model == NULL) { - fclose(fp); - return UV_ENOMEM; - } - ci[model_idx++].model = model; - continue; - } - } -#if defined(__arm__) || defined(__mips__) || defined(__aarch64__) - if (model_idx < numcpus) { -#if defined(__arm__) - /* Fallback for pre-3.8 kernels. 
*/ - static const char model_marker[] = "Processor\t: "; -#elif defined(__aarch64__) - static const char part_marker[] = "CPU part\t: "; - - /* Adapted from: https://github.com/karelzak/util-linux */ - struct vendor_part { - const int id; - const char* name; - }; - - static const struct vendor_part arm_chips[] = { - { 0x811, "ARM810" }, - { 0x920, "ARM920" }, - { 0x922, "ARM922" }, - { 0x926, "ARM926" }, - { 0x940, "ARM940" }, - { 0x946, "ARM946" }, - { 0x966, "ARM966" }, - { 0xa20, "ARM1020" }, - { 0xa22, "ARM1022" }, - { 0xa26, "ARM1026" }, - { 0xb02, "ARM11 MPCore" }, - { 0xb36, "ARM1136" }, - { 0xb56, "ARM1156" }, - { 0xb76, "ARM1176" }, - { 0xc05, "Cortex-A5" }, - { 0xc07, "Cortex-A7" }, - { 0xc08, "Cortex-A8" }, - { 0xc09, "Cortex-A9" }, - { 0xc0d, "Cortex-A17" }, /* Originally A12 */ - { 0xc0f, "Cortex-A15" }, - { 0xc0e, "Cortex-A17" }, - { 0xc14, "Cortex-R4" }, - { 0xc15, "Cortex-R5" }, - { 0xc17, "Cortex-R7" }, - { 0xc18, "Cortex-R8" }, - { 0xc20, "Cortex-M0" }, - { 0xc21, "Cortex-M1" }, - { 0xc23, "Cortex-M3" }, - { 0xc24, "Cortex-M4" }, - { 0xc27, "Cortex-M7" }, - { 0xc60, "Cortex-M0+" }, - { 0xd01, "Cortex-A32" }, - { 0xd03, "Cortex-A53" }, - { 0xd04, "Cortex-A35" }, - { 0xd05, "Cortex-A55" }, - { 0xd06, "Cortex-A65" }, - { 0xd07, "Cortex-A57" }, - { 0xd08, "Cortex-A72" }, - { 0xd09, "Cortex-A73" }, - { 0xd0a, "Cortex-A75" }, - { 0xd0b, "Cortex-A76" }, - { 0xd0c, "Neoverse-N1" }, - { 0xd0d, "Cortex-A77" }, - { 0xd0e, "Cortex-A76AE" }, - { 0xd13, "Cortex-R52" }, - { 0xd20, "Cortex-M23" }, - { 0xd21, "Cortex-M33" }, - { 0xd41, "Cortex-A78" }, - { 0xd42, "Cortex-A78AE" }, - { 0xd4a, "Neoverse-E1" }, - { 0xd4b, "Cortex-A78C" }, - }; - - if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) { - model = buf + sizeof(part_marker) - 1; - - errno = 0; - model_id = strtol(model, NULL, 16); - if ((errno != 0) || model_id < 0) { - fclose(fp); - return UV_EINVAL; - } - - for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) { - if (model_id == arm_chips[part_idx].id) { - model = uv__strdup(arm_chips[part_idx].name); - if (model == NULL) { - fclose(fp); - return UV_ENOMEM; - } - ci[model_idx++].model = model; - break; - } - } - } -#else /* defined(__mips__) */ - static const char model_marker[] = "cpu model\t\t: "; -#endif - if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) { - model = buf + sizeof(model_marker) - 1; - model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */ - if (model == NULL) { - fclose(fp); - return UV_ENOMEM; - } - ci[model_idx++].model = model; - continue; - } - } -#else /* !__arm__ && !__mips__ && !__aarch64__ */ - if (speed_idx < numcpus) { - if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) { - ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1); - continue; - } - } -#endif /* __arm__ || __mips__ || __aarch64__ */ - } - - fclose(fp); -#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch__ */ - - /* Now we want to make sure that all the models contain *something* because - * it's not safe to leave them as null. Copy the last entry unless there - * isn't one, in that case we simply put "unknown" into everything. 
- */ - inferred_model = "unknown"; - if (model_idx > 0) - inferred_model = ci[model_idx - 1].model; - - while (model_idx < numcpus) { - model = uv__strndup(inferred_model, strlen(inferred_model)); - if (model == NULL) - return UV_ENOMEM; - ci[model_idx++].model = model; - } - - return 0; -} - - -static int read_times(FILE* statfile_fp, - unsigned int numcpus, - uv_cpu_info_t* ci) { - struct uv_cpu_times_s ts; - unsigned int ticks; - unsigned int multiplier; - uint64_t user; - uint64_t nice; - uint64_t sys; - uint64_t idle; - uint64_t dummy; - uint64_t irq; - uint64_t num; - uint64_t len; - char buf[1024]; - - ticks = (unsigned int)sysconf(_SC_CLK_TCK); - assert(ticks != (unsigned int) -1); - assert(ticks != 0); - multiplier = ((uint64_t)1000L / ticks); - - rewind(statfile_fp); - - if (!fgets(buf, sizeof(buf), statfile_fp)) - abort(); - - num = 0; - - while (fgets(buf, sizeof(buf), statfile_fp)) { - if (num >= numcpus) - break; - - if (strncmp(buf, "cpu", 3)) - break; - - /* skip "cpu " marker */ - { - unsigned int n; - int r = sscanf(buf, "cpu%u ", &n); - assert(r == 1); - (void) r; /* silence build warning */ - for (len = sizeof("cpu0"); n /= 10; len++); - } - - /* Line contains user, nice, system, idle, iowait, irq, softirq, steal, - * guest, guest_nice but we're only interested in the first four + irq. - * - * Don't use %*s to skip fields or %ll to read straight into the uint64_t - * fields, they're not allowed in C89 mode. - */ - if (6 != sscanf(buf + len, - "%" PRIu64 " %" PRIu64 " %" PRIu64 - "%" PRIu64 " %" PRIu64 " %" PRIu64, - &user, - &nice, - &sys, - &idle, - &dummy, - &irq)) - abort(); - - ts.user = user * multiplier; - ts.nice = nice * multiplier; - ts.sys = sys * multiplier; - ts.idle = idle * multiplier; - ts.irq = irq * multiplier; - ci[num++].cpu_times = ts; - } - assert(num == numcpus); - - return 0; -} - - -static uint64_t read_cpufreq(unsigned int cpunum) { - uint64_t val; - char buf[1024]; - FILE* fp; - - snprintf(buf, - sizeof(buf), - "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", - cpunum); - - fp = uv__open_file(buf); - if (fp == NULL) - return 0; - - if (fscanf(fp, "%" PRIu64, &val) != 1) - val = 0; - - fclose(fp); - - return val; -} - - -#ifdef HAVE_IFADDRS_H -static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) { - if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) - return 1; - if (ent->ifa_addr == NULL) - return 1; - /* - * On Linux getifaddrs returns information related to the raw underlying - * devices. We're not interested in this information yet. 
- */ - if (ent->ifa_addr->sa_family == PF_PACKET) - return exclude_type; - return !exclude_type; -} -#endif - -int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { -#ifndef HAVE_IFADDRS_H - *count = 0; - *addresses = NULL; - return UV_ENOSYS; -#else - struct ifaddrs *addrs, *ent; - uv_interface_address_t* address; - int i; - struct sockaddr_ll *sll; - - *count = 0; - *addresses = NULL; - - if (getifaddrs(&addrs)) - return UV__ERR(errno); - - /* Count the number of interfaces */ - for (ent = addrs; ent != NULL; ent = ent->ifa_next) { - if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR)) - continue; - - (*count)++; - } - - if (*count == 0) { - freeifaddrs(addrs); - return 0; - } - - /* Make sure the memory is initiallized to zero using calloc() */ - *addresses = uv__calloc(*count, sizeof(**addresses)); - if (!(*addresses)) { - freeifaddrs(addrs); - return UV_ENOMEM; - } - - address = *addresses; - - for (ent = addrs; ent != NULL; ent = ent->ifa_next) { - if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR)) - continue; - - address->name = uv__strdup(ent->ifa_name); - - if (ent->ifa_addr->sa_family == AF_INET6) { - address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); - } else { - address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); - } - - if (ent->ifa_netmask->sa_family == AF_INET6) { - address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); - } else { - address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); - } - - address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); - - address++; - } - - /* Fill in physical addresses for each interface */ - for (ent = addrs; ent != NULL; ent = ent->ifa_next) { - if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS)) - continue; - - address = *addresses; - - for (i = 0; i < (*count); i++) { - size_t namelen = strlen(ent->ifa_name); - /* Alias interface share the same physical address */ - if (strncmp(address->name, ent->ifa_name, namelen) == 0 && - (address->name[namelen] == 0 || address->name[namelen] == ':')) { - sll = (struct sockaddr_ll*)ent->ifa_addr; - memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr)); - } - address++; - } - } - - freeifaddrs(addrs); - - return 0; -#endif -} - - -void uv_free_interface_addresses(uv_interface_address_t* addresses, - int count) { - int i; - - for (i = 0; i < count; i++) { - uv__free(addresses[i].name); - } - - uv__free(addresses); -} - - -void uv__set_process_title(const char* title) { -#if defined(PR_SET_NAME) - prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */ -#endif -} - - -static uint64_t uv__read_proc_meminfo(const char* what) { - uint64_t rc; - char* p; - char buf[4096]; /* Large enough to hold all of /proc/meminfo. 
*/ - - if (uv__slurp("/proc/meminfo", buf, sizeof(buf))) - return 0; - - p = strstr(buf, what); - - if (p == NULL) - return 0; - - p += strlen(what); - - rc = 0; - sscanf(p, "%" PRIu64 " kB", &rc); - - return rc * 1024; -} - - -uint64_t uv_get_free_memory(void) { - struct sysinfo info; - uint64_t rc; - - rc = uv__read_proc_meminfo("MemAvailable:"); - - if (rc != 0) - return rc; - - if (0 == sysinfo(&info)) - return (uint64_t) info.freeram * info.mem_unit; - - return 0; -} - - -uint64_t uv_get_total_memory(void) { - struct sysinfo info; - uint64_t rc; - - rc = uv__read_proc_meminfo("MemTotal:"); - - if (rc != 0) - return rc; - - if (0 == sysinfo(&info)) - return (uint64_t) info.totalram * info.mem_unit; - - return 0; -} - - -static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) { - char filename[256]; - char buf[32]; /* Large enough to hold an encoded uint64_t. */ - uint64_t rc; - - rc = 0; - snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param); - if (0 == uv__slurp(filename, buf, sizeof(buf))) - sscanf(buf, "%" PRIu64, &rc); - - return rc; -} - - -uint64_t uv_get_constrained_memory(void) { - /* - * This might return 0 if there was a problem getting the memory limit from - * cgroups. This is OK because a return value of 0 signifies that the memory - * limit is unknown. - */ - return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes"); -} - - -void uv_loadavg(double avg[3]) { - struct sysinfo info; - char buf[128]; /* Large enough to hold all of /proc/loadavg. */ - - if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf))) - if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2])) - return; - - if (sysinfo(&info) < 0) - return; - - avg[0] = (double) info.loads[0] / 65536.0; - avg[1] = (double) info.loads[1] / 65536.0; - avg[2] = (double) info.loads[2] / 65536.0; -} diff --git a/deps/uv/src/unix/linux-inotify.c b/deps/uv/src/unix/linux-inotify.c deleted file mode 100644 index c1bd260e16e5e9..00000000000000 --- a/deps/uv/src/unix/linux-inotify.c +++ /dev/null @@ -1,327 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
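
A few hunks up, uv_get_constrained_memory() derives the memory limit from the cgroup v1 file memory.limit_in_bytes, returning 0 when the limit is unknown. A sketch of that read-one-integer pattern; the helper name is illustrative, and as a general note cgroup v2 exposes the equivalent limit as memory.max:

#include <inttypes.h>
#include <stdio.h>

static uint64_t read_cgroup_u64(const char* cgroup, const char* param) {
  char path[256];
  char buf[32];                  /* plenty for a decimal uint64_t */
  uint64_t val = 0;
  FILE* fp;

  snprintf(path, sizeof(path), "/sys/fs/cgroup/%s/%s", cgroup, param);

  fp = fopen(path, "r");
  if (fp == NULL)
    return 0;                    /* 0 means "no limit known", as in libuv */

  if (fgets(buf, sizeof(buf), fp))
    sscanf(buf, "%" SCNu64, &val);

  fclose(fp);
  return val;
}

/* Usage: read_cgroup_u64("memory", "memory.limit_in_bytes") */
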
- */ - -#include "uv.h" -#include "uv/tree.h" -#include "internal.h" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -struct watcher_list { - RB_ENTRY(watcher_list) entry; - QUEUE watchers; - int iterating; - char* path; - int wd; -}; - -struct watcher_root { - struct watcher_list* rbh_root; -}; -#define CAST(p) ((struct watcher_root*)(p)) - - -static int compare_watchers(const struct watcher_list* a, - const struct watcher_list* b) { - if (a->wd < b->wd) return -1; - if (a->wd > b->wd) return 1; - return 0; -} - - -RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) - - -static void uv__inotify_read(uv_loop_t* loop, - uv__io_t* w, - unsigned int revents); - -static void maybe_free_watcher_list(struct watcher_list* w, - uv_loop_t* loop); - -static int init_inotify(uv_loop_t* loop) { - int fd; - - if (loop->inotify_fd != -1) - return 0; - - fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC); - if (fd < 0) - return UV__ERR(errno); - - loop->inotify_fd = fd; - uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd); - uv__io_start(loop, &loop->inotify_read_watcher, POLLIN); - - return 0; -} - - -int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) { - /* Open the inotify_fd, and re-arm all the inotify watchers. */ - int err; - struct watcher_list* tmp_watcher_list_iter; - struct watcher_list* watcher_list; - struct watcher_list tmp_watcher_list; - QUEUE queue; - QUEUE* q; - uv_fs_event_t* handle; - char* tmp_path; - - if (old_watchers != NULL) { - /* We must restore the old watcher list to be able to close items - * out of it. - */ - loop->inotify_watchers = old_watchers; - - QUEUE_INIT(&tmp_watcher_list.watchers); - /* Note that the queue we use is shared with the start and stop() - * functions, making QUEUE_FOREACH unsafe to use. So we use the - * QUEUE_MOVE trick to safely iterate. Also don't free the watcher - * list until we're done iterating. c.f. uv__inotify_read. - */ - RB_FOREACH_SAFE(watcher_list, watcher_root, - CAST(&old_watchers), tmp_watcher_list_iter) { - watcher_list->iterating = 1; - QUEUE_MOVE(&watcher_list->watchers, &queue); - while (!QUEUE_EMPTY(&queue)) { - q = QUEUE_HEAD(&queue); - handle = QUEUE_DATA(q, uv_fs_event_t, watchers); - /* It's critical to keep a copy of path here, because it - * will be set to NULL by stop() and then deallocated by - * maybe_free_watcher_list - */ - tmp_path = uv__strdup(handle->path); - assert(tmp_path != NULL); - QUEUE_REMOVE(q); - QUEUE_INSERT_TAIL(&watcher_list->watchers, q); - uv_fs_event_stop(handle); - - QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers); - handle->path = tmp_path; - } - watcher_list->iterating = 0; - maybe_free_watcher_list(watcher_list, loop); - } - - QUEUE_MOVE(&tmp_watcher_list.watchers, &queue); - while (!QUEUE_EMPTY(&queue)) { - q = QUEUE_HEAD(&queue); - QUEUE_REMOVE(q); - handle = QUEUE_DATA(q, uv_fs_event_t, watchers); - tmp_path = handle->path; - handle->path = NULL; - err = uv_fs_event_start(handle, handle->cb, tmp_path, 0); - uv__free(tmp_path); - if (err) - return err; - } - } - - return 0; -} - - -static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { - struct watcher_list w; - w.wd = wd; - return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w); -} - -static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { - /* if the watcher_list->watchers is being iterated over, we can't free it. 
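
For readers unfamiliar with the kernel API wrapped by this inotify code (deleted here and re-added later in the patch as part of linux.c): the sketch below shows the bare inotify_init1() / inotify_add_watch() / read() cycle, including the walk over variable-length event records. Standalone illustration, not libuv code; real callers wait for POLLIN on the fd instead of reading once.

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

static int watch_once(const char* path) {
  char buf[4096];                      /* sizeof(inotify_event) + name */
  const struct inotify_event* e;
  const char* p;
  ssize_t n;
  int fd;
  int wd;

  fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
  if (fd == -1)
    return -1;

  wd = inotify_add_watch(fd, path, IN_ATTRIB | IN_MODIFY | IN_MOVE_SELF);
  if (wd == -1) {
    close(fd);
    return -1;
  }

  /* Non-blocking: returns -1/EAGAIN when nothing has happened yet. */
  n = read(fd, buf, sizeof(buf));

  for (p = buf; n > 0 && p < buf + n; p += sizeof(*e) + e->len) {
    e = (const struct inotify_event*) p;
    printf("wd=%d mask=%#x name=%s\n",
           e->wd, (unsigned) e->mask, e->len ? (const char*) (e + 1) : path);
  }

  close(fd);
  return 0;
}
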
*/ - if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { - /* No watchers left for this path. Clean up. */ - RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w); - inotify_rm_watch(loop->inotify_fd, w->wd); - uv__free(w); - } -} - -static void uv__inotify_read(uv_loop_t* loop, - uv__io_t* dummy, - unsigned int events) { - const struct inotify_event* e; - struct watcher_list* w; - uv_fs_event_t* h; - QUEUE queue; - QUEUE* q; - const char* path; - ssize_t size; - const char *p; - /* needs to be large enough for sizeof(inotify_event) + strlen(path) */ - char buf[4096]; - - for (;;) { - do - size = read(loop->inotify_fd, buf, sizeof(buf)); - while (size == -1 && errno == EINTR); - - if (size == -1) { - assert(errno == EAGAIN || errno == EWOULDBLOCK); - break; - } - - assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */ - - /* Now we have one or more inotify_event structs. */ - for (p = buf; p < buf + size; p += sizeof(*e) + e->len) { - e = (const struct inotify_event*) p; - - events = 0; - if (e->mask & (IN_ATTRIB|IN_MODIFY)) - events |= UV_CHANGE; - if (e->mask & ~(IN_ATTRIB|IN_MODIFY)) - events |= UV_RENAME; - - w = find_watcher(loop, e->wd); - if (w == NULL) - continue; /* Stale event, no watchers left. */ - - /* inotify does not return the filename when monitoring a single file - * for modifications. Repurpose the filename for API compatibility. - * I'm not convinced this is a good thing, maybe it should go. - */ - path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path); - - /* We're about to iterate over the queue and call user's callbacks. - * What can go wrong? - * A callback could call uv_fs_event_stop() - * and the queue can change under our feet. - * So, we use QUEUE_MOVE() trick to safely iterate over the queue. - * And we don't free the watcher_list until we're done iterating. - * - * First, - * tell uv_fs_event_stop() (that could be called from a user's callback) - * not to free watcher_list. 
- */ - w->iterating = 1; - QUEUE_MOVE(&w->watchers, &queue); - while (!QUEUE_EMPTY(&queue)) { - q = QUEUE_HEAD(&queue); - h = QUEUE_DATA(q, uv_fs_event_t, watchers); - - QUEUE_REMOVE(q); - QUEUE_INSERT_TAIL(&w->watchers, q); - - h->cb(h, path, events, 0); - } - /* done iterating, time to (maybe) free empty watcher_list */ - w->iterating = 0; - maybe_free_watcher_list(w, loop); - } - } -} - - -int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { - uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); - return 0; -} - - -int uv_fs_event_start(uv_fs_event_t* handle, - uv_fs_event_cb cb, - const char* path, - unsigned int flags) { - struct watcher_list* w; - size_t len; - int events; - int err; - int wd; - - if (uv__is_active(handle)) - return UV_EINVAL; - - err = init_inotify(handle->loop); - if (err) - return err; - - events = IN_ATTRIB - | IN_CREATE - | IN_MODIFY - | IN_DELETE - | IN_DELETE_SELF - | IN_MOVE_SELF - | IN_MOVED_FROM - | IN_MOVED_TO; - - wd = inotify_add_watch(handle->loop->inotify_fd, path, events); - if (wd == -1) - return UV__ERR(errno); - - w = find_watcher(handle->loop, wd); - if (w) - goto no_insert; - - len = strlen(path) + 1; - w = uv__malloc(sizeof(*w) + len); - if (w == NULL) - return UV_ENOMEM; - - w->wd = wd; - w->path = memcpy(w + 1, path, len); - QUEUE_INIT(&w->watchers); - w->iterating = 0; - RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w); - -no_insert: - uv__handle_start(handle); - QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); - handle->path = w->path; - handle->cb = cb; - handle->wd = wd; - - return 0; -} - - -int uv_fs_event_stop(uv_fs_event_t* handle) { - struct watcher_list* w; - - if (!uv__is_active(handle)) - return 0; - - w = find_watcher(handle->loop, handle->wd); - assert(w != NULL); - - handle->wd = -1; - handle->path = NULL; - uv__handle_stop(handle); - QUEUE_REMOVE(&handle->watchers); - - maybe_free_watcher_list(w, handle->loop); - - return 0; -} - - -void uv__fs_event_close(uv_fs_event_t* handle) { - uv_fs_event_stop(handle); -} diff --git a/deps/uv/src/unix/linux-syscalls.c b/deps/uv/src/unix/linux-syscalls.c deleted file mode 100644 index 5071cd56d1fcb2..00000000000000 --- a/deps/uv/src/unix/linux-syscalls.c +++ /dev/null @@ -1,264 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. 
- */ - -#include "linux-syscalls.h" -#include -#include -#include -#include -#include - -#if defined(__arm__) -# if defined(__thumb__) || defined(__ARM_EABI__) -# define UV_SYSCALL_BASE 0 -# else -# define UV_SYSCALL_BASE 0x900000 -# endif -#endif /* __arm__ */ - -#ifndef __NR_recvmmsg -# if defined(__x86_64__) -# define __NR_recvmmsg 299 -# elif defined(__arm__) -# define __NR_recvmmsg (UV_SYSCALL_BASE + 365) -# endif -#endif /* __NR_recvmsg */ - -#ifndef __NR_sendmmsg -# if defined(__x86_64__) -# define __NR_sendmmsg 307 -# elif defined(__arm__) -# define __NR_sendmmsg (UV_SYSCALL_BASE + 374) -# endif -#endif /* __NR_sendmmsg */ - -#ifndef __NR_utimensat -# if defined(__x86_64__) -# define __NR_utimensat 280 -# elif defined(__i386__) -# define __NR_utimensat 320 -# elif defined(__arm__) -# define __NR_utimensat (UV_SYSCALL_BASE + 348) -# endif -#endif /* __NR_utimensat */ - -#ifndef __NR_preadv -# if defined(__x86_64__) -# define __NR_preadv 295 -# elif defined(__i386__) -# define __NR_preadv 333 -# elif defined(__arm__) -# define __NR_preadv (UV_SYSCALL_BASE + 361) -# endif -#endif /* __NR_preadv */ - -#ifndef __NR_pwritev -# if defined(__x86_64__) -# define __NR_pwritev 296 -# elif defined(__i386__) -# define __NR_pwritev 334 -# elif defined(__arm__) -# define __NR_pwritev (UV_SYSCALL_BASE + 362) -# endif -#endif /* __NR_pwritev */ - -#ifndef __NR_dup3 -# if defined(__x86_64__) -# define __NR_dup3 292 -# elif defined(__i386__) -# define __NR_dup3 330 -# elif defined(__arm__) -# define __NR_dup3 (UV_SYSCALL_BASE + 358) -# endif -#endif /* __NR_pwritev */ - -#ifndef __NR_copy_file_range -# if defined(__x86_64__) -# define __NR_copy_file_range 326 -# elif defined(__i386__) -# define __NR_copy_file_range 377 -# elif defined(__s390__) -# define __NR_copy_file_range 375 -# elif defined(__arm__) -# define __NR_copy_file_range (UV_SYSCALL_BASE + 391) -# elif defined(__aarch64__) -# define __NR_copy_file_range 285 -# elif defined(__powerpc__) -# define __NR_copy_file_range 379 -# elif defined(__arc__) -# define __NR_copy_file_range 285 -# endif -#endif /* __NR_copy_file_range */ - -#ifndef __NR_statx -# if defined(__x86_64__) -# define __NR_statx 332 -# elif defined(__i386__) -# define __NR_statx 383 -# elif defined(__aarch64__) -# define __NR_statx 397 -# elif defined(__arm__) -# define __NR_statx (UV_SYSCALL_BASE + 397) -# elif defined(__ppc__) -# define __NR_statx 383 -# elif defined(__s390__) -# define __NR_statx 379 -# endif -#endif /* __NR_statx */ - -#ifndef __NR_getrandom -# if defined(__x86_64__) -# define __NR_getrandom 318 -# elif defined(__i386__) -# define __NR_getrandom 355 -# elif defined(__aarch64__) -# define __NR_getrandom 384 -# elif defined(__arm__) -# define __NR_getrandom (UV_SYSCALL_BASE + 384) -# elif defined(__ppc__) -# define __NR_getrandom 359 -# elif defined(__s390__) -# define __NR_getrandom 349 -# endif -#endif /* __NR_getrandom */ - -struct uv__mmsghdr; - -int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) { -#if defined(__i386__) - unsigned long args[4]; - int rc; - - args[0] = (unsigned long) fd; - args[1] = (unsigned long) mmsg; - args[2] = (unsigned long) vlen; - args[3] = /* flags */ 0; - - /* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. 
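
The block of __NR_* definitions above exists so the wrappers can issue syscalls even when old libc headers do not supply the numbers. The pattern, reduced to one hedged example; getrandom_compat is an illustrative name, not a libuv symbol:

#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static long getrandom_compat(void* buf, size_t buflen, unsigned flags) {
#ifdef __NR_getrandom
  return syscall(__NR_getrandom, buf, buflen, flags);
#else
  errno = ENOSYS;                /* same fallback the wrappers above use */
  return -1;
#endif
}
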
*/ - rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args); - if (rc == -1) - if (errno == EINVAL) - errno = ENOSYS; - - return rc; -#elif defined(__NR_sendmmsg) - return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0); -#else - return errno = ENOSYS, -1; -#endif -} - - -int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) { -#if defined(__i386__) - unsigned long args[5]; - int rc; - - args[0] = (unsigned long) fd; - args[1] = (unsigned long) mmsg; - args[2] = (unsigned long) vlen; - args[3] = /* flags */ 0; - args[4] = /* timeout */ 0; - - /* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */ - rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args); - if (rc == -1) - if (errno == EINVAL) - errno = ENOSYS; - - return rc; -#elif defined(__NR_recvmmsg) - return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0); -#else - return errno = ENOSYS, -1; -#endif -} - - -ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { -#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24 - return errno = ENOSYS, -1; -#else - return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); -#endif -} - - -ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) { -#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24 - return errno = ENOSYS, -1; -#else - return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32)); -#endif -} - - -int uv__dup3(int oldfd, int newfd, int flags) { -#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21 - return errno = ENOSYS, -1; -#else - return syscall(__NR_dup3, oldfd, newfd, flags); -#endif -} - - -ssize_t -uv__fs_copy_file_range(int fd_in, - off_t* off_in, - int fd_out, - off_t* off_out, - size_t len, - unsigned int flags) -{ -#ifdef __NR_copy_file_range - return syscall(__NR_copy_file_range, - fd_in, - off_in, - fd_out, - off_out, - len, - flags); -#else - return errno = ENOSYS, -1; -#endif -} - - -int uv__statx(int dirfd, - const char* path, - int flags, - unsigned int mask, - struct uv__statx* statxbuf) { -#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30 - return errno = ENOSYS, -1; -#else - return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf); -#endif -} - - -ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) { -#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28 - return errno = ENOSYS, -1; -#else - return syscall(__NR_getrandom, buf, buflen, flags); -#endif -} diff --git a/deps/uv/src/unix/linux-syscalls.h b/deps/uv/src/unix/linux-syscalls.h deleted file mode 100644 index b4d9082d46f99b..00000000000000 --- a/deps/uv/src/unix/linux-syscalls.h +++ /dev/null @@ -1,78 +0,0 @@ -/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -#ifndef UV_LINUX_SYSCALL_H_ -#define UV_LINUX_SYSCALL_H_ - -#include -#include -#include -#include -#include - -struct uv__statx_timestamp { - int64_t tv_sec; - uint32_t tv_nsec; - int32_t unused0; -}; - -struct uv__statx { - uint32_t stx_mask; - uint32_t stx_blksize; - uint64_t stx_attributes; - uint32_t stx_nlink; - uint32_t stx_uid; - uint32_t stx_gid; - uint16_t stx_mode; - uint16_t unused0; - uint64_t stx_ino; - uint64_t stx_size; - uint64_t stx_blocks; - uint64_t stx_attributes_mask; - struct uv__statx_timestamp stx_atime; - struct uv__statx_timestamp stx_btime; - struct uv__statx_timestamp stx_ctime; - struct uv__statx_timestamp stx_mtime; - uint32_t stx_rdev_major; - uint32_t stx_rdev_minor; - uint32_t stx_dev_major; - uint32_t stx_dev_minor; - uint64_t unused1[14]; -}; - -ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset); -ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset); -int uv__dup3(int oldfd, int newfd, int flags); -ssize_t -uv__fs_copy_file_range(int fd_in, - off_t* off_in, - int fd_out, - off_t* off_out, - size_t len, - unsigned int flags); -int uv__statx(int dirfd, - const char* path, - int flags, - unsigned int mask, - struct uv__statx* statxbuf); -ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags); - -#endif /* UV_LINUX_SYSCALL_H_ */ diff --git a/deps/uv/src/unix/linux.c b/deps/uv/src/unix/linux.c new file mode 100644 index 00000000000000..a3439184c3724c --- /dev/null +++ b/deps/uv/src/unix/linux.c @@ -0,0 +1,2341 @@ +/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their + * EPOLL* counterparts. We use the POLL* variants in this file because that + * is what libuv uses elsewhere. 
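
The uv__statx layout in the removed header mirrors the kernel's statx(2) structure; libuv carries its own copy so it can issue the raw syscall on libcs that predate the wrapper. On a reasonably new glibc (2.28 or later, an assumption here) the same data is reachable through statx() directly, as in this short sketch:

#define _GNU_SOURCE
#include <fcntl.h>       /* AT_* constants */
#include <stdio.h>
#include <sys/stat.h>    /* statx(), struct statx */

static int print_mtime(const char* path) {
  struct statx sx;

  if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW, STATX_BASIC_STATS, &sx))
    return -1;

  printf("%s: size=%llu bytes, mtime=%lld.%09u\n",
         path,
         (unsigned long long) sx.stx_size,
         (long long) sx.stx_mtime.tv_sec,
         (unsigned) sx.stx_mtime.tv_nsec);
  return 0;
}
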
+ */ + +#include "uv.h" +#include "internal.h" + +#include +#include +#include /* offsetof */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef __NR_io_uring_setup +# define __NR_io_uring_setup 425 +#endif + +#ifndef __NR_io_uring_enter +# define __NR_io_uring_enter 426 +#endif + +#ifndef __NR_io_uring_register +# define __NR_io_uring_register 427 +#endif + +#ifndef __NR_copy_file_range +# if defined(__x86_64__) +# define __NR_copy_file_range 326 +# elif defined(__i386__) +# define __NR_copy_file_range 377 +# elif defined(__s390__) +# define __NR_copy_file_range 375 +# elif defined(__arm__) +# define __NR_copy_file_range 391 +# elif defined(__aarch64__) +# define __NR_copy_file_range 285 +# elif defined(__powerpc__) +# define __NR_copy_file_range 379 +# elif defined(__arc__) +# define __NR_copy_file_range 285 +# endif +#endif /* __NR_copy_file_range */ + +#ifndef __NR_statx +# if defined(__x86_64__) +# define __NR_statx 332 +# elif defined(__i386__) +# define __NR_statx 383 +# elif defined(__aarch64__) +# define __NR_statx 397 +# elif defined(__arm__) +# define __NR_statx 397 +# elif defined(__ppc__) +# define __NR_statx 383 +# elif defined(__s390__) +# define __NR_statx 379 +# endif +#endif /* __NR_statx */ + +#ifndef __NR_getrandom +# if defined(__x86_64__) +# define __NR_getrandom 318 +# elif defined(__i386__) +# define __NR_getrandom 355 +# elif defined(__aarch64__) +# define __NR_getrandom 384 +# elif defined(__arm__) +# define __NR_getrandom 384 +# elif defined(__ppc__) +# define __NR_getrandom 359 +# elif defined(__s390__) +# define __NR_getrandom 349 +# endif +#endif /* __NR_getrandom */ + +#define HAVE_IFADDRS_H 1 + +# if defined(__ANDROID_API__) && __ANDROID_API__ < 24 +# undef HAVE_IFADDRS_H +#endif + +#ifdef __UCLIBC__ +# if __UCLIBC_MAJOR__ < 0 && __UCLIBC_MINOR__ < 9 && __UCLIBC_SUBLEVEL__ < 32 +# undef HAVE_IFADDRS_H +# endif +#endif + +#ifdef HAVE_IFADDRS_H +# include +# include +# include +# include +#endif /* HAVE_IFADDRS_H */ + +enum { + UV__IORING_SETUP_SQPOLL = 2u, +}; + +enum { + UV__IORING_FEAT_SINGLE_MMAP = 1u, + UV__IORING_FEAT_NODROP = 2u, + UV__IORING_FEAT_RSRC_TAGS = 1024u, /* linux v5.13 */ +}; + +enum { + UV__IORING_OP_READV = 1, + UV__IORING_OP_WRITEV = 2, + UV__IORING_OP_FSYNC = 3, + UV__IORING_OP_OPENAT = 18, + UV__IORING_OP_CLOSE = 19, + UV__IORING_OP_STATX = 21, + UV__IORING_OP_EPOLL_CTL = 29, +}; + +enum { + UV__IORING_ENTER_GETEVENTS = 1u, + UV__IORING_ENTER_SQ_WAKEUP = 2u, +}; + +enum { + UV__IORING_SQ_NEED_WAKEUP = 1u, + UV__IORING_SQ_CQ_OVERFLOW = 2u, +}; + +struct uv__io_cqring_offsets { + uint32_t head; + uint32_t tail; + uint32_t ring_mask; + uint32_t ring_entries; + uint32_t overflow; + uint32_t cqes; + uint64_t reserved0; + uint64_t reserved1; +}; + +STATIC_ASSERT(40 == sizeof(struct uv__io_cqring_offsets)); + +struct uv__io_sqring_offsets { + uint32_t head; + uint32_t tail; + uint32_t ring_mask; + uint32_t ring_entries; + uint32_t flags; + uint32_t dropped; + uint32_t array; + uint32_t reserved0; + uint64_t reserved1; +}; + +STATIC_ASSERT(40 == sizeof(struct uv__io_sqring_offsets)); + +struct uv__io_uring_cqe { + uint64_t user_data; + int32_t res; + uint32_t flags; +}; + +STATIC_ASSERT(16 == sizeof(struct uv__io_uring_cqe)); + +struct uv__io_uring_sqe { + uint8_t opcode; + uint8_t flags; + uint16_t ioprio; + int32_t fd; + union { + uint64_t off; + uint64_t addr2; + }; + union { + 
uint64_t addr; + }; + uint32_t len; + union { + uint32_t rw_flags; + uint32_t fsync_flags; + uint32_t open_flags; + uint32_t statx_flags; + }; + uint64_t user_data; + union { + uint16_t buf_index; + uint64_t pad[3]; + }; +}; + +STATIC_ASSERT(64 == sizeof(struct uv__io_uring_sqe)); +STATIC_ASSERT(0 == offsetof(struct uv__io_uring_sqe, opcode)); +STATIC_ASSERT(1 == offsetof(struct uv__io_uring_sqe, flags)); +STATIC_ASSERT(2 == offsetof(struct uv__io_uring_sqe, ioprio)); +STATIC_ASSERT(4 == offsetof(struct uv__io_uring_sqe, fd)); +STATIC_ASSERT(8 == offsetof(struct uv__io_uring_sqe, off)); +STATIC_ASSERT(16 == offsetof(struct uv__io_uring_sqe, addr)); +STATIC_ASSERT(24 == offsetof(struct uv__io_uring_sqe, len)); +STATIC_ASSERT(28 == offsetof(struct uv__io_uring_sqe, rw_flags)); +STATIC_ASSERT(32 == offsetof(struct uv__io_uring_sqe, user_data)); +STATIC_ASSERT(40 == offsetof(struct uv__io_uring_sqe, buf_index)); + +struct uv__io_uring_params { + uint32_t sq_entries; + uint32_t cq_entries; + uint32_t flags; + uint32_t sq_thread_cpu; + uint32_t sq_thread_idle; + uint32_t features; + uint32_t reserved[4]; + struct uv__io_sqring_offsets sq_off; /* 40 bytes */ + struct uv__io_cqring_offsets cq_off; /* 40 bytes */ +}; + +STATIC_ASSERT(40 + 40 + 40 == sizeof(struct uv__io_uring_params)); +STATIC_ASSERT(40 == offsetof(struct uv__io_uring_params, sq_off)); +STATIC_ASSERT(80 == offsetof(struct uv__io_uring_params, cq_off)); + +STATIC_ASSERT(EPOLL_CTL_ADD < 4); +STATIC_ASSERT(EPOLL_CTL_DEL < 4); +STATIC_ASSERT(EPOLL_CTL_MOD < 4); + +struct watcher_list { + RB_ENTRY(watcher_list) entry; + QUEUE watchers; + int iterating; + char* path; + int wd; +}; + +struct watcher_root { + struct watcher_list* rbh_root; +}; + +static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root); +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* w, + unsigned int revents); +static int compare_watchers(const struct watcher_list* a, + const struct watcher_list* b); +static void maybe_free_watcher_list(struct watcher_list* w, + uv_loop_t* loop); + +static void uv__epoll_ctl_flush(int epollfd, + struct uv__iou* ctl, + struct epoll_event (*events)[256]); + +static void uv__epoll_ctl_prep(int epollfd, + struct uv__iou* ctl, + struct epoll_event (*events)[256], + int op, + int fd, + struct epoll_event* e); + +RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) + + +static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) { + /* This cast works because watcher_root is a struct with a pointer as its + * sole member. Such type punning is unsafe in the presence of strict + * pointer aliasing (and is just plain nasty) but that is why libuv + * is compiled with -fno-strict-aliasing. 
+ */ + return (struct watcher_root*) &loop->inotify_watchers; +} + + +ssize_t +uv__fs_copy_file_range(int fd_in, + off_t* off_in, + int fd_out, + off_t* off_out, + size_t len, + unsigned int flags) +{ +#ifdef __NR_copy_file_range + return syscall(__NR_copy_file_range, + fd_in, + off_in, + fd_out, + off_out, + len, + flags); +#else + return errno = ENOSYS, -1; +#endif +} + + +int uv__statx(int dirfd, + const char* path, + int flags, + unsigned int mask, + struct uv__statx* statxbuf) { +#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30 + return errno = ENOSYS, -1; +#else + int rc; + + rc = syscall(__NR_statx, dirfd, path, flags, mask, statxbuf); + if (rc >= 0) + uv__msan_unpoison(statxbuf, sizeof(*statxbuf)); + + return rc; +#endif +} + + +ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) { +#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28 + return errno = ENOSYS, -1; +#else + ssize_t rc; + + rc = syscall(__NR_getrandom, buf, buflen, flags); + if (rc >= 0) + uv__msan_unpoison(buf, buflen); + + return rc; +#endif +} + + +int uv__io_uring_setup(int entries, struct uv__io_uring_params* params) { + return syscall(__NR_io_uring_setup, entries, params); +} + + +int uv__io_uring_enter(int fd, + unsigned to_submit, + unsigned min_complete, + unsigned flags) { + /* io_uring_enter used to take a sigset_t but it's unused + * in newer kernels unless IORING_ENTER_EXT_ARG is set, + * in which case it takes a struct io_uring_getevents_arg. + */ + return syscall(__NR_io_uring_enter, + fd, + to_submit, + min_complete, + flags, + NULL, + 0L); +} + + +int uv__io_uring_register(int fd, unsigned opcode, void* arg, unsigned nargs) { + return syscall(__NR_io_uring_register, fd, opcode, arg, nargs); +} + + +static int uv__use_io_uring(void) { + /* Ternary: unknown=0, yes=1, no=-1 */ + static _Atomic int use_io_uring; + char* val; + int use; + + use = atomic_load_explicit(&use_io_uring, memory_order_relaxed); + + if (use == 0) { + val = getenv("UV_USE_IO_URING"); + use = val == NULL || atoi(val) ? 1 : -1; + atomic_store_explicit(&use_io_uring, use, memory_order_relaxed); + } + + return use > 0; +} + + +static void uv__iou_init(int epollfd, + struct uv__iou* iou, + uint32_t entries, + uint32_t flags) { + struct uv__io_uring_params params; + struct epoll_event e; + size_t cqlen; + size_t sqlen; + size_t maxlen; + size_t sqelen; + uint32_t i; + char* sq; + char* sqe; + int ringfd; + + sq = MAP_FAILED; + sqe = MAP_FAILED; + + if (!uv__use_io_uring()) + return; + + /* SQPOLL required CAP_SYS_NICE until linux v5.12 relaxed that requirement. + * Mostly academic because we check for a v5.13 kernel afterwards anyway. + */ + memset(¶ms, 0, sizeof(params)); + params.flags = flags; + + if (flags & UV__IORING_SETUP_SQPOLL) + params.sq_thread_idle = 10; /* milliseconds */ + + /* Kernel returns a file descriptor with O_CLOEXEC flag set. */ + ringfd = uv__io_uring_setup(entries, ¶ms); + if (ringfd == -1) + return; + + /* IORING_FEAT_RSRC_TAGS is used to detect linux v5.13 but what we're + * actually detecting is whether IORING_OP_STATX works with SQPOLL. + */ + if (!(params.features & UV__IORING_FEAT_RSRC_TAGS)) + goto fail; + + /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. */ + if (!(params.features & UV__IORING_FEAT_SINGLE_MMAP)) + goto fail; + + /* Implied by IORING_FEAT_RSRC_TAGS but checked explicitly anyway. 
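
A hedged sketch of how the feature gate above could be probed in isolation. It leans on this file's own wrappers and constants (uv__io_uring_setup(), UV__IORING_FEAT_RSRC_TAGS, uv__close()) and is not part of the actual patch:

static int io_uring_usable(void) {
  struct uv__io_uring_params params;
  int fd;

  memset(&params, 0, sizeof(params));

  fd = uv__io_uring_setup(4, &params);   /* a tiny throwaway ring */
  if (fd == -1)
    return 0;                            /* ENOSYS, EPERM, seccomp, ... */

  uv__close(fd);

  /* Same bar as uv__iou_init(): the v5.13 feature set or nothing. */
  return (params.features & UV__IORING_FEAT_RSRC_TAGS) != 0;
}
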
*/ + if (!(params.features & UV__IORING_FEAT_NODROP)) + goto fail; + + sqlen = params.sq_off.array + params.sq_entries * sizeof(uint32_t); + cqlen = + params.cq_off.cqes + params.cq_entries * sizeof(struct uv__io_uring_cqe); + maxlen = sqlen < cqlen ? cqlen : sqlen; + sqelen = params.sq_entries * sizeof(struct uv__io_uring_sqe); + + sq = mmap(0, + maxlen, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, + ringfd, + 0); /* IORING_OFF_SQ_RING */ + + sqe = mmap(0, + sqelen, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_POPULATE, + ringfd, + 0x10000000ull); /* IORING_OFF_SQES */ + + if (sq == MAP_FAILED || sqe == MAP_FAILED) + goto fail; + + if (flags & UV__IORING_SETUP_SQPOLL) { + /* Only interested in completion events. To get notified when + * the kernel pulls items from the submission ring, add POLLOUT. + */ + memset(&e, 0, sizeof(e)); + e.events = POLLIN; + e.data.fd = ringfd; + + if (epoll_ctl(epollfd, EPOLL_CTL_ADD, ringfd, &e)) + goto fail; + } + + iou->sqhead = (uint32_t*) (sq + params.sq_off.head); + iou->sqtail = (uint32_t*) (sq + params.sq_off.tail); + iou->sqmask = *(uint32_t*) (sq + params.sq_off.ring_mask); + iou->sqarray = (uint32_t*) (sq + params.sq_off.array); + iou->sqflags = (uint32_t*) (sq + params.sq_off.flags); + iou->cqhead = (uint32_t*) (sq + params.cq_off.head); + iou->cqtail = (uint32_t*) (sq + params.cq_off.tail); + iou->cqmask = *(uint32_t*) (sq + params.cq_off.ring_mask); + iou->sq = sq; + iou->cqe = sq + params.cq_off.cqes; + iou->sqe = sqe; + iou->sqlen = sqlen; + iou->cqlen = cqlen; + iou->maxlen = maxlen; + iou->sqelen = sqelen; + iou->ringfd = ringfd; + iou->in_flight = 0; + + for (i = 0; i <= iou->sqmask; i++) + iou->sqarray[i] = i; /* Slot -> sqe identity mapping. */ + + return; + +fail: + if (sq != MAP_FAILED) + munmap(sq, maxlen); + + if (sqe != MAP_FAILED) + munmap(sqe, sqelen); + + uv__close(ringfd); +} + + +static void uv__iou_delete(struct uv__iou* iou) { + if (iou->ringfd != -1) { + munmap(iou->sq, iou->maxlen); + munmap(iou->sqe, iou->sqelen); + uv__close(iou->ringfd); + iou->ringfd = -1; + } +} + + +int uv__platform_loop_init(uv_loop_t* loop) { + uv__loop_internal_fields_t* lfields; + + lfields = uv__get_internal_fields(loop); + lfields->ctl.ringfd = -1; + lfields->iou.ringfd = -1; + + loop->inotify_watchers = NULL; + loop->inotify_fd = -1; + loop->backend_fd = epoll_create1(O_CLOEXEC); + + if (loop->backend_fd == -1) + return UV__ERR(errno); + + uv__iou_init(loop->backend_fd, &lfields->iou, 64, UV__IORING_SETUP_SQPOLL); + uv__iou_init(loop->backend_fd, &lfields->ctl, 256, 0); + + return 0; +} + + +int uv__io_fork(uv_loop_t* loop) { + int err; + struct watcher_list* root; + + root = uv__inotify_watchers(loop)->rbh_root; + + uv__close(loop->backend_fd); + loop->backend_fd = -1; + + /* TODO(bnoordhuis) Loses items from the submission and completion rings. 
*/ + uv__platform_loop_delete(loop); + + err = uv__platform_loop_init(loop); + if (err) + return err; + + return uv__inotify_fork(loop, root); +} + + +void uv__platform_loop_delete(uv_loop_t* loop) { + uv__loop_internal_fields_t* lfields; + + lfields = uv__get_internal_fields(loop); + uv__iou_delete(&lfields->ctl); + uv__iou_delete(&lfields->iou); + + if (loop->inotify_fd != -1) { + uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN); + uv__close(loop->inotify_fd); + loop->inotify_fd = -1; + } +} + + +struct uv__invalidate { + struct epoll_event (*prep)[256]; + struct epoll_event* events; + int nfds; +}; + + +void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) { + uv__loop_internal_fields_t* lfields; + struct uv__invalidate* inv; + struct epoll_event dummy; + int i; + + lfields = uv__get_internal_fields(loop); + inv = lfields->inv; + + /* Invalidate events with same file descriptor */ + if (inv != NULL) + for (i = 0; i < inv->nfds; i++) + if (inv->events[i].data.fd == fd) + inv->events[i].data.fd = -1; + + /* Remove the file descriptor from the epoll. + * This avoids a problem where the same file description remains open + * in another process, causing repeated junk epoll events. + * + * We pass in a dummy epoll_event, to work around a bug in old kernels. + * + * Work around a bug in kernels 3.10 to 3.19 where passing a struct that + * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings. + */ + memset(&dummy, 0, sizeof(dummy)); + + if (inv == NULL) { + epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy); + } else { + uv__epoll_ctl_prep(loop->backend_fd, + &lfields->ctl, + inv->prep, + EPOLL_CTL_DEL, + fd, + &dummy); + } +} + + +int uv__io_check_fd(uv_loop_t* loop, int fd) { + struct epoll_event e; + int rc; + + memset(&e, 0, sizeof(e)); + e.events = POLLIN; + e.data.fd = -1; + + rc = 0; + if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e)) + if (errno != EEXIST) + rc = UV__ERR(errno); + + if (rc == 0) + if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e)) + abort(); + + return rc; +} + + +/* Caller must initialize SQE and call uv__iou_submit(). */ +static struct uv__io_uring_sqe* uv__iou_get_sqe(struct uv__iou* iou, + uv_loop_t* loop, + uv_fs_t* req) { + struct uv__io_uring_sqe* sqe; + uint32_t head; + uint32_t tail; + uint32_t mask; + uint32_t slot; + + if (iou->ringfd == -1) + return NULL; + + head = atomic_load_explicit((_Atomic uint32_t*) iou->sqhead, + memory_order_acquire); + tail = *iou->sqtail; + mask = iou->sqmask; + + if ((head & mask) == ((tail + 1) & mask)) + return NULL; /* No room in ring buffer. TODO(bnoordhuis) maybe flush it? */ + + slot = tail & mask; + sqe = iou->sqe; + sqe = &sqe[slot]; + memset(sqe, 0, sizeof(*sqe)); + sqe->user_data = (uintptr_t) req; + + /* Pacify uv_cancel(). */ + req->work_req.loop = loop; + req->work_req.work = NULL; + req->work_req.done = NULL; + QUEUE_INIT(&req->work_req.wq); + + uv__req_register(loop, req); + iou->in_flight++; + + return sqe; +} + + +static void uv__iou_submit(struct uv__iou* iou) { + uint32_t flags; + + atomic_store_explicit((_Atomic uint32_t*) iou->sqtail, + *iou->sqtail + 1, + memory_order_release); + + flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, + memory_order_acquire); + + if (flags & UV__IORING_SQ_NEED_WAKEUP) + if (uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_SQ_WAKEUP)) + if (errno != EOWNERDEAD) /* Kernel bug. Harmless, ignore. */ + perror("libuv: io_uring_enter(wakeup)"); /* Can't happen. 
*/ +} + + +int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) { + struct uv__io_uring_sqe* sqe; + struct uv__iou* iou; + + iou = &uv__get_internal_fields(loop)->iou; + + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) + return 0; + + sqe->fd = req->file; + sqe->opcode = UV__IORING_OP_CLOSE; + + uv__iou_submit(iou); + + return 1; +} + + +int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop, + uv_fs_t* req, + uint32_t fsync_flags) { + struct uv__io_uring_sqe* sqe; + struct uv__iou* iou; + + iou = &uv__get_internal_fields(loop)->iou; + + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) + return 0; + + /* Little known fact: setting seq->off and seq->len turns + * it into an asynchronous sync_file_range() operation. + */ + sqe->fd = req->file; + sqe->fsync_flags = fsync_flags; + sqe->opcode = UV__IORING_OP_FSYNC; + + uv__iou_submit(iou); + + return 1; +} + + +int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) { + struct uv__io_uring_sqe* sqe; + struct uv__iou* iou; + + iou = &uv__get_internal_fields(loop)->iou; + + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) + return 0; + + sqe->addr = (uintptr_t) req->path; + sqe->fd = AT_FDCWD; + sqe->len = req->mode; + sqe->opcode = UV__IORING_OP_OPENAT; + sqe->open_flags = req->flags | O_CLOEXEC; + + uv__iou_submit(iou); + + return 1; +} + + +int uv__iou_fs_read_or_write(uv_loop_t* loop, + uv_fs_t* req, + int is_read) { + struct uv__io_uring_sqe* sqe; + struct uv__iou* iou; + + /* For the moment, if iovcnt is greater than IOV_MAX, fallback to the + * threadpool. In the future we might take advantage of IOSQE_IO_LINK. */ + if (req->nbufs > IOV_MAX) + return 0; + + iou = &uv__get_internal_fields(loop)->iou; + + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) + return 0; + + sqe->addr = (uintptr_t) req->bufs; + sqe->fd = req->file; + sqe->len = req->nbufs; + sqe->off = req->off < 0 ? -1 : req->off; + sqe->opcode = is_read ? 
UV__IORING_OP_READV : UV__IORING_OP_WRITEV; + + uv__iou_submit(iou); + + return 1; +} + + +int uv__iou_fs_statx(uv_loop_t* loop, + uv_fs_t* req, + int is_fstat, + int is_lstat) { + struct uv__io_uring_sqe* sqe; + struct uv__statx* statxbuf; + struct uv__iou* iou; + + statxbuf = uv__malloc(sizeof(*statxbuf)); + if (statxbuf == NULL) + return 0; + + iou = &uv__get_internal_fields(loop)->iou; + + sqe = uv__iou_get_sqe(iou, loop, req); + if (sqe == NULL) { + uv__free(statxbuf); + return 0; + } + + req->ptr = statxbuf; + + sqe->addr = (uintptr_t) req->path; + sqe->addr2 = (uintptr_t) statxbuf; + sqe->fd = AT_FDCWD; + sqe->len = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */ + sqe->opcode = UV__IORING_OP_STATX; + + if (is_fstat) { + sqe->addr = (uintptr_t) ""; + sqe->fd = req->file; + sqe->statx_flags |= 0x1000; /* AT_EMPTY_PATH */ + } + + if (is_lstat) + sqe->statx_flags |= AT_SYMLINK_NOFOLLOW; + + uv__iou_submit(iou); + + return 1; +} + + +void uv__statx_to_stat(const struct uv__statx* statxbuf, uv_stat_t* buf) { + buf->st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor); + buf->st_mode = statxbuf->stx_mode; + buf->st_nlink = statxbuf->stx_nlink; + buf->st_uid = statxbuf->stx_uid; + buf->st_gid = statxbuf->stx_gid; + buf->st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor); + buf->st_ino = statxbuf->stx_ino; + buf->st_size = statxbuf->stx_size; + buf->st_blksize = statxbuf->stx_blksize; + buf->st_blocks = statxbuf->stx_blocks; + buf->st_atim.tv_sec = statxbuf->stx_atime.tv_sec; + buf->st_atim.tv_nsec = statxbuf->stx_atime.tv_nsec; + buf->st_mtim.tv_sec = statxbuf->stx_mtime.tv_sec; + buf->st_mtim.tv_nsec = statxbuf->stx_mtime.tv_nsec; + buf->st_ctim.tv_sec = statxbuf->stx_ctime.tv_sec; + buf->st_ctim.tv_nsec = statxbuf->stx_ctime.tv_nsec; + buf->st_birthtim.tv_sec = statxbuf->stx_btime.tv_sec; + buf->st_birthtim.tv_nsec = statxbuf->stx_btime.tv_nsec; + buf->st_flags = 0; + buf->st_gen = 0; +} + + +static void uv__iou_fs_statx_post(uv_fs_t* req) { + struct uv__statx* statxbuf; + uv_stat_t* buf; + + buf = &req->statbuf; + statxbuf = req->ptr; + req->ptr = NULL; + + if (req->result == 0) { + uv__msan_unpoison(statxbuf, sizeof(*statxbuf)); + uv__statx_to_stat(statxbuf, buf); + req->ptr = buf; + } + + uv__free(statxbuf); +} + + +static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) { + struct uv__io_uring_cqe* cqe; + struct uv__io_uring_cqe* e; + uv_fs_t* req; + uint32_t head; + uint32_t tail; + uint32_t mask; + uint32_t i; + uint32_t flags; + int nevents; + int rc; + + head = *iou->cqhead; + tail = atomic_load_explicit((_Atomic uint32_t*) iou->cqtail, + memory_order_acquire); + mask = iou->cqmask; + cqe = iou->cqe; + nevents = 0; + + for (i = head; i != tail; i++) { + e = &cqe[i & mask]; + + req = (uv_fs_t*) (uintptr_t) e->user_data; + assert(req->type == UV_FS); + + uv__req_unregister(loop, req); + iou->in_flight--; + + /* io_uring stores error codes as negative numbers, same as libuv. */ + req->result = e->res; + + switch (req->fs_type) { + case UV_FS_FSTAT: + case UV_FS_LSTAT: + case UV_FS_STAT: + uv__iou_fs_statx_post(req); + break; + default: /* Squelch -Wswitch warnings. */ + break; + } + + uv__metrics_update_idle_time(loop); + req->cb(req); + nevents++; + } + + atomic_store_explicit((_Atomic uint32_t*) iou->cqhead, + tail, + memory_order_release); + + /* Check whether CQE's overflowed, if so enter the kernel to make them + * available. Don't grab them immediately but in the next loop iteration to + * avoid loop starvation. 
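
For reference, the constants behind the magic numbers in the statx submission path above, with values taken from the Linux uapi headers; the guess about why they are hard-coded (older toolchain headers may lack them) is mine:

/*   STATX_BASIC_STATS    0x07ff   every field stat(2) also reports
 *   STATX_BTIME          0x0800   birth (creation) time
 *   0x0fff            == STATX_BASIC_STATS | STATX_BTIME
 *   AT_EMPTY_PATH        0x1000   with path "", operate on sqe->fd itself
 *   AT_SYMLINK_NOFOLLOW  0x0100   lstat() behaviour
 */
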
*/ + flags = atomic_load_explicit((_Atomic uint32_t*) iou->sqflags, + memory_order_acquire); + + if (flags & UV__IORING_SQ_CQ_OVERFLOW) { + do + rc = uv__io_uring_enter(iou->ringfd, 0, 0, UV__IORING_ENTER_GETEVENTS); + while (rc == -1 && errno == EINTR); + + if (rc < 0) + perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */ + } + + uv__metrics_inc_events(loop, nevents); + if (uv__get_internal_fields(loop)->current_timeout == 0) + uv__metrics_inc_events_waiting(loop, nevents); +} + + +static void uv__epoll_ctl_prep(int epollfd, + struct uv__iou* ctl, + struct epoll_event (*events)[256], + int op, + int fd, + struct epoll_event* e) { + struct uv__io_uring_sqe* sqe; + struct epoll_event* pe; + uint32_t mask; + uint32_t slot; + + if (ctl->ringfd == -1) { + if (!epoll_ctl(epollfd, op, fd, e)) + return; + + if (op == EPOLL_CTL_DEL) + return; /* Ignore errors, may be racing with another thread. */ + + if (op != EPOLL_CTL_ADD) + abort(); + + if (errno != EEXIST) + abort(); + + /* File descriptor that's been watched before, update event mask. */ + if (!epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, e)) + return; + + abort(); + } else { + mask = ctl->sqmask; + slot = (*ctl->sqtail)++ & mask; + + pe = &(*events)[slot]; + *pe = *e; + + sqe = ctl->sqe; + sqe = &sqe[slot]; + + memset(sqe, 0, sizeof(*sqe)); + sqe->addr = (uintptr_t) pe; + sqe->fd = epollfd; + sqe->len = op; + sqe->off = fd; + sqe->opcode = UV__IORING_OP_EPOLL_CTL; + sqe->user_data = op | slot << 2 | (int64_t) fd << 32; + + if ((*ctl->sqhead & mask) == (*ctl->sqtail & mask)) + uv__epoll_ctl_flush(epollfd, ctl, events); + } +} + + +static void uv__epoll_ctl_flush(int epollfd, + struct uv__iou* ctl, + struct epoll_event (*events)[256]) { + struct epoll_event oldevents[256]; + struct uv__io_uring_cqe* cqe; + uint32_t oldslot; + uint32_t slot; + uint32_t n; + int fd; + int op; + int rc; + + STATIC_ASSERT(sizeof(oldevents) == sizeof(*events)); + assert(ctl->ringfd != -1); + assert(*ctl->sqhead != *ctl->sqtail); + + n = *ctl->sqtail - *ctl->sqhead; + do + rc = uv__io_uring_enter(ctl->ringfd, n, n, UV__IORING_ENTER_GETEVENTS); + while (rc == -1 && errno == EINTR); + + if (rc < 0) + perror("libuv: io_uring_enter(getevents)"); /* Can't happen. */ + + if (rc != (int) n) + abort(); + + assert(*ctl->sqhead == *ctl->sqtail); + + memcpy(oldevents, *events, sizeof(*events)); + + /* Failed submissions are either EPOLL_CTL_DEL commands for file descriptors + * that have been closed, or EPOLL_CTL_ADD commands for file descriptors + * that we are already watching. Ignore the former and retry the latter + * with EPOLL_CTL_MOD. 
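
The batched EPOLL_CTL path above packs the epoll op, the prep-array slot and the fd into the 64-bit user_data so the completion handler can recover them. A round-trip sketch with illustrative helper names:

#include <stdint.h>

static uint64_t ctl_pack(int op, uint32_t slot, int fd) {
  return (uint64_t) op | (uint64_t) slot << 2 | (uint64_t) fd << 32;
}

static void ctl_unpack(uint64_t ud, int* op, uint32_t* slot, int* fd) {
  *op = 3 & ud;              /* EPOLL_CTL_* all fit in two bits */
  *slot = 255 & (ud >> 2);   /* index into the 256-entry prep array */
  *fd = ud >> 32;
}

/* e.g. ctl_pack(EPOLL_CTL_ADD, 17, 42) round-trips to (1, 17, 42). */
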
+ */ + while (*ctl->cqhead != *ctl->cqtail) { + slot = (*ctl->cqhead)++ & ctl->cqmask; + + cqe = ctl->cqe; + cqe = &cqe[slot]; + + if (cqe->res == 0) + continue; + + fd = cqe->user_data >> 32; + op = 3 & cqe->user_data; + oldslot = 255 & (cqe->user_data >> 2); + + if (op == EPOLL_CTL_DEL) + continue; + + if (op != EPOLL_CTL_ADD) + abort(); + + if (cqe->res != -EEXIST) + abort(); + + uv__epoll_ctl_prep(epollfd, + ctl, + events, + EPOLL_CTL_MOD, + fd, + &oldevents[oldslot]); + } +} + + +void uv__io_poll(uv_loop_t* loop, int timeout) { + uv__loop_internal_fields_t* lfields; + struct epoll_event events[1024]; + struct epoll_event prep[256]; + struct uv__invalidate inv; + struct epoll_event* pe; + struct epoll_event e; + struct uv__iou* ctl; + struct uv__iou* iou; + int real_timeout; + QUEUE* q; + uv__io_t* w; + sigset_t* sigmask; + sigset_t sigset; + uint64_t base; + int have_iou_events; + int have_signals; + int nevents; + int epollfd; + int count; + int nfds; + int fd; + int op; + int i; + int user_timeout; + int reset_timeout; + + lfields = uv__get_internal_fields(loop); + ctl = &lfields->ctl; + iou = &lfields->iou; + + sigmask = NULL; + if (loop->flags & UV_LOOP_BLOCK_SIGPROF) { + sigemptyset(&sigset); + sigaddset(&sigset, SIGPROF); + sigmask = &sigset; + } + + assert(timeout >= -1); + base = loop->time; + count = 48; /* Benchmarks suggest this gives the best throughput. */ + real_timeout = timeout; + + if (lfields->flags & UV_METRICS_IDLE_TIME) { + reset_timeout = 1; + user_timeout = timeout; + timeout = 0; + } else { + reset_timeout = 0; + user_timeout = 0; + } + + epollfd = loop->backend_fd; + + memset(&e, 0, sizeof(e)); + + while (!QUEUE_EMPTY(&loop->watcher_queue)) { + q = QUEUE_HEAD(&loop->watcher_queue); + w = QUEUE_DATA(q, uv__io_t, watcher_queue); + QUEUE_REMOVE(q); + QUEUE_INIT(q); + + op = EPOLL_CTL_MOD; + if (w->events == 0) + op = EPOLL_CTL_ADD; + + w->events = w->pevents; + e.events = w->pevents; + e.data.fd = w->fd; + + uv__epoll_ctl_prep(epollfd, ctl, &prep, op, w->fd, &e); + } + + inv.events = events; + inv.prep = &prep; + inv.nfds = -1; + + for (;;) { + if (loop->nfds == 0) + if (iou->in_flight == 0) + break; + + /* All event mask mutations should be visible to the kernel before + * we enter epoll_pwait(). + */ + if (ctl->ringfd != -1) + while (*ctl->sqhead != *ctl->sqtail) + uv__epoll_ctl_flush(epollfd, ctl, &prep); + + /* Only need to set the provider_entry_time if timeout != 0. The function + * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. + */ + if (timeout != 0) + uv__metrics_set_provider_entry_time(loop); + + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. + */ + lfields->current_timeout = timeout; + + nfds = epoll_pwait(epollfd, events, ARRAY_SIZE(events), timeout, sigmask); + + /* Update loop->time unconditionally. It's tempting to skip the update when + * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the + * operating system didn't reschedule our process while in the syscall. + */ + SAVE_ERRNO(uv__update_time(loop)); + + if (nfds == 0) { + assert(timeout != -1); + + if (reset_timeout != 0) { + timeout = user_timeout; + reset_timeout = 0; + } + + if (timeout == -1) + continue; + + if (timeout == 0) + break; + + /* We may have been inside the system call for longer than |timeout| + * milliseconds so we need to update the timestamp to avoid drift. 
+ */ + goto update_timeout; + } + + if (nfds == -1) { + if (errno != EINTR) + abort(); + + if (reset_timeout != 0) { + timeout = user_timeout; + reset_timeout = 0; + } + + if (timeout == -1) + continue; + + if (timeout == 0) + break; + + /* Interrupted by a signal. Update timeout and poll again. */ + goto update_timeout; + } + + have_iou_events = 0; + have_signals = 0; + nevents = 0; + + inv.nfds = nfds; + lfields->inv = &inv; + + for (i = 0; i < nfds; i++) { + pe = events + i; + fd = pe->data.fd; + + /* Skip invalidated events, see uv__platform_invalidate_fd */ + if (fd == -1) + continue; + + if (fd == iou->ringfd) { + uv__poll_io_uring(loop, iou); + have_iou_events = 1; + continue; + } + + assert(fd >= 0); + assert((unsigned) fd < loop->nwatchers); + + w = loop->watchers[fd]; + + if (w == NULL) { + /* File descriptor that we've stopped watching, disarm it. + * + * Ignore all errors because we may be racing with another thread + * when the file descriptor is closed. + */ + uv__epoll_ctl_prep(epollfd, ctl, &prep, EPOLL_CTL_DEL, fd, pe); + continue; + } + + /* Give users only events they're interested in. Prevents spurious + * callbacks when previous callback invocation in this loop has stopped + * the current watcher. Also, filters out events that users has not + * requested us to watch. + */ + pe->events &= w->pevents | POLLERR | POLLHUP; + + /* Work around an epoll quirk where it sometimes reports just the + * EPOLLERR or EPOLLHUP event. In order to force the event loop to + * move forward, we merge in the read/write events that the watcher + * is interested in; uv__read() and uv__write() will then deal with + * the error or hangup in the usual fashion. + * + * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user + * reads the available data, calls uv_read_stop(), then sometime later + * calls uv_read_start() again. By then, libuv has forgotten about the + * hangup and the kernel won't report EPOLLIN again because there's + * nothing left to read. If anything, libuv is to blame here. The + * current hack is just a quick bandaid; to properly fix it, libuv + * needs to remember the error/hangup event. We should get that for + * free when we switch over to edge-triggered I/O. + */ + if (pe->events == POLLERR || pe->events == POLLHUP) + pe->events |= + w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI); + + if (pe->events != 0) { + /* Run signal watchers last. This also affects child process watchers + * because those are implemented in terms of signal watchers. + */ + if (w == &loop->signal_io_watcher) { + have_signals = 1; + } else { + uv__metrics_update_idle_time(loop); + w->cb(loop, w, pe->events); + } + + nevents++; + } + } + + uv__metrics_inc_events(loop, nevents); + if (reset_timeout != 0) { + timeout = user_timeout; + reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); + } + + if (have_signals != 0) { + uv__metrics_update_idle_time(loop); + loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN); + } + + lfields->inv = NULL; + + if (have_iou_events != 0) + break; /* Event loop should cycle now so don't poll again. */ + + if (have_signals != 0) + break; /* Event loop should cycle now so don't poll again. */ + + if (nevents != 0) { + if (nfds == ARRAY_SIZE(events) && --count != 0) { + /* Poll for more events but don't block this time. 
*/ + timeout = 0; + continue; + } + break; + } + + if (timeout == 0) + break; + + if (timeout == -1) + continue; + +update_timeout: + assert(timeout > 0); + + real_timeout -= (loop->time - base); + if (real_timeout <= 0) + break; + + timeout = real_timeout; + } + + if (ctl->ringfd != -1) + while (*ctl->sqhead != *ctl->sqtail) + uv__epoll_ctl_flush(epollfd, ctl, &prep); +} + +uint64_t uv__hrtime(uv_clocktype_t type) { + static _Atomic clock_t fast_clock_id = -1; + struct timespec t; + clock_t clock_id; + + /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has + * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is + * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may + * decide to make a costly system call. + */ + /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE + * when it has microsecond granularity or better (unlikely). + */ + clock_id = CLOCK_MONOTONIC; + if (type != UV_CLOCK_FAST) + goto done; + + clock_id = atomic_load_explicit(&fast_clock_id, memory_order_relaxed); + if (clock_id != -1) + goto done; + + clock_id = CLOCK_MONOTONIC; + if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t)) + if (t.tv_nsec <= 1 * 1000 * 1000) + clock_id = CLOCK_MONOTONIC_COARSE; + + atomic_store_explicit(&fast_clock_id, clock_id, memory_order_relaxed); + +done: + + if (clock_gettime(clock_id, &t)) + return 0; /* Not really possible. */ + + return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec; +} + + +int uv_resident_set_memory(size_t* rss) { + char buf[1024]; + const char* s; + ssize_t n; + long val; + int fd; + int i; + + do + fd = open("/proc/self/stat", O_RDONLY); + while (fd == -1 && errno == EINTR); + + if (fd == -1) + return UV__ERR(errno); + + do + n = read(fd, buf, sizeof(buf) - 1); + while (n == -1 && errno == EINTR); + + uv__close(fd); + if (n == -1) + return UV__ERR(errno); + buf[n] = '\0'; + + s = strchr(buf, ' '); + if (s == NULL) + goto err; + + s += 1; + if (*s != '(') + goto err; + + s = strchr(s, ')'); + if (s == NULL) + goto err; + + for (i = 1; i <= 22; i++) { + s = strchr(s + 1, ' '); + if (s == NULL) + goto err; + } + + errno = 0; + val = strtol(s, NULL, 10); + if (errno != 0) + goto err; + if (val < 0) + goto err; + + *rss = val * getpagesize(); + return 0; + +err: + return UV_EINVAL; +} + +int uv_uptime(double* uptime) { + struct timespec now; + char buf[128]; + + /* Consult /proc/uptime when present (common case), or fall back to + * clock_gettime. Why not always clock_gettime? It doesn't always return the + * right result under OpenVZ and possibly other containerized environments. 
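
uv__hrtime() above prefers CLOCK_MONOTONIC_COARSE when its resolution is 1 ms or better, because the coarse clock is served entirely from the vDSO. A standalone sketch of that probe; the clock ids are Linux-specific and the extra tv_sec check is my addition:

#define _GNU_SOURCE              /* coarse clock ids on some libcs */
#include <stdint.h>
#include <time.h>

static clockid_t pick_fast_clock(void) {
  struct timespec res;

  if (clock_getres(CLOCK_MONOTONIC_COARSE, &res) == 0 &&
      res.tv_sec == 0 &&
      res.tv_nsec <= 1 * 1000 * 1000)      /* 1 ms or better */
    return CLOCK_MONOTONIC_COARSE;

  return CLOCK_MONOTONIC;
}

static uint64_t now_ns(clockid_t id) {
  struct timespec t;

  if (clock_gettime(id, &t))
    return 0;

  return t.tv_sec * (uint64_t) 1000000000 + t.tv_nsec;
}
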
+ */ + if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf))) + if (1 == sscanf(buf, "%lf", uptime)) + return 0; + + if (clock_gettime(CLOCK_BOOTTIME, &now)) + return UV__ERR(errno); + + *uptime = now.tv_sec; + return 0; +} + + +int uv_cpu_info(uv_cpu_info_t** ci, int* count) { +#if defined(__PPC__) + static const char model_marker[] = "cpu\t\t: "; +#elif defined(__arm__) + static const char model_marker[] = "Processor\t: "; +#elif defined(__aarch64__) + static const char model_marker[] = "CPU part\t: "; +#elif defined(__mips__) + static const char model_marker[] = "cpu model\t\t: "; +#else + static const char model_marker[] = "model name\t: "; +#endif + static const char parts[] = +#ifdef __aarch64__ + "0x811\nARM810\n" "0x920\nARM920\n" "0x922\nARM922\n" + "0x926\nARM926\n" "0x940\nARM940\n" "0x946\nARM946\n" + "0x966\nARM966\n" "0xa20\nARM1020\n" "0xa22\nARM1022\n" + "0xa26\nARM1026\n" "0xb02\nARM11 MPCore\n" "0xb36\nARM1136\n" + "0xb56\nARM1156\n" "0xb76\nARM1176\n" "0xc05\nCortex-A5\n" + "0xc07\nCortex-A7\n" "0xc08\nCortex-A8\n" "0xc09\nCortex-A9\n" + "0xc0d\nCortex-A17\n" /* Originally A12 */ + "0xc0f\nCortex-A15\n" "0xc0e\nCortex-A17\n" "0xc14\nCortex-R4\n" + "0xc15\nCortex-R5\n" "0xc17\nCortex-R7\n" "0xc18\nCortex-R8\n" + "0xc20\nCortex-M0\n" "0xc21\nCortex-M1\n" "0xc23\nCortex-M3\n" + "0xc24\nCortex-M4\n" "0xc27\nCortex-M7\n" "0xc60\nCortex-M0+\n" + "0xd01\nCortex-A32\n" "0xd03\nCortex-A53\n" "0xd04\nCortex-A35\n" + "0xd05\nCortex-A55\n" "0xd06\nCortex-A65\n" "0xd07\nCortex-A57\n" + "0xd08\nCortex-A72\n" "0xd09\nCortex-A73\n" "0xd0a\nCortex-A75\n" + "0xd0b\nCortex-A76\n" "0xd0c\nNeoverse-N1\n" "0xd0d\nCortex-A77\n" + "0xd0e\nCortex-A76AE\n" "0xd13\nCortex-R52\n" "0xd20\nCortex-M23\n" + "0xd21\nCortex-M33\n" "0xd41\nCortex-A78\n" "0xd42\nCortex-A78AE\n" + "0xd4a\nNeoverse-E1\n" "0xd4b\nCortex-A78C\n" +#endif + ""; + struct cpu { + unsigned long long freq, user, nice, sys, idle, irq; + unsigned model; + }; + FILE* fp; + char* p; + int found; + int n; + unsigned i; + unsigned cpu; + unsigned maxcpu; + unsigned size; + unsigned long long skip; + struct cpu (*cpus)[8192]; /* Kernel maximum. */ + struct cpu* c; + struct cpu t; + char (*model)[64]; + unsigned char bitmap[ARRAY_SIZE(*cpus) / 8]; + /* Assumption: even big.LITTLE systems will have only a handful + * of different CPU models. Most systems will just have one. + */ + char models[8][64]; + char buf[1024]; + + memset(bitmap, 0, sizeof(bitmap)); + memset(models, 0, sizeof(models)); + snprintf(*models, sizeof(*models), "unknown"); + maxcpu = 0; + + cpus = uv__calloc(ARRAY_SIZE(*cpus), sizeof(**cpus)); + if (cpus == NULL) + return UV_ENOMEM; + + fp = uv__open_file("/proc/stat"); + if (fp == NULL) { + uv__free(cpus); + return UV__ERR(errno); + } + + fgets(buf, sizeof(buf), fp); /* Skip first line. */ + + for (;;) { + memset(&t, 0, sizeof(t)); + + n = fscanf(fp, "cpu%u %llu %llu %llu %llu %llu %llu", + &cpu, &t.user, &t.nice, &t.sys, &t.idle, &skip, &t.irq); + + if (n != 7) + break; + + fgets(buf, sizeof(buf), fp); /* Skip rest of line. */ + + if (cpu >= ARRAY_SIZE(*cpus)) + continue; + + (*cpus)[cpu] = t; + + bitmap[cpu >> 3] |= 1 << (cpu & 7); + + if (cpu >= maxcpu) + maxcpu = cpu + 1; + } + + fclose(fp); + + fp = uv__open_file("/proc/cpuinfo"); + if (fp == NULL) + goto nocpuinfo; + + for (;;) { + if (1 != fscanf(fp, "processor\t: %u\n", &cpu)) + break; /* Parse error. 
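
The aarch64 parts table above is a flat string of "code\nname\n" pairs, searched further down with memmem() against the "CPU part" value from /proc/cpuinfo. A trimmed, standalone version of that lookup; memmem() is a GNU extension, and the three entries here are just a subset of the real table:

#define _GNU_SOURCE              /* memmem() */
#include <stdio.h>
#include <string.h>

static const char part_table[] =
    "0xd03\nCortex-A53\n"
    "0xd07\nCortex-A57\n"
    "0xd0c\nNeoverse-N1\n";

static void print_part_name(const char* code) {   /* e.g. "0xd07" */
  size_t n = strlen(code);
  const char* p = memmem(part_table, sizeof(part_table) - 1, code, n);

  if (p != NULL && p[n] == '\n') {                /* exact code, name follows */
    p += n + 1;
    printf("%.*s\n", (int) strcspn(p, "\n"), p);
  } else {
    printf("unknown\n");
  }
}
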
*/ + + found = 0; + while (!found && fgets(buf, sizeof(buf), fp)) + found = !strncmp(buf, model_marker, sizeof(model_marker) - 1); + + if (!found) + goto next; + + p = buf + sizeof(model_marker) - 1; + n = (int) strcspn(p, "\n"); + + /* arm64: translate CPU part code to model name. */ + if (*parts) { + p = memmem(parts, sizeof(parts) - 1, p, n + 1); + if (p == NULL) + p = "unknown"; + else + p += n + 1; + n = (int) strcspn(p, "\n"); + } + + found = 0; + for (model = models; !found && model < ARRAY_END(models); model++) + found = !strncmp(p, *model, strlen(*model)); + + if (!found) + goto next; + + if (**model == '\0') + snprintf(*model, sizeof(*model), "%.*s", n, p); + + if (cpu < maxcpu) + (*cpus)[cpu].model = model - models; + +next: + while (fgets(buf, sizeof(buf), fp)) + if (*buf == '\n') + break; + } + + fclose(fp); + fp = NULL; + +nocpuinfo: + + n = 0; + for (cpu = 0; cpu < maxcpu; cpu++) { + if (!(bitmap[cpu >> 3] & (1 << (cpu & 7)))) + continue; + + n++; + snprintf(buf, sizeof(buf), + "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq", cpu); + + fp = uv__open_file(buf); + if (fp == NULL) + continue; + + fscanf(fp, "%llu", &(*cpus)[cpu].freq); + fclose(fp); + fp = NULL; + } + + size = n * sizeof(**ci) + sizeof(models); + *ci = uv__malloc(size); + *count = 0; + + if (*ci == NULL) { + uv__free(cpus); + return UV_ENOMEM; + } + + *count = n; + p = memcpy(*ci + n, models, sizeof(models)); + + i = 0; + for (cpu = 0; cpu < maxcpu; cpu++) { + if (!(bitmap[cpu >> 3] & (1 << (cpu & 7)))) + continue; + + c = *cpus + cpu; + + (*ci)[i++] = (uv_cpu_info_t) { + .model = p + c->model * sizeof(*model), + .speed = c->freq / 1000, + /* Note: sysconf(_SC_CLK_TCK) is fixed at 100 Hz, + * therefore the multiplier is always 1000/100 = 10. + */ + .cpu_times = (struct uv_cpu_times_s) { + .user = 10 * c->user, + .nice = 10 * c->nice, + .sys = 10 * c->sys, + .idle = 10 * c->idle, + .irq = 10 * c->irq, + }, + }; + } + + uv__free(cpus); + + return 0; +} + + +#ifdef HAVE_IFADDRS_H +static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) { + if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING))) + return 1; + if (ent->ifa_addr == NULL) + return 1; + /* + * On Linux getifaddrs returns information related to the raw underlying + * devices. We're not interested in this information yet. 
+ */ + if (ent->ifa_addr->sa_family == PF_PACKET) + return exclude_type; + return !exclude_type; +} +#endif + +int uv_interface_addresses(uv_interface_address_t** addresses, int* count) { +#ifndef HAVE_IFADDRS_H + *count = 0; + *addresses = NULL; + return UV_ENOSYS; +#else + struct ifaddrs *addrs, *ent; + uv_interface_address_t* address; + int i; + struct sockaddr_ll *sll; + + *count = 0; + *addresses = NULL; + + if (getifaddrs(&addrs)) + return UV__ERR(errno); + + /* Count the number of interfaces */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR)) + continue; + + (*count)++; + } + + if (*count == 0) { + freeifaddrs(addrs); + return 0; + } + + /* Make sure the memory is initiallized to zero using calloc() */ + *addresses = uv__calloc(*count, sizeof(**addresses)); + if (!(*addresses)) { + freeifaddrs(addrs); + return UV_ENOMEM; + } + + address = *addresses; + + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR)) + continue; + + address->name = uv__strdup(ent->ifa_name); + + if (ent->ifa_addr->sa_family == AF_INET6) { + address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr); + } else { + address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr); + } + + if (ent->ifa_netmask->sa_family == AF_INET6) { + address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask); + } else { + address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask); + } + + address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK); + + address++; + } + + /* Fill in physical addresses for each interface */ + for (ent = addrs; ent != NULL; ent = ent->ifa_next) { + if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS)) + continue; + + address = *addresses; + + for (i = 0; i < (*count); i++) { + size_t namelen = strlen(ent->ifa_name); + /* Alias interface share the same physical address */ + if (strncmp(address->name, ent->ifa_name, namelen) == 0 && + (address->name[namelen] == 0 || address->name[namelen] == ':')) { + sll = (struct sockaddr_ll*)ent->ifa_addr; + memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr)); + } + address++; + } + } + + freeifaddrs(addrs); + + return 0; +#endif +} + + +void uv_free_interface_addresses(uv_interface_address_t* addresses, + int count) { + int i; + + for (i = 0; i < count; i++) { + uv__free(addresses[i].name); + } + + uv__free(addresses); +} + + +void uv__set_process_title(const char* title) { +#if defined(PR_SET_NAME) + prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */ +#endif +} + + +static uint64_t uv__read_proc_meminfo(const char* what) { + uint64_t rc; + char* p; + char buf[4096]; /* Large enough to hold all of /proc/meminfo. 
*/ + + if (uv__slurp("/proc/meminfo", buf, sizeof(buf))) + return 0; + + p = strstr(buf, what); + + if (p == NULL) + return 0; + + p += strlen(what); + + rc = 0; + sscanf(p, "%" PRIu64 " kB", &rc); + + return rc * 1024; +} + + +uint64_t uv_get_free_memory(void) { + struct sysinfo info; + uint64_t rc; + + rc = uv__read_proc_meminfo("MemAvailable:"); + + if (rc != 0) + return rc; + + if (0 == sysinfo(&info)) + return (uint64_t) info.freeram * info.mem_unit; + + return 0; +} + + +uint64_t uv_get_total_memory(void) { + struct sysinfo info; + uint64_t rc; + + rc = uv__read_proc_meminfo("MemTotal:"); + + if (rc != 0) + return rc; + + if (0 == sysinfo(&info)) + return (uint64_t) info.totalram * info.mem_unit; + + return 0; +} + + +static uint64_t uv__read_uint64(const char* filename) { + char buf[32]; /* Large enough to hold an encoded uint64_t. */ + uint64_t rc; + + rc = 0; + if (0 == uv__slurp(filename, buf, sizeof(buf))) + if (1 != sscanf(buf, "%" PRIu64, &rc)) + if (0 == strcmp(buf, "max\n")) + rc = UINT64_MAX; + + return rc; +} + + +/* Given a buffer with the contents of a cgroup1 /proc/self/cgroups, + * finds the location and length of the memory controller mount path. + * This disregards the leading / for easy concatenation of paths. + * Returns NULL if the memory controller wasn't found. */ +static char* uv__cgroup1_find_memory_controller(char buf[static 1024], + int* n) { + char* p; + + /* Seek to the memory controller line. */ + p = strchr(buf, ':'); + while (p != NULL && strncmp(p, ":memory:", 8)) { + p = strchr(p, '\n'); + if (p != NULL) + p = strchr(p, ':'); + } + + if (p != NULL) { + /* Determine the length of the mount path. */ + p = p + strlen(":memory:/"); + *n = (int) strcspn(p, "\n"); + } + + return p; +} + +static void uv__get_cgroup1_memory_limits(char buf[static 1024], uint64_t* high, + uint64_t* max) { + char filename[4097]; + char* p; + int n; + uint64_t cgroup1_max; + + /* Find out where the controller is mounted. */ + p = uv__cgroup1_find_memory_controller(buf, &n); + if (p != NULL) { + snprintf(filename, sizeof(filename), + "/sys/fs/cgroup/memory/%.*s/memory.soft_limit_in_bytes", n, p); + *high = uv__read_uint64(filename); + + snprintf(filename, sizeof(filename), + "/sys/fs/cgroup/memory/%.*s/memory.limit_in_bytes", n, p); + *max = uv__read_uint64(filename); + + /* If the controller wasn't mounted, the reads above will have failed, + * as indicated by uv__read_uint64 returning 0. + */ + if (*high != 0 && *max != 0) + goto update_limits; + } + + /* Fall back to the limits of the global memory controller. */ + *high = uv__read_uint64("/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"); + *max = uv__read_uint64("/sys/fs/cgroup/memory/memory.limit_in_bytes"); + + /* uv__read_uint64 detects cgroup2's "max", so we need to separately detect + * cgroup1's maximum value (which is derived from LONG_MAX and PAGE_SIZE). + */ +update_limits: + cgroup1_max = LONG_MAX & ~(sysconf(_SC_PAGESIZE) - 1); + if (*high == cgroup1_max) + *high = UINT64_MAX; + if (*max == cgroup1_max) + *max = UINT64_MAX; +} + +static void uv__get_cgroup2_memory_limits(char buf[static 1024], uint64_t* high, + uint64_t* max) { + char filename[4097]; + char* p; + int n; + + /* Find out where the controller is mounted. */ + p = buf + strlen("0::/"); + n = (int) strcspn(p, "\n"); + + /* Read the memory limits of the controller. 
*/ + snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%.*s/memory.max", n, p); + *max = uv__read_uint64(filename); + snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%.*s/memory.high", n, p); + *high = uv__read_uint64(filename); +} + +static uint64_t uv__get_cgroup_constrained_memory(char buf[static 1024]) { + uint64_t high; + uint64_t max; + + /* In the case of cgroupv2, we'll only have a single entry. */ + if (strncmp(buf, "0::/", 4)) + uv__get_cgroup1_memory_limits(buf, &high, &max); + else + uv__get_cgroup2_memory_limits(buf, &high, &max); + + if (high == 0 || max == 0) + return 0; + + return high < max ? high : max; +} + +uint64_t uv_get_constrained_memory(void) { + char buf[1024]; + + if (uv__slurp("/proc/self/cgroup", buf, sizeof(buf))) + return 0; + + return uv__get_cgroup_constrained_memory(buf); +} + + +static uint64_t uv__get_cgroup1_current_memory(char buf[static 1024]) { + char filename[4097]; + uint64_t current; + char* p; + int n; + + /* Find out where the controller is mounted. */ + p = uv__cgroup1_find_memory_controller(buf, &n); + if (p != NULL) { + snprintf(filename, sizeof(filename), + "/sys/fs/cgroup/memory/%.*s/memory.usage_in_bytes", n, p); + current = uv__read_uint64(filename); + + /* If the controller wasn't mounted, the reads above will have failed, + * as indicated by uv__read_uint64 returning 0. + */ + if (current != 0) + return current; + } + + /* Fall back to the usage of the global memory controller. */ + return uv__read_uint64("/sys/fs/cgroup/memory/memory.usage_in_bytes"); +} + +static uint64_t uv__get_cgroup2_current_memory(char buf[static 1024]) { + char filename[4097]; + char* p; + int n; + + /* Find out where the controller is mounted. */ + p = buf + strlen("0::/"); + n = (int) strcspn(p, "\n"); + + snprintf(filename, sizeof(filename), + "/sys/fs/cgroup/%.*s/memory.current", n, p); + return uv__read_uint64(filename); +} + +uint64_t uv_get_available_memory(void) { + char buf[1024]; + uint64_t constrained; + uint64_t current; + uint64_t total; + + if (uv__slurp("/proc/self/cgroup", buf, sizeof(buf))) + return 0; + + constrained = uv__get_cgroup_constrained_memory(buf); + if (constrained == 0) + return uv_get_free_memory(); + + total = uv_get_total_memory(); + if (constrained > total) + return uv_get_free_memory(); + + /* In the case of cgroupv2, we'll only have a single entry. */ + if (strncmp(buf, "0::/", 4)) + current = uv__get_cgroup1_current_memory(buf); + else + current = uv__get_cgroup2_current_memory(buf); + + /* memory usage can be higher than the limit (for short bursts of time) */ + if (constrained < current) + return 0; + + return constrained - current; +} + + +void uv_loadavg(double avg[3]) { + struct sysinfo info; + char buf[128]; /* Large enough to hold all of /proc/loadavg. 
*/ + + if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf))) + if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2])) + return; + + if (sysinfo(&info) < 0) + return; + + avg[0] = (double) info.loads[0] / 65536.0; + avg[1] = (double) info.loads[1] / 65536.0; + avg[2] = (double) info.loads[2] / 65536.0; +} + + +static int compare_watchers(const struct watcher_list* a, + const struct watcher_list* b) { + if (a->wd < b->wd) return -1; + if (a->wd > b->wd) return 1; + return 0; +} + + +static int init_inotify(uv_loop_t* loop) { + int fd; + + if (loop->inotify_fd != -1) + return 0; + + fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC); + if (fd < 0) + return UV__ERR(errno); + + loop->inotify_fd = fd; + uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd); + uv__io_start(loop, &loop->inotify_read_watcher, POLLIN); + + return 0; +} + + +static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) { + /* Open the inotify_fd, and re-arm all the inotify watchers. */ + int err; + struct watcher_list* tmp_watcher_list_iter; + struct watcher_list* watcher_list; + struct watcher_list tmp_watcher_list; + QUEUE queue; + QUEUE* q; + uv_fs_event_t* handle; + char* tmp_path; + + if (root == NULL) + return 0; + + /* We must restore the old watcher list to be able to close items + * out of it. + */ + loop->inotify_watchers = root; + + QUEUE_INIT(&tmp_watcher_list.watchers); + /* Note that the queue we use is shared with the start and stop() + * functions, making QUEUE_FOREACH unsafe to use. So we use the + * QUEUE_MOVE trick to safely iterate. Also don't free the watcher + * list until we're done iterating. c.f. uv__inotify_read. + */ + RB_FOREACH_SAFE(watcher_list, watcher_root, + uv__inotify_watchers(loop), tmp_watcher_list_iter) { + watcher_list->iterating = 1; + QUEUE_MOVE(&watcher_list->watchers, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + handle = QUEUE_DATA(q, uv_fs_event_t, watchers); + /* It's critical to keep a copy of path here, because it + * will be set to NULL by stop() and then deallocated by + * maybe_free_watcher_list + */ + tmp_path = uv__strdup(handle->path); + assert(tmp_path != NULL); + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&watcher_list->watchers, q); + uv_fs_event_stop(handle); + + QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers); + handle->path = tmp_path; + } + watcher_list->iterating = 0; + maybe_free_watcher_list(watcher_list, loop); + } + + QUEUE_MOVE(&tmp_watcher_list.watchers, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + QUEUE_REMOVE(q); + handle = QUEUE_DATA(q, uv_fs_event_t, watchers); + tmp_path = handle->path; + handle->path = NULL; + err = uv_fs_event_start(handle, handle->cb, tmp_path, 0); + uv__free(tmp_path); + if (err) + return err; + } + + return 0; +} + + +static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { + struct watcher_list w; + w.wd = wd; + return RB_FIND(watcher_root, uv__inotify_watchers(loop), &w); +} + + +static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { + /* if the watcher_list->watchers is being iterated over, we can't free it. */ + if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { + /* No watchers left for this path. Clean up. 
*/ + RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w); + inotify_rm_watch(loop->inotify_fd, w->wd); + uv__free(w); + } +} + + +static void uv__inotify_read(uv_loop_t* loop, + uv__io_t* dummy, + unsigned int events) { + const struct inotify_event* e; + struct watcher_list* w; + uv_fs_event_t* h; + QUEUE queue; + QUEUE* q; + const char* path; + ssize_t size; + const char *p; + /* needs to be large enough for sizeof(inotify_event) + strlen(path) */ + char buf[4096]; + + for (;;) { + do + size = read(loop->inotify_fd, buf, sizeof(buf)); + while (size == -1 && errno == EINTR); + + if (size == -1) { + assert(errno == EAGAIN || errno == EWOULDBLOCK); + break; + } + + assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */ + + /* Now we have one or more inotify_event structs. */ + for (p = buf; p < buf + size; p += sizeof(*e) + e->len) { + e = (const struct inotify_event*) p; + + events = 0; + if (e->mask & (IN_ATTRIB|IN_MODIFY)) + events |= UV_CHANGE; + if (e->mask & ~(IN_ATTRIB|IN_MODIFY)) + events |= UV_RENAME; + + w = find_watcher(loop, e->wd); + if (w == NULL) + continue; /* Stale event, no watchers left. */ + + /* inotify does not return the filename when monitoring a single file + * for modifications. Repurpose the filename for API compatibility. + * I'm not convinced this is a good thing, maybe it should go. + */ + path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path); + + /* We're about to iterate over the queue and call user's callbacks. + * What can go wrong? + * A callback could call uv_fs_event_stop() + * and the queue can change under our feet. + * So, we use QUEUE_MOVE() trick to safely iterate over the queue. + * And we don't free the watcher_list until we're done iterating. + * + * First, + * tell uv_fs_event_stop() (that could be called from a user's callback) + * not to free watcher_list. 
+ */ + w->iterating = 1; + QUEUE_MOVE(&w->watchers, &queue); + while (!QUEUE_EMPTY(&queue)) { + q = QUEUE_HEAD(&queue); + h = QUEUE_DATA(q, uv_fs_event_t, watchers); + + QUEUE_REMOVE(q); + QUEUE_INSERT_TAIL(&w->watchers, q); + + h->cb(h, path, events, 0); + } + /* done iterating, time to (maybe) free empty watcher_list */ + w->iterating = 0; + maybe_free_watcher_list(w, loop); + } + } +} + + +int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { + uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); + return 0; +} + + +int uv_fs_event_start(uv_fs_event_t* handle, + uv_fs_event_cb cb, + const char* path, + unsigned int flags) { + struct watcher_list* w; + uv_loop_t* loop; + size_t len; + int events; + int err; + int wd; + + if (uv__is_active(handle)) + return UV_EINVAL; + + loop = handle->loop; + + err = init_inotify(loop); + if (err) + return err; + + events = IN_ATTRIB + | IN_CREATE + | IN_MODIFY + | IN_DELETE + | IN_DELETE_SELF + | IN_MOVE_SELF + | IN_MOVED_FROM + | IN_MOVED_TO; + + wd = inotify_add_watch(loop->inotify_fd, path, events); + if (wd == -1) + return UV__ERR(errno); + + w = find_watcher(loop, wd); + if (w) + goto no_insert; + + len = strlen(path) + 1; + w = uv__malloc(sizeof(*w) + len); + if (w == NULL) + return UV_ENOMEM; + + w->wd = wd; + w->path = memcpy(w + 1, path, len); + QUEUE_INIT(&w->watchers); + w->iterating = 0; + RB_INSERT(watcher_root, uv__inotify_watchers(loop), w); + +no_insert: + uv__handle_start(handle); + QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); + handle->path = w->path; + handle->cb = cb; + handle->wd = wd; + + return 0; +} + + +int uv_fs_event_stop(uv_fs_event_t* handle) { + struct watcher_list* w; + + if (!uv__is_active(handle)) + return 0; + + w = find_watcher(handle->loop, handle->wd); + assert(w != NULL); + + handle->wd = -1; + handle->path = NULL; + uv__handle_stop(handle); + QUEUE_REMOVE(&handle->watchers); + + maybe_free_watcher_list(w, handle->loop); + + return 0; +} + + +void uv__fs_event_close(uv_fs_event_t* handle) { + uv_fs_event_stop(handle); +} diff --git a/deps/uv/src/unix/loop.c b/deps/uv/src/unix/loop.c index a88e71c339351f..90a51b339de016 100644 --- a/deps/uv/src/unix/loop.c +++ b/deps/uv/src/unix/loop.c @@ -45,6 +45,9 @@ int uv_loop_init(uv_loop_t* loop) { err = uv_mutex_init(&lfields->loop_metrics.lock); if (err) goto fail_metrics_mutex_init; + memset(&lfields->loop_metrics.metrics, + 0, + sizeof(lfields->loop_metrics.metrics)); heap_init((struct heap*) &loop->timer_heap); QUEUE_INIT(&loop->wq); @@ -79,12 +82,9 @@ int uv_loop_init(uv_loop_t* loop) { goto fail_platform_init; uv__signal_global_once_init(); - err = uv_signal_init(loop, &loop->child_watcher); + err = uv__process_init(loop); if (err) goto fail_signal_init; - - uv__handle_unref(&loop->child_watcher); - loop->child_watcher.flags |= UV_HANDLE_INTERNAL; QUEUE_INIT(&loop->process_handles); err = uv_rwlock_init(&loop->cloexec_lock); diff --git a/deps/uv/src/unix/netbsd.c b/deps/uv/src/unix/netbsd.c index c66333f522c5d4..fa21e98e41aec8 100644 --- a/deps/uv/src/unix/netbsd.c +++ b/deps/uv/src/unix/netbsd.c @@ -103,7 +103,7 @@ uint64_t uv_get_free_memory(void) { int which[] = {CTL_VM, VM_UVMEXP}; if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info.free * sysconf(_SC_PAGESIZE); } @@ -120,7 +120,7 @@ uint64_t uv_get_total_memory(void) { size_t size = sizeof(info); if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info; } @@ 
-131,6 +131,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + int uv_resident_set_memory(size_t* rss) { kvm_t *kd = NULL; struct kinfo_proc2 *kinfo = NULL; diff --git a/deps/uv/src/unix/openbsd.c b/deps/uv/src/unix/openbsd.c index f32a94df38765f..9c863b6c90dad9 100644 --- a/deps/uv/src/unix/openbsd.c +++ b/deps/uv/src/unix/openbsd.c @@ -116,7 +116,7 @@ uint64_t uv_get_free_memory(void) { int which[] = {CTL_VM, VM_UVMEXP}; if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info.free * sysconf(_SC_PAGESIZE); } @@ -128,7 +128,7 @@ uint64_t uv_get_total_memory(void) { size_t size = sizeof(info); if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0)) - return UV__ERR(errno); + return 0; return (uint64_t) info; } @@ -139,6 +139,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + int uv_resident_set_memory(size_t* rss) { struct kinfo_proc kinfo; size_t page_size = getpagesize(); diff --git a/deps/uv/src/unix/os390.c b/deps/uv/src/unix/os390.c index 3b16318ce28a92..a87c2d77fafa02 100644 --- a/deps/uv/src/unix/os390.c +++ b/deps/uv/src/unix/os390.c @@ -198,6 +198,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + int uv_resident_set_memory(size_t* rss) { char* ascb; char* rax; @@ -803,6 +808,7 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) { void uv__io_poll(uv_loop_t* loop, int timeout) { static const int max_safe_timeout = 1789569; + uv__loop_internal_fields_t* lfields; struct epoll_event events[1024]; struct epoll_event* pe; struct epoll_event e; @@ -825,6 +831,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { return; } + lfields = uv__get_internal_fields(loop); + while (!QUEUE_EMPTY(&loop->watcher_queue)) { uv_stream_t* stream; @@ -872,7 +880,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { int nevents = 0; have_signals = 0; - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -891,6 +899,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout) timeout = max_safe_timeout; + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. 
+ */ + lfields->current_timeout = timeout; + nfds = epoll_wait(loop->ep, events, ARRAY_SIZE(events), timeout); @@ -998,9 +1012,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { } } + uv__metrics_inc_events(loop, nevents); if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); } if (have_signals != 0) { diff --git a/deps/uv/src/unix/pipe.c b/deps/uv/src/unix/pipe.c index e8cfa1481c3648..610b09b37f338e 100644 --- a/deps/uv/src/unix/pipe.c +++ b/deps/uv/src/unix/pipe.c @@ -357,7 +357,7 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) { } /* stat must be used as fstat has a bug on Darwin */ - if (stat(name_buffer, &pipe_stat) == -1) { + if (uv__stat(name_buffer, &pipe_stat) == -1) { uv__free(name_buffer); return -errno; } diff --git a/deps/uv/src/unix/posix-hrtime.c b/deps/uv/src/unix/posix-hrtime.c index 323dfc20392423..7b45c01a4d06ee 100644 --- a/deps/uv/src/unix/posix-hrtime.c +++ b/deps/uv/src/unix/posix-hrtime.c @@ -23,13 +23,14 @@ #include "internal.h" #include +#include #include -#undef NANOSEC -#define NANOSEC ((uint64_t) 1e9) - uint64_t uv__hrtime(uv_clocktype_t type) { - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec); + struct timespec t; + + if (clock_gettime(CLOCK_MONOTONIC, &t)) + abort(); + + return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec; } diff --git a/deps/uv/src/unix/posix-poll.c b/deps/uv/src/unix/posix-poll.c index 0f4bf93874be89..7e7de86845ddb4 100644 --- a/deps/uv/src/unix/posix-poll.c +++ b/deps/uv/src/unix/posix-poll.c @@ -132,6 +132,7 @@ static void uv__pollfds_del(uv_loop_t* loop, int fd) { void uv__io_poll(uv_loop_t* loop, int timeout) { + uv__loop_internal_fields_t* lfields; sigset_t* pset; sigset_t set; uint64_t time_base; @@ -152,6 +153,8 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { return; } + lfields = uv__get_internal_fields(loop); + /* Take queued watchers and add their fds to our poll fds array. */ while (!QUEUE_EMPTY(&loop->watcher_queue)) { q = QUEUE_HEAD(&loop->watcher_queue); @@ -179,7 +182,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { assert(timeout >= -1); time_base = loop->time; - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -198,6 +201,12 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { if (timeout != 0) uv__metrics_set_provider_entry_time(loop); + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. 
+ */ + lfields->current_timeout = timeout; + if (pset != NULL) if (pthread_sigmask(SIG_BLOCK, pset, NULL)) abort(); @@ -292,9 +301,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { } } + uv__metrics_inc_events(loop, nevents); if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); } if (have_signals != 0) { diff --git a/deps/uv/src/unix/process.c b/deps/uv/src/unix/process.c index f84153687f799a..c4fb322d17f3da 100644 --- a/deps/uv/src/unix/process.c +++ b/deps/uv/src/unix/process.c @@ -55,7 +55,7 @@ extern char **environ; #endif -#if defined(__linux__) || defined(__GLIBC__) +#if defined(__linux__) # include #endif @@ -79,8 +79,28 @@ static void uv__chld(uv_signal_t* handle, int signum) { assert(signum == SIGCHLD); uv__wait_children(handle->loop); } + + +int uv__process_init(uv_loop_t* loop) { + int err; + + err = uv_signal_init(loop, &loop->child_watcher); + if (err) + return err; + uv__handle_unref(&loop->child_watcher); + loop->child_watcher.flags |= UV_HANDLE_INTERNAL; + return 0; +} + + +#else +int uv__process_init(uv_loop_t* loop) { + memset(&loop->child_watcher, 0, sizeof(loop->child_watcher)); + return 0; +} #endif + void uv__wait_children(uv_loop_t* loop) { uv_process_t* process; int exit_status; @@ -105,6 +125,7 @@ void uv__wait_children(uv_loop_t* loop) { continue; options = 0; process->flags &= ~UV_HANDLE_REAP; + loop->nfds--; #else options = WNOHANG; #endif @@ -665,7 +686,7 @@ static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options, if (options->file == NULL) return ENOENT; - /* The environment for the child process is that of the parent unless overriden + /* The environment for the child process is that of the parent unless overridden * by options->env */ char** env = environ; if (options->env != NULL) @@ -1012,6 +1033,10 @@ int uv_spawn(uv_loop_t* loop, process->flags |= UV_HANDLE_REAP; loop->flags |= UV_LOOP_REAP_CHILDREN; } + /* This prevents uv__io_poll() from bailing out prematurely, being unaware + * that we added an event here for it to react to. We will decrement this + * again after the waitpid call succeeds. */ + loop->nfds++; #endif process->pid = pid; @@ -1080,6 +1105,8 @@ int uv_kill(int pid, int signum) { void uv__process_close(uv_process_t* handle) { QUEUE_REMOVE(&handle->queue); uv__handle_stop(handle); +#ifdef UV_USE_SIGCHLD if (QUEUE_EMPTY(&handle->loop->process_handles)) uv_signal_stop(&handle->loop->child_watcher); +#endif } diff --git a/deps/uv/src/unix/pthread-fixes.c b/deps/uv/src/unix/pthread-fixes.c deleted file mode 100644 index 022d79c4e21615..00000000000000 --- a/deps/uv/src/unix/pthread-fixes.c +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright (c) 2013, Sony Mobile Communications AB - * Copyright (c) 2012, Google Inc. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. 
- - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -/* Android versions < 4.1 have a broken pthread_sigmask. */ -#include "uv-common.h" - -#include -#include -#include - -int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) { - static int workaround; - int err; - - if (uv__load_relaxed(&workaround)) { - return sigprocmask(how, set, oset); - } else { - err = pthread_sigmask(how, set, oset); - if (err) { - if (err == EINVAL && sigprocmask(how, set, oset) == 0) { - uv__store_relaxed(&workaround, 1); - return 0; - } else { - return -1; - } - } - } - - return 0; -} diff --git a/deps/uv/src/unix/qnx.c b/deps/uv/src/unix/qnx.c index ca148d349f87c8..57ea9dfd9ccc9c 100644 --- a/deps/uv/src/unix/qnx.c +++ b/deps/uv/src/unix/qnx.c @@ -88,6 +88,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + int uv_resident_set_memory(size_t* rss) { int fd; procfs_asinfo asinfo; diff --git a/deps/uv/src/unix/random-devurandom.c b/deps/uv/src/unix/random-devurandom.c index 05e52a56a364ea..d6336f2c98c2c9 100644 --- a/deps/uv/src/unix/random-devurandom.c +++ b/deps/uv/src/unix/random-devurandom.c @@ -40,7 +40,7 @@ int uv__random_readpath(const char* path, void* buf, size_t buflen) { if (fd < 0) return fd; - if (fstat(fd, &s)) { + if (uv__fstat(fd, &s)) { uv__close(fd); return UV__ERR(errno); } diff --git a/deps/uv/src/unix/random-getrandom.c b/deps/uv/src/unix/random-getrandom.c index bcc94089bcb64e..054eccf1664666 100644 --- a/deps/uv/src/unix/random-getrandom.c +++ b/deps/uv/src/unix/random-getrandom.c @@ -24,8 +24,6 @@ #ifdef __linux__ -#include "linux-syscalls.h" - #define uv__random_getrandom_init() 0 #else /* !__linux__ */ diff --git a/deps/uv/src/unix/signal.c b/deps/uv/src/unix/signal.c index 1133c73a955525..bb70523f561dee 100644 --- a/deps/uv/src/unix/signal.c +++ b/deps/uv/src/unix/signal.c @@ -279,6 +279,8 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) { int uv__signal_loop_fork(uv_loop_t* loop) { + if (loop->signal_pipefd[0] == -1) + return 0; uv__io_stop(loop, &loop->signal_io_watcher, POLLIN); uv__close(loop->signal_pipefd[0]); uv__close(loop->signal_pipefd[1]); diff --git a/deps/uv/src/unix/spinlock.h b/deps/uv/src/unix/spinlock.h deleted file mode 100644 index a20c83cc601d9f..00000000000000 --- a/deps/uv/src/unix/spinlock.h +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright (c) 2013, Ben Noordhuis - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef UV_SPINLOCK_H_ -#define UV_SPINLOCK_H_ - -#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */ -#include "atomic-ops.h" - -#define UV_SPINLOCK_INITIALIZER { 0 } - -typedef struct { - int lock; -} uv_spinlock_t; - -UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)); -UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)); -UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)); -UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)); - -UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) { - ACCESS_ONCE(int, spinlock->lock) = 0; -} - -UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) { - while (!uv_spinlock_trylock(spinlock)) cpu_relax(); -} - -UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) { - ACCESS_ONCE(int, spinlock->lock) = 0; -} - -UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) { - /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing. - * Not really critical until we have locks that are (frequently) contended - * for by several threads. - */ - return 0 == cmpxchgi(&spinlock->lock, 0, 1); -} - -#endif /* UV_SPINLOCK_H_ */ diff --git a/deps/uv/src/unix/stream.c b/deps/uv/src/unix/stream.c index b1f6359e0de2c5..03f92b5045ab4e 100644 --- a/deps/uv/src/unix/stream.c +++ b/deps/uv/src/unix/stream.c @@ -60,6 +60,16 @@ struct uv__stream_select_s { }; #endif /* defined(__APPLE__) */ +union uv__cmsg { + struct cmsghdr hdr; + /* This cannot be larger because of the IBMi PASE limitation that + * the total size of control messages cannot exceed 256 bytes. + */ + char pad[256]; +}; + +STATIC_ASSERT(256 == sizeof(union uv__cmsg)); + static void uv__stream_connect(uv_stream_t*); static void uv__write(uv_stream_t* stream); static void uv__read(uv_stream_t* stream); @@ -495,76 +505,34 @@ static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) { } -#if defined(UV_HAVE_KQUEUE) -# define UV_DEC_BACKLOG(w) w->rcount--; -#else -# define UV_DEC_BACKLOG(w) /* no-op */ -#endif /* defined(UV_HAVE_KQUEUE) */ - - void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) { uv_stream_t* stream; int err; + int fd; stream = container_of(w, uv_stream_t, io_watcher); assert(events & POLLIN); assert(stream->accepted_fd == -1); assert(!(stream->flags & UV_HANDLE_CLOSING)); - uv__io_start(stream->loop, &stream->io_watcher, POLLIN); - - /* connection_cb can close the server socket while we're - * in the loop so check it on each iteration. - */ - while (uv__stream_fd(stream) != -1) { - assert(stream->accepted_fd == -1); - -#if defined(UV_HAVE_KQUEUE) - if (w->rcount <= 0) - return; -#endif /* defined(UV_HAVE_KQUEUE) */ - - err = uv__accept(uv__stream_fd(stream)); - if (err < 0) { - if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK)) - return; /* Not an error. */ + fd = uv__stream_fd(stream); + err = uv__accept(fd); - if (err == UV_ECONNABORTED) - continue; /* Ignore. Nothing we can do about that. 
*/ + if (err == UV_EMFILE || err == UV_ENFILE) + err = uv__emfile_trick(loop, fd); /* Shed load. */ - if (err == UV_EMFILE || err == UV_ENFILE) { - err = uv__emfile_trick(loop, uv__stream_fd(stream)); - if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK)) - break; - } - - stream->connection_cb(stream, err); - continue; - } - - UV_DEC_BACKLOG(w) - stream->accepted_fd = err; - stream->connection_cb(stream, 0); + if (err < 0) + return; - if (stream->accepted_fd != -1) { - /* The user hasn't yet accepted called uv_accept() */ - uv__io_stop(loop, &stream->io_watcher, POLLIN); - return; - } + stream->accepted_fd = err; + stream->connection_cb(stream, 0); - if (stream->type == UV_TCP && - (stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) { - /* Give other processes a chance to accept connections. */ - struct timespec timeout = { 0, 1 }; - nanosleep(&timeout, NULL); - } - } + if (stream->accepted_fd != -1) + /* The user hasn't yet accepted called uv_accept() */ + uv__io_stop(loop, &stream->io_watcher, POLLIN); } -#undef UV_DEC_BACKLOG - - int uv_accept(uv_stream_t* server, uv_stream_t* client) { int err; @@ -665,7 +633,7 @@ static void uv__drain(uv_stream_t* stream) { uv__stream_osx_interrupt_select(stream); } - if (!(stream->flags & UV_HANDLE_SHUTTING)) + if (!uv__is_stream_shutting(stream)) return; req = stream->shutdown_req; @@ -674,7 +642,6 @@ static void uv__drain(uv_stream_t* stream) { if ((stream->flags & UV_HANDLE_CLOSING) || !(stream->flags & UV_HANDLE_SHUT)) { stream->shutdown_req = NULL; - stream->flags &= ~UV_HANDLE_SHUTTING; uv__req_unregister(stream->loop, req); err = 0; @@ -812,18 +779,14 @@ static int uv__try_write(uv_stream_t* stream, if (send_handle != NULL) { int fd_to_send; struct msghdr msg; - struct cmsghdr *cmsg; - union { - char data[64]; - struct cmsghdr alias; - } scratch; + union uv__cmsg cmsg; if (uv__is_closing(send_handle)) return UV_EBADF; fd_to_send = uv__handle_fd((uv_handle_t*) send_handle); - memset(&scratch, 0, sizeof(scratch)); + memset(&cmsg, 0, sizeof(cmsg)); assert(fd_to_send >= 0); @@ -833,20 +796,13 @@ static int uv__try_write(uv_stream_t* stream, msg.msg_iovlen = iovcnt; msg.msg_flags = 0; - msg.msg_control = &scratch.alias; + msg.msg_control = &cmsg.hdr; msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send)); - cmsg = CMSG_FIRSTHDR(&msg); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send)); - - /* silence aliasing warning */ - { - void* pv = CMSG_DATA(cmsg); - int* pi = pv; - *pi = fd_to_send; - } + cmsg.hdr.cmsg_level = SOL_SOCKET; + cmsg.hdr.cmsg_type = SCM_RIGHTS; + cmsg.hdr.cmsg_len = CMSG_LEN(sizeof(fd_to_send)); + memcpy(CMSG_DATA(&cmsg.hdr), &fd_to_send, sizeof(fd_to_send)); do n = sendmsg(uv__stream_fd(stream), &msg, 0); @@ -884,9 +840,16 @@ static void uv__write(uv_stream_t* stream) { QUEUE* q; uv_write_t* req; ssize_t n; + int count; assert(uv__stream_fd(stream) >= 0); + /* Prevent loop starvation when the consumer of this stream read as fast as + * (or faster than) we can write it. This `count` mechanism does not need to + * change even if we switch to edge-triggered I/O. + */ + count = 32; + for (;;) { if (QUEUE_EMPTY(&stream->write_queue)) return; @@ -905,10 +868,13 @@ static void uv__write(uv_stream_t* stream) { req->send_handle = NULL; if (uv__write_req_update(stream, req, n)) { uv__write_req_finish(req); - return; /* TODO(bnoordhuis) Start trying to write the next request. */ + if (count-- > 0) + continue; /* Start trying to write the next request. 
*/ + + return; } } else if (n != UV_EAGAIN) - break; + goto error; /* If this is a blocking stream, try again. */ if (stream->flags & UV_HANDLE_BLOCKING_WRITES) @@ -923,6 +889,7 @@ static void uv__write(uv_stream_t* stream) { return; } +error: req->error = n; uv__write_req_finish(req); uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT); @@ -1010,57 +977,38 @@ static int uv__stream_queue_fd(uv_stream_t* stream, int fd) { } -#if defined(__PASE__) -/* on IBMi PASE the control message length can not exceed 256. */ -# define UV__CMSG_FD_COUNT 60 -#else -# define UV__CMSG_FD_COUNT 64 -#endif -#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int)) - - static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { struct cmsghdr* cmsg; + int fd; + int err; + size_t i; + size_t count; for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) { - char* start; - char* end; - int err; - void* pv; - int* pi; - unsigned int i; - unsigned int count; - if (cmsg->cmsg_type != SCM_RIGHTS) { fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n", cmsg->cmsg_type); continue; } - /* silence aliasing warning */ - pv = CMSG_DATA(cmsg); - pi = pv; - - /* Count available fds */ - start = (char*) cmsg; - end = (char*) cmsg + cmsg->cmsg_len; - count = 0; - while (start + CMSG_LEN(count * sizeof(*pi)) < end) - count++; - assert(start + CMSG_LEN(count * sizeof(*pi)) == end); + assert(cmsg->cmsg_len >= CMSG_LEN(0)); + count = cmsg->cmsg_len - CMSG_LEN(0); + assert(count % sizeof(fd) == 0); + count /= sizeof(fd); for (i = 0; i < count; i++) { + memcpy(&fd, (char*) CMSG_DATA(cmsg) + i * sizeof(fd), sizeof(fd)); /* Already has accepted fd, queue now */ if (stream->accepted_fd != -1) { - err = uv__stream_queue_fd(stream, pi[i]); + err = uv__stream_queue_fd(stream, fd); if (err != 0) { /* Close rest */ for (; i < count; i++) - uv__close(pi[i]); + uv__close(fd); return err; } } else { - stream->accepted_fd = pi[i]; + stream->accepted_fd = fd; } } } @@ -1069,17 +1017,11 @@ static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) { } -#ifdef __clang__ -# pragma clang diagnostic push -# pragma clang diagnostic ignored "-Wgnu-folding-constant" -# pragma clang diagnostic ignored "-Wvla-extension" -#endif - static void uv__read(uv_stream_t* stream) { uv_buf_t buf; ssize_t nread; struct msghdr msg; - char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)]; + union uv__cmsg cmsg; int count; int err; int is_ipc; @@ -1125,8 +1067,8 @@ static void uv__read(uv_stream_t* stream) { msg.msg_name = NULL; msg.msg_namelen = 0; /* Set up to receive a descriptor even if one isn't in the message */ - msg.msg_controllen = sizeof(cmsg_space); - msg.msg_control = cmsg_space; + msg.msg_controllen = sizeof(cmsg); + msg.msg_control = &cmsg.hdr; do { nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0); @@ -1210,14 +1152,6 @@ static void uv__read(uv_stream_t* stream) { } -#ifdef __clang__ -# pragma clang diagnostic pop -#endif - -#undef UV__CMSG_FD_COUNT -#undef UV__CMSG_FD_SIZE - - int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { assert(stream->type == UV_TCP || stream->type == UV_TTY || @@ -1225,7 +1159,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) { if (!(stream->flags & UV_HANDLE_WRITABLE) || stream->flags & UV_HANDLE_SHUT || - stream->flags & UV_HANDLE_SHUTTING || + uv__is_stream_shutting(stream) || uv__is_closing(stream)) { return UV_ENOTCONN; } @@ -1238,7 +1172,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, 
uv_shutdown_cb cb) { req->handle = stream; req->cb = cb; stream->shutdown_req = req; - stream->flags |= UV_HANDLE_SHUTTING; stream->flags &= ~UV_HANDLE_WRITABLE; if (QUEUE_EMPTY(&stream->write_queue)) diff --git a/deps/uv/src/unix/sunos.c b/deps/uv/src/unix/sunos.c index 7835bed75e0cc5..75b6fbad493707 100644 --- a/deps/uv/src/unix/sunos.c +++ b/deps/uv/src/unix/sunos.c @@ -320,9 +320,11 @@ void uv__io_poll(uv_loop_t* loop, int timeout) { QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue); } + uv__metrics_inc_events(loop, nevents); if (reset_timeout != 0) { timeout = user_timeout; reset_timeout = 0; + uv__metrics_inc_events_waiting(loop, nevents); } if (have_signals != 0) { @@ -415,6 +417,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + void uv_loadavg(double avg[3]) { (void) getloadavg(avg, 3); } diff --git a/deps/uv/src/unix/tcp.c b/deps/uv/src/unix/tcp.c index 73fc657a86d41b..ab4e06c2f67974 100644 --- a/deps/uv/src/unix/tcp.c +++ b/deps/uv/src/unix/tcp.c @@ -28,16 +28,39 @@ #include -static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) { - struct sockaddr_storage saddr; +static int maybe_bind_socket(int fd) { + union uv__sockaddr s; socklen_t slen; + + slen = sizeof(s); + memset(&s, 0, sizeof(s)); + + if (getsockname(fd, &s.addr, &slen)) + return UV__ERR(errno); + + if (s.addr.sa_family == AF_INET) + if (s.in.sin_port != 0) + return 0; /* Already bound to a port. */ + + if (s.addr.sa_family == AF_INET6) + if (s.in6.sin6_port != 0) + return 0; /* Already bound to a port. */ + + /* Bind to an arbitrary port. */ + if (bind(fd, &s.addr, slen)) + return UV__ERR(errno); + + return 0; +} + + +static int new_socket(uv_tcp_t* handle, int domain, unsigned int flags) { int sockfd; int err; - err = uv__socket(domain, SOCK_STREAM, 0); - if (err < 0) - return err; - sockfd = err; + sockfd = uv__socket(domain, SOCK_STREAM, 0); + if (sockfd < 0) + return sockfd; err = uv__stream_open((uv_stream_t*) handle, sockfd, flags); if (err) { @@ -45,74 +68,44 @@ static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) { return err; } - if (flags & UV_HANDLE_BOUND) { - /* Bind this new socket to an arbitrary port */ - slen = sizeof(saddr); - memset(&saddr, 0, sizeof(saddr)); - if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) { - uv__close(sockfd); - return UV__ERR(errno); - } - - if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) { - uv__close(sockfd); - return UV__ERR(errno); - } - } + if (flags & UV_HANDLE_BOUND) + return maybe_bind_socket(sockfd); return 0; } -static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) { - struct sockaddr_storage saddr; - socklen_t slen; +static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned int flags) { + int sockfd; + int err; - if (domain == AF_UNSPEC) { - handle->flags |= flags; - return 0; - } + if (domain == AF_UNSPEC) + goto out; - if (uv__stream_fd(handle) != -1) { + sockfd = uv__stream_fd(handle); + if (sockfd == -1) + return new_socket(handle, domain, flags); - if (flags & UV_HANDLE_BOUND) { - - if (handle->flags & UV_HANDLE_BOUND) { - /* It is already bound to a port. */ - handle->flags |= flags; - return 0; - } - - /* Query to see if tcp socket is bound. 
*/ - slen = sizeof(saddr); - memset(&saddr, 0, sizeof(saddr)); - if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) - return UV__ERR(errno); - - if ((saddr.ss_family == AF_INET6 && - ((struct sockaddr_in6*) &saddr)->sin6_port != 0) || - (saddr.ss_family == AF_INET && - ((struct sockaddr_in*) &saddr)->sin_port != 0)) { - /* Handle is already bound to a port. */ - handle->flags |= flags; - return 0; - } - - /* Bind to arbitrary port */ - if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) - return UV__ERR(errno); - } + if (!(flags & UV_HANDLE_BOUND)) + goto out; - handle->flags |= flags; - return 0; - } + if (handle->flags & UV_HANDLE_BOUND) + goto out; /* Already bound to a port. */ + + err = maybe_bind_socket(sockfd); + if (err) + return err; - return new_socket(handle, domain, flags); +out: + + handle->flags |= flags; + return 0; } int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { int domain; + int err; /* Use the lower 8 bits for the domain */ domain = flags & 0xFF; @@ -129,9 +122,12 @@ int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { */ if (domain != AF_UNSPEC) { - int err = maybe_new_socket(tcp, domain, 0); + err = new_socket(tcp, domain, 0); if (err) { QUEUE_REMOVE(&tcp->handle_queue); + if (tcp->io_watcher.fd != -1) + uv__close(tcp->io_watcher.fd); + tcp->io_watcher.fd = -1; return err; } } @@ -317,7 +313,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) { struct linger l = { 1, 0 }; /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ - if (handle->flags & UV_HANDLE_SHUTTING) + if (uv__is_stream_shutting(handle)) return UV_EINVAL; fd = uv__stream_fd(handle); @@ -338,24 +334,12 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) { int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { - static int single_accept_cached = -1; - unsigned long flags; - int single_accept; + unsigned int flags; int err; if (tcp->delayed_error) return tcp->delayed_error; - single_accept = uv__load_relaxed(&single_accept_cached); - if (single_accept == -1) { - const char* val = getenv("UV_TCP_SINGLE_ACCEPT"); - single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */ - uv__store_relaxed(&single_accept_cached, single_accept); - } - - if (single_accept) - tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT; - flags = 0; #if defined(__MVS__) /* on zOS the listen call does not bind automatically @@ -460,10 +444,6 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) { int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) { - if (enable) - handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT; - else - handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT; return 0; } diff --git a/deps/uv/src/unix/thread.c b/deps/uv/src/unix/thread.c index d89e5cd13ba0ad..4d6f4b8232ec6d 100644 --- a/deps/uv/src/unix/thread.c +++ b/deps/uv/src/unix/thread.c @@ -41,126 +41,19 @@ #include /* gnu_get_libc_version() */ #endif -#undef NANOSEC -#define NANOSEC ((uint64_t) 1e9) - -#if defined(PTHREAD_BARRIER_SERIAL_THREAD) -STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t)); +#if defined(__linux__) +# include +# define uv__cpu_set_t cpu_set_t +#elif defined(__FreeBSD__) +# include +# include +# include +# define uv__cpu_set_t cpuset_t #endif -/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. 
*/ -#if defined(_AIX) || \ - defined(__OpenBSD__) || \ - !defined(PTHREAD_BARRIER_SERIAL_THREAD) -int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { - struct _uv_barrier* b; - int rc; - - if (barrier == NULL || count == 0) - return UV_EINVAL; - - b = uv__malloc(sizeof(*b)); - if (b == NULL) - return UV_ENOMEM; - - b->in = 0; - b->out = 0; - b->threshold = count; - - rc = uv_mutex_init(&b->mutex); - if (rc != 0) - goto error2; - - rc = uv_cond_init(&b->cond); - if (rc != 0) - goto error; - - barrier->b = b; - return 0; - -error: - uv_mutex_destroy(&b->mutex); -error2: - uv__free(b); - return rc; -} - - -int uv_barrier_wait(uv_barrier_t* barrier) { - struct _uv_barrier* b; - int last; - - if (barrier == NULL || barrier->b == NULL) - return UV_EINVAL; - - b = barrier->b; - uv_mutex_lock(&b->mutex); - - if (++b->in == b->threshold) { - b->in = 0; - b->out = b->threshold; - uv_cond_signal(&b->cond); - } else { - do - uv_cond_wait(&b->cond, &b->mutex); - while (b->in != 0); - } - - last = (--b->out == 0); - uv_cond_signal(&b->cond); - - uv_mutex_unlock(&b->mutex); - return last; -} - - -void uv_barrier_destroy(uv_barrier_t* barrier) { - struct _uv_barrier* b; - - b = barrier->b; - uv_mutex_lock(&b->mutex); - - assert(b->in == 0); - while (b->out != 0) - uv_cond_wait(&b->cond, &b->mutex); - - if (b->in != 0) - abort(); - - uv_mutex_unlock(&b->mutex); - uv_mutex_destroy(&b->mutex); - uv_cond_destroy(&b->cond); - - uv__free(barrier->b); - barrier->b = NULL; -} - -#else - -int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { - return UV__ERR(pthread_barrier_init(barrier, NULL, count)); -} - - -int uv_barrier_wait(uv_barrier_t* barrier) { - int rc; - - rc = pthread_barrier_wait(barrier); - if (rc != 0) - if (rc != PTHREAD_BARRIER_SERIAL_THREAD) - abort(); - - return rc == PTHREAD_BARRIER_SERIAL_THREAD; -} - - -void uv_barrier_destroy(uv_barrier_t* barrier) { - if (pthread_barrier_destroy(barrier)) - abort(); -} - -#endif +#undef NANOSEC +#define NANOSEC ((uint64_t) 1e9) /* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is * too small to safely receive signals on. 
@@ -284,6 +177,106 @@ int uv_thread_create_ex(uv_thread_t* tid, return UV__ERR(err); } +#if UV__CPU_AFFINITY_SUPPORTED + +int uv_thread_setaffinity(uv_thread_t* tid, + char* cpumask, + char* oldmask, + size_t mask_size) { + int i; + int r; + uv__cpu_set_t cpuset; + int cpumasksize; + + cpumasksize = uv_cpumask_size(); + if (cpumasksize < 0) + return cpumasksize; + if (mask_size < (size_t)cpumasksize) + return UV_EINVAL; + + if (oldmask != NULL) { + r = uv_thread_getaffinity(tid, oldmask, mask_size); + if (r < 0) + return r; + } + + CPU_ZERO(&cpuset); + for (i = 0; i < cpumasksize; i++) + if (cpumask[i]) + CPU_SET(i, &cpuset); + +#if defined(__ANDROID__) + if (sched_setaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset)) + r = errno; + else + r = 0; +#else + r = pthread_setaffinity_np(*tid, sizeof(cpuset), &cpuset); +#endif + + return UV__ERR(r); +} + + +int uv_thread_getaffinity(uv_thread_t* tid, + char* cpumask, + size_t mask_size) { + int r; + int i; + uv__cpu_set_t cpuset; + int cpumasksize; + + cpumasksize = uv_cpumask_size(); + if (cpumasksize < 0) + return cpumasksize; + if (mask_size < (size_t)cpumasksize) + return UV_EINVAL; + + CPU_ZERO(&cpuset); +#if defined(__ANDROID__) + if (sched_getaffinity(pthread_gettid_np(*tid), sizeof(cpuset), &cpuset)) + r = errno; + else + r = 0; +#else + r = pthread_getaffinity_np(*tid, sizeof(cpuset), &cpuset); +#endif + if (r) + return UV__ERR(r); + for (i = 0; i < cpumasksize; i++) + cpumask[i] = !!CPU_ISSET(i, &cpuset); + + return 0; +} +#else +int uv_thread_setaffinity(uv_thread_t* tid, + char* cpumask, + char* oldmask, + size_t mask_size) { + return UV_ENOTSUP; +} + + +int uv_thread_getaffinity(uv_thread_t* tid, + char* cpumask, + size_t mask_size) { + return UV_ENOTSUP; +} +#endif /* defined(__linux__) || defined(UV_BSD_H) */ + +int uv_thread_getcpu(void) { +#if UV__CPU_AFFINITY_SUPPORTED + int cpu; + + cpu = sched_getcpu(); + if (cpu < 0) + return UV__ERR(errno); + + return cpu; +#else + return UV_ENOTSUP; +#endif +} uv_thread_t uv_thread_self(void) { return pthread_self(); @@ -585,7 +578,7 @@ static void uv__custom_sem_post(uv_sem_t* sem_) { uv_mutex_lock(&sem->mutex); sem->value++; if (sem->value == 1) - uv_cond_signal(&sem->cond); + uv_cond_signal(&sem->cond); /* Release one to replace us. */ uv_mutex_unlock(&sem->mutex); } diff --git a/deps/uv/src/unix/tty.c b/deps/uv/src/unix/tty.c index b41505258ff822..7a5390c1a8bd83 100644 --- a/deps/uv/src/unix/tty.c +++ b/deps/uv/src/unix/tty.c @@ -21,8 +21,8 @@ #include "uv.h" #include "internal.h" -#include "spinlock.h" +#include #include #include #include @@ -64,7 +64,7 @@ static int isreallyatty(int file) { static int orig_termios_fd = -1; static struct termios orig_termios; -static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER; +static _Atomic int termios_spinlock; int uv__tcsetattr(int fd, int how, const struct termios *term) { int rc; @@ -81,7 +81,7 @@ int uv__tcsetattr(int fd, int how, const struct termios *term) { static int uv__tty_is_slave(const int fd) { int result; -#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#if defined(__linux__) || defined(__FreeBSD__) int dummy; result = ioctl(fd, TIOCGPTN, &dummy) != 0; @@ -113,7 +113,7 @@ static int uv__tty_is_slave(const int fd) { } /* Lookup stat structure behind the file descriptor. */ - if (fstat(fd, &sb) != 0) + if (uv__fstat(fd, &sb) != 0) abort(); /* Assert character device. 
*/ @@ -280,6 +280,7 @@ static void uv__tty_make_raw(struct termios* tio) { int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { struct termios tmp; + int expected; int fd; int rc; @@ -296,12 +297,16 @@ int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) { return UV__ERR(errno); /* This is used for uv_tty_reset_mode() */ - uv_spinlock_lock(&termios_spinlock); + do + expected = 0; + while (!atomic_compare_exchange_strong(&termios_spinlock, &expected, 1)); + if (orig_termios_fd == -1) { orig_termios = tty->orig_termios; orig_termios_fd = fd; } - uv_spinlock_unlock(&termios_spinlock); + + atomic_store(&termios_spinlock, 0); } tmp = tty->orig_termios; @@ -360,7 +365,7 @@ uv_handle_type uv_guess_handle(uv_file file) { if (isatty(file)) return UV_TTY; - if (fstat(file, &s)) { + if (uv__fstat(file, &s)) { #if defined(__PASE__) /* On ibmi receiving RST from TCP instead of FIN immediately puts fd into * an error state. fstat will return EINVAL, getsockname will also return @@ -445,14 +450,15 @@ int uv_tty_reset_mode(void) { int err; saved_errno = errno; - if (!uv_spinlock_trylock(&termios_spinlock)) + + if (atomic_exchange(&termios_spinlock, 1)) return UV_EBUSY; /* In uv_tty_set_mode(). */ err = 0; if (orig_termios_fd != -1) err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios); - uv_spinlock_unlock(&termios_spinlock); + atomic_store(&termios_spinlock, 0); errno = saved_errno; return err; diff --git a/deps/uv/src/unix/udp.c b/deps/uv/src/unix/udp.c index 4d985b88ba9304..f556808fbae68e 100644 --- a/deps/uv/src/unix/udp.c +++ b/deps/uv/src/unix/udp.c @@ -40,12 +40,6 @@ # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP #endif -union uv__sockaddr { - struct sockaddr_in6 in6; - struct sockaddr_in in; - struct sockaddr addr; -}; - static void uv__udp_run_completed(uv_udp_t* handle); static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); static void uv__udp_recvmsg(uv_udp_t* handle); @@ -54,36 +48,6 @@ static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, int domain, unsigned int flags); -#if HAVE_MMSG - -#define UV__MMSG_MAXWIDTH 20 - -static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf); -static void uv__udp_sendmmsg(uv_udp_t* handle); - -static int uv__recvmmsg_avail; -static int uv__sendmmsg_avail; -static uv_once_t once = UV_ONCE_INIT; - -static void uv__udp_mmsg_init(void) { - int ret; - int s; - s = uv__socket(AF_INET, SOCK_DGRAM, 0); - if (s < 0) - return; - ret = uv__sendmmsg(s, NULL, 0); - if (ret == 0 || errno != ENOSYS) { - uv__sendmmsg_avail = 1; - uv__recvmmsg_avail = 1; - } else { - ret = uv__recvmmsg(s, NULL, 0); - if (ret == 0 || errno != ENOSYS) - uv__recvmmsg_avail = 1; - } - uv__close(s); -} - -#endif void uv__udp_close(uv_udp_t* handle) { uv__io_close(handle->loop, &handle->io_watcher); @@ -183,11 +147,11 @@ static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) { } } -#if HAVE_MMSG static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { - struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH]; - struct iovec iov[UV__MMSG_MAXWIDTH]; - struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH]; +#if defined(__linux__) || defined(__FreeBSD__) + struct sockaddr_in6 peers[20]; + struct iovec iov[ARRAY_SIZE(peers)]; + struct mmsghdr msgs[ARRAY_SIZE(peers)]; ssize_t nread; uv_buf_t chunk_buf; size_t chunks; @@ -212,7 +176,7 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { } do - nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks); + nread = recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL); while (nread == -1 && errno == 
EINTR); if (nread < 1) { @@ -240,8 +204,10 @@ static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) { handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE); } return nread; +#else /* __linux__ || ____FreeBSD__ */ + return UV_ENOSYS; +#endif /* __linux__ || ____FreeBSD__ */ } -#endif static void uv__udp_recvmsg(uv_udp_t* handle) { struct sockaddr_storage peer; @@ -268,14 +234,12 @@ static void uv__udp_recvmsg(uv_udp_t* handle) { } assert(buf.base != NULL); -#if HAVE_MMSG if (uv_udp_using_recvmmsg(handle)) { nread = uv__udp_recvmmsg(handle, &buf); if (nread > 0) count -= nread; continue; } -#endif memset(&h, 0, sizeof(h)); memset(&peer, 0, sizeof(peer)); @@ -311,11 +275,11 @@ static void uv__udp_recvmsg(uv_udp_t* handle) { && handle->recv_cb != NULL); } -#if HAVE_MMSG -static void uv__udp_sendmmsg(uv_udp_t* handle) { +static void uv__udp_sendmsg(uv_udp_t* handle) { +#if defined(__linux__) || defined(__FreeBSD__) uv_udp_send_t* req; - struct uv__mmsghdr h[UV__MMSG_MAXWIDTH]; - struct uv__mmsghdr *p; + struct mmsghdr h[20]; + struct mmsghdr* p; QUEUE* q; ssize_t npkts; size_t pkts; @@ -326,7 +290,7 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) { write_queue_drain: for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue); - pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue; + pkts < ARRAY_SIZE(h) && q != &handle->write_queue; ++pkts, q = QUEUE_HEAD(q)) { assert(q != NULL); req = QUEUE_DATA(q, uv_udp_send_t, queue); @@ -355,7 +319,7 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) { } do - npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts); + npkts = sendmmsg(handle->io_watcher.fd, h, pkts, 0); while (npkts == -1 && errno == EINTR); if (npkts < 1) { @@ -401,24 +365,12 @@ static void uv__udp_sendmmsg(uv_udp_t* handle) { if (!QUEUE_EMPTY(&handle->write_queue)) goto write_queue_drain; uv__io_feed(handle->loop, &handle->io_watcher); - return; -} -#endif - -static void uv__udp_sendmsg(uv_udp_t* handle) { +#else /* __linux__ || ____FreeBSD__ */ uv_udp_send_t* req; struct msghdr h; QUEUE* q; ssize_t size; -#if HAVE_MMSG - uv_once(&once, uv__udp_mmsg_init); - if (uv__sendmmsg_avail) { - uv__udp_sendmmsg(handle); - return; - } -#endif - while (!QUEUE_EMPTY(&handle->write_queue)) { q = QUEUE_HEAD(&handle->write_queue); assert(q != NULL); @@ -466,6 +418,7 @@ static void uv__udp_sendmsg(uv_udp_t* handle) { QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue); uv__io_feed(handle->loop, &handle->io_watcher); } +#endif /* __linux__ || ____FreeBSD__ */ } /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional @@ -495,7 +448,8 @@ static int uv__set_reuse(int fd) { if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) return UV__ERR(errno); } -#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) +#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__) && \ + !defined(__sun__) if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes))) return UV__ERR(errno); #else @@ -1061,11 +1015,9 @@ int uv__udp_init_ex(uv_loop_t* loop, int uv_udp_using_recvmmsg(const uv_udp_t* handle) { -#if HAVE_MMSG - if (handle->flags & UV_HANDLE_UDP_RECVMMSG) { - uv_once(&once, uv__udp_mmsg_init); - return uv__recvmmsg_avail; - } +#if defined(__linux__) || defined(__FreeBSD__) + if (handle->flags & UV_HANDLE_UDP_RECVMMSG) + return 1; #endif return 0; } diff --git a/deps/uv/src/uv-common.c b/deps/uv/src/uv-common.c index efc9eb50ee3b9e..cec771fab21339 100644 --- a/deps/uv/src/uv-common.c +++ b/deps/uv/src/uv-common.c @@ -128,6 +128,39 @@ int 
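Note: the udp.c hunks above drop the runtime HAVE_MMSG probe and call recvmmsg()/sendmmsg() directly on Linux and FreeBSD. A hedged sketch of the same batching outside libuv; fd is assumed to be a bound, non-blocking UDP socket, and the 20-slot batch mirrors the array size used in the patch:

    #define _GNU_SOURCE
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <errno.h>
    #include <string.h>

    static int demo_recv_batch(int fd) {
      static char slab[20][1500];          /* one MTU-sized chunk per slot */
      struct sockaddr_in6 peers[20];
      struct iovec iov[20];
      struct mmsghdr msgs[20];
      int i;
      int nread;

      memset(msgs, 0, sizeof(msgs));
      for (i = 0; i < 20; i++) {
        iov[i].iov_base = slab[i];
        iov[i].iov_len = sizeof(slab[i]);
        msgs[i].msg_hdr.msg_iov = &iov[i];
        msgs[i].msg_hdr.msg_iovlen = 1;
        msgs[i].msg_hdr.msg_name = &peers[i];
        msgs[i].msg_hdr.msg_namelen = sizeof(peers[i]);
      }

      do                                   /* one syscall drains many datagrams */
        nread = recvmmsg(fd, msgs, 20, 0, NULL);
      while (nread == -1 && errno == EINTR);

      return nread;                        /* datagrams received, or -1 */
    }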
uv_replace_allocator(uv_malloc_func malloc_func, return 0; } + +void uv_os_free_passwd(uv_passwd_t* pwd) { + if (pwd == NULL) + return; + + /* On unix, the memory for name, shell, and homedir are allocated in a single + * uv__malloc() call. The base of the pointer is stored in pwd->username, so + * that is the field that needs to be freed. + */ + uv__free(pwd->username); +#ifdef _WIN32 + uv__free(pwd->homedir); +#endif + pwd->username = NULL; + pwd->shell = NULL; + pwd->homedir = NULL; +} + + +void uv_os_free_group(uv_group_t *grp) { + if (grp == NULL) + return; + + /* The memory for is allocated in a single uv__malloc() call. The base of the + * pointer is stored in grp->members, so that is the only field that needs to + * be freed. + */ + uv__free(grp->members); + grp->members = NULL; + grp->groupname = NULL; +} + + #define XX(uc, lc) case UV_##uc: return sizeof(uv_##lc##_t); size_t uv_handle_size(uv_handle_type type) { @@ -650,14 +683,22 @@ static unsigned int* uv__get_nbufs(uv_fs_t* req) { void uv__fs_scandir_cleanup(uv_fs_t* req) { uv__dirent_t** dents; + unsigned int* nbufs; + unsigned int i; + unsigned int n; - unsigned int* nbufs = uv__get_nbufs(req); + if (req->result >= 0) { + dents = req->ptr; + nbufs = uv__get_nbufs(req); - dents = req->ptr; - if (*nbufs > 0 && *nbufs != (unsigned int) req->result) - (*nbufs)--; - for (; *nbufs < (unsigned int) req->result; (*nbufs)++) - uv__fs_scandir_free(dents[*nbufs]); + i = 0; + if (*nbufs > 0) + i = *nbufs - 1; + + n = (unsigned int) req->result; + for (; i < n; i++) + uv__fs_scandir_free(dents[i]); + } uv__fs_scandir_free(req->ptr); req->ptr = NULL; @@ -879,12 +920,17 @@ void uv_os_free_environ(uv_env_item_t* envitems, int count) { void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) { +#ifdef __linux__ + (void) &count; + uv__free(cpu_infos); +#else int i; for (i = 0; i < count; i++) uv__free(cpu_infos[i].model); uv__free(cpu_infos); +#endif /* __linux__ */ } @@ -898,7 +944,7 @@ __attribute__((destructor)) void uv_library_shutdown(void) { static int was_shutdown; - if (uv__load_relaxed(&was_shutdown)) + if (uv__exchange_int_relaxed(&was_shutdown, 1)) return; uv__process_title_cleanup(); @@ -909,7 +955,6 @@ void uv_library_shutdown(void) { #else uv__threadpool_cleanup(); #endif - uv__store_relaxed(&was_shutdown, 1); } @@ -955,6 +1000,15 @@ void uv__metrics_set_provider_entry_time(uv_loop_t* loop) { } +int uv_metrics_info(uv_loop_t* loop, uv_metrics_t* metrics) { + memcpy(metrics, + &uv__get_loop_metrics(loop)->metrics, + sizeof(*metrics)); + + return 0; +} + + uint64_t uv_metrics_idle_time(uv_loop_t* loop) { uv__loop_metrics_t* loop_metrics; uint64_t entry_time; diff --git a/deps/uv/src/uv-common.h b/deps/uv/src/uv-common.h index 6001b0cf68d0b0..decde5362c85f4 100644 --- a/deps/uv/src/uv-common.h +++ b/deps/uv/src/uv-common.h @@ -30,18 +30,17 @@ #include #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #include "uv.h" #include "uv/tree.h" #include "queue.h" #include "strscpy.h" +#ifndef _MSC_VER +# include +#endif + #if EDOM > 0 # define UV__ERR(x) (-(x)) #else @@ -53,19 +52,25 @@ extern int snprintf(char*, size_t, const char*, ...); #endif #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) +#define ARRAY_END(a) ((a) + ARRAY_SIZE(a)) #define container_of(ptr, type, member) \ ((type *) ((char *) (ptr) - offsetof(type, member))) +/* C11 defines static_assert to be a macro which calls _Static_assert. 
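Note: the uv-common.c hunk above centralizes uv_os_free_passwd() and adds uv_os_free_group(), each releasing the single allocation that backs the string fields. A short usage sketch of the public API involved (uv_os_get_passwd2()/uv_os_get_group() are new entry points and, per the Windows hunks later in this diff, return UV_ENOTSUP there):

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      uv_passwd_t pwd;

      if (uv_os_get_passwd(&pwd) == 0) {
        printf("user=%s home=%s shell=%s\n",
               pwd.username,
               pwd.homedir,
               pwd.shell != NULL ? pwd.shell : "(none)");
        uv_os_free_passwd(&pwd);  /* one call releases username/homedir/shell */
      }

      return 0;
    }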
*/ +#if defined(static_assert) +#define STATIC_ASSERT(expr) static_assert(expr, #expr) +#else #define STATIC_ASSERT(expr) \ void uv__static_assert(int static_assert_failed[1 - 2 * !(expr)]) +#endif -#if defined(__GNUC__) && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 7) -#define uv__load_relaxed(p) __atomic_load_n(p, __ATOMIC_RELAXED) -#define uv__store_relaxed(p, v) __atomic_store_n(p, v, __ATOMIC_RELAXED) +#ifdef _MSC_VER +#define uv__exchange_int_relaxed(p, v) \ + InterlockedExchangeNoFence((LONG volatile*)(p), v) #else -#define uv__load_relaxed(p) (*p) -#define uv__store_relaxed(p, v) do *p = v; while (0) +#define uv__exchange_int_relaxed(p, v) \ + atomic_exchange_explicit((_Atomic int*)(p), v, memory_order_relaxed) #endif #define UV__UDP_DGRAM_MAXSIZE (64 * 1024) @@ -83,7 +88,6 @@ enum { /* Used by streams. */ UV_HANDLE_LISTENING = 0x00000040, UV_HANDLE_CONNECTION = 0x00000080, - UV_HANDLE_SHUTTING = 0x00000100, UV_HANDLE_SHUT = 0x00000200, UV_HANDLE_READ_PARTIAL = 0x00000400, UV_HANDLE_READ_EOF = 0x00000800, @@ -263,6 +267,14 @@ void uv__threadpool_cleanup(void); #define uv__is_closing(h) \ (((h)->flags & (UV_HANDLE_CLOSING | UV_HANDLE_CLOSED)) != 0) +#if defined(_WIN32) +# define uv__is_stream_shutting(h) \ + (h->stream.conn.shutdown_req != NULL) +#else +# define uv__is_stream_shutting(h) \ + (h->shutdown_req != NULL) +#endif + #define uv__handle_start(h) \ do { \ if (((h)->flags & UV_HANDLE_ACTIVE) != 0) break; \ @@ -347,6 +359,21 @@ void uv__threadpool_cleanup(void); #define uv__get_loop_metrics(loop) \ (&uv__get_internal_fields(loop)->loop_metrics) +#define uv__metrics_inc_loop_count(loop) \ + do { \ + uv__get_loop_metrics(loop)->metrics.loop_count++; \ + } while (0) + +#define uv__metrics_inc_events(loop, e) \ + do { \ + uv__get_loop_metrics(loop)->metrics.events += (e); \ + } while (0) + +#define uv__metrics_inc_events_waiting(loop, e) \ + do { \ + uv__get_loop_metrics(loop)->metrics.events_waiting += (e); \ + } while (0) + /* Allocator prototypes */ void *uv__calloc(size_t count, size_t size); char *uv__strdup(const char* s); @@ -360,6 +387,7 @@ typedef struct uv__loop_metrics_s uv__loop_metrics_t; typedef struct uv__loop_internal_fields_s uv__loop_internal_fields_t; struct uv__loop_metrics_s { + uv_metrics_t metrics; uint64_t provider_entry_time; uint64_t provider_idle_time; uv_mutex_t lock; @@ -368,9 +396,37 @@ struct uv__loop_metrics_s { void uv__metrics_update_idle_time(uv_loop_t* loop); void uv__metrics_set_provider_entry_time(uv_loop_t* loop); +#ifdef __linux__ +struct uv__iou { + uint32_t* sqhead; + uint32_t* sqtail; + uint32_t* sqarray; + uint32_t sqmask; + uint32_t* sqflags; + uint32_t* cqhead; + uint32_t* cqtail; + uint32_t cqmask; + void* sq; /* pointer to munmap() on event loop teardown */ + void* cqe; /* pointer to array of struct uv__io_uring_cqe */ + void* sqe; /* pointer to array of struct uv__io_uring_sqe */ + size_t sqlen; + size_t cqlen; + size_t maxlen; + size_t sqelen; + int ringfd; + uint32_t in_flight; +}; +#endif /* __linux__ */ + struct uv__loop_internal_fields_s { unsigned int flags; uv__loop_metrics_t loop_metrics; + int current_timeout; +#ifdef __linux__ + struct uv__iou ctl; + struct uv__iou iou; + void* inv; /* used by uv__platform_invalidate_fd() */ +#endif /* __linux__ */ }; #endif /* UV_COMMON_H_ */ diff --git a/deps/uv/src/win/core.c b/deps/uv/src/win/core.c index 67af93e6571ed4..426edb18eb51d0 100644 --- a/deps/uv/src/win/core.c +++ b/deps/uv/src/win/core.c @@ -245,6 +245,9 @@ int uv_loop_init(uv_loop_t* loop) { err = 
uv_mutex_init(&lfields->loop_metrics.lock); if (err) goto fail_metrics_mutex_init; + memset(&lfields->loop_metrics.metrics, + 0, + sizeof(lfields->loop_metrics.metrics)); /* To prevent uninitialized memory access, loop->time must be initialized * to zero before calling uv_update_time for the first time. @@ -279,9 +282,6 @@ int uv_loop_init(uv_loop_t* loop) { memset(&loop->poll_peer_sockets, 0, sizeof loop->poll_peer_sockets); - loop->active_tcp_streams = 0; - loop->active_udp_streams = 0; - loop->timer_counter = 0; loop->stop_flag = 0; @@ -424,6 +424,7 @@ int uv_backend_timeout(const uv_loop_t* loop) { static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { + uv__loop_internal_fields_t* lfields; DWORD bytes; ULONG_PTR key; OVERLAPPED* overlapped; @@ -433,9 +434,10 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { uint64_t user_timeout; int reset_timeout; + lfields = uv__get_internal_fields(loop); timeout_time = loop->time + timeout; - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -450,6 +452,12 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { if (timeout != 0) uv__metrics_set_provider_entry_time(loop); + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. + */ + lfields->current_timeout = timeout; + GetQueuedCompletionStatus(loop->iocp, &bytes, &key, @@ -457,6 +465,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { timeout); if (reset_timeout != 0) { + if (overlapped && timeout == 0) + uv__metrics_inc_events_waiting(loop, 1); timeout = user_timeout; reset_timeout = 0; } @@ -469,6 +479,8 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { uv__metrics_update_idle_time(loop); if (overlapped) { + uv__metrics_inc_events(loop, 1); + /* Package was dequeued */ req = uv__overlapped_to_req(overlapped); uv__insert_pending_req(loop, req); @@ -503,6 +515,7 @@ static void uv__poll_wine(uv_loop_t* loop, DWORD timeout) { static void uv__poll(uv_loop_t* loop, DWORD timeout) { + uv__loop_internal_fields_t* lfields; BOOL success; uv_req_t* req; OVERLAPPED_ENTRY overlappeds[128]; @@ -511,11 +524,13 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { int repeat; uint64_t timeout_time; uint64_t user_timeout; + uint64_t actual_timeout; int reset_timeout; + lfields = uv__get_internal_fields(loop); timeout_time = loop->time + timeout; - if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) { + if (lfields->flags & UV_METRICS_IDLE_TIME) { reset_timeout = 1; user_timeout = timeout; timeout = 0; @@ -524,12 +539,20 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { } for (repeat = 0; ; repeat++) { + actual_timeout = timeout; + /* Only need to set the provider_entry_time if timeout != 0. The function * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME. */ if (timeout != 0) uv__metrics_set_provider_entry_time(loop); + /* Store the current timeout in a location that's globally accessible so + * other locations like uv__work_done() can determine whether the queue + * of events in the callback were waiting when poll was called. 
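Note: the uv-common and win/core hunks above add uv_metrics_info() plus loop_count/events/events_waiting counters to the per-loop metrics. A hedged usage sketch; it assumes the uv_metrics_t layout introduced by this update and an otherwise ordinary loop:

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_metrics_t m;

      /* Optional: idle-time tracking still requires opting in. */
      uv_loop_configure(loop, UV_METRICS_IDLE_TIME);

      uv_run(loop, UV_RUN_DEFAULT);

      if (uv_metrics_info(loop, &m) == 0)
        printf("iterations=%llu events=%llu waiting=%llu idle=%llu ns\n",
               (unsigned long long) m.loop_count,
               (unsigned long long) m.events,
               (unsigned long long) m.events_waiting,
               (unsigned long long) uv_metrics_idle_time(loop));

      return uv_loop_close(loop);
    }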
+ */ + lfields->current_timeout = timeout; + success = pGetQueuedCompletionStatusEx(loop->iocp, overlappeds, ARRAY_SIZE(overlappeds), @@ -543,9 +566,9 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { } /* Placed here because on success the loop will break whether there is an - * empty package or not, or if GetQueuedCompletionStatus returned early then - * the timeout will be updated and the loop will run again. In either case - * the idle time will need to be updated. + * empty package or not, or if pGetQueuedCompletionStatusEx returned early + * then the timeout will be updated and the loop will run again. In either + * case the idle time will need to be updated. */ uv__metrics_update_idle_time(loop); @@ -555,6 +578,10 @@ static void uv__poll(uv_loop_t* loop, DWORD timeout) { * meant only to wake us up. */ if (overlappeds[i].lpOverlapped) { + uv__metrics_inc_events(loop, 1); + if (actual_timeout == 0) + uv__metrics_inc_events_waiting(loop, 1); + req = uv__overlapped_to_req(overlappeds[i].lpOverlapped); uv__insert_pending_req(loop, req); } @@ -598,10 +625,17 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { if (!r) uv_update_time(loop); - while (r != 0 && loop->stop_flag == 0) { - uv_update_time(loop); + /* Maintain backwards compatibility by processing timers before entering the + * while loop for UV_RUN_DEFAULT. Otherwise timers only need to be executed + * once, which should be done after polling in order to maintain proper + * execution order of the conceptual event loop. */ + if (mode == UV_RUN_DEFAULT) { + if (r) + uv_update_time(loop); uv__run_timers(loop); + } + while (r != 0 && loop->stop_flag == 0) { can_sleep = loop->pending_reqs_tail == NULL && loop->idle_handles == NULL; uv__process_reqs(loop); @@ -612,6 +646,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT) timeout = uv_backend_timeout(loop); + uv__metrics_inc_loop_count(loop); + if (pGetQueuedCompletionStatusEx) uv__poll(loop, timeout); else @@ -632,18 +668,8 @@ int uv_run(uv_loop_t *loop, uv_run_mode mode) { uv__check_invoke(loop); uv__process_endgames(loop); - if (mode == UV_RUN_ONCE) { - /* UV_RUN_ONCE implies forward progress: at least one callback must have - * been invoked when it returns. uv__io_poll() can return without doing - * I/O (meaning: no callbacks) when its timeout expires - which means we - * have pending timers that satisfy the forward progress constraint. - * - * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from - * the check. - */ - uv_update_time(loop); - uv__run_timers(loop); - } + uv_update_time(loop); + uv__run_timers(loop); r = uv__loop_alive(loop); if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT) diff --git a/deps/uv/src/win/fs.c b/deps/uv/src/win/fs.c index 792307995f60c8..2fc7481f9a2d1e 100644 --- a/deps/uv/src/win/fs.c +++ b/deps/uv/src/win/fs.c @@ -36,6 +36,8 @@ #include "handle-inl.h" #include "fs-fd-hash-inl.h" +#include + #define UV_FS_FREE_PATHS 0x0002 #define UV_FS_FREE_PTR 0x0008 @@ -1706,11 +1708,36 @@ void fs__closedir(uv_fs_t* req) { INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf, int do_lstat) { + FILE_FS_DEVICE_INFORMATION device_info; FILE_ALL_INFORMATION file_info; FILE_FS_VOLUME_INFORMATION volume_info; NTSTATUS nt_status; IO_STATUS_BLOCK io_status; + nt_status = pNtQueryVolumeInformationFile(handle, + &io_status, + &device_info, + sizeof device_info, + FileFsDeviceInformation); + + /* Buffer overflow (a warning status code) is expected here. 
*/ + if (NT_ERROR(nt_status)) { + SetLastError(pRtlNtStatusToDosError(nt_status)); + return -1; + } + + /* If it's NUL device set fields as reasonable as possible and return. */ + if (device_info.DeviceType == FILE_DEVICE_NULL) { + memset(statbuf, 0, sizeof(uv_stat_t)); + statbuf->st_mode = _S_IFCHR; + statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) | + ((_S_IREAD | _S_IWRITE) >> 6); + statbuf->st_nlink = 1; + statbuf->st_blksize = 4096; + statbuf->st_rdev = FILE_DEVICE_NULL << 16; + return 0; + } + nt_status = pNtQueryInformationFile(handle, &io_status, &file_info, @@ -1915,6 +1942,37 @@ INLINE static void fs__stat_impl(uv_fs_t* req, int do_lstat) { } +INLINE static int fs__fstat_handle(int fd, HANDLE handle, uv_stat_t* statbuf) { + DWORD file_type; + + /* Each file type is processed differently. */ + file_type = uv_guess_handle(fd); + switch (file_type) { + /* Disk files use the existing logic from fs__stat_handle. */ + case UV_FILE: + return fs__stat_handle(handle, statbuf, 0); + + /* Devices and pipes are processed identically. There is no more information + * for them from any API. Fields are set as reasonably as possible and the + * function returns. */ + case UV_TTY: + case UV_NAMED_PIPE: + memset(statbuf, 0, sizeof(uv_stat_t)); + statbuf->st_mode = file_type == UV_TTY ? _S_IFCHR : _S_IFIFO; + statbuf->st_nlink = 1; + statbuf->st_rdev = (file_type == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16; + statbuf->st_ino = (uint64_t) handle; + return 0; + + /* If file type is unknown it is an error. */ + case UV_UNKNOWN_HANDLE: + default: + SetLastError(ERROR_INVALID_HANDLE); + return -1; + } +} + + static void fs__stat(uv_fs_t* req) { fs__stat_prepare_path(req->file.pathw); fs__stat_impl(req, 0); @@ -1940,7 +1998,7 @@ static void fs__fstat(uv_fs_t* req) { return; } - if (fs__stat_handle(handle, &req->statbuf, 0) != 0) { + if (fs__fstat_handle(fd, handle, &req->statbuf) != 0) { SET_REQ_WIN32_ERROR(req, GetLastError()); return; } @@ -2221,7 +2279,7 @@ static void fs__fchmod(uv_fs_t* req) { SET_REQ_WIN32_ERROR(req, pRtlNtStatusToDosError(nt_status)); goto fchmod_cleanup; } - /* Remeber to clear the flag later on */ + /* Remember to clear the flag later on */ clear_archive_flag = 1; } else { clear_archive_flag = 0; @@ -2604,7 +2662,10 @@ static void fs__readlink(uv_fs_t* req) { } if (fs__readlink_handle(handle, (char**) &req->ptr, NULL) != 0) { - SET_REQ_WIN32_ERROR(req, GetLastError()); + DWORD error = GetLastError(); + SET_REQ_WIN32_ERROR(req, error); + if (error == ERROR_NOT_A_REPARSE_POINT) + req->result = UV_EINVAL; CloseHandle(handle); return; } diff --git a/deps/uv/src/win/internal.h b/deps/uv/src/win/internal.h index 89c72b8a1a6dc0..bda321c17dc25d 100644 --- a/deps/uv/src/win/internal.h +++ b/deps/uv/src/win/internal.h @@ -267,7 +267,6 @@ void uv__util_init(void); uint64_t uv__hrtime(unsigned int scale); __declspec(noreturn) void uv_fatal_error(const int errorno, const char* syscall); -int uv__getpwuid_r(uv_passwd_t* pwd); int uv__convert_utf16_to_utf8(const WCHAR* utf16, int utf16len, char** utf8); int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16); diff --git a/deps/uv/src/win/pipe.c b/deps/uv/src/win/pipe.c index 998461811fb87f..787ba105c935b8 100644 --- a/deps/uv/src/win/pipe.c +++ b/deps/uv/src/win/pipe.c @@ -792,15 +792,17 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) { /* We're here because CreateFile on a pipe returned ERROR_PIPE_BUSY. 
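Note: the win/fs.c change above routes fstat through uv_guess_handle() so console and pipe descriptors report a character/FIFO device instead of failing. The caller-side API is unchanged; a minimal sketch, with fd 0 standing in for whatever descriptor is of interest:

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      uv_fs_t req;
      int r;

      /* A NULL callback makes the call synchronous. */
      r = uv_fs_fstat(uv_default_loop(), &req, 0, NULL);
      if (r == 0)
        printf("handle kind %d, st_mode %o, links %llu\n",
               (int) uv_guess_handle(0),
               (unsigned) req.statbuf.st_mode,
               (unsigned long long) req.statbuf.st_nlink);

      uv_fs_req_cleanup(&req);
      return 0;
    }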
We wait * up to 30 seconds for the pipe to become available with WaitNamedPipe. */ - while (WaitNamedPipeW(handle->name, 30000)) { + while (WaitNamedPipeW(req->u.connect.name, 30000)) { /* The pipe is now available, try to connect. */ - pipeHandle = open_named_pipe(handle->name, &duplex_flags); + pipeHandle = open_named_pipe(req->u.connect.name, &duplex_flags); if (pipeHandle != INVALID_HANDLE_VALUE) break; SwitchToThread(); } + uv__free(req->u.connect.name); + req->u.connect.name = NULL; if (pipeHandle != INVALID_HANDLE_VALUE) { SET_REQ_SUCCESS(req); req->u.connect.pipeHandle = pipeHandle; @@ -828,6 +830,7 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, req->cb = cb; req->u.connect.pipeHandle = INVALID_HANDLE_VALUE; req->u.connect.duplex_flags = 0; + req->u.connect.name = NULL; if (handle->flags & UV_HANDLE_PIPESERVER) { err = ERROR_INVALID_PARAMETER; @@ -859,10 +862,19 @@ void uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle, pipeHandle = open_named_pipe(handle->name, &duplex_flags); if (pipeHandle == INVALID_HANDLE_VALUE) { if (GetLastError() == ERROR_PIPE_BUSY) { + req->u.connect.name = uv__malloc(nameSize); + if (!req->u.connect.name) { + uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc"); + } + + memcpy(req->u.connect.name, handle->name, nameSize); + /* Wait for the server to make a pipe instance available. */ if (!QueueUserWorkItem(&pipe_connect_thread_proc, req, WT_EXECUTELONGFUNCTION)) { + uv__free(req->u.connect.name); + req->u.connect.name = NULL; err = GetLastError(); goto error; } @@ -1067,11 +1079,12 @@ int uv__pipe_accept(uv_pipe_t* server, uv_stream_t* client) { err = uv__tcp_xfer_import( (uv_tcp_t*) client, item->xfer_type, &item->xfer_info); + + uv__free(item); + if (err != 0) return err; - uv__free(item); - } else { pipe_client = (uv_pipe_t*) client; uv__pipe_connection_init(pipe_client); @@ -1638,9 +1651,13 @@ static DWORD uv__pipe_get_ipc_remote_pid(uv_pipe_t* handle) { /* If the both ends of the IPC pipe are owned by the same process, * the remote end pid may not yet be set. If so, do it here. * TODO: this is weird; it'd probably better to use a handshake. */ - if (*pid == 0) - *pid = GetCurrentProcessId(); - + if (*pid == 0) { + GetNamedPipeClientProcessId(handle->handle, pid); + if (*pid == GetCurrentProcessId()) { + GetNamedPipeServerProcessId(handle->handle, pid); + } + } + return *pid; } @@ -2069,9 +2086,9 @@ void uv__process_pipe_write_req(uv_loop_t* loop, uv_pipe_t* handle, uv__queue_non_overlapped_write(handle); } - if (handle->stream.conn.write_reqs_pending == 0) - if (handle->flags & UV_HANDLE_SHUTTING) - uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req); + if (handle->stream.conn.write_reqs_pending == 0 && + uv__is_stream_shutting(handle)) + uv__pipe_shutdown(loop, handle, handle->stream.conn.shutdown_req); DECREASE_PENDING_REQ_COUNT(handle); } @@ -2126,7 +2143,10 @@ void uv__process_pipe_connect_req(uv_loop_t* loop, uv_pipe_t* handle, if (REQ_SUCCESS(req)) { pipeHandle = req->u.connect.pipeHandle; duplex_flags = req->u.connect.duplex_flags; - err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags); + if (handle->flags & UV_HANDLE_CLOSING) + err = UV_ECANCELED; + else + err = uv__set_pipe_handle(loop, handle, pipeHandle, -1, duplex_flags); if (err) CloseHandle(pipeHandle); } else { @@ -2149,7 +2169,6 @@ void uv__process_pipe_shutdown_req(uv_loop_t* loop, uv_pipe_t* handle, /* Clear the shutdown_req field so we don't go here again. 
*/ handle->stream.conn.shutdown_req = NULL; - handle->flags &= ~UV_HANDLE_SHUTTING; UNREGISTER_HANDLE_REQ(loop, handle, req); if (handle->flags & UV_HANDLE_CLOSING) { @@ -2342,7 +2361,10 @@ int uv_pipe_open(uv_pipe_t* pipe, uv_file file) { if (pipe->ipc) { assert(!(pipe->flags & UV_HANDLE_NON_OVERLAPPED_PIPE)); - pipe->pipe.conn.ipc_remote_pid = uv_os_getppid(); + GetNamedPipeClientProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid); + if (pipe->pipe.conn.ipc_remote_pid == GetCurrentProcessId()) { + GetNamedPipeServerProcessId(os_handle, &pipe->pipe.conn.ipc_remote_pid); + } assert(pipe->pipe.conn.ipc_remote_pid != (DWORD)(uv_pid_t) -1); } return 0; diff --git a/deps/uv/src/win/poll.c b/deps/uv/src/win/poll.c index 53a4fd976121f7..7fec2b99650646 100644 --- a/deps/uv/src/win/poll.c +++ b/deps/uv/src/win/poll.c @@ -34,7 +34,9 @@ static const GUID uv_msafd_provider_ids[UV_MSAFD_PROVIDER_COUNT] = { {0xf9eab0c0, 0x26d4, 0x11d0, {0xbb, 0xbf, 0x00, 0xaa, 0x00, 0x6c, 0x34, 0xe4}}, {0x9fc48064, 0x7298, 0x43e4, - {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}} + {0xb7, 0xbd, 0x18, 0x1f, 0x20, 0x89, 0x79, 0x2a}}, + {0xa00943d9, 0x9c2e, 0x4633, + {0x9b, 0x59, 0x00, 0x57, 0xa3, 0x16, 0x09, 0x94}} }; typedef struct uv_single_fd_set_s { @@ -423,9 +425,8 @@ int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle, return uv_translate_sys_error(WSAGetLastError()); /* Try to obtain a base handle for the socket. This increases this chances that - * we find an AFD handle and are able to use the fast poll mechanism. This will - * always fail on windows XP/2k3, since they don't support the. SIO_BASE_HANDLE - * ioctl. */ + * we find an AFD handle and are able to use the fast poll mechanism. + */ #ifndef NDEBUG base_socket = INVALID_SOCKET; #endif diff --git a/deps/uv/src/win/process.c b/deps/uv/src/win/process.c index 24c633393fd15d..3e451e2291d6ed 100644 --- a/deps/uv/src/win/process.c +++ b/deps/uv/src/win/process.c @@ -32,6 +32,9 @@ #include "internal.h" #include "handle-inl.h" #include "req-inl.h" +#include +#include +#include /* GetModuleBaseNameW */ #define SIGKILL 9 @@ -144,7 +147,6 @@ static void uv__process_init(uv_loop_t* loop, uv_process_t* handle) { handle->exit_signal = 0; handle->wait_handle = INVALID_HANDLE_VALUE; handle->process_handle = INVALID_HANDLE_VALUE; - handle->child_stdio_buffer = NULL; handle->exit_cb_pending = 0; UV_REQ_INIT(&handle->exit_req, UV_PROCESS_EXIT); @@ -947,9 +949,11 @@ int uv_spawn(uv_loop_t* loop, STARTUPINFOW startup; PROCESS_INFORMATION info; DWORD process_flags; + BYTE* child_stdio_buffer; uv__process_init(loop, process); process->exit_cb = options->exit_cb; + child_stdio_buffer = NULL; if (options->flags & (UV_PROCESS_SETGID | UV_PROCESS_SETUID)) { return UV_ENOTSUP; @@ -1040,7 +1044,7 @@ int uv_spawn(uv_loop_t* loop, } } - err = uv__stdio_create(loop, options, &process->child_stdio_buffer); + err = uv__stdio_create(loop, options, &child_stdio_buffer); if (err) goto done; @@ -1059,12 +1063,12 @@ int uv_spawn(uv_loop_t* loop, startup.lpTitle = NULL; startup.dwFlags = STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW; - startup.cbReserved2 = uv__stdio_size(process->child_stdio_buffer); - startup.lpReserved2 = (BYTE*) process->child_stdio_buffer; + startup.cbReserved2 = uv__stdio_size(child_stdio_buffer); + startup.lpReserved2 = (BYTE*) child_stdio_buffer; - startup.hStdInput = uv__stdio_handle(process->child_stdio_buffer, 0); - startup.hStdOutput = uv__stdio_handle(process->child_stdio_buffer, 1); - startup.hStdError = uv__stdio_handle(process->child_stdio_buffer, 2); + 
startup.hStdInput = uv__stdio_handle(child_stdio_buffer, 0); + startup.hStdOutput = uv__stdio_handle(child_stdio_buffer, 1); + startup.hStdError = uv__stdio_handle(child_stdio_buffer, 2); process_flags = CREATE_UNICODE_ENVIRONMENT; @@ -1178,10 +1182,10 @@ int uv_spawn(uv_loop_t* loop, uv__free(env); uv__free(alloc_path); - if (process->child_stdio_buffer != NULL) { + if (child_stdio_buffer != NULL) { /* Clean up child stdio handles. */ - uv__stdio_destroy(process->child_stdio_buffer); - process->child_stdio_buffer = NULL; + uv__stdio_destroy(child_stdio_buffer); + child_stdio_buffer = NULL; } return uv_translate_sys_error(err); @@ -1193,7 +1197,120 @@ static int uv__kill(HANDLE process_handle, int signum) { return UV_EINVAL; } + /* Create a dump file for the targeted process, if the registry key + * `HKLM:Software\Microsoft\Windows\Windows Error Reporting\LocalDumps` + * exists. The location of the dumps can be influenced by the `DumpFolder` + * sub-key, which has a default value of `%LOCALAPPDATA%\CrashDumps`, see [0] + * for more detail. Note that if the dump folder does not exist, we attempt + * to create it, to match behavior with WER itself. + * [0]: https://learn.microsoft.com/en-us/windows/win32/wer/collecting-user-mode-dumps */ + if (signum == SIGQUIT) { + HKEY registry_key; + DWORD pid, ret; + WCHAR basename[MAX_PATH]; + + /* Get target process name. */ + GetModuleBaseNameW(process_handle, NULL, &basename[0], sizeof(basename)); + + /* Get PID of target process. */ + pid = GetProcessId(process_handle); + + /* Get LocalDumps directory path. */ + ret = RegOpenKeyExW( + HKEY_LOCAL_MACHINE, + L"SOFTWARE\\Microsoft\\Windows\\Windows Error Reporting\\LocalDumps", + 0, + KEY_QUERY_VALUE, + ®istry_key); + if (ret == ERROR_SUCCESS) { + HANDLE hDumpFile = NULL; + WCHAR dump_folder[MAX_PATH], dump_name[MAX_PATH]; + DWORD dump_folder_len = sizeof(dump_folder), key_type = 0; + ret = RegGetValueW(registry_key, + NULL, + L"DumpFolder", + RRF_RT_ANY, + &key_type, + (PVOID) dump_folder, + &dump_folder_len); + if (ret != ERROR_SUCCESS) { + /* Default value for `dump_folder` is `%LOCALAPPDATA%\CrashDumps`. */ + WCHAR* localappdata; + SHGetKnownFolderPath(&FOLDERID_LocalAppData, 0, NULL, &localappdata); + _snwprintf_s(dump_folder, + sizeof(dump_folder), + _TRUNCATE, + L"%ls\\CrashDumps", + localappdata); + CoTaskMemFree(localappdata); + } + RegCloseKey(registry_key); + + /* Create dump folder if it doesn't already exist. */ + CreateDirectoryW(dump_folder, NULL); + + /* Construct dump filename from process name and PID. */ + _snwprintf_s(dump_name, + sizeof(dump_name), + _TRUNCATE, + L"%ls\\%ls.%d.dmp", + dump_folder, + basename, + pid); + + hDumpFile = CreateFileW(dump_name, + GENERIC_WRITE, + 0, + NULL, + CREATE_NEW, + FILE_ATTRIBUTE_NORMAL, + NULL); + if (hDumpFile != INVALID_HANDLE_VALUE) { + DWORD dump_options, sym_options; + FILE_DISPOSITION_INFO DeleteOnClose = { TRUE }; + + /* If something goes wrong while writing it out, delete the file. */ + SetFileInformationByHandle(hDumpFile, + FileDispositionInfo, + &DeleteOnClose, + sizeof(DeleteOnClose)); + + /* Tell wine to dump ELF modules as well. */ + sym_options = SymGetOptions(); + SymSetOptions(sym_options | 0x40000000); + +/* MiniDumpWithAvxXStateContext might be undef in server2012r2 or mingw < 12 */ +#ifndef MiniDumpWithAvxXStateContext +#define MiniDumpWithAvxXStateContext 0x00200000 +#endif + /* We default to a fairly complete dump. In the future, we may want to + * allow clients to customize what kind of dump to create. 
*/ + dump_options = MiniDumpWithFullMemory | + MiniDumpIgnoreInaccessibleMemory | + MiniDumpWithAvxXStateContext; + + if (MiniDumpWriteDump(process_handle, + pid, + hDumpFile, + dump_options, + NULL, + NULL, + NULL)) { + /* Don't delete the file on close if we successfully wrote it out. */ + FILE_DISPOSITION_INFO DontDeleteOnClose = { FALSE }; + SetFileInformationByHandle(hDumpFile, + FileDispositionInfo, + &DontDeleteOnClose, + sizeof(DontDeleteOnClose)); + } + SymSetOptions(sym_options); + CloseHandle(hDumpFile); + } + } + } + switch (signum) { + case SIGQUIT: case SIGTERM: case SIGKILL: case SIGINT: { diff --git a/deps/uv/src/win/stream.c b/deps/uv/src/win/stream.c index 292bf588da6b2f..7bf9ca388cb0f0 100644 --- a/deps/uv/src/win/stream.c +++ b/deps/uv/src/win/stream.c @@ -204,7 +204,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { uv_loop_t* loop = handle->loop; if (!(handle->flags & UV_HANDLE_WRITABLE) || - handle->flags & UV_HANDLE_SHUTTING || + uv__is_stream_shutting(handle) || uv__is_closing(handle)) { return UV_ENOTCONN; } @@ -214,7 +214,6 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) { req->cb = cb; handle->flags &= ~UV_HANDLE_WRITABLE; - handle->flags |= UV_HANDLE_SHUTTING; handle->stream.conn.shutdown_req = req; handle->reqs_pending++; REGISTER_HANDLE_REQ(loop, handle, req); diff --git a/deps/uv/src/win/tcp.c b/deps/uv/src/win/tcp.c index b6aa4c512050e0..6b282e0b501c0d 100644 --- a/deps/uv/src/win/tcp.c +++ b/deps/uv/src/win/tcp.c @@ -29,14 +29,6 @@ #include "req-inl.h" -/* - * Threshold of active tcp streams for which to preallocate tcp read buffers. - * (Due to node slab allocator performing poorly under this pattern, - * the optimization is temporarily disabled (threshold=0). This will be - * revisited once node allocator is improved.) - */ -const unsigned int uv_active_tcp_streams_threshold = 0; - /* * Number of simultaneous pending AcceptEx calls. */ @@ -214,7 +206,6 @@ void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown assert(stream->flags & UV_HANDLE_CONNECTION); stream->stream.conn.shutdown_req = NULL; - stream->flags &= ~UV_HANDLE_SHUTTING; UNREGISTER_HANDLE_REQ(loop, stream, req); err = 0; @@ -274,7 +265,6 @@ void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) { } uv__handle_close(handle); - loop->active_tcp_streams--; } @@ -484,26 +474,9 @@ static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) { req = &handle->read_req; memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); - /* - * Preallocate a read buffer if the number of active streams is below - * the threshold. - */ - if (loop->active_tcp_streams < uv_active_tcp_streams_threshold) { - handle->flags &= ~UV_HANDLE_ZERO_READ; - handle->tcp.conn.read_buffer = uv_buf_init(NULL, 0); - handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->tcp.conn.read_buffer); - if (handle->tcp.conn.read_buffer.base == NULL || - handle->tcp.conn.read_buffer.len == 0) { - handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &handle->tcp.conn.read_buffer); - return; - } - assert(handle->tcp.conn.read_buffer.base != NULL); - buf = handle->tcp.conn.read_buffer; - } else { - handle->flags |= UV_HANDLE_ZERO_READ; - buf.base = (char*) &uv_zero_; - buf.len = 0; - } + handle->flags |= UV_HANDLE_ZERO_READ; + buf.base = (char*) &uv_zero_; + buf.len = 0; /* Prepare the overlapped structure. 
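Note: the win/process.c block above makes uv__kill() write a minidump on SIGQUIT when the WER LocalDumps registry key exists, before falling through to the normal terminate path. From the caller's side this is just a signal number; a hedged sketch (the fallback #define assumes the POSIX value 3 only because MSVC's <signal.h> does not provide SIGQUIT; the patch set is expected to define it elsewhere):

    #include <uv.h>
    #include <signal.h>

    #ifndef SIGQUIT
    #define SIGQUIT 3  /* POSIX value; illustrative fallback only */
    #endif

    /* 'proc' is assumed to be a uv_process_t already started with uv_spawn(). */
    static int request_dump_and_terminate(uv_process_t* proc) {
      return uv_process_kill(proc, SIGQUIT);
      /* or, with only a pid: uv_kill(pid, SIGQUIT); */
    }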
*/ memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped)); @@ -550,7 +523,7 @@ int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) { struct linger l = { 1, 0 }; /* Disallow setting SO_LINGER to zero due to some platform inconsistencies */ - if (handle->flags & UV_HANDLE_SHUTTING) + if (uv__is_stream_shutting(handle)) return UV_EINVAL; if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l))) @@ -654,7 +627,6 @@ int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) { int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) { - uv_loop_t* loop = server->loop; int err = 0; int family; @@ -716,8 +688,6 @@ int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) { } } - loop->active_tcp_streams++; - return err; } @@ -1163,7 +1133,7 @@ void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle, closesocket(handle->socket); handle->socket = INVALID_SOCKET; } - if (handle->flags & UV_HANDLE_SHUTTING) + if (uv__is_stream_shutting(handle)) uv__process_tcp_shutdown_req(loop, handle, handle->stream.conn.shutdown_req); @@ -1248,7 +1218,6 @@ void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle, 0) == 0) { uv__connection_init((uv_stream_t*)handle); handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; - loop->active_tcp_streams++; } else { err = WSAGetLastError(); } @@ -1331,7 +1300,6 @@ int uv__tcp_xfer_import(uv_tcp_t* tcp, tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE; } - tcp->loop->active_tcp_streams++; return 0; } @@ -1432,7 +1400,7 @@ static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) { uv_tcp_non_ifs_lsp_ipv4; /* If there are non-ifs LSPs then try to obtain a base handle for the socket. - * This will always fail on Windows XP/3k. */ + */ if (non_ifs_lsp) { DWORD bytes; if (WSAIoctl(socket, diff --git a/deps/uv/src/win/thread.c b/deps/uv/src/win/thread.c index d3b1c96b6199a7..57c25e8f5a861c 100644 --- a/deps/uv/src/win/thread.c +++ b/deps/uv/src/win/thread.c @@ -180,6 +180,81 @@ int uv_thread_create_ex(uv_thread_t* tid, return UV_EIO; } +int uv_thread_setaffinity(uv_thread_t* tid, + char* cpumask, + char* oldmask, + size_t mask_size) { + int i; + HANDLE hproc; + DWORD_PTR procmask; + DWORD_PTR sysmask; + DWORD_PTR threadmask; + DWORD_PTR oldthreadmask; + int cpumasksize; + + cpumasksize = uv_cpumask_size(); + assert(cpumasksize > 0); + if (mask_size < (size_t)cpumasksize) + return UV_EINVAL; + + hproc = GetCurrentProcess(); + if (!GetProcessAffinityMask(hproc, &procmask, &sysmask)) + return uv_translate_sys_error(GetLastError()); + + threadmask = 0; + for (i = 0; i < cpumasksize; i++) { + if (cpumask[i]) { + if (procmask & (1 << i)) + threadmask |= 1 << i; + else + return UV_EINVAL; + } + } + + oldthreadmask = SetThreadAffinityMask(*tid, threadmask); + if (oldthreadmask == 0) + return uv_translate_sys_error(GetLastError()); + + if (oldmask != NULL) { + for (i = 0; i < cpumasksize; i++) + oldmask[i] = (oldthreadmask >> i) & 1; + } + + return 0; +} + +int uv_thread_getaffinity(uv_thread_t* tid, + char* cpumask, + size_t mask_size) { + int i; + HANDLE hproc; + DWORD_PTR procmask; + DWORD_PTR sysmask; + DWORD_PTR threadmask; + int cpumasksize; + + cpumasksize = uv_cpumask_size(); + assert(cpumasksize > 0); + if (mask_size < (size_t)cpumasksize) + return UV_EINVAL; + + hproc = GetCurrentProcess(); + if (!GetProcessAffinityMask(hproc, &procmask, &sysmask)) + return uv_translate_sys_error(GetLastError()); + + threadmask = SetThreadAffinityMask(*tid, procmask); + if (threadmask == 0 || 
SetThreadAffinityMask(*tid, threadmask) == 0) + return uv_translate_sys_error(GetLastError()); + + for (i = 0; i < cpumasksize; i++) + cpumask[i] = (threadmask >> i) & 1; + + return 0; +} + +int uv_thread_getcpu(void) { + return GetCurrentProcessorNumber(); +} uv_thread_t uv_thread_self(void) { uv_thread_t key; @@ -374,6 +449,7 @@ void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) { abort(); } + int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) { if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6))) return 0; @@ -383,69 +459,6 @@ int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) { } -int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) { - int err; - - barrier->n = count; - barrier->count = 0; - - err = uv_mutex_init(&barrier->mutex); - if (err) - return err; - - err = uv_sem_init(&barrier->turnstile1, 0); - if (err) - goto error2; - - err = uv_sem_init(&barrier->turnstile2, 1); - if (err) - goto error; - - return 0; - -error: - uv_sem_destroy(&barrier->turnstile1); -error2: - uv_mutex_destroy(&barrier->mutex); - return err; - -} - - -void uv_barrier_destroy(uv_barrier_t* barrier) { - uv_sem_destroy(&barrier->turnstile2); - uv_sem_destroy(&barrier->turnstile1); - uv_mutex_destroy(&barrier->mutex); -} - - -int uv_barrier_wait(uv_barrier_t* barrier) { - int serial_thread; - - uv_mutex_lock(&barrier->mutex); - if (++barrier->count == barrier->n) { - uv_sem_wait(&barrier->turnstile2); - uv_sem_post(&barrier->turnstile1); - } - uv_mutex_unlock(&barrier->mutex); - - uv_sem_wait(&barrier->turnstile1); - uv_sem_post(&barrier->turnstile1); - - uv_mutex_lock(&barrier->mutex); - serial_thread = (--barrier->count == 0); - if (serial_thread) { - uv_sem_wait(&barrier->turnstile1); - uv_sem_post(&barrier->turnstile2); - } - uv_mutex_unlock(&barrier->mutex); - - uv_sem_wait(&barrier->turnstile2); - uv_sem_post(&barrier->turnstile2); - return serial_thread; -} - - int uv_key_create(uv_key_t* key) { key->tls_index = TlsAlloc(); if (key->tls_index == TLS_OUT_OF_INDEXES) diff --git a/deps/uv/src/win/tty.c b/deps/uv/src/win/tty.c index 267ca64519963f..60f249b6a2af6f 100644 --- a/deps/uv/src/win/tty.c +++ b/deps/uv/src/win/tty.c @@ -23,12 +23,7 @@ #include #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #ifndef COMMON_LVB_REVERSE_VIDEO # define COMMON_LVB_REVERSE_VIDEO 0x4000 @@ -175,14 +170,14 @@ void uv__console_init(void) { 0); if (uv__tty_console_handle != INVALID_HANDLE_VALUE) { CONSOLE_SCREEN_BUFFER_INFO sb_info; - QueueUserWorkItem(uv__tty_console_resize_message_loop_thread, - NULL, - WT_EXECUTELONGFUNCTION); uv_mutex_init(&uv__tty_console_resize_mutex); if (GetConsoleScreenBufferInfo(uv__tty_console_handle, &sb_info)) { uv__tty_console_width = sb_info.dwSize.X; uv__tty_console_height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1; } + QueueUserWorkItem(uv__tty_console_resize_message_loop_thread, + NULL, + WT_EXECUTELONGFUNCTION); } } @@ -2239,11 +2234,11 @@ void uv__process_tty_write_req(uv_loop_t* loop, uv_tty_t* handle, handle->stream.conn.write_reqs_pending--; - if (handle->stream.conn.write_reqs_pending == 0) - if (handle->flags & UV_HANDLE_SHUTTING) - uv__process_tty_shutdown_req(loop, - handle, - handle->stream.conn.shutdown_req); + if (handle->stream.conn.write_reqs_pending == 0 && + uv__is_stream_shutting(handle)) + uv__process_tty_shutdown_req(loop, + handle, + handle->stream.conn.shutdown_req); 
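Note: the win/thread.c hunk above adds uv_thread_setaffinity(), uv_thread_getaffinity(), and uv_thread_getcpu(). A hedged sketch pinning a worker thread to CPU 0; the worker/mask names are illustrative, and the short uv_sleep() only gives main() time to apply the mask before the worker reads its CPU:

    #include <stdio.h>
    #include <stdlib.h>
    #include <uv.h>

    static void worker(void* arg) {
      (void) arg;
      uv_sleep(50);                           /* let main() pin us first */
      printf("worker is on cpu %d\n", uv_thread_getcpu());
    }

    int main(void) {
      uv_thread_t tid;
      int size;
      char* mask;

      size = uv_cpumask_size();
      if (size <= 0)
        return 1;                             /* affinity unsupported here */

      mask = calloc(size, 1);
      if (mask == NULL)
        return 1;
      mask[0] = 1;                            /* allow CPU 0 only */

      if (uv_thread_create(&tid, worker, NULL) == 0) {
        uv_thread_setaffinity(&tid, mask, NULL, size);
        uv_thread_getaffinity(&tid, mask, size);  /* read the mask back */
        uv_thread_join(&tid);
      }

      free(mask);
      return 0;
    }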
DECREASE_PENDING_REQ_COUNT(handle); } @@ -2274,7 +2269,6 @@ void uv__process_tty_shutdown_req(uv_loop_t* loop, uv_tty_t* stream, uv_shutdown assert(req); stream->stream.conn.shutdown_req = NULL; - stream->flags &= ~UV_HANDLE_SHUTTING; UNREGISTER_HANDLE_REQ(loop, stream, req); /* TTY shutdown is really just a no-op */ @@ -2429,7 +2423,6 @@ static void uv__tty_console_signal_resize(void) { height = sb_info.srWindow.Bottom - sb_info.srWindow.Top + 1; uv_mutex_lock(&uv__tty_console_resize_mutex); - assert(uv__tty_console_width != -1 && uv__tty_console_height != -1); if (width != uv__tty_console_width || height != uv__tty_console_height) { uv__tty_console_width = width; uv__tty_console_height = height; diff --git a/deps/uv/src/win/udp.c b/deps/uv/src/win/udp.c index eaebc1eda8f492..8a982d1907d707 100644 --- a/deps/uv/src/win/udp.c +++ b/deps/uv/src/win/udp.c @@ -29,11 +29,6 @@ #include "req-inl.h" -/* - * Threshold of active udp streams for which to preallocate udp read buffers. - */ -const unsigned int uv_active_udp_streams_threshold = 0; - /* A zero-size buffer for use by uv_udp_read */ static char uv_zero_[] = ""; int uv_udp_getpeername(const uv_udp_t* handle, @@ -276,84 +271,35 @@ static void uv__udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) { req = &handle->recv_req; memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped)); - /* - * Preallocate a read buffer if the number of active streams is below - * the threshold. - */ - if (loop->active_udp_streams < uv_active_udp_streams_threshold) { - handle->flags &= ~UV_HANDLE_ZERO_READ; - - handle->recv_buffer = uv_buf_init(NULL, 0); - handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer); - if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) { - handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0); - return; - } - assert(handle->recv_buffer.base != NULL); - - buf = handle->recv_buffer; - memset(&handle->recv_from, 0, sizeof handle->recv_from); - handle->recv_from_len = sizeof handle->recv_from; - flags = 0; - - result = handle->func_wsarecvfrom(handle->socket, - (WSABUF*) &buf, - 1, - &bytes, - &flags, - (struct sockaddr*) &handle->recv_from, - &handle->recv_from_len, - &req->u.io.overlapped, - NULL); - - if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { - /* Process the req without IOCP. */ - handle->flags |= UV_HANDLE_READ_PENDING; - req->u.io.overlapped.InternalHigh = bytes; - handle->reqs_pending++; - uv__insert_pending_req(loop, req); - } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { - /* The req will be processed with IOCP. */ - handle->flags |= UV_HANDLE_READ_PENDING; - handle->reqs_pending++; - } else { - /* Make this req pending reporting an error. */ - SET_REQ_ERROR(req, WSAGetLastError()); - uv__insert_pending_req(loop, req); - handle->reqs_pending++; - } + handle->flags |= UV_HANDLE_ZERO_READ; + + buf.base = (char*) uv_zero_; + buf.len = 0; + flags = MSG_PEEK; + result = handle->func_wsarecv(handle->socket, + (WSABUF*) &buf, + 1, + &bytes, + &flags, + &req->u.io.overlapped, + NULL); + + if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { + /* Process the req without IOCP. */ + handle->flags |= UV_HANDLE_READ_PENDING; + req->u.io.overlapped.InternalHigh = bytes; + handle->reqs_pending++; + uv__insert_pending_req(loop, req); + } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { + /* The req will be processed with IOCP. 
*/ + handle->flags |= UV_HANDLE_READ_PENDING; + handle->reqs_pending++; } else { - handle->flags |= UV_HANDLE_ZERO_READ; - - buf.base = (char*) uv_zero_; - buf.len = 0; - flags = MSG_PEEK; - - result = handle->func_wsarecv(handle->socket, - (WSABUF*) &buf, - 1, - &bytes, - &flags, - &req->u.io.overlapped, - NULL); - - if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) { - /* Process the req without IOCP. */ - handle->flags |= UV_HANDLE_READ_PENDING; - req->u.io.overlapped.InternalHigh = bytes; - handle->reqs_pending++; - uv__insert_pending_req(loop, req); - } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) { - /* The req will be processed with IOCP. */ - handle->flags |= UV_HANDLE_READ_PENDING; - handle->reqs_pending++; - } else { - /* Make this req pending reporting an error. */ - SET_REQ_ERROR(req, WSAGetLastError()); - uv__insert_pending_req(loop, req); - handle->reqs_pending++; - } + /* Make this req pending reporting an error. */ + SET_REQ_ERROR(req, WSAGetLastError()); + uv__insert_pending_req(loop, req); + handle->reqs_pending++; } } @@ -376,7 +322,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, handle->flags |= UV_HANDLE_READING; INCREASE_ACTIVE_COUNT(loop, handle); - loop->active_udp_streams++; handle->recv_cb = recv_cb; handle->alloc_cb = alloc_cb; @@ -393,7 +338,6 @@ int uv__udp_recv_start(uv_udp_t* handle, uv_alloc_cb alloc_cb, int uv__udp_recv_stop(uv_udp_t* handle) { if (handle->flags & UV_HANDLE_READING) { handle->flags &= ~UV_HANDLE_READING; - handle->loop->active_udp_streams--; DECREASE_ACTIVE_COUNT(loop, handle); } @@ -497,57 +441,68 @@ void uv__process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle, DWORD bytes, err, flags; struct sockaddr_storage from; int from_len; + int count; + + /* Prevent loop starvation when the data comes in as fast as + * (or faster than) we can read it. */ + count = 32; + + do { + /* Do at most `count` nonblocking receive. */ + buf = uv_buf_init(NULL, 0); + handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf); + if (buf.base == NULL || buf.len == 0) { + handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0); + goto done; + } - /* Do a nonblocking receive. - * TODO: try to read multiple datagrams at once. FIONREAD maybe? */ - buf = uv_buf_init(NULL, 0); - handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf); - if (buf.base == NULL || buf.len == 0) { - handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0); - goto done; - } - assert(buf.base != NULL); - - memset(&from, 0, sizeof from); - from_len = sizeof from; + memset(&from, 0, sizeof from); + from_len = sizeof from; - flags = 0; + flags = 0; - if (WSARecvFrom(handle->socket, - (WSABUF*)&buf, - 1, - &bytes, - &flags, - (struct sockaddr*) &from, - &from_len, - NULL, - NULL) != SOCKET_ERROR) { + if (WSARecvFrom(handle->socket, + (WSABUF*)&buf, + 1, + &bytes, + &flags, + (struct sockaddr*) &from, + &from_len, + NULL, + NULL) != SOCKET_ERROR) { - /* Message received */ - handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0); - } else { - err = WSAGetLastError(); - if (err == WSAEMSGSIZE) { - /* Message truncated */ - handle->recv_cb(handle, - bytes, - &buf, - (const struct sockaddr*) &from, - UV_UDP_PARTIAL); - } else if (err == WSAEWOULDBLOCK) { - /* Kernel buffer empty */ - handle->recv_cb(handle, 0, &buf, NULL, 0); - } else if (err == WSAECONNRESET || err == WSAENETRESET) { - /* WSAECONNRESET/WSANETRESET is ignored because this just indicates - * that a previous sendto operation failed. 
- */ - handle->recv_cb(handle, 0, &buf, NULL, 0); + /* Message received */ + err = ERROR_SUCCESS; + handle->recv_cb(handle, bytes, &buf, (const struct sockaddr*) &from, 0); } else { - /* Any other error that we want to report back to the user. */ - uv_udp_recv_stop(handle); - handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); + err = WSAGetLastError(); + if (err == WSAEMSGSIZE) { + /* Message truncated */ + handle->recv_cb(handle, + bytes, + &buf, + (const struct sockaddr*) &from, + UV_UDP_PARTIAL); + } else if (err == WSAEWOULDBLOCK) { + /* Kernel buffer empty */ + handle->recv_cb(handle, 0, &buf, NULL, 0); + } else if (err == WSAECONNRESET || err == WSAENETRESET) { + /* WSAECONNRESET/WSANETRESET is ignored because this just indicates + * that a previous sendto operation failed. + */ + handle->recv_cb(handle, 0, &buf, NULL, 0); + } else { + /* Any other error that we want to report back to the user. */ + uv_udp_recv_stop(handle); + handle->recv_cb(handle, uv_translate_sys_error(err), &buf, NULL, 0); + } } } + while (err == ERROR_SUCCESS && + count-- > 0 && + /* The recv_cb callback may decide to pause or close the handle. */ + (handle->flags & UV_HANDLE_READING) && + !(handle->flags & UV_HANDLE_READ_PENDING)); } done: diff --git a/deps/uv/src/win/util.c b/deps/uv/src/win/util.c index 99432053cc3b24..f6ec79cd57b501 100644 --- a/deps/uv/src/win/util.c +++ b/deps/uv/src/win/util.c @@ -31,6 +31,7 @@ #include "internal.h" /* clang-format off */ +#include #include #include #include @@ -121,9 +122,6 @@ int uv_exepath(char* buffer, size_t* size_ptr) { goto error; } - /* utf16_len contains the length, *not* including the terminating null. */ - utf16_buffer[utf16_len] = L'\0'; - /* Convert to UTF-8 */ utf8_len = WideCharToMultiByte(CP_UTF8, 0, @@ -151,6 +149,51 @@ int uv_exepath(char* buffer, size_t* size_ptr) { } +static int uv__cwd(WCHAR** buf, DWORD *len) { + WCHAR* p; + DWORD n; + DWORD t; + + t = GetCurrentDirectoryW(0, NULL); + for (;;) { + if (t == 0) + return uv_translate_sys_error(GetLastError()); + + /* |t| is the size of the buffer _including_ nul. */ + p = uv__malloc(t * sizeof(*p)); + if (p == NULL) + return UV_ENOMEM; + + /* |n| is the size of the buffer _excluding_ nul but _only on success_. + * If |t| was too small because another thread changed the working + * directory, |n| is the size the buffer should be _including_ nul. + * It therefore follows we must resize when n >= t and fail when n == 0. + */ + n = GetCurrentDirectoryW(t, p); + if (n > 0) + if (n < t) + break; + + uv__free(p); + t = n; + } + + /* The returned directory should not have a trailing slash, unless it points + * at a drive root, like c:\. Remove it if needed. + */ + t = n - 1; + if (p[t] == L'\\' && !(n == 3 && p[1] == L':')) { + p[t] = L'\0'; + n = t; + } + + *buf = p; + *len = n; + + return 0; +} + + int uv_cwd(char* buffer, size_t* size) { DWORD utf16_len; WCHAR *utf16_buffer; @@ -160,30 +203,9 @@ int uv_cwd(char* buffer, size_t* size) { return UV_EINVAL; } - utf16_len = GetCurrentDirectoryW(0, NULL); - if (utf16_len == 0) { - return uv_translate_sys_error(GetLastError()); - } - utf16_buffer = uv__malloc(utf16_len * sizeof(WCHAR)); - if (utf16_buffer == NULL) { - return UV_ENOMEM; - } - - utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer); - if (utf16_len == 0) { - uv__free(utf16_buffer); - return uv_translate_sys_error(GetLastError()); - } - - /* utf16_len contains the length, *not* including the terminating null. 
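Note: the win/udp.c rework above drains up to 32 datagrams per completion, using a zero-byte MSG_PEEK read as the wakeup instead of preallocated buffers. Nothing changes for callers; for context, the receive path it feeds is ordinary libuv usage like the following (port 9999 and the callback names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <uv.h>

    static void on_alloc(uv_handle_t* h, size_t suggested, uv_buf_t* buf) {
      (void) h;
      buf->base = malloc(suggested);
      buf->len = buf->base != NULL ? suggested : 0;
    }

    static void on_recv(uv_udp_t* h, ssize_t nread, const uv_buf_t* buf,
                        const struct sockaddr* addr, unsigned flags) {
      (void) h;
      (void) flags;
      if (nread > 0 && addr != NULL)
        printf("received %d bytes\n", (int) nread);
      free(buf->base);  /* nread == 0 && addr == NULL just means "no more data" */
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      struct sockaddr_in addr;
      uv_udp_t udp;

      uv_udp_init(loop, &udp);
      uv_ip4_addr("0.0.0.0", 9999, &addr);
      uv_udp_bind(&udp, (const struct sockaddr*) &addr, 0);
      uv_udp_recv_start(&udp, on_alloc, on_recv);

      return uv_run(loop, UV_RUN_DEFAULT);
    }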
*/ - utf16_buffer[utf16_len] = L'\0'; - - /* The returned directory should not have a trailing slash, unless it points - * at a drive root, like c:\. Remove it if needed. */ - if (utf16_buffer[utf16_len - 1] == L'\\' && - !(utf16_len == 3 && utf16_buffer[1] == L':')) { - utf16_len--; - utf16_buffer[utf16_len] = L'\0'; + r = uv__cwd(&utf16_buffer, &utf16_len); + if (r < 0) { + return r; } /* Check how much space we need */ @@ -226,8 +248,9 @@ int uv_cwd(char* buffer, size_t* size) { int uv_chdir(const char* dir) { WCHAR *utf16_buffer; - size_t utf16_len, new_utf16_len; + DWORD utf16_len; WCHAR drive_letter, env_var[4]; + int r; if (dir == NULL) { return UV_EINVAL; @@ -262,32 +285,22 @@ int uv_chdir(const char* dir) { return uv_translate_sys_error(GetLastError()); } + /* uv__cwd() will return a new buffer. */ + uv__free(utf16_buffer); + utf16_buffer = NULL; + /* Windows stores the drive-local path in an "hidden" environment variable, * which has the form "=C:=C:\Windows". SetCurrentDirectory does not update * this, so we'll have to do it. */ - new_utf16_len = GetCurrentDirectoryW(utf16_len, utf16_buffer); - if (new_utf16_len > utf16_len ) { - uv__free(utf16_buffer); - utf16_buffer = uv__malloc(new_utf16_len * sizeof(WCHAR)); - if (utf16_buffer == NULL) { - /* When updating the environment variable fails, return UV_OK anyway. - * We did successfully change current working directory, only updating - * hidden env variable failed. */ - return 0; - } - new_utf16_len = GetCurrentDirectoryW(new_utf16_len, utf16_buffer); - } - if (utf16_len == 0) { - uv__free(utf16_buffer); + r = uv__cwd(&utf16_buffer, &utf16_len); + if (r == UV_ENOMEM) { + /* When updating the environment variable fails, return UV_OK anyway. + * We did successfully change current working directory, only updating + * hidden env variable failed. */ return 0; } - - /* The returned directory should not have a trailing slash, unless it points - * at a drive root, like c:\. Remove it if needed. */ - if (utf16_buffer[utf16_len - 1] == L'\\' && - !(utf16_len == 3 && utf16_buffer[1] == L':')) { - utf16_len--; - utf16_buffer[utf16_len] = L'\0'; + if (r < 0) { + return r; } if (utf16_len < 2 || utf16_buffer[1] != L':') { @@ -330,7 +343,7 @@ uint64_t uv_get_free_memory(void) { memory_status.dwLength = sizeof(memory_status); if (!GlobalMemoryStatusEx(&memory_status)) { - return -1; + return 0; } return (uint64_t)memory_status.ullAvailPhys; @@ -342,7 +355,7 @@ uint64_t uv_get_total_memory(void) { memory_status.dwLength = sizeof(memory_status); if (!GlobalMemoryStatusEx(&memory_status)) { - return -1; + return 0; } return (uint64_t)memory_status.ullTotalPhys; @@ -354,6 +367,11 @@ uint64_t uv_get_constrained_memory(void) { } +uint64_t uv_get_available_memory(void) { + return uv_get_free_memory(); +} + + uv_pid_t uv_os_getpid(void) { return GetCurrentProcessId(); } @@ -487,11 +505,43 @@ int uv_get_process_title(char* buffer, size_t size) { } +/* https://github.com/libuv/libuv/issues/1674 */ +int uv_clock_gettime(uv_clock_id clock_id, uv_timespec64_t* ts) { + FILETIME ft; + int64_t t; + + if (ts == NULL) + return UV_EFAULT; + + switch (clock_id) { + case UV_CLOCK_MONOTONIC: + uv__once_init(); + t = uv__hrtime(UV__NANOSEC); + ts->tv_sec = t / 1000000000; + ts->tv_nsec = t % 1000000000; + return 0; + case UV_CLOCK_REALTIME: + GetSystemTimePreciseAsFileTime(&ft); + /* In 100-nanosecond increments from 1601-01-01 UTC because why not? */ + t = (int64_t) ft.dwHighDateTime << 32 | ft.dwLowDateTime; + /* Convert to UNIX epoch, 1970-01-01. 
Still in 100 ns increments. */ + t -= 116444736000000000ll; + /* Now convert to seconds and nanoseconds. */ + ts->tv_sec = t / 10000000; + ts->tv_nsec = t % 10000000 * 100; + return 0; + } + + return UV_EINVAL; +} + + uint64_t uv_hrtime(void) { uv__once_init(); return uv__hrtime(UV__NANOSEC); } + uint64_t uv__hrtime(unsigned int scale) { LARGE_INTEGER counter; double scaled_freq; @@ -678,71 +728,6 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos_ptr, int* cpu_count_ptr) { } -static int is_windows_version_or_greater(DWORD os_major, - DWORD os_minor, - WORD service_pack_major, - WORD service_pack_minor) { - OSVERSIONINFOEX osvi; - DWORDLONG condition_mask = 0; - int op = VER_GREATER_EQUAL; - - /* Initialize the OSVERSIONINFOEX structure. */ - ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); - osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); - osvi.dwMajorVersion = os_major; - osvi.dwMinorVersion = os_minor; - osvi.wServicePackMajor = service_pack_major; - osvi.wServicePackMinor = service_pack_minor; - - /* Initialize the condition mask. */ - VER_SET_CONDITION(condition_mask, VER_MAJORVERSION, op); - VER_SET_CONDITION(condition_mask, VER_MINORVERSION, op); - VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMAJOR, op); - VER_SET_CONDITION(condition_mask, VER_SERVICEPACKMINOR, op); - - /* Perform the test. */ - return (int) VerifyVersionInfo( - &osvi, - VER_MAJORVERSION | VER_MINORVERSION | - VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR, - condition_mask); -} - - -static int address_prefix_match(int family, - struct sockaddr* address, - struct sockaddr* prefix_address, - int prefix_len) { - uint8_t* address_data; - uint8_t* prefix_address_data; - int i; - - assert(address->sa_family == family); - assert(prefix_address->sa_family == family); - - if (family == AF_INET6) { - address_data = (uint8_t*) &(((struct sockaddr_in6 *) address)->sin6_addr); - prefix_address_data = - (uint8_t*) &(((struct sockaddr_in6 *) prefix_address)->sin6_addr); - } else { - address_data = (uint8_t*) &(((struct sockaddr_in *) address)->sin_addr); - prefix_address_data = - (uint8_t*) &(((struct sockaddr_in *) prefix_address)->sin_addr); - } - - for (i = 0; i < prefix_len >> 3; i++) { - if (address_data[i] != prefix_address_data[i]) - return 0; - } - - if (prefix_len % 8) - return prefix_address_data[i] == - (address_data[i] & (0xff << (8 - prefix_len % 8))); - - return 1; -} - - int uv_interface_addresses(uv_interface_address_t** addresses_ptr, int* count_ptr) { IP_ADAPTER_ADDRESSES* win_address_buf; @@ -755,26 +740,13 @@ int uv_interface_addresses(uv_interface_address_t** addresses_ptr, uv_interface_address_t* uv_address; int count; - - int is_vista_or_greater; ULONG flags; *addresses_ptr = NULL; *count_ptr = 0; - is_vista_or_greater = is_windows_version_or_greater(6, 0, 0, 0); - if (is_vista_or_greater) { - flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | - GAA_FLAG_SKIP_DNS_SERVER; - } else { - /* We need at least XP SP1. */ - if (!is_windows_version_or_greater(5, 1, 1, 0)) - return UV_ENOTSUP; - - flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | - GAA_FLAG_SKIP_DNS_SERVER | GAA_FLAG_INCLUDE_PREFIX; - } - + flags = GAA_FLAG_SKIP_ANYCAST | GAA_FLAG_SKIP_MULTICAST | + GAA_FLAG_SKIP_DNS_SERVER; /* Fetch the size of the adapters reported by windows, and then get the list * itself. */ @@ -938,37 +910,8 @@ int uv_interface_addresses(uv_interface_address_t** addresses_ptr, sa = unicast_address->Address.lpSockaddr; - /* XP has no OnLinkPrefixLength field. 
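Note: the util.c hunk above implements uv_clock_gettime() on Windows with UV_CLOCK_MONOTONIC and UV_CLOCK_REALTIME. A short usage sketch against the new API:

    #include <stdio.h>
    #include <uv.h>

    int main(void) {
      uv_timespec64_t mono;
      uv_timespec64_t real;

      if (uv_clock_gettime(UV_CLOCK_MONOTONIC, &mono) == 0 &&
          uv_clock_gettime(UV_CLOCK_REALTIME, &real) == 0)
        printf("monotonic %lld.%09d, realtime %lld.%09d\n",
               (long long) mono.tv_sec, (int) mono.tv_nsec,
               (long long) real.tv_sec, (int) real.tv_nsec);

      return 0;
    }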
*/ - if (is_vista_or_greater) { - prefix_len = - ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength; - } else { - /* Prior to Windows Vista the FirstPrefix pointed to the list with - * single prefix for each IP address assigned to the adapter. - * Order of FirstPrefix does not match order of FirstUnicastAddress, - * so we need to find corresponding prefix. - */ - IP_ADAPTER_PREFIX* prefix; - prefix_len = 0; - - for (prefix = adapter->FirstPrefix; prefix; prefix = prefix->Next) { - /* We want the longest matching prefix. */ - if (prefix->Address.lpSockaddr->sa_family != sa->sa_family || - prefix->PrefixLength <= prefix_len) - continue; - - if (address_prefix_match(sa->sa_family, sa, - prefix->Address.lpSockaddr, prefix->PrefixLength)) { - prefix_len = prefix->PrefixLength; - } - } - - /* If there is no matching prefix information, return a single-host - * subnet mask (e.g. 255.255.255.255 for IPv4). - */ - if (!prefix_len) - prefix_len = (sa->sa_family == AF_INET6) ? 128 : 32; - } + prefix_len = + ((IP_ADAPTER_UNICAST_ADDRESS_LH*) unicast_address)->OnLinkPrefixLength; memset(uv_address, 0, sizeof *uv_address); @@ -1093,8 +1036,8 @@ int uv_os_homedir(char* buffer, size_t* size) { if (r != UV_ENOENT) return r; - /* USERPROFILE is not set, so call uv__getpwuid_r() */ - r = uv__getpwuid_r(&pwd); + /* USERPROFILE is not set, so call uv_os_get_passwd() */ + r = uv_os_get_passwd(&pwd); if (r != 0) { return r; @@ -1181,17 +1124,6 @@ int uv_os_tmpdir(char* buffer, size_t* size) { } -void uv_os_free_passwd(uv_passwd_t* pwd) { - if (pwd == NULL) - return; - - uv__free(pwd->username); - uv__free(pwd->homedir); - pwd->username = NULL; - pwd->homedir = NULL; -} - - /* * Converts a UTF-16 string into a UTF-8 one. The resulting string is * null-terminated. @@ -1288,7 +1220,7 @@ int uv__convert_utf8_to_utf16(const char* utf8, int utf8len, WCHAR** utf16) { } -int uv__getpwuid_r(uv_passwd_t* pwd) { +static int uv__getpwuid_r(uv_passwd_t* pwd) { HANDLE token; wchar_t username[UNLEN + 1]; wchar_t *path; @@ -1366,6 +1298,16 @@ int uv_os_get_passwd(uv_passwd_t* pwd) { } +int uv_os_get_passwd2(uv_passwd_t* pwd, uv_uid_t uid) { + return UV_ENOTSUP; +} + + +int uv_os_get_group(uv_group_t* grp, uv_uid_t gid) { + return UV_ENOTSUP; +} + + int uv_os_environ(uv_env_item_t** envitems, int* count) { wchar_t* env; wchar_t* penv; @@ -1769,6 +1711,22 @@ int uv_os_uname(uv_utsname_t* buffer) { RegCloseKey(registry_key); if (r == ERROR_SUCCESS) { + /* Windows 11 shares dwMajorVersion with Windows 10 + * this workaround tries to disambiguate that by checking + * if the dwBuildNumber is from Windows 11 releases (>= 22000). 
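   * (ARRAY_SIZE(L"Windows 10") is 11 wide characters including the
   * terminating NUL, so the wcsncmp() below compares the 10-character
   * prefix and product_name_w[9] is the trailing '0' that gets bumped
   * to '1'.)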
+ * + * This workaround replaces the ProductName key value + * from "Windows 10 *" to "Windows 11 *" */ + if (os_info.dwMajorVersion == 10 && + os_info.dwBuildNumber >= 22000 && + product_name_w_size >= ARRAY_SIZE(L"Windows 10")) { + /* If ProductName starts with "Windows 10" */ + if (wcsncmp(product_name_w, L"Windows 10", ARRAY_SIZE(L"Windows 10") - 1) == 0) { + /* Bump 10 to 11 */ + product_name_w[9] = '1'; + } + } + version_size = WideCharToMultiByte(CP_UTF8, 0, product_name_w, diff --git a/deps/uv/test/benchmark-async-pummel.c b/deps/uv/test/benchmark-async-pummel.c index 49660a6f5755c0..bec91850616150 100644 --- a/deps/uv/test/benchmark-async-pummel.c +++ b/deps/uv/test/benchmark-async-pummel.c @@ -62,6 +62,7 @@ static void pummel(void* arg) { static int test_async_pummel(int nthreads) { + char fmtbuf[2][32]; uv_thread_t* tids; uv_async_t handle; uint64_t time; @@ -88,13 +89,13 @@ static int test_async_pummel(int nthreads) { printf("async_pummel_%d: %s callbacks in %.2f seconds (%s/sec)\n", nthreads, - fmt(callbacks), + fmt(&fmtbuf[0], callbacks), time / 1e9, - fmt(callbacks / (time / 1e9))); + fmt(&fmtbuf[1], callbacks / (time / 1e9))); free(tids); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/benchmark-async.c b/deps/uv/test/benchmark-async.c index 5167ecbd758d7a..d4b7c8bd91482f 100644 --- a/deps/uv/test/benchmark-async.c +++ b/deps/uv/test/benchmark-async.c @@ -73,6 +73,7 @@ static void worker(void* arg) { static int test_async(int nthreads) { + char fmtbuf[32]; struct ctx* threads; struct ctx* ctx; uint64_t time; @@ -112,11 +113,11 @@ static int test_async(int nthreads) { printf("async%d: %.2f sec (%s/sec)\n", nthreads, time / 1e9, - fmt(NUM_PINGS / (time / 1e9))); + fmt(&fmtbuf, NUM_PINGS / (time / 1e9))); free(threads); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/benchmark-fs-stat.c b/deps/uv/test/benchmark-fs-stat.c index 32d2589586c1b0..c4106224109100 100644 --- a/deps/uv/test/benchmark-fs-stat.c +++ b/deps/uv/test/benchmark-fs-stat.c @@ -60,6 +60,7 @@ static void warmup(const char* path) { static void sync_bench(const char* path) { + char fmtbuf[2][32]; uint64_t before; uint64_t after; uv_fs_t req; @@ -74,9 +75,9 @@ static void sync_bench(const char* path) { after = uv_hrtime(); printf("%s stats (sync): %.2fs (%s/s)\n", - fmt(1.0 * NUM_SYNC_REQS), + fmt(&fmtbuf[0], 1.0 * NUM_SYNC_REQS), (after - before) / 1e9, - fmt((1.0 * NUM_SYNC_REQS) / ((after - before) / 1e9))); + fmt(&fmtbuf[1], (1.0 * NUM_SYNC_REQS) / ((after - before) / 1e9))); fflush(stdout); } @@ -93,6 +94,7 @@ static void stat_cb(uv_fs_t* fs_req) { static void async_bench(const char* path) { struct async_req reqs[MAX_CONCURRENT_REQS]; struct async_req* req; + char fmtbuf[2][32]; uint64_t before; uint64_t after; int count; @@ -112,10 +114,10 @@ static void async_bench(const char* path) { after = uv_hrtime(); printf("%s stats (%d concurrent): %.2fs (%s/s)\n", - fmt(1.0 * NUM_ASYNC_REQS), + fmt(&fmtbuf[0], 1.0 * NUM_ASYNC_REQS), i, (after - before) / 1e9, - fmt((1.0 * NUM_ASYNC_REQS) / ((after - before) / 1e9))); + fmt(&fmtbuf[1], (1.0 * NUM_ASYNC_REQS) / ((after - before) / 1e9))); fflush(stdout); } } @@ -131,6 +133,6 @@ BENCHMARK_IMPL(fs_stat) { warmup(path); sync_bench(path); async_bench(path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/benchmark-getaddrinfo.c b/deps/uv/test/benchmark-getaddrinfo.c index 1dbc23ddba009d..1ef7b1ef095937 
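/* The fmt() helper no longer formats into a static buffer: callers now pass
 * the address of a char[32] of their own (see the test/runner.c and
 * test/task.h hunks later in this patch), so a printf() that formats two
 * values needs two buffers, hence the fmtbuf[2][32] arrays added to these
 * benchmarks. A minimal sketch of the new calling convention, with
 * illustrative names:
 *
 *   char buf[32];
 *   printf("%s ops/sec\n", fmt(&buf, 1234567.0));
 */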
100644 --- a/deps/uv/test/benchmark-getaddrinfo.c +++ b/deps/uv/test/benchmark-getaddrinfo.c @@ -87,6 +87,6 @@ BENCHMARK_IMPL(getaddrinfo) { (double) calls_completed / (double) (end_time - start_time) * 1000.0); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-loop-count.c b/deps/uv/test/benchmark-loop-count.c index 970a94c2fecb5c..4aa39867bb16a8 100644 --- a/deps/uv/test/benchmark-loop-count.c +++ b/deps/uv/test/benchmark-loop-count.c @@ -68,7 +68,7 @@ BENCHMARK_IMPL(loop_count) { NUM_TICKS / (ns / 1e9)); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -87,6 +87,6 @@ BENCHMARK_IMPL(loop_count_timed) { fprintf(stderr, "loop_count: %lu ticks (%.0f ticks/s)\n", ticks, ticks / 5.0); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-million-async.c b/deps/uv/test/benchmark-million-async.c index 937a12f81e64be..30c21c38af15c4 100644 --- a/deps/uv/test/benchmark-million-async.c +++ b/deps/uv/test/benchmark-million-async.c @@ -76,6 +76,7 @@ static void timer_cb(uv_timer_t* handle) { BENCHMARK_IMPL(million_async) { + char fmtbuf[3][32]; uv_timer_t timer_handle; uv_async_t* handle; uv_loop_t* loop; @@ -101,12 +102,12 @@ BENCHMARK_IMPL(million_async) { ASSERT(0 == uv_thread_create(&thread_id, thread_cb, NULL)); ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); printf("%s async events in %.1f seconds (%s/s, %s unique handles seen)\n", - fmt(container->async_events), + fmt(&fmtbuf[0], container->async_events), timeout / 1000., - fmt(container->async_events / (timeout / 1000.)), - fmt(container->handles_seen)); + fmt(&fmtbuf[1], container->async_events / (timeout / 1000.)), + fmt(&fmtbuf[2], container->handles_seen)); free(container); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-million-timers.c b/deps/uv/test/benchmark-million-timers.c index ef25c2052d6f66..b35fd5e788224d 100644 --- a/deps/uv/test/benchmark-million-timers.c +++ b/deps/uv/test/benchmark-million-timers.c @@ -81,6 +81,6 @@ BENCHMARK_IMPL(million_timers) { fprintf(stderr, "%.2f seconds cleanup\n", (after_all - after_run) / 1e9); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-multi-accept.c b/deps/uv/test/benchmark-multi-accept.c index 86b7da5acd158a..e2026276721b4e 100644 --- a/deps/uv/test/benchmark-multi-accept.c +++ b/deps/uv/test/benchmark-multi-accept.c @@ -431,7 +431,7 @@ static int test_tcp(unsigned int num_servers, unsigned int num_clients) { free(clients); free(servers); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-ping-pongs.c b/deps/uv/test/benchmark-ping-pongs.c index 646a7df9447036..0357704e66e3c9 100644 --- a/deps/uv/test/benchmark-ping-pongs.c +++ b/deps/uv/test/benchmark-ping-pongs.c @@ -216,6 +216,6 @@ BENCHMARK_IMPL(ping_pongs) { ASSERT(completed_pingers == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-ping-udp.c b/deps/uv/test/benchmark-ping-udp.c index cf9ca9811f7a7b..3db8765bf9cf4c 100644 --- a/deps/uv/test/benchmark-ping-udp.c +++ b/deps/uv/test/benchmark-ping-udp.c @@ -153,7 +153,7 @@ static int ping_udp(unsigned pingers) { fprintf(stderr, "ping_pongs: %d pingers, ~ %lu roundtrips/s\n", completed_pingers, completed_pings / (TIME/1000)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git 
a/deps/uv/test/benchmark-pound.c b/deps/uv/test/benchmark-pound.c index 830bc554b34348..acfe4497a2ed48 100644 --- a/deps/uv/test/benchmark-pound.c +++ b/deps/uv/test/benchmark-pound.c @@ -306,7 +306,7 @@ static int pound_it(int concurrency, conns_failed); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-pump.c b/deps/uv/test/benchmark-pump.c index 7d3977dfc32d0d..316c680996065e 100644 --- a/deps/uv/test/benchmark-pump.c +++ b/deps/uv/test/benchmark-pump.c @@ -415,7 +415,7 @@ HELPER_IMPL(pipe_pump_server) { notify_parent_process(); uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -434,7 +434,7 @@ static void tcp_pump(int n) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); } @@ -450,7 +450,7 @@ static void pipe_pump(int n) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); } diff --git a/deps/uv/test/benchmark-queue-work.c b/deps/uv/test/benchmark-queue-work.c index 2dd5cb665617b1..6e7b74becf63b4 100644 --- a/deps/uv/test/benchmark-queue-work.c +++ b/deps/uv/test/benchmark-queue-work.c @@ -46,6 +46,7 @@ static void after_work_cb(uv_work_t* req, int status) { static void timer_cb(uv_timer_t* handle) { done = 1; } BENCHMARK_IMPL(queue_work) { + char fmtbuf[2][32]; uv_timer_t timer_handle; uv_work_t work; uv_loop_t* loop; @@ -60,9 +61,11 @@ BENCHMARK_IMPL(queue_work) { ASSERT_EQ(0, uv_queue_work(loop, &work, work_cb, after_work_cb)); ASSERT_EQ(0, uv_run(loop, UV_RUN_DEFAULT)); - printf("%s async jobs in %.1f seconds (%s/s)\n", fmt(events), timeout / 1000., - fmt(events / (timeout / 1000.))); + printf("%s async jobs in %.1f seconds (%s/s)\n", + fmt(&fmtbuf[0], events), + timeout / 1000., + fmt(&fmtbuf[1], events / (timeout / 1000.))); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-spawn.c b/deps/uv/test/benchmark-spawn.c index ed9ad608f3790e..bdaf6c1a254e19 100644 --- a/deps/uv/test/benchmark-spawn.c +++ b/deps/uv/test/benchmark-spawn.c @@ -159,6 +159,6 @@ BENCHMARK_IMPL(spawn) { (double) N / (double) (end_time - start_time) * 1000.0); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-tcp-write-batch.c b/deps/uv/test/benchmark-tcp-write-batch.c index 16aa72f6bf73c8..aedefb742559c5 100644 --- a/deps/uv/test/benchmark-tcp-write-batch.c +++ b/deps/uv/test/benchmark-tcp-write-batch.c @@ -139,6 +139,6 @@ BENCHMARK_IMPL(tcp_write_batch) { (long)NUM_WRITE_REQS, (stop - start) / 1e9); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/benchmark-udp-pummel.c b/deps/uv/test/benchmark-udp-pummel.c index 1a2205702603e0..f89913b6cebad0 100644 --- a/deps/uv/test/benchmark-udp-pummel.c +++ b/deps/uv/test/benchmark-udp-pummel.c @@ -215,7 +215,7 @@ static int pummel(unsigned int n_senders, send_cb_called, duration / 1000.0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/fixtures/one_file/one_file b/deps/uv/test/fixtures/one_file/one_file new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/deps/uv/test/run-tests.c b/deps/uv/test/run-tests.c index 86b0359949b7f1..d8cfe297c49524 100644 --- a/deps/uv/test/run-tests.c +++ b/deps/uv/test/run-tests.c @@ -85,10 +85,6 @@ int main(int argc, char **argv) { fflush(stderr); return EXIT_FAILURE; } - -#ifndef __SUNPRO_C - return EXIT_SUCCESS; -#endif } diff --git 
a/deps/uv/test/runner-unix.c b/deps/uv/test/runner-unix.c index c165aab9305623..09191dbdaa1f32 100644 --- a/deps/uv/test/runner-unix.c +++ b/deps/uv/test/runner-unix.c @@ -344,6 +344,7 @@ long int process_output_size(process_info_t *p) { /* Size of the p->stdout_file */ struct stat buf; + memset(&buf, 0, sizeof(buf)); int r = fstat(fileno(p->stdout_file), &buf); if (r < 0) { return -1; diff --git a/deps/uv/test/runner.c b/deps/uv/test/runner.c index 789108275cda11..d1dd02f5ce0806 100644 --- a/deps/uv/test/runner.c +++ b/deps/uv/test/runner.c @@ -37,28 +37,14 @@ static int compare_task(const void* va, const void* vb) { } -const char* fmt(double d) { - static char buf[1024]; - static char* p; +char* fmt(char (*buf)[32], double d) { uint64_t v; + char* p; - if (p == NULL) - p = buf; - - p += 31; - - if (p >= buf + sizeof(buf)) - return ""; - + p = &(*buf)[32]; v = (uint64_t) d; -#if 0 /* works but we don't care about fractional precision */ - if (d - v >= 0.01) { - *--p = '0' + (uint64_t) (d * 100) % 10; - *--p = '0' + (uint64_t) (d * 10) % 10; - *--p = '.'; - } -#endif + *--p = '\0'; if (v == 0) *--p = '0'; @@ -77,9 +63,7 @@ const char* fmt(double d) { int run_tests(int benchmark_output) { int actual; int total; - int passed; int failed; - int skipped; int current; int test_result; int skip; @@ -102,9 +86,7 @@ int run_tests(int benchmark_output) { fflush(stdout); /* Run all tests. */ - passed = 0; failed = 0; - skipped = 0; current = 1; for (task = TASKS; task->main; task++) { if (task->is_helper) { @@ -113,8 +95,8 @@ int run_tests(int benchmark_output) { test_result = run_test(task->task_name, benchmark_output, current); switch (test_result) { - case TEST_OK: passed++; break; - case TEST_SKIP: skipped++; break; + case TEST_OK: break; + case TEST_SKIP: break; default: failed++; } current++; diff --git a/deps/uv/test/task.h b/deps/uv/test/task.h index 925f1b1c0aeac6..fa6cc0ed535a5c 100644 --- a/deps/uv/test/task.h +++ b/deps/uv/test/task.h @@ -29,12 +29,7 @@ #include #include #include - -#if defined(_MSC_VER) && _MSC_VER < 1600 -# include "uv/stdint-msvc2008.h" -#else -# include -#endif +#include #if !defined(_WIN32) # include @@ -55,9 +50,9 @@ #define TEST_PORT_3 9125 #ifdef _WIN32 -# define TEST_PIPENAME "\\\\?\\pipe\\uv-test" -# define TEST_PIPENAME_2 "\\\\?\\pipe\\uv-test2" -# define TEST_PIPENAME_3 "\\\\?\\pipe\\uv-test3" +# define TEST_PIPENAME "\\\\.\\pipe\\uv-test" +# define TEST_PIPENAME_2 "\\\\.\\pipe\\uv-test2" +# define TEST_PIPENAME_3 "\\\\.\\pipe\\uv-test3" #else # define TEST_PIPENAME "/tmp/uv-test-sock" # define TEST_PIPENAME_2 "/tmp/uv-test-sock2" @@ -203,6 +198,7 @@ typedef enum { #define ASSERT_LE(a, b) ASSERT_BASE(a, <=, b, int64_t, PRId64) #define ASSERT_LT(a, b) ASSERT_BASE(a, <, b, int64_t, PRId64) #define ASSERT_NE(a, b) ASSERT_BASE(a, !=, b, int64_t, PRId64) +#define ASSERT_OK(a) ASSERT_BASE(a, ==, 0, int64_t, PRId64) #define ASSERT_UINT64_EQ(a, b) ASSERT_BASE(a, ==, b, uint64_t, PRIu64) #define ASSERT_UINT64_GE(a, b) ASSERT_BASE(a, >=, b, uint64_t, PRIu64) @@ -248,13 +244,13 @@ typedef enum { #define ASSERT_PTR_NE(a, b) \ ASSERT_BASE(a, !=, b, void*, "p") -/* This macro cleans up the main loop. This is used to avoid valgrind - * warnings about memory being "leaked" by the main event loop. +/* This macro cleans up the event loop. This is used to avoid valgrind + * warnings about memory being "leaked" by the event loop. 
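 *
 * A minimal usage sketch with the parameterized form (the loop name here
 * is illustrative, not taken from a specific test):
 *
 *   uv_loop_t loop;
 *   ASSERT(0 == uv_loop_init(&loop));
 *   ... start handles and run &loop ...
 *   MAKE_VALGRIND_HAPPY(&loop);
 *   return 0;
 *
 * Tests that run on the default loop simply pass uv_default_loop(), as
 * the call sites updated throughout this patch do.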
*/ -#define MAKE_VALGRIND_HAPPY() \ +#define MAKE_VALGRIND_HAPPY(loop) \ do { \ - close_loop(uv_default_loop()); \ - ASSERT(0 == uv_loop_close(uv_default_loop())); \ + close_loop(loop); \ + ASSERT(0 == uv_loop_close(loop)); \ uv_library_shutdown(); \ } while (0) @@ -271,8 +267,8 @@ typedef enum { int run_helper_##name(void); \ int run_helper_##name(void) -/* Format big numbers nicely. WARNING: leaks memory. */ -const char* fmt(double d); +/* Format big numbers nicely. */ +char* fmt(char (*buf)[32], double d); /* Reserved test exit codes. */ enum test_status { @@ -375,4 +371,11 @@ UNUSED static int can_ipv6(void) { "Cygwin runtime hangs on listen+connect in same process." #endif +#if !defined(__linux__) && \ + !(defined(__FreeBSD__) && __FreeBSD_version >= 1301000) && \ + !defined(_WIN32) +# define NO_CPU_AFFINITY \ + "affinity not supported on this platform." +#endif + #endif /* TASK_H_ */ diff --git a/deps/uv/test/test-active.c b/deps/uv/test/test-active.c index 384389561a79e8..aaff97087b190d 100644 --- a/deps/uv/test/test-active.c +++ b/deps/uv/test/test-active.c @@ -79,6 +79,6 @@ TEST_IMPL(active) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-async-null-cb.c b/deps/uv/test/test-async-null-cb.c index 52652d91ebf098..1bdd0e032497a9 100644 --- a/deps/uv/test/test-async-null-cb.c +++ b/deps/uv/test/test-async-null-cb.c @@ -59,6 +59,6 @@ TEST_IMPL(async_null_cb) { ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(0 == uv_thread_join(&thread)); ASSERT(1 == check_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-async.c b/deps/uv/test/test-async.c index 619be620e3e916..73664ea5d67efa 100644 --- a/deps/uv/test/test-async.c +++ b/deps/uv/test/test-async.c @@ -129,6 +129,6 @@ TEST_IMPL(async) { ASSERT(0 == uv_thread_join(&thread)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-barrier.c b/deps/uv/test/test-barrier.c index 89858db5711482..c780f0cf2dd48b 100644 --- a/deps/uv/test/test-barrier.c +++ b/deps/uv/test/test-barrier.c @@ -27,20 +27,22 @@ typedef struct { uv_barrier_t barrier; - int delay; - volatile int posted; - int main_barrier_wait_rval; - int worker_barrier_wait_rval; + unsigned delay; + unsigned niter; + unsigned main_barrier_wait_rval; + unsigned worker_barrier_wait_rval; } worker_config; static void worker(void* arg) { worker_config* c = arg; + unsigned i; if (c->delay) uv_sleep(c->delay); - c->worker_barrier_wait_rval = uv_barrier_wait(&c->barrier); + for (i = 0; i < c->niter; i++) + c->worker_barrier_wait_rval += uv_barrier_wait(&c->barrier); } @@ -49,17 +51,18 @@ TEST_IMPL(barrier_1) { worker_config wc; memset(&wc, 0, sizeof(wc)); + wc.niter = 1; - ASSERT(0 == uv_barrier_init(&wc.barrier, 2)); - ASSERT(0 == uv_thread_create(&thread, worker, &wc)); + ASSERT_EQ(0, uv_barrier_init(&wc.barrier, 2)); + ASSERT_EQ(0, uv_thread_create(&thread, worker, &wc)); uv_sleep(100); wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); - ASSERT(0 == uv_thread_join(&thread)); + ASSERT_EQ(0, uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); - ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + ASSERT_EQ(1, (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); return 0; } @@ -71,16 +74,17 @@ TEST_IMPL(barrier_2) { memset(&wc, 0, sizeof(wc)); wc.delay = 100; + wc.niter = 1; - ASSERT(0 == uv_barrier_init(&wc.barrier, 
2)); - ASSERT(0 == uv_thread_create(&thread, worker, &wc)); + ASSERT_EQ(0, uv_barrier_init(&wc.barrier, 2)); + ASSERT_EQ(0, uv_thread_create(&thread, worker, &wc)); wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); - ASSERT(0 == uv_thread_join(&thread)); + ASSERT_EQ(0, uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); - ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + ASSERT_EQ(1, (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); return 0; } @@ -89,26 +93,32 @@ TEST_IMPL(barrier_2) { TEST_IMPL(barrier_3) { uv_thread_t thread; worker_config wc; + unsigned i; memset(&wc, 0, sizeof(wc)); + wc.niter = 5; - ASSERT(0 == uv_barrier_init(&wc.barrier, 2)); - ASSERT(0 == uv_thread_create(&thread, worker, &wc)); + ASSERT_EQ(0, uv_barrier_init(&wc.barrier, 2)); + ASSERT_EQ(0, uv_thread_create(&thread, worker, &wc)); - wc.main_barrier_wait_rval = uv_barrier_wait(&wc.barrier); + for (i = 0; i < wc.niter; i++) + wc.main_barrier_wait_rval += uv_barrier_wait(&wc.barrier); - ASSERT(0 == uv_thread_join(&thread)); + ASSERT_EQ(0, uv_thread_join(&thread)); uv_barrier_destroy(&wc.barrier); - ASSERT(1 == (wc.main_barrier_wait_rval ^ wc.worker_barrier_wait_rval)); + ASSERT_EQ(wc.niter, wc.main_barrier_wait_rval + wc.worker_barrier_wait_rval); return 0; } static void serial_worker(void* data) { uv_barrier_t* barrier; + unsigned i; barrier = data; + for (i = 0; i < 5; i++) + uv_barrier_wait(barrier); if (uv_barrier_wait(barrier) > 0) uv_barrier_destroy(barrier); @@ -123,16 +133,18 @@ TEST_IMPL(barrier_serial_thread) { uv_barrier_t barrier; unsigned i; - ASSERT(0 == uv_barrier_init(&barrier, ARRAY_SIZE(threads) + 1)); + ASSERT_EQ(0, uv_barrier_init(&barrier, ARRAY_SIZE(threads) + 1)); for (i = 0; i < ARRAY_SIZE(threads); ++i) - ASSERT(0 == uv_thread_create(&threads[i], serial_worker, &barrier)); + ASSERT_EQ(0, uv_thread_create(&threads[i], serial_worker, &barrier)); + for (i = 0; i < 5; i++) + uv_barrier_wait(&barrier); if (uv_barrier_wait(&barrier) > 0) uv_barrier_destroy(&barrier); for (i = 0; i < ARRAY_SIZE(threads); ++i) - ASSERT(0 == uv_thread_join(&threads[i])); + ASSERT_EQ(0, uv_thread_join(&threads[i])); return 0; } @@ -141,8 +153,8 @@ TEST_IMPL(barrier_serial_thread) { TEST_IMPL(barrier_serial_thread_single) { uv_barrier_t barrier; - ASSERT(0 == uv_barrier_init(&barrier, 1)); - ASSERT(0 < uv_barrier_wait(&barrier)); + ASSERT_EQ(0, uv_barrier_init(&barrier, 1)); + ASSERT_LT(0, uv_barrier_wait(&barrier)); uv_barrier_destroy(&barrier); return 0; } diff --git a/deps/uv/test/test-callback-stack.c b/deps/uv/test/test-callback-stack.c index a5195c7b7f3dd1..5dad8d75d2aa9a 100644 --- a/deps/uv/test/test-callback-stack.c +++ b/deps/uv/test/test-callback-stack.c @@ -199,6 +199,6 @@ TEST_IMPL(callback_stack) { ASSERT(shutdown_cb_called == 1 && "shutdown_cb must be called exactly once"); ASSERT(close_cb_called == 2 && "close_cb must be called exactly twice"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-close-fd.c b/deps/uv/test/test-close-fd.c index 0d3927f652ede0..d8e12653f77feb 100644 --- a/deps/uv/test/test-close-fd.c +++ b/deps/uv/test/test-close-fd.c @@ -79,6 +79,6 @@ TEST_IMPL(close_fd) { ASSERT(2 == read_cb_called); ASSERT(0 != uv_is_closing((const uv_handle_t *) &pipe_handle)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-close-order.c b/deps/uv/test/test-close-order.c index c2fd6c3d0dec16..768e1ceedbe474 100644 --- 
a/deps/uv/test/test-close-order.c +++ b/deps/uv/test/test-close-order.c @@ -75,6 +75,6 @@ TEST_IMPL(close_order) { ASSERT(close_cb_called == 3); ASSERT(timer_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-condvar.c b/deps/uv/test/test-condvar.c index 32abccc2e7602e..61592d0e2d159a 100644 --- a/deps/uv/test/test-condvar.c +++ b/deps/uv/test/test-condvar.c @@ -228,11 +228,6 @@ TEST_IMPL(condvar_4) { /* uv_cond_timedwait: One thread waits, no signal. Timeout should be delivered. */ TEST_IMPL(condvar_5) { worker_config wc; - int r; - /* ns */ - uint64_t before; - uint64_t after; - uint64_t elapsed; uint64_t timeout; timeout = 100 * 1000 * 1000; /* 100 ms in ns */ @@ -242,25 +237,11 @@ TEST_IMPL(condvar_5) { uv_mutex_lock(&wc.mutex); - /* We wait. - * No signaler, so this will only return if timeout is delivered. */ - before = uv_hrtime(); - r = uv_cond_timedwait(&wc.cond, &wc.mutex, timeout); - after = uv_hrtime(); + /* We wait. No signaler, so this will only return if timeout is delivered. */ + ASSERT_EQ(UV_ETIMEDOUT, uv_cond_timedwait(&wc.cond, &wc.mutex, timeout)); uv_mutex_unlock(&wc.mutex); - /* It timed out. */ - ASSERT(r == UV_ETIMEDOUT); - - /* It must have taken at least timeout, modulo system timer ticks. - * But it should not take too much longer. - * cf. MSDN docs: - * https://msdn.microsoft.com/en-us/library/ms687069(VS.85).aspx */ - elapsed = after - before; - ASSERT(0.75 * timeout <= elapsed); /* 1.0 too large for Windows. */ - ASSERT(elapsed <= 5.0 * timeout); /* MacOS has reported failures up to 1.75. */ - worker_config_destroy(&wc); return 0; diff --git a/deps/uv/test/test-connect-unspecified.c b/deps/uv/test/test-connect-unspecified.c index 5f32b67a6a4daa..ecbe98538edc9d 100644 --- a/deps/uv/test/test-connect-unspecified.c +++ b/deps/uv/test/test-connect-unspecified.c @@ -59,5 +59,6 @@ TEST_IMPL(connect_unspecified) { ASSERT(uv_run(loop, UV_RUN_DEFAULT) == 0); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-connection-fail.c b/deps/uv/test/test-connection-fail.c index 5904810252995f..aa7db30d85a2d1 100644 --- a/deps/uv/test/test-connection-fail.c +++ b/deps/uv/test/test-connection-fail.c @@ -130,7 +130,7 @@ TEST_IMPL(connection_fail) { ASSERT(timer_close_cb_calls == 0); ASSERT(timer_cb_calls == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -156,6 +156,6 @@ TEST_IMPL(connection_fail_doesnt_auto_close) { ASSERT(timer_close_cb_calls == 1); ASSERT(timer_cb_calls == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-default-loop-close.c b/deps/uv/test/test-default-loop-close.c index 51e1e7dc23bffa..8d960e1130a7cb 100644 --- a/deps/uv/test/test-default-loop-close.c +++ b/deps/uv/test/test-default-loop-close.c @@ -52,8 +52,7 @@ TEST_IMPL(default_loop_close) { ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 1, 0)); ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(2 == timer_cb_called); - ASSERT(0 == uv_loop_close(loop)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-delayed-accept.c b/deps/uv/test/test-delayed-accept.c index 88b31e26903f09..c1d6ce0b45b203 100644 --- a/deps/uv/test/test-delayed-accept.c +++ b/deps/uv/test/test-delayed-accept.c @@ -184,6 +184,6 @@ TEST_IMPL(delayed_accept) { ASSERT(connect_cb_called == 2); ASSERT(close_cb_called == 7); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff 
--git a/deps/uv/test/test-dlerror.c b/deps/uv/test/test-dlerror.c index a436ec016bfec8..631e67cc5f3e89 100644 --- a/deps/uv/test/test-dlerror.c +++ b/deps/uv/test/test-dlerror.c @@ -43,7 +43,9 @@ TEST_IMPL(dlerror) { msg = uv_dlerror(&lib); ASSERT_NOT_NULL(msg); #if !defined(__OpenBSD__) && !defined(__QNX__) - ASSERT_NOT_NULL(strstr(msg, path)); + /* musl's libc.a does not support dlopen(), only libc.so does. */ + if (NULL == strstr(msg, "Dynamic loading not supported")) + ASSERT_NOT_NULL(strstr(msg, path)); #endif ASSERT_NULL(strstr(msg, dlerror_no_error)); @@ -51,7 +53,9 @@ TEST_IMPL(dlerror) { msg = uv_dlerror(&lib); ASSERT_NOT_NULL(msg); #if !defined(__OpenBSD__) && !defined(__QNX__) - ASSERT_NOT_NULL(strstr(msg, path)); + /* musl's libc.a does not support dlopen(), only libc.so does. */ + if (NULL == strstr(msg, "Dynamic loading not supported")) + ASSERT_NOT_NULL(strstr(msg, path)); #endif ASSERT_NULL(strstr(msg, dlerror_no_error)); diff --git a/deps/uv/test/test-eintr-handling.c b/deps/uv/test/test-eintr-handling.c index 1aaf623b789b6e..d37aba4aa529ba 100644 --- a/deps/uv/test/test-eintr-handling.c +++ b/deps/uv/test/test-eintr-handling.c @@ -87,7 +87,9 @@ TEST_IMPL(eintr_handling) { ASSERT(0 == close(pipe_fds[1])); uv_close((uv_handle_t*) &signal, NULL); - MAKE_VALGRIND_HAPPY(); + ASSERT_EQ(0, uv_thread_join(&thread)); + + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-embed.c b/deps/uv/test/test-embed.c index 1d3355fdc67310..bbe56e176db17a 100644 --- a/deps/uv/test/test-embed.c +++ b/deps/uv/test/test-embed.c @@ -74,6 +74,6 @@ TEST_IMPL(embed) { ASSERT_EQ(0, uv_thread_join(&thread)); uv_barrier_destroy(&barrier); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-emfile.c b/deps/uv/test/test-emfile.c index bc1fce5f5591f0..343c9521dc7eba 100644 --- a/deps/uv/test/test-emfile.c +++ b/deps/uv/test/test-emfile.c @@ -94,7 +94,7 @@ TEST_IMPL(emfile) { first_fd += 1; } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-env-vars.c b/deps/uv/test/test-env-vars.c index ecaba337ca1088..8118e3da5d7c59 100644 --- a/deps/uv/test/test-env-vars.c +++ b/deps/uv/test/test-env-vars.c @@ -131,7 +131,10 @@ TEST_IMPL(env_vars) { ASSERT(found == 2); #ifdef _WIN32 - ASSERT(found_win_special > 0); + ASSERT_GT(found_win_special, 0); +#else + /* There's no rule saying a key can't start with '='. */ + (void) &found_win_special; #endif uv_os_free_environ(envitems, envcount); diff --git a/deps/uv/test/test-fork.c b/deps/uv/test/test-fork.c index 9e4684f0e15376..7a6eb9c411b36a 100644 --- a/deps/uv/test/test-fork.c +++ b/deps/uv/test/test-fork.c @@ -59,17 +59,18 @@ static void socket_cb(uv_poll_t* poll, int status, int events) { static void run_timer_loop_once(void) { - uv_loop_t* loop; + uv_loop_t loop; uv_timer_t timer_handle; - loop = uv_default_loop(); + ASSERT_EQ(0, uv_loop_init(&loop)); timer_cb_called = 0; /* Reset for the child. 
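 *
 * (run_timer_loop_once() now sets up and tears down a loop of its own via
 * uv_loop_init()/uv_loop_close() instead of reusing uv_default_loop();
 * presumably this keeps the forked child's timer run independent of any
 * default-loop state inherited from the parent.)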
*/ - ASSERT(0 == uv_timer_init(loop, &timer_handle)); + ASSERT(0 == uv_timer_init(&loop, &timer_handle)); ASSERT(0 == uv_timer_start(&timer_handle, timer_cb, 1, 0)); - ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT)); ASSERT(1 == timer_cb_called); + ASSERT_EQ(0, uv_loop_close(&loop)); } @@ -111,7 +112,7 @@ TEST_IMPL(fork_timer) { run_timer_loop_once(); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -148,7 +149,7 @@ TEST_IMPL(fork_socketpair) { ASSERT(1 == socket_cb_called); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -212,7 +213,7 @@ TEST_IMPL(fork_socketpair_started) { ASSERT(0 == strcmp("hi\n", socket_cb_read_buf)); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -269,7 +270,7 @@ TEST_IMPL(fork_signal_to_child) { ASSERT(SIGUSR1 == fork_signal_cb_called); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -342,7 +343,7 @@ TEST_IMPL(fork_signal_to_child_closed) { exit(0); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -500,7 +501,7 @@ static int _do_fork_fs_events_child(int file_or_dir) { printf("Exiting child \n"); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -597,7 +598,7 @@ TEST_IMPL(fork_fs_events_file_parent_child) { } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; #endif } @@ -646,6 +647,10 @@ TEST_IMPL(fork_threadpool_queue_work_simple) { pid_t child_pid; uv_loop_t loop; +#ifdef __TSAN__ + RETURN_SKIP("ThreadSanitizer doesn't support multi-threaded fork"); +#endif + /* Prime the pool and default loop. */ assert_run_work(uv_default_loop()); @@ -671,7 +676,7 @@ TEST_IMPL(fork_threadpool_queue_work_simple) { } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif /* !__MVS__ */ diff --git a/deps/uv/test/test-fs-copyfile.c b/deps/uv/test/test-fs-copyfile.c index c785a4b51fbb10..d7f04cf4cddea8 100644 --- a/deps/uv/test/test-fs-copyfile.c +++ b/deps/uv/test/test-fs-copyfile.c @@ -151,14 +151,18 @@ TEST_IMPL(fs_copyfile) { handle_result(&req); /* Fails to overwrites existing file. */ + ASSERT_EQ(uv_fs_chmod(NULL, &req, dst, 0644, NULL), 0); + uv_fs_req_cleanup(&req); r = uv_fs_copyfile(NULL, &req, fixture, dst, UV_FS_COPYFILE_EXCL, NULL); ASSERT(r == UV_EEXIST); uv_fs_req_cleanup(&req); /* Truncates when an existing destination is larger than the source file. */ + ASSERT_EQ(uv_fs_chmod(NULL, &req, dst, 0644, NULL), 0); + uv_fs_req_cleanup(&req); touch_file(src, 1); r = uv_fs_copyfile(NULL, &req, src, dst, 0, NULL); - ASSERT(r == 0); + ASSERT_EQ(r, 0); handle_result(&req); /* Copies a larger file. */ @@ -176,6 +180,9 @@ TEST_IMPL(fs_copyfile) { ASSERT(result_check_count == 5); uv_run(loop, UV_RUN_DEFAULT); ASSERT(result_check_count == 6); + /* Ensure file is user-writable (not copied from src). 
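    * uv_fs_copyfile() copies the source file's permission bits to the
    * destination, so if the source fixture is read-only the chmod back to
    * 0644 keeps the later overwrites of dst from failing.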
*/ + ASSERT_EQ(uv_fs_chmod(NULL, &req, dst, 0644, NULL), 0); + uv_fs_req_cleanup(&req); /* If the flags are invalid, the loop should not be kept open */ unlink(dst); @@ -213,5 +220,6 @@ TEST_IMPL(fs_copyfile) { #endif unlink(dst); /* Cleanup */ + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-fs-event.c b/deps/uv/test/test-fs-event.c index a08bfb9100ce6e..9f231ebfc01e9a 100644 --- a/deps/uv/test/test-fs-event.c +++ b/deps/uv/test/test-fs-event.c @@ -33,19 +33,12 @@ # if defined(__APPLE__) || \ defined(__DragonFly__) || \ defined(__FreeBSD__) || \ - defined(__FreeBSD_kernel__) || \ defined(__OpenBSD__) || \ defined(__NetBSD__) # define HAVE_KQUEUE 1 # endif #endif -#if defined(__arm__)/* Increase the timeout so the test passes on arm CI bots */ -# define CREATE_TIMEOUT 100 -#else -# define CREATE_TIMEOUT 1 -#endif - static uv_fs_event_t fs_event; static const char file_prefix[] = "fsevent-"; static const int fs_event_file_count = 16; @@ -163,10 +156,7 @@ static void fs_event_create_files(uv_timer_t* handle) { if (++fs_event_created < fs_event_file_count) { /* Create another file on a different event loop tick. We do it this way * to avoid fs events coalescing into one fs event. */ - ASSERT(0 == uv_timer_start(&timer, - fs_event_create_files, - CREATE_TIMEOUT, - 0)); + ASSERT_EQ(0, uv_timer_start(&timer, fs_event_create_files, 100, 0)); } } @@ -242,7 +232,8 @@ static void fs_event_create_files_in_subdir(uv_timer_t* handle) { if (++fs_event_created < fs_event_file_count) { /* Create another file on a different event loop tick. We do it this way * to avoid fs events coalescing into one fs event. */ - ASSERT(0 == uv_timer_start(&timer, fs_event_create_files_in_subdir, 1, 0)); + ASSERT_EQ(0, + uv_timer_start(&timer, fs_event_create_files_in_subdir, 100, 0)); } } @@ -441,7 +432,7 @@ TEST_IMPL(fs_event_watch_dir) { remove("watch_dir/file1"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -503,7 +494,7 @@ TEST_IMPL(fs_event_watch_dir_recursive) { remove("watch_dir/subdir"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; #else RETURN_SKIP("Recursive directory watching not supported on this platform."); @@ -550,7 +541,7 @@ TEST_IMPL(fs_event_watch_dir_short_path) { remove("watch_dir/file1"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); if (!has_shortnames) RETURN_SKIP("Was not able to address files with 8.3 short name."); @@ -596,7 +587,7 @@ TEST_IMPL(fs_event_watch_file) { remove("watch_dir/file1"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -623,7 +614,7 @@ TEST_IMPL(fs_event_watch_file_exact_path) { create_file("watch_dir/file.js"); create_file("watch_dir/file.jsx"); #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_12) - /* Empirically, FSEvents seems to (reliably) report the preceeding + /* Empirically, FSEvents seems to (reliably) report the preceding * create_file events prior to macOS 10.11.6 in the subsequent fs_watch * creation, but that behavior hasn't been observed to occur on newer * versions. 
Give a long delay here to let the system settle before running @@ -649,7 +640,7 @@ TEST_IMPL(fs_event_watch_file_exact_path) { remove("watch_dir/file.jsx"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -673,7 +664,7 @@ TEST_IMPL(fs_event_watch_file_twice) { ASSERT(0 == uv_timer_start(&timer, timer_cb_watch_twice, 10, 0)); ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -691,7 +682,7 @@ TEST_IMPL(fs_event_watch_file_current_dir) { remove("watch_file"); create_file("watch_file"); #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_12) - /* Empirically, kevent seems to (sometimes) report the preceeding + /* Empirically, kevent seems to (sometimes) report the preceding * create_file events prior to macOS 10.11.6 in the subsequent fs_event_start * So let the system settle before running the test. */ uv_sleep(1100); @@ -728,7 +719,7 @@ TEST_IMPL(fs_event_watch_file_current_dir) { /* Cleanup */ remove("watch_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -754,7 +745,7 @@ TEST_IMPL(fs_event_watch_file_root_dir) { uv_close((uv_handle_t*) &fs_event, NULL); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } #endif @@ -793,7 +784,7 @@ TEST_IMPL(fs_event_no_callback_after_close) { remove("watch_dir/file1"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -830,7 +821,7 @@ TEST_IMPL(fs_event_no_callback_on_close) { remove("watch_dir/file1"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -868,7 +859,7 @@ TEST_IMPL(fs_event_immediate_close) { ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -903,7 +894,7 @@ TEST_IMPL(fs_event_close_with_pending_event) { remove("watch_dir/file"); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -941,7 +932,7 @@ TEST_IMPL(fs_event_close_with_pending_delete_event) { /* Clean up */ remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -982,7 +973,7 @@ TEST_IMPL(fs_event_close_in_callback) { fs_event_unlink_files(NULL); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1017,7 +1008,7 @@ TEST_IMPL(fs_event_start_and_close) { ASSERT(close_cb_called == 2); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1070,7 +1061,7 @@ TEST_IMPL(fs_event_getpath) { } remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1159,7 +1150,7 @@ TEST_IMPL(fs_event_error_reporting) { } while (i-- != 0); remove("watch_dir/"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1168,7 +1159,7 @@ TEST_IMPL(fs_event_error_reporting) { TEST_IMPL(fs_event_error_reporting) { /* No-op, needed only for FSEvents backend */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1191,7 +1182,7 @@ TEST_IMPL(fs_event_watch_invalid_path) { r = uv_fs_event_start(&fs_event, fs_event_cb_file, "", 0); ASSERT(r != 0); ASSERT(uv_is_active((uv_handle_t*) &fs_event) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1237,6 +1228,6 @@ TEST_IMPL(fs_event_stop_in_cb) { remove(path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-fs-open-flags.c b/deps/uv/test/test-fs-open-flags.c index 
372afe1397572a..ea9be25afc1593 100644 --- a/deps/uv/test/test-fs-open-flags.c +++ b/deps/uv/test/test-fs-open-flags.c @@ -424,7 +424,7 @@ TEST_IMPL(fs_open_flags) { /* Cleanup. */ rmdir(empty_dir); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-fs-poll.c b/deps/uv/test/test-fs-poll.c index 76fe6fc3957192..af486023d10c87 100644 --- a/deps/uv/test/test-fs-poll.c +++ b/deps/uv/test/test-fs-poll.c @@ -164,7 +164,7 @@ TEST_IMPL(fs_poll) { ASSERT(timer_cb_called == 2); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -192,7 +192,7 @@ TEST_IMPL(fs_poll_getpath) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -212,9 +212,7 @@ TEST_IMPL(fs_poll_close_request) { uv_run(&loop, UV_RUN_ONCE); ASSERT(close_cb_called == 1); - ASSERT(0 == uv_loop_close(&loop)); - - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } @@ -238,9 +236,7 @@ TEST_IMPL(fs_poll_close_request_multi_start_stop) { uv_run(&loop, UV_RUN_ONCE); ASSERT(close_cb_called == 1); - ASSERT(0 == uv_loop_close(&loop)); - - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } @@ -264,9 +260,7 @@ TEST_IMPL(fs_poll_close_request_multi_stop_start) { uv_run(&loop, UV_RUN_ONCE); ASSERT(close_cb_called == 1); - ASSERT(0 == uv_loop_close(&loop)); - - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } @@ -293,8 +287,6 @@ TEST_IMPL(fs_poll_close_request_stop_when_active) { uv_run(&loop, UV_RUN_ONCE); ASSERT(close_cb_called == 1); - ASSERT(0 == uv_loop_close(&loop)); - - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } diff --git a/deps/uv/test/test-fs-readdir.c b/deps/uv/test/test-fs-readdir.c index 6bb691784151f5..43c9edf178be98 100644 --- a/deps/uv/test/test-fs-readdir.c +++ b/deps/uv/test/test-fs-readdir.c @@ -156,7 +156,7 @@ TEST_IMPL(fs_readdir_empty_dir) { ASSERT(empty_closedir_cb_count == 1); uv_fs_rmdir(uv_default_loop(), &rmdir_req, path, NULL); uv_fs_req_cleanup(&rmdir_req); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -208,7 +208,7 @@ TEST_IMPL(fs_readdir_non_existing_dir) { ASSERT(r == 0); ASSERT(non_existing_opendir_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -258,7 +258,7 @@ TEST_IMPL(fs_readdir_file) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT(r == 0); ASSERT(file_opendir_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -457,6 +457,6 @@ TEST_IMPL(fs_readdir_non_empty_dir) { uv_fs_req_cleanup(&rmdir_req); cleanup_test_files(); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-fs.c b/deps/uv/test/test-fs.c index c879f6298483d8..f9fa20eff6b55c 100644 --- a/deps/uv/test/test-fs.c +++ b/deps/uv/test/test-fs.c @@ -37,6 +37,9 @@ # ifndef ERROR_SYMLINK_NOT_SUPPORTED # define ERROR_SYMLINK_NOT_SUPPORTED 1464 # endif +# ifndef S_IFIFO +# define S_IFIFO _S_IFIFO +# endif # define unlink _unlink # define rmdir _rmdir # define open _open @@ -219,16 +222,6 @@ static void realpath_cb(uv_fs_t* req) { char test_file_abs_buf[PATHMAX]; size_t test_file_abs_size = sizeof(test_file_abs_buf); ASSERT(req->fs_type == UV_FS_REALPATH); -#ifdef _WIN32 - /* - * Windows XP and Server 2003 don't support GetFinalPathNameByHandleW() - */ - if (req->result == UV_ENOSYS) { - realpath_cb_count++; - uv_fs_req_cleanup(req); - return; - } -#endif 
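  /* With the XP/Server 2003 fallback above removed, realpath_cb now
   * requires success unconditionally, as do the synchronous realpath and
   * symlink tests further down that drop the same UV_ENOSYS checks;
   * GetFinalPathNameByHandleW() is available on every Windows release the
   * library still supports. */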
ASSERT(req->result == 0); uv_cwd(test_file_abs_buf, &test_file_abs_size); @@ -669,6 +662,15 @@ static void stat_cb(uv_fs_t* req) { ASSERT(!req->ptr); } +static void stat_batch_cb(uv_fs_t* req) { + ASSERT(req->fs_type == UV_FS_STAT || req->fs_type == UV_FS_LSTAT); + ASSERT(req->result == 0); + ASSERT(req->ptr); + stat_cb_count++; + uv_fs_req_cleanup(req); + ASSERT(!req->ptr); +} + static void sendfile_cb(uv_fs_t* req) { ASSERT(req == &sendfile_req); @@ -730,7 +732,7 @@ TEST_IMPL(fs_file_noent) { /* TODO add EACCES test */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -756,7 +758,7 @@ TEST_IMPL(fs_file_nametoolong) { uv_run(loop, UV_RUN_DEFAULT); ASSERT(open_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -770,11 +772,10 @@ TEST_IMPL(fs_file_loop) { r = uv_fs_symlink(NULL, &req, "test_symlink", "test_symlink", 0, NULL); #ifdef _WIN32 /* - * Windows XP and Server 2003 don't support symlinks; we'll get UV_ENOTSUP. - * Starting with vista they are supported, but only when elevated, otherwise + * Symlinks are only suported but only when elevated, otherwise * we'll see UV_EPERM. */ - if (r == UV_ENOTSUP || r == UV_EPERM) + if (r == UV_EPERM) return 0; #elif defined(__MSYS__) /* MSYS2's approximation of symlinks with copies does not work for broken @@ -799,7 +800,7 @@ TEST_IMPL(fs_file_loop) { unlink("test_symlink"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -966,7 +967,7 @@ TEST_IMPL(fs_file_async) { unlink("test_file"); unlink("test_file2"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1056,7 +1057,7 @@ TEST_IMPL(fs_file_sync) { fs_file_sync(0); fs_file_sync(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1092,7 +1093,7 @@ TEST_IMPL(fs_file_write_null_buffer) { fs_file_write_null_buffer(0); fs_file_write_null_buffer(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1187,7 +1188,7 @@ TEST_IMPL(fs_async_dir) { unlink("test_dir/file2"); rmdir("test_dir"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1239,6 +1240,8 @@ static int test_sendfile(void (*setup)(int), uv_fs_cb cb, off_t expected_size) { ASSERT(r == 0); uv_fs_req_cleanup(&close_req); + memset(&s1, 0, sizeof(s1)); + memset(&s2, 0, sizeof(s2)); ASSERT(0 == stat("test_file", &s1)); ASSERT(0 == stat("test_file2", &s2)); ASSERT(s2.st_size == expected_size); @@ -1265,7 +1268,7 @@ static int test_sendfile(void (*setup)(int), uv_fs_cb cb, off_t expected_size) { unlink("test_file"); unlink("test_file2"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1313,7 +1316,7 @@ TEST_IMPL(fs_mkdtemp) { uv_fs_req_cleanup(&mkdtemp_req1); uv_fs_req_cleanup(&mkdtemp_req2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1346,6 +1349,8 @@ TEST_IMPL(fs_mkstemp) { /* Make sure that path is empty string */ ASSERT_EQ(0, strlen(mkstemp_req3.path)); + uv_fs_req_cleanup(&mkstemp_req3); + /* We can write to the opened file */ iov = uv_buf_init(test_buf, sizeof(test_buf)); r = uv_fs_write(NULL, &req, mkstemp_req1.result, &iov, 1, -1, NULL); @@ -1379,7 +1384,7 @@ TEST_IMPL(fs_mkstemp) { uv_fs_req_cleanup(&mkstemp_req1); uv_fs_req_cleanup(&mkstemp_req2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1393,6 +1398,13 @@ TEST_IMPL(fs_fstat) { struct stat t; #endif +#if defined(__s390__) && defined(__QEMU__) + /* qemu-user-s390x has this weird bug where statx() reports nanoseconds + * 
but plain fstat() does not. + */ + RETURN_SKIP("Test does not currently work in QEMU"); +#endif + /* Setup. */ unlink("test_file"); @@ -1406,6 +1418,7 @@ TEST_IMPL(fs_fstat) { uv_fs_req_cleanup(&req); #ifndef _WIN32 + memset(&t, 0, sizeof(t)); ASSERT(0 == fstat(file, &t)); ASSERT(0 == uv_fs_fstat(NULL, &req, file, NULL)); ASSERT(req.result == 0); @@ -1535,7 +1548,44 @@ TEST_IMPL(fs_fstat) { /* Cleanup. */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); + return 0; +} + + +TEST_IMPL(fs_fstat_stdio) { + int fd; + int res; + uv_fs_t req; +#ifdef _WIN32 + uv_stat_t* st; + DWORD ft; +#endif + + for (fd = 0; fd <= 2; ++fd) { + res = uv_fs_fstat(NULL, &req, fd, NULL); + ASSERT(res == 0); + ASSERT(req.result == 0); + +#ifdef _WIN32 + st = req.ptr; + ft = uv_guess_handle(fd); + switch (ft) { + case UV_TTY: + case UV_NAMED_PIPE: + ASSERT(st->st_mode == (ft == UV_TTY ? S_IFCHR : S_IFIFO)); + ASSERT(st->st_nlink == 1); + ASSERT(st->st_rdev == (ft == UV_TTY ? FILE_DEVICE_CONSOLE : FILE_DEVICE_NAMED_PIPE) << 16); + break; + default: + break; + } +#endif + + uv_fs_req_cleanup(&req); + } + + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1611,7 +1661,7 @@ TEST_IMPL(fs_access) { unlink("test_file"); rmdir("test_dir"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1709,7 +1759,7 @@ TEST_IMPL(fs_chmod) { /* Cleanup. */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1768,7 +1818,7 @@ TEST_IMPL(fs_unlink_readonly) { uv_fs_req_cleanup(&req); unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1826,7 +1876,7 @@ TEST_IMPL(fs_unlink_archive_readonly) { uv_fs_req_cleanup(&req); unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } #endif @@ -1919,7 +1969,7 @@ TEST_IMPL(fs_chown) { unlink("test_file"); unlink("test_file_link"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2005,28 +2055,61 @@ TEST_IMPL(fs_link) { unlink("test_file_link"); unlink("test_file_link2"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } TEST_IMPL(fs_readlink) { - uv_fs_t req; + /* Must return UV_ENOENT on an inexistent file */ + { + uv_fs_t req; - loop = uv_default_loop(); - ASSERT(0 == uv_fs_readlink(loop, &req, "no_such_file", dummy_cb)); - ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); - ASSERT(dummy_cb_count == 1); - ASSERT_NULL(req.ptr); - ASSERT(req.result == UV_ENOENT); - uv_fs_req_cleanup(&req); + loop = uv_default_loop(); + ASSERT(0 == uv_fs_readlink(loop, &req, "no_such_file", dummy_cb)); + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); + ASSERT(dummy_cb_count == 1); + ASSERT_NULL(req.ptr); + ASSERT(req.result == UV_ENOENT); + uv_fs_req_cleanup(&req); - ASSERT(UV_ENOENT == uv_fs_readlink(NULL, &req, "no_such_file", NULL)); - ASSERT_NULL(req.ptr); - ASSERT(req.result == UV_ENOENT); - uv_fs_req_cleanup(&req); + ASSERT(UV_ENOENT == uv_fs_readlink(NULL, &req, "no_such_file", NULL)); + ASSERT_NULL(req.ptr); + ASSERT(req.result == UV_ENOENT); + uv_fs_req_cleanup(&req); + } + + /* Must return UV_EINVAL on a non-symlink file */ + { + int r; + uv_fs_t req; + uv_file file; + + /* Setup */ + + /* Create a non-symlink file */ + r = uv_fs_open(NULL, &req, "test_file", O_RDWR | O_CREAT, + S_IWUSR | S_IRUSR, NULL); + ASSERT_GE(r, 0); + ASSERT_GE(req.result, 0); + file = req.result; + uv_fs_req_cleanup(&req); + + r = uv_fs_close(NULL, &req, file, NULL); + ASSERT_EQ(r, 0); + ASSERT_EQ(req.result, 0); + uv_fs_req_cleanup(&req); + + /* Test */ 
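  /* POSIX readlink() fails with EINVAL when the named file is not a
   * symbolic link; the test expects uv_fs_readlink() to surface that as
   * UV_EINVAL on every platform for the regular file created above. */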
+ r = uv_fs_readlink(NULL, &req, "test_file", NULL); + ASSERT_EQ(r, UV_EINVAL); + uv_fs_req_cleanup(&req); - MAKE_VALGRIND_HAPPY(); + /* Cleanup */ + unlink("test_file"); + } + + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2039,15 +2122,6 @@ TEST_IMPL(fs_realpath) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(dummy_cb_count == 1); ASSERT_NULL(req.ptr); -#ifdef _WIN32 - /* - * Windows XP and Server 2003 don't support GetFinalPathNameByHandleW() - */ - if (req.result == UV_ENOSYS) { - uv_fs_req_cleanup(&req); - RETURN_SKIP("realpath is not supported on Windows XP"); - } -#endif ASSERT(req.result == UV_ENOENT); uv_fs_req_cleanup(&req); @@ -2056,7 +2130,7 @@ TEST_IMPL(fs_realpath) { ASSERT(req.result == UV_ENOENT); uv_fs_req_cleanup(&req); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2158,15 +2232,6 @@ TEST_IMPL(fs_symlink) { uv_fs_req_cleanup(&req); r = uv_fs_realpath(NULL, &req, "test_file_symlink_symlink", NULL); -#ifdef _WIN32 - /* - * Windows XP and Server 2003 don't support GetFinalPathNameByHandleW() - */ - if (r == UV_ENOSYS) { - uv_fs_req_cleanup(&req); - RETURN_SKIP("realpath is not supported on Windows XP"); - } -#endif ASSERT(r == 0); #ifdef _WIN32 ASSERT(stricmp(req.ptr, test_file_abs_buf) == 0); @@ -2216,15 +2281,6 @@ TEST_IMPL(fs_symlink) { ASSERT(readlink_cb_count == 1); r = uv_fs_realpath(loop, &req, "test_file", realpath_cb); -#ifdef _WIN32 - /* - * Windows XP and Server 2003 don't support GetFinalPathNameByHandleW() - */ - if (r == UV_ENOSYS) { - uv_fs_req_cleanup(&req); - RETURN_SKIP("realpath is not supported on Windows XP"); - } -#endif ASSERT(r == 0); uv_run(loop, UV_RUN_DEFAULT); ASSERT(realpath_cb_count == 1); @@ -2242,7 +2298,7 @@ TEST_IMPL(fs_symlink) { unlink("test_file_symlink2"); unlink("test_file_symlink2_symlink"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2325,15 +2381,6 @@ int test_symlink_dir_impl(int type) { uv_fs_req_cleanup(&req); r = uv_fs_realpath(NULL, &req, "test_dir_symlink", NULL); -#ifdef _WIN32 - /* - * Windows XP and Server 2003 don't support GetFinalPathNameByHandleW() - */ - if (r == UV_ENOSYS) { - uv_fs_req_cleanup(&req); - RETURN_SKIP("realpath is not supported on Windows XP"); - } -#endif ASSERT(r == 0); #ifdef _WIN32 ASSERT(strlen(req.ptr) == test_dir_abs_size - 5); @@ -2396,7 +2443,7 @@ int test_symlink_dir_impl(int type) { rmdir("test_dir"); rmdir("test_dir_symlink"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2505,7 +2552,7 @@ TEST_IMPL(fs_non_symlink_reparse_point) { unlink("test_dir/test_file"); rmdir("test_dir"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2525,7 +2572,7 @@ TEST_IMPL(fs_lstat_windows_store_apps) { len = sizeof(localappdata); r = uv_os_getenv("LOCALAPPDATA", localappdata, &len); if (r == UV_ENOENT) { - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return TEST_SKIP; } ASSERT_EQ(r, 0); @@ -2536,11 +2583,11 @@ TEST_IMPL(fs_lstat_windows_store_apps) { ASSERT_GT(r, 0); if (uv_fs_opendir(loop, &req, windowsapps_path, NULL) != 0) { /* If we cannot read the directory, skip the test. 
*/ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return TEST_SKIP; } if (uv_fs_scandir(loop, &req, windowsapps_path, 0, NULL) <= 0) { - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return TEST_SKIP; } while (uv_fs_scandir_next(&req, &dirent) != UV_EOF) { @@ -2556,7 +2603,7 @@ TEST_IMPL(fs_lstat_windows_store_apps) { } ASSERT_EQ(uv_fs_lstat(loop, &stat_req, file_path, NULL), 0); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } #endif @@ -2603,7 +2650,7 @@ TEST_IMPL(fs_utime) { /* Cleanup. */ unlink(path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2642,7 +2689,7 @@ TEST_IMPL(fs_utime_round) { check_utime(path, atime, mtime, /* test_lutime */ 0); unlink(path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2673,7 +2720,7 @@ TEST_IMPL(fs_stat_root) { r = uv_fs_stat(NULL, &stat_req, "\\\\?\\C:\\", NULL); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -2736,7 +2783,7 @@ TEST_IMPL(fs_futime) { /* Cleanup. */ unlink(path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2755,8 +2802,8 @@ TEST_IMPL(fs_lutime) { loop = uv_default_loop(); unlink(path); r = uv_fs_open(NULL, &req, path, O_RDWR | O_CREAT, S_IWUSR | S_IRUSR, NULL); - ASSERT(r >= 0); - ASSERT(req.result >= 0); + ASSERT_GE(r, 0); + ASSERT_GE(req.result, 0); uv_fs_req_cleanup(&req); uv_fs_close(loop, &req, r, NULL); @@ -2772,8 +2819,8 @@ TEST_IMPL(fs_lutime) { "Symlink creation requires elevated console (with admin rights)"); } #endif - ASSERT(s == 0); - ASSERT(req.result == 0); + ASSERT_EQ(s, 0); + ASSERT_EQ(req.result, 0); uv_fs_req_cleanup(&req); /* Test the synchronous version. */ @@ -2787,12 +2834,12 @@ TEST_IMPL(fs_lutime) { r = uv_fs_lutime(NULL, &req, symlink_path, atime, mtime, NULL); #if (defined(_AIX) && !defined(_AIX71)) || \ defined(__MVS__) - ASSERT(r == UV_ENOSYS); + ASSERT_EQ(r, UV_ENOSYS); RETURN_SKIP("lutime is not implemented for z/OS and AIX versions below 7.1"); #endif - ASSERT(r == 0); + ASSERT_EQ(r, 0); lutime_cb(&req); - ASSERT(lutime_cb_count == 1); + ASSERT_EQ(lutime_cb_count, 1); /* Test the asynchronous version. */ atime = mtime = 1291404900; /* 2010-12-03 20:35:00 */ @@ -2802,15 +2849,15 @@ TEST_IMPL(fs_lutime) { checkme.path = symlink_path; r = uv_fs_lutime(loop, &req, symlink_path, atime, mtime, lutime_cb); - ASSERT(r == 0); + ASSERT_EQ(r, 0); uv_run(loop, UV_RUN_DEFAULT); - ASSERT(lutime_cb_count == 2); + ASSERT_EQ(lutime_cb_count, 2); /* Cleanup. */ unlink(path); unlink(symlink_path); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2826,7 +2873,7 @@ TEST_IMPL(fs_stat_missing_path) { ASSERT(req.result == UV_ENOENT); uv_fs_req_cleanup(&req); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2863,7 +2910,7 @@ TEST_IMPL(fs_scandir_empty_dir) { uv_fs_rmdir(NULL, &req, path, NULL); uv_fs_req_cleanup(&req); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2897,7 +2944,7 @@ TEST_IMPL(fs_scandir_non_existent_dir) { uv_run(loop, UV_RUN_DEFAULT); ASSERT(scandir_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -2919,7 +2966,25 @@ TEST_IMPL(fs_scandir_file) { uv_run(loop, UV_RUN_DEFAULT); ASSERT(scandir_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); + return 0; +} + + +/* Run in Valgrind. Should not leak when the iterator isn't exhausted. 
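 * The first scandir below points at test/fixtures/one_file, the fixture
 * directory added earlier in this patch holding a single empty file, so
 * the iterator is abandoned after one uv_fs_scandir_next() call and
 * uv_fs_req_cleanup() has to free whatever entries were never consumed.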
*/ +TEST_IMPL(fs_scandir_early_exit) { + uv_dirent_t d; + uv_fs_t req; + + ASSERT_LT(0, uv_fs_scandir(NULL, &req, "test/fixtures/one_file", 0, NULL)); + ASSERT_NE(UV_EOF, uv_fs_scandir_next(&req, &d)); + uv_fs_req_cleanup(&req); + + ASSERT_LT(0, uv_fs_scandir(NULL, &req, "test/fixtures", 0, NULL)); + ASSERT_NE(UV_EOF, uv_fs_scandir_next(&req, &d)); + uv_fs_req_cleanup(&req); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -2949,7 +3014,7 @@ TEST_IMPL(fs_open_dir) { uv_run(loop, UV_RUN_DEFAULT); ASSERT(open_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3024,7 +3089,7 @@ TEST_IMPL(fs_file_open_append) { fs_file_open_append(0); fs_file_open_append(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3093,7 +3158,7 @@ TEST_IMPL(fs_rename_to_existing_file) { unlink("test_file"); unlink("test_file2"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3151,7 +3216,7 @@ TEST_IMPL(fs_read_bufs) { fs_read_bufs(0); fs_read_bufs(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3217,7 +3282,7 @@ TEST_IMPL(fs_read_file_eof) { fs_read_file_eof(0); fs_read_file_eof(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3311,7 +3376,7 @@ TEST_IMPL(fs_write_multiple_bufs) { fs_write_multiple_bufs(0); fs_write_multiple_bufs(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3418,7 +3483,7 @@ TEST_IMPL(fs_write_alotof_bufs) { fs_write_alotof_bufs(0); fs_write_alotof_bufs(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3534,7 +3599,7 @@ TEST_IMPL(fs_write_alotof_bufs_with_offset) { fs_write_alotof_bufs_with_offset(0); fs_write_alotof_bufs_with_offset(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -3589,7 +3654,7 @@ TEST_IMPL(fs_read_dir) { /* Cleanup */ rmdir("test_dir"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3682,9 +3747,9 @@ static void test_fs_partial(int doread) { ctx.doread = doread; ctx.interval = 1000; ctx.size = sizeof(test_buf) * iovcount; - ctx.data = malloc(ctx.size); + ctx.data = calloc(ctx.size, 1); ASSERT_NOT_NULL(ctx.data); - buffer = malloc(ctx.size); + buffer = calloc(ctx.size, 1); ASSERT_NOT_NULL(buffer); for (index = 0; index < iovcount; ++index) @@ -3727,9 +3792,10 @@ static void test_fs_partial(int doread) { uv_fs_req_cleanup(&write_req); } - ASSERT(0 == memcmp(buffer, ctx.data, ctx.size)); - ASSERT(0 == uv_thread_join(&thread)); + + ASSERT_MEM_EQ(buffer, ctx.data, ctx.size); + ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(0 == close(pipe_fds[1])); @@ -3747,7 +3813,7 @@ static void test_fs_partial(int doread) { free(buffer); free(ctx.data); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); } TEST_IMPL(fs_partial_read) { @@ -3820,6 +3886,7 @@ TEST_IMPL(fs_read_write_null_arguments) { uv_run(loop, UV_RUN_DEFAULT); uv_fs_req_cleanup(&write_req); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3858,7 +3925,7 @@ TEST_IMPL(get_osfhandle_valid_handle) { /* Cleanup. */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3904,7 +3971,7 @@ TEST_IMPL(open_osfhandle_valid_handle) { /* Cleanup. 
*/ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -3944,7 +4011,7 @@ TEST_IMPL(fs_file_pos_after_op_with_offset) { /* Cleanup */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -4043,7 +4110,7 @@ TEST_IMPL(fs_file_pos_write) { fs_file_pos_write(0); fs_file_pos_write(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -4083,7 +4150,7 @@ TEST_IMPL(fs_file_pos_append) { fs_file_pos_append(0); fs_file_pos_append(UV_FS_O_FILEMAP); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -4243,7 +4310,7 @@ TEST_IMPL(fs_exclusive_sharing_mode) { /* Cleanup */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -4290,7 +4357,7 @@ TEST_IMPL(fs_file_flag_no_buffering) { /* Cleanup */ unlink("test_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -4376,7 +4443,7 @@ TEST_IMPL(fs_open_readonly_acl) { unlink("test_file_icacls"); uv_os_free_passwd(&pwd); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } #endif @@ -4461,6 +4528,7 @@ TEST_IMPL(fs_statfs) { uv_run(loop, UV_RUN_DEFAULT); ASSERT(statfs_cb_count == 2); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -4481,3 +4549,27 @@ TEST_IMPL(fs_get_system_error) { return 0; } + +TEST_IMPL(fs_stat_batch_multiple) { + uv_fs_t req[300]; + int r; + int i; + + rmdir("test_dir"); + + r = uv_fs_mkdir(NULL, &mkdir_req, "test_dir", 0755, NULL); + ASSERT_EQ(r, 0); + + loop = uv_default_loop(); + + for (i = 0; i < (int) ARRAY_SIZE(req); ++i) { + r = uv_fs_stat(loop, &req[i], "test_dir", stat_batch_cb); + ASSERT_EQ(r, 0); + } + + uv_run(loop, UV_RUN_DEFAULT); + ASSERT_EQ(stat_cb_count, ARRAY_SIZE(req)); + + MAKE_VALGRIND_HAPPY(loop); + return 0; +} diff --git a/deps/uv/test/test-get-currentexe.c b/deps/uv/test/test-get-currentexe.c index dc239cc89d1435..becaf5c1bb8613 100644 --- a/deps/uv/test/test-get-currentexe.c +++ b/deps/uv/test/test-get-currentexe.c @@ -35,6 +35,9 @@ TEST_IMPL(get_currentexe) { #if defined(__QEMU__) RETURN_SKIP("Test does not currently work in QEMU"); #endif +#if defined(__OpenBSD__) + RETURN_SKIP("Test does not currently work in OpenBSD"); +#endif char buffer[PATHMAX]; char path[PATHMAX]; diff --git a/deps/uv/test/test-get-memory.c b/deps/uv/test/test-get-memory.c index 4555ba08895e7d..9ac42c383c918b 100644 --- a/deps/uv/test/test-get-memory.c +++ b/deps/uv/test/test-get-memory.c @@ -26,11 +26,14 @@ TEST_IMPL(get_memory) { uint64_t free_mem = uv_get_free_memory(); uint64_t total_mem = uv_get_total_memory(); uint64_t constrained_mem = uv_get_constrained_memory(); + uint64_t available_mem = uv_get_available_memory(); - printf("free_mem=%llu, total_mem=%llu, constrained_mem=%llu\n", + printf("free_mem=%llu, total_mem=%llu, constrained_mem=%llu, " + "available_mem=%llu\n", (unsigned long long) free_mem, (unsigned long long) total_mem, - (unsigned long long) constrained_mem); + (unsigned long long) constrained_mem, + (unsigned long long) available_mem); ASSERT(free_mem > 0); ASSERT(total_mem > 0); @@ -40,5 +43,11 @@ TEST_IMPL(get_memory) { #else ASSERT(total_mem > free_mem); #endif + ASSERT_LE(available_mem, total_mem); + /* we'd really want to test if available <= free, but that is fragile: + * with no limit set, get_available calls and returns get_free; so if + * any memory was freed between our calls to get_free and get_available + * we would fail such a 
test test (as observed on CI). + */ return 0; } diff --git a/deps/uv/test/test-get-passwd.c b/deps/uv/test/test-get-passwd.c index d2c7431fe7f72b..d046e40c6e80f0 100644 --- a/deps/uv/test/test-get-passwd.c +++ b/deps/uv/test/test-get-passwd.c @@ -39,32 +39,32 @@ TEST_IMPL(get_passwd) { /* Test the normal case */ r = uv_os_get_passwd(&pwd); - ASSERT(r == 0); + ASSERT_EQ(r, 0); len = strlen(pwd.username); - ASSERT(len > 0); + ASSERT_GT(len, 0); #ifdef _WIN32 ASSERT_NULL(pwd.shell); #else len = strlen(pwd.shell); # ifndef __PASE__ - ASSERT(len > 0); + ASSERT_GT(len, 0); # endif #endif len = strlen(pwd.homedir); - ASSERT(len > 0); + ASSERT_GT(len, 0); #ifdef _WIN32 if (len == 3 && pwd.homedir[1] == ':') - ASSERT(pwd.homedir[2] == '\\'); + ASSERT_EQ(pwd.homedir[2], '\\'); else - ASSERT(pwd.homedir[len - 1] != '\\'); + ASSERT_NE(pwd.homedir[len - 1], '\\'); #else if (len == 1) - ASSERT(pwd.homedir[0] == '/'); + ASSERT_EQ(pwd.homedir[0], '/'); else - ASSERT(pwd.homedir[len - 1] != '/'); + ASSERT_NE(pwd.homedir[len - 1], '/'); #endif #ifdef _WIN32 @@ -95,7 +95,110 @@ TEST_IMPL(get_passwd) { /* Test invalid input */ r = uv_os_get_passwd(NULL); - ASSERT(r == UV_EINVAL); + ASSERT_EQ(r, UV_EINVAL); + + return 0; +} + + +TEST_IMPL(get_passwd2) { +/* TODO(gengjiawen): Fix test on QEMU. */ +#if defined(__QEMU__) + RETURN_SKIP("Test does not currently work in QEMU"); +#endif + + uv_passwd_t pwd; + uv_passwd_t pwd2; + size_t len; + int r; + + /* Test the normal case */ + r = uv_os_get_passwd(&pwd); + ASSERT_EQ(r, 0); + + r = uv_os_get_passwd2(&pwd2, pwd.uid); + +#ifdef _WIN32 + ASSERT_EQ(r, UV_ENOTSUP); + +#else + ASSERT_EQ(r, 0); + ASSERT_EQ(pwd.uid, pwd2.uid); + ASSERT_STR_EQ(pwd.username, pwd2.username); + ASSERT_STR_EQ(pwd.shell, pwd2.shell); + ASSERT_STR_EQ(pwd.homedir, pwd2.homedir); + uv_os_free_passwd(&pwd2); + + r = uv_os_get_passwd2(&pwd2, 0); + ASSERT_EQ(r, 0); + + len = strlen(pwd2.username); + ASSERT_GT(len, 0); + ASSERT_STR_EQ(pwd2.username, "root"); + + len = strlen(pwd2.homedir); + ASSERT_GT(len, 0); + + len = strlen(pwd2.shell); +# ifndef __PASE__ + ASSERT_GT(len, 0); +# endif + + uv_os_free_passwd(&pwd2); +#endif + + uv_os_free_passwd(&pwd); + + /* Test invalid input */ + r = uv_os_get_passwd2(NULL, pwd.uid); +#ifdef _WIN32 + ASSERT_EQ(r, UV_ENOTSUP); +#else + ASSERT_EQ(r, UV_EINVAL); +#endif + + return 0; +} + + +TEST_IMPL(get_group) { +/* TODO(gengjiawen): Fix test on QEMU. 
*/ +#if defined(__QEMU__) + RETURN_SKIP("Test does not currently work in QEMU"); +#endif + + uv_passwd_t pwd; + uv_group_t grp; + size_t len; + int r; + + r = uv_os_get_passwd(&pwd); + ASSERT_EQ(r, 0); + + r = uv_os_get_group(&grp, pwd.gid); + +#ifdef _WIN32 + ASSERT_EQ(r, UV_ENOTSUP); + +#else + ASSERT_EQ(r, 0); + ASSERT_EQ(pwd.gid, grp.gid); + + len = strlen(grp.groupname); + ASSERT_GT(len, 0); + + uv_os_free_group(&grp); +#endif + + uv_os_free_passwd(&pwd); + + /* Test invalid input */ + r = uv_os_get_group(NULL, pwd.gid); +#ifdef _WIN32 + ASSERT_EQ(r, UV_ENOTSUP); +#else + ASSERT_EQ(r, UV_EINVAL); +#endif return 0; } diff --git a/deps/uv/test/test-getaddrinfo.c b/deps/uv/test/test-getaddrinfo.c index d0b6a505016dd8..1032537ad563a0 100644 --- a/deps/uv/test/test-getaddrinfo.c +++ b/deps/uv/test/test-getaddrinfo.c @@ -106,7 +106,7 @@ TEST_IMPL(getaddrinfo_fail) { ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(fail_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -127,7 +127,7 @@ TEST_IMPL(getaddrinfo_fail_sync) { NULL)); uv_freeaddrinfo(req.addrinfo); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -153,7 +153,7 @@ TEST_IMPL(getaddrinfo_basic) { ASSERT(getaddrinfo_cbs == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -173,7 +173,7 @@ TEST_IMPL(getaddrinfo_basic_sync) { NULL)); uv_freeaddrinfo(req.addrinfo); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -210,6 +210,6 @@ TEST_IMPL(getaddrinfo_concurrent) { ASSERT(callback_counts[i] == 1); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-gethostname.c b/deps/uv/test/test-gethostname.c index 1a9816d43c619e..dc29cd69234a13 100644 --- a/deps/uv/test/test-gethostname.c +++ b/deps/uv/test/test-gethostname.c @@ -43,7 +43,7 @@ TEST_IMPL(gethostname) { enobufs_size = 1; buf[0] = '\0'; r = uv_os_gethostname(buf, &enobufs_size); - ASSERT(r == UV_ENOBUFS); + ASSERT_EQ(r, UV_ENOBUFS); ASSERT(buf[0] == '\0'); ASSERT(enobufs_size > 1); diff --git a/deps/uv/test/test-getnameinfo.c b/deps/uv/test/test-getnameinfo.c index 2bfedd3a39b233..cea57b012f1322 100644 --- a/deps/uv/test/test-getnameinfo.c +++ b/deps/uv/test/test-getnameinfo.c @@ -65,7 +65,7 @@ TEST_IMPL(getnameinfo_basic_ip4) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -86,7 +86,7 @@ TEST_IMPL(getnameinfo_basic_ip4_sync) { ASSERT(req.host[0] != '\0'); ASSERT(req.service[0] != '\0'); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -111,6 +111,6 @@ TEST_IMPL(getnameinfo_basic_ip6) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-getsockname.c b/deps/uv/test/test-getsockname.c index 6e0f8c18982eec..1d4d9f12bdaab3 100644 --- a/deps/uv/test/test-getsockname.c +++ b/deps/uv/test/test-getsockname.c @@ -337,7 +337,7 @@ TEST_IMPL(getsockname_tcp) { ASSERT(getsocknamecount_tcp == 3); ASSERT(getpeernamecount == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -357,6 +357,6 @@ TEST_IMPL(getsockname_udp) { ASSERT(udp.send_queue_size == 0); ASSERT(udpServer.send_queue_size == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-getters-setters.c b/deps/uv/test/test-getters-setters.c index 
2a37122df3f6aa..9869f7b9da8b1f 100644 --- a/deps/uv/test/test-getters-setters.c +++ b/deps/uv/test/test-getters-setters.c @@ -68,6 +68,7 @@ TEST_IMPL(getters_setters) { pipe = malloc(uv_handle_size(UV_NAMED_PIPE)); r = uv_pipe_init(loop, pipe, 0); + ASSERT(r == 0); ASSERT(uv_handle_get_type((uv_handle_t*)pipe) == UV_NAMED_PIPE); ASSERT(uv_handle_get_loop((uv_handle_t*)pipe) == loop); diff --git a/deps/uv/test/test-handle-fileno.c b/deps/uv/test/test-handle-fileno.c index 8a093e2ea46e2c..6c4c2b6601d4a3 100644 --- a/deps/uv/test/test-handle-fileno.c +++ b/deps/uv/test/test-handle-fileno.c @@ -120,6 +120,6 @@ TEST_IMPL(handle_fileno) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-hrtime.c b/deps/uv/test/test-hrtime.c index 9d461d9623d660..854a482f23c189 100644 --- a/deps/uv/test/test-hrtime.c +++ b/deps/uv/test/test-hrtime.c @@ -50,3 +50,16 @@ TEST_IMPL(hrtime) { } return 0; } + + +TEST_IMPL(clock_gettime) { + uv_timespec64_t t; + + ASSERT_EQ(UV_EINVAL, uv_clock_gettime(1337, &t)); + ASSERT_EQ(UV_EFAULT, uv_clock_gettime(1337, NULL)); + ASSERT_EQ(0, uv_clock_gettime(UV_CLOCK_MONOTONIC, &t)); + ASSERT_EQ(0, uv_clock_gettime(UV_CLOCK_REALTIME, &t)); + ASSERT_GT(1682500000000ll, t.tv_sec); /* 2023-04-26T09:06:40.000Z */ + + return 0; +} diff --git a/deps/uv/test/test-idle.c b/deps/uv/test/test-idle.c index 427cf545beb921..e3f4c2bcb659f9 100644 --- a/deps/uv/test/test-idle.c +++ b/deps/uv/test/test-idle.c @@ -94,7 +94,7 @@ TEST_IMPL(idle_starvation) { ASSERT(timer_cb_called == 1); ASSERT(close_cb_called == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -120,6 +120,6 @@ TEST_IMPL(idle_check) { ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_ONCE)); ASSERT_EQ(2, close_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-idna.c b/deps/uv/test/test-idna.c index f4fad9653df2cf..9b7002819fa354 100644 --- a/deps/uv/test/test-idna.c +++ b/deps/uv/test/test-idna.c @@ -104,13 +104,13 @@ TEST_IMPL(utf8_decode1_overrun) { p = b; b[0] = 0x7F; ASSERT_EQ(0x7F, uv__utf8_decode1(&p, b + 1)); - ASSERT_EQ(p, b + 1); + ASSERT_PTR_EQ(p, b + 1); /* Multi-byte. 
*/ p = b; b[0] = 0xC0; ASSERT_EQ((unsigned) -1, uv__utf8_decode1(&p, b + 1)); - ASSERT_EQ(p, b + 1); + ASSERT_PTR_EQ(p, b + 1); return 0; } diff --git a/deps/uv/test/test-ip-name.c b/deps/uv/test/test-ip-name.c index 1cb1b6058348ec..006095f5f9731b 100644 --- a/deps/uv/test/test-ip-name.c +++ b/deps/uv/test/test-ip-name.c @@ -51,7 +51,7 @@ TEST_IMPL(ip_name) { ASSERT_EQ(0, uv_ip6_addr("fe80::2acf:daff:fedd:342a", TEST_PORT, addr6)); ASSERT_EQ(0, uv_ip6_name(addr6, dst, INET6_ADDRSTRLEN)); ASSERT_EQ(0, strcmp("fe80::2acf:daff:fedd:342a", dst)); - + ASSERT_EQ(0, uv_ip_name(addr, dst, INET6_ADDRSTRLEN)); ASSERT_EQ(0, strcmp("fe80::2acf:daff:fedd:342a", dst)); @@ -60,6 +60,6 @@ TEST_IMPL(ip_name) { /* size is not a concern here */ ASSERT_EQ(UV_EAFNOSUPPORT, uv_ip_name(addr, dst, INET6_ADDRSTRLEN)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ip4-addr.c b/deps/uv/test/test-ip4-addr.c index dfefb0f914a6ef..722ffb390a99e4 100644 --- a/deps/uv/test/test-ip4-addr.c +++ b/deps/uv/test/test-ip4-addr.c @@ -50,6 +50,6 @@ TEST_IMPL(ip4_addr) { ASSERT(UV_EAFNOSUPPORT == uv_inet_pton(42, "127.0.0.1", &addr.sin_addr.s_addr)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ip6-addr.c b/deps/uv/test/test-ip6-addr.c index 8036c4b171237f..8f0c18601ba815 100644 --- a/deps/uv/test/test-ip6-addr.c +++ b/deps/uv/test/test-ip6-addr.c @@ -103,7 +103,7 @@ TEST_IMPL(ip6_addr_link_local) { fflush(stderr); ASSERT(0 == uv_ip6_addr(scoped_addr, TEST_PORT, &addr)); - fprintf(stderr, "Got scope_id 0x%02x\n", addr.sin6_scope_id); + fprintf(stderr, "Got scope_id 0x%2x\n", (unsigned)addr.sin6_scope_id); fflush(stderr); ASSERT(iface_index == addr.sin6_scope_id); } @@ -113,7 +113,7 @@ TEST_IMPL(ip6_addr_link_local) { scoped_addr_len = sizeof(scoped_addr); ASSERT(0 != uv_if_indextoname((unsigned int)-1, scoped_addr, &scoped_addr_len)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -154,7 +154,7 @@ TEST_IMPL(ip6_pton) { GOOD_ADDR_LIST(TEST_GOOD) BAD_ADDR_LIST(TEST_BAD) - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ipc-heavy-traffic-deadlock-bug.c b/deps/uv/test/test-ipc-heavy-traffic-deadlock-bug.c index 89b977d2c34080..f239d1fc01fb7e 100644 --- a/deps/uv/test/test-ipc-heavy-traffic-deadlock-bug.c +++ b/deps/uv/test/test-ipc-heavy-traffic-deadlock-bug.c @@ -137,7 +137,7 @@ TEST_IMPL(ipc_heavy_traffic_deadlock_bug) { spawn_helper(&pipe, &process, "ipc_helper_heavy_traffic_deadlock_bug"); do_writes_and_reads((uv_stream_t*) &pipe); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(pipe.loop); return 0; } @@ -154,6 +154,6 @@ int ipc_helper_heavy_traffic_deadlock_bug(void) { do_writes_and_reads((uv_stream_t*) &pipe); uv_sleep(100); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ipc-send-recv.c b/deps/uv/test/test-ipc-send-recv.c index 8a0e9708f02429..48eea7286b87d9 100644 --- a/deps/uv/test/test-ipc-send-recv.c +++ b/deps/uv/test/test-ipc-send-recv.c @@ -76,10 +76,12 @@ static int write2_cb_called; static void alloc_cb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { - /* we're not actually reading anything so a small buffer is okay */ - static char slab[8]; - buf->base = slab; - buf->len = sizeof(slab); + /* We're not actually reading anything so a small buffer is okay + * but it needs to be heap-allocated to appease TSan. 
+ */ + buf->len = 8; + buf->base = malloc(buf->len); + ASSERT_NOT_NULL(buf->base); } @@ -91,6 +93,8 @@ static void recv_cb(uv_stream_t* handle, int r; union handles* recv; + free(buf->base); + pipe = (uv_pipe_t*) handle; ASSERT(pipe == &ctx.channel); @@ -219,7 +223,7 @@ static int run_ipc_send_recv_pipe(int inprocess) { r = run_test(inprocess); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -260,7 +264,7 @@ static int run_ipc_send_recv_tcp(int inprocess) { r = run_test(inprocess); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -304,6 +308,8 @@ static void read_cb(uv_stream_t* handle, union handles* recv; uv_write_t* write_req; + free(rdbuf->base); + if (nread == UV_EOF || nread == UV_ECONNABORTED) { return; } @@ -410,7 +416,7 @@ int ipc_send_recv_helper(void) { r = run_ipc_send_recv_helper(uv_default_loop(), 0); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ipc.c b/deps/uv/test/test-ipc.c index cf131ffd193f41..7ec6ec9ce3af5b 100644 --- a/deps/uv/test/test-ipc.c +++ b/deps/uv/test/test-ipc.c @@ -424,7 +424,7 @@ static int run_ipc_test(const char* helper, uv_read_cb read_cb) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(r, 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -489,7 +489,7 @@ TEST_IMPL(listen_with_simultaneous_accepts) { ASSERT_EQ(r, 0); ASSERT_EQ(server.reqs_pending, 32); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -514,7 +514,7 @@ TEST_IMPL(listen_no_simultaneous_accepts) { ASSERT_EQ(r, 0); ASSERT_EQ(server.reqs_pending, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -721,7 +721,7 @@ int ipc_helper(int listen_after_write) { ASSERT_EQ(connection_accepted, 1); ASSERT_EQ(close_cb_called, 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -774,7 +774,7 @@ int ipc_helper_tcp_connection(void) { ASSERT_EQ(tcp_conn_write_cb_called, 1); ASSERT_EQ(close_cb_called, 4); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -820,7 +820,7 @@ int ipc_helper_bind_twice(void) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(r, 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -852,6 +852,6 @@ int ipc_helper_send_zero(void) { ASSERT_EQ(send_zero_write, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-list.h b/deps/uv/test/test-list.h index b19c10c7e40c77..68c9c1171a7fe8 100644 --- a/deps/uv/test/test-list.h +++ b/deps/uv/test/test-list.h @@ -105,6 +105,7 @@ TEST_DECLARE (tcp_write_after_connect) TEST_DECLARE (tcp_writealot) TEST_DECLARE (tcp_write_fail) TEST_DECLARE (tcp_try_write) +TEST_DECLARE (tcp_write_in_a_row) TEST_DECLARE (tcp_try_write_error) TEST_DECLARE (tcp_write_queue_order) TEST_DECLARE (tcp_open) @@ -184,6 +185,7 @@ TEST_DECLARE (udp_open) TEST_DECLARE (udp_open_twice) TEST_DECLARE (udp_open_bound) TEST_DECLARE (udp_open_connect) +TEST_DECLARE (udp_recv_in_a_row) #ifndef _WIN32 TEST_DECLARE (udp_send_unix) #endif @@ -192,6 +194,7 @@ TEST_DECLARE (udp_try_send) TEST_DECLARE (pipe_bind_error_addrinuse) TEST_DECLARE (pipe_bind_error_addrnotavail) TEST_DECLARE (pipe_bind_error_inval) +TEST_DECLARE (pipe_connect_close_multiple) TEST_DECLARE (pipe_connect_multiple) TEST_DECLARE (pipe_listen_without_bind) 
TEST_DECLARE (pipe_bind_or_listen_error_after_close) @@ -227,6 +230,8 @@ TEST_DECLARE (timer_from_check) TEST_DECLARE (timer_is_closing) TEST_DECLARE (timer_null_callback) TEST_DECLARE (timer_early_check) +TEST_DECLARE (timer_no_double_call_once) +TEST_DECLARE (timer_no_double_call_nowait) TEST_DECLARE (idle_starvation) TEST_DECLARE (idle_check) TEST_DECLARE (loop_handles) @@ -275,10 +280,13 @@ TEST_DECLARE (process_title_threadsafe) TEST_DECLARE (cwd_and_chdir) TEST_DECLARE (get_memory) TEST_DECLARE (get_passwd) +TEST_DECLARE (get_passwd2) +TEST_DECLARE (get_group) TEST_DECLARE (handle_fileno) TEST_DECLARE (homedir) TEST_DECLARE (tmpdir) TEST_DECLARE (hrtime) +TEST_DECLARE (clock_gettime) TEST_DECLARE (getaddrinfo_fail) TEST_DECLARE (getaddrinfo_fail_sync) TEST_DECLARE (getaddrinfo_basic) @@ -344,6 +352,7 @@ TEST_DECLARE (fs_async_sendfile_nodata) TEST_DECLARE (fs_mkdtemp) TEST_DECLARE (fs_mkstemp) TEST_DECLARE (fs_fstat) +TEST_DECLARE (fs_fstat_stdio) TEST_DECLARE (fs_access) TEST_DECLARE (fs_chmod) TEST_DECLARE (fs_copyfile) @@ -372,6 +381,7 @@ TEST_DECLARE (fs_futime) TEST_DECLARE (fs_lutime) TEST_DECLARE (fs_file_open_append) TEST_DECLARE (fs_statfs) +TEST_DECLARE (fs_stat_batch_multiple) TEST_DECLARE (fs_stat_missing_path) TEST_DECLARE (fs_read_bufs) TEST_DECLARE (fs_read_file_eof) @@ -401,6 +411,7 @@ TEST_DECLARE (fs_event_stop_in_cb) TEST_DECLARE (fs_scandir_empty_dir) TEST_DECLARE (fs_scandir_non_existent_dir) TEST_DECLARE (fs_scandir_file) +TEST_DECLARE (fs_scandir_early_exit) TEST_DECLARE (fs_open_dir) TEST_DECLARE (fs_readdir_empty_dir) TEST_DECLARE (fs_readdir_file) @@ -448,6 +459,7 @@ TEST_DECLARE (thread_rwlock) TEST_DECLARE (thread_rwlock_trylock) TEST_DECLARE (thread_create) TEST_DECLARE (thread_equal) +TEST_DECLARE (thread_affinity) TEST_DECLARE (dlerror) #if (defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))) && \ !defined(__sun) @@ -542,6 +554,8 @@ TEST_DECLARE (utf8_decode1) TEST_DECLARE (utf8_decode1_overrun) TEST_DECLARE (uname) +TEST_DECLARE (metrics_info_check) +TEST_DECLARE (metrics_pool_events) TEST_DECLARE (metrics_idle_time) TEST_DECLARE (metrics_idle_time_thread) TEST_DECLARE (metrics_idle_time_zero) @@ -669,6 +683,7 @@ TASK_LIST_START TEST_HELPER (tcp_write_fail, tcp4_echo_server) TEST_ENTRY (tcp_try_write) + TEST_ENTRY (tcp_write_in_a_row) TEST_ENTRY (tcp_try_write_error) TEST_ENTRY (tcp_write_queue_order) @@ -766,6 +781,7 @@ TASK_LIST_START TEST_ENTRY (udp_multicast_ttl) TEST_ENTRY (udp_sendmmsg_error) TEST_ENTRY (udp_try_send) + TEST_ENTRY (udp_recv_in_a_row) TEST_ENTRY (udp_open) TEST_ENTRY (udp_open_twice) @@ -778,6 +794,7 @@ TASK_LIST_START TEST_ENTRY (pipe_bind_error_addrinuse) TEST_ENTRY (pipe_bind_error_addrnotavail) TEST_ENTRY (pipe_bind_error_inval) + TEST_ENTRY (pipe_connect_close_multiple) TEST_ENTRY (pipe_connect_multiple) TEST_ENTRY (pipe_listen_without_bind) TEST_ENTRY (pipe_bind_or_listen_error_after_close) @@ -824,6 +841,8 @@ TASK_LIST_START TEST_ENTRY (timer_is_closing) TEST_ENTRY (timer_null_callback) TEST_ENTRY (timer_early_check) + TEST_ENTRY (timer_no_double_call_once) + TEST_ENTRY (timer_no_double_call_nowait) TEST_ENTRY (idle_starvation) TEST_ENTRY (idle_check) @@ -883,6 +902,8 @@ TASK_LIST_START TEST_ENTRY (get_memory) TEST_ENTRY (get_passwd) + TEST_ENTRY (get_passwd2) + TEST_ENTRY (get_group) TEST_ENTRY (get_loadavg) @@ -894,6 +915,8 @@ TASK_LIST_START TEST_ENTRY_CUSTOM (hrtime, 0, 0, 20000) + TEST_ENTRY (clock_gettime) + TEST_ENTRY_CUSTOM (getaddrinfo_fail, 0, 0, 10000) TEST_ENTRY_CUSTOM (getaddrinfo_fail_sync, 0, 0, 
10000) @@ -1015,6 +1038,7 @@ TASK_LIST_START TEST_ENTRY (fs_mkdtemp) TEST_ENTRY (fs_mkstemp) TEST_ENTRY (fs_fstat) + TEST_ENTRY (fs_fstat_stdio) TEST_ENTRY (fs_access) TEST_ENTRY (fs_chmod) TEST_ENTRY (fs_copyfile) @@ -1041,6 +1065,7 @@ TASK_LIST_START TEST_ENTRY (fs_fd_hash) #endif TEST_ENTRY (fs_statfs) + TEST_ENTRY (fs_stat_batch_multiple) TEST_ENTRY (fs_stat_missing_path) TEST_ENTRY (fs_read_bufs) TEST_ENTRY (fs_read_file_eof) @@ -1071,6 +1096,7 @@ TASK_LIST_START TEST_ENTRY (fs_scandir_empty_dir) TEST_ENTRY (fs_scandir_non_existent_dir) TEST_ENTRY (fs_scandir_file) + TEST_ENTRY (fs_scandir_early_exit) TEST_ENTRY (fs_open_dir) TEST_ENTRY (fs_readdir_empty_dir) TEST_ENTRY (fs_readdir_file) @@ -1118,6 +1144,7 @@ TASK_LIST_START TEST_ENTRY (thread_rwlock_trylock) TEST_ENTRY (thread_create) TEST_ENTRY (thread_equal) + TEST_ENTRY (thread_affinity) TEST_ENTRY (dlerror) TEST_ENTRY (ip4_addr) TEST_ENTRY (ip6_addr_link_local) @@ -1164,6 +1191,8 @@ TASK_LIST_START TEST_ENTRY (readable_on_eof) TEST_HELPER (readable_on_eof, tcp4_echo_server) + TEST_ENTRY (metrics_info_check) + TEST_ENTRY (metrics_pool_events) TEST_ENTRY (metrics_idle_time) TEST_ENTRY (metrics_idle_time_thread) TEST_ENTRY (metrics_idle_time_zero) diff --git a/deps/uv/test/test-loop-alive.c b/deps/uv/test/test-loop-alive.c index cf4d301930c518..76a2b04cc9d1e7 100644 --- a/deps/uv/test/test-loop-alive.c +++ b/deps/uv/test/test-loop-alive.c @@ -63,5 +63,6 @@ TEST_IMPL(loop_alive) { ASSERT(r == 0); ASSERT(!uv_loop_alive(uv_default_loop())); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-loop-close.c b/deps/uv/test/test-loop-close.c index f0f3e627f971e2..f5814796e8f3f0 100644 --- a/deps/uv/test/test-loop-close.c +++ b/deps/uv/test/test-loop-close.c @@ -62,6 +62,8 @@ static void loop_instant_close_work_cb(uv_work_t* req) { static void loop_instant_close_after_work_cb(uv_work_t* req, int status) { } +/* It's impossible to properly cleanup after this test because loop can't be + * closed while work has been queued. 
*/ TEST_IMPL(loop_instant_close) { static uv_loop_t loop; static uv_work_t req; @@ -70,6 +72,6 @@ TEST_IMPL(loop_instant_close) { &req, loop_instant_close_work_cb, loop_instant_close_after_work_cb)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-loop-handles.c b/deps/uv/test/test-loop-handles.c index 05cb8466ca626e..5d3df0245aa064 100644 --- a/deps/uv/test/test-loop-handles.c +++ b/deps/uv/test/test-loop-handles.c @@ -332,6 +332,6 @@ TEST_IMPL(loop_handles) { ASSERT(idle_2_close_cb_called == idle_2_cb_started); ASSERT(idle_2_is_active == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-loop-stop.c b/deps/uv/test/test-loop-stop.c index 14b8c11186c9b4..02aa12f1a83b59 100644 --- a/deps/uv/test/test-loop-stop.c +++ b/deps/uv/test/test-loop-stop.c @@ -67,5 +67,6 @@ TEST_IMPL(loop_stop) { ASSERT(timer_called == 10); ASSERT(prepare_called == 10); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-loop-time.c b/deps/uv/test/test-loop-time.c index 087720b9e9eb35..5d083064ae3133 100644 --- a/deps/uv/test/test-loop-time.c +++ b/deps/uv/test/test-loop-time.c @@ -30,7 +30,7 @@ TEST_IMPL(loop_update_time) { while (uv_now(uv_default_loop()) - start < 1000) ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_NOWAIT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -64,6 +64,6 @@ TEST_IMPL(loop_backend_timeout) { ASSERT_EQ(r, 0); ASSERT_EQ(uv_backend_timeout(loop), 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-metrics.c b/deps/uv/test/test-metrics.c index f527494470e920..d532f4eff49ec1 100644 --- a/deps/uv/test/test-metrics.c +++ b/deps/uv/test/test-metrics.c @@ -25,6 +25,17 @@ #define UV_NS_TO_MS 1000000 +typedef struct { + uv_fs_t open_req; + uv_fs_t write_req; + uv_fs_t close_req; +} fs_reqs_t; + +static uint64_t last_events_count; +static char test_buf[] = "test-buffer\n"; +static fs_reqs_t fs_reqs; +static int pool_events_counter; + static void timer_spin_cb(uv_timer_t* handle) { uint64_t t; @@ -37,6 +48,9 @@ static void timer_spin_cb(uv_timer_t* handle) { TEST_IMPL(metrics_idle_time) { +#if defined(__OpenBSD__) + RETURN_SKIP("Test does not currently work in OpenBSD"); +#endif const uint64_t timeout = 1000; uv_timer_t timer; uint64_t idle_time; @@ -55,10 +69,10 @@ TEST_IMPL(metrics_idle_time) { idle_time = uv_metrics_idle_time(uv_default_loop()); /* Permissive check that the idle time matches within the timeout ±500 ms. 
*/ - ASSERT((idle_time <= (timeout + 500) * UV_NS_TO_MS) && - (idle_time >= (timeout - 500) * UV_NS_TO_MS)); + ASSERT_LE(idle_time, (timeout + 500) * UV_NS_TO_MS); + ASSERT_GE(idle_time, (timeout - 500) * UV_NS_TO_MS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -116,6 +130,7 @@ static void timer_noop_cb(uv_timer_t* handle) { TEST_IMPL(metrics_idle_time_zero) { + uv_metrics_t metrics; uv_timer_t timer; int cntr; @@ -130,6 +145,248 @@ TEST_IMPL(metrics_idle_time_zero) { ASSERT_GT(cntr, 0); ASSERT_EQ(0, uv_metrics_idle_time(uv_default_loop())); - MAKE_VALGRIND_HAPPY(); + ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics)); + ASSERT_UINT64_EQ(cntr, metrics.loop_count); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} + + +static void close_cb(uv_fs_t* req) { + uv_metrics_t metrics; + + ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics)); + ASSERT_UINT64_EQ(3, metrics.loop_count); + ASSERT_UINT64_GT(metrics.events, last_events_count); + + uv_fs_req_cleanup(req); + last_events_count = metrics.events; +} + + +static void write_cb(uv_fs_t* req) { + uv_metrics_t metrics; + + ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics)); + ASSERT_UINT64_EQ(2, metrics.loop_count); + ASSERT_UINT64_GT(metrics.events, last_events_count); + ASSERT_EQ(req->result, sizeof(test_buf)); + + uv_fs_req_cleanup(req); + last_events_count = metrics.events; + + ASSERT_EQ(0, uv_fs_close(uv_default_loop(), + &fs_reqs.close_req, + fs_reqs.open_req.result, + close_cb)); +} + + +static void create_cb(uv_fs_t* req) { + uv_metrics_t metrics; + + ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics)); + /* Event count here is still 0 so not going to check. */ + ASSERT_UINT64_EQ(1, metrics.loop_count); + ASSERT_GE(req->result, 0); + + uv_fs_req_cleanup(req); + last_events_count = metrics.events; + + uv_buf_t iov = uv_buf_init(test_buf, sizeof(test_buf)); + ASSERT_EQ(0, uv_fs_write(uv_default_loop(), + &fs_reqs.write_req, + req->result, + &iov, + 1, + 0, + write_cb)); +} + + +static void prepare_cb(uv_prepare_t* handle) { + uv_metrics_t metrics; + + uv_prepare_stop(handle); + + ASSERT_EQ(0, uv_metrics_info(uv_default_loop(), &metrics)); + ASSERT_UINT64_EQ(0, metrics.loop_count); + ASSERT_UINT64_EQ(0, metrics.events); + + ASSERT_EQ(0, uv_fs_open(uv_default_loop(), + &fs_reqs.open_req, + "test_file", + O_WRONLY | O_CREAT, S_IRUSR | S_IWUSR, + create_cb)); +} + + +TEST_IMPL(metrics_info_check) { + uv_fs_t unlink_req; + uv_prepare_t prepare; + + uv_fs_unlink(NULL, &unlink_req, "test_file", NULL); + uv_fs_req_cleanup(&unlink_req); + + ASSERT_EQ(0, uv_prepare_init(uv_default_loop(), &prepare)); + ASSERT_EQ(0, uv_prepare_start(&prepare, prepare_cb)); + + ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + + uv_fs_unlink(NULL, &unlink_req, "test_file", NULL); + uv_fs_req_cleanup(&unlink_req); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} + + +static void fs_prepare_cb(uv_prepare_t* handle) { + uv_metrics_t metrics; + + ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics)); + + if (pool_events_counter == 1) + ASSERT_EQ(metrics.events, metrics.events_waiting); + + if (pool_events_counter < 7) + return; + + uv_prepare_stop(handle); + pool_events_counter = -42; +} + + +static void fs_stat_cb(uv_fs_t* req) { + uv_fs_req_cleanup(req); + pool_events_counter++; +} + + +static void fs_work_cb(uv_work_t* req) { +} + + +static void fs_after_work_cb(uv_work_t* req, int status) { + free(req); + pool_events_counter++; +} + + +static void fs_write_cb(uv_fs_t* 
req) { + uv_work_t* work1 = malloc(sizeof(*work1)); + uv_work_t* work2 = malloc(sizeof(*work2)); + pool_events_counter++; + + uv_fs_req_cleanup(req); + + ASSERT_OK(uv_queue_work(uv_default_loop(), + work1, + fs_work_cb, + fs_after_work_cb)); + ASSERT_OK(uv_queue_work(uv_default_loop(), + work2, + fs_work_cb, + fs_after_work_cb)); +} + + +static void fs_random_cb(uv_random_t* req, int status, void* buf, size_t len) { + pool_events_counter++; +} + + +static void fs_addrinfo_cb(uv_getaddrinfo_t* req, + int status, + struct addrinfo* res) { + uv_freeaddrinfo(req->addrinfo); + pool_events_counter++; +} + + +TEST_IMPL(metrics_pool_events) { + uv_buf_t iov; + uv_fs_t open_req; + uv_fs_t stat1_req; + uv_fs_t stat2_req; + uv_fs_t unlink_req; + uv_fs_t write_req; + uv_getaddrinfo_t addrinfo_req; + uv_metrics_t metrics; + uv_prepare_t prepare; + uv_random_t random_req; + int fd; + char rdata; + + ASSERT_OK(uv_loop_configure(uv_default_loop(), UV_METRICS_IDLE_TIME)); + + uv_fs_unlink(NULL, &unlink_req, "test_file", NULL); + uv_fs_req_cleanup(&unlink_req); + + ASSERT_OK(uv_prepare_init(uv_default_loop(), &prepare)); + ASSERT_OK(uv_prepare_start(&prepare, fs_prepare_cb)); + + pool_events_counter = 0; + fd = uv_fs_open(NULL, + &open_req, + "test_file", + O_WRONLY | O_CREAT, + S_IRUSR | S_IWUSR, + NULL); + ASSERT_GT(fd, 0); + uv_fs_req_cleanup(&open_req); + + iov = uv_buf_init(test_buf, sizeof(test_buf)); + ASSERT_OK(uv_fs_write(uv_default_loop(), + &write_req, + fd, + &iov, + 1, + 0, + fs_write_cb)); + ASSERT_OK(uv_fs_stat(uv_default_loop(), + &stat1_req, + "test_file", + fs_stat_cb)); + ASSERT_OK(uv_fs_stat(uv_default_loop(), + &stat2_req, + "test_file", + fs_stat_cb)); + ASSERT_OK(uv_random(uv_default_loop(), + &random_req, + &rdata, + 1, + 0, + fs_random_cb)); + ASSERT_OK(uv_getaddrinfo(uv_default_loop(), + &addrinfo_req, + fs_addrinfo_cb, + "example.invalid", + NULL, + NULL)); + + /* Sleep for a moment to hopefully force the events to complete before + * entering the event loop. */ + uv_sleep(100); + + ASSERT_OK(uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + + ASSERT_OK(uv_metrics_info(uv_default_loop(), &metrics)); + /* It's possible for uv__work_done() to execute one extra time even though the + * QUEUE has already been cleared out. This has to do with the way we use an + * uv_async to tell the event loop thread to process the worker pool QUEUE. */ + ASSERT_GE(metrics.events, 7); + /* It's possible one of the other events also got stuck in the event queue, so + * check GE instead of EQ. Reason for 4 instead of 5 is because the call to + * uv_getaddrinfo() is racey and slow. So can't guarantee that it'll always + * execute before sleep completes. 
*/ + ASSERT_GE(metrics.events_waiting, 4); + ASSERT_EQ(pool_events_counter, -42); + + uv_fs_unlink(NULL, &unlink_req, "test_file", NULL); + uv_fs_req_cleanup(&unlink_req); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-multiple-listen.c b/deps/uv/test/test-multiple-listen.c index 0b2851411a862b..bbaa9bc1ef1265 100644 --- a/deps/uv/test/test-multiple-listen.c +++ b/deps/uv/test/test-multiple-listen.c @@ -104,6 +104,6 @@ TEST_IMPL(multiple_listen) { ASSERT(connect_cb_called == 1); ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-not-readable-nor-writable-on-read-error.c b/deps/uv/test/test-not-readable-nor-writable-on-read-error.c index ae951e39893b36..823a4e91e29096 100644 --- a/deps/uv/test/test-not-readable-nor-writable-on-read-error.c +++ b/deps/uv/test/test-not-readable-nor-writable-on-read-error.c @@ -99,6 +99,6 @@ TEST_IMPL(not_readable_nor_writable_on_read_error) { ASSERT(write_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } diff --git a/deps/uv/test/test-not-writable-after-shutdown.c b/deps/uv/test/test-not-writable-after-shutdown.c index 9cd93703cea292..84e09177bd3651 100644 --- a/deps/uv/test/test-not-writable-after-shutdown.c +++ b/deps/uv/test/test-not-writable-after-shutdown.c @@ -61,9 +61,9 @@ TEST_IMPL(not_writable_after_shutdown) { connect_cb); ASSERT(r == 0); - r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); + r = uv_run(loop, UV_RUN_DEFAULT); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-osx-select.c b/deps/uv/test/test-osx-select.c index a0afda9181ebd9..00ae540b405a6c 100644 --- a/deps/uv/test/test-osx-select.c +++ b/deps/uv/test/test-osx-select.c @@ -79,7 +79,7 @@ TEST_IMPL(osx_select) { ASSERT(read_count == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -133,7 +133,7 @@ TEST_IMPL(osx_select_many_fds) { ASSERT(read_count == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-ping-pong.c b/deps/uv/test/test-ping-pong.c index c598587d112f56..f54f2ad22e59d9 100644 --- a/deps/uv/test/test-ping-pong.c +++ b/deps/uv/test/test-ping-pong.c @@ -378,7 +378,7 @@ static int run_ping_pong_test(void) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(completed_pingers, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-bind-error.c b/deps/uv/test/test-pipe-bind-error.c index aacde456543d76..88ece6ba5d8d38 100644 --- a/deps/uv/test/test-pipe-bind-error.c +++ b/deps/uv/test/test-pipe-bind-error.c @@ -67,7 +67,7 @@ TEST_IMPL(pipe_bind_error_addrinuse) { ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -88,7 +88,7 @@ TEST_IMPL(pipe_bind_error_addrnotavail) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -110,7 +110,7 @@ TEST_IMPL(pipe_bind_error_inval) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -134,7 +134,7 @@ TEST_IMPL(pipe_listen_without_bind) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -150,6 +150,6 @@ TEST_IMPL(pipe_bind_or_listen_error_after_close) { ASSERT_EQ(uv_run(uv_default_loop(), 
UV_RUN_DEFAULT), 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-close-stdout-read-stdin.c b/deps/uv/test/test-pipe-close-stdout-read-stdin.c index 126be2cc46f0e3..e0f864e9cdf653 100644 --- a/deps/uv/test/test-pipe-close-stdout-read-stdin.c +++ b/deps/uv/test/test-pipe-close-stdout-read-stdin.c @@ -101,7 +101,7 @@ TEST_IMPL(pipe_close_stdout_read_stdin) { ASSERT(WIFEXITED(status) && WEXITSTATUS(status) == 0); } - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-connect-error.c b/deps/uv/test/test-pipe-connect-error.c index 0f1e2b1c1ed538..140e7d32dafc44 100644 --- a/deps/uv/test/test-pipe-connect-error.c +++ b/deps/uv/test/test-pipe-connect-error.c @@ -43,15 +43,17 @@ static void close_cb(uv_handle_t* handle) { static void connect_cb(uv_connect_t* connect_req, int status) { - ASSERT(status == UV_ENOENT); - uv_close((uv_handle_t*)connect_req->handle, close_cb); + ASSERT_EQ(status, UV_ENOENT); + uv_close((uv_handle_t*) connect_req->handle, close_cb); connect_cb_called++; } static void connect_cb_file(uv_connect_t* connect_req, int status) { - ASSERT(status == UV_ENOTSOCK || status == UV_ECONNREFUSED); - uv_close((uv_handle_t*)connect_req->handle, close_cb); + if (status != UV_ENOTSOCK) + if (status != UV_EACCES) + ASSERT_EQ(status, UV_ECONNREFUSED); + uv_close((uv_handle_t*) connect_req->handle, close_cb); connect_cb_called++; } @@ -62,15 +64,15 @@ TEST_IMPL(pipe_connect_bad_name) { int r; r = uv_pipe_init(uv_default_loop(), &client, 0); - ASSERT(r == 0); + ASSERT_EQ(r, 0); uv_pipe_connect(&req, &client, BAD_PIPENAME, connect_cb); uv_run(uv_default_loop(), UV_RUN_DEFAULT); - ASSERT(close_cb_called == 1); - ASSERT(connect_cb_called == 1); + ASSERT_EQ(close_cb_called, 1); + ASSERT_EQ(connect_cb_called, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -82,14 +84,14 @@ TEST_IMPL(pipe_connect_to_file) { int r; r = uv_pipe_init(uv_default_loop(), &client, 0); - ASSERT(r == 0); + ASSERT_EQ(r, 0); uv_pipe_connect(&req, &client, path, connect_cb_file); uv_run(uv_default_loop(), UV_RUN_DEFAULT); - ASSERT(close_cb_called == 1); - ASSERT(connect_cb_called == 1); + ASSERT_EQ(close_cb_called, 1); + ASSERT_EQ(connect_cb_called, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-connect-multiple.c b/deps/uv/test/test-pipe-connect-multiple.c index 0a60d4a9642433..b8f417e81caff0 100644 --- a/deps/uv/test/test-pipe-connect-multiple.c +++ b/deps/uv/test/test-pipe-connect-multiple.c @@ -29,7 +29,7 @@ static int connection_cb_called = 0; static int connect_cb_called = 0; -#define NUM_CLIENTS 4 +#define NUM_CLIENTS 10 typedef struct { uv_pipe_t pipe_handle; @@ -102,6 +102,77 @@ TEST_IMPL(pipe_connect_multiple) { ASSERT(connection_cb_called == NUM_CLIENTS); ASSERT(connect_cb_called == NUM_CLIENTS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); + return 0; +} + + +static void connection_cb2(uv_stream_t* server, int status) { + int r; + uv_pipe_t* conn; + ASSERT_EQ(status, 0); + + conn = &connections[connection_cb_called]; + r = uv_pipe_init(server->loop, conn, 0); + ASSERT_EQ(r, 0); + + r = uv_accept(server, (uv_stream_t*)conn); + ASSERT_EQ(r, 0); + + uv_close((uv_handle_t*)conn, NULL); + if (++connection_cb_called == NUM_CLIENTS && + connect_cb_called == NUM_CLIENTS) { + uv_close((uv_handle_t*)&server_handle, NULL); + } +} + +static void connect_cb2(uv_connect_t* 
connect_req, int status) { + ASSERT_EQ(status, UV_ECANCELED); + if (++connect_cb_called == NUM_CLIENTS && + connection_cb_called == NUM_CLIENTS) { + uv_close((uv_handle_t*)&server_handle, NULL); + } +} + + +TEST_IMPL(pipe_connect_close_multiple) { +#if defined(NO_SELF_CONNECT) + RETURN_SKIP(NO_SELF_CONNECT); +#endif + int i; + int r; + uv_loop_t* loop; + + loop = uv_default_loop(); + + r = uv_pipe_init(loop, &server_handle, 0); + ASSERT_EQ(r, 0); + + r = uv_pipe_bind(&server_handle, TEST_PIPENAME); + ASSERT_EQ(r, 0); + + r = uv_listen((uv_stream_t*)&server_handle, 128, connection_cb2); + ASSERT_EQ(r, 0); + + for (i = 0; i < NUM_CLIENTS; i++) { + r = uv_pipe_init(loop, &clients[i].pipe_handle, 0); + ASSERT_EQ(r, 0); + uv_pipe_connect(&clients[i].conn_req, + &clients[i].pipe_handle, + TEST_PIPENAME, + connect_cb2); + } + + for (i = 0; i < NUM_CLIENTS; i++) { + uv_close((uv_handle_t*)&clients[i].pipe_handle, NULL); + } + + + uv_run(loop, UV_RUN_DEFAULT); + + ASSERT_EQ(connection_cb_called, NUM_CLIENTS); + ASSERT_EQ(connect_cb_called, NUM_CLIENTS); + + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-pipe-connect-prepare.c b/deps/uv/test/test-pipe-connect-prepare.c index 08b57cbf51094c..f7a79404048c15 100644 --- a/deps/uv/test/test-pipe-connect-prepare.c +++ b/deps/uv/test/test-pipe-connect-prepare.c @@ -78,6 +78,6 @@ TEST_IMPL(pipe_connect_on_prepare) { ASSERT(close_cb_called == 2); ASSERT(connect_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-getsockname.c b/deps/uv/test/test-pipe-getsockname.c index 79db8eba7177e0..4b0aa53b9271bd 100644 --- a/deps/uv/test/test-pipe-getsockname.c +++ b/deps/uv/test/test-pipe-getsockname.c @@ -156,7 +156,7 @@ TEST_IMPL(pipe_getsockname) { ASSERT(pipe_client_connect_cb_called == 1); ASSERT(pipe_close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -200,10 +200,10 @@ TEST_IMPL(pipe_getsockname_abstract) { close(sock); ASSERT(pipe_close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #else - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #endif } @@ -265,6 +265,6 @@ TEST_IMPL(pipe_getsockname_blocking) { CloseHandle(writeh); #endif - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-pending-instances.c b/deps/uv/test/test-pipe-pending-instances.c index b6ff911a0f2ab0..9b1bfbc9aaccc4 100644 --- a/deps/uv/test/test-pipe-pending-instances.c +++ b/deps/uv/test/test-pipe-pending-instances.c @@ -54,6 +54,6 @@ TEST_IMPL(pipe_pending_instances) { r = uv_run(loop, UV_RUN_DEFAULT); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-pipe-sendmsg.c b/deps/uv/test/test-pipe-sendmsg.c index 3bf427f8aa0634..7758b65b05cff9 100644 --- a/deps/uv/test/test-pipe-sendmsg.c +++ b/deps/uv/test/test-pipe-sendmsg.c @@ -158,14 +158,14 @@ TEST_IMPL(pipe_sendmsg) { ASSERT(ARRAY_SIZE(incoming) + 1 == close_called); close(fds[0]); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #else /* !_WIN32 */ TEST_IMPL(pipe_sendmsg) { - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-pipe-server-close.c b/deps/uv/test/test-pipe-server-close.c index 25305b397b2e05..dc20661916d889 100644 --- a/deps/uv/test/test-pipe-server-close.c +++ b/deps/uv/test/test-pipe-server-close.c @@ 
-89,6 +89,6 @@ TEST_IMPL(pipe_server_close) { ASSERT(pipe_client_connect_cb_called == 1); ASSERT(pipe_close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-pipe-set-fchmod.c b/deps/uv/test/test-pipe-set-fchmod.c index 91e476652e027f..402970e3da9922 100644 --- a/deps/uv/test/test-pipe-set-fchmod.c +++ b/deps/uv/test/test-pipe-set-fchmod.c @@ -22,6 +22,7 @@ #include "uv.h" #include "task.h" +#include TEST_IMPL(pipe_set_chmod) { uv_pipe_t pipe_handle; @@ -43,12 +44,13 @@ TEST_IMPL(pipe_set_chmod) { * successful. */ r = uv_pipe_chmod(&pipe_handle, UV_READABLE); if (r == UV_EPERM) { - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); RETURN_SKIP("Insufficient privileges to alter pipe fmode"); } ASSERT(r == 0); #ifndef _WIN32 - stat(TEST_PIPENAME, &stat_buf); + memset(&stat_buf, 0, sizeof(stat_buf)); + ASSERT_EQ(0, stat(TEST_PIPENAME, &stat_buf)); ASSERT(stat_buf.st_mode & S_IRUSR); ASSERT(stat_buf.st_mode & S_IRGRP); ASSERT(stat_buf.st_mode & S_IROTH); @@ -85,6 +87,6 @@ TEST_IMPL(pipe_set_chmod) { r = uv_pipe_chmod(&pipe_handle, UV_WRITABLE | UV_READABLE); ASSERT(r == UV_EBADF); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-pipe-set-non-blocking.c b/deps/uv/test/test-pipe-set-non-blocking.c index c780460950ea8a..1b90bca3d2ab90 100644 --- a/deps/uv/test/test-pipe-set-non-blocking.c +++ b/deps/uv/test/test-pipe-set-non-blocking.c @@ -122,6 +122,6 @@ TEST_IMPL(pipe_set_non_blocking) { fd[0] = -1; uv_barrier_destroy(&ctx.barrier); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-platform-output.c b/deps/uv/test/test-platform-output.c index 5827dca1cb2ba8..5839f52dfe5885 100644 --- a/deps/uv/test/test-platform-output.c +++ b/deps/uv/test/test-platform-output.c @@ -25,11 +25,6 @@ TEST_IMPL(platform_output) { -/* TODO(gengjiawen): Fix test on QEMU. 
*/ -#if defined(__QEMU__) - RETURN_SKIP("Test does not currently work in QEMU"); -#endif - char buffer[512]; size_t rss; size_t size; @@ -40,8 +35,10 @@ TEST_IMPL(platform_output) { uv_cpu_info_t* cpus; uv_interface_address_t* interfaces; uv_passwd_t pwd; + uv_group_t grp; uv_utsname_t uname; unsigned par; + char* const* member; int count; int i; int err; @@ -152,15 +149,38 @@ TEST_IMPL(platform_output) { uv_free_interface_addresses(interfaces, count); err = uv_os_get_passwd(&pwd); - ASSERT(err == 0); + ASSERT_EQ(err, 0); + + err = uv_os_get_group(&grp, pwd.gid); +#if defined(_WIN32) + ASSERT_EQ(err, UV_ENOTSUP); + ASSERT_EQ(pwd.uid, (unsigned long) -1); + ASSERT_EQ(pwd.gid, (unsigned long) -1); + (void) member; + grp.groupname = "ENOTSUP"; +#else + ASSERT_EQ(err, 0); + ASSERT_EQ(pwd.gid, grp.gid); +#endif printf("uv_os_get_passwd:\n"); printf(" euid: %ld\n", pwd.uid); - printf(" gid: %ld\n", pwd.gid); + printf(" gid: %ld (%s)\n", pwd.gid, grp.groupname); +#if !defined(_WIN32) + printf(" members: ["); + for (member = grp.members; *member != NULL; member++) { + printf(" %s", *member); + } + printf(" ]\n"); +#endif printf(" username: %s\n", pwd.username); - printf(" shell: %s\n", pwd.shell); + if (pwd.shell != NULL) /* Not set on Windows */ + printf(" shell: %s\n", pwd.shell); printf(" home directory: %s\n", pwd.homedir); uv_os_free_passwd(&pwd); +#if !defined(_WIN32) + uv_os_free_group(&grp); +#endif pid = uv_os_getpid(); ASSERT(pid > 0); diff --git a/deps/uv/test/test-poll-close-doesnt-corrupt-stack.c b/deps/uv/test/test-poll-close-doesnt-corrupt-stack.c index 1d7e84f60398ae..a19f42769b5788 100644 --- a/deps/uv/test/test-poll-close-doesnt-corrupt-stack.c +++ b/deps/uv/test/test-poll-close-doesnt-corrupt-stack.c @@ -108,7 +108,7 @@ TEST_IMPL(poll_close_doesnt_corrupt_stack) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #endif } diff --git a/deps/uv/test/test-poll-close.c b/deps/uv/test/test-poll-close.c index 2eccddf5b0b923..b4ad4c7834674b 100644 --- a/deps/uv/test/test-poll-close.c +++ b/deps/uv/test/test-poll-close.c @@ -68,6 +68,6 @@ TEST_IMPL(poll_close) { ASSERT(close_cb_called == NUM_SOCKETS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-poll-closesocket.c b/deps/uv/test/test-poll-closesocket.c index 1a1c364112a177..a81d0b09ff8d0d 100644 --- a/deps/uv/test/test-poll-closesocket.c +++ b/deps/uv/test/test-poll-closesocket.c @@ -86,7 +86,7 @@ TEST_IMPL(poll_closesocket) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #endif } diff --git a/deps/uv/test/test-poll-multiple-handles.c b/deps/uv/test/test-poll-multiple-handles.c index fc2205ddec74d5..1aad1ef21065b7 100644 --- a/deps/uv/test/test-poll-multiple-handles.c +++ b/deps/uv/test/test-poll-multiple-handles.c @@ -94,6 +94,6 @@ TEST_IMPL(poll_multiple_handles) { ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-poll-oob.c b/deps/uv/test/test-poll-oob.c index 77ffe31e962f10..b1ff41f5b8ad9f 100644 --- a/deps/uv/test/test-poll-oob.c +++ b/deps/uv/test/test-poll-oob.c @@ -199,7 +199,7 @@ TEST_IMPL(poll_oob) { */ ASSERT(srv_rd_check == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-poll.c b/deps/uv/test/test-poll.c index 3bc422d2795543..a0f28324e2f2ca 100644 
--- a/deps/uv/test/test-poll.c +++ b/deps/uv/test/test-poll.c @@ -589,7 +589,7 @@ static void start_poll_test(void) { #if !defined(__sun) && !defined(_AIX) && !defined(__MVS__) ASSERT(disconnects == NUM_CLIENTS * 2); #endif - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); } @@ -631,7 +631,7 @@ TEST_IMPL(poll_unidirectional) { */ TEST_IMPL(poll_bad_fdtype) { #if !defined(__DragonFly__) && !defined(__FreeBSD__) && !defined(__sun) && \ - !defined(_AIX) && !defined(__MVS__) && !defined(__FreeBSD_kernel__) && \ + !defined(_AIX) && !defined(__MVS__) && \ !defined(__OpenBSD__) && !defined(__CYGWIN__) && !defined(__MSYS__) && \ !defined(__NetBSD__) uv_poll_t poll_handle; @@ -647,7 +647,7 @@ TEST_IMPL(poll_bad_fdtype) { ASSERT(0 == close(fd)); #endif - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -668,7 +668,7 @@ TEST_IMPL(poll_nested_epoll) { ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(0 == close(fd)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif /* __linux__ */ @@ -690,7 +690,7 @@ TEST_IMPL(poll_nested_kqueue) { ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(0 == close(fd)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif /* UV_HAVE_KQUEUE */ diff --git a/deps/uv/test/test-process-title.c b/deps/uv/test/test-process-title.c index 35a14809fb3ccd..c5cff9723da4d3 100644 --- a/deps/uv/test/test-process-title.c +++ b/deps/uv/test/test-process-title.c @@ -120,7 +120,7 @@ TEST_IMPL(process_title_big_argv) { ASSERT(0 == uv_spawn(uv_default_loop(), &process, &options)); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-queue-foreach-delete.c b/deps/uv/test/test-queue-foreach-delete.c index 049ea776e34322..75d63f5cf1798b 100644 --- a/deps/uv/test/test-queue-foreach-delete.c +++ b/deps/uv/test/test-queue-foreach-delete.c @@ -198,7 +198,7 @@ TEST_IMPL(queue_foreach_delete) { ASSERT(helper_timer_cb_calls == 1); #endif - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-random.c b/deps/uv/test/test-random.c index 2e3ce4424d29eb..3ff3fa8b364824 100644 --- a/deps/uv/test/test-random.c +++ b/deps/uv/test/test-random.c @@ -70,7 +70,7 @@ TEST_IMPL(random_async) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(2 == random_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -89,6 +89,6 @@ TEST_IMPL(random_sync) { memset(zero, 0, sizeof(zero)); ASSERT(0 != memcmp(buf, zero, sizeof(zero))); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-readable-on-eof.c b/deps/uv/test/test-readable-on-eof.c index 68e845424775ca..1162f8db1d06a4 100644 --- a/deps/uv/test/test-readable-on-eof.c +++ b/deps/uv/test/test-readable-on-eof.c @@ -106,6 +106,6 @@ TEST_IMPL(readable_on_eof) { ASSERT_EQ(write_cb_called, 1); ASSERT_EQ(close_cb_called, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } diff --git a/deps/uv/test/test-ref.c b/deps/uv/test/test-ref.c index d24ea4a01e8aba..7a9a0b9315b697 100644 --- a/deps/uv/test/test-ref.c +++ b/deps/uv/test/test-ref.c @@ -101,7 +101,7 @@ static void connect_and_shutdown(uv_connect_t* req, int status) { TEST_IMPL(ref) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -113,7 +113,7 @@ 
TEST_IMPL(idle_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -124,7 +124,7 @@ TEST_IMPL(async_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -136,7 +136,7 @@ TEST_IMPL(prepare_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -148,7 +148,7 @@ TEST_IMPL(check_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -165,7 +165,7 @@ TEST_IMPL(unref_in_prepare_cb) { uv_prepare_start(&h, prepare_cb); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -176,7 +176,7 @@ TEST_IMPL(timer_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -188,7 +188,7 @@ TEST_IMPL(timer_ref2) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -203,7 +203,7 @@ TEST_IMPL(fs_event_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -215,7 +215,7 @@ TEST_IMPL(fs_poll_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -226,7 +226,7 @@ TEST_IMPL(tcp_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -238,7 +238,7 @@ TEST_IMPL(tcp_ref2) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -251,7 +251,7 @@ TEST_IMPL(tcp_ref2b) { uv_close((uv_handle_t*)&h, close_cb); uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -270,7 +270,7 @@ TEST_IMPL(tcp_ref3) { ASSERT(connect_cb_called == 1); ASSERT(shutdown_cb_called == 1); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -290,7 +290,7 @@ TEST_IMPL(tcp_ref4) { ASSERT(write_cb_called == 1); ASSERT(shutdown_cb_called == 1); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -301,7 +301,7 @@ TEST_IMPL(udp_ref) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -316,7 +316,7 @@ TEST_IMPL(udp_ref2) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -340,7 +340,7 @@ TEST_IMPL(udp_ref3) { ASSERT(req_cb_called == 1); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -351,7 +351,7 @@ TEST_IMPL(pipe_ref) { uv_unref((uv_handle_t*)&h); 
uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -363,7 +363,7 @@ TEST_IMPL(pipe_ref2) { uv_unref((uv_handle_t*)&h); uv_run(uv_default_loop(), UV_RUN_DEFAULT); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -377,7 +377,7 @@ TEST_IMPL(pipe_ref3) { ASSERT(connect_cb_called == 1); ASSERT(shutdown_cb_called == 1); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -392,7 +392,7 @@ TEST_IMPL(pipe_ref4) { ASSERT(write_cb_called == 1); ASSERT(shutdown_cb_called == 1); do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -428,7 +428,7 @@ TEST_IMPL(process_ref) { do_close(&h); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -440,6 +440,6 @@ TEST_IMPL(has_ref) { ASSERT(uv_has_ref((uv_handle_t*)&h) == 1); uv_unref((uv_handle_t*)&h); ASSERT(uv_has_ref((uv_handle_t*)&h) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-run-nowait.c b/deps/uv/test/test-run-nowait.c index 43524f636d8575..704105376faeac 100644 --- a/deps/uv/test/test-run-nowait.c +++ b/deps/uv/test/test-run-nowait.c @@ -41,5 +41,6 @@ TEST_IMPL(run_nowait) { ASSERT(r != 0); ASSERT(timer_called == 0); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-run-once.c b/deps/uv/test/test-run-once.c index 10cbf95e4adf59..ee332fa1a057d7 100644 --- a/deps/uv/test/test-run-once.c +++ b/deps/uv/test/test-run-once.c @@ -43,6 +43,6 @@ TEST_IMPL(run_once) { while (uv_run(uv_default_loop(), UV_RUN_ONCE)); ASSERT(idle_counter == NUM_TICKS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-shutdown-close.c b/deps/uv/test/test-shutdown-close.c index 78c369be2d9b6b..cb478b5fdd2939 100644 --- a/deps/uv/test/test-shutdown-close.c +++ b/deps/uv/test/test-shutdown-close.c @@ -84,7 +84,7 @@ TEST_IMPL(shutdown_close_tcp) { ASSERT(shutdown_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -103,6 +103,6 @@ TEST_IMPL(shutdown_close_pipe) { ASSERT(shutdown_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-shutdown-eof.c b/deps/uv/test/test-shutdown-eof.c index 0abab9175e9d26..9c0b85652aef12 100644 --- a/deps/uv/test/test-shutdown-eof.c +++ b/deps/uv/test/test-shutdown-eof.c @@ -182,7 +182,7 @@ TEST_IMPL(shutdown_eof) { ASSERT(called_timer_close_cb == 1); ASSERT(called_timer_cb == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-shutdown-simultaneous.c b/deps/uv/test/test-shutdown-simultaneous.c index 7de3bd42252e62..14cc443730d565 100644 --- a/deps/uv/test/test-shutdown-simultaneous.c +++ b/deps/uv/test/test-shutdown-simultaneous.c @@ -130,6 +130,6 @@ TEST_IMPL(shutdown_simultaneous) { ASSERT_EQ(got_eof, 1); ASSERT_EQ(got_q, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-shutdown-twice.c b/deps/uv/test/test-shutdown-twice.c index d7aae89914dad6..d936a70cb7d7bb 100644 --- a/deps/uv/test/test-shutdown-twice.c +++ b/deps/uv/test/test-shutdown-twice.c @@ -75,11 +75,11 @@ TEST_IMPL(shutdown_twice) { connect_cb); ASSERT(r == 0); - r = 
uv_run(uv_default_loop(), UV_RUN_DEFAULT); + r = uv_run(loop, UV_RUN_DEFAULT); ASSERT(r == 0); ASSERT(shutdown_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-signal-multiple-loops.c b/deps/uv/test/test-signal-multiple-loops.c index 7d61ff61e0d9e9..de511fc06804ef 100644 --- a/deps/uv/test/test-signal-multiple-loops.c +++ b/deps/uv/test/test-signal-multiple-loops.c @@ -50,18 +50,18 @@ enum signal_action { }; static uv_sem_t sem; -static uv_mutex_t counter_lock; -static volatile int stop = 0; +static uv_mutex_t lock; +static int stop = 0; -static volatile int signal1_cb_counter = 0; -static volatile int signal2_cb_counter = 0; -static volatile int loop_creation_counter = 0; +static int signal1_cb_counter = 0; +static int signal2_cb_counter = 0; +static int loop_creation_counter = 0; -static void increment_counter(volatile int* counter) { - uv_mutex_lock(&counter_lock); +static void increment_counter(int* counter) { + uv_mutex_lock(&lock); ++(*counter); - uv_mutex_unlock(&counter_lock); + uv_mutex_unlock(&lock); } @@ -162,6 +162,8 @@ static void signal_unexpected_cb(uv_signal_t* handle, int signum) { static void loop_creating_worker(void* context) { + int done; + (void) context; do { @@ -188,7 +190,11 @@ static void loop_creating_worker(void* context) { free(loop); increment_counter(&loop_creation_counter); - } while (!stop); + + uv_mutex_lock(&lock); + done = stop; + uv_mutex_unlock(&lock); + } while (!done); } @@ -202,8 +208,18 @@ TEST_IMPL(signal_multiple_loops) { #endif /* TODO(gengjiawen): Fix test on QEMU. */ #if defined(__QEMU__) - // See https://github.com/libuv/libuv/issues/2859 + /* See https://github.com/libuv/libuv/issues/2859 */ RETURN_SKIP("QEMU's signal emulation code is notoriously tricky"); +#endif +#if defined(__ASAN__) || defined(__MSAN__) + /* See https://github.com/libuv/libuv/issues/3956 */ + RETURN_SKIP("Test is too slow to run under ASan or MSan"); +#endif +#if defined(__TSAN__) + /* ThreadSanitizer complains - likely legitimately - about data races + * in uv__signal_compare() in src/unix/signal.c but that's pre-existing. + */ + RETURN_SKIP("Fix test under ThreadSanitizer"); #endif uv_thread_t loop_creating_threads[NUM_LOOP_CREATING_THREADS]; uv_thread_t signal_handling_threads[NUM_SIGNAL_HANDLING_THREADS]; @@ -215,7 +231,7 @@ TEST_IMPL(signal_multiple_loops) { r = uv_sem_init(&sem, 0); ASSERT(r == 0); - r = uv_mutex_init(&counter_lock); + r = uv_mutex_init(&lock); ASSERT(r == 0); /* Create a couple of threads that create a destroy loops continuously. */ @@ -272,7 +288,9 @@ TEST_IMPL(signal_multiple_loops) { } /* Tell all loop creating threads to stop. */ + uv_mutex_lock(&lock); stop = 1; + uv_mutex_unlock(&lock); /* Wait for all loop creating threads to exit. 
*/ for (i = 0; i < NUM_LOOP_CREATING_THREADS; i++) { @@ -296,7 +314,7 @@ TEST_IMPL(signal_multiple_loops) { */ ASSERT(loop_creation_counter >= NUM_LOOP_CREATING_THREADS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-signal-pending-on-close.c b/deps/uv/test/test-signal-pending-on-close.c index 428a97ef5ae30d..e0b9bc300b159e 100644 --- a/deps/uv/test/test-signal-pending-on-close.c +++ b/deps/uv/test/test-signal-pending-on-close.c @@ -88,11 +88,9 @@ TEST_IMPL(signal_pending_on_close) { ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT)); - ASSERT(0 == uv_loop_close(&loop)); - ASSERT(2 == close_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } @@ -109,10 +107,9 @@ TEST_IMPL(signal_close_loop_alive) { ASSERT(1 == uv_loop_alive(&loop)); ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT)); - ASSERT(0 == uv_loop_close(&loop)); ASSERT(1 == close_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } diff --git a/deps/uv/test/test-signal.c b/deps/uv/test/test-signal.c index c2ce5ec0e0a85e..f8222d14b4e71f 100644 --- a/deps/uv/test/test-signal.c +++ b/deps/uv/test/test-signal.c @@ -38,7 +38,7 @@ TEST_IMPL(kill_invalid_signum) { #endif ASSERT(uv_kill(pid, 4096) == UV_EINVAL); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -69,7 +69,7 @@ TEST_IMPL(win32_signum_number) { ASSERT(uv_signal_start(&signal, signum_test_cb, -1) == UV_EINVAL); ASSERT(uv_signal_start(&signal, signum_test_cb, NSIG) == UV_EINVAL); ASSERT(uv_signal_start(&signal, signum_test_cb, 1024) == UV_EINVAL); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } #else @@ -180,7 +180,7 @@ TEST_IMPL(we_get_signal) { ASSERT(tc.ncalls == NSIGNALS); ASSERT(sc.ncalls == NSIGNALS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -206,7 +206,7 @@ TEST_IMPL(we_get_signals) { for (i = 0; i < ARRAY_SIZE(tc); i++) ASSERT(tc[i].ncalls == NSIGNALS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -235,7 +235,7 @@ TEST_IMPL(we_get_signal_one_shot) { ASSERT(tc.ncalls == NSIGNALS); ASSERT(sc.ncalls == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -318,7 +318,7 @@ TEST_IMPL(we_get_signals_mixed) { ASSERT(sc[2].ncalls == 0); ASSERT(sc[3].ncalls == NSIGNALS); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-socket-buffer-size.c b/deps/uv/test/test-socket-buffer-size.c index 72f8c2524c09a7..5f072cb02be252 100644 --- a/deps/uv/test/test-socket-buffer-size.c +++ b/deps/uv/test/test-socket-buffer-size.c @@ -72,6 +72,6 @@ TEST_IMPL(socket_buffer_size) { ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-spawn.c b/deps/uv/test/test-spawn.c index 7a07482b1c98f6..182cf5ec3ebaa7 100644 --- a/deps/uv/test/test-spawn.c +++ b/deps/uv/test/test-spawn.c @@ -193,7 +193,7 @@ TEST_IMPL(spawn_fails) { uv_close((uv_handle_t*) &process, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -223,7 +223,7 @@ TEST_IMPL(spawn_fails_check_for_waitpid_cleanup) { uv_close((uv_handle_t*) &process, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -253,7 +253,7 @@ TEST_IMPL(spawn_empty_env) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - 
MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -272,7 +272,7 @@ TEST_IMPL(spawn_exit_code) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -305,7 +305,7 @@ TEST_IMPL(spawn_stdout) { printf("output is: %s", output); ASSERT(strcmp("hello world\n", output) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -359,7 +359,7 @@ TEST_IMPL(spawn_stdout_to_file) { /* Cleanup. */ unlink("stdout_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -415,7 +415,7 @@ TEST_IMPL(spawn_stdout_and_stderr_to_file) { /* Cleanup. */ unlink("stdout_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -477,7 +477,7 @@ TEST_IMPL(spawn_stdout_and_stderr_to_file2) { /* Cleanup. */ unlink("stdout_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #else RETURN_SKIP("Unix only test"); @@ -569,7 +569,7 @@ TEST_IMPL(spawn_stdout_and_stderr_to_file_swap) { unlink("stdout_file"); unlink("stderr_file"); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #else RETURN_SKIP("Unix only test"); @@ -615,7 +615,7 @@ TEST_IMPL(spawn_stdin) { ASSERT(close_cb_called == 3); /* Once for process twice for the pipe. */ ASSERT(strcmp(buffer, output) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -650,7 +650,7 @@ TEST_IMPL(spawn_stdio_greater_than_3) { printf("output from stdio[3] is: %s", output); ASSERT(strcmp("fourth stdio!\n", output) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -728,7 +728,7 @@ TEST_IMPL(spawn_tcp_server) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -750,7 +750,7 @@ TEST_IMPL(spawn_ignored_stdio) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -775,7 +775,7 @@ TEST_IMPL(spawn_and_kill) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 2); /* Once for process and once for timer. */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -815,7 +815,7 @@ TEST_IMPL(spawn_preserve_env) { printf("output is: %s", output); ASSERT(strcmp("testval", output) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -845,7 +845,7 @@ TEST_IMPL(spawn_detached) { r = uv_kill(process.pid, SIGTERM); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -903,7 +903,7 @@ TEST_IMPL(spawn_and_kill_with_std) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 5); /* process x 1, timer x 1, stdio x 3. 
*/ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -950,7 +950,7 @@ TEST_IMPL(spawn_and_ping) { ASSERT(exit_cb_called == 1); ASSERT(strcmp(output, "TEST") == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -997,7 +997,7 @@ TEST_IMPL(spawn_same_stdout_stderr) { ASSERT(exit_cb_called == 1); ASSERT(strcmp(output, "TEST") == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1029,7 +1029,7 @@ TEST_IMPL(spawn_closed_process_io) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 2); /* process, child stdin */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1083,7 +1083,7 @@ TEST_IMPL(kill) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1135,7 +1135,7 @@ TEST_IMPL(spawn_detect_pipe_name_collisions_on_windows) { printf("output is: %s", output); ASSERT(strcmp("hello world\n", output) == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1363,7 +1363,7 @@ TEST_IMPL(spawn_with_an_odd_path) { uv_close((uv_handle_t*) &process, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -1406,7 +1406,7 @@ TEST_IMPL(spawn_setuid_setgid) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -1459,7 +1459,7 @@ TEST_IMPL(spawn_setuid_fails) { ASSERT(close_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1504,7 +1504,7 @@ TEST_IMPL(spawn_setgid_fails) { ASSERT(close_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -1535,7 +1535,7 @@ TEST_IMPL(spawn_setuid_fails) { ASSERT(close_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1556,7 +1556,7 @@ TEST_IMPL(spawn_setgid_fails) { ASSERT(close_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } #endif @@ -1570,7 +1570,7 @@ TEST_IMPL(spawn_auto_unref) { uv_close((uv_handle_t*) &process, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); ASSERT(1 == uv_is_closing((uv_handle_t*) &process)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1633,7 +1633,7 @@ TEST_IMPL(spawn_fs_open) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 2); /* One for `in`, one for process */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1705,7 +1705,7 @@ TEST_IMPL(closed_fd_events) { ASSERT(0 == close(fd[1])); #endif - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1775,7 +1775,7 @@ TEST_IMPL(spawn_reads_child_path) { ASSERT(exit_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -1859,7 +1859,7 @@ TEST_IMPL(spawn_inherit_streams) { r = memcmp(ubuf, output, sizeof ubuf); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1883,7 +1883,7 @@ TEST_IMPL(spawn_quoted_path) { /* We test if libuv will not segfault. 
*/ uv_spawn(uv_default_loop(), &process, &options); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #endif } @@ -1922,7 +1922,7 @@ TEST_IMPL(spawn_exercise_sigchld_issue) { ASSERT_EQ(exit_cb_called, 1); ASSERT_EQ(close_cb_called, 101); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -2009,6 +2009,6 @@ TEST_IMPL(spawn_relative_path) { ASSERT_EQ(1, exit_cb_called); ASSERT_EQ(1, close_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-stdio-over-pipes.c b/deps/uv/test/test-stdio-over-pipes.c index 1aed47122772ab..1b7f17297cabb2 100644 --- a/deps/uv/test/test-stdio-over-pipes.c +++ b/deps/uv/test/test-stdio-over-pipes.c @@ -145,7 +145,7 @@ static void test_stdio_over_pipes(int overlapped) { r = uv_read_start((uv_stream_t*) &out, on_alloc, on_read); ASSERT(r == 0); - r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); + r = uv_run(loop, UV_RUN_DEFAULT); ASSERT(r == 0); ASSERT(on_read_cb_called > 1); @@ -155,7 +155,7 @@ static void test_stdio_over_pipes(int overlapped) { ASSERT(memcmp("hello world\nhello world\n", output, 24) == 0); ASSERT(output_used == 24); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); } TEST_IMPL(stdio_over_pipes) { @@ -294,6 +294,6 @@ int stdio_over_pipes_helper(void) { ASSERT(on_pipe_read_called == 2); ASSERT(close_cb_called == 4); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-alloc-cb-fail.c b/deps/uv/test/test-tcp-alloc-cb-fail.c index b6f4ca38850bac..a1b5e84d3b13dd 100644 --- a/deps/uv/test/test-tcp-alloc-cb-fail.c +++ b/deps/uv/test/test-tcp-alloc-cb-fail.c @@ -118,6 +118,6 @@ TEST_IMPL(tcp_alloc_cb_fail) { ASSERT(connection_cb_called == 1); ASSERT(close_cb_called == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-bind-error.c b/deps/uv/test/test-tcp-bind-error.c index c3ca6ec824a948..edb44c21459261 100644 --- a/deps/uv/test/test-tcp-bind-error.c +++ b/deps/uv/test/test-tcp-bind-error.c @@ -72,7 +72,7 @@ TEST_IMPL(tcp_bind_error_addrinuse_connect) { ASSERT(connect_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -105,7 +105,7 @@ TEST_IMPL(tcp_bind_error_addrinuse_listen) { ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -130,7 +130,7 @@ TEST_IMPL(tcp_bind_error_addrnotavail_1) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -153,7 +153,7 @@ TEST_IMPL(tcp_bind_error_addrnotavail_2) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -178,7 +178,7 @@ TEST_IMPL(tcp_bind_error_fault) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -206,7 +206,7 @@ TEST_IMPL(tcp_bind_error_inval) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -223,7 +223,7 @@ TEST_IMPL(tcp_bind_localhost_ok) { r = uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -240,7 +240,7 @@ TEST_IMPL(tcp_bind_invalid_flags) { r = uv_tcp_bind(&server, (const struct sockaddr*) &addr, UV_TCP_IPV6ONLY); ASSERT(r == UV_EINVAL); - MAKE_VALGRIND_HAPPY(); + 
MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -254,7 +254,7 @@ TEST_IMPL(tcp_listen_without_bind) { r = uv_listen((uv_stream_t*)&server, 128, NULL); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -294,7 +294,7 @@ TEST_IMPL(tcp_bind_writable_flags) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -312,6 +312,6 @@ TEST_IMPL(tcp_bind_or_listen_error_after_close) { ASSERT_EQ(uv_tcp_bind(&tcp, (struct sockaddr*) &addr, 0), UV_EINVAL); ASSERT_EQ(uv_listen((uv_stream_t*) &tcp, 5, NULL), UV_EINVAL); ASSERT_EQ(uv_run(uv_default_loop(), UV_RUN_DEFAULT), 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-bind6-error.c b/deps/uv/test/test-tcp-bind6-error.c index 86181b708e3fd2..656ebe34e297da 100644 --- a/deps/uv/test/test-tcp-bind6-error.c +++ b/deps/uv/test/test-tcp-bind6-error.c @@ -66,7 +66,7 @@ TEST_IMPL(tcp_bind6_error_addrinuse) { ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -92,7 +92,7 @@ TEST_IMPL(tcp_bind6_error_addrnotavail) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -120,7 +120,7 @@ TEST_IMPL(tcp_bind6_error_fault) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -151,7 +151,7 @@ TEST_IMPL(tcp_bind6_error_inval) { ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -171,6 +171,6 @@ TEST_IMPL(tcp_bind6_localhost_ok) { r = uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-close-accept.c b/deps/uv/test/test-tcp-close-accept.c index 624262bcfe9000..b255cfbd9f362e 100644 --- a/deps/uv/test/test-tcp-close-accept.c +++ b/deps/uv/test/test-tcp-close-accept.c @@ -187,7 +187,7 @@ TEST_IMPL(tcp_close_accept) { ASSERT(ARRAY_SIZE(tcp_outgoing) == write_cb_called); ASSERT(1 == read_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-close-after-read-timeout.c b/deps/uv/test/test-tcp-close-after-read-timeout.c index 493492dba6ad4e..098e405a87529c 100644 --- a/deps/uv/test/test-tcp-close-after-read-timeout.c +++ b/deps/uv/test/test-tcp-close-after-read-timeout.c @@ -178,6 +178,6 @@ TEST_IMPL(tcp_close_after_read_timeout) { ASSERT_EQ(read_cb_called, 1); ASSERT_EQ(on_close_called, 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-close-reset.c b/deps/uv/test/test-tcp-close-reset.c index 66dfc82eb47838..7415646996cd9f 100644 --- a/deps/uv/test/test-tcp-close-reset.c +++ b/deps/uv/test/test-tcp-close-reset.c @@ -223,7 +223,7 @@ TEST_IMPL(tcp_close_reset_client) { ASSERT(close_cb_called == 1); ASSERT(shutdown_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -250,7 +250,7 @@ TEST_IMPL(tcp_close_reset_client_after_shutdown) { ASSERT(close_cb_called == 0); ASSERT(shutdown_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -277,7 +277,7 @@ TEST_IMPL(tcp_close_reset_accepted) { ASSERT(close_cb_called == 1); ASSERT(shutdown_cb_called == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -304,7 +304,7 @@ 
TEST_IMPL(tcp_close_reset_accepted_after_shutdown) { ASSERT(close_cb_called == 0); ASSERT(shutdown_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -331,6 +331,6 @@ TEST_IMPL(tcp_close_reset_accepted_after_socket_shutdown) { ASSERT_EQ(close_cb_called, 1); ASSERT_EQ(shutdown_cb_called, 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-close-while-connecting.c b/deps/uv/test/test-tcp-close-while-connecting.c index 8d0b8270645c76..490413891bbc0c 100644 --- a/deps/uv/test/test-tcp-close-while-connecting.c +++ b/deps/uv/test/test-tcp-close-while-connecting.c @@ -88,7 +88,7 @@ TEST_IMPL(tcp_close_while_connecting) { ASSERT(timer1_cb_called == 1); ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); if (netunreach_errors > 0) RETURN_SKIP("Network unreachable."); diff --git a/deps/uv/test/test-tcp-close.c b/deps/uv/test/test-tcp-close.c index 5a7bd6893bf479..6879bae20f89ad 100644 --- a/deps/uv/test/test-tcp-close.c +++ b/deps/uv/test/test-tcp-close.c @@ -131,6 +131,6 @@ TEST_IMPL(tcp_close) { ASSERT(write_cb_called == NUM_WRITE_REQS); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-connect-error-after-write.c b/deps/uv/test/test-tcp-connect-error-after-write.c index 3f2e3572da9be2..1800b4d6ed06d3 100644 --- a/deps/uv/test/test-tcp-connect-error-after-write.c +++ b/deps/uv/test/test-tcp-connect-error-after-write.c @@ -55,6 +55,11 @@ static void write_cb(uv_write_t* req, int status) { * Related issue: https://github.com/joyent/libuv/issues/443 */ TEST_IMPL(tcp_connect_error_after_write) { +#ifdef _WIN32 + RETURN_SKIP("This test is disabled on Windows for now. " + "See https://github.com/joyent/libuv/issues/444\n"); +#else + uv_connect_t connect_req; struct sockaddr_in addr; uv_write_t write_req; @@ -62,12 +67,6 @@ TEST_IMPL(tcp_connect_error_after_write) { uv_buf_t buf; int r; -#ifdef _WIN32 - fprintf(stderr, "This test is disabled on Windows for now.\n"); - fprintf(stderr, "See https://github.com/joyent/libuv/issues/444\n"); - return 0; /* windows slackers... 
*/ -#endif - ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); buf = uv_buf_init("TEST", 4); @@ -93,6 +92,7 @@ TEST_IMPL(tcp_connect_error_after_write) { ASSERT(write_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; +#endif } diff --git a/deps/uv/test/test-tcp-connect-error.c b/deps/uv/test/test-tcp-connect-error.c index dda30a58064f8f..9384ebce57c450 100644 --- a/deps/uv/test/test-tcp-connect-error.c +++ b/deps/uv/test/test-tcp-connect-error.c @@ -68,6 +68,6 @@ TEST_IMPL(tcp_connect_error_fault) { ASSERT(connect_cb_called == 0); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-connect-timeout.c b/deps/uv/test/test-tcp-connect-timeout.c index 0f968157127d0d..4cd83e1d38b58c 100644 --- a/deps/uv/test/test-tcp-connect-timeout.c +++ b/deps/uv/test/test-tcp-connect-timeout.c @@ -86,7 +86,7 @@ TEST_IMPL(tcp_connect_timeout) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -153,7 +153,7 @@ TEST_IMPL(tcp_local_connect_timeout) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -191,6 +191,6 @@ TEST_IMPL(tcp6_local_connect_timeout) { r = uv_run(uv_default_loop(), UV_RUN_DEFAULT); ASSERT_EQ(r, 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-connect6-error.c b/deps/uv/test/test-tcp-connect6-error.c index 2f6e9cbce14336..8646dd56496eb0 100644 --- a/deps/uv/test/test-tcp-connect6-error.c +++ b/deps/uv/test/test-tcp-connect6-error.c @@ -66,6 +66,6 @@ TEST_IMPL(tcp_connect6_error_fault) { ASSERT(connect_cb_called == 0); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-create-socket-early.c b/deps/uv/test/test-tcp-create-socket-early.c index f2bc60d7c7c0ec..c84882dad216ef 100644 --- a/deps/uv/test/test-tcp-create-socket-early.c +++ b/deps/uv/test/test-tcp-create-socket-early.c @@ -128,7 +128,7 @@ TEST_IMPL(tcp_create_early) { uv_close((uv_handle_t*) &client, NULL); uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -173,7 +173,7 @@ TEST_IMPL(tcp_create_early_bad_bind) { uv_close((uv_handle_t*) &client, NULL); uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -190,7 +190,7 @@ TEST_IMPL(tcp_create_early_bad_domain) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -204,6 +204,6 @@ TEST_IMPL(tcp_create_early_accept) { uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-flags.c b/deps/uv/test/test-tcp-flags.c index 68afb39f456875..6856429aa1dc75 100644 --- a/deps/uv/test/test-tcp-flags.c +++ b/deps/uv/test/test-tcp-flags.c @@ -47,6 +47,6 @@ TEST_IMPL(tcp_flags) { r = uv_run(loop, UV_RUN_DEFAULT); ASSERT(r == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-oob.c b/deps/uv/test/test-tcp-oob.c index 53f8231e83e497..989454ed8787f9 100644 --- a/deps/uv/test/test-tcp-oob.c +++ b/deps/uv/test/test-tcp-oob.c @@ 
-135,7 +135,7 @@ TEST_IMPL(tcp_oob) { ASSERT(ticks == kMaxTicks); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-open.c b/deps/uv/test/test-tcp-open.c index 7e49139cd81a86..b5c5621a793679 100644 --- a/deps/uv/test/test-tcp-open.c +++ b/deps/uv/test/test-tcp-open.c @@ -277,7 +277,7 @@ TEST_IMPL(tcp_open) { ASSERT(write_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -304,7 +304,7 @@ TEST_IMPL(tcp_open_twice) { uv_close((uv_handle_t*) &client, NULL); uv_run(uv_default_loop(), UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -327,7 +327,7 @@ TEST_IMPL(tcp_open_bound) { ASSERT(0 == uv_listen((uv_stream_t*) &server, 128, NULL)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -361,7 +361,7 @@ TEST_IMPL(tcp_open_connected) { ASSERT(write_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -396,6 +396,6 @@ TEST_IMPL(tcp_write_ready) { ASSERT(write_cb_called > 0); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-read-stop-start.c b/deps/uv/test/test-tcp-read-stop-start.c index 9bccbc12fc58e6..9be12bb75b0b34 100644 --- a/deps/uv/test/test-tcp-read-stop-start.c +++ b/deps/uv/test/test-tcp-read-stop-start.c @@ -131,6 +131,6 @@ TEST_IMPL(tcp_read_stop_start) { ASSERT(read_cb_called >= 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-read-stop.c b/deps/uv/test/test-tcp-read-stop.c index 488e8fb49a904b..1754876d4f1263 100644 --- a/deps/uv/test/test-tcp-read-stop.c +++ b/deps/uv/test/test-tcp-read-stop.c @@ -70,7 +70,7 @@ TEST_IMPL(tcp_read_stop) { (const struct sockaddr*) &addr, connect_cb)); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-rst.c b/deps/uv/test/test-tcp-rst.c index ed48e741908361..b5d216ce7e8e51 100644 --- a/deps/uv/test/test-tcp-rst.c +++ b/deps/uv/test/test-tcp-rst.c @@ -76,6 +76,9 @@ static void connect_cb(uv_connect_t *req, int status) { * RST. Test checks that uv_guess_handle still works on a reset TCP handle. 
*/ TEST_IMPL(tcp_rst) { +#if defined(__OpenBSD__) + RETURN_SKIP("Test does not currently work in OpenBSD"); +#endif #ifndef _WIN32 struct sockaddr_in server_addr; int r; @@ -99,7 +102,7 @@ TEST_IMPL(tcp_rst) { ASSERT_EQ(called_connect_cb, 1); ASSERT_EQ(called_close_cb, 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; #else RETURN_SKIP("Unix only test"); diff --git a/deps/uv/test/test-tcp-shutdown-after-write.c b/deps/uv/test/test-tcp-shutdown-after-write.c index 463b4b0d79cb71..d2401e8fdb36f6 100644 --- a/deps/uv/test/test-tcp-shutdown-after-write.c +++ b/deps/uv/test/test-tcp-shutdown-after-write.c @@ -133,6 +133,6 @@ TEST_IMPL(tcp_shutdown_after_write) { ASSERT(conn_close_cb_called == 1); ASSERT(timer_close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-try-write-error.c b/deps/uv/test/test-tcp-try-write-error.c index 2201d0ea61ad31..97deccaa0dd7eb 100644 --- a/deps/uv/test/test-tcp-try-write-error.c +++ b/deps/uv/test/test-tcp-try-write-error.c @@ -104,6 +104,6 @@ TEST_IMPL(tcp_try_write_error) { ASSERT(close_cb_called == 3); ASSERT(connection_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-try-write.c b/deps/uv/test/test-tcp-try-write.c index 97a1d6e3d5794f..6458857a85c879 100644 --- a/deps/uv/test/test-tcp-try-write.c +++ b/deps/uv/test/test-tcp-try-write.c @@ -130,6 +130,6 @@ TEST_IMPL(tcp_try_write) { ASSERT(bytes_read == bytes_written); ASSERT(bytes_written > 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-unexpected-read.c b/deps/uv/test/test-tcp-unexpected-read.c index c7b981456be469..e11f77473f4030 100644 --- a/deps/uv/test/test-tcp-unexpected-read.c +++ b/deps/uv/test/test-tcp-unexpected-read.c @@ -112,6 +112,6 @@ TEST_IMPL(tcp_unexpected_read) { */ ASSERT(ticks <= 20); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-write-after-connect.c b/deps/uv/test/test-tcp-write-after-connect.c index 8a698f44bd5db5..4a786995ff1002 100644 --- a/deps/uv/test/test-tcp-write-after-connect.c +++ b/deps/uv/test/test-tcp-write-after-connect.c @@ -66,7 +66,7 @@ TEST_IMPL(tcp_write_after_connect) { uv_run(&loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(&loop); return 0; } diff --git a/deps/uv/test/test-tcp-write-fail.c b/deps/uv/test/test-tcp-write-fail.c index 58ee00faedb410..2912e7c5068586 100644 --- a/deps/uv/test/test-tcp-write-fail.c +++ b/deps/uv/test/test-tcp-write-fail.c @@ -110,6 +110,6 @@ TEST_IMPL(tcp_write_fail) { ASSERT(write_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-write-in-a-row.c b/deps/uv/test/test-tcp-write-in-a-row.c new file mode 100644 index 00000000000000..99f4dee125e1a8 --- /dev/null +++ b/deps/uv/test/test-tcp-write-in-a-row.c @@ -0,0 +1,142 @@ +/* Copyright libuv project contributors. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "task.h" +#include "uv.h" + +static uv_tcp_t server; +static uv_tcp_t client; +static uv_tcp_t incoming; +static int connect_cb_called; +static int close_cb_called; +static int connection_cb_called; +static int write_cb_called; +static uv_write_t small_write; +static uv_write_t big_write; + +/* 10 MB, which is large than the send buffer size and the recv buffer */ +static char data[1024 * 1024 * 10]; + +static void close_cb(uv_handle_t* handle) { + close_cb_called++; +} + +static void write_cb(uv_write_t* w, int status) { + /* the small write should finish immediately after the big write */ + ASSERT_EQ(0, uv_stream_get_write_queue_size((uv_stream_t*) &client)); + + write_cb_called++; + + if (write_cb_called == 2) { + /* we are done */ + uv_close((uv_handle_t*) &client, close_cb); + uv_close((uv_handle_t*) &incoming, close_cb); + uv_close((uv_handle_t*) &server, close_cb); + } +} + +static void connect_cb(uv_connect_t* _, int status) { + int r; + uv_buf_t buf; + size_t write_queue_size0, write_queue_size1; + + ASSERT_EQ(0, status); + connect_cb_called++; + + /* fire a big write */ + buf = uv_buf_init(data, sizeof(data)); + r = uv_write(&small_write, (uv_stream_t*) &client, &buf, 1, write_cb); + ASSERT_EQ(0, r); + + /* check that the write process gets stuck */ + write_queue_size0 = uv_stream_get_write_queue_size((uv_stream_t*) &client); + ASSERT_GT(write_queue_size0, 0); + + /* fire a small write, which should be queued */ + buf = uv_buf_init("A", 1); + r = uv_write(&big_write, (uv_stream_t*) &client, &buf, 1, write_cb); + ASSERT_EQ(0, r); + + write_queue_size1 = uv_stream_get_write_queue_size((uv_stream_t*) &client); + ASSERT_EQ(write_queue_size1, write_queue_size0 + 1); +} + +static void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) { + static char base[1024]; + + buf->base = base; + buf->len = sizeof(base); +} + +static void read_cb(uv_stream_t* tcp, ssize_t nread, const uv_buf_t* buf) {} + +static void connection_cb(uv_stream_t* tcp, int status) { + ASSERT_EQ(0, status); + connection_cb_called++; + + ASSERT_EQ(0, uv_tcp_init(tcp->loop, &incoming)); + ASSERT_EQ(0, uv_accept(tcp, (uv_stream_t*) &incoming)); + ASSERT_EQ(0, uv_read_start((uv_stream_t*) &incoming, alloc_cb, read_cb)); +} + +static void start_server(void) { + struct sockaddr_in addr; + + ASSERT_EQ(0, uv_ip4_addr("0.0.0.0", TEST_PORT, &addr)); + + ASSERT_EQ(0, uv_tcp_init(uv_default_loop(), 
&server)); + ASSERT_EQ(0, uv_tcp_bind(&server, (struct sockaddr*) &addr, 0)); + ASSERT_EQ(0, uv_listen((uv_stream_t*) &server, 128, connection_cb)); +} + +TEST_IMPL(tcp_write_in_a_row) { +#if defined(_WIN32) + RETURN_SKIP("tcp_write_in_a_row does not work on Windows"); +#else + + uv_connect_t connect_req; + struct sockaddr_in addr; + + start_server(); + + ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); + + ASSERT_EQ(0, uv_tcp_init(uv_default_loop(), &client)); + ASSERT_EQ(0, uv_tcp_connect(&connect_req, + &client, + (struct sockaddr*) &addr, + connect_cb)); + + ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT)); + + ASSERT_EQ(1, connect_cb_called); + ASSERT_EQ(3, close_cb_called); + ASSERT_EQ(1, connection_cb_called); + ASSERT_EQ(2, write_cb_called); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +#endif +} diff --git a/deps/uv/test/test-tcp-write-queue-order.c b/deps/uv/test/test-tcp-write-queue-order.c index 1ff9c517cec1c6..7562c41d3de208 100644 --- a/deps/uv/test/test-tcp-write-queue-order.c +++ b/deps/uv/test/test-tcp-write-queue-order.c @@ -134,6 +134,6 @@ TEST_IMPL(tcp_write_queue_order) { write_cancelled_callbacks == REQ_COUNT); ASSERT(close_cb_called == 3); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tcp-write-to-half-open-connection.c b/deps/uv/test/test-tcp-write-to-half-open-connection.c index ae4251317d80e6..8978211d2b7663 100644 --- a/deps/uv/test/test-tcp-write-to-half-open-connection.c +++ b/deps/uv/test/test-tcp-write-to-half-open-connection.c @@ -136,6 +136,6 @@ TEST_IMPL(tcp_write_to_half_open_connection) { ASSERT(write_cb_called > 0); ASSERT(read_cb_called > 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tcp-writealot.c b/deps/uv/test/test-tcp-writealot.c index 40dce96e8d8c34..3c6c149e92c590 100644 --- a/deps/uv/test/test-tcp-writealot.c +++ b/deps/uv/test/test-tcp-writealot.c @@ -149,6 +149,10 @@ TEST_IMPL(tcp_writealot) { uv_tcp_t client; int r; +#ifdef __TSAN__ + RETURN_SKIP("Test is too slow to run under ThreadSanitizer"); +#endif + ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr)); send_buffer = calloc(1, TOTAL_BYTES); @@ -175,6 +179,6 @@ TEST_IMPL(tcp_writealot) { free(send_buffer); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-thread-affinity.c b/deps/uv/test/test-thread-affinity.c new file mode 100644 index 00000000000000..2c9b696ec7a5d2 --- /dev/null +++ b/deps/uv/test/test-thread-affinity.c @@ -0,0 +1,136 @@ +/* Copyright libuv project contributors. All rights reserved. 
+ */ + +#include "uv.h" +#include "task.h" + +#include + +#ifndef NO_CPU_AFFINITY + +static void check_affinity(void* arg) { + int r; + char* cpumask; + int cpumasksize; + uv_thread_t tid; + + cpumask = (char*)arg; + cpumasksize = uv_cpumask_size(); + ASSERT(cpumasksize > 0); + tid = uv_thread_self(); + r = uv_thread_setaffinity(&tid, cpumask, NULL, cpumasksize); + ASSERT(r == 0); + r = uv_thread_setaffinity(&tid, cpumask + cpumasksize, cpumask, cpumasksize); + ASSERT(r == 0); +} + + +TEST_IMPL(thread_affinity) { + int t1first; + int t1second; + int t2first; + int t2second; + int cpumasksize; + char* cpumask; + int ncpus; + int r; + int c; + int i; + uv_thread_t threads[3]; + +#ifdef _WIN32 + /* uv_thread_self isn't defined for the main thread on Windows */ + threads[0] = GetCurrentThread(); +#else + threads[0] = uv_thread_self(); +#endif + cpumasksize = uv_cpumask_size(); + ASSERT(cpumasksize > 0); + + cpumask = calloc(4 * cpumasksize, 1); + ASSERT(cpumask); + + r = uv_thread_getaffinity(&threads[0], cpumask, cpumasksize); + ASSERT(r == 0); + ASSERT(cpumask[0] && "test must be run with cpu 0 affinity"); + ncpus = 0; + while (cpumask[++ncpus]) { } + memset(cpumask, 0, 4 * cpumasksize); + + t1first = cpumasksize * 0; + t1second = cpumasksize * 1; + t2first = cpumasksize * 2; + t2second = cpumasksize * 3; + + cpumask[t1second + 0] = 1; + cpumask[t2first + 0] = 1; + cpumask[t1first + (ncpus >= 2)] = 1; + cpumask[t2second + (ncpus >= 2)] = 1; +#ifdef __linux__ + cpumask[t1second + 2] = 1; + cpumask[t2first + 2] = 1; + cpumask[t1first + 3] = 1; + cpumask[t2second + 3] = 1; +#else + if (ncpus >= 3) { + cpumask[t1second + 2] = 1; + cpumask[t2first + 2] = 1; + } + if (ncpus >= 4) { + cpumask[t1first + 3] = 1; + cpumask[t2second + 3] = 1; + } +#endif + + ASSERT(0 == uv_thread_create(threads + 1, + check_affinity, + &cpumask[t1first])); + ASSERT(0 == uv_thread_create(threads + 2, + check_affinity, + &cpumask[t2first])); + ASSERT(0 == uv_thread_join(threads + 1)); + ASSERT(0 == uv_thread_join(threads + 2)); + + ASSERT(cpumask[t1first + 0] == (ncpus == 1)); + ASSERT(cpumask[t1first + 1] == (ncpus >= 2)); + ASSERT(cpumask[t1first + 2] == 0); + ASSERT(cpumask[t1first + 3] == (ncpus >= 4)); + + ASSERT(cpumask[t2first + 0] == 1); + ASSERT(cpumask[t2first + 1] == 0); + ASSERT(cpumask[t2first + 2] == (ncpus >= 3)); + ASSERT(cpumask[t2first + 3] == 0); + + c = uv_thread_getcpu(); + ASSERT_GE(c, 0); + + memset(cpumask, 0, cpumasksize); + cpumask[c] = 1; + r = uv_thread_setaffinity(&threads[0], cpumask, NULL, cpumasksize); + ASSERT_EQ(r, 0); + + memset(cpumask, 0, cpumasksize); + r = uv_thread_getaffinity(&threads[0], cpumask, cpumasksize); + ASSERT_EQ(r, 0); + for (i = 0; i < cpumasksize; i++) { + if (i == c) + ASSERT_EQ(1, cpumask[i]); + else + ASSERT_EQ(0, cpumask[i]); + } + + free(cpumask); + + return 0; +} + +#else + +TEST_IMPL(thread_affinity) { + int cpumasksize; + cpumasksize = uv_cpumask_size(); + ASSERT(cpumasksize == UV_ENOTSUP); + return 0; +} + +#endif diff --git a/deps/uv/test/test-threadpool-cancel.c b/deps/uv/test/test-threadpool-cancel.c index 1e867c51cf9c3f..263d54a5234e97 100644 --- a/deps/uv/test/test-threadpool-cancel.c +++ b/deps/uv/test/test-threadpool-cancel.c @@ -87,8 +87,34 @@ static void unblock_threadpool(void) { } +static int known_broken(uv_req_t* req) { + if (req->type != UV_FS) + return 0; + +#ifdef __linux__ + /* TODO(bnoordhuis) make cancellation work with io_uring */ + switch (((uv_fs_t*) req)->fs_type) { + case UV_FS_CLOSE: + case UV_FS_FDATASYNC: + case UV_FS_FSTAT: + case 
UV_FS_FSYNC: + case UV_FS_LSTAT: + case UV_FS_OPEN: + case UV_FS_READ: + case UV_FS_STAT: + case UV_FS_WRITE: + return 1; + default: /* Squelch -Wswitch warnings. */ + break; + } +#endif + + return 0; +} + + static void fs_cb(uv_fs_t* req) { - ASSERT(req->result == UV_ECANCELED); + ASSERT(known_broken((uv_req_t*) req) || req->result == UV_ECANCELED); uv_fs_req_cleanup(req); fs_cb_called++; } @@ -133,7 +159,7 @@ static void timer_cb(uv_timer_t* handle) { for (i = 0; i < ci->nreqs; i++) { req = (uv_req_t*) ((char*) ci->reqs + i * ci->stride); - ASSERT(0 == uv_cancel(req)); + ASSERT(known_broken(req) || 0 == uv_cancel(req)); } uv_close((uv_handle_t*) &ci->timer_handle, NULL); @@ -189,7 +215,7 @@ TEST_IMPL(threadpool_cancel_getaddrinfo) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(1 == timer_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -225,7 +251,7 @@ TEST_IMPL(threadpool_cancel_getnameinfo) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(1 == timer_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -248,7 +274,7 @@ TEST_IMPL(threadpool_cancel_random) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(1 == done_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -272,7 +298,7 @@ TEST_IMPL(threadpool_cancel_work) { ASSERT(1 == timer_cb_called); ASSERT(ARRAY_SIZE(reqs) == done2_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -305,7 +331,7 @@ TEST_IMPL(threadpool_cancel_fs) { ASSERT(0 == uv_fs_lstat(loop, reqs + n++, "/", fs_cb)); ASSERT(0 == uv_fs_mkdir(loop, reqs + n++, "/", 0, fs_cb)); ASSERT(0 == uv_fs_open(loop, reqs + n++, "/", 0, 0, fs_cb)); - ASSERT(0 == uv_fs_read(loop, reqs + n++, 0, &iov, 1, 0, fs_cb)); + ASSERT(0 == uv_fs_read(loop, reqs + n++, -1, &iov, 1, 0, fs_cb)); ASSERT(0 == uv_fs_scandir(loop, reqs + n++, "/", 0, fs_cb)); ASSERT(0 == uv_fs_readlink(loop, reqs + n++, "/", fs_cb)); ASSERT(0 == uv_fs_realpath(loop, reqs + n++, "/", fs_cb)); @@ -316,7 +342,7 @@ TEST_IMPL(threadpool_cancel_fs) { ASSERT(0 == uv_fs_symlink(loop, reqs + n++, "/", "/", 0, fs_cb)); ASSERT(0 == uv_fs_unlink(loop, reqs + n++, "/", fs_cb)); ASSERT(0 == uv_fs_utime(loop, reqs + n++, "/", 0, 0, fs_cb)); - ASSERT(0 == uv_fs_write(loop, reqs + n++, 0, &iov, 1, 0, fs_cb)); + ASSERT(0 == uv_fs_write(loop, reqs + n++, -1, &iov, 1, 0, fs_cb)); ASSERT(n == ARRAY_SIZE(reqs)); ASSERT(0 == uv_timer_init(loop, &ci.timer_handle)); @@ -326,7 +352,7 @@ TEST_IMPL(threadpool_cancel_fs) { ASSERT(1 == timer_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -344,6 +370,6 @@ TEST_IMPL(threadpool_cancel_single) { ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT)); ASSERT(1 == done_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-threadpool.c b/deps/uv/test/test-threadpool.c index e3d17d7546f66b..5254131bce303d 100644 --- a/deps/uv/test/test-threadpool.c +++ b/deps/uv/test/test-threadpool.c @@ -54,7 +54,7 @@ TEST_IMPL(threadpool_queue_work_simple) { ASSERT(work_cb_count == 1); ASSERT(after_work_cb_count == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -71,6 +71,6 @@ TEST_IMPL(threadpool_queue_work_einval) { ASSERT(work_cb_count == 0); ASSERT(after_work_cb_count == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-timer-again.c b/deps/uv/test/test-timer-again.c index 834b59d718c8aa..cb298956aa94ac 100644 --- 
a/deps/uv/test/test-timer-again.c +++ b/deps/uv/test/test-timer-again.c @@ -136,6 +136,6 @@ TEST_IMPL(timer_again) { (long int)(uv_now(uv_default_loop()) - start_time)); fflush(stderr); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-timer-from-check.c b/deps/uv/test/test-timer-from-check.c index a18c7e1fb99637..e1a002d8121065 100644 --- a/deps/uv/test/test-timer-from-check.c +++ b/deps/uv/test/test-timer-from-check.c @@ -75,6 +75,6 @@ TEST_IMPL(timer_from_check) { uv_close((uv_handle_t*) &check_handle, NULL); uv_close((uv_handle_t*) &timer_handle, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-timer.c b/deps/uv/test/test-timer.c index a9fa534f5ad6b1..eb54bb25f0c7d2 100644 --- a/deps/uv/test/test-timer.c +++ b/deps/uv/test/test-timer.c @@ -30,6 +30,7 @@ static int twice_close_cb_called = 0; static int repeat_cb_called = 0; static int repeat_close_cb_called = 0; static int order_cb_called = 0; +static int timer_check_double_call_called = 0; static uint64_t start_time; static uv_timer_t tiny_timer; static uv_timer_t huge_timer1; @@ -154,7 +155,7 @@ TEST_IMPL(timer) { ASSERT(500 <= uv_now(uv_default_loop()) - start_time); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -174,7 +175,7 @@ TEST_IMPL(timer_start_twice) { ASSERT(twice_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -187,7 +188,7 @@ TEST_IMPL(timer_init) { ASSERT_UINT64_LE(0, uv_timer_get_due_in(&handle)); ASSERT(0 == uv_is_active((uv_handle_t*) &handle)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -236,7 +237,7 @@ TEST_IMPL(timer_order) { ASSERT(order_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -260,7 +261,7 @@ TEST_IMPL(timer_huge_timeout) { ASSERT_UINT64_EQ(281474976710655, uv_timer_get_due_in(&huge_timer1)); ASSERT_UINT64_LE(0, uv_timer_get_due_in(&huge_timer2)); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -286,7 +287,7 @@ TEST_IMPL(timer_huge_repeat) { ASSERT(0 == uv_timer_start(&tiny_timer, huge_repeat_cb, 2, 2)); ASSERT(0 == uv_timer_start(&huge_timer1, huge_repeat_cb, 1, (uint64_t) -1)); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -314,7 +315,7 @@ TEST_IMPL(timer_run_once) { uv_close((uv_handle_t*) &timer_handle, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_ONCE)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -327,7 +328,7 @@ TEST_IMPL(timer_is_closing) { ASSERT(UV_EINVAL == uv_timer_start(&handle, never_cb, 100, 100)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -338,7 +339,7 @@ TEST_IMPL(timer_null_callback) { ASSERT(0 == uv_timer_init(uv_default_loop(), &handle)); ASSERT(UV_EINVAL == uv_timer_start(&handle, NULL, 100, 100)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } @@ -365,6 +366,44 @@ TEST_IMPL(timer_early_check) { uv_close((uv_handle_t*) &timer_handle, NULL); ASSERT(0 == uv_run(uv_default_loop(), UV_RUN_DEFAULT)); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} + +static void timer_check_double_call(uv_timer_t* handle) { + 
timer_check_double_call_called++; +} + +TEST_IMPL(timer_no_double_call_once) { + uv_timer_t timer_handle; + const uint64_t timeout_ms = 10; + + ASSERT_EQ(0, uv_timer_init(uv_default_loop(), &timer_handle)); + ASSERT_EQ(0, uv_timer_start(&timer_handle, + timer_check_double_call, + timeout_ms, + timeout_ms)); + uv_sleep(timeout_ms * 2); + ASSERT_EQ(1, uv_run(uv_default_loop(), UV_RUN_ONCE)); + ASSERT_EQ(1, timer_check_double_call_called); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); + return 0; +} + +TEST_IMPL(timer_no_double_call_nowait) { + uv_timer_t timer_handle; + const uint64_t timeout_ms = 10; + + ASSERT_EQ(0, uv_timer_init(uv_default_loop(), &timer_handle)); + ASSERT_EQ(0, uv_timer_start(&timer_handle, + timer_check_double_call, + timeout_ms, + timeout_ms)); + uv_sleep(timeout_ms * 2); + ASSERT_EQ(1, uv_run(uv_default_loop(), UV_RUN_NOWAIT)); + ASSERT_EQ(1, timer_check_double_call_called); + + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-tty-duplicate-key.c b/deps/uv/test/test-tty-duplicate-key.c index efd79e14786d69..6ba96c81352337 100644 --- a/deps/uv/test/test-tty-duplicate-key.c +++ b/deps/uv/test/test-tty-duplicate-key.c @@ -180,7 +180,7 @@ TEST_IMPL(tty_duplicate_vt100_fn_key) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -246,7 +246,7 @@ TEST_IMPL(tty_duplicate_alt_modifier_key) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -310,7 +310,7 @@ TEST_IMPL(tty_composing_character) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } diff --git a/deps/uv/test/test-tty-escape-sequence-processing.c b/deps/uv/test/test-tty-escape-sequence-processing.c index 5f04291d24415d..2f7d0364b8b5f0 100644 --- a/deps/uv/test/test-tty-escape-sequence-processing.c +++ b/deps/uv/test/test-tty-escape-sequence-processing.c @@ -420,7 +420,7 @@ TEST_IMPL(tty_cursor_up) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -471,7 +471,7 @@ TEST_IMPL(tty_cursor_down) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -532,7 +532,7 @@ TEST_IMPL(tty_cursor_forward) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -593,7 +593,7 @@ TEST_IMPL(tty_cursor_back) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -644,7 +644,7 @@ TEST_IMPL(tty_cursor_next_line) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -695,7 +695,7 @@ TEST_IMPL(tty_cursor_previous_line) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -741,7 +741,7 @@ TEST_IMPL(tty_cursor_horizontal_move_absolute) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -797,7 +797,7 @@ TEST_IMPL(tty_cursor_move_absolute) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -831,7 +831,7 @@ TEST_IMPL(tty_hide_show_cursor) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -905,7 +905,7 @@ TEST_IMPL(tty_erase) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -979,7 +979,7 @@ TEST_IMPL(tty_erase_line) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + 
MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1037,12 +1037,17 @@ TEST_IMPL(tty_set_cursor_shape) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } TEST_IMPL(tty_set_style) { +#if _MSC_VER >= 1920 && _MSC_VER <= 1929 + RETURN_SKIP("Broken on Microsoft Visual Studio 2019, to be investigated. " + "See: https://github.com/libuv/libuv/issues/3304"); +#else + uv_tty_t tty_out; uv_loop_t* loop; COORD cursor_pos; @@ -1070,11 +1075,6 @@ TEST_IMPL(tty_set_style) { WORD attr; int i, length; -#if _MSC_VER >= 1920 && _MSC_VER <= 1929 - RETURN_SKIP("Broken on Microsoft Visual Studio 2019, to be investigated. " - "See: https://github.com/libuv/libuv/issues/3304"); -#endif - loop = uv_default_loop(); initialize_tty(&tty_out); @@ -1121,7 +1121,7 @@ TEST_IMPL(tty_set_style) { ASSERT(compare_screen(&tty_out, &actual, &expect)); } - /* Set foregroud and background color */ + /* Set foreground and background color */ ASSERT(ARRAY_SIZE(fg_attrs) == ARRAY_SIZE(bg_attrs)); length = ARRAY_SIZE(bg_attrs); for (i = 0; i < length; i++) { @@ -1237,8 +1237,9 @@ TEST_IMPL(tty_set_style) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; +#endif } @@ -1296,7 +1297,7 @@ TEST_IMPL(tty_save_restore_cursor_position) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } @@ -1339,12 +1340,16 @@ TEST_IMPL(tty_full_reset) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; } TEST_IMPL(tty_escape_sequence_processing) { +#if _MSC_VER >= 1920 && _MSC_VER <= 1929 + RETURN_SKIP("Broken on Microsoft Visual Studio 2019, to be investigated. " + "See: https://github.com/libuv/libuv/issues/3304"); +#else uv_tty_t tty_out; uv_loop_t* loop; COORD cursor_pos, cursor_pos_old; @@ -1353,16 +1358,11 @@ TEST_IMPL(tty_escape_sequence_processing) { struct captured_screen actual = {0}, expect = {0}; int dir; -#if _MSC_VER >= 1920 && _MSC_VER <= 1929 - RETURN_SKIP("Broken on Microsoft Visual Studio 2019, to be investigated. 
" - "See: https://github.com/libuv/libuv/issues/3304"); -#endif - loop = uv_default_loop(); initialize_tty(&tty_out); - /* CSI + finaly byte does not output anything */ + /* CSI + finally byte does not output anything */ cursor_pos.X = 1; cursor_pos.Y = 1; set_cursor_position(&tty_out, cursor_pos); @@ -1375,7 +1375,7 @@ TEST_IMPL(tty_escape_sequence_processing) { capture_screen(&tty_out, &actual); ASSERT(compare_screen(&tty_out, &actual, &expect)); - /* CSI(C1) + finaly byte does not output anything */ + /* CSI(C1) + finally byte does not output anything */ cursor_pos.X = 1; cursor_pos.Y = 1; set_cursor_position(&tty_out, cursor_pos); @@ -1388,7 +1388,7 @@ TEST_IMPL(tty_escape_sequence_processing) { capture_screen(&tty_out, &actual); ASSERT(compare_screen(&tty_out, &actual, &expect)); - /* CSI + intermediate byte + finaly byte does not output anything */ + /* CSI + intermediate byte + finally byte does not output anything */ cursor_pos.X = 1; cursor_pos.Y = 1; set_cursor_position(&tty_out, cursor_pos); @@ -1401,7 +1401,7 @@ TEST_IMPL(tty_escape_sequence_processing) { capture_screen(&tty_out, &actual); ASSERT(compare_screen(&tty_out, &actual, &expect)); - /* CSI + parameter byte + finaly byte does not output anything */ + /* CSI + parameter byte + finally byte does not output anything */ cursor_pos.X = 1; cursor_pos.Y = 1; set_cursor_position(&tty_out, cursor_pos); @@ -1605,7 +1605,7 @@ TEST_IMPL(tty_escape_sequence_processing) { capture_screen(&tty_out, &actual); ASSERT(compare_screen(&tty_out, &actual, &expect)); - /* Finaly byte immedately after CSI [ are also output(#1874 1.) */ + /* Finally byte immedately after CSI [ are also output(#1874 1.) */ cursor_pos.X = expect.si.width / 2; cursor_pos.Y = expect.si.height / 2; set_cursor_position(&tty_out, cursor_pos); @@ -1620,8 +1620,9 @@ TEST_IMPL(tty_escape_sequence_processing) { uv_run(loop, UV_RUN_DEFAULT); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return 0; +#endif } #else diff --git a/deps/uv/test/test-tty.c b/deps/uv/test/test-tty.c index ff7d388d7c00f3..418ec31e4b53b3 100644 --- a/deps/uv/test/test-tty.c +++ b/deps/uv/test/test-tty.c @@ -28,7 +28,7 @@ #else /* Unix */ # include # include -# if (defined(__linux__) || defined(__GLIBC__)) && !defined(__ANDROID__) +# if defined(__linux__) && !defined(__ANDROID__) # include # elif defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__) # include @@ -94,12 +94,12 @@ TEST_IMPL(tty) { ASSERT(UV_TTY == uv_guess_handle(ttyin_fd)); ASSERT(UV_TTY == uv_guess_handle(ttyout_fd)); - r = uv_tty_init(uv_default_loop(), &tty_in, ttyin_fd, 1); /* Readable. */ + r = uv_tty_init(loop, &tty_in, ttyin_fd, 1); /* Readable. */ ASSERT(r == 0); ASSERT(uv_is_readable((uv_stream_t*) &tty_in)); ASSERT(!uv_is_writable((uv_stream_t*) &tty_in)); - r = uv_tty_init(uv_default_loop(), &tty_out, ttyout_fd, 0); /* Writable. */ + r = uv_tty_init(loop, &tty_out, ttyout_fd, 0); /* Writable. */ ASSERT(r == 0); ASSERT(!uv_is_readable((uv_stream_t*) &tty_out)); ASSERT(uv_is_writable((uv_stream_t*) &tty_out)); @@ -112,16 +112,12 @@ TEST_IMPL(tty) { if (width == 0 && height == 0) { /* Some environments such as containers or Jenkins behave like this * sometimes */ - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(loop); return TEST_SKIP; } - /* - * Is it a safe assumption that most people have terminals larger than - * 10x10? - */ - ASSERT(width > 10); - ASSERT(height > 10); + ASSERT_GT(width, 0); + ASSERT_GT(height, 0); /* Turn on raw mode. 
diff --git a/deps/uv/test/test-tty.c b/deps/uv/test/test-tty.c
index ff7d388d7c00f3..418ec31e4b53b3 100644
--- a/deps/uv/test/test-tty.c
+++ b/deps/uv/test/test-tty.c
@@ -28,7 +28,7 @@
 #else /* Unix */
 # include
 # include
-# if (defined(__linux__) || defined(__GLIBC__)) && !defined(__ANDROID__)
+# if defined(__linux__) && !defined(__ANDROID__)
 # include
 # elif defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
 # include
@@ -94,12 +94,12 @@ TEST_IMPL(tty) {
   ASSERT(UV_TTY == uv_guess_handle(ttyin_fd));
   ASSERT(UV_TTY == uv_guess_handle(ttyout_fd));
-  r = uv_tty_init(uv_default_loop(), &tty_in, ttyin_fd, 1); /* Readable. */
+  r = uv_tty_init(loop, &tty_in, ttyin_fd, 1); /* Readable. */
   ASSERT(r == 0);
   ASSERT(uv_is_readable((uv_stream_t*) &tty_in));
   ASSERT(!uv_is_writable((uv_stream_t*) &tty_in));
-  r = uv_tty_init(uv_default_loop(), &tty_out, ttyout_fd, 0); /* Writable. */
+  r = uv_tty_init(loop, &tty_out, ttyout_fd, 0); /* Writable. */
   ASSERT(r == 0);
   ASSERT(!uv_is_readable((uv_stream_t*) &tty_out));
   ASSERT(uv_is_writable((uv_stream_t*) &tty_out));
@@ -112,16 +112,12 @@ TEST_IMPL(tty) {
   if (width == 0 && height == 0) {
    /* Some environments such as containers or Jenkins behave like this
     * sometimes */
-    MAKE_VALGRIND_HAPPY();
+    MAKE_VALGRIND_HAPPY(loop);
     return TEST_SKIP;
   }
-  /*
-   * Is it a safe assumption that most people have terminals larger than
-   * 10x10?
-   */
-  ASSERT(width > 10);
-  ASSERT(height > 10);
+  ASSERT_GT(width, 0);
+  ASSERT_GT(height, 0);
   /* Turn on raw mode. */
   r = uv_tty_set_mode(&tty_in, UV_TTY_MODE_RAW);
@@ -145,7 +141,7 @@ TEST_IMPL(tty) {
   uv_run(loop, UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
@@ -188,7 +184,7 @@ TEST_IMPL(tty_raw) {
   ASSERT(ttyin_fd >= 0);
   ASSERT(UV_TTY == uv_guess_handle(ttyin_fd));
-  r = uv_tty_init(uv_default_loop(), &tty_in, ttyin_fd, 1); /* Readable. */
+  r = uv_tty_init(loop, &tty_in, ttyin_fd, 1); /* Readable. */
   ASSERT(r == 0);
   ASSERT(uv_is_readable((uv_stream_t*) &tty_in));
   ASSERT(!uv_is_writable((uv_stream_t*) &tty_in));
@@ -215,7 +211,7 @@ TEST_IMPL(tty_raw) {
   uv_run(loop, UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
@@ -246,7 +242,7 @@ TEST_IMPL(tty_empty_write) {
   ASSERT(UV_TTY == uv_guess_handle(ttyout_fd));
-  r = uv_tty_init(uv_default_loop(), &tty_out, ttyout_fd, 0); /* Writable. */
+  r = uv_tty_init(loop, &tty_out, ttyout_fd, 0); /* Writable. */
   ASSERT(r == 0);
   ASSERT(!uv_is_readable((uv_stream_t*) &tty_out));
   ASSERT(uv_is_writable((uv_stream_t*) &tty_out));
@@ -261,7 +257,7 @@ TEST_IMPL(tty_empty_write) {
   uv_run(loop, UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
@@ -292,7 +288,7 @@ TEST_IMPL(tty_large_write) {
   ASSERT(UV_TTY == uv_guess_handle(ttyout_fd));
-  r = uv_tty_init(uv_default_loop(), &tty_out, ttyout_fd, 0); /* Writable. */
+  r = uv_tty_init(loop, &tty_out, ttyout_fd, 0); /* Writable. */
   ASSERT(r == 0);
   memset(dummy, '.', sizeof(dummy) - 1);
@@ -307,7 +303,7 @@ TEST_IMPL(tty_large_write) {
   uv_run(loop, UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
@@ -340,7 +336,7 @@ TEST_IMPL(tty_raw_cancel) {
   r = uv_read_stop((uv_stream_t*) &tty_in);
   ASSERT(r == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
 #endif
@@ -414,9 +410,8 @@ TEST_IMPL(tty_file) {
   ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT));
-  ASSERT(0 == uv_loop_close(&loop));
-
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(&loop);
 #endif
   return 0;
 }
@@ -433,7 +428,6 @@ TEST_IMPL(tty_pty) {
 #if defined(__APPLE__) || \
     defined(__DragonFly__) || \
     defined(__FreeBSD__) || \
-    defined(__FreeBSD_kernel__) || \
     (defined(__linux__) && !defined(__ANDROID__)) || \
     defined(__NetBSD__) || \
     defined(__OpenBSD__)
@@ -468,7 +462,7 @@ TEST_IMPL(tty_pty) {
   ASSERT(0 == uv_run(&loop, UV_RUN_DEFAULT));
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(&loop);
 #endif
   return 0;
 }
diff --git a/deps/uv/test/test-udp-alloc-cb-fail.c b/deps/uv/test/test-udp-alloc-cb-fail.c
index 6b0980163a5f9e..073dea977821d6 100644
--- a/deps/uv/test/test-udp-alloc-cb-fail.c
+++ b/deps/uv/test/test-udp-alloc-cb-fail.c
@@ -191,6 +191,6 @@ TEST_IMPL(udp_alloc_cb_fail) {
   ASSERT(sv_recv_cb_called == 1);
   ASSERT(close_cb_called == 2);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-bind.c b/deps/uv/test/test-udp-bind.c
index a1e080ee70c880..200cdc7c927fa9 100644
--- a/deps/uv/test/test-udp-bind.c
+++ b/deps/uv/test/test-udp-bind.c
@@ -55,7 +55,7 @@ TEST_IMPL(udp_bind) {
   r = uv_run(loop, UV_RUN_DEFAULT);
   ASSERT(r == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
@@ -88,6 +88,6 @@ TEST_IMPL(udp_bind_reuseaddr) {
   r = uv_run(loop, UV_RUN_DEFAULT);
   ASSERT(r == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
diff --git a/deps/uv/test/test-udp-connect.c b/deps/uv/test/test-udp-connect.c
index 0be702efef403e..c1e4064b94e255 100644
--- a/deps/uv/test/test-udp-connect.c
+++ b/deps/uv/test/test-udp-connect.c
@@ -98,6 +98,9 @@ static void sv_recv_cb(uv_udp_t* handle,
 TEST_IMPL(udp_connect) {
+#if defined(__OpenBSD__)
+  RETURN_SKIP("Test does not currently work in OpenBSD");
+#endif
   uv_udp_send_t req;
   struct sockaddr_in ext_addr;
   struct sockaddr_in tmp_addr;
@@ -188,6 +191,6 @@ TEST_IMPL(udp_connect) {
   ASSERT(client.send_queue_size == 0);
   ASSERT(server.send_queue_size == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-connect6.c b/deps/uv/test/test-udp-connect6.c
index d000daf17fbb2c..076d8d77b92df4 100644
--- a/deps/uv/test/test-udp-connect6.c
+++ b/deps/uv/test/test-udp-connect6.c
@@ -98,6 +98,9 @@ static void sv_recv_cb(uv_udp_t* handle,
 TEST_IMPL(udp_connect6) {
+#if defined(__OpenBSD__)
+  RETURN_SKIP("Test does not currently work in OpenBSD");
+#endif
   uv_udp_send_t req;
   struct sockaddr_in6 ext_addr;
   struct sockaddr_in6 tmp_addr;
@@ -191,6 +194,6 @@ TEST_IMPL(udp_connect6) {
   ASSERT_EQ(client.send_queue_size, 0);
   ASSERT_EQ(server.send_queue_size, 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
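The OpenBSD skips added to udp_connect and udp_connect6 use the test harness's RETURN_SKIP macro from test/task.h, which is not part of this excerpt. As a rough, assumed sketch (not the actual definition), such a macro typically prints the reason and returns the runner's skip code:

    /* Hypothetical sketch; the real RETURN_SKIP lives in test/task.h. */
    #define RETURN_SKIP(explanation)                                          \
      do {                                                                    \
        fprintf(stderr, "%s\n", explanation);                                 \
        fflush(stderr);                                                       \
        return TEST_SKIP;  /* reported as skipped rather than failed */       \
      } while (0)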
dual stack not supported"); #elif defined(__OpenBSD__) diff --git a/deps/uv/test/test-udp-mmsg.c b/deps/uv/test/test-udp-mmsg.c index f722608a185bc9..c37343f8c9f4c5 100644 --- a/deps/uv/test/test-udp-mmsg.c +++ b/deps/uv/test/test-udp-mmsg.c @@ -144,6 +144,6 @@ TEST_IMPL(udp_mmsg) { else ASSERT_EQ(alloc_cb_called, recv_cb_called); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-udp-multicast-interface.c b/deps/uv/test/test-udp-multicast-interface.c index bd9a61c98aa6e2..447d3487f5cefc 100644 --- a/deps/uv/test/test-udp-multicast-interface.c +++ b/deps/uv/test/test-udp-multicast-interface.c @@ -99,6 +99,6 @@ TEST_IMPL(udp_multicast_interface) { ASSERT(client.send_queue_size == 0); ASSERT(server.send_queue_size == 0); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-udp-multicast-interface6.c b/deps/uv/test/test-udp-multicast-interface6.c index be11514c805900..1d40aefa8cb236 100644 --- a/deps/uv/test/test-udp-multicast-interface6.c +++ b/deps/uv/test/test-udp-multicast-interface6.c @@ -77,7 +77,7 @@ TEST_IMPL(udp_multicast_interface6) { r = uv_udp_bind(&server, (const struct sockaddr*)&baddr, 0); ASSERT(r == 0); -#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) +#if defined(__APPLE__) || defined(__FreeBSD__) r = uv_udp_set_multicast_interface(&server, "::1%lo0"); #else r = uv_udp_set_multicast_interface(&server, NULL); @@ -103,6 +103,6 @@ TEST_IMPL(udp_multicast_interface6) { ASSERT(sv_send_cb_called == 1); ASSERT(close_cb_called == 1); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-udp-multicast-join.c b/deps/uv/test/test-udp-multicast-join.c index 9e603a8455f736..dddcea4662f1d9 100644 --- a/deps/uv/test/test-udp-multicast-join.c +++ b/deps/uv/test/test-udp-multicast-join.c @@ -138,6 +138,9 @@ static void cl_recv_cb(uv_udp_t* handle, TEST_IMPL(udp_multicast_join) { +#if defined(__OpenBSD__) + RETURN_SKIP("Test does not currently work in OpenBSD"); +#endif int r; struct sockaddr_in addr; @@ -176,6 +179,6 @@ TEST_IMPL(udp_multicast_join) { ASSERT(sv_send_cb_called == 2); ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-udp-multicast-join6.c b/deps/uv/test/test-udp-multicast-join6.c index e67c5ee59bb1fb..d5262b6a9a07bf 100644 --- a/deps/uv/test/test-udp-multicast-join6.c +++ b/deps/uv/test/test-udp-multicast-join6.c @@ -33,7 +33,6 @@ #if defined(__APPLE__) || \ defined(_AIX) || \ defined(__MVS__) || \ - defined(__FreeBSD_kernel__) || \ defined(__NetBSD__) || \ defined(__OpenBSD__) #define MULTICAST_ADDR "ff02::1%lo0" @@ -187,7 +186,7 @@ TEST_IMPL(udp_multicast_join6) { r = uv_udp_set_membership(&server, MULTICAST_ADDR, INTERFACE_ADDR, UV_JOIN_GROUP); if (r == UV_ENODEV) { - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); RETURN_SKIP("No ipv6 multicast route"); } @@ -214,6 +213,6 @@ TEST_IMPL(udp_multicast_join6) { ASSERT(sv_send_cb_called == 2); ASSERT(close_cb_called == 2); - MAKE_VALGRIND_HAPPY(); + MAKE_VALGRIND_HAPPY(uv_default_loop()); return 0; } diff --git a/deps/uv/test/test-udp-multicast-ttl.c b/deps/uv/test/test-udp-multicast-ttl.c index fbddd90914ca86..9aa5bb9147ffbe 100644 --- a/deps/uv/test/test-udp-multicast-ttl.c +++ b/deps/uv/test/test-udp-multicast-ttl.c @@ -89,6 +89,6 @@ TEST_IMPL(udp_multicast_ttl) { ASSERT(sv_send_cb_called == 1); ASSERT(close_cb_called == 
diff --git a/deps/uv/test/test-udp-multicast-ttl.c b/deps/uv/test/test-udp-multicast-ttl.c
index fbddd90914ca86..9aa5bb9147ffbe 100644
--- a/deps/uv/test/test-udp-multicast-ttl.c
+++ b/deps/uv/test/test-udp-multicast-ttl.c
@@ -89,6 +89,6 @@ TEST_IMPL(udp_multicast_ttl) {
   ASSERT(sv_send_cb_called == 1);
   ASSERT(close_cb_called == 1);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-open.c b/deps/uv/test/test-udp-open.c
index f5136b6d4f281f..0e09f56a49fa59 100644
--- a/deps/uv/test/test-udp-open.c
+++ b/deps/uv/test/test-udp-open.c
@@ -188,7 +188,7 @@ TEST_IMPL(udp_open) {
   ASSERT(client.send_queue_size == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
@@ -215,7 +215,7 @@ TEST_IMPL(udp_open_twice) {
   uv_close((uv_handle_t*) &client, NULL);
   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
@@ -245,7 +245,7 @@ TEST_IMPL(udp_open_bound) {
   uv_close((uv_handle_t*) &client, NULL);
   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
@@ -295,7 +295,7 @@ TEST_IMPL(udp_open_connect) {
   ASSERT(client.send_queue_size == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
@@ -344,7 +344,7 @@ TEST_IMPL(udp_send_unix) {
   close(fd);
   unlink(TEST_PIPENAME);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
 #endif
diff --git a/deps/uv/test/test-udp-options.c b/deps/uv/test/test-udp-options.c
index 3ea51baf40b736..11e58b996a18ac 100644
--- a/deps/uv/test/test-udp-options.c
+++ b/deps/uv/test/test-udp-options.c
@@ -87,7 +87,7 @@ static int udp_options_test(const struct sockaddr* addr) {
   r = uv_run(loop, UV_RUN_DEFAULT);
   ASSERT(r == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
@@ -155,6 +155,6 @@ TEST_IMPL(udp_no_autobind) {
   ASSERT(0 == uv_run(loop, UV_RUN_DEFAULT));
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
diff --git a/deps/uv/test/test-udp-recv-in-a-row.c b/deps/uv/test/test-udp-recv-in-a-row.c
new file mode 100644
index 00000000000000..98aca28e193693
--- /dev/null
+++ b/deps/uv/test/test-udp-recv-in-a-row.c
@@ -0,0 +1,121 @@
+/* Copyright The libuv project and contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "task.h"
+
+#include
+#include
+#include
+
+static uv_udp_t server;
+static uv_udp_t client;
+static uv_check_t check_handle;
+static uv_buf_t buf;
+static struct sockaddr_in addr;
+static char send_data[10];
+static int check_cb_called;
+
+#define N 5
+static int recv_cnt;
+
+static void alloc_cb(uv_handle_t* handle,
+                     size_t suggested_size,
+                     uv_buf_t* buf) {
+  static char slab[sizeof(send_data)];
+  buf->base = slab;
+  buf->len = sizeof(slab);
+}
+
+static void sv_recv_cb(uv_udp_t* handle,
+                       ssize_t nread,
+                       const uv_buf_t* rcvbuf,
+                       const struct sockaddr* addr,
+                       unsigned flags) {
+  if (++ recv_cnt < N) {
+    ASSERT_EQ(sizeof(send_data), nread);
+  } else {
+    ASSERT_EQ(0, nread);
+  }
+}
+
+static void check_cb(uv_check_t* handle) {
+  ASSERT_PTR_EQ(&check_handle, handle);
+
+  /**
+   * sv_recv_cb() is called with nread set to zero to indicate
+   * there is no more udp packet in the kernel, so the actual
+   * recv_cnt is one larger than N.
+   */
+  ASSERT_EQ(N+1, recv_cnt);
+  check_cb_called = 1;
+
+  /* we are done */
+  ASSERT_EQ(0, uv_check_stop(handle));
+  uv_close((uv_handle_t*) &client, NULL);
+  uv_close((uv_handle_t*) &check_handle, NULL);
+  uv_close((uv_handle_t*) &server, NULL);
+}
+
+
+TEST_IMPL(udp_recv_in_a_row) {
+  int i, r;
+
+  ASSERT_EQ(0, uv_check_init(uv_default_loop(), &check_handle));
+  ASSERT_EQ(0, uv_check_start(&check_handle, check_cb));
+
+  ASSERT_EQ(0, uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
+
+  ASSERT_EQ(0, uv_udp_init(uv_default_loop(), &server));
+  ASSERT_EQ(0, uv_udp_bind(&server, (const struct sockaddr*) &addr, 0));
+  ASSERT_EQ(0, uv_udp_recv_start(&server, alloc_cb, sv_recv_cb));
+
+  ASSERT_EQ(0, uv_udp_init(uv_default_loop(), &client));
+
+  /* send N-1 udp packets */
+  buf = uv_buf_init(send_data, sizeof(send_data));
+  for (i = 0; i < N - 1; i ++) {
+    r = uv_udp_try_send(&client,
+                        &buf,
+                        1,
+                        (const struct sockaddr*) &addr);
+    ASSERT_EQ(sizeof(send_data), r);
+  }
+
+  /* send an empty udp packet */
+  buf = uv_buf_init(NULL, 0);
+  r = uv_udp_try_send(&client,
+                      &buf,
+                      1,
+                      (const struct sockaddr*) &addr);
+  ASSERT_EQ(0, r);
+
+  /* check_cb() asserts that the N packets can be received
+   * before it gets called.
+   */
+
+  ASSERT_EQ(0, uv_run(uv_default_loop(), UV_RUN_DEFAULT));
+
+  ASSERT(check_cb_called);
+
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
+  return 0;
+}
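The new test relies on a documented uv_udp_recv_cb convention: a callback with nread == 0 either means an empty datagram arrived (addr is non-NULL) or that there is nothing more to read right now (addr is NULL). That is why check_cb() expects exactly one more callback than the N datagrams sent. A minimal receive callback that spells out the cases, purely for illustration and not part of the patch:

    #include "uv.h"

    static void recv_cb_example(uv_udp_t* handle,
                                ssize_t nread,
                                const uv_buf_t* buf,
                                const struct sockaddr* addr,
                                unsigned flags) {
      (void) handle;
      (void) flags;
      if (nread == 0 && addr == NULL)
        return;  /* nothing left to read in this loop iteration */
      if (nread == 0)
        return;  /* an empty (zero-length) datagram was received */
      if (nread < 0)
        return;  /* transmission error; a real program would close the handle */
      /* nread > 0: process buf->base[0 .. nread) */
    }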
diff --git a/deps/uv/test/test-udp-send-and-recv.c b/deps/uv/test/test-udp-send-and-recv.c
index d60209059b9887..ab60e84a138179 100644
--- a/deps/uv/test/test-udp-send-and-recv.c
+++ b/deps/uv/test/test-udp-send-and-recv.c
@@ -207,6 +207,6 @@ TEST_IMPL(udp_send_and_recv) {
   ASSERT(client.send_queue_size == 0);
   ASSERT(server.send_queue_size == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-send-hang-loop.c b/deps/uv/test/test-udp-send-hang-loop.c
index 072070b60e5e65..b1e02263d3ca5a 100644
--- a/deps/uv/test/test-udp-send-hang-loop.c
+++ b/deps/uv/test/test-udp-send-hang-loop.c
@@ -94,6 +94,6 @@ TEST_IMPL(udp_send_hang_loop) {
   ASSERT(loop_hang_called > 1000);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-send-immediate.c b/deps/uv/test/test-udp-send-immediate.c
index a1c95d34384ce2..ee70a6165a21da 100644
--- a/deps/uv/test/test-udp-send-immediate.c
+++ b/deps/uv/test/test-udp-send-immediate.c
@@ -143,6 +143,6 @@ TEST_IMPL(udp_send_immediate) {
   ASSERT(sv_recv_cb_called == 2);
   ASSERT(close_cb_called == 2);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-send-unreachable.c b/deps/uv/test/test-udp-send-unreachable.c
index c67a23b38529fe..7075deb189666b 100644
--- a/deps/uv/test/test-udp-send-unreachable.c
+++ b/deps/uv/test/test-udp-send-unreachable.c
@@ -196,6 +196,6 @@ TEST_IMPL(udp_send_unreachable) {
   ASSERT_EQ(timer_cb_called, 1);
   ASSERT_EQ(close_cb_called, (long)(can_recverr ? 3 : 2));
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-sendmmsg-error.c b/deps/uv/test/test-udp-sendmmsg-error.c
index c8a411b2dee423..7b1741a3e04f2f 100644
--- a/deps/uv/test/test-udp-sendmmsg-error.c
+++ b/deps/uv/test/test-udp-sendmmsg-error.c
@@ -70,6 +70,6 @@ TEST_IMPL(udp_sendmmsg_error) {
   ASSERT_EQ(0, client.send_queue_size);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-udp-try-send.c b/deps/uv/test/test-udp-try-send.c
index 85caaaca41d5a5..b81506cc39f3ce 100644
--- a/deps/uv/test/test-udp-try-send.c
+++ b/deps/uv/test/test-udp-try-send.c
@@ -116,6 +116,6 @@ TEST_IMPL(udp_try_send) {
   ASSERT(client.send_queue_size == 0);
   ASSERT(server.send_queue_size == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(uv_default_loop());
   return 0;
 }
diff --git a/deps/uv/test/test-walk-handles.c b/deps/uv/test/test-walk-handles.c
index 4b0ca6ebc55849..50f0ce84f1e846 100644
--- a/deps/uv/test/test-walk-handles.c
+++ b/deps/uv/test/test-walk-handles.c
@@ -72,6 +72,6 @@ TEST_IMPL(walk_handles) {
   uv_walk(loop, walk_cb, magic_cookie);
   ASSERT(seen_timer_handle == 0);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
diff --git a/deps/uv/test/test-watcher-cross-stop.c b/deps/uv/test/test-watcher-cross-stop.c
index b26deb8d88c50f..bbc0c30574a7f7 100644
--- a/deps/uv/test/test-watcher-cross-stop.c
+++ b/deps/uv/test/test-watcher-cross-stop.c
@@ -107,6 +107,6 @@ TEST_IMPL(watcher_cross_stop) {
   ASSERT(ARRAY_SIZE(sockets) == send_cb_called);
   ASSERT(ARRAY_SIZE(sockets) == close_cb_called);
-  MAKE_VALGRIND_HAPPY();
+  MAKE_VALGRIND_HAPPY(loop);
   return 0;
 }
diff --git a/deps/uv/tsansupp.txt b/deps/uv/tsansupp.txt
new file mode 100644
index 00000000000000..bde4060803e2da
--- /dev/null
+++ b/deps/uv/tsansupp.txt
@@ -0,0 +1,2 @@
+# glibc reads `count` field unsynchronized, not a libuv bug
+race:pthread_barrier_destroy
diff --git a/deps/uv/uv.gyp b/deps/uv/uv.gyp
index ad13f89dfa5bde..ba6d715867fb90 100644
--- a/deps/uv/uv.gyp
+++ b/deps/uv/uv.gyp
@@ -129,25 +129,18 @@
       'src/unix/random-getentropy.c',
     ],
     'uv_sources_linux': [
-      'src/unix/epoll.c',
-      'src/unix/linux-core.c',
-      'src/unix/linux-inotify.c',
-      'src/unix/linux-syscalls.c',
-      'src/unix/linux-syscalls.h',
+      'src/unix/linux.c',
      'src/unix/procfs-exepath.c',
      'src/unix/random-getrandom.c',
      'src/unix/random-sysctl-linux.c',
    ],
    'uv_sources_android': [
-      'src/unix/linux-core.c',
-      'src/unix/linux-inotify.c',
-      'src/unix/linux-syscalls.c',
+      'src/unix/linux.c',
      'src/unix/procfs-exepath.c',
      'src/unix/pthread-fixes.c',
      'src/unix/random-getentropy.c',
      'src/unix/random-getrandom.c',
      'src/unix/random-sysctl-linux.c',
-      'src/unix/epoll.c',
    ],
    'uv_sources_solaris': [
      'src/unix/no-proctitle.c',
@@ -202,7 +195,7 @@
     'conditions': [
       [ 'OS=="win"', {
         'defines': [
-          '_WIN32_WINNT=0x0600',
+          '_WIN32_WINNT=0x0602',
           '_GNU_SOURCE',
         ],
         'sources': [
@@ -211,11 +204,14 @@
         'link_settings': {
           'libraries': [
             '-ladvapi32',
+            '-ldbghelp',
+            '-lole32',
             '-liphlpapi',
             '-lpsapi',
             '-lshell32',
             '-luser32',
             '-luserenv',
+            '-luuid',
             '-lws2_32'
           ],
         },
diff --git a/test/known_issues/test-vm-timeout-escape-nexttick.js b/test/known_issues/test-vm-timeout-escape-nexttick.js
index 6555370820f1f7..31f4110430c042 100644
--- a/test/known_issues/test-vm-timeout-escape-nexttick.js
+++ b/test/known_issues/test-vm-timeout-escape-nexttick.js
@@ -13,7 +13,7 @@ const NS_PER_MS = 1000000n;
 const hrtime = process.hrtime.bigint;
 const nextTick = process.nextTick;
-const waitDuration = common.platformTimeout(100n);
+const waitDuration = common.platformTimeout(200n);
 function loop() {
   const start = hrtime();
@@ -38,7 +38,7 @@ for (let i = 0; i < 4; i++) {
         nextTick,
         loop,
       },
-      { timeout: common.platformTimeout(10) },
+      { timeout: common.platformTimeout(100) },
     );
   }, {
     code: 'ERR_SCRIPT_EXECUTION_TIMEOUT',
diff --git a/tools/dep_updaters/update-libuv.sh b/tools/dep_updaters/update-libuv.sh
index b679d935a91431..1a0b2c8efc7c89 100755
--- a/tools/dep_updaters/update-libuv.sh
+++ b/tools/dep_updaters/update-libuv.sh
@@ -1,5 +1,6 @@
 #!/bin/sh
 set -e
+set -x
 # Shell script to update libuv in the source tree to a specific version
 BASE_DIR=$(cd "$(dirname "$0")/../.." && pwd)
diff --git a/tools/license-builder.sh b/tools/license-builder.sh
index aaedfeed5c7b1b..10b894262946ea 100755
--- a/tools/license-builder.sh
+++ b/tools/license-builder.sh
@@ -59,7 +59,7 @@ else
   exit 1
 fi
-licenseText="$(cat "${rootdir}/deps/uv/LICENSE")"
+licenseText="$(cat "${rootdir}/deps/uv/LICENSE" "${rootdir}/deps/uv/LICENSE-extra")"
 addlicense "libuv" "deps/uv" "$licenseText"
 licenseText="$(cat deps/llhttp/LICENSE-MIT)"
 addlicense "llhttp" "deps/llhttp" "$licenseText"