From 925fdd8f53863ead3cb53c150893f2907a6af5fd Mon Sep 17 00:00:00 2001 From: Kirill Artemev Date: Tue, 19 Nov 2024 01:15:47 +0500 Subject: [PATCH] proto-ntsync Signed-off-by: Kirill Artemev --- configure.ac | 9 +- dlls/kernel32/tests/sync.c | 383 +------- dlls/ntdll/Makefile.in | 2 - dlls/ntdll/loader.c | 6 +- dlls/ntdll/unix/esync.c | 1354 --------------------------- dlls/ntdll/unix/esync.h | 61 -- dlls/ntdll/unix/file.c | 3 +- dlls/ntdll/unix/fsync.c | 1478 ------------------------------ dlls/ntdll/unix/fsync.h | 54 -- dlls/ntdll/unix/loader.c | 25 - dlls/ntdll/unix/process.c | 2 +- dlls/ntdll/unix/server.c | 31 +- dlls/ntdll/unix/sync.c | 1103 +++++++++++++++++++--- dlls/ntdll/unix/thread.c | 2 +- dlls/ntdll/unix/unix_private.h | 12 +- dlls/ntdll/unix/virtual.c | 2 - dlls/rpcrt4/rpc_server.c | 8 +- dlls/webservices/tests/channel.c | 3 + include/wine/server_protocol.h | 205 +---- server/Makefile.in | 3 +- server/async.c | 6 +- server/atom.c | 3 +- server/change.c | 3 +- server/clipboard.c | 3 +- server/completion.c | 49 +- server/console.c | 140 +-- server/debugger.c | 27 +- server/device.c | 72 +- server/directory.c | 6 +- server/esync.c | 591 ------------ server/esync.h | 35 - server/event.c | 117 +-- server/fast_sync.c | 434 +++++++++ server/fd.c | 72 +- server/file.c | 3 +- server/file.h | 3 +- server/fsync.c | 630 ------------- server/fsync.h | 36 - server/handle.c | 3 +- server/hook.c | 3 +- server/mailslot.c | 12 +- server/main.c | 11 - server/mapping.c | 9 +- server/mutex.c | 43 +- server/named_pipe.c | 15 +- server/object.c | 6 + server/object.h | 18 +- server/process.c | 72 +- server/process.h | 3 +- server/protocol.def | 124 +-- server/queue.c | 167 ++-- server/registry.c | 3 +- server/request.c | 3 +- server/request.h | 106 +-- server/semaphore.c | 26 +- server/serial.c | 3 +- server/signal.c | 3 +- server/sock.c | 9 +- server/symlink.c | 3 +- server/thread.c | 105 +-- server/thread.h | 6 +- server/timer.c | 52 +- server/token.c | 3 +- server/trace.c 
| 141 +-- server/window.c | 3 +- server/winstation.c | 6 +- 66 files changed, 2092 insertions(+), 5842 deletions(-) delete mode 100644 dlls/ntdll/unix/esync.c delete mode 100644 dlls/ntdll/unix/esync.h delete mode 100644 dlls/ntdll/unix/fsync.c delete mode 100644 dlls/ntdll/unix/fsync.h delete mode 100644 server/esync.c delete mode 100644 server/esync.h create mode 100644 server/fast_sync.c delete mode 100644 server/fsync.c delete mode 100644 server/fsync.h diff --git a/configure.ac b/configure.ac index 977a70abccb..69c16422f30 100644 --- a/configure.ac +++ b/configure.ac @@ -431,6 +431,7 @@ AC_CHECK_HEADERS(\ linux/input.h \ linux/ioctl.h \ linux/major.h \ + linux/ntsync.h \ linux/param.h \ linux/seccomp.h \ linux/serial.h \ @@ -461,7 +462,6 @@ AC_CHECK_HEADERS(\ sys/cdio.h \ sys/epoll.h \ sys/event.h \ - sys/eventfd.h \ sys/extattr.h \ sys/filio.h \ sys/ipc.h \ @@ -2135,7 +2135,6 @@ AC_CHECK_FUNCS(\ port_create \ posix_fadvise \ posix_fallocate \ - ppoll \ prctl \ sched_yield \ setproctitle \ @@ -2158,12 +2157,6 @@ case $host_os in ;; esac -ac_save_LIBS=$LIBS -AC_SEARCH_LIBS(shm_open, rt, - [AC_DEFINE(HAVE_SHM_OPEN, 1, [Define to 1 if you have the `shm_open' function.]) - test "$ac_res" = "none required" || AC_SUBST(RT_LIBS,"$ac_res")]) -LIBS=$ac_save_LIBS - AC_CACHE_CHECK([for sched_setaffinity],wine_cv_have_sched_setaffinity, AC_LINK_IFELSE([AC_LANG_PROGRAM( [[#include ]], [[sched_setaffinity(0, 0, 0);]])],[wine_cv_have_sched_setaffinity=yes],[wine_cv_have_sched_setaffinity=no])) diff --git a/dlls/kernel32/tests/sync.c b/dlls/kernel32/tests/sync.c index 8c609b603dd..d74105b84b8 100644 --- a/dlls/kernel32/tests/sync.c +++ b/dlls/kernel32/tests/sync.c @@ -57,7 +57,6 @@ static BOOLEAN (WINAPI *pTryAcquireSRWLockShared)(PSRWLOCK); static NTSTATUS (WINAPI *pNtAllocateVirtualMemory)(HANDLE, PVOID *, ULONG_PTR, SIZE_T *, ULONG, ULONG); static NTSTATUS (WINAPI *pNtFreeVirtualMemory)(HANDLE, PVOID *, SIZE_T *, ULONG); -static NTSTATUS (WINAPI 
*pNtQuerySystemTime)(LARGE_INTEGER *); static NTSTATUS (WINAPI *pNtWaitForSingleObject)(HANDLE, BOOLEAN, const LARGE_INTEGER *); static NTSTATUS (WINAPI *pNtWaitForMultipleObjects)(ULONG,const HANDLE*,BOOLEAN,BOOLEAN,const LARGE_INTEGER*); static PSLIST_ENTRY (__fastcall *pRtlInterlockedPushListSList)(PSLIST_HEADER list, PSLIST_ENTRY first, @@ -228,23 +227,8 @@ static void test_temporary_objects(void) ok(GetLastError() == ERROR_FILE_NOT_FOUND, "wrong error %lu\n", GetLastError()); } -static HANDLE mutex, mutex2, mutices[2]; - -static DWORD WINAPI mutex_thread( void *param ) -{ - DWORD expect = (DWORD)(DWORD_PTR)param; - DWORD ret; - - ret = WaitForSingleObject( mutex, 0 ); - ok(ret == expect, "expected %lu, got %lu\n", expect, ret); - - if (!ret) ReleaseMutex( mutex ); - return 0; -} - static void test_mutex(void) { - HANDLE thread; DWORD wait_ret; BOOL ret; HANDLE hCreated; @@ -284,8 +268,7 @@ static void test_mutex(void) SetLastError(0xdeadbeef); hOpened = OpenMutexA(GENERIC_READ | GENERIC_WRITE, FALSE, "WineTestMutex"); ok(hOpened != NULL, "OpenMutex failed with error %ld\n", GetLastError()); - wait_ret = WaitForSingleObject(hOpened, 0); -todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */ + wait_ret = WaitForSingleObject(hOpened, INFINITE); ok(wait_ret == WAIT_FAILED, "WaitForSingleObject succeeded\n"); CloseHandle(hOpened); @@ -316,7 +299,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: validation is not implemented */ SetLastError(0xdeadbeef); ret = ReleaseMutex(hCreated); -todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */ ok(!ret && (GetLastError() == ERROR_NOT_OWNER), "ReleaseMutex should have failed with ERROR_NOT_OWNER instead of %ld\n", GetLastError()); @@ -355,85 +337,6 @@ todo_wine_if(getenv("WINEESYNC")) /* XFAIL: due to the above */ CloseHandle(hOpened); CloseHandle(hCreated); - - mutex = CreateMutexA( NULL, FALSE, NULL ); - ok(!!mutex, "got error %lu\n", GetLastError()); - - ret = ReleaseMutex( mutex ); - 
ok(!ret, "got %d\n", ret); - ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); - - for (i = 0; i < 100; i++) - { - ret = WaitForSingleObject( mutex, 0 ); - ok(ret == 0, "got %u\n", ret); - } - - for (i = 0; i < 100; i++) - { - ret = ReleaseMutex( mutex ); - ok(ret, "got error %lu\n", GetLastError()); - } - - ret = ReleaseMutex( mutex ); - ok(!ret, "got %d\n", ret); - ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); - - thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); - ret = WaitForSingleObject( thread, 2000 ); - ok(ret == 0, "wait failed: %u\n", ret); - - WaitForSingleObject( mutex, 0 ); - - thread = CreateThread( NULL, 0, mutex_thread, (void *)WAIT_TIMEOUT, 0, NULL ); - ret = WaitForSingleObject( thread, 2000 ); - ok(ret == 0, "wait failed: %u\n", ret); - - ret = ReleaseMutex( mutex ); - ok(ret, "got error %lu\n", GetLastError()); - - thread = CreateThread( NULL, 0, mutex_thread, (void *)0, 0, NULL ); - ret = WaitForSingleObject( thread, 2000 ); - ok(ret == 0, "wait failed: %u\n", ret); - - mutex2 = CreateMutexA( NULL, TRUE, NULL ); - ok(!!mutex2, "got error %lu\n", GetLastError()); - - ret = ReleaseMutex( mutex2 ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ReleaseMutex( mutex2 ); - ok(!ret, "got %d\n", ret); - ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); - - mutices[0] = mutex; - mutices[1] = mutex2; - - ret = WaitForMultipleObjects( 2, mutices, FALSE, 0 ); - ok(ret == 0, "got %u\n", ret); - - ret = ReleaseMutex( mutex ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ReleaseMutex( mutex2 ); - ok(!ret, "got %d\n", ret); - ok(GetLastError() == ERROR_NOT_OWNER, "got error %lu\n", GetLastError()); - - ret = WaitForMultipleObjects( 2, mutices, TRUE, 0 ); - ok(ret == 0, "got %u\n", ret); - - ret = ReleaseMutex( mutex ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ReleaseMutex( mutex2 ); - ok(ret, "got error %lu\n", GetLastError()); - - 
ret = CloseHandle( mutex ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = CloseHandle( mutex2 ); - ok(ret, "got error %lu\n", GetLastError()); - } static void test_slist(void) @@ -609,13 +512,12 @@ static void test_slist(void) static void test_event(void) { - HANDLE handle, handle2, handles[2]; + HANDLE handle, handle2; SECURITY_ATTRIBUTES sa; SECURITY_DESCRIPTOR sd; ACL acl; DWORD ret; BOOL val; - int i; /* no sd */ handle = CreateEventA(NULL, FALSE, FALSE, __FILE__ ": Test Event"); @@ -719,130 +621,11 @@ static void test_event(void) ok( ret, "QueryMemoryResourceNotification failed err %lu\n", GetLastError() ); ok( val == FALSE || val == TRUE, "wrong value %u\n", val ); CloseHandle( handle ); - - handle = CreateEventA( NULL, TRUE, FALSE, NULL ); - ok(!!handle, "got error %lu\n", GetLastError()); - - ret = WaitForSingleObject( handle, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ret = SetEvent( handle ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = SetEvent( handle ); - ok(ret, "got error %lu\n", GetLastError()); - - for (i = 0; i < 100; i++) - { - ret = WaitForSingleObject( handle, 0 ); - ok(ret == 0, "got %lu\n", ret); - } - - ret = ResetEvent( handle ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ResetEvent( handle ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = WaitForSingleObject( handle, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - handle2 = CreateEventA( NULL, FALSE, TRUE, NULL ); - ok(!!handle2, "got error %lu\n", GetLastError()); - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ret = SetEvent( handle2 ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = SetEvent( handle2 ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ResetEvent( handle2 ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = ResetEvent( handle2 ); - ok(ret, "got error %lu\n", 
GetLastError()); - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - handles[0] = handle; - handles[1] = handle2; - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - SetEvent( handle ); - SetEvent( handle2 ); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ResetEvent( handle ); - SetEvent( handle2 ); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 1, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - SetEvent( handle ); - SetEvent( handle2 ); - - ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - SetEvent( handle2 ); - ResetEvent( handle ); - - ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == 0, "got %lu\n", ret); - - handles[0] = handle2; - handles[1] = handle; - SetEvent( handle ); - SetEvent( handle2 ); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 1, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 1, "got %lu\n", ret); - - ret = CloseHandle( handle ); - ok(ret, "got error %lu\n", GetLastError()); - - ret = CloseHandle( handle2 ); - ok(ret, "got error %lu\n", GetLastError()); } static void test_semaphore(void) { - HANDLE handle, handle2, handles[2]; - DWORD ret; - LONG prev; - int i; + HANDLE handle, handle2; /* test case sensitivity */ @@ -884,99 +667,6 @@ 
static void test_semaphore(void) ok( GetLastError() == ERROR_INVALID_PARAMETER, "wrong error %lu\n", GetLastError()); CloseHandle( handle ); - - handle = CreateSemaphoreA( NULL, 0, 5, NULL ); - ok(!!handle, "CreateSemaphore failed: %lu\n", GetLastError()); - - ret = WaitForSingleObject( handle, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ret = ReleaseSemaphore( handle, 1, &prev ); - ok(ret, "got error %lu\n", GetLastError()); - ok(prev == 0, "got prev %ld\n", prev); - - ret = ReleaseSemaphore( handle, 1, &prev ); - ok(ret, "got error %lu\n", GetLastError()); - ok(prev == 1, "got prev %ld\n", prev); - - ret = ReleaseSemaphore( handle, 5, &prev ); - ok(!ret, "got %ld\n", ret); - ok(GetLastError() == ERROR_TOO_MANY_POSTS, "got error %lu\n", GetLastError()); - ok(prev == 1, "got prev %ld\n", prev); - - ret = ReleaseSemaphore( handle, 2, &prev ); - ok(ret, "got error %lu\n", GetLastError()); - ok(prev == 2, "got prev %ld\n", prev); - - ret = ReleaseSemaphore( handle, 1, &prev ); - ok(ret, "got error %lu\n", GetLastError()); - ok(prev == 4, "got prev %ld\n", prev); - - for (i = 0; i < 5; i++) - { - ret = WaitForSingleObject( handle, 0 ); - ok(ret == 0, "got %lu\n", ret); - } - - ret = WaitForSingleObject( handle, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - handle2 = CreateSemaphoreA( NULL, 3, 5, NULL ); - ok(!!handle2, "CreateSemaphore failed: %lu\n", GetLastError()); - - ret = ReleaseSemaphore( handle2, 1, &prev ); - ok(ret, "got error %lu\n", GetLastError()); - ok(prev == 3, "got prev %ld\n", prev); - - for (i = 0; i < 4; i++) - { - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == 0, "got %lu\n", ret); - } - - ret = WaitForSingleObject( handle2, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - handles[0] = handle; - handles[1] = handle2; - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ReleaseSemaphore( handle, 1, NULL ); - ReleaseSemaphore( handle2, 1, NULL ); - - ret = 
WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == 1, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ReleaseSemaphore( handle, 1, NULL ); - ReleaseSemaphore( handle2, 1, NULL ); - - ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForMultipleObjects( 2, handles, FALSE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ReleaseSemaphore( handle, 1, NULL ); - - ret = WaitForMultipleObjects( 2, handles, TRUE, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - ret = WaitForSingleObject( handle, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = CloseHandle( handle ); - ok(ret, "got error %lu\n", ret); - - ret = CloseHandle( handle2 ); - ok(ret, "got error %lu\n", ret); } static void test_waitable_timer(void) @@ -1531,15 +1221,11 @@ static HANDLE modify_handle(HANDLE handle, DWORD modify) return ULongToHandle(tmp); } -#define TIMEOUT_INFINITE (((LONGLONG)0x7fffffff) << 32 | 0xffffffff) - static void test_WaitForSingleObject(void) { HANDLE signaled, nonsignaled, invalid; - LARGE_INTEGER ntnow, ntthen; LARGE_INTEGER timeout; NTSTATUS status; - DWORD now, then; DWORD ret; signaled = CreateEventW(NULL, TRUE, TRUE, NULL); @@ -1624,68 +1310,6 @@ static void test_WaitForSingleObject(void) status = pNtWaitForSingleObject(GetCurrentThread(), FALSE, &timeout); ok(status == STATUS_TIMEOUT, "expected STATUS_TIMEOUT, got %08lx\n", status); - ret = WaitForSingleObject( signaled, 0 ); - ok(ret == 0, "got %lu\n", ret); - - ret = WaitForSingleObject( nonsignaled, 0 ); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - - /* test that a timed wait actually does wait */ - now = GetTickCount(); - ret = WaitForSingleObject( nonsignaled, 100 ); - then = GetTickCount(); - ok(ret == WAIT_TIMEOUT, "got %lu\n", ret); - ok(abs((then - now) - 100) < 5, "got %lu ms\n", 
then - now); - - now = GetTickCount(); - ret = WaitForSingleObject( signaled, 100 ); - then = GetTickCount(); - ok(ret == 0, "got %lu\n", ret); - ok(abs(then - now) < 5, "got %lu ms\n", then - now); - - ret = WaitForSingleObject( signaled, INFINITE ); - ok(ret == 0, "got %lu\n", ret); - - /* test NT timeouts */ - pNtQuerySystemTime( &ntnow ); - timeout.QuadPart = ntnow.QuadPart + 100 * 10000; - status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); - pNtQuerySystemTime( &ntthen ); - ok(status == STATUS_TIMEOUT, "got %#lx\n", status); - ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", - wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); - - pNtQuerySystemTime( &ntnow ); - timeout.QuadPart = -100 * 10000; - status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); - pNtQuerySystemTime( &ntthen ); - ok(status == STATUS_TIMEOUT, "got %#lx\n", status); - ok(abs(((ntthen.QuadPart - ntnow.QuadPart) / 10000) - 100) < 5, "got %s ns\n", - wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); - - status = pNtWaitForSingleObject( signaled, FALSE, NULL ); - ok(status == 0, "got %#lx\n", status); - - timeout.QuadPart = TIMEOUT_INFINITE; - status = pNtWaitForSingleObject( signaled, FALSE, &timeout ); - ok(status == 0, "got %#lx\n", status); - - pNtQuerySystemTime( &ntnow ); - timeout.QuadPart = ntnow.QuadPart; - status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); - pNtQuerySystemTime( &ntthen ); - ok(status == STATUS_TIMEOUT, "got %#lx\n", status); - ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", - wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); - - pNtQuerySystemTime( &ntnow ); - timeout.QuadPart = ntnow.QuadPart - 100 * 10000; - status = pNtWaitForSingleObject( nonsignaled, FALSE, &timeout ); - pNtQuerySystemTime( &ntthen ); - ok(status == STATUS_TIMEOUT, "got %#lx\n", status); - ok(abs((ntthen.QuadPart - ntnow.QuadPart) / 10000) < 5, "got %s ns\n", - 
wine_dbgstr_longlong((ntthen.QuadPart - ntnow.QuadPart) * 100)); - CloseHandle(signaled); CloseHandle(nonsignaled); } @@ -3342,7 +2966,6 @@ START_TEST(sync) pTryAcquireSRWLockShared = (void *)GetProcAddress(hdll, "TryAcquireSRWLockShared"); pNtAllocateVirtualMemory = (void *)GetProcAddress(hntdll, "NtAllocateVirtualMemory"); pNtFreeVirtualMemory = (void *)GetProcAddress(hntdll, "NtFreeVirtualMemory"); - pNtQuerySystemTime = (void *)GetProcAddress(hntdll, "NtQuerySystemTime"); pNtWaitForSingleObject = (void *)GetProcAddress(hntdll, "NtWaitForSingleObject"); pNtWaitForMultipleObjects = (void *)GetProcAddress(hntdll, "NtWaitForMultipleObjects"); pRtlInterlockedPushListSList = (void *)GetProcAddress(hntdll, "RtlInterlockedPushListSList"); diff --git a/dlls/ntdll/Makefile.in b/dlls/ntdll/Makefile.in index 0ce1f44a34d..d3f2a0e5523 100644 --- a/dlls/ntdll/Makefile.in +++ b/dlls/ntdll/Makefile.in @@ -48,9 +48,7 @@ SOURCES = \ unix/cdrom.c \ unix/debug.c \ unix/env.c \ - unix/esync.c \ unix/file.c \ - unix/fsync.c \ unix/loader.c \ unix/loadorder.c \ unix/process.c \ diff --git a/dlls/ntdll/unix/esync.c b/dlls/ntdll/unix/esync.c deleted file mode 100644 index 3074f7c72ea..00000000000 --- a/dlls/ntdll/unix/esync.c +++ /dev/null @@ -1,1354 +0,0 @@ -/* - * eventfd-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -#if 0 -#pragma makedep unix -#endif - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#ifdef HAVE_SYS_STAT_H -# include -#endif -#include -#include -#include - -#include "ntstatus.h" -#define WIN32_NO_STATUS -#include "windef.h" -#include "winternl.h" -#include "wine/server.h" -#include "wine/debug.h" - -#include "unix_private.h" -#include "esync.h" -#include "fsync.h" - -WINE_DEFAULT_DEBUG_CHANNEL(esync); - -int do_esync(void) -{ -#ifdef HAVE_SYS_EVENTFD_H - static int do_esync_cached = -1; - - if (do_esync_cached == -1) - do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")) && !do_fsync(); - - return do_esync_cached; -#else - static int once; - if (!once++) - FIXME("eventfd not supported on this platform.\n"); - return 0; -#endif -} - -struct esync -{ - enum esync_type type; - int fd; - void *shm; -}; - -struct semaphore -{ - int max; - int count; -}; -C_ASSERT(sizeof(struct semaphore) == 8); - -struct mutex -{ - DWORD tid; - int count; /* recursion count */ -}; -C_ASSERT(sizeof(struct mutex) == 8); - -struct event -{ - int signaled; - int locked; -}; -C_ASSERT(sizeof(struct event) == 8); - -static char shm_name[29]; -static int shm_fd; -static void **shm_addrs; -static int shm_addrs_size; /* length of the allocated shm_addrs array */ -static long pagesize; - -static pthread_mutex_t shm_addrs_mutex = PTHREAD_MUTEX_INITIALIZER; - -static void *get_shm( unsigned int idx ) -{ - int entry = (idx * 8) / pagesize; - int offset = (idx * 8) % pagesize; - void *ret; - - pthread_mutex_lock( &shm_addrs_mutex ); - - if (entry >= shm_addrs_size) - { - int new_size = max(shm_addrs_size * 2, entry + 1); - - if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) - 
ERR("Failed to grow shm_addrs array to size %d.\n", shm_addrs_size); - memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); - shm_addrs_size = new_size; - } - - if (!shm_addrs[entry]) - { - void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); - if (addr == (void *)-1) - ERR("Failed to map page %d (offset %#lx).\n", entry, entry * pagesize); - - TRACE("Mapping page %d at %p.\n", entry, addr); - - if (InterlockedCompareExchangePointer( &shm_addrs[entry], addr, 0 )) - munmap( addr, pagesize ); /* someone beat us to it */ - } - - ret = (void *)((unsigned long)shm_addrs[entry] + offset); - - pthread_mutex_unlock( &shm_addrs_mutex ); - - return ret; -} - -/* We'd like lookup to be fast. To that end, we use a static list indexed by handle. - * This is copied and adapted from the fd cache code. */ - -#define ESYNC_LIST_BLOCK_SIZE (65536 / sizeof(struct esync)) -#define ESYNC_LIST_ENTRIES 256 - -static struct esync *esync_list[ESYNC_LIST_ENTRIES]; -static struct esync esync_list_initial_block[ESYNC_LIST_BLOCK_SIZE]; - -static inline UINT_PTR handle_to_index( HANDLE handle, UINT_PTR *entry ) -{ - UINT_PTR idx = (((UINT_PTR)handle) >> 2) - 1; - *entry = idx / ESYNC_LIST_BLOCK_SIZE; - return idx % ESYNC_LIST_BLOCK_SIZE; -} - -static struct esync *add_to_list( HANDLE handle, enum esync_type type, int fd, void *shm ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - - if (entry >= ESYNC_LIST_ENTRIES) - { - FIXME( "too many allocated handles, not caching %p\n", handle ); - return FALSE; - } - - if (!esync_list[entry]) /* do we need to allocate a new block of entries? 
*/ - { - if (!entry) esync_list[0] = esync_list_initial_block; - else - { - void *ptr = anon_mmap_alloc( ESYNC_LIST_BLOCK_SIZE * sizeof(struct esync), - PROT_READ | PROT_WRITE ); - if (ptr == MAP_FAILED) return FALSE; - esync_list[entry] = ptr; - } - } - - if (!InterlockedCompareExchange( (LONG *)&esync_list[entry][idx].type, type, 0 )) - { - esync_list[entry][idx].fd = fd; - esync_list[entry][idx].shm = shm; - } - return &esync_list[entry][idx]; -} - -static struct esync *get_cached_object( HANDLE handle ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - - if (entry >= ESYNC_LIST_ENTRIES || !esync_list[entry]) return NULL; - if (!esync_list[entry][idx].type) return NULL; - - return &esync_list[entry][idx]; -} - -/* Gets an object. This is either a proper esync object (i.e. an event, - * semaphore, etc. created using create_esync) or a generic synchronizable - * server-side object which the server will signal (e.g. a process, thread, - * message queue, etc.) */ -static NTSTATUS get_object( HANDLE handle, struct esync **obj ) -{ - NTSTATUS ret = STATUS_SUCCESS; - enum esync_type type = 0; - unsigned int shm_idx = 0; - obj_handle_t fd_handle; - sigset_t sigset; - int fd = -1; - - if ((*obj = get_cached_object( handle ))) return STATUS_SUCCESS; - - if ((INT_PTR)handle < 0) - { - /* We can deal with pseudo-handles, but it's just easier this way */ - return STATUS_NOT_IMPLEMENTED; - } - - if (!handle) - { - /* Shadow of the Tomb Raider really likes passing in NULL handles to - * various functions. Concerning, but let's avoid a server call. */ - return STATUS_INVALID_HANDLE; - } - - /* We need to try grabbing it from the server. 
*/ - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - if (!(*obj = get_cached_object( handle ))) - { - SERVER_START_REQ( get_esync_fd ) - { - req->handle = wine_server_obj_handle( handle ); - if (!(ret = wine_server_call( req ))) - { - type = reply->type; - shm_idx = reply->shm_idx; - fd = receive_fd( &fd_handle ); - assert( wine_server_ptr_handle(fd_handle) == handle ); - } - } - SERVER_END_REQ; - } - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - - if (*obj) - { - /* We managed to grab it while in the CS; return it. */ - return STATUS_SUCCESS; - } - - if (ret) - { - WARN("Failed to retrieve fd for handle %p, status %#x.\n", handle, (unsigned int)ret); - *obj = NULL; - return ret; - } - - TRACE("Got fd %d for handle %p.\n", fd, handle); - - *obj = add_to_list( handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); - return ret; -} - -NTSTATUS esync_close( HANDLE handle ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - - TRACE("%p.\n", handle); - - if (entry < ESYNC_LIST_ENTRIES && esync_list[entry]) - { - if (InterlockedExchange((LONG *)&esync_list[entry][idx].type, 0)) - { - close( esync_list[entry][idx].fd ); - return STATUS_SUCCESS; - } - } - - return STATUS_INVALID_HANDLE; -} - -static NTSTATUS create_esync( enum esync_type type, HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, int initval, int max ) -{ - NTSTATUS ret; - data_size_t len; - struct object_attributes *objattr; - obj_handle_t fd_handle; - unsigned int shm_idx; - sigset_t sigset; - int fd; - - if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; - - /* We have to synchronize on the fd cache CS so that our calls to - * receive_fd don't race with theirs. 
*/ - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - SERVER_START_REQ( create_esync ) - { - req->access = access; - req->initval = initval; - req->type = type; - req->max = max; - wine_server_add_data( req, objattr, len ); - ret = wine_server_call( req ); - if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) - { - *handle = wine_server_ptr_handle( reply->handle ); - type = reply->type; - shm_idx = reply->shm_idx; - fd = receive_fd( &fd_handle ); - assert( wine_server_ptr_handle(fd_handle) == *handle ); - } - } - SERVER_END_REQ; - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - - if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) - { - add_to_list( *handle, type, fd, shm_idx ? get_shm( shm_idx ) : 0 ); - TRACE("-> handle %p, fd %d.\n", *handle, fd); - } - - free( objattr ); - return ret; -} - -static NTSTATUS open_esync( enum esync_type type, HANDLE *handle, - ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr ) -{ - NTSTATUS ret; - obj_handle_t fd_handle; - unsigned int shm_idx; - sigset_t sigset; - int fd; - - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - SERVER_START_REQ( open_esync ) - { - req->access = access; - req->attributes = attr->Attributes; - req->rootdir = wine_server_obj_handle( attr->RootDirectory ); - req->type = type; - if (attr->ObjectName) - wine_server_add_data( req, attr->ObjectName->Buffer, attr->ObjectName->Length ); - if (!(ret = wine_server_call( req ))) - { - *handle = wine_server_ptr_handle( reply->handle ); - type = reply->type; - shm_idx = reply->shm_idx; - fd = receive_fd( &fd_handle ); - assert( wine_server_ptr_handle(fd_handle) == *handle ); - } - } - SERVER_END_REQ; - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - - if (!ret) - { - add_to_list( *handle, type, fd, shm_idx ? 
get_shm( shm_idx ) : 0 ); - - TRACE("-> handle %p, fd %d.\n", *handle, fd); - } - return ret; -} - -extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max) -{ - TRACE("name %s, initial %d, max %d.\n", - attr ? debugstr_us(attr->ObjectName) : "", (int)initial, (int)max); - - return create_esync( ESYNC_SEMAPHORE, handle, access, attr, initial, max ); -} - -NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_esync( ESYNC_SEMAPHORE, handle, access, attr ); -} - -NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ) -{ - struct esync *obj; - struct semaphore *semaphore; - uint64_t count64 = count; - ULONG current; - NTSTATUS ret; - - TRACE("%p, %d, %p.\n", handle, (int)count, prev); - - if ((ret = get_object( handle, &obj))) return ret; - semaphore = obj->shm; - - do - { - current = semaphore->count; - - if (count + current > semaphore->max) - return STATUS_SEMAPHORE_LIMIT_EXCEEDED; - } while (InterlockedCompareExchange( (LONG *)&semaphore->count, count + current, current ) != current); - - if (prev) *prev = current; - - /* We don't have to worry about a race between increasing the count and - * write(). The fact that we were able to increase the count means that we - * have permission to actually write that many releases to the semaphore. 
*/ - - if (write( obj->fd, &count64, sizeof(count64) ) == -1) - return errno_to_status( errno ); - - return STATUS_SUCCESS; -} - -NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct esync *obj; - struct semaphore *semaphore; - SEMAPHORE_BASIC_INFORMATION *out = info; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - semaphore = obj->shm; - - out->CurrentCount = semaphore->count; - out->MaximumCount = semaphore->max; - if (ret_len) *ret_len = sizeof(*out); - - return STATUS_SUCCESS; -} - -NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, EVENT_TYPE event_type, BOOLEAN initial ) -{ - enum esync_type type = (event_type == SynchronizationEvent ? ESYNC_AUTO_EVENT : ESYNC_MANUAL_EVENT); - - TRACE("name %s, %s-reset, initial %d.\n", - attr ? debugstr_us(attr->ObjectName) : "", - event_type == NotificationEvent ? "manual" : "auto", initial); - - return create_esync( type, handle, access, attr, initial, 0 ); -} - -NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_esync( ESYNC_AUTO_EVENT, handle, access, attr ); /* doesn't matter which */ -} - -static inline void small_pause(void) -{ -#ifdef __i386__ - __asm__ __volatile__( "rep;nop" : : : "memory" ); -#else - __asm__ __volatile__( "" : : : "memory" ); -#endif -} - -/* Manual-reset events are actually racier than other objects in terms of shm - * state. With other objects, races don't matter, because we only treat the shm - * state as a hint that lets us skip poll()—we still have to read(). But with - * manual-reset events we don't, which means that the shm state can be out of - * sync with the actual state. - * - * In general we shouldn't have to worry about races between modifying the - * event and waiting on it. 
If the state changes while we're waiting, it's - * equally plausible that we caught it before or after the state changed. - * However, we can have races between SetEvent() and ResetEvent(), so that the - * event has inconsistent internal state. - * - * To solve this we have to use the other field to lock the event. Currently - * this is implemented as a spinlock, but I'm not sure if a futex might be - * better. I'm also not sure if it's possible to obviate locking by arranging - * writes and reads in a certain way. - * - * Note that we don't have to worry about locking in esync_wait_objects(). - * There's only two general patterns: - * - * WaitFor() SetEvent() - * ------------------------- - * read() - * signaled = 0 - * signaled = 1 - * write() - * ------------------------- - * read() - * signaled = 1 - * signaled = 0 - * - * ------------------------- - * - * That is, if SetEvent() tries to signal the event before WaitFor() resets its - * signaled state, it won't bother trying to write(), and then the signaled - * state will be reset, so the result is a consistent non-signaled event. - * There's several variations to this pattern but all of them are protected in - * the same way. Note however this is why we have to use interlocked_xchg() - * event inside of the lock. - */ - -/* Removing this spinlock is harder than it looks. esync_wait_objects() can - * deal with inconsistent state well enough, and a race between SetEvent() and - * ResetEvent() gives us license to yield either result as long as we act - * consistently, but that's not enough. Notably, esync_wait_objects() should - * probably act like a fence, so that the second half of esync_set_event() does - * not seep past a subsequent reset. That's one problem, but no guarantee there - * aren't others. 
*/ - -NTSTATUS esync_set_event( HANDLE handle ) -{ - static const uint64_t value = 1; - struct esync *obj; - struct event *event; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj->shm; - - if (obj->type != ESYNC_MANUAL_EVENT && obj->type != ESYNC_AUTO_EVENT) - return STATUS_OBJECT_TYPE_MISMATCH; - - if (obj->type == ESYNC_MANUAL_EVENT) - { - /* Acquire the spinlock. */ - while (InterlockedCompareExchange( (LONG *)&event->locked, 1, 0 )) - small_pause(); - } - - /* For manual-reset events, as long as we're in a lock, we can take the - * optimization of only calling write() if the event wasn't already - * signaled. - * - * For auto-reset events, esync_wait_objects() must grab the kernel object. - * Thus if we got into a race so that the shm state is signaled but the - * eventfd is unsignaled (i.e. reset shm, set shm, set fd, reset fd), we - * *must* signal the fd now, or any waiting threads will never wake up. */ - - if (!InterlockedExchange( (LONG *)&event->signaled, 1 ) || obj->type == ESYNC_AUTO_EVENT) - { - if (write( obj->fd, &value, sizeof(value) ) == -1) - ERR("write: %s\n", strerror(errno)); - } - - if (obj->type == ESYNC_MANUAL_EVENT) - { - /* Release the spinlock. */ - event->locked = 0; - } - - return STATUS_SUCCESS; -} - -NTSTATUS esync_reset_event( HANDLE handle ) -{ - uint64_t value; - struct esync *obj; - struct event *event; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj->shm; - - if (obj->type != ESYNC_MANUAL_EVENT && obj->type != ESYNC_AUTO_EVENT) - return STATUS_OBJECT_TYPE_MISMATCH; - - if (obj->type == ESYNC_MANUAL_EVENT) - { - /* Acquire the spinlock. */ - while (InterlockedCompareExchange( (LONG *)&event->locked, 1, 0 )) - small_pause(); - } - - /* For manual-reset events, as long as we're in a lock, we can take the - * optimization of only calling read() if the event was already signaled. 
- * - * For auto-reset events, we have no guarantee that the previous "signaled" - * state is actually correct. We need to leave both states unsignaled after - * leaving this function, so we always have to read(). */ - if (InterlockedExchange( (LONG *)&event->signaled, 0 ) || obj->type == ESYNC_AUTO_EVENT) - { - if (read( obj->fd, &value, sizeof(value) ) == -1 && errno != EWOULDBLOCK && errno != EAGAIN) - { - ERR("read: %s\n", strerror(errno)); - } - } - - if (obj->type == ESYNC_MANUAL_EVENT) - { - /* Release the spinlock. */ - event->locked = 0; - } - - return STATUS_SUCCESS; -} - -NTSTATUS esync_pulse_event( HANDLE handle ) -{ - uint64_t value = 1; - struct esync *obj; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - - if (obj->type != ESYNC_MANUAL_EVENT && obj->type != ESYNC_AUTO_EVENT) - return STATUS_OBJECT_TYPE_MISMATCH; - - /* This isn't really correct; an application could miss the write. - * Unfortunately we can't really do much better. Fortunately this is rarely - * used (and publicly deprecated). */ - if (write( obj->fd, &value, sizeof(value) ) == -1) - return errno_to_status( errno ); - - /* Try to give other threads a chance to wake up. Hopefully erring on this - * side is the better thing to do... */ - usleep(0); - - read( obj->fd, &value, sizeof(value) ); - - return STATUS_SUCCESS; -} - -NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct esync *obj; - EVENT_BASIC_INFORMATION *out = info; - struct pollfd fd; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - - fd.fd = obj->fd; - fd.events = POLLIN; - out->EventState = poll( &fd, 1, 0 ); - out->EventType = (obj->type == ESYNC_AUTO_EVENT ? 
SynchronizationEvent : NotificationEvent); - if (ret_len) *ret_len = sizeof(*out); - - return STATUS_SUCCESS; -} - -NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ) -{ - TRACE("name %s, initial %d.\n", - attr ? debugstr_us(attr->ObjectName) : "", initial); - - return create_esync( ESYNC_MUTEX, handle, access, attr, initial ? 0 : 1, 0 ); -} - -NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_esync( ESYNC_MUTEX, handle, access, attr ); -} - -NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ) -{ - struct esync *obj; - struct mutex *mutex; - static const uint64_t value = 1; - NTSTATUS ret; - - TRACE("%p, %p.\n", handle, prev); - - if ((ret = get_object( handle, &obj ))) return ret; - mutex = obj->shm; - - /* This is thread-safe, because the only thread that can change the tid to - * or from our tid is ours. */ - if (mutex->tid != GetCurrentThreadId()) return STATUS_MUTANT_NOT_OWNED; - - if (prev) *prev = mutex->count; - - mutex->count--; - - if (!mutex->count) - { - /* This is also thread-safe, as long as signaling the file is the last - * thing we do. Other threads don't care about the tid if it isn't - * theirs. 
*/ - mutex->tid = 0; - - if (write( obj->fd, &value, sizeof(value) ) == -1) - return errno_to_status( errno ); - } - - return STATUS_SUCCESS; -} - -NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct esync *obj; - struct mutex *mutex; - MUTANT_BASIC_INFORMATION *out = info; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - mutex = obj->shm; - - out->CurrentCount = 1 - mutex->count; - out->OwnedByCaller = (mutex->tid == GetCurrentThreadId()); - out->AbandonedState = (mutex->tid == ~0); - if (ret_len) *ret_len = sizeof(*out); - - return STATUS_SUCCESS; -} - -#define TICKSPERSEC 10000000 -#define TICKSPERMSEC 10000 - -static LONGLONG update_timeout( ULONGLONG end ) -{ - LARGE_INTEGER now; - LONGLONG timeleft; - - NtQuerySystemTime( &now ); - timeleft = end - now.QuadPart; - if (timeleft < 0) timeleft = 0; - return timeleft; -} - -static int do_poll( struct pollfd *fds, nfds_t nfds, ULONGLONG *end ) -{ - int ret; - - do - { - if (end) - { - LONGLONG timeleft = update_timeout( *end ); - -#ifdef HAVE_PPOLL - /* We use ppoll() if available since the time granularity is better. */ - struct timespec tmo_p; - tmo_p.tv_sec = timeleft / (ULONGLONG)TICKSPERSEC; - tmo_p.tv_nsec = (timeleft % TICKSPERSEC) * 100; - ret = ppoll( fds, nfds, &tmo_p, NULL ); -#else - ret = poll( fds, nfds, timeleft / TICKSPERMSEC ); -#endif - } - else - ret = poll( fds, nfds, -1 ); - - /* If we receive EINTR we were probably suspended (SIGUSR1), possibly for a - * system APC. The right thing to do is just try again. */ - } while (ret < 0 && errno == EINTR); - - return ret; -} - -/* Return TRUE if abandoned. 
*/ -static BOOL update_grabbed_object( struct esync *obj ) -{ - BOOL ret = FALSE; - - if (obj->type == ESYNC_MUTEX) - { - struct mutex *mutex = obj->shm; - /* We don't have to worry about a race between this and read(); the - * fact that we grabbed it means the count is now zero, so nobody else - * can (and the only thread that can release it is us). */ - if (mutex->tid == ~0) - ret = TRUE; - mutex->tid = GetCurrentThreadId(); - mutex->count++; - } - else if (obj->type == ESYNC_SEMAPHORE) - { - struct semaphore *semaphore = obj->shm; - /* We don't have to worry about a race between this and read(); the - * fact that we were able to grab it at all means the count is nonzero, - * and if someone else grabbed it then the count must have been >= 2, - * etc. */ - InterlockedExchangeAdd( (LONG *)&semaphore->count, -1 ); - } - else if (obj->type == ESYNC_AUTO_EVENT) - { - struct event *event = obj->shm; - /* We don't have to worry about a race between this and read(), since - * this is just a hint, and the real state is in the kernel object. - * This might already be 0, but that's okay! */ - event->signaled = 0; - } - - return ret; -} - -/* A value of STATUS_NOT_IMPLEMENTED returned from this function means that we - * need to delegate to server_select(). */ -static NTSTATUS __esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, - BOOLEAN alertable, const LARGE_INTEGER *timeout ) -{ - static const LARGE_INTEGER zero; - - struct esync *objs[MAXIMUM_WAIT_OBJECTS]; - struct pollfd fds[MAXIMUM_WAIT_OBJECTS + 1]; - int has_esync = 0, has_server = 0; - BOOL msgwait = FALSE; - LONGLONG timeleft; - LARGE_INTEGER now; - DWORD pollcount; - ULONGLONG end; - int64_t value; - ssize_t size; - int i, j, ret; - - /* Grab the APC fd if we don't already have it. 
*/ - if (alertable && ntdll_get_thread_data()->esync_apc_fd == -1) - { - obj_handle_t fd_handle; - sigset_t sigset; - int fd = -1; - - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - SERVER_START_REQ( get_esync_apc_fd ) - { - if (!(ret = wine_server_call( req ))) - { - fd = receive_fd( &fd_handle ); - assert( fd_handle == GetCurrentThreadId() ); - } - } - SERVER_END_REQ; - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - - ntdll_get_thread_data()->esync_apc_fd = fd; - } - - NtQuerySystemTime( &now ); - if (timeout) - { - if (timeout->QuadPart == TIMEOUT_INFINITE) - timeout = NULL; - else if (timeout->QuadPart >= 0) - end = timeout->QuadPart; - else - end = now.QuadPart - timeout->QuadPart; - } - - for (i = 0; i < count; i++) - { - ret = get_object( handles[i], &objs[i] ); - if (ret == STATUS_SUCCESS) - has_esync = 1; - else if (ret == STATUS_NOT_IMPLEMENTED) - has_server = 1; - else - return ret; - } - - if (count && objs[count - 1] && objs[count - 1]->type == ESYNC_QUEUE) - msgwait = TRUE; - - if (has_esync && has_server) - FIXME("Can't wait on esync and server objects at the same time!\n"); - else if (has_server) - return STATUS_NOT_IMPLEMENTED; - - if (TRACE_ON(esync)) - { - TRACE("Waiting for %s of %d handles:", wait_any ? "any" : "all", (int)count); - for (i = 0; i < count; i++) - TRACE(" %p", handles[i]); - - if (msgwait) - TRACE(" or driver events"); - if (alertable) - TRACE(", alertable"); - - if (!timeout) - TRACE(", timeout = INFINITE.\n"); - else - { - timeleft = update_timeout( end ); - TRACE(", timeout = %ld.%07ld sec.\n", - (long) timeleft / TICKSPERSEC, (long) timeleft % TICKSPERSEC); - } - } - - if (wait_any || count <= 1) - { - /* Try to check objects now, so we can obviate poll() at least. 
*/ - for (i = 0; i < count; i++) - { - struct esync *obj = objs[i]; - - if (obj) - { - switch (obj->type) - { - case ESYNC_MUTEX: - { - struct mutex *mutex = obj->shm; - - if (mutex->tid == GetCurrentThreadId()) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - mutex->count++; - return i; - } - else if (!mutex->count) - { - if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) - { - if (mutex->tid == ~0) - { - TRACE("Woken up by abandoned mutex %p [%d].\n", handles[i], i); - i += STATUS_ABANDONED_WAIT_0; - } - else - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - mutex->tid = GetCurrentThreadId(); - mutex->count++; - return i; - } - } - break; - } - case ESYNC_SEMAPHORE: - { - struct semaphore *semaphore = obj->shm; - - if (semaphore->count) - { - if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - InterlockedDecrement( (LONG *)&semaphore->count ); - return i; - } - } - break; - } - case ESYNC_AUTO_EVENT: - { - struct event *event = obj->shm; - - if (event->signaled) - { - if (ac_odyssey && alertable) - usleep( 0 ); - if ((size = read( obj->fd, &value, sizeof(value) )) == sizeof(value)) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - event->signaled = 0; - return i; - } - } - break; - } - case ESYNC_MANUAL_EVENT: - { - struct event *event = obj->shm; - - if (event->signaled) - { - if (ac_odyssey && alertable) - { - usleep( 0 ); - if (!event->signaled) - break; - } - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - return i; - } - break; - } - case ESYNC_AUTO_SERVER: - case ESYNC_MANUAL_SERVER: - case ESYNC_QUEUE: - /* We can't wait on any of these. Fortunately I don't think - * they'll ever be uncontended anyway (at least, they won't be - * performance-critical). */ - break; - } - } - - fds[i].fd = obj ? 
obj->fd : -1; - fds[i].events = POLLIN; - } - if (alertable) - { - fds[i].fd = ntdll_get_thread_data()->esync_apc_fd; - fds[i].events = POLLIN; - i++; - } - pollcount = i; - - while (1) - { - if (ac_odyssey && alertable) - usleep( 0 ); - - ret = do_poll( fds, pollcount, timeout ? &end : NULL ); - if (ret > 0) - { - /* We must check this first! The server may set an event that - * we're waiting on, but we need to return STATUS_USER_APC. */ - if (alertable) - { - if (fds[pollcount - 1].revents & POLLIN) - goto userapc; - } - - /* Find out which object triggered the wait. */ - for (i = 0; i < count; i++) - { - struct esync *obj = objs[i]; - - if (fds[i].revents & (POLLERR | POLLHUP | POLLNVAL)) - { - ERR("Polling on fd %d returned %#x.\n", fds[i].fd, fds[i].revents); - return STATUS_INVALID_HANDLE; - } - - if (obj) - { - if (obj->type == ESYNC_MANUAL_EVENT - || obj->type == ESYNC_MANUAL_SERVER - || obj->type == ESYNC_QUEUE) - { - /* Don't grab the object, just check if it's signaled. */ - if (fds[i].revents & POLLIN) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - return i; - } - } - else - { - if ((size = read( fds[i].fd, &value, sizeof(value) )) == sizeof(value)) - { - /* We found our object. */ - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - if (update_grabbed_object( obj )) - return STATUS_ABANDONED_WAIT_0 + i; - return i; - } - } - } - } - - /* If we got here, someone else stole (or reset, etc.) whatever - * we were waiting for. So keep waiting. */ - NtQuerySystemTime( &now ); - } - else - goto err; - } - } - else - { - /* Wait-all is a little trickier to implement correctly. Fortunately, - * it's not as common. - * - * The idea is basically just to wait in sequence on every object in the - * set. Then when we're done, try to grab them all in a tight loop. 
If - * that fails, release any resources we've grabbed (and yes, we can - * reliably do this—it's just mutexes and semaphores that we have to - * put back, and in both cases we just put back 1), and if any of that - * fails we start over. - * - * What makes this inherently bad is that we might temporarily grab a - * resource incorrectly. Hopefully it'll be quick (and hey, it won't - * block on wineserver) so nobody will notice. Besides, consider: if - * object A becomes signaled but someone grabs it before we can grab it - * and everything else, then they could just as well have grabbed it - * before it became signaled. Similarly if object A was signaled and we - * were blocking on object B, then B becomes available and someone grabs - * A before we can, then they might have grabbed A before B became - * signaled. In either case anyone who tries to wait on A or B will be - * waiting for an instant while we put things back. */ - - while (1) - { -tryagain: - /* First step: try to poll on each object in sequence. */ - fds[0].events = POLLIN; - pollcount = 1; - if (alertable) - { - /* We also need to wait on APCs. */ - fds[1].fd = ntdll_get_thread_data()->esync_apc_fd; - fds[1].events = POLLIN; - pollcount++; - } - for (i = 0; i < count; i++) - { - struct esync *obj = objs[i]; - - fds[0].fd = obj ? obj->fd : -1; - - if (obj && obj->type == ESYNC_MUTEX) - { - /* It might be ours. */ - struct mutex *mutex = obj->shm; - - if (mutex->tid == GetCurrentThreadId()) - continue; - } - - ret = do_poll( fds, pollcount, timeout ? &end : NULL ); - if (ret <= 0) - goto err; - else if (alertable && (fds[1].revents & POLLIN)) - goto userapc; - - if (fds[0].revents & (POLLHUP | POLLERR | POLLNVAL)) - { - ERR("Polling on fd %d returned %#x.\n", fds[0].fd, fds[0].revents); - return STATUS_INVALID_HANDLE; - } - } - - /* If we got here and we haven't timed out, that means all of the - * handles were signaled. Check to make sure they still are. 
*/ - for (i = 0; i < count; i++) - { - fds[i].fd = objs[i] ? objs[i]->fd : -1; - fds[i].events = POLLIN; - } - /* There's no reason to check for APCs here. */ - pollcount = i; - - /* Poll everything to see if they're still signaled. */ - ret = poll( fds, pollcount, 0 ); - if (ret == pollcount) - { - BOOL abandoned = FALSE; - - /* Quick, grab everything. */ - for (i = 0; i < count; i++) - { - struct esync *obj = objs[i]; - - switch (obj->type) - { - case ESYNC_MUTEX: - { - struct mutex *mutex = obj->shm; - if (mutex->tid == GetCurrentThreadId()) - break; - /* otherwise fall through */ - } - case ESYNC_SEMAPHORE: - case ESYNC_AUTO_EVENT: - if ((size = read( fds[i].fd, &value, sizeof(value) )) != sizeof(value)) - { - /* We were too slow. Put everything back. */ - value = 1; - for (j = i - 1; j >= 0; j--) - { - struct esync *obj = objs[j]; - - if (obj->type == ESYNC_MUTEX) - { - struct mutex *mutex = obj->shm; - - if (mutex->tid == GetCurrentThreadId()) - continue; - } - if (write( fds[j].fd, &value, sizeof(value) ) == -1) - { - ERR("write failed.\n"); - return errno_to_status( errno ); - } - } - - goto tryagain; /* break out of two loops and a switch */ - } - break; - default: - /* If a manual-reset event changed between there and - * here, it's shouldn't be a problem. */ - break; - } - } - - /* If we got here, we successfully waited on every object. */ - /* Make sure to let ourselves know that we grabbed the mutexes - * and semaphores. */ - for (i = 0; i < count; i++) - abandoned |= update_grabbed_object( objs[i] ); - - if (abandoned) - { - TRACE("Wait successful, but some object(s) were abandoned.\n"); - return STATUS_ABANDONED; - } - TRACE("Wait successful.\n"); - return STATUS_SUCCESS; - } - - /* If we got here, ppoll() returned less than all of our objects. - * So loop back to the beginning and try again. */ - } /* while(1) */ - } /* else (wait-all) */ - -err: - /* We should only get here if poll() failed. 
*/ - - if (ret == 0) - { - TRACE("Wait timed out.\n"); - return STATUS_TIMEOUT; - } - else - { - ERR("ppoll failed: %s\n", strerror(errno)); - return errno_to_status( errno ); - } - -userapc: - TRACE("Woken up by user APC.\n"); - - /* We have to make a server call anyway to get the APC to execute, so just - * delegate down to server_select(). */ - ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &zero ); - - /* This can happen if we received a system APC, and the APC fd was woken up - * before we got SIGUSR1. poll() doesn't return EINTR in that case. The - * right thing to do seems to be to return STATUS_USER_APC anyway. */ - if (ret == STATUS_TIMEOUT) ret = STATUS_USER_APC; - return ret; -} - -/* We need to let the server know when we are doing a message wait, and when we - * are done with one, so that all of the code surrounding hung queues works. - * We also need this for WaitForInputIdle(). */ -static void server_set_msgwait( int in_msgwait ) -{ - SERVER_START_REQ( esync_msgwait ) - { - req->in_msgwait = in_msgwait; - wine_server_call( req ); - } - SERVER_END_REQ; -} - -/* This is a very thin wrapper around the proper implementation above. The - * purpose is to make sure the server knows when we are doing a message wait. - * This is separated into a wrapper function since there are at least a dozen - * exit paths from esync_wait_objects(). 
*/ -NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, - BOOLEAN alertable, const LARGE_INTEGER *timeout ) -{ - BOOL msgwait = FALSE; - struct esync *obj; - NTSTATUS ret; - - if (count && !get_object( handles[count - 1], &obj ) && obj->type == ESYNC_QUEUE) - { - msgwait = TRUE; - server_set_msgwait( 1 ); - } - - ret = __esync_wait_objects( count, handles, wait_any, alertable, timeout ); - - if (msgwait) - server_set_msgwait( 0 ); - - return ret; -} - -NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, - const LARGE_INTEGER *timeout ) -{ - struct esync *obj; - NTSTATUS ret; - - if ((ret = get_object( signal, &obj ))) return ret; - - switch (obj->type) - { - case ESYNC_SEMAPHORE: - ret = esync_release_semaphore( signal, 1, NULL ); - break; - case ESYNC_AUTO_EVENT: - case ESYNC_MANUAL_EVENT: - ret = esync_set_event( signal ); - break; - case ESYNC_MUTEX: - ret = esync_release_mutex( signal, NULL ); - break; - default: - return STATUS_OBJECT_TYPE_MISMATCH; - } - if (ret) return ret; - - return esync_wait_objects( 1, &wait, TRUE, alertable, timeout ); -} - -void esync_init(void) -{ - struct stat st; - - if (!do_esync()) - { - /* make sure the server isn't running with WINEESYNC */ - HANDLE handle; - NTSTATUS ret; - - ret = create_esync( 0, &handle, 0, NULL, 0, 0 ); - if (ret != STATUS_NOT_IMPLEMENTED) - { - ERR("Server is running with WINEESYNC but this process is not, please enable WINEESYNC or restart wineserver.\n"); - exit(1); - } - - return; - } - - if (stat( config_dir, &st ) == -1) - ERR("Cannot stat %s\n", config_dir); - - if (st.st_ino != (unsigned long)st.st_ino) - sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); - else - sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); - - if ((shm_fd = shm_open( shm_name, O_RDWR, 0644 )) == -1) - { - /* probably the server isn't running with WINEESYNC, tell the user and bail 
*/ - if (errno == ENOENT) - ERR("Failed to open esync shared memory file; make sure no stale wineserver instances are running without WINEESYNC.\n"); - else - ERR("Failed to initialize shared memory: %s\n", strerror( errno )); - exit(1); - } - - pagesize = sysconf( _SC_PAGESIZE ); - - shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); - shm_addrs_size = 128; -} diff --git a/dlls/ntdll/unix/esync.h b/dlls/ntdll/unix/esync.h deleted file mode 100644 index 59f8809fc1a..00000000000 --- a/dlls/ntdll/unix/esync.h +++ /dev/null @@ -1,61 +0,0 @@ -/* - * eventfd-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -extern int do_esync(void); -extern void esync_init(void); -extern NTSTATUS esync_close( HANDLE handle ); - -extern NTSTATUS esync_create_semaphore(HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max); -extern NTSTATUS esync_open_semaphore( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS esync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ); -extern NTSTATUS esync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ); - -extern NTSTATUS esync_create_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, EVENT_TYPE type, BOOLEAN initial ); -extern NTSTATUS esync_open_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS esync_pulse_event( HANDLE handle ); -extern NTSTATUS esync_query_event( HANDLE handle, void *info, ULONG *ret_len ); -extern NTSTATUS esync_reset_event( HANDLE handle ); -extern NTSTATUS esync_set_event( HANDLE handle ); - -extern NTSTATUS esync_create_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ); -extern NTSTATUS esync_open_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS esync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ); -extern NTSTATUS esync_release_mutex( HANDLE *handle, LONG *prev ); - -extern NTSTATUS esync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, - BOOLEAN alertable, const LARGE_INTEGER *timeout ); -extern NTSTATUS esync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, - const LARGE_INTEGER *timeout ); - - -/* We have to synchronize on the fd cache mutex so that our calls to receive_fd - * don't race with 
theirs. It looks weird, I know. - * - * If we weren't trying to avoid touching the code I'd rename the mutex to - * "server_fd_mutex" or something similar. */ -extern pthread_mutex_t fd_cache_mutex; - -extern int receive_fd( obj_handle_t *handle ); diff --git a/dlls/ntdll/unix/file.c b/dlls/ntdll/unix/file.c index 0bef6d01a00..69558375448 100644 --- a/dlls/ntdll/unix/file.c +++ b/dlls/ntdll/unix/file.c @@ -6742,7 +6742,6 @@ NTSTATUS WINAPI NtFlushBuffersFile( HANDLE handle, IO_STATUS_BLOCK *io ) if (!ret && (type == FD_TYPE_FILE || type == FD_TYPE_DIR || type == FD_TYPE_CHAR)) { - if (fsync(fd)) ret = errno_to_status( errno ); io->Status = ret; io->Information = 0; } @@ -6906,7 +6905,7 @@ NTSTATUS WINAPI NtLockFile( HANDLE file, HANDLE event, PIO_APC_ROUTINE apc, void } if (handle) { - NtWaitForSingleObject( handle, FALSE, NULL ); + server_wait_for_object( handle, FALSE, NULL ); NtClose( handle ); } else /* Unix lock conflict, sleep a bit and retry */ diff --git a/dlls/ntdll/unix/fsync.c b/dlls/ntdll/unix/fsync.c deleted file mode 100644 index c3da44e4f26..00000000000 --- a/dlls/ntdll/unix/fsync.c +++ /dev/null @@ -1,1478 +0,0 @@ -/* - * futex-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -#if 0 -#pragma makedep unix -#endif - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef HAVE_SYS_SYSCALL_H -# include -#endif -#ifdef HAVE_LINUX_FUTEX_H -# include -#endif -#include -#include - -#include "ntstatus.h" -#define WIN32_NO_STATUS -#include "windef.h" -#include "winternl.h" -#include "wine/debug.h" -#include "wine/server.h" - -#include "unix_private.h" -#include "fsync.h" - -WINE_DEFAULT_DEBUG_CHANNEL(fsync); - -#include "pshpack4.h" -#include "poppack.h" - -static int current_pid; - -/* futex_waitv interface */ - -#ifndef __NR_futex_waitv - -# define __NR_futex_waitv 449 -# define FUTEX_32 2 -struct futex_waitv { - uint64_t val; - uint64_t uaddr; - uint32_t flags; - uint32_t __reserved; -}; - -#endif - -#define u64_to_ptr(x) (void *)(uintptr_t)(x) - -struct timespec64 -{ - long long tv_sec; - long long tv_nsec; -}; - -static LONGLONG nt_time_from_ts( struct timespec *ts ) -{ - return ticks_from_time_t( ts->tv_sec ) + (ts->tv_nsec + 50) / 100; -} - -static void get_wait_end_time( const LARGE_INTEGER **timeout, struct timespec64 *end, clockid_t *clock_id ) -{ - ULONGLONG nt_end; - - if (!*timeout) return; - if ((*timeout)->QuadPart == TIMEOUT_INFINITE) - { - *timeout = NULL; - return; - } - - if ((*timeout)->QuadPart > 0) - { - nt_end = (*timeout)->QuadPart; - *clock_id = CLOCK_REALTIME; - } - else - { - struct timespec ts; - - clock_gettime( CLOCK_MONOTONIC, &ts ); - nt_end = nt_time_from_ts( &ts ) - (*timeout)->QuadPart; - *clock_id = CLOCK_MONOTONIC; - } - - nt_end -= SECS_1601_TO_1970 * TICKSPERSEC; - end->tv_sec = nt_end / (ULONGLONG)TICKSPERSEC; - end->tv_nsec = (nt_end % TICKSPERSEC) * 100; -} - -static LONGLONG 
update_timeout( const struct timespec64 *end, clockid_t clock_id ) -{ - struct timespec end_ts, ts; - LONGLONG timeleft; - - clock_gettime( clock_id, &ts ); - end_ts.tv_sec = end->tv_sec; - end_ts.tv_nsec = end->tv_nsec; - timeleft = nt_time_from_ts( &end_ts ) - nt_time_from_ts( &ts ); - if (timeleft < 0) timeleft = 0; - return timeleft; -} - -static inline void futex_vector_set( struct futex_waitv *waitv, int *addr, int val ) -{ - waitv->uaddr = (uintptr_t) addr; - waitv->val = val; - waitv->flags = FUTEX_32; - waitv->__reserved = 0; -} - -static void simulate_sched_quantum(void) -{ - if (!fsync_simulate_sched_quantum) return; - /* futex wait is often very quick to resume a waiting thread when woken. - * That reveals synchonization bugs in some games which happen to work on - * Windows due to the waiting threads having some minimal delay to wake up. */ - usleep(0); -} - -static inline int futex_wait_multiple( const struct futex_waitv *futexes, - int count, const struct timespec64 *end, clockid_t clock_id ) -{ - if (end) - return syscall( __NR_futex_waitv, futexes, count, 0, end, clock_id ); - else - return syscall( __NR_futex_waitv, futexes, count, 0, NULL, 0 ); -} - -static inline int futex_wake( int *addr, int val ) -{ - return syscall( __NR_futex, addr, 1, val, NULL, 0, 0 ); -} - -int do_fsync(void) -{ -#ifdef __linux__ - static int do_fsync_cached = -1; - - if (do_fsync_cached == -1) - { - syscall( __NR_futex_waitv, NULL, 0, 0, NULL, 0 ); - do_fsync_cached = getenv("WINEFSYNC") && atoi(getenv("WINEFSYNC")) && errno != ENOSYS; - } - - return do_fsync_cached; -#else - static int once; - if (!once++) - FIXME("futexes not supported on this platform.\n"); - return 0; -#endif -} - -struct fsync -{ - enum fsync_type type; - void *shm; /* pointer to shm section */ -}; - -struct semaphore -{ - int count; - int max; - int ref; - int last_pid; -}; -C_ASSERT(sizeof(struct semaphore) == 16); - -struct event -{ - int signaled; - int unused; - int ref; - int last_pid; -}; 
-C_ASSERT(sizeof(struct event) == 16); - -struct mutex -{ - int tid; - int count; /* recursion count */ - int ref; - int last_pid; -}; -C_ASSERT(sizeof(struct mutex) == 16); - -static char shm_name[29]; -static int shm_fd; -static volatile void *shm_addrs[8192]; - -static void *get_shm( unsigned int idx ) -{ - int entry = (idx * 16) / FSYNC_SHM_PAGE_SIZE; - int offset = (idx * 16) % FSYNC_SHM_PAGE_SIZE; - - if (entry >= ARRAY_SIZE(shm_addrs)) - { - ERR( "idx %u exceeds maximum of %u.\n", idx, - (unsigned int)ARRAY_SIZE(shm_addrs) * (FSYNC_SHM_PAGE_SIZE / 16) ); - return NULL; - } - - if (!shm_addrs[entry]) - { - void *addr = mmap( NULL, FSYNC_SHM_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, - (off_t)entry * FSYNC_SHM_PAGE_SIZE ); - if (addr == (void *)-1) - ERR("Failed to map page %d (offset %s).\n", entry, - wine_dbgstr_longlong((off_t)entry * FSYNC_SHM_PAGE_SIZE)); - - TRACE("Mapping page %d at %p.\n", entry, addr); - - if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) - munmap( addr, FSYNC_SHM_PAGE_SIZE ); /* someone beat us to it */ - } - - return (char *)shm_addrs[entry] + offset; -} - -/* We'd like lookup to be fast. To that end, we use a static list indexed by handle. - * This is copied and adapted from the fd cache code. 
*/ - -#define FSYNC_LIST_BLOCK_SIZE (65536 / sizeof(struct fsync)) -#define FSYNC_LIST_ENTRIES 256 - -struct fsync_cache -{ - enum fsync_type type; - unsigned int shm_idx; -}; - -C_ASSERT(sizeof(struct fsync_cache) == sizeof(uint64_t)); - -static struct fsync_cache *fsync_list[FSYNC_LIST_ENTRIES]; -static struct fsync_cache fsync_list_initial_block[FSYNC_LIST_BLOCK_SIZE]; - -static inline UINT_PTR handle_to_index( HANDLE handle, UINT_PTR *entry ) -{ - UINT_PTR idx = (((UINT_PTR)handle) >> 2) - 1; - *entry = idx / FSYNC_LIST_BLOCK_SIZE; - return idx % FSYNC_LIST_BLOCK_SIZE; -} - -static void add_to_list( HANDLE handle, enum fsync_type type, unsigned int shm_idx ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - struct fsync_cache cache; - - if (entry >= FSYNC_LIST_ENTRIES) - { - FIXME( "too many allocated handles, not caching %p\n", handle ); - return; - } - - if (!fsync_list[entry]) /* do we need to allocate a new block of entries? */ - { - if (!entry) fsync_list[0] = fsync_list_initial_block; - else - { - void *ptr = anon_mmap_alloc( FSYNC_LIST_BLOCK_SIZE * sizeof(*fsync_list[entry]), - PROT_READ | PROT_WRITE ); - if (ptr == MAP_FAILED) return; - if (__sync_val_compare_and_swap( &fsync_list[entry], NULL, ptr )) - munmap( ptr, FSYNC_LIST_BLOCK_SIZE * sizeof(*fsync_list[entry]) ); - } - } - - cache.type = type; - cache.shm_idx = shm_idx; - __atomic_store_n( (uint64_t *)&fsync_list[entry][idx], *(uint64_t *)&cache, __ATOMIC_SEQ_CST ); -} - -static void grab_object( struct fsync *obj ) -{ - int *shm = obj->shm; - - __atomic_add_fetch( &shm[2], 1, __ATOMIC_SEQ_CST ); -} - -static unsigned int shm_index_from_shm( char *shm ) -{ - unsigned int i, idx_offset; - - for (i = 0; i < ARRAY_SIZE(shm_addrs); ++i) - { - if (shm >= (char *)shm_addrs[i] && shm < (char *)shm_addrs[i] + FSYNC_SHM_PAGE_SIZE) - { - idx_offset = (shm - (char *)shm_addrs[i]) / 16; - return i * (FSYNC_SHM_PAGE_SIZE / 16) + idx_offset; - } - } - - ERR( "Index for shm %p not found.\n", shm ); 
- return ~0u; -} - -static void put_object( struct fsync *obj ) -{ - int *shm = obj->shm; - - if (__atomic_load_n( &shm[2], __ATOMIC_SEQ_CST ) == 1) - { - /* We are holding the last reference, it should be released on server so shm idx get freed. */ - SERVER_START_REQ( fsync_free_shm_idx ) - { - req->shm_idx = shm_index_from_shm( obj->shm ); - wine_server_call( req ); - } - SERVER_END_REQ; - } - else - { - __atomic_sub_fetch( &shm[2], 1, __ATOMIC_SEQ_CST ); - } -} - -static void put_object_from_wait( struct fsync *obj ) -{ - int *shm = obj->shm; - - __sync_val_compare_and_swap( &shm[3], current_pid, 0 ); - put_object( obj ); -} - -static BOOL get_cached_object( HANDLE handle, struct fsync *obj ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - struct fsync_cache cache; - - if (entry >= FSYNC_LIST_ENTRIES || !fsync_list[entry]) return FALSE; - -again: - *(uint64_t *)&cache = __atomic_load_n( (uint64_t *)&fsync_list[entry][idx], __ATOMIC_SEQ_CST ); - - if (!cache.type || !cache.shm_idx) return FALSE; - - obj->type = cache.type; - obj->shm = get_shm( cache.shm_idx ); - grab_object( obj ); - if (((int *)obj->shm)[2] < 2 || - *(uint64_t *)&cache != __atomic_load_n( (uint64_t *)&fsync_list[entry][idx], __ATOMIC_SEQ_CST )) - { - /* This check does not strictly guarantee that we avoid the potential race but is supposed to greatly - * reduce the probability of that. */ - FIXME( "Cache changed while getting object, handle %p, shm_idx %d, refcount %d.\n", - handle, cache.shm_idx, ((int *)obj->shm)[2] ); - put_object( obj ); - goto again; - } - return TRUE; -} - -/* Gets an object. This is either a proper fsync object (i.e. an event, - * semaphore, etc. created using create_fsync) or a generic synchronizable - * server-side object which the server will signal (e.g. a process, thread, - * message queue, etc.) 
*/ -static NTSTATUS get_object( HANDLE handle, struct fsync *obj ) -{ - NTSTATUS ret = STATUS_SUCCESS; - unsigned int shm_idx = 0; - enum fsync_type type; - sigset_t sigset; - - if (get_cached_object( handle, obj )) return STATUS_SUCCESS; - - if ((INT_PTR)handle < 0) - { - /* We can deal with pseudo-handles, but it's just easier this way */ - return STATUS_NOT_IMPLEMENTED; - } - - if (!handle) return STATUS_INVALID_HANDLE; - - /* We need to try grabbing it from the server. Uninterrupted section - * is needed to avoid race with NtClose() which first calls fsync_close() - * and then closes handle on server. Without the section we might cache - * already closed handle back. */ - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - if (get_cached_object( handle, obj )) - { - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - return STATUS_SUCCESS; - } - SERVER_START_REQ( get_fsync_idx ) - { - req->handle = wine_server_obj_handle( handle ); - if (!(ret = wine_server_call( req ))) - { - shm_idx = reply->shm_idx; - type = reply->type; - } - } - SERVER_END_REQ; - if (!ret) add_to_list( handle, type, shm_idx ); - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); - - if (ret) - { - WARN("Failed to retrieve shm index for handle %p, status %#x.\n", handle, (unsigned int)ret); - return ret; - } - - TRACE("Got shm index %d for handle %p.\n", shm_idx, handle); - - obj->type = type; - obj->shm = get_shm( shm_idx ); - /* get_fsync_idx server request increments shared mem refcount, so not grabbing object here. */ - return ret; -} - -static NTSTATUS get_object_for_wait( HANDLE handle, struct fsync *obj, int *prev_pid ) -{ - NTSTATUS ret; - int *shm; - - if ((ret = get_object( handle, obj ))) return ret; - - shm = obj->shm; - /* Give wineserver a chance to cleanup shm index if the process - * is killed while we are waiting on the object. 
*/ - if (fsync_yield_to_waiters) - *prev_pid = __atomic_exchange_n( &shm[3], current_pid, __ATOMIC_SEQ_CST ); - else - __atomic_store_n( &shm[3], current_pid, __ATOMIC_SEQ_CST ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_close( HANDLE handle ) -{ - UINT_PTR entry, idx = handle_to_index( handle, &entry ); - - TRACE("%p.\n", handle); - - if (entry < FSYNC_LIST_ENTRIES && fsync_list[entry]) - { - struct fsync_cache cache; - - cache.type = 0; - cache.shm_idx = 0; - *(uint64_t *)&cache = __atomic_exchange_n( (uint64_t *)&fsync_list[entry][idx], - *(uint64_t *)&cache, __ATOMIC_SEQ_CST ); - if (cache.type) return STATUS_SUCCESS; - } - - return STATUS_INVALID_HANDLE; -} - -static NTSTATUS create_fsync( enum fsync_type type, HANDLE *handle, - ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr, int low, int high ) -{ - NTSTATUS ret; - data_size_t len; - struct object_attributes *objattr; - unsigned int shm_idx; - - if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; - - SERVER_START_REQ( create_fsync ) - { - req->access = access; - req->low = low; - req->high = high; - req->type = type; - wine_server_add_data( req, objattr, len ); - ret = wine_server_call( req ); - if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) - { - *handle = wine_server_ptr_handle( reply->handle ); - shm_idx = reply->shm_idx; - type = reply->type; - } - } - SERVER_END_REQ; - - if (!ret || ret == STATUS_OBJECT_NAME_EXISTS) - { - add_to_list( *handle, type, shm_idx ); - TRACE("-> handle %p, shm index %d.\n", *handle, shm_idx); - } - - free( objattr ); - return ret; -} - -static NTSTATUS open_fsync( enum fsync_type type, HANDLE *handle, - ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr ) -{ - NTSTATUS ret; - unsigned int shm_idx; - - SERVER_START_REQ( open_fsync ) - { - req->access = access; - req->attributes = attr->Attributes; - req->rootdir = wine_server_obj_handle( attr->RootDirectory ); - req->type = type; - if (attr->ObjectName) - wine_server_add_data( req, 
attr->ObjectName->Buffer, attr->ObjectName->Length ); - if (!(ret = wine_server_call( req ))) - { - *handle = wine_server_ptr_handle( reply->handle ); - type = reply->type; - shm_idx = reply->shm_idx; - } - } - SERVER_END_REQ; - - if (!ret) - { - add_to_list( *handle, type, shm_idx ); - - TRACE("-> handle %p, shm index %u.\n", *handle, shm_idx); - } - return ret; -} - -void fsync_init(void) -{ - struct stat st; - - if (!do_fsync()) - { - /* make sure the server isn't running with WINEFSYNC */ - HANDLE handle; - NTSTATUS ret; - - ret = create_fsync( 0, &handle, 0, NULL, 0, 0 ); - if (ret != STATUS_NOT_IMPLEMENTED) - { - ERR("Server is running with WINEFSYNC but this process is not, please enable WINEFSYNC or restart wineserver.\n"); - exit(1); - } - - return; - } - - if (stat( config_dir, &st ) == -1) - ERR("Cannot stat %s\n", config_dir); - - if (st.st_ino != (unsigned long)st.st_ino) - sprintf( shm_name, "/wine-%lx%08lx-fsync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); - else - sprintf( shm_name, "/wine-%lx-fsync", (unsigned long)st.st_ino ); - - if ((shm_fd = shm_open( shm_name, O_RDWR, 0644 )) == -1) - { - /* probably the server isn't running with WINEFSYNC, tell the user and bail */ - if (errno == ENOENT) - ERR("Failed to open fsync shared memory file; make sure no stale wineserver instances are running without WINEFSYNC.\n"); - else - ERR("Failed to initialize shared memory: %s\n", strerror( errno )); - exit(1); - } - - current_pid = GetCurrentProcessId(); - assert(current_pid); -} - -NTSTATUS fsync_create_semaphore( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max ) -{ - TRACE("name %s, initial %d, max %d.\n", - attr ? 
debugstr_us(attr->ObjectName) : "", (int)initial, (int)max); - - return create_fsync( FSYNC_SEMAPHORE, handle, access, attr, initial, max ); -} - -NTSTATUS fsync_open_semaphore( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_fsync( FSYNC_SEMAPHORE, handle, access, attr ); -} - -NTSTATUS fsync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ) -{ - struct fsync obj; - struct semaphore *semaphore; - ULONG current; - NTSTATUS ret; - - TRACE("%p, %d, %p.\n", handle, (int)count, prev); - - if ((ret = get_object( handle, &obj ))) return ret; - semaphore = obj.shm; - - do - { - current = semaphore->count; - if (count + current > semaphore->max) - { - put_object( &obj ); - return STATUS_SEMAPHORE_LIMIT_EXCEEDED; - } - } while (__sync_val_compare_and_swap( &semaphore->count, current, count + current ) != current); - - if (prev) *prev = current; - - futex_wake( &semaphore->count, INT_MAX ); - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct fsync obj; - struct semaphore *semaphore; - SEMAPHORE_BASIC_INFORMATION *out = info; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - semaphore = obj.shm; - - out->CurrentCount = semaphore->count; - out->MaximumCount = semaphore->max; - if (ret_len) *ret_len = sizeof(*out); - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_create_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, EVENT_TYPE event_type, BOOLEAN initial ) -{ - enum fsync_type type = (event_type == SynchronizationEvent ? FSYNC_AUTO_EVENT : FSYNC_MANUAL_EVENT); - - TRACE("name %s, %s-reset, initial %d.\n", - attr ? debugstr_us(attr->ObjectName) : "", - event_type == NotificationEvent ? 
"manual" : "auto", initial); - - return create_fsync( type, handle, access, attr, initial, 0xdeadbeef ); -} - -NTSTATUS fsync_open_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_fsync( FSYNC_AUTO_EVENT, handle, access, attr ); -} - -NTSTATUS fsync_set_event( HANDLE handle, LONG *prev ) -{ - struct event *event; - struct fsync obj; - LONG current; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj.shm; - - if (obj.type != FSYNC_MANUAL_EVENT && obj.type != FSYNC_AUTO_EVENT) - { - put_object( &obj ); - return STATUS_OBJECT_TYPE_MISMATCH; - } - - if (!(current = __atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST ))) - futex_wake( &event->signaled, INT_MAX ); - - if (prev) *prev = current; - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_reset_event( HANDLE handle, LONG *prev ) -{ - struct event *event; - struct fsync obj; - LONG current; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj.shm; - - if (obj.type != FSYNC_MANUAL_EVENT && obj.type != FSYNC_AUTO_EVENT) - { - put_object( &obj ); - return STATUS_OBJECT_TYPE_MISMATCH; - } - - current = __atomic_exchange_n( &event->signaled, 0, __ATOMIC_SEQ_CST ); - - if (prev) *prev = current; - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_pulse_event( HANDLE handle, LONG *prev ) -{ - struct event *event; - struct fsync obj; - LONG current; - NTSTATUS ret; - - TRACE("%p.\n", handle); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj.shm; - - if (obj.type != FSYNC_MANUAL_EVENT && obj.type != FSYNC_AUTO_EVENT) - { - put_object( &obj ); - return STATUS_OBJECT_TYPE_MISMATCH; - } - - /* This isn't really correct; an application could miss the write. - * Unfortunately we can't really do much better. 
Fortunately this is rarely - * used (and publicly deprecated). */ - if (!(current = __atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST ))) - futex_wake( &event->signaled, INT_MAX ); - - /* Try to give other threads a chance to wake up. Hopefully erring on this - * side is the better thing to do... */ - usleep(0); - - __atomic_store_n( &event->signaled, 0, __ATOMIC_SEQ_CST ); - - if (prev) *prev = current; - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_query_event( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct event *event; - struct fsync obj; - EVENT_BASIC_INFORMATION *out = info; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - event = obj.shm; - - out->EventState = event->signaled; - out->EventType = (obj.type == FSYNC_AUTO_EVENT ? SynchronizationEvent : NotificationEvent); - if (ret_len) *ret_len = sizeof(*out); - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_create_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ) -{ - TRACE("name %s, initial %d.\n", - attr ? debugstr_us(attr->ObjectName) : "", initial); - - return create_fsync( FSYNC_MUTEX, handle, access, attr, - initial ? GetCurrentThreadId() : 0, initial ? 
1 : 0 ); -} - -NTSTATUS fsync_open_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ) -{ - TRACE("name %s.\n", debugstr_us(attr->ObjectName)); - - return open_fsync( FSYNC_MUTEX, handle, access, attr ); -} - -NTSTATUS fsync_release_mutex( HANDLE handle, LONG *prev ) -{ - struct mutex *mutex; - struct fsync obj; - NTSTATUS ret; - - TRACE("%p, %p.\n", handle, prev); - - if ((ret = get_object( handle, &obj ))) return ret; - mutex = obj.shm; - - if (mutex->tid != GetCurrentThreadId()) - { - put_object( &obj ); - return STATUS_MUTANT_NOT_OWNED; - } - - if (prev) *prev = mutex->count; - - if (!--mutex->count) - { - __atomic_store_n( &mutex->tid, 0, __ATOMIC_SEQ_CST ); - futex_wake( &mutex->tid, INT_MAX ); - } - - put_object( &obj ); - return STATUS_SUCCESS; -} - -NTSTATUS fsync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ) -{ - struct fsync obj; - struct mutex *mutex; - MUTANT_BASIC_INFORMATION *out = info; - NTSTATUS ret; - - TRACE("handle %p, info %p, ret_len %p.\n", handle, info, ret_len); - - if ((ret = get_object( handle, &obj ))) return ret; - mutex = obj.shm; - - out->CurrentCount = 1 - mutex->count; - out->OwnedByCaller = (mutex->tid == GetCurrentThreadId()); - out->AbandonedState = (mutex->tid == ~0); - if (ret_len) *ret_len = sizeof(*out); - - put_object( &obj ); - return STATUS_SUCCESS; -} - -static inline void try_yield_to_waiters( int prev_pid ) -{ - if (!fsync_yield_to_waiters) return; - - /* On Windows singaling an object will wake the threads waiting on the object. With fsync - * it may happen that signaling thread (or other thread) grabs the object before the already waiting - * thread gets a chance. Try to workaround that for the affected apps. Non-zero 'prev_pid' indicates - * that the object is grabbed in __fsync_wait_objects() by some other thread. 
It is the same for - * a non-current pid, but we may currently have a stale PID on an object from a terminated process - * and it is probably safer to skip this workaround. This won't work great if the object is used in 'wait all' - * and the waiter is blocked on the other object. - * This check is also not entirely reliable as if multiple waiters from the same process enter - * __fsync_wait_objects() the first one leaving will clear 'last_pid' in the object. */ - - if (prev_pid == current_pid) - usleep(0); -} - -static NTSTATUS do_single_wait( int *addr, int val, const struct timespec64 *end, clockid_t clock_id, - BOOLEAN alertable ) -{ - struct futex_waitv futexes[2]; - int ret; - - futex_vector_set( &futexes[0], addr, val ); - - if (alertable) - { - int *apc_futex = ntdll_get_thread_data()->fsync_apc_futex; - - if (__atomic_load_n( apc_futex, __ATOMIC_SEQ_CST )) - return STATUS_USER_APC; - - futex_vector_set( &futexes[1], apc_futex, 0 ); - - ret = futex_wait_multiple( futexes, 2, end, clock_id ); - - if (__atomic_load_n( apc_futex, __ATOMIC_SEQ_CST )) - return STATUS_USER_APC; - } - else - { - ret = futex_wait_multiple( futexes, 1, end, clock_id ); - } - - if (!ret) - return 0; - else if (ret < 0 && errno == ETIMEDOUT) - return STATUS_TIMEOUT; - else - return STATUS_PENDING; -} - -static void put_objects( struct fsync *objs, unsigned int count ) -{ - unsigned int i; - - for (i = 0; i < count; ++i) - if (objs[i].type) put_object_from_wait( &objs[i] ); -} - -static NTSTATUS __fsync_wait_objects( DWORD count, const HANDLE *handles, - BOOLEAN wait_any, BOOLEAN alertable, const LARGE_INTEGER *timeout ) -{ - static const LARGE_INTEGER zero = {0}; - - int current_tid = 0; -#define CURRENT_TID (current_tid ? 
current_tid : (current_tid = GetCurrentThreadId())) - - struct futex_waitv futexes[MAXIMUM_WAIT_OBJECTS + 1]; - struct fsync objs[MAXIMUM_WAIT_OBJECTS]; - BOOL msgwait = FALSE, waited = FALSE; - int prev_pids[MAXIMUM_WAIT_OBJECTS]; - int has_fsync = 0, has_server = 0; - clockid_t clock_id = 0; - struct timespec64 end; - int dummy_futex = 0; - LONGLONG timeleft; - DWORD waitcount; - int i, ret; - - /* Grab the APC futex if we don't already have it. */ - if (alertable && !ntdll_get_thread_data()->fsync_apc_futex) - { - unsigned int idx = 0; - SERVER_START_REQ( get_fsync_apc_idx ) - { - if (!(ret = wine_server_call( req ))) - idx = reply->shm_idx; - } - SERVER_END_REQ; - - if (idx) - { - struct event *apc_event = get_shm( idx ); - ntdll_get_thread_data()->fsync_apc_futex = &apc_event->signaled; - } - } - - get_wait_end_time( &timeout, &end, &clock_id ); - - for (i = 0; i < count; i++) - { - ret = get_object_for_wait( handles[i], &objs[i], &prev_pids[i] ); - if (ret == STATUS_SUCCESS) - { - assert( objs[i].type ); - has_fsync = 1; - } - else if (ret == STATUS_NOT_IMPLEMENTED) - { - objs[i].type = 0; - objs[i].shm = NULL; - has_server = 1; - } - else - { - put_objects( objs, i ); - return ret; - } - } - - if (count && objs[count - 1].type == FSYNC_QUEUE) - msgwait = TRUE; - - if (has_fsync && has_server) - FIXME("Can't wait on fsync and server objects at the same time!\n"); - else if (has_server) - { - put_objects( objs, count ); - return STATUS_NOT_IMPLEMENTED; - } - - if (TRACE_ON(fsync)) - { - TRACE("Waiting for %s of %d handles:", wait_any ? 
"any" : "all", (int)count); - for (i = 0; i < count; i++) - TRACE(" %p", handles[i]); - - if (msgwait) - TRACE(" or driver events"); - if (alertable) - TRACE(", alertable"); - - if (!timeout) - TRACE(", timeout = INFINITE.\n"); - else - { - timeleft = update_timeout( &end, clock_id ); - TRACE(", timeout = %ld.%07ld sec.\n", - (long) (timeleft / TICKSPERSEC), (long) (timeleft % TICKSPERSEC)); - } - } - - if (wait_any || count <= 1) - { - while (1) - { - /* Try to grab anything. */ - - if (alertable) - { - /* We must check this first! The server may set an event that - * we're waiting on, but we need to return STATUS_USER_APC. */ - if (__atomic_load_n( ntdll_get_thread_data()->fsync_apc_futex, __ATOMIC_SEQ_CST )) - goto userapc; - } - - for (i = 0; i < count; i++) - { - struct fsync *obj = &objs[i]; - - if (obj->type) - { - switch (obj->type) - { - case FSYNC_SEMAPHORE: - { - struct semaphore *semaphore = obj->shm; - int current, new; - - new = __atomic_load_n( &semaphore->count, __ATOMIC_SEQ_CST ); - if (!waited && new) - try_yield_to_waiters(prev_pids[i]); - - while ((current = new)) - { - if ((new = __sync_val_compare_and_swap( &semaphore->count, current, current - 1 )) == current) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - if (waited) simulate_sched_quantum(); - put_objects( objs, count ); - return i; - } - } - futex_vector_set( &futexes[i], &semaphore->count, 0 ); - break; - } - case FSYNC_MUTEX: - { - struct mutex *mutex = obj->shm; - int tid; - - if (mutex->tid == CURRENT_TID) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - mutex->count++; - if (waited) simulate_sched_quantum(); - put_objects( objs, count ); - return i; - } - - if (!waited && !mutex->tid) - try_yield_to_waiters(prev_pids[i]); - - if (!(tid = __sync_val_compare_and_swap( &mutex->tid, 0, CURRENT_TID ))) - { - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - mutex->count = 1; - if (waited) simulate_sched_quantum(); - put_objects( objs, count ); - return 
i; - } - else if (tid == ~0 && (tid = __sync_val_compare_and_swap( &mutex->tid, ~0, CURRENT_TID )) == ~0) - { - TRACE("Woken up by abandoned mutex %p [%d].\n", handles[i], i); - mutex->count = 1; - put_objects( objs, count ); - return STATUS_ABANDONED_WAIT_0 + i; - } - - futex_vector_set( &futexes[i], &mutex->tid, tid ); - break; - } - case FSYNC_AUTO_EVENT: - case FSYNC_AUTO_SERVER: - { - struct event *event = obj->shm; - - if (!waited && event->signaled) - try_yield_to_waiters(prev_pids[i]); - - if (__sync_val_compare_and_swap( &event->signaled, 1, 0 )) - { - if (ac_odyssey && alertable) - usleep( 0 ); - - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - if (waited) simulate_sched_quantum(); - put_objects( objs, count ); - return i; - } - futex_vector_set( &futexes[i], &event->signaled, 0 ); - break; - } - case FSYNC_MANUAL_EVENT: - case FSYNC_MANUAL_SERVER: - case FSYNC_QUEUE: - { - struct event *event = obj->shm; - - if (__atomic_load_n( &event->signaled, __ATOMIC_SEQ_CST )) - { - if (ac_odyssey && alertable) - usleep( 0 ); - - TRACE("Woken up by handle %p [%d].\n", handles[i], i); - if (waited) simulate_sched_quantum(); - put_objects( objs, count ); - return i; - } - futex_vector_set( &futexes[i], &event->signaled, 0 ); - break; - } - default: - ERR("Invalid type %#x for handle %p.\n", obj->type, handles[i]); - assert(0); - } - } - else - { - /* Avoid breaking things entirely. */ - futex_vector_set( &futexes[i], &dummy_futex, dummy_futex ); - } - } - - if (alertable) - { - /* We already checked if it was signaled; don't bother doing it again. */ - futex_vector_set( &futexes[i++], ntdll_get_thread_data()->fsync_apc_futex, 0 ); - } - waitcount = i; - - /* Looks like everything is contended, so wait. */ - - if (ac_odyssey && alertable) - usleep( 0 ); - - if (timeout && !timeout->QuadPart) - { - /* Unlike esync, we already know that we've timed out, so we - * can avoid a syscall. 
*/ - TRACE("Wait timed out.\n"); - put_objects( objs, count ); - return STATUS_TIMEOUT; - } - - ret = futex_wait_multiple( futexes, waitcount, timeout ? &end : NULL, clock_id ); - - /* FUTEX_WAIT_MULTIPLE can succeed or return -EINTR, -EAGAIN, - * -EFAULT/-EACCES, -ETIMEDOUT. In the first three cases we need to - * try again, bad address is already handled by the fact that we - * tried to read from it, so only break out on a timeout. */ - if (ret == -1 && errno == ETIMEDOUT) - { - TRACE("Wait timed out.\n"); - put_objects( objs, count ); - return STATUS_TIMEOUT; - } - else waited = TRUE; - } /* while (1) */ - } - else - { - /* Wait-all is a little trickier to implement correctly. Fortunately, - * it's not as common. - * - * The idea is basically just to wait in sequence on every object in the - * set. Then when we're done, try to grab them all in a tight loop. If - * that fails, release any resources we've grabbed (and yes, we can - * reliably do this—it's just mutexes and semaphores that we have to - * put back, and in both cases we just put back 1), and if any of that - * fails we start over. - * - * What makes this inherently bad is that we might temporarily grab a - * resource incorrectly. Hopefully it'll be quick (and hey, it won't - * block on wineserver) so nobody will notice. Besides, consider: if - * object A becomes signaled but someone grabs it before we can grab it - * and everything else, then they could just as well have grabbed it - * before it became signaled. Similarly if object A was signaled and we - * were blocking on object B, then B becomes available and someone grabs - * A before we can, then they might have grabbed A before B became - * signaled. In either case anyone who tries to wait on A or B will be - * waiting for an instant while we put things back. */ - - NTSTATUS status = STATUS_SUCCESS; - int current; - - while (1) - { - BOOL abandoned; - -tryagain: - abandoned = FALSE; - - /* First step: try to wait on each object in sequence. 
*/ - - for (i = 0; i < count; i++) - { - struct fsync *obj = &objs[i]; - - if (obj->type == FSYNC_MUTEX) - { - struct mutex *mutex = obj->shm; - - if (mutex->tid == CURRENT_TID) - continue; - - while ((current = __atomic_load_n( &mutex->tid, __ATOMIC_SEQ_CST ))) - { - status = do_single_wait( &mutex->tid, current, timeout ? &end : NULL, clock_id, alertable ); - if (status != STATUS_PENDING) - break; - } - } - else if (obj->type) - { - /* this works for semaphores too */ - struct event *event = obj->shm; - - while (!__atomic_load_n( &event->signaled, __ATOMIC_SEQ_CST )) - { - status = do_single_wait( &event->signaled, 0, timeout ? &end : NULL, clock_id, alertable ); - if (status != STATUS_PENDING) - break; - } - } - - if (status == STATUS_TIMEOUT) - { - TRACE("Wait timed out.\n"); - put_objects( objs, count ); - return status; - } - else if (status == STATUS_USER_APC) - goto userapc; - } - - /* If we got here and we haven't timed out, that means all of the - * handles were signaled. Check to make sure they still are. */ - for (i = 0; i < count; i++) - { - struct fsync *obj = &objs[i]; - - if (obj->type == FSYNC_MUTEX) - { - struct mutex *mutex = obj->shm; - int tid = __atomic_load_n( &mutex->tid, __ATOMIC_SEQ_CST ); - - if (tid && tid != ~0 && tid != CURRENT_TID) - goto tryagain; - } - else if (obj->type) - { - struct event *event = obj->shm; - - if (!__atomic_load_n( &event->signaled, __ATOMIC_SEQ_CST )) - goto tryagain; - } - } - - /* Yep, still signaled. Now quick, grab everything. 
*/ - for (i = 0; i < count; i++) - { - struct fsync *obj = &objs[i]; - if (!obj->type) continue; - switch (obj->type) - { - case FSYNC_MUTEX: - { - struct mutex *mutex = obj->shm; - int tid = __atomic_load_n( &mutex->tid, __ATOMIC_SEQ_CST ); - if (tid == CURRENT_TID) - break; - if (tid && tid != ~0) - goto tooslow; - if (__sync_val_compare_and_swap( &mutex->tid, tid, CURRENT_TID ) != tid) - goto tooslow; - if (tid == ~0) - abandoned = TRUE; - break; - } - case FSYNC_SEMAPHORE: - { - struct semaphore *semaphore = obj->shm; - int current, new; - - new = __atomic_load_n( &semaphore->count, __ATOMIC_SEQ_CST ); - while ((current = new)) - { - if ((new = __sync_val_compare_and_swap( &semaphore->count, current, current - 1 )) == current) - break; - } - if (!current) - goto tooslow; - break; - } - case FSYNC_AUTO_EVENT: - case FSYNC_AUTO_SERVER: - { - struct event *event = obj->shm; - if (!__sync_val_compare_and_swap( &event->signaled, 1, 0 )) - goto tooslow; - break; - } - default: - /* If a manual-reset event changed between there and - * here, it's shouldn't be a problem. */ - break; - } - } - - /* If we got here, we successfully waited on every object. - * Make sure to let ourselves know that we grabbed the mutexes. */ - for (i = 0; i < count; i++) - { - if (objs[i].type == FSYNC_MUTEX) - { - struct mutex *mutex = objs[i].shm; - mutex->count++; - } - } - - if (abandoned) - { - TRACE("Wait successful, but some object(s) were abandoned.\n"); - put_objects( objs, count ); - return STATUS_ABANDONED; - } - TRACE("Wait successful.\n"); - put_objects( objs, count ); - return STATUS_SUCCESS; - -tooslow: - for (--i; i >= 0; i--) - { - struct fsync *obj = &objs[i]; - if (!obj->type) continue; - switch (obj->type) - { - case FSYNC_MUTEX: - { - struct mutex *mutex = obj->shm; - /* HACK: This won't do the right thing with abandoned - * mutexes, but fixing it is probably more trouble than - * it's worth. 
*/ - __atomic_store_n( &mutex->tid, 0, __ATOMIC_SEQ_CST ); - break; - } - case FSYNC_SEMAPHORE: - { - struct semaphore *semaphore = obj->shm; - __sync_fetch_and_add( &semaphore->count, 1 ); - break; - } - case FSYNC_AUTO_EVENT: - case FSYNC_AUTO_SERVER: - { - struct event *event = obj->shm; - __atomic_store_n( &event->signaled, 1, __ATOMIC_SEQ_CST ); - break; - } - default: - /* doesn't need to be put back */ - break; - } - } - } /* while (1) */ - } /* else (wait-all) */ - - assert(0); /* shouldn't reach here... */ - -userapc: - TRACE("Woken up by user APC.\n"); - - put_objects( objs, count ); - - /* We have to make a server call anyway to get the APC to execute, so just - * delegate down to server_wait(). */ - ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &zero ); - - /* This can happen if we received a system APC, and the APC fd was woken up - * before we got SIGUSR1. poll() doesn't return EINTR in that case. The - * right thing to do seems to be to return STATUS_USER_APC anyway. */ - if (ret == STATUS_TIMEOUT) ret = STATUS_USER_APC; - return ret; -#undef CURRENT_TID -} - -/* Like esync, we need to let the server know when we are doing a message wait, - * and when we are done with one, so that all of the code surrounding hung - * queues works, and we also need this for WaitForInputIdle(). - * - * Unlike esync, we can't wait on the queue fd itself locally. Instead we let - * the server do that for us, the way it normally does. This could actually - * work for esync too, and that might be better. */ -static void server_set_msgwait( int in_msgwait ) -{ - SERVER_START_REQ( fsync_msgwait ) - { - req->in_msgwait = in_msgwait; - wine_server_call( req ); - } - SERVER_END_REQ; -} - -/* This is a very thin wrapper around the proper implementation above. The - * purpose is to make sure the server knows when we are doing a message wait. 
- * This is separated into a wrapper function since there are at least a dozen - * exit paths from fsync_wait_objects(). */ -NTSTATUS fsync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN wait_any, - BOOLEAN alertable, const LARGE_INTEGER *timeout ) -{ - BOOL msgwait = FALSE; - struct fsync obj; - NTSTATUS ret; - - if (count && !get_object( handles[count - 1], &obj )) - { - if (obj.type == FSYNC_QUEUE) - { - msgwait = TRUE; - server_set_msgwait( 1 ); - } - put_object( &obj ); - } - - ret = __fsync_wait_objects( count, handles, wait_any, alertable, timeout ); - - if (msgwait) - server_set_msgwait( 0 ); - - return ret; -} - -NTSTATUS fsync_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, - const LARGE_INTEGER *timeout ) -{ - struct fsync obj; - NTSTATUS ret; - - if ((ret = get_object( signal, &obj ))) return ret; - - switch (obj.type) - { - case FSYNC_SEMAPHORE: - ret = fsync_release_semaphore( signal, 1, NULL ); - break; - case FSYNC_AUTO_EVENT: - case FSYNC_MANUAL_EVENT: - ret = fsync_set_event( signal, NULL ); - break; - case FSYNC_MUTEX: - ret = fsync_release_mutex( signal, NULL ); - break; - default: - ret = STATUS_OBJECT_TYPE_MISMATCH; - break; - } - put_object( &obj ); - if (ret) return ret; - - return fsync_wait_objects( 1, &wait, TRUE, alertable, timeout ); -} diff --git a/dlls/ntdll/unix/fsync.h b/dlls/ntdll/unix/fsync.h deleted file mode 100644 index 6005c0fa322..00000000000 --- a/dlls/ntdll/unix/fsync.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * futex-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -extern int do_fsync(void); -extern void fsync_init(void); -extern NTSTATUS fsync_close( HANDLE handle ); - -extern NTSTATUS fsync_create_semaphore(HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, LONG initial, LONG max); -extern NTSTATUS fsync_release_semaphore( HANDLE handle, ULONG count, ULONG *prev ); -extern NTSTATUS fsync_open_semaphore( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS fsync_query_semaphore( HANDLE handle, void *info, ULONG *ret_len ); -extern NTSTATUS fsync_create_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, EVENT_TYPE type, BOOLEAN initial ); -extern NTSTATUS fsync_open_event( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS fsync_set_event( HANDLE handle, LONG *prev ); -extern NTSTATUS fsync_reset_event( HANDLE handle, LONG *prev ); -extern NTSTATUS fsync_pulse_event( HANDLE handle, LONG *prev ); -extern NTSTATUS fsync_query_event( HANDLE handle, void *info, ULONG *ret_len ); -extern NTSTATUS fsync_create_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr, BOOLEAN initial ); -extern NTSTATUS fsync_open_mutex( HANDLE *handle, ACCESS_MASK access, - const OBJECT_ATTRIBUTES *attr ); -extern NTSTATUS fsync_release_mutex( HANDLE handle, LONG *prev ); -extern NTSTATUS fsync_query_mutex( HANDLE handle, void *info, ULONG *ret_len ); - -extern NTSTATUS fsync_wait_objects( DWORD count, const HANDLE *handles, BOOLEAN 
wait_any, - BOOLEAN alertable, const LARGE_INTEGER *timeout ); -extern NTSTATUS fsync_signal_and_wait( HANDLE signal, HANDLE wait, - BOOLEAN alertable, const LARGE_INTEGER *timeout ); - -/* We have to synchronize on the fd cache mutex so that fsync_close(), close_handle() sequence - * called from NtClose() doesn't race with get_fsync_idx(), add_to_list() sequence called - * from get_object(). */ -extern pthread_mutex_t fd_cache_mutex; diff --git a/dlls/ntdll/unix/loader.c b/dlls/ntdll/unix/loader.c index 1e75eff1595..da355bc193e 100644 --- a/dlls/ntdll/unix/loader.c +++ b/dlls/ntdll/unix/loader.c @@ -88,8 +88,6 @@ extern char **environ; #include "winioctl.h" #include "winternl.h" #include "unix_private.h" -#include "esync.h" -#include "fsync.h" #include "wine/list.h" #include "ntsyscalls.h" #include "wine/debug.h" @@ -2021,9 +2019,7 @@ static ULONG_PTR get_image_address(void) } BOOL ac_odyssey; -BOOL fsync_simulate_sched_quantum; BOOL alert_simulate_sched_quantum; -BOOL fsync_yield_to_waiters; BOOL no_priv_elevation; BOOL localsystem_sid; BOOL simulate_writecopy; @@ -2050,18 +2046,6 @@ static void hacks_init(void) if (ac_odyssey) ERR("HACK: AC Odyssey sync tweak on.\n"); - env_str = getenv("WINE_FSYNC_SIMULATE_SCHED_QUANTUM"); - if (env_str) - fsync_simulate_sched_quantum = !!atoi(env_str); - else if (main_argc > 1) - { - fsync_simulate_sched_quantum = !!strstr(main_argv[1], "Ubisoft Game Launcher\\upc.exe"); - fsync_simulate_sched_quantum = fsync_simulate_sched_quantum || !!strstr(main_argv[1], "PlanetZoo.exe"); - fsync_simulate_sched_quantum = fsync_simulate_sched_quantum || !!strstr(main_argv[1], "GTA5.exe"); - } - if (fsync_simulate_sched_quantum) - ERR("HACK: Simulating sched quantum in fsync.\n"); - env_str = getenv("WINE_ALERT_SIMULATE_SCHED_QUANTUM"); if (env_str) alert_simulate_sched_quantum = !!atoi(env_str); @@ -2073,13 +2057,6 @@ static void hacks_init(void) if (alert_simulate_sched_quantum) ERR("HACK: Simulating sched quantum in 
NtWaitForAlertByThreadId.\n"); - env_str = getenv("WINE_FSYNC_YIELD_TO_WAITERS"); - if (env_str) - fsync_yield_to_waiters = !!atoi(env_str); - else if (sgi) fsync_yield_to_waiters = !strcmp(sgi, "292120") || !strcmp(sgi, "345350") || !strcmp(sgi, "292140"); - if (fsync_yield_to_waiters) - ERR("HACK: fsync: yield to waiters.\n"); - switch (sgi ? atoi( sgi ) : -1) { case 25700: /* Madballs in Babo: Invasion */ @@ -2177,8 +2154,6 @@ static void start_main_thread(void) dbg_init(); startup_info_size = server_init_process(); hacks_init(); - fsync_init(); - esync_init(); virtual_map_user_shared_data(); init_cpu_info(); init_files(); diff --git a/dlls/ntdll/unix/process.c b/dlls/ntdll/unix/process.c index f544e2afae3..c58525b661d 100644 --- a/dlls/ntdll/unix/process.c +++ b/dlls/ntdll/unix/process.c @@ -916,7 +916,7 @@ NTSTATUS WINAPI NtCreateUserProcess( HANDLE *process_handle_ptr, HANDLE *thread_ /* wait for the new process info to be ready */ - NtWaitForSingleObject( process_info, FALSE, NULL ); + server_wait_for_object( process_info, FALSE, NULL ); SERVER_START_REQ( get_new_process_info ) { req->info = wine_server_obj_handle( process_info ); diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c index 992e5cfdc3d..5c3fb809a5c 100644 --- a/dlls/ntdll/unix/server.c +++ b/dlls/ntdll/unix/server.c @@ -79,8 +79,6 @@ #include "wine/server.h" #include "wine/debug.h" #include "unix_private.h" -#include "esync.h" -#include "fsync.h" #include "ddk/wdm.h" WINE_DEFAULT_DEBUG_CHANNEL(server); @@ -799,6 +797,19 @@ unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT f return ret; } +/* helper function to perform a server-side wait on an internal handle without + * using the fast synchronization path */ +unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ) +{ + select_op_t select_op; + UINT flags = SELECT_INTERRUPTIBLE; + + if (alertable) flags |= SELECT_ALERTABLE; + + select_op.wait.op = SELECT_WAIT; + 
select_op.wait.handles[0] = wine_server_obj_handle( handle ); + return server_wait( &select_op, offsetof( select_op_t, wait.handles[1] ), flags, timeout ); +} /*********************************************************************** * NtContinue (NTDLL.@) @@ -861,7 +872,7 @@ unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, a } else { - NtWaitForSingleObject( handle, FALSE, NULL ); + server_wait_for_object( handle, FALSE, NULL ); SERVER_START_REQ( get_apc_result ) { @@ -933,7 +944,7 @@ void wine_server_send_fd( int fd ) * * Receive a file descriptor passed from the server. */ -int receive_fd( obj_handle_t *handle ) +static int receive_fd( obj_handle_t *handle ) { struct iovec vec; struct msghdr msghdr; @@ -1791,13 +1802,17 @@ NTSTATUS WINAPI NtDuplicateObject( HANDLE source_process, HANDLE source, HANDLE *dest = wine_server_ptr_handle( result.dup_handle.handle ); return result.dup_handle.status; } - + /* hold fd_cache_mutex to prevent the fd from being added again between the + * call to remove_fd_from_cache and close_handle */ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); /* always remove the cached fd; if the server request fails we'll just * retrieve it again */ if (options & DUPLICATE_CLOSE_SOURCE) + { fd = remove_fd_from_cache( source ); + close_fast_sync_obj( source ); + } SERVER_START_REQ( dup_handle ) { @@ -1859,11 +1874,7 @@ NTSTATUS WINAPI NtClose( HANDLE handle ) * retrieve it again */ fd = remove_fd_from_cache( handle ); - if (do_fsync()) - fsync_close( handle ); - - if (do_esync()) - esync_close( handle ); + close_fast_sync_obj( handle ); SERVER_START_REQ( close_handle ) { diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c index 8361f12bb89..3f13f2da5c2 100644 --- a/dlls/ntdll/unix/sync.c +++ b/dlls/ntdll/unix/sync.c @@ -30,9 +30,11 @@ #include #include #include +#include #include #include #include +#include #include #ifdef HAVE_SYS_SYSCALL_H #include @@ -48,12 +50,16 @@ #endif #include #include 
+#include #include #include #include #ifdef __APPLE__ # include #endif +#ifdef HAVE_LINUX_NTSYNC_H +# include +#endif #ifdef HAVE_KQUEUE # include #endif @@ -66,8 +72,6 @@ #include "wine/server.h" #include "wine/debug.h" #include "unix_private.h" -#include "esync.h" -#include "fsync.h" WINE_DEFAULT_DEBUG_CHANNEL(sync); @@ -76,7 +80,8 @@ HANDLE keyed_event = 0; static const char *debugstr_timeout( const LARGE_INTEGER *timeout ) { if (!timeout) return "(infinite)"; - return wine_dbgstr_longlong( timeout->QuadPart ); + return wine_dbg_sprintf( "%lld.%07ld", (long long)(timeout->QuadPart / TICKSPERSEC), + (long)(timeout->QuadPart % TICKSPERSEC) ); } /* return a monotonic time counter, in Win32 ticks */ @@ -262,6 +267,901 @@ static unsigned int validate_open_object_attributes( const OBJECT_ATTRIBUTES *at return STATUS_SUCCESS; } +#ifdef HAVE_LINUX_NTSYNC_H + +static int get_linux_sync_device(void) +{ + static LONG fast_sync_fd = -2; + + if (fast_sync_fd == -2) + { + HANDLE device; + int fd, needs_close; + NTSTATUS ret; + + SERVER_START_REQ( get_linux_sync_device ) + { + if (!(ret = wine_server_call( req ))) device = wine_server_ptr_handle( reply->handle ); + } + SERVER_END_REQ; + + if (!ret) + { + if (!server_get_unix_fd( device, 0, &fd, &needs_close, NULL, NULL )) + { + if (InterlockedCompareExchange( &fast_sync_fd, fd, -2 ) != -2) + { + /* someone beat us to it */ + if (needs_close) close( fd ); + NtClose( device ); + } + /* otherwise don't close the device */ + } + else + { + InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); + NtClose( device ); + } + } + else + { + InterlockedCompareExchange( &fast_sync_fd, -1, -2 ); + } + } + return fast_sync_fd; +} + +/* It's possible for synchronization primitives to remain alive even after being + * closed, because a thread is still waiting on them. It's rare in practice, and + * documented as being undefined behaviour by Microsoft, but it works, and some + * applications rely on it. 
This means we need to refcount handles, and defer + * deleting them on the server side until the refcount reaches zero. We do this + * by having each client process hold a handle to the fast synchronization + * object, as well as a private refcount. When the client refcount reaches zero, + * it closes the handle; when all handles are closed, the server deletes the + * fast synchronization object. + * + * We also need this for signal-and-wait. The signal and wait operations aren't + * atomic, but we can't perform the signal and then return STATUS_INVALID_HANDLE + * for the wait—we need to either do both operations or neither. That means we + * need to grab references to both objects, and prevent them from being + * destroyed before we're done with them. + * + * We want lookup of objects from the cache to be very fast; ideally, it should + * be lock-free. We achieve this by using atomic modifications to "refcount", + * and guaranteeing that all other fields are valid and correct *as long as* + * refcount is nonzero, and we store the entire structure in memory which will + * never be freed. + * + * This means that acquiring the object can't use a simple atomic increment; it + * has to use a compare-and-swap loop to ensure that it doesn't try to increment + * an object with a zero refcount. That's still leagues better than a real lock, + * though, and release can be a single atomic decrement. + * + * It also means that threads modifying the cache need to take a lock, to + * prevent other threads from writing to it concurrently. + * + * It's possible for an object currently in use (by a waiter) to be closed and + * the same handle immediately reallocated to a different object. This should be + * a very rare situation, and in that case we simply don't cache the handle. 
+ */ +struct fast_sync_cache_entry +{ + LONG refcount; + int fd; + enum fast_sync_type type; + unsigned int access; + BOOL closed; + /* handle to the underlying fast sync object, stored as obj_handle_t to save + * space */ + obj_handle_t handle; +}; + + +static void release_fast_sync_obj( struct fast_sync_cache_entry *cache ) +{ + /* save the handle and fd now; as soon as the refcount hits 0 we cannot + * access the cache anymore */ + HANDLE handle = wine_server_ptr_handle( cache->handle ); + int fd = cache->fd; + LONG refcount = InterlockedDecrement( &cache->refcount ); + + assert( refcount >= 0 ); + + if (!refcount) + { + NTSTATUS ret; + + /* we can't call NtClose here as we may be inside fd_cache_mutex */ + SERVER_START_REQ( close_handle ) + { + req->handle = wine_server_obj_handle( handle ); + ret = wine_server_call( req ); + } + SERVER_END_REQ; + + assert( !ret ); + close( fd ); + } +} + + +#define FAST_SYNC_CACHE_BLOCK_SIZE (65536 / sizeof(struct fast_sync_cache_entry)) +#define FAST_SYNC_CACHE_ENTRIES 128 + +static struct fast_sync_cache_entry *fast_sync_cache[FAST_SYNC_CACHE_ENTRIES]; +static struct fast_sync_cache_entry fast_sync_cache_initial_block[FAST_SYNC_CACHE_BLOCK_SIZE]; + +static inline unsigned int fast_sync_handle_to_index( HANDLE handle, unsigned int *entry ) +{ + unsigned int idx = (wine_server_obj_handle(handle) >> 2) - 1; + *entry = idx / FAST_SYNC_CACHE_BLOCK_SIZE; + return idx % FAST_SYNC_CACHE_BLOCK_SIZE; +} + + +static struct fast_sync_cache_entry *cache_fast_sync_obj( HANDLE handle, obj_handle_t fast_sync, int fd, + enum fast_sync_type type, unsigned int access ) +{ + unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); + struct fast_sync_cache_entry *cache; + sigset_t sigset; + int refcount; + + if (entry >= FAST_SYNC_CACHE_ENTRIES) + { + FIXME( "too many allocated handles, not caching %p\n", handle ); + return NULL; + } + + if (!fast_sync_cache[entry]) /* do we need to allocate a new block of entries? 
*/ + { + if (!entry) fast_sync_cache[0] = fast_sync_cache_initial_block; + else + { + static const size_t size = FAST_SYNC_CACHE_BLOCK_SIZE * sizeof(struct fast_sync_cache_entry); + void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE ); + if (ptr == MAP_FAILED) return NULL; + if (InterlockedCompareExchangePointer( (void **)&fast_sync_cache[entry], ptr, NULL )) + munmap( ptr, size ); /* someone beat us to it */ + } + } + + cache = &fast_sync_cache[entry][idx]; + + /* Hold fd_cache_mutex instead of a separate mutex, to prevent the same + * race between this function and NtClose. That is, prevent the object from + * being cached again between close_fast_sync_obj() and close_handle. */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + if (InterlockedCompareExchange( &cache->refcount, 0, 0 )) + { + /* We lost the race with another thread trying to cache this object, or + * the handle is currently being used for another object (i.e. it was + * closed and then reused). We have no way of knowing which, and in the + * latter case we can't cache this object until the old one is + * completely destroyed, so always return failure. */ + server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + return NULL; + } + + cache->handle = fast_sync; + cache->fd = fd; + cache->type = type; + cache->access = access; + cache->closed = FALSE; + /* Make sure we set the other members before the refcount; this store needs + * release semantics [paired with the load in get_cached_fast_sync_obj()]. + * Set the refcount to 2 (one for the handle, one for the caller). 
*/ + refcount = InterlockedExchange( &cache->refcount, 2 ); + assert( !refcount ); + + server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + + return cache; +} + + +/* returns the previous value */ +static inline LONG interlocked_inc_if_nonzero( LONG *dest ) +{ + LONG val, tmp; + for (val = *dest;; val = tmp) + { + if (!val || (tmp = InterlockedCompareExchange( dest, val + 1, val )) == val) + break; + } + return val; +} + + +static struct fast_sync_cache_entry *get_cached_fast_sync_obj( HANDLE handle ) +{ + unsigned int entry, idx = fast_sync_handle_to_index( handle, &entry ); + struct fast_sync_cache_entry *cache; + + if (entry >= FAST_SYNC_CACHE_ENTRIES || !fast_sync_cache[entry]) + return NULL; + + cache = &fast_sync_cache[entry][idx]; + + /* this load needs acquire semantics [paired with the store in + * cache_fast_sync_obj()] */ + if (!interlocked_inc_if_nonzero( &cache->refcount )) + return NULL; + + if (cache->closed) + { + /* The object is still being used, but "handle" has been closed. The + * handle value might have been reused for another object in the + * meantime, in which case we have to report that valid object, so + * force the caller to check the server. 
*/ + release_fast_sync_obj( cache ); + return NULL; + } + + return cache; +} + + +static BOOL fast_sync_types_match( enum fast_sync_type a, enum fast_sync_type b ) +{ + if (a == b) return TRUE; + if (a == FAST_SYNC_AUTO_EVENT && b == FAST_SYNC_MANUAL_EVENT) return TRUE; + if (b == FAST_SYNC_AUTO_EVENT && a == FAST_SYNC_MANUAL_EVENT) return TRUE; + return FALSE; +} + + +/* returns a pointer to a cache entry; if the object could not be cached, + * returns "stack_cache" instead, which should be allocated on stack */ +static NTSTATUS get_fast_sync_obj( HANDLE handle, enum fast_sync_type desired_type, ACCESS_MASK desired_access, + struct fast_sync_cache_entry *stack_cache, + struct fast_sync_cache_entry **ret_cache ) +{ + struct fast_sync_cache_entry *cache; + obj_handle_t fast_sync_handle; + enum fast_sync_type type; + unsigned int access; + int fd, needs_close; + NTSTATUS ret; + + /* try to find it in the cache already */ + if ((cache = get_cached_fast_sync_obj( handle ))) + { + *ret_cache = cache; + return STATUS_SUCCESS; + } + + /* try to retrieve it from the server */ + SERVER_START_REQ( get_linux_sync_obj ) + { + req->handle = wine_server_obj_handle( handle ); + if (!(ret = wine_server_call( req ))) + { + fast_sync_handle = reply->handle; + access = reply->access; + type = reply->type; + } + } + SERVER_END_REQ; + + if (ret) return ret; + + if ((ret = server_get_unix_fd( wine_server_ptr_handle( fast_sync_handle ), + 0, &fd, &needs_close, NULL, NULL ))) + return ret; + + cache = cache_fast_sync_obj( handle, fast_sync_handle, fd, type, access ); + if (!cache) + { + cache = stack_cache; + cache->handle = fast_sync_handle; + cache->fd = fd; + cache->type = type; + cache->access = access; + cache->closed = FALSE; + cache->refcount = 1; + } + + *ret_cache = cache; + + if (desired_type && !fast_sync_types_match( cache->type, desired_type )) + { + release_fast_sync_obj( cache ); + return STATUS_OBJECT_TYPE_MISMATCH; + } + + if ((cache->access & desired_access) != 
desired_access) + { + release_fast_sync_obj( cache ); + return STATUS_ACCESS_DENIED; + } + + return STATUS_SUCCESS; +} + + +/* caller must hold fd_cache_mutex */ +void close_fast_sync_obj( HANDLE handle ) +{ + struct fast_sync_cache_entry *cache = get_cached_fast_sync_obj( handle ); + + if (cache) + { + cache->closed = TRUE; + /* once for the reference we just grabbed, and once for the handle */ + release_fast_sync_obj( cache ); + release_fast_sync_obj( cache ); + } +} + + +static NTSTATUS linux_release_semaphore_obj( int obj, ULONG count, ULONG *prev_count ) +{ + NTSTATUS ret; + + ret = ioctl( obj, NTSYNC_IOC_SEM_POST, &count ); + if (ret < 0) + { + if (errno == EOVERFLOW) + return STATUS_SEMAPHORE_LIMIT_EXCEEDED; + else + return errno_to_status( errno ); + } + if (prev_count) *prev_count = count; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, + SEMAPHORE_MODIFY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_release_semaphore_obj( cache->fd, count, prev_count ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS linux_query_semaphore_obj( int obj, SEMAPHORE_BASIC_INFORMATION *info ) +{ + struct ntsync_sem_args args = {0}; + NTSTATUS ret; + + ret = ioctl( obj, NTSYNC_IOC_SEM_READ, &args ); + if (ret < 0) + return errno_to_status( errno ); + info->CurrentCount = args.count; + info->MaximumCount = args.max; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_SEMAPHORE, + SEMAPHORE_QUERY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_query_semaphore_obj( cache->fd, info ); + + release_fast_sync_obj( cache ); + return ret; 
+} + + +static NTSTATUS linux_set_event_obj( int obj, LONG *prev_state ) +{ + NTSTATUS ret; + __u32 prev; + + ret = ioctl( obj, NTSYNC_IOC_EVENT_SET, &prev ); + if (ret < 0) + return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, + EVENT_MODIFY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_set_event_obj( cache->fd, prev_state ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS linux_reset_event_obj( int obj, LONG *prev_state ) +{ + NTSTATUS ret; + __u32 prev; + + ret = ioctl( obj, NTSYNC_IOC_EVENT_RESET, &prev ); + if (ret < 0) + return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, + EVENT_MODIFY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_reset_event_obj( cache->fd, prev_state ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS linux_pulse_event_obj( int obj, LONG *prev_state ) +{ + NTSTATUS ret; + __u32 prev; + + ret = ioctl( obj, NTSYNC_IOC_EVENT_PULSE, &prev ); + if (ret < 0) + return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, + EVENT_MODIFY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_pulse_event_obj( cache->fd, prev_state ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS 
linux_query_event_obj( int obj, enum fast_sync_type type, EVENT_BASIC_INFORMATION *info ) +{ + struct ntsync_event_args args = {0}; + NTSTATUS ret; + + ret = ioctl( obj, NTSYNC_IOC_EVENT_READ, &args ); + if (ret < 0) + return errno_to_status( errno ); + info->EventType = (type == FAST_SYNC_AUTO_EVENT) ? SynchronizationEvent : NotificationEvent; + info->EventState = args.signaled; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_AUTO_EVENT, + EVENT_QUERY_STATE, &stack_cache, &cache ))) + return ret; + + ret = linux_query_event_obj( cache->fd, cache->type, info ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS linux_release_mutex_obj( int obj, LONG *prev_count ) +{ + struct ntsync_mutex_args args = {0}; + NTSTATUS ret; + + args.owner = GetCurrentThreadId(); + ret = ioctl( obj, NTSYNC_IOC_MUTEX_UNLOCK, &args ); + + if (ret < 0) + { + if (errno == EOVERFLOW) + return STATUS_MUTANT_LIMIT_EXCEEDED; + else if (errno == EPERM) + return STATUS_MUTANT_NOT_OWNED; + else + return errno_to_status( errno ); + } + if (prev_count) *prev_count = 1 - args.count; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, 0, &stack_cache, &cache ))) + return ret; + + ret = linux_release_mutex_obj( cache->fd, prev_count ); + + release_fast_sync_obj( cache ); + return ret; +} + + +static NTSTATUS linux_query_mutex_obj( int obj, MUTANT_BASIC_INFORMATION *info ) +{ + struct ntsync_mutex_args args = {0}; + NTSTATUS ret; + + ret = ioctl( obj, NTSYNC_IOC_MUTEX_READ, &args ); + + if (ret < 0) + { + if (errno == EOWNERDEAD) + { + info->AbandonedState = TRUE; + info->OwnedByCaller = FALSE; + info->CurrentCount = 
1; + return STATUS_SUCCESS; + } + else + return errno_to_status( errno ); + } + info->AbandonedState = FALSE; + info->OwnedByCaller = (args.owner == GetCurrentThreadId()); + info->CurrentCount = 1 - args.count; + return STATUS_SUCCESS; +} + + +static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) +{ + struct fast_sync_cache_entry stack_cache, *cache; + NTSTATUS ret; + + if ((ret = get_fast_sync_obj( handle, FAST_SYNC_MUTEX, MUTANT_QUERY_STATE, + &stack_cache, &cache ))) + return ret; + + ret = linux_query_mutex_obj( cache->fd, info ); + + release_fast_sync_obj( cache ); + return ret; +} + +static void select_queue( HANDLE queue ) +{ + SERVER_START_REQ( fast_select_queue ) + { + req->handle = wine_server_obj_handle( queue ); + wine_server_call( req ); + } + SERVER_END_REQ; +} + +static void unselect_queue( HANDLE queue, BOOL signaled ) +{ + SERVER_START_REQ( fast_unselect_queue ) + { + req->handle = wine_server_obj_handle( queue ); + req->signaled = signaled; + wine_server_call( req ); + } + SERVER_END_REQ; +} + +static int get_fast_alert_obj(void) +{ + struct ntdll_thread_data *data = ntdll_get_thread_data(); + struct fast_sync_cache_entry stack_cache, *cache; + HANDLE alert_handle; + unsigned int ret; + + if (!data->fast_alert_obj) + { + SERVER_START_REQ( get_fast_alert_event ) + { + if ((ret = wine_server_call( req ))) + ERR( "failed to get fast alert event, status %#x\n", ret ); + alert_handle = wine_server_ptr_handle( reply->handle ); + } + SERVER_END_REQ; + + if ((ret = get_fast_sync_obj( alert_handle, 0, SYNCHRONIZE, &stack_cache, &cache ))) + ERR( "failed to get fast alert obj, status %#x\n", ret ); + data->fast_alert_obj = cache->fd; + /* Set the fd to -1 so release_fast_sync_obj() won't close it. + * Manhandling the cache entry here is fine since we're the only thread + * that can access our own alert event. 
*/ + cache->fd = -1; + release_fast_sync_obj( cache ); + NtClose( alert_handle ); + } + + return data->fast_alert_obj; +} + +static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs, + BOOLEAN wait_any, BOOLEAN alertable, const LARGE_INTEGER *timeout ) +{ + struct ntsync_wait_args args = {0}; + unsigned long request; + struct timespec now; + int ret; + + if (!timeout || timeout->QuadPart == TIMEOUT_INFINITE) + { + args.timeout = ~(__u64)0; + } + else if (timeout->QuadPart <= 0) + { + clock_gettime( CLOCK_MONOTONIC, &now ); + args.timeout = (now.tv_sec * NSECPERSEC) + now.tv_nsec + (-timeout->QuadPart * 100); + } + else + { + args.timeout = (timeout->QuadPart * 100) - (SECS_1601_TO_1970 * NSECPERSEC); + args.flags |= NTSYNC_WAIT_REALTIME; + } + + args.objs = (uintptr_t)objs; + args.count = count; + args.owner = GetCurrentThreadId(); + args.index = ~0u; + + if (alertable) + args.alert = get_fast_alert_obj(); + + if (wait_any || count == 1) + request = NTSYNC_IOC_WAIT_ANY; + else + request = NTSYNC_IOC_WAIT_ALL; + + do + { + ret = ioctl( device, request, &args ); + } while (ret < 0 && errno == EINTR); + + if (!ret) + { + if (args.index == count) + { + static const LARGE_INTEGER timeout; + + ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &timeout ); + assert( ret == STATUS_USER_APC ); + return ret; + } + + return wait_any ? args.index : 0; + } + else if (errno == EOWNERDEAD) + return STATUS_ABANDONED + (wait_any ? 
args.index : 0); + else if (errno == ETIMEDOUT) + return STATUS_TIMEOUT; + else + return errno_to_status( errno ); +} + +static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, + BOOLEAN alertable, const LARGE_INTEGER *timeout ) +{ + struct fast_sync_cache_entry stack_cache[64], *cache[64]; + int device, objs[64]; + HANDLE queue = NULL; + NTSTATUS ret; + DWORD i, j; + + if ((device = get_linux_sync_device()) < 0) + return STATUS_NOT_IMPLEMENTED; + + for (i = 0; i < count; ++i) + { + if ((ret = get_fast_sync_obj( handles[i], 0, SYNCHRONIZE, &stack_cache[i], &cache[i] ))) + { + for (j = 0; j < i; ++j) + release_fast_sync_obj( cache[j] ); + return ret; + } + if (cache[i]->type == FAST_SYNC_QUEUE) + queue = handles[i]; + + objs[i] = cache[i]->fd; + } + + /* It's common to wait on the message queue alone. Some applications wait + * on it in fast paths, with a zero timeout. Since we take two server calls + * instead of one when going through fast_wait_objs(), and since we only + * need to go through that path if we're waiting on other objects, just + * delegate to the server if we're only waiting on the message queue. 
*/ + if (count == 1 && queue) + { + release_fast_sync_obj( cache[0] ); + return server_wait_for_object( handles[0], alertable, timeout ); + } + + if (queue) select_queue( queue ); + + ret = linux_wait_objs( device, count, objs, wait_any, alertable, timeout ); + + if (queue) unselect_queue( queue, handles[ret] == queue ); + + for (i = 0; i < count; ++i) + release_fast_sync_obj( cache[i] ); + + return ret; +} + +static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait, + BOOLEAN alertable, const LARGE_INTEGER *timeout ) +{ + struct fast_sync_cache_entry signal_stack_cache, *signal_cache; + struct fast_sync_cache_entry wait_stack_cache, *wait_cache; + HANDLE queue = NULL; + NTSTATUS ret; + int device; + + if ((device = get_linux_sync_device()) < 0) + return STATUS_NOT_IMPLEMENTED; + + if ((ret = get_fast_sync_obj( signal, 0, 0, &signal_stack_cache, &signal_cache ))) + return ret; + + switch (signal_cache->type) + { + case FAST_SYNC_SEMAPHORE: + if (!(signal_cache->access & SEMAPHORE_MODIFY_STATE)) + { + release_fast_sync_obj( signal_cache ); + return STATUS_ACCESS_DENIED; + } + break; + + case FAST_SYNC_AUTO_EVENT: + case FAST_SYNC_MANUAL_EVENT: + if (!(signal_cache->access & EVENT_MODIFY_STATE)) + { + release_fast_sync_obj( signal_cache ); + return STATUS_ACCESS_DENIED; + } + break; + + case FAST_SYNC_MUTEX: + break; + + default: + /* can't be signaled */ + release_fast_sync_obj( signal_cache ); + return STATUS_OBJECT_TYPE_MISMATCH; + } + + if ((ret = get_fast_sync_obj( wait, 0, SYNCHRONIZE, &wait_stack_cache, &wait_cache ))) + { + release_fast_sync_obj( signal_cache ); + return ret; + } + + if (wait_cache->type == FAST_SYNC_QUEUE) + queue = wait; + + switch (signal_cache->type) + { + case FAST_SYNC_SEMAPHORE: + ret = linux_release_semaphore_obj( signal_cache->fd, 1, NULL ); + break; + + case FAST_SYNC_AUTO_EVENT: + case FAST_SYNC_MANUAL_EVENT: + ret = linux_set_event_obj( signal_cache->fd, NULL ); + break; + + case FAST_SYNC_MUTEX: + ret = 
linux_release_mutex_obj( signal_cache->fd, NULL ); + break; + + default: + assert( 0 ); + break; + } + + if (!ret) + { + if (queue) select_queue( queue ); + ret = linux_wait_objs( device, 1, &wait_cache->fd, TRUE, alertable, timeout ); + if (queue) unselect_queue( queue, !ret ); + } + + release_fast_sync_obj( signal_cache ); + release_fast_sync_obj( wait_cache ); + return ret; +} + +#else + +void close_fast_sync_obj( HANDLE handle ) +{ +} + +static NTSTATUS fast_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_set_event( HANDLE handle, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_reset_event( HANDLE handle, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_pulse_event( HANDLE handle, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_release_mutex( HANDLE handle, LONG *prev_count ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, + BOOLEAN alertable, const LARGE_INTEGER *timeout ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS fast_signal_and_wait( HANDLE signal, HANDLE wait, + BOOLEAN alertable, const LARGE_INTEGER *timeout ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +#endif + /****************************************************************************** * NtCreateSemaphore (NTDLL.@) @@ -273,15 +1173,12 @@ NTSTATUS WINAPI NtCreateSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJ data_size_t len; struct object_attributes 
*objattr; + TRACE( "access %#x, name %s, initial %d, max %d\n", (int)access, + attr ? debugstr_us(attr->ObjectName) : "(null)", (int)initial, (int)max ); + *handle = 0; if (max <= 0 || initial < 0 || initial > max) return STATUS_INVALID_PARAMETER; - if (do_fsync()) - return fsync_create_semaphore( handle, access, attr, initial, max ); - - if (do_esync()) - return esync_create_semaphore( handle, access, attr, initial, max ); - if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; SERVER_START_REQ( create_semaphore ) @@ -307,13 +1204,9 @@ NTSTATUS WINAPI NtOpenSemaphore( HANDLE *handle, ACCESS_MASK access, const OBJEC { unsigned int ret; - *handle = 0; + TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); - if (do_fsync()) - return fsync_open_semaphore( handle, access, attr ); - - if (do_esync()) - return esync_open_semaphore( handle, access, attr ); + *handle = 0; if ((ret = validate_open_object_attributes( attr ))) return ret; @@ -351,11 +1244,11 @@ NTSTATUS WINAPI NtQuerySemaphore( HANDLE handle, SEMAPHORE_INFORMATION_CLASS cla if (len != sizeof(SEMAPHORE_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; - if (do_fsync()) - return fsync_query_semaphore( handle, info, ret_len ); - - if (do_esync()) - return esync_query_semaphore( handle, info, ret_len ); + if ((ret = fast_query_semaphore( handle, out )) != STATUS_NOT_IMPLEMENTED) + { + if (!ret && ret_len) *ret_len = sizeof(SEMAPHORE_BASIC_INFORMATION); + return ret; + } SERVER_START_REQ( query_semaphore ) { @@ -379,11 +1272,10 @@ NTSTATUS WINAPI NtReleaseSemaphore( HANDLE handle, ULONG count, ULONG *previous { unsigned int ret; - if (do_fsync()) - return fsync_release_semaphore( handle, count, previous ); + TRACE( "handle %p, count %u, prev_count %p\n", handle, (int)count, previous ); - if (do_esync()) - return esync_release_semaphore( handle, count, previous ); + if ((ret = fast_release_semaphore( handle, count, previous )) != 
STATUS_NOT_IMPLEMENTED) + return ret; SERVER_START_REQ( release_semaphore ) { @@ -409,15 +1301,12 @@ NTSTATUS WINAPI NtCreateEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_ data_size_t len; struct object_attributes *objattr; + TRACE( "access %#x, name %s, type %u, state %u\n", (int)access, + attr ? debugstr_us(attr->ObjectName) : "(null)", type, state ); + *handle = 0; if (type != NotificationEvent && type != SynchronizationEvent) return STATUS_INVALID_PARAMETER; - if (do_fsync()) - return fsync_create_event( handle, access, attr, type, state ); - - if (do_esync()) - return esync_create_event( handle, access, attr, type, state ); - if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; SERVER_START_REQ( create_event ) @@ -443,15 +1332,11 @@ NTSTATUS WINAPI NtOpenEvent( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT { unsigned int ret; + TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); + *handle = 0; if ((ret = validate_open_object_attributes( attr ))) return ret; - if (do_fsync()) - return fsync_open_event( handle, access, attr ); - - if (do_esync()) - return esync_open_event( handle, access, attr ); - SERVER_START_REQ( open_event ) { req->access = access; @@ -475,11 +1360,10 @@ NTSTATUS WINAPI NtSetEvent( HANDLE handle, LONG *prev_state ) /* This comment is a dummy to make sure this patch applies in the right place. */ unsigned int ret; - if (do_fsync()) - return fsync_set_event( handle, prev_state ); + TRACE( "handle %p, prev_state %p\n", handle, prev_state ); - if (do_esync()) - return esync_set_event( handle ); + if ((ret = fast_set_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) + return ret; SERVER_START_REQ( event_op ) { @@ -501,12 +1385,10 @@ NTSTATUS WINAPI NtResetEvent( HANDLE handle, LONG *prev_state ) /* This comment is a dummy to make sure this patch applies in the right place. 
*/ unsigned int ret; - if (do_fsync()) - return fsync_reset_event( handle, prev_state ); - - if (do_esync()) - return esync_reset_event( handle ); + TRACE( "handle %p, prev_state %p\n", handle, prev_state ); + if ((ret = fast_reset_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) + return ret; SERVER_START_REQ( event_op ) { @@ -537,11 +1419,10 @@ NTSTATUS WINAPI NtPulseEvent( HANDLE handle, LONG *prev_state ) { unsigned int ret; - if (do_fsync()) - return fsync_pulse_event( handle, prev_state ); + TRACE( "handle %p, prev_state %p\n", handle, prev_state ); - if (do_esync()) - return esync_pulse_event( handle ); + if ((ret = fast_pulse_event( handle, prev_state )) != STATUS_NOT_IMPLEMENTED) + return ret; SERVER_START_REQ( event_op ) { @@ -574,11 +1455,11 @@ NTSTATUS WINAPI NtQueryEvent( HANDLE handle, EVENT_INFORMATION_CLASS class, if (len != sizeof(EVENT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; - if (do_fsync()) - return fsync_query_event( handle, info, ret_len ); - - if (do_esync()) - return esync_query_event( handle, info, ret_len ); + if ((ret = fast_query_event( handle, out )) != STATUS_NOT_IMPLEMENTED) + { + if (!ret && ret_len) *ret_len = sizeof(EVENT_BASIC_INFORMATION); + return ret; + } SERVER_START_REQ( query_event ) { @@ -605,13 +1486,10 @@ NTSTATUS WINAPI NtCreateMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT data_size_t len; struct object_attributes *objattr; - *handle = 0; - - if (do_fsync()) - return fsync_create_mutex( handle, access, attr, owned ); + TRACE( "access %#x, name %s, owned %u\n", (int)access, + attr ? debugstr_us(attr->ObjectName) : "(null)", owned ); - if (do_esync()) - return esync_create_mutex( handle, access, attr, owned ); + *handle = 0; if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; @@ -637,14 +1515,11 @@ NTSTATUS WINAPI NtOpenMutant( HANDLE *handle, ACCESS_MASK access, const OBJECT_A { unsigned int ret; + TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); + *handle = 0; if ((ret = validate_open_object_attributes( attr ))) return ret; - if (do_fsync()) - return fsync_open_mutex( handle, access, attr ); - - if (do_esync()) - return esync_open_mutex( handle, access, attr ); SERVER_START_REQ( open_mutex ) { @@ -668,11 +1543,10 @@ NTSTATUS WINAPI NtReleaseMutant( HANDLE handle, LONG *prev_count ) { unsigned int ret; - if (do_fsync()) - return fsync_release_mutex( handle, prev_count ); + TRACE( "handle %p, prev_count %p\n", handle, prev_count ); - if (do_esync()) - return esync_release_mutex( handle, prev_count ); + if ((ret = fast_release_mutex( handle, prev_count )) != STATUS_NOT_IMPLEMENTED) + return ret; SERVER_START_REQ( release_mutex ) { @@ -704,11 +1578,11 @@ NTSTATUS WINAPI NtQueryMutant( HANDLE handle, MUTANT_INFORMATION_CLASS class, if (len != sizeof(MUTANT_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH; - if (do_fsync()) - return fsync_query_mutex( handle, info, ret_len ); - - if (do_esync()) - return esync_query_mutex( handle, info, ret_len ); + if ((ret = fast_query_mutex( handle, out )) != STATUS_NOT_IMPLEMENTED) + { + if (!ret && ret_len) *ret_len = sizeof(MUTANT_BASIC_INFORMATION); + return ret; + } SERVER_START_REQ( query_mutex ) { @@ -1417,6 +2291,9 @@ NTSTATUS WINAPI NtCreateTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_ data_size_t len; struct object_attributes *objattr; + TRACE( "access %#x, name %s, type %u\n", (int)access, + attr ? debugstr_us(attr->ObjectName) : "(null)", type ); + *handle = 0; if (type != NotificationTimer && type != SynchronizationTimer) return STATUS_INVALID_PARAMETER; if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; @@ -1444,6 +2321,8 @@ NTSTATUS WINAPI NtOpenTimer( HANDLE *handle, ACCESS_MASK access, const OBJECT_AT { unsigned int ret; + TRACE( "access %#x, name %s\n", (int)access, attr ? 
debugstr_us(attr->ObjectName) : "(null)" ); + *handle = 0; if ((ret = validate_open_object_attributes( attr ))) return ret; @@ -1497,6 +2376,8 @@ NTSTATUS WINAPI NtCancelTimer( HANDLE handle, BOOLEAN *state ) { unsigned int ret; + TRACE( "handle %p, state %p\n", handle, state ); + SERVER_START_REQ( cancel_timer ) { req->handle = wine_server_obj_handle( handle ); @@ -1565,27 +2446,29 @@ NTSTATUS WINAPI NtWaitForMultipleObjects( DWORD count, const HANDLE *handles, BO { select_op_t select_op; UINT i, flags = SELECT_INTERRUPTIBLE; + unsigned int ret; if (!count || count > MAXIMUM_WAIT_OBJECTS) return STATUS_INVALID_PARAMETER_1; - if (do_fsync()) + if (TRACE_ON(sync)) { - NTSTATUS ret = fsync_wait_objects( count, handles, wait_any, alertable, timeout ); - if (ret != STATUS_NOT_IMPLEMENTED) - return ret; + TRACE( "wait_any %u, alertable %u, handles {%p", wait_any, alertable, handles[0] ); + for (i = 1; i < count; i++) TRACE( ", %p", handles[i] ); + TRACE( "}, timeout %s\n", debugstr_timeout(timeout) ); } - if (do_esync()) + if ((ret = fast_wait( count, handles, wait_any, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) { - NTSTATUS ret = esync_wait_objects( count, handles, wait_any, alertable, timeout ); - if (ret != STATUS_NOT_IMPLEMENTED) - return ret; + TRACE( "-> %#x\n", ret ); + return ret; } if (alertable) flags |= SELECT_ALERTABLE; select_op.wait.op = wait_any ? 
SELECT_WAIT : SELECT_WAIT_ALL; for (i = 0; i < count; i++) select_op.wait.handles[i] = wine_server_obj_handle( handles[i] ); - return server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); + ret = server_wait( &select_op, offsetof( select_op_t, wait.handles[count] ), flags, timeout ); + TRACE( "-> %#x\n", ret ); + return ret; } @@ -1616,15 +2499,15 @@ NTSTATUS WINAPI NtSignalAndWaitForSingleObject( HANDLE signal, HANDLE wait, { select_op_t select_op; UINT flags = SELECT_INTERRUPTIBLE; + NTSTATUS ret; - if (do_fsync()) - return fsync_signal_and_wait( signal, wait, alertable, timeout ); - - if (do_esync()) - return esync_signal_and_wait( signal, wait, alertable, timeout ); + TRACE( "signal %p, wait %p, alertable %u, timeout %s\n", signal, wait, alertable, debugstr_timeout(timeout) ); if (!signal) return STATUS_INVALID_HANDLE; + if ((ret = fast_signal_and_wait( signal, wait, alertable, timeout )) != STATUS_NOT_IMPLEMENTED) + return ret; + if (alertable) flags |= SELECT_ALERTABLE; select_op.signal_and_wait.op = SELECT_SIGNAL_AND_WAIT; select_op.signal_and_wait.wait = wine_server_obj_handle( wait ); @@ -1663,24 +2546,7 @@ NTSTATUS WINAPI NtYieldExecution(void) NTSTATUS WINAPI NtDelayExecution( BOOLEAN alertable, const LARGE_INTEGER *timeout ) { /* if alertable, we need to query the server */ - if (alertable) - { - if (do_fsync()) - { - NTSTATUS ret = fsync_wait_objects( 0, NULL, TRUE, TRUE, timeout ); - if (ret != STATUS_NOT_IMPLEMENTED) - return ret; - } - - if (do_esync()) - { - NTSTATUS ret = esync_wait_objects( 0, NULL, TRUE, TRUE, timeout ); - if (ret != STATUS_NOT_IMPLEMENTED) - return ret; - } - - return server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, timeout ); - } + if (alertable) return server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, timeout ); if (!timeout || timeout->QuadPart == TIMEOUT_INFINITE) /* sleep forever */ { @@ -1874,6 +2740,9 @@ NTSTATUS WINAPI NtCreateKeyedEvent( HANDLE *handle, 
ACCESS_MASK access, data_size_t len; struct object_attributes *objattr; + TRACE( "access %#x, name %s, flags %#x\n", (int)access, + attr ? debugstr_us(attr->ObjectName) : "(null)", (int)flags ); + *handle = 0; if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret; @@ -1898,6 +2767,8 @@ NTSTATUS WINAPI NtOpenKeyedEvent( HANDLE *handle, ACCESS_MASK access, const OBJE { unsigned int ret; + TRACE( "access %#x, name %s\n", (int)access, attr ? debugstr_us(attr->ObjectName) : "(null)" ); + *handle = 0; if ((ret = validate_open_object_attributes( attr ))) return ret; @@ -1924,6 +2795,8 @@ NTSTATUS WINAPI NtWaitForKeyedEvent( HANDLE handle, const void *key, select_op_t select_op; UINT flags = SELECT_INTERRUPTIBLE; + TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); + if (!handle) handle = keyed_event; if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; if (alertable) flags |= SELECT_ALERTABLE; @@ -1943,6 +2816,8 @@ NTSTATUS WINAPI NtReleaseKeyedEvent( HANDLE handle, const void *key, select_op_t select_op; UINT flags = SELECT_INTERRUPTIBLE; + TRACE( "handle %p, key %p, alertable %u, timeout %s\n", handle, key, alertable, debugstr_timeout(timeout) ); + if (!handle) handle = keyed_event; if ((ULONG_PTR)key & 1) return STATUS_INVALID_PARAMETER_1; if (alertable) flags |= SELECT_ALERTABLE; @@ -2042,12 +2917,6 @@ NTSTATUS WINAPI NtRemoveIoCompletion( HANDLE handle, ULONG_PTR *key, ULONG_PTR * TRACE( "(%p, %p, %p, %p, %p)\n", handle, key, value, io, timeout ); - if (timeout && !timeout->QuadPart && (do_esync() || do_fsync())) - { - status = NtWaitForSingleObject( handle, FALSE, timeout ); - if (status != WAIT_OBJECT_0) return status; - } - SERVER_START_REQ( remove_completion ) { req->handle = wine_server_obj_handle( handle ); @@ -2063,7 +2932,7 @@ NTSTATUS WINAPI NtRemoveIoCompletion( HANDLE handle, ULONG_PTR *key, ULONG_PTR * } SERVER_END_REQ; if (status != STATUS_PENDING) return status; - if 
(!timeout || timeout->QuadPart) status = wait_internal_server( wait_handle, FALSE, timeout ); + if (!timeout || timeout->QuadPart) status = server_wait_for_object( wait_handle, FALSE, timeout ); else status = STATUS_TIMEOUT; if (status != WAIT_OBJECT_0) return status; @@ -2095,12 +2964,6 @@ NTSTATUS WINAPI NtRemoveIoCompletionEx( HANDLE handle, FILE_IO_COMPLETION_INFORM TRACE( "%p %p %u %p %p %u\n", handle, info, (int)count, written, timeout, alertable ); - if (timeout && !timeout->QuadPart && (do_esync() || do_fsync())) - { - status = NtWaitForSingleObject( handle, alertable, timeout ); - if (status != WAIT_OBJECT_0) goto done; - } - while (i < count) { SERVER_START_REQ( remove_completion ) @@ -2131,7 +2994,7 @@ NTSTATUS WINAPI NtRemoveIoCompletionEx( HANDLE handle, FILE_IO_COMPLETION_INFORM assert( status == STATUS_USER_APC ); goto done; } - if (!timeout || timeout->QuadPart) status = wait_internal_server( wait_handle, alertable, timeout ); + if (!timeout || timeout->QuadPart) status = server_wait_for_object( wait_handle, alertable, timeout ); else status = STATUS_TIMEOUT; if (status != WAIT_OBJECT_0) goto done; diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c index c3ab1317295..8314d8800b0 100644 --- a/dlls/ntdll/unix/thread.c +++ b/dlls/ntdll/unix/thread.c @@ -1644,7 +1644,7 @@ if (ret == STATUS_PENDING && wait_handle) { - NtWaitForSingleObject( wait_handle, FALSE, NULL ); + server_wait_for_object( wait_handle, FALSE, NULL ); SERVER_START_REQ( suspend_thread ) { @@ -1808,7 +1808,7 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma if (ret == STATUS_PENDING) { - NtWaitForSingleObject( context_handle, FALSE, NULL ); + server_wait_for_object( context_handle, FALSE, NULL ); SERVER_START_REQ( get_thread_context ) { diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index ce5a24116f4..5c15edf5ae4 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -93,8 +93,6 @@ 
struct ntdll_thread_data { void *cpu_data[16]; /* reserved for CPU-specific data */ void *kernel_stack; /* stack for thread startup and kernel syscalls */ - int esync_apc_fd; /* fd to wait on for user APCs */ - int *fsync_apc_futex; int request_fd; /* fd for sending server requests */ int reply_fd; /* fd for receiving server replies */ int wait_fd[2]; /* fd for sleeping server requests */ @@ -103,6 +101,7 @@ struct ntdll_thread_data PRTL_THREAD_START_ROUTINE start; /* thread entry point */ void *param; /* thread entry point parameter */ void *jmp_buf; /* setjmp buffer for exception handling */ + int fast_alert_obj; /* fd for the fast alert event */ }; C_ASSERT( sizeof(struct ntdll_thread_data) <= sizeof(((TEB *)0)->GdiTebBatch) ); @@ -208,6 +207,8 @@ extern NTSTATUS load_main_exe( const WCHAR *name, const char *unix_name, const W extern NTSTATUS load_start_exe( WCHAR **image, void **module ); extern void start_server( BOOL debug ); +extern pthread_mutex_t fd_cache_mutex; + extern unsigned int server_call_unlocked( void *req_ptr ); extern void server_enter_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); extern void server_leave_uninterrupted_section( pthread_mutex_t *mutex, sigset_t *sigset ); @@ -215,7 +216,7 @@ extern unsigned int server_select( const select_op_t *select_op, data_size_t siz timeout_t abs_timeout, context_t *context, user_apc_t *user_apc ); extern unsigned int server_wait( const select_op_t *select_op, data_size_t size, UINT flags, const LARGE_INTEGER *timeout ); -extern NTSTATUS wait_internal_server( HANDLE handle, BOOLEAN alertable, const LARGE_INTEGER *timeout ); +extern unsigned int server_wait_for_object( HANDLE handle, BOOL alertable, const LARGE_INTEGER *timeout ); extern unsigned int server_queue_process_apc( HANDLE process, const apc_call_t *call, apc_result_t *result ); extern int server_get_unix_fd( HANDLE handle, unsigned int wanted_access, int *unix_fd, @@ -382,6 +383,8 @@ extern NTSTATUS wow64_wine_spawnvp( void 
*args ); extern void dbg_init(void); +extern void close_fast_sync_obj( HANDLE handle ); + extern NTSTATUS call_user_apc_dispatcher( CONTEXT *context_ptr, ULONG_PTR arg1, ULONG_PTR arg2, ULONG_PTR arg3, PNTAPCFUNC func, NTSTATUS status ); extern NTSTATUS call_user_exception_dispatcher( EXCEPTION_RECORD *rec, CONTEXT *context ); @@ -392,6 +395,7 @@ extern void call_raise_user_exception_dispatcher(void); extern const char * wine_debuginfostr_pc(void *pc); #define TICKSPERSEC 10000000 +#define NSECPERSEC 1000000000 #define SECS_1601_TO_1970 ((369 * 365 + 89) * (ULONGLONG)86400) static inline ULONGLONG ticks_from_time_t( time_t time ) @@ -457,7 +461,7 @@ static inline async_data_t server_async( HANDLE handle, struct async_fileio *use static inline NTSTATUS wait_async( HANDLE handle, BOOL alertable ) { - return wait_internal_server( handle, alertable, NULL ); + return server_wait_for_object( handle, alertable, NULL ); } static inline BOOL in_wow64_call(void) diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c index 0f5576a6b17..0c568e7a8b5 100644 --- a/dlls/ntdll/unix/virtual.c +++ b/dlls/ntdll/unix/virtual.c @@ -4124,8 +4124,6 @@ static TEB *init_teb( void *ptr, BOOL is_wow ) teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer; teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer); thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch; - thread_data->esync_apc_fd = -1; - thread_data->fsync_apc_futex = NULL; thread_data->request_fd = -1; thread_data->reply_fd = -1; thread_data->wait_fd[0] = -1; diff --git a/dlls/rpcrt4/rpc_server.c b/dlls/rpcrt4/rpc_server.c index 77b5d83b3c0..41431ebca02 100644 --- a/dlls/rpcrt4/rpc_server.c +++ b/dlls/rpcrt4/rpc_server.c @@ -701,6 +701,10 @@ static DWORD CALLBACK RPCRT4_server_thread(LPVOID the_arg) } LeaveCriticalSection(&cps->cs); + EnterCriticalSection(&listen_cs); + CloseHandle(cps->server_thread); + cps->server_thread = NULL; + LeaveCriticalSection(&listen_cs); TRACE("done\n"); 
return 0; } @@ -1566,10 +1570,7 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) LIST_FOR_EACH_ENTRY(protseq, &protseqs, RpcServerProtseq, entry) { if ((wait_thread = protseq->server_thread)) - { - protseq->server_thread = NULL; break; - } } LeaveCriticalSection(&server_cs); if (!wait_thread) @@ -1578,7 +1579,6 @@ RPC_STATUS WINAPI RpcMgmtWaitServerListen( void ) TRACE("waiting for thread %lu\n", GetThreadId(wait_thread)); LeaveCriticalSection(&listen_cs); WaitForSingleObject(wait_thread, INFINITE); - CloseHandle(wait_thread); EnterCriticalSection(&listen_cs); } if (listen_done_event == event) diff --git a/dlls/webservices/tests/channel.c b/dlls/webservices/tests/channel.c index 4cf39c10732..1fb12297eef 100644 --- a/dlls/webservices/tests/channel.c +++ b/dlls/webservices/tests/channel.c @@ -1214,6 +1214,9 @@ static const char send_record_begin[] = { static const char send_record_middle[] = { 0x01, 0x56, 0x0e, 0x42 }; static const char send_record_end[] = { 0x08, 0x02, 0x6e, 0x73, 0x89, 0xff, 0x01, 0x01 }; +#pragma GCC diagnostic ignored "-Warray-bounds" +#pragma GCC diagnostic ignored "-Wstringop-overflow" + static BOOL send_dict_str( int sock, char *addr, const char *str, int dict_str_count ) { char buf[512], dict_buf[256], body_buf[128], dict_size_buf[5]; diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h index 59c8b5f12c8..15ccd35b7f9 100644 --- a/include/wine/server_protocol.h +++ b/include/wine/server_protocol.h @@ -5883,189 +5883,88 @@ struct get_next_thread_reply char __pad_12[4]; }; -enum esync_type + +enum fast_sync_type { - ESYNC_SEMAPHORE = 1, - ESYNC_AUTO_EVENT, - ESYNC_MANUAL_EVENT, - ESYNC_MUTEX, - ESYNC_AUTO_SERVER, - ESYNC_MANUAL_SERVER, - ESYNC_QUEUE, + FAST_SYNC_SEMAPHORE = 1, + FAST_SYNC_MUTEX, + FAST_SYNC_AUTO_EVENT, + FAST_SYNC_MANUAL_EVENT, + FAST_SYNC_AUTO_SERVER, + FAST_SYNC_MANUAL_SERVER, + FAST_SYNC_QUEUE, }; -struct create_esync_request -{ - struct request_header __header; - unsigned int access; - int 
initval; - int type; - int max; - /* VARARG(objattr,object_attributes); */ - char __pad_28[4]; -}; -struct create_esync_reply -{ - struct reply_header __header; - obj_handle_t handle; - int type; - unsigned int shm_idx; - char __pad_20[4]; -}; -struct open_esync_request +struct get_linux_sync_device_request { struct request_header __header; - unsigned int access; - unsigned int attributes; - obj_handle_t rootdir; - int type; - /* VARARG(name,unicode_str); */ - char __pad_28[4]; + char __pad_12[4]; }; -struct open_esync_reply +struct get_linux_sync_device_reply { struct reply_header __header; obj_handle_t handle; - int type; - unsigned int shm_idx; - char __pad_20[4]; + char __pad_12[4]; }; -struct get_esync_fd_request + +struct get_linux_sync_obj_request { struct request_header __header; obj_handle_t handle; }; -struct get_esync_fd_reply +struct get_linux_sync_obj_reply { struct reply_header __header; + obj_handle_t handle; int type; - unsigned int shm_idx; + unsigned int access; + char __pad_20[4]; }; -struct esync_msgwait_request -{ - struct request_header __header; - int in_msgwait; -}; -struct esync_msgwait_reply -{ - struct reply_header __header; -}; - -struct get_esync_apc_fd_request +struct fast_select_queue_request { struct request_header __header; - char __pad_12[4]; + obj_handle_t handle; }; -struct get_esync_apc_fd_reply +struct fast_select_queue_reply { struct reply_header __header; }; -#define FSYNC_SHM_PAGE_SIZE 0x10000 - -enum fsync_type -{ - FSYNC_SEMAPHORE = 1, - FSYNC_AUTO_EVENT, - FSYNC_MANUAL_EVENT, - FSYNC_MUTEX, - FSYNC_AUTO_SERVER, - FSYNC_MANUAL_SERVER, - FSYNC_QUEUE, -}; -struct create_fsync_request +struct fast_unselect_queue_request { struct request_header __header; - unsigned int access; - int low; - int high; - int type; - /* VARARG(objattr,object_attributes); */ - char __pad_28[4]; -}; -struct create_fsync_reply -{ - struct reply_header __header; obj_handle_t handle; - int type; - unsigned int shm_idx; + int signaled; char __pad_20[4]; 
}; - - -struct open_fsync_request -{ - struct request_header __header; - unsigned int access; - unsigned int attributes; - obj_handle_t rootdir; - int type; - /* VARARG(name,unicode_str); */ - char __pad_28[4]; -}; -struct open_fsync_reply +struct fast_unselect_queue_reply { struct reply_header __header; - obj_handle_t handle; - int type; - unsigned int shm_idx; - char __pad_20[4]; }; -struct get_fsync_idx_request -{ - struct request_header __header; - obj_handle_t handle; -}; -struct get_fsync_idx_reply -{ - struct reply_header __header; - int type; - unsigned int shm_idx; -}; -struct fsync_msgwait_request -{ - struct request_header __header; - int in_msgwait; -}; -struct fsync_msgwait_reply -{ - struct reply_header __header; -}; - -struct get_fsync_apc_idx_request +struct get_fast_alert_event_request { struct request_header __header; char __pad_12[4]; }; -struct get_fsync_apc_idx_reply +struct get_fast_alert_event_reply { struct reply_header __header; - unsigned int shm_idx; + obj_handle_t handle; char __pad_12[4]; }; -struct fsync_free_shm_idx_request -{ - struct request_header __header; - unsigned int shm_idx; -}; -struct fsync_free_shm_idx_reply -{ - struct reply_header __header; -}; - enum request { @@ -6359,17 +6258,11 @@ enum request REQ_suspend_process, REQ_resume_process, REQ_get_next_thread, - REQ_create_esync, - REQ_open_esync, - REQ_get_esync_fd, - REQ_esync_msgwait, - REQ_get_esync_apc_fd, - REQ_create_fsync, - REQ_open_fsync, - REQ_get_fsync_idx, - REQ_fsync_msgwait, - REQ_get_fsync_apc_idx, - REQ_fsync_free_shm_idx, + REQ_get_linux_sync_device, + REQ_get_linux_sync_obj, + REQ_fast_select_queue, + REQ_fast_unselect_queue, + REQ_get_fast_alert_event, REQ_NB_REQUESTS }; @@ -6667,17 +6560,11 @@ union generic_request struct suspend_process_request suspend_process_request; struct resume_process_request resume_process_request; struct get_next_thread_request get_next_thread_request; - struct create_esync_request create_esync_request; - struct 
open_esync_request open_esync_request; - struct get_esync_fd_request get_esync_fd_request; - struct esync_msgwait_request esync_msgwait_request; - struct get_esync_apc_fd_request get_esync_apc_fd_request; - struct create_fsync_request create_fsync_request; - struct open_fsync_request open_fsync_request; - struct get_fsync_idx_request get_fsync_idx_request; - struct fsync_msgwait_request fsync_msgwait_request; - struct get_fsync_apc_idx_request get_fsync_apc_idx_request; - struct fsync_free_shm_idx_request fsync_free_shm_idx_request; + struct get_linux_sync_device_request get_linux_sync_device_request; + struct get_linux_sync_obj_request get_linux_sync_obj_request; + struct fast_select_queue_request fast_select_queue_request; + struct fast_unselect_queue_request fast_unselect_queue_request; + struct get_fast_alert_event_request get_fast_alert_event_request; }; union generic_reply { @@ -6973,17 +6860,11 @@ union generic_reply struct suspend_process_reply suspend_process_reply; struct resume_process_reply resume_process_reply; struct get_next_thread_reply get_next_thread_reply; - struct create_esync_reply create_esync_reply; - struct open_esync_reply open_esync_reply; - struct get_esync_fd_reply get_esync_fd_reply; - struct esync_msgwait_reply esync_msgwait_reply; - struct get_esync_apc_fd_reply get_esync_apc_fd_reply; - struct create_fsync_reply create_fsync_reply; - struct open_fsync_reply open_fsync_reply; - struct get_fsync_idx_reply get_fsync_idx_reply; - struct fsync_msgwait_reply fsync_msgwait_reply; - struct get_fsync_apc_idx_reply get_fsync_apc_idx_reply; - struct fsync_free_shm_idx_reply fsync_free_shm_idx_reply; + struct get_linux_sync_device_reply get_linux_sync_device_reply; + struct get_linux_sync_obj_reply get_linux_sync_obj_reply; + struct fast_select_queue_reply fast_select_queue_reply; + struct fast_unselect_queue_reply fast_unselect_queue_reply; + struct get_fast_alert_event_reply get_fast_alert_event_reply; }; /* ### protocol_version begin ### */ 
diff --git a/server/Makefile.in b/server/Makefile.in index a98615cce72..d21bd5541c7 100644 --- a/server/Makefile.in +++ b/server/Makefile.in @@ -11,11 +11,10 @@ SOURCES = \ debugger.c \ device.c \ directory.c \ - esync.c \ event.c \ + fast_sync.c \ fd.c \ file.c \ - fsync.c \ handle.c \ hook.c \ mach.c \ diff --git a/server/async.c b/server/async.c index 1f32d10a0f5..69f1ce912b1 100644 --- a/server/async.c +++ b/server/async.c @@ -78,8 +78,6 @@ static const struct object_ops async_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ async_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ async_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -92,6 +90,7 @@ static const struct object_ops async_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ async_destroy /* destroy */ }; @@ -700,8 +699,6 @@ static const struct object_ops iosb_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -714,6 +711,7 @@ static const struct object_ops iosb_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ iosb_destroy /* destroy */ }; diff --git a/server/atom.c b/server/atom.c index 6b95a546597..ba320c4c630 100644 --- a/server/atom.c +++ b/server/atom.c @@ -79,8 +79,6 @@ static const struct object_ops atom_table_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -93,6 +91,7 @@ static const struct object_ops atom_table_ops = NULL, /* unlink_name */ 
no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ atom_table_destroy /* destroy */ }; diff --git a/server/change.c b/server/change.c index 8a97b66409d..dafa7e8339c 100644 --- a/server/change.c +++ b/server/change.c @@ -112,8 +112,6 @@ static const struct object_ops dir_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - default_fd_get_esync_fd, /* get_esync_fd */ - default_fd_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ dir_get_fd, /* get_fd */ @@ -126,6 +124,7 @@ static const struct object_ops dir_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ dir_close_handle, /* close_handle */ dir_destroy /* destroy */ }; diff --git a/server/clipboard.c b/server/clipboard.c index f24924eafa5..de9f84f74d0 100644 --- a/server/clipboard.c +++ b/server/clipboard.c @@ -76,8 +76,6 @@ static const struct object_ops clipboard_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -90,6 +88,7 @@ static const struct object_ops clipboard_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ clipboard_destroy /* destroy */ }; diff --git a/server/completion.c b/server/completion.c index 580f83b4a82..cdc22edfa75 100644 --- a/server/completion.c +++ b/server/completion.c @@ -35,8 +35,6 @@ #include "file.h" #include "handle.h" #include "request.h" -#include "esync.h" -#include "fsync.h" static const WCHAR completion_name[] = {'I','o','C','o','m','p','l','e','t','i','o','n'}; @@ -79,8 +77,7 @@ struct completion struct 
list wait_queue; unsigned int depth; int closed; - int esync_fd; - unsigned int fsync_idx; + struct fast_sync *fast_sync; }; static void completion_wait_dump( struct object*, int ); @@ -96,8 +93,6 @@ static const struct object_ops completion_wait_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ completion_wait_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ completion_wait_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -110,6 +105,7 @@ static const struct object_ops completion_wait_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ completion_wait_destroy /* destroy */ }; @@ -157,8 +153,7 @@ static void completion_wait_satisfied( struct object *obj, struct wait_queue_ent list_remove( &msg->queue_entry ); if (list_empty( &wait->completion->queue )) { - if (do_esync()) esync_clear( wait->completion->esync_fd ); - if (do_fsync()) fsync_clear( &wait->completion->obj ); + fast_reset_event( wait->completion->fast_sync ); } if (wait->msg) free( wait->msg ); wait->msg = msg; @@ -166,8 +161,7 @@ static void completion_wait_satisfied( struct object *obj, struct wait_queue_ent static void completion_dump( struct object*, int ); static int completion_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int completion_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int completion_get_fsync_idx( struct object *obj, enum fsync_type *type ); +static struct fast_sync *completion_get_fast_sync( struct object *obj ); static int completion_close_handle( struct object *obj, struct process *process, obj_handle_t handle ); static void completion_destroy( struct object * ); @@ -179,8 +173,6 @@ static const struct object_ops completion_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ completion_signaled, /* signaled */ 
- completion_get_esync_fd, /* get_esync_fd */ - completion_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -193,6 +185,7 @@ static const struct object_ops completion_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + completion_get_fast_sync, /* get_fast_sync */ completion_close_handle, /* close_handle */ completion_destroy /* destroy */ }; @@ -202,13 +195,11 @@ static void completion_destroy( struct object *obj) struct completion *completion = (struct completion *) obj; struct comp_msg *tmp, *next; - if (do_esync()) close( completion->esync_fd ); - if (completion->fsync_idx) fsync_free_shm_idx( completion->fsync_idx ); - LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &completion->queue, struct comp_msg, queue_entry ) { free( tmp ); } + if (completion->fast_sync) release_object( completion->fast_sync ); } static void completion_dump( struct object *obj, int verbose ) @@ -226,21 +217,14 @@ static int completion_signaled( struct object *obj, struct wait_queue_entry *ent return !list_empty( &completion->queue ) || completion->closed; } -static int completion_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct completion *completion = (struct completion *)obj; - - *type = ESYNC_MANUAL_SERVER; - return completion->esync_fd; -} - -static unsigned int completion_get_fsync_idx( struct object *obj, enum fsync_type *type ) +static struct fast_sync *completion_get_fast_sync( struct object *obj ) { struct completion *completion = (struct completion *)obj; - assert( obj->ops == &completion_ops ); - *type = FSYNC_MANUAL_SERVER; - return completion->fsync_idx; + if (!completion->fast_sync) + completion->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &completion->queue ) ); + if (completion->fast_sync) grab_object( completion->fast_sync ); + return completion->fast_sync; } static int completion_close_handle( 
struct object *obj, struct process *process, obj_handle_t handle ) @@ -310,12 +294,11 @@ static struct completion *create_completion( struct object *root, const struct u list_init( &completion->queue ); list_init( &completion->wait_queue ); completion->depth = 0; + completion->fast_sync = NULL; completion->closed = 0; } } - if (do_esync()) completion->esync_fd = esync_create_fd( 0, 0 ); - completion->fsync_idx = 0; - if (do_fsync()) completion->fsync_idx = fsync_alloc_shm( 0, 0 ); + return completion; } @@ -346,6 +329,7 @@ void add_completion( struct completion *completion, apc_param_t ckey, apc_param_ if (list_empty( &completion->queue )) return; } if (!list_empty( &completion->queue )) wake_up( &completion->obj, 0 ); + fast_set_event( completion->fast_sync ); } /* create a completion */ @@ -428,10 +412,7 @@ DECL_HANDLER(remove_completion) free( msg ); reply->wait_handle = 0; if (list_empty( &completion->queue )) - { - if (do_esync()) esync_clear( completion->esync_fd ); - if (do_fsync()) fsync_clear( &completion->obj ); - } + fast_reset_event( completion->fast_sync ); } release_object( completion ); diff --git a/server/console.c b/server/console.c index 9084e5a3828..2c45cdd0141 100644 --- a/server/console.c +++ b/server/console.c @@ -41,8 +41,6 @@ #include "wincon.h" #include "winternl.h" #include "wine/condrv.h" -#include "esync.h" -#include "fsync.h" struct screen_buffer; @@ -63,6 +61,7 @@ struct console struct fd *fd; /* for bare console, attached input fd */ struct async_queue ioctl_q; /* ioctl queue */ struct async_queue read_q; /* read queue */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void console_dump( struct object *obj, int verbose ); @@ -74,6 +73,7 @@ static struct object *console_lookup_name( struct object *obj, struct unicode_st static struct object *console_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ); static int console_add_queue( struct object *obj, struct 
wait_queue_entry *entry ); +static struct fast_sync *console_get_fast_sync( struct object *obj ); static const struct object_ops console_ops = { @@ -83,8 +83,6 @@ static const struct object_ops console_ops = console_add_queue, /* add_queue */ remove_queue, /* remove_queue */ console_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ console_get_fd, /* get_fd */ @@ -97,6 +95,7 @@ static const struct object_ops console_ops = NULL, /* unlink_name */ console_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + console_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ console_destroy /* destroy */ }; @@ -143,20 +142,18 @@ struct console_server unsigned int once_input : 1; /* flag if input thread has already been requested */ int term_fd; /* UNIX terminal fd */ struct termios termios; /* original termios */ - int esync_fd; - unsigned int fsync_idx; + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void console_server_dump( struct object *obj, int verbose ); static void console_server_destroy( struct object *obj ); static int console_server_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int console_server_get_fsync_idx( struct object *obj, enum fsync_type *type ); static struct fd *console_server_get_fd( struct object *obj ); static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name, unsigned int attr, struct object *root ); static struct object *console_server_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ); +static struct fast_sync *console_server_get_fast_sync( struct object *obj ); static const struct object_ops console_server_ops = { @@ -166,8 +163,6 @@ static const struct object_ops console_server_ops = add_queue, 
/* add_queue */ remove_queue, /* remove_queue */ console_server_signaled, /* signaled */ - console_server_get_esync_fd, /* get_esync_fd */ - console_server_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ console_server_get_fd, /* get_fd */ @@ -180,6 +175,7 @@ static const struct object_ops console_server_ops = NULL, /* unlink_name */ console_server_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + console_server_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ console_server_destroy /* destroy */ }; @@ -228,6 +224,7 @@ static int screen_buffer_add_queue( struct object *obj, struct wait_queue_entry static struct fd *screen_buffer_get_fd( struct object *obj ); static struct object *screen_buffer_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ); +static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj ); static const struct object_ops screen_buffer_ops = { @@ -237,8 +234,6 @@ static const struct object_ops screen_buffer_ops = screen_buffer_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ screen_buffer_get_fd, /* get_fd */ @@ -251,6 +246,7 @@ static const struct object_ops screen_buffer_ops = NULL, /* unlink_name */ screen_buffer_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + screen_buffer_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ screen_buffer_destroy /* destroy */ }; @@ -288,8 +284,6 @@ static const struct object_ops console_device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -302,6 +296,7 @@ static const struct object_ops console_device_ops = default_unlink_name, /* 
unlink_name */ console_device_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; @@ -317,6 +312,7 @@ static struct object *console_input_open_file( struct object *obj, unsigned int unsigned int sharing, unsigned int options ); static int console_input_add_queue( struct object *obj, struct wait_queue_entry *entry ); static struct fd *console_input_get_fd( struct object *obj ); +static struct fast_sync *console_input_get_fast_sync( struct object *obj ); static void console_input_destroy( struct object *obj ); static const struct object_ops console_input_ops = @@ -327,8 +323,6 @@ static const struct object_ops console_input_ops = console_input_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ console_input_get_fd, /* get_fd */ @@ -341,6 +335,7 @@ static const struct object_ops console_input_ops = default_unlink_name, /* unlink_name */ console_input_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + console_input_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ console_input_destroy /* destroy */ }; @@ -376,6 +371,7 @@ static int console_output_add_queue( struct object *obj, struct wait_queue_entry static struct fd *console_output_get_fd( struct object *obj ); static struct object *console_output_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ); +static struct fast_sync *console_output_get_fast_sync( struct object *obj ); static void console_output_destroy( struct object *obj ); static const struct object_ops console_output_ops = @@ -386,8 +382,6 @@ static const struct object_ops console_output_ops = console_output_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* 
get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ console_output_get_fd, /* get_fd */ @@ -400,6 +394,7 @@ static const struct object_ops console_output_ops = default_unlink_name, /* unlink_name */ console_output_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + console_output_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ console_output_destroy /* destroy */ }; @@ -446,8 +441,6 @@ static const struct object_ops console_connection_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ console_connection_get_fd, /* get_fd */ @@ -460,6 +453,7 @@ static const struct object_ops console_connection_ops = default_unlink_name, /* unlink_name */ console_connection_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ console_connection_close_handle, /* close_handle */ console_connection_destroy /* destroy */ }; @@ -559,6 +553,7 @@ static struct object *create_console(void) console->server = NULL; console->fd = NULL; console->last_id = 0; + console->fast_sync = NULL; init_async_queue( &console->ioctl_q ); init_async_queue( &console->read_q ); @@ -598,6 +593,7 @@ static int queue_host_ioctl( struct console_server *server, unsigned int code, u } list_add_tail( &server->queue, &ioctl->entry ); wake_up( &server->obj, 0 ); + fast_set_event( server->fast_sync ); if (async) set_error( STATUS_PENDING ); return 1; } @@ -610,10 +606,6 @@ static void disconnect_console_server( struct console_server *server ) list_remove( &call->entry ); console_host_ioctl_terminate( call, STATUS_CANCELLED ); } - if (do_fsync()) - fsync_clear( &server->obj ); - if (do_esync()) - esync_clear( server->esync_fd ); while (!list_empty( &server->read_queue )) { struct console_host_ioctl *call = LIST_ENTRY( list_head( &server->read_queue ), struct 
console_host_ioctl, entry ); @@ -634,6 +626,7 @@ static void disconnect_console_server( struct console_server *server ) server->console->server = NULL; server->console = NULL; wake_up( &server->obj, 0 ); + fast_set_event( server->fast_sync ); } } @@ -788,6 +781,8 @@ static void console_destroy( struct object *obj ) free_async_queue( &console->read_q ); if (console->fd) release_object( console->fd ); + + if (console->fast_sync) release_object( console->fast_sync ); } static struct object *create_console_connection( struct console *console ) @@ -835,6 +830,16 @@ static struct object *console_open_file( struct object *obj, unsigned int access return grab_object( obj ); } +static struct fast_sync *console_get_fast_sync( struct object *obj ) +{ + struct console *console = (struct console *)obj; + + if (!console->fast_sync) + console->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, console->signaled ); + if (console->fast_sync) grab_object( console->fast_sync ); + return console->fast_sync; +} + static void screen_buffer_dump( struct object *obj, int verbose ) { struct screen_buffer *screen_buffer = (struct screen_buffer *)obj; @@ -884,6 +889,17 @@ static struct fd *screen_buffer_get_fd( struct object *obj ) return NULL; } +static struct fast_sync *screen_buffer_get_fast_sync( struct object *obj ) +{ + struct screen_buffer *screen_buffer = (struct screen_buffer *)obj; + if (!screen_buffer->input) + { + set_error( STATUS_ACCESS_DENIED ); + return NULL; + } + return console_get_fast_sync( &screen_buffer->input->obj ); +} + static void console_server_dump( struct object *obj, int verbose ) { assert( obj->ops == &console_server_ops ); @@ -896,8 +912,7 @@ static void console_server_destroy( struct object *obj ) assert( obj->ops == &console_server_ops ); disconnect_console_server( server ); if (server->fd) release_object( server->fd ); - if (do_esync()) close( server->esync_fd ); - if (server->fsync_idx) fsync_free_shm_idx( server->fsync_idx ); + if (server->fast_sync) 
release_object( server->fast_sync ); } static struct object *console_server_lookup_name( struct object *obj, struct unicode_str *name, @@ -939,20 +954,6 @@ static int console_server_signaled( struct object *obj, struct wait_queue_entry return !server->console || !list_empty( &server->queue ); } -static int console_server_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct console_server *server = (struct console_server*)obj; - *type = ESYNC_MANUAL_SERVER; - return server->esync_fd; -} - -static unsigned int console_server_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct console_server *server = (struct console_server*)obj; - *type = FSYNC_MANUAL_SERVER; - return server->fsync_idx; -} - static struct fd *console_server_get_fd( struct object* obj ) { struct console_server *server = (struct console_server*)obj; @@ -966,6 +967,17 @@ static struct object *console_server_open_file( struct object *obj, unsigned int return grab_object( obj ); } +static struct fast_sync *console_server_get_fast_sync( struct object *obj ) +{ + struct console_server *server = (struct console_server *)obj; + int signaled = !server->console || !list_empty( &server->queue ); + + if (!server->fast_sync) + server->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled ); + if (server->fast_sync) grab_object( server->fast_sync ); + return server->fast_sync; +} + static struct object *create_console_server( void ) { struct console_server *server; @@ -977,6 +989,7 @@ static struct object *create_console_server( void ) server->term_fd = -1; list_init( &server->queue ); list_init( &server->read_queue ); + server->fast_sync = NULL; server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT ); if (!server->fd) { @@ -984,14 +997,6 @@ static struct object *create_console_server( void ) return NULL; } allow_fd_caching(server->fd); - server->esync_fd = -1; - server->fsync_idx = 0; - - if (do_fsync()) - server->fsync_idx = 
fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - server->esync_fd = esync_create_fd( 0, 0 ); return &server->obj; } @@ -1450,6 +1455,16 @@ static struct object *console_input_open_file( struct object *obj, unsigned int return grab_object( obj ); } +static struct fast_sync *console_input_get_fast_sync( struct object *obj ) +{ + if (!current->process->console) + { + set_error( STATUS_ACCESS_DENIED ); + return NULL; + } + return console_get_fast_sync( &current->process->console->obj ); +} + static void console_input_destroy( struct object *obj ) { struct console_input *console_input = (struct console_input *)obj; @@ -1522,6 +1537,16 @@ static struct object *console_output_open_file( struct object *obj, unsigned int return grab_object( obj ); } +static struct fast_sync *console_output_get_fast_sync( struct object *obj ) +{ + if (!current->process->console || !current->process->console->active) + { + set_error( STATUS_ACCESS_DENIED ); + return NULL; + } + return console_get_fast_sync( &current->process->console->obj ); +} + static void console_output_destroy( struct object *obj ) { struct console_output *console_output = (struct console_output *)obj; @@ -1579,11 +1604,16 @@ DECL_HANDLER(get_next_console_request) if (!server->console->renderer) server->console->renderer = current; - if (!req->signal) server->console->signaled = 0; + if (!req->signal) + { + server->console->signaled = 0; + fast_reset_event( server->console->fast_sync ); + } else if (!server->console->signaled) { server->console->signaled = 1; wake_up( &server->console->obj, 0 ); + fast_set_event( server->console->fast_sync ); } if (req->read) @@ -1605,10 +1635,8 @@ DECL_HANDLER(get_next_console_request) /* set result of previous ioctl */ ioctl = LIST_ENTRY( list_head( &server->queue ), struct console_host_ioctl, entry ); list_remove( &ioctl->entry ); - if (do_fsync() && list_empty( &server->queue )) - fsync_clear( &server->obj ); - if (do_esync() && list_empty( &server->queue )) - esync_clear( server->esync_fd ); + if
(list_empty( &server->queue )) + fast_reset_event( server->fast_sync ); } if (ioctl) @@ -1694,10 +1722,8 @@ DECL_HANDLER(get_next_console_request) { set_error( STATUS_PENDING ); } - if (do_fsync() && list_empty( &server->queue )) - fsync_clear( &server->obj ); - if (do_esync() && list_empty( &server->queue )) - esync_clear( server->esync_fd ); + if (list_empty( &server->queue )) + fast_reset_event( server->fast_sync ); release_object( server ); } diff --git a/server/debugger.c b/server/debugger.c index b0cd35604d2..04172b7e66d 100644 --- a/server/debugger.c +++ b/server/debugger.c @@ -71,6 +71,7 @@ struct debug_obj struct object obj; /* object header */ struct list event_queue; /* pending events queue */ unsigned int flags; /* debug flags */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; @@ -86,8 +87,6 @@ static const struct object_ops debug_event_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ debug_event_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -100,12 +99,14 @@ static const struct object_ops debug_event_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ debug_event_destroy /* destroy */ }; static void debug_obj_dump( struct object *obj, int verbose ); static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entry ); +static struct fast_sync *debug_obj_get_fast_sync( struct object *obj ); static void debug_obj_destroy( struct object *obj ); static const struct object_ops debug_obj_ops = @@ -116,8 +117,6 @@ static const struct object_ops debug_obj_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ debug_obj_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ 
no_get_fd, /* get_fd */ @@ -130,6 +129,7 @@ static const struct object_ops debug_obj_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + debug_obj_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ debug_obj_destroy /* destroy */ }; @@ -257,6 +257,7 @@ static void link_event( struct debug_obj *debug_obj, struct debug_event *event ) /* grab reference since debugger could be killed while trying to wake up */ grab_object( debug_obj ); wake_up( &debug_obj->obj, 0 ); + fast_set_event( debug_obj->fast_sync ); release_object( debug_obj ); } } @@ -269,6 +270,7 @@ static void resume_event( struct debug_obj *debug_obj, struct debug_event *event { grab_object( debug_obj ); wake_up( &debug_obj->obj, 0 ); + fast_set_event( debug_obj->fast_sync ); release_object( debug_obj ); } } @@ -334,6 +336,17 @@ static int debug_obj_signaled( struct object *obj, struct wait_queue_entry *entr return find_event_to_send( debug_obj ) != NULL; } +static struct fast_sync *debug_obj_get_fast_sync( struct object *obj ) +{ + struct debug_obj *debug_obj = (struct debug_obj *)obj; + int signaled = find_event_to_send( debug_obj ) != NULL; + + if (!debug_obj->fast_sync) + debug_obj->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, signaled ); + if (debug_obj->fast_sync) grab_object( debug_obj->fast_sync ); + return debug_obj->fast_sync; +} + static void debug_obj_destroy( struct object *obj ) { struct list *ptr; @@ -346,6 +359,8 @@ static void debug_obj_destroy( struct object *obj ) /* free all pending events */ while ((ptr = list_head( &debug_obj->event_queue ))) unlink_event( debug_obj, LIST_ENTRY( ptr, struct debug_event, entry )); + + if (debug_obj->fast_sync) release_object( debug_obj->fast_sync ); } struct debug_obj *get_debug_obj( struct process *process, obj_handle_t handle, unsigned int access ) @@ -365,6 +380,7 @@ static struct debug_obj *create_debug_obj( struct object *root, const struct uni { 
debug_obj->flags = flags; list_init( &debug_obj->event_queue ); + debug_obj->fast_sync = NULL; } } return debug_obj; @@ -573,6 +589,9 @@ DECL_HANDLER(wait_debug_event) reply->tid = get_thread_id( event->sender ); alloc_event_handles( event, current->process ); set_reply_data( &event->data, min( get_reply_max_size(), sizeof(event->data) )); + + if (!find_event_to_send( debug_obj )) + fast_reset_event( debug_obj->fast_sync ); } else { diff --git a/server/device.c b/server/device.c index 7e9911efb6a..698fee63f03 100644 --- a/server/device.c +++ b/server/device.c @@ -38,8 +38,6 @@ #include "handle.h" #include "request.h" #include "process.h" -#include "esync.h" -#include "fsync.h" /* IRP object */ @@ -68,8 +66,6 @@ static const struct object_ops irp_call_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* satisfied */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -82,6 +78,7 @@ static const struct object_ops irp_call_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ irp_call_destroy /* destroy */ }; @@ -96,14 +93,12 @@ struct device_manager struct list requests; /* list of pending irps across all devices */ struct irp_call *current_call; /* call currently executed on client side */ struct wine_rb_tree kernel_objects; /* map of objects that have client side pointer associated */ - int esync_fd; /* esync file descriptor */ - unsigned int fsync_idx; + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void device_manager_dump( struct object *obj, int verbose ); static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int device_manager_get_fsync_idx( struct object *obj, enum fsync_type *type ); 
+static struct fast_sync *device_manager_get_fast_sync( struct object *obj ); static void device_manager_destroy( struct object *obj ); static const struct object_ops device_manager_ops = @@ -114,8 +109,6 @@ static const struct object_ops device_manager_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ device_manager_signaled, /* signaled */ - device_manager_get_esync_fd, /* get_esync_fd */ - device_manager_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -128,6 +121,7 @@ static const struct object_ops device_manager_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + device_manager_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ device_manager_destroy /* destroy */ }; @@ -173,8 +167,6 @@ static const struct object_ops device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -187,6 +179,7 @@ static const struct object_ops device_ops = default_unlink_name, /* unlink_name */ device_open_file, /* open_file */ device_get_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ device_destroy /* destroy */ }; @@ -227,8 +220,6 @@ static const struct object_ops device_file_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ device_file_get_fd, /* get_fd */ @@ -241,6 +232,7 @@ static const struct object_ops device_file_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ device_file_get_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ device_file_close_handle, /* close_handle */ 
device_file_destroy /* destroy */ }; @@ -431,7 +423,12 @@ static void add_irp_to_queue( struct device_manager *manager, struct irp_call *i irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL; if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry ); list_add_tail( &manager->requests, &irp->mgr_entry ); - if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 ); /* first one */ + if (list_head( &manager->requests ) == &irp->mgr_entry) + { + /* first one */ + wake_up( &manager->obj, 0 ); + fast_set_event( manager->fast_sync ); + } } static struct object *device_open_file( struct object *obj, unsigned int access, @@ -761,16 +758,13 @@ static void delete_file( struct device_file *file ) /* terminate all pending requests */ LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry ) { - if (do_fsync() && file->device->manager && list_empty( &file->device->manager->requests )) - fsync_clear( &file->device->manager->obj ); - - if (do_esync() && file->device->manager && list_empty( &file->device->manager->requests )) - esync_clear( file->device->manager->esync_fd ); - list_remove( &irp->mgr_entry ); set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 ); } + if (list_empty( &file->device->manager->requests )) + fast_reset_event( file->device->manager->fast_sync ); + release_object( file ); } @@ -802,18 +796,14 @@ static int device_manager_signaled( struct object *obj, struct wait_queue_entry return !list_empty( &manager->requests ); } -static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type ) +static struct fast_sync *device_manager_get_fast_sync( struct object *obj ) { struct device_manager *manager = (struct device_manager *)obj; - *type = ESYNC_MANUAL_SERVER; - return manager->esync_fd; -} -static unsigned int device_manager_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct device_manager *manager = (struct device_manager *)obj; - *type = 
FSYNC_MANUAL_SERVER; - return manager->fsync_idx; + if (!manager->fast_sync) + manager->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !list_empty( &manager->requests ) ); + if (manager->fast_sync) grab_object( manager->fast_sync ); + return manager->fast_sync; } static void device_manager_destroy( struct object *obj ) @@ -851,9 +841,7 @@ static void device_manager_destroy( struct object *obj ) release_object( irp ); } - if (do_esync()) - close( manager->esync_fd ); - if (manager->fsync_idx) fsync_free_shm_idx( manager->fsync_idx ); + if (manager->fast_sync) release_object( manager->fast_sync ); } static struct device_manager *create_device_manager(void) @@ -863,16 +851,10 @@ static struct device_manager *create_device_manager(void) if ((manager = alloc_object( &device_manager_ops ))) { manager->current_call = NULL; + manager->fast_sync = NULL; list_init( &manager->devices ); list_init( &manager->requests ); wine_rb_init( &manager->kernel_objects, compare_kernel_object ); - manager->fsync_idx = 0; - - if (do_fsync()) - manager->fsync_idx = fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - manager->esync_fd = esync_create_fd( 0, 0 ); } return manager; } @@ -1059,15 +1041,13 @@ DECL_HANDLER(get_next_device_request) } list_remove( &irp->mgr_entry ); list_init( &irp->mgr_entry ); + + if (list_empty( &manager->requests )) + fast_reset_event( manager->fast_sync ); + /* we already own the object if it's only on manager queue */ if (irp->file) grab_object( irp ); manager->current_call = irp; - - if (do_fsync() && list_empty( &manager->requests )) - fsync_clear( &manager->obj ); - - if (do_esync() && list_empty( &manager->requests )) - esync_clear( manager->esync_fd ); } else close_handle( current->process, reply->next ); } diff --git a/server/directory.c b/server/directory.c index 878941cddbf..91fcf468ed7 100644 --- a/server/directory.c +++ b/server/directory.c @@ -70,8 +70,6 @@ static const struct object_ops object_type_ops = no_add_queue, /* add_queue */ NULL, /* 
remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -84,6 +82,7 @@ static const struct object_ops object_type_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; @@ -122,8 +121,6 @@ static const struct object_ops directory_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -136,6 +133,7 @@ static const struct object_ops directory_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ directory_destroy /* destroy */ }; diff --git a/server/esync.c b/server/esync.c deleted file mode 100644 index a5164435ed6..00000000000 --- a/server/esync.c +++ /dev/null @@ -1,591 +0,0 @@ -/* - * eventfd-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -#include "config.h" - - -#include -#include -#include -#include -#ifdef HAVE_SYS_EVENTFD_H -# include -#endif -#include -#ifdef HAVE_SYS_STAT_H -# include -#endif -#include - -#include "ntstatus.h" -#define WIN32_NO_STATUS -#include "windef.h" -#include "winternl.h" - -#include "handle.h" -#include "request.h" -#include "file.h" -#include "esync.h" -#include "fsync.h" - -int do_esync(void) -{ -#ifdef HAVE_SYS_EVENTFD_H - static int do_esync_cached = -1; - - if (do_esync_cached == -1) - do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC")) && !do_fsync(); - - return do_esync_cached; -#else - return 0; -#endif -} - -static char shm_name[29]; -static int shm_fd; -static off_t shm_size; -static void **shm_addrs; -static int shm_addrs_size; /* length of the allocated shm_addrs array */ -static long pagesize; - -static void shm_cleanup(void) -{ - close( shm_fd ); - if (shm_unlink( shm_name ) == -1) - perror( "shm_unlink" ); -} - -void esync_init(void) -{ - struct stat st; - - if (fstat( config_dir_fd, &st ) == -1) - fatal_error( "cannot stat config dir\n" ); - - if (st.st_ino != (unsigned long)st.st_ino) - sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); - else - sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino ); - - shm_unlink( shm_name ); - - shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 ); - if (shm_fd == -1) - perror( "shm_open" ); - - pagesize = sysconf( _SC_PAGESIZE ); - - shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); - shm_addrs_size = 128; - - shm_size = pagesize; - if (ftruncate( shm_fd, shm_size ) == -1) - perror( "ftruncate" ); - - fprintf( stderr, "esync: up and running.\n" ); - - atexit( shm_cleanup ); -} - 
-static struct list mutex_list = LIST_INIT(mutex_list); - -struct esync -{ - struct object obj; /* object header */ - int fd; /* eventfd file descriptor */ - enum esync_type type; - unsigned int shm_idx; /* index into the shared memory section */ - struct list mutex_entry; /* entry in the mutex list (if applicable) */ -}; - -static void esync_dump( struct object *obj, int verbose ); -static int esync_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int esync_map_access( struct object *obj, unsigned int access ); -static void esync_destroy( struct object *obj ); - -const struct object_ops esync_ops = -{ - sizeof(struct esync), /* size */ - &no_type, /* type */ - esync_dump, /* dump */ - no_add_queue, /* add_queue */ - NULL, /* remove_queue */ - NULL, /* signaled */ - esync_get_esync_fd, /* get_esync_fd */ - NULL, /* get_fsync_idx */ - NULL, /* satisfied */ - no_signal, /* signal */ - no_get_fd, /* get_fd */ - esync_map_access, /* map_access */ - default_get_sd, /* get_sd */ - default_set_sd, /* set_sd */ - default_get_full_name, /* get_full_name */ - no_lookup_name, /* lookup_name */ - directory_link_name, /* link_name */ - default_unlink_name, /* unlink_name */ - no_open_file, /* open_file */ - no_kernel_obj_list, /* get_kernel_obj_list */ - no_close_handle, /* close_handle */ - esync_destroy /* destroy */ -}; - -static void esync_dump( struct object *obj, int verbose ) -{ - struct esync *esync = (struct esync *)obj; - assert( obj->ops == &esync_ops ); - fprintf( stderr, "esync fd=%d\n", esync->fd ); -} - -static int esync_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct esync *esync = (struct esync *)obj; - *type = esync->type; - return esync->fd; -} - -static unsigned int esync_map_access( struct object *obj, unsigned int access ) -{ - /* Sync objects have the same flags. 
*/ - if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE; - if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE; - if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE; - if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE; - return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL); -} - -static void esync_destroy( struct object *obj ) -{ - struct esync *esync = (struct esync *)obj; - if (esync->type == ESYNC_MUTEX) - list_remove( &esync->mutex_entry ); - close( esync->fd ); -} - -static int type_matches( enum esync_type type1, enum esync_type type2 ) -{ - return (type1 == type2) || - ((type1 == ESYNC_AUTO_EVENT || type1 == ESYNC_MANUAL_EVENT) && - (type2 == ESYNC_AUTO_EVENT || type2 == ESYNC_MANUAL_EVENT)); -} - -static void *get_shm( unsigned int idx ) -{ - int entry = (idx * 8) / pagesize; - int offset = (idx * 8) % pagesize; - - if (entry >= shm_addrs_size) - { - int new_size = max(shm_addrs_size * 2, entry + 1); - - if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) - fprintf( stderr, "esync: couldn't expand shm_addrs array to size %d\n", entry + 1 ); - - memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); - - shm_addrs_size = new_size; - } - - if (!shm_addrs[entry]) - { - void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize ); - if (addr == (void *)-1) - { - fprintf( stderr, "esync: failed to map page %d (offset %#lx): ", entry, entry * pagesize ); - perror( "mmap" ); - } - - if (debug_level) - fprintf( stderr, "esync: Mapping page %d at %p.\n", entry, addr ); - - if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) - munmap( addr, pagesize ); /* someone beat us to it */ - } - - return (void *)((unsigned long)shm_addrs[entry] + offset); -} - -struct semaphore -{ - int max; - int count; -}; 
-C_ASSERT(sizeof(struct semaphore) == 8); - -struct mutex -{ - DWORD tid; - int count; /* recursion count */ -}; -C_ASSERT(sizeof(struct mutex) == 8); - -struct event -{ - int signaled; - int locked; -}; -C_ASSERT(sizeof(struct event) == 8); - -struct esync *create_esync( struct object *root, const struct unicode_str *name, - unsigned int attr, int initval, int max, enum esync_type type, - const struct security_descriptor *sd ) -{ -#ifdef HAVE_SYS_EVENTFD_H - struct esync *esync; - - if ((esync = create_named_object( root, &esync_ops, name, attr, sd ))) - { - if (get_error() != STATUS_OBJECT_NAME_EXISTS) - { - int flags = EFD_CLOEXEC | EFD_NONBLOCK; - - if (type == ESYNC_SEMAPHORE) - flags |= EFD_SEMAPHORE; - - /* initialize it if it didn't already exist */ - esync->fd = eventfd( initval, flags ); - if (esync->fd == -1) - { - perror( "eventfd" ); - file_set_error(); - release_object( esync ); - return NULL; - } - esync->type = type; - - /* Use the fd as index, since that'll be unique across all - * processes, but should hopefully end up also allowing reuse. */ - esync->shm_idx = esync->fd + 1; /* we keep index 0 reserved */ - while (esync->shm_idx * 8 >= shm_size) - { - /* Better expand the shm section. */ - shm_size += pagesize; - if (ftruncate( shm_fd, shm_size ) == -1) - { - fprintf( stderr, "esync: couldn't expand %s to size %ld: ", - shm_name, (long)shm_size ); - perror( "ftruncate" ); - } - } - - /* Initialize the shared memory portion. We want to do this on the - * server side to avoid a potential though unlikely race whereby - * the same object is opened and used between the time it's created - * and the time its shared memory portion is initialized. 
*/ - switch (type) - { - case ESYNC_SEMAPHORE: - { - struct semaphore *semaphore = get_shm( esync->shm_idx ); - semaphore->max = max; - semaphore->count = initval; - break; - } - case ESYNC_AUTO_EVENT: - case ESYNC_MANUAL_EVENT: - { - struct event *event = get_shm( esync->shm_idx ); - event->signaled = initval ? 1 : 0; - event->locked = 0; - break; - } - case ESYNC_MUTEX: - { - struct mutex *mutex = get_shm( esync->shm_idx ); - mutex->tid = initval ? 0 : current->id; - mutex->count = initval ? 0 : 1; - list_add_tail( &mutex_list, &esync->mutex_entry ); - break; - } - default: - assert( 0 ); - } - } - else - { - /* validate the type */ - if (!type_matches( type, esync->type )) - { - release_object( &esync->obj ); - set_error( STATUS_OBJECT_TYPE_MISMATCH ); - return NULL; - } - } - } - return esync; -#else - /* FIXME: Provide a fallback implementation using pipe(). */ - set_error( STATUS_NOT_IMPLEMENTED ); - return NULL; -#endif -} - -/* Create a file descriptor for an existing handle. - * Caller must close the handle when it's done; it's not linked to an esync - * server object in any way. */ -int esync_create_fd( int initval, int flags ) -{ -#ifdef HAVE_SYS_EVENTFD_H - int fd; - - fd = eventfd( initval, flags | EFD_CLOEXEC | EFD_NONBLOCK ); - if (fd == -1) - perror( "eventfd" ); - - return fd; -#else - return -1; -#endif -} - -/* Wake up a specific fd. */ -void esync_wake_fd( int fd ) -{ - static const uint64_t value = 1; - - if (write( fd, &value, sizeof(value) ) == -1) - perror( "esync: write" ); -} - -/* Wake up a server-side esync object. 
*/ -void esync_wake_up( struct object *obj ) -{ - enum esync_type dummy; - int fd; - - if (obj->ops->get_esync_fd) - { - fd = obj->ops->get_esync_fd( obj, &dummy ); - esync_wake_fd( fd ); - } -} - -void esync_clear( int fd ) -{ - uint64_t value; - - /* we don't care about the return value */ - read( fd, &value, sizeof(value) ); -} - -static inline void small_pause(void) -{ -#ifdef __i386__ - __asm__ __volatile__( "rep;nop" : : : "memory" ); -#else - __asm__ __volatile__( "" : : : "memory" ); -#endif -} - -/* Server-side event support. */ -void esync_set_event( struct esync *esync ) -{ - static const uint64_t value = 1; - struct event *event = get_shm( esync->shm_idx ); - - assert( esync->obj.ops == &esync_ops ); - assert( event != NULL ); - - if (debug_level) - fprintf( stderr, "esync_set_event() fd=%d\n", esync->fd ); - - if (esync->type == ESYNC_MANUAL_EVENT) - { - /* Acquire the spinlock. */ - while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) - small_pause(); - } - - if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) - { - if (write( esync->fd, &value, sizeof(value) ) == -1) - perror( "esync: write" ); - } - - if (esync->type == ESYNC_MANUAL_EVENT) - { - /* Release the spinlock. */ - event->locked = 0; - } -} - -void esync_reset_event( struct esync *esync ) -{ - static uint64_t value = 1; - struct event *event = get_shm( esync->shm_idx ); - - assert( esync->obj.ops == &esync_ops ); - assert( event != NULL ); - - if (debug_level) - fprintf( stderr, "esync_reset_event() fd=%d\n", esync->fd ); - - if (esync->type == ESYNC_MANUAL_EVENT) - { - /* Acquire the spinlock. */ - while (__sync_val_compare_and_swap( &event->locked, 0, 1 )) - small_pause(); - } - - /* Only bother signaling the fd if we weren't already signaled. 
*/ - if (__atomic_exchange_n( &event->signaled, 0, __ATOMIC_SEQ_CST )) - { - /* we don't care about the return value */ - read( esync->fd, &value, sizeof(value) ); - } - - if (esync->type == ESYNC_MANUAL_EVENT) - { - /* Release the spinlock. */ - event->locked = 0; - } -} - -void esync_abandon_mutexes( struct thread *thread ) -{ - struct esync *esync; - - LIST_FOR_EACH_ENTRY( esync, &mutex_list, struct esync, mutex_entry ) - { - struct mutex *mutex = get_shm( esync->shm_idx ); - - if (mutex->tid == thread->id) - { - if (debug_level) - fprintf( stderr, "esync_abandon_mutexes() fd=%d\n", esync->fd ); - mutex->tid = ~0; - mutex->count = 0; - esync_wake_fd( esync->fd ); - } - } -} - -DECL_HANDLER(create_esync) -{ - struct esync *esync; - struct unicode_str name; - struct object *root; - const struct security_descriptor *sd; - const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root ); - - if (!do_esync()) - { - set_error( STATUS_NOT_IMPLEMENTED ); - return; - } - - if (!req->type) - { - set_error( STATUS_INVALID_PARAMETER ); - return; - } - - if (!objattr) return; - - if ((esync = create_esync( root, &name, objattr->attributes, req->initval, req->max, req->type, sd ))) - { - if (get_error() == STATUS_OBJECT_NAME_EXISTS) - reply->handle = alloc_handle( current->process, esync, req->access, objattr->attributes ); - else - reply->handle = alloc_handle_no_access_check( current->process, esync, - req->access, objattr->attributes ); - - reply->type = esync->type; - reply->shm_idx = esync->shm_idx; - send_client_fd( current->process, esync->fd, reply->handle ); - release_object( esync ); - } - - if (root) release_object( root ); -} - -DECL_HANDLER(open_esync) -{ - struct unicode_str name = get_req_unicode_str(); - - reply->handle = open_object( current->process, req->rootdir, req->access, - &esync_ops, &name, req->attributes ); - - /* send over the fd */ - if (reply->handle) - { - struct esync *esync; - - if (!(esync = (struct esync 
*)get_handle_obj( current->process, reply->handle, - 0, &esync_ops ))) - return; - - if (!type_matches( req->type, esync->type )) - { - set_error( STATUS_OBJECT_TYPE_MISMATCH ); - release_object( esync ); - return; - } - - reply->type = esync->type; - reply->shm_idx = esync->shm_idx; - - send_client_fd( current->process, esync->fd, reply->handle ); - release_object( esync ); - } -} - -/* Retrieve a file descriptor for an esync object which will be signaled by the - * server. The client should only read from (i.e. wait on) this object. */ -DECL_HANDLER(get_esync_fd) -{ - struct object *obj; - enum esync_type type; - int fd; - - if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL ))) - return; - - if (obj->ops->get_esync_fd) - { - fd = obj->ops->get_esync_fd( obj, &type ); - reply->type = type; - if (obj->ops == &esync_ops) - { - struct esync *esync = (struct esync *)obj; - reply->shm_idx = esync->shm_idx; - } - else - reply->shm_idx = 0; - send_client_fd( current->process, fd, req->handle ); - } - else - { - if (debug_level) - { - fprintf( stderr, "%04x: esync: can't wait on object: ", current->id ); - obj->ops->dump( obj, 0 ); - } - set_error( STATUS_NOT_IMPLEMENTED ); - } - - release_object( obj ); -} - -/* Return the fd used for waiting on user APCs. */ -DECL_HANDLER(get_esync_apc_fd) -{ - send_client_fd( current->process, current->esync_apc_fd, current->id ); -} diff --git a/server/esync.h b/server/esync.h deleted file mode 100644 index d39f4efa3ec..00000000000 --- a/server/esync.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * eventfd-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -#include - -extern int do_esync(void); -void esync_init(void); -int esync_create_fd( int initval, int flags ); -void esync_wake_fd( int fd ); -void esync_wake_up( struct object *obj ); -void esync_clear( int fd ); - -struct esync; - -extern const struct object_ops esync_ops; -void esync_set_event( struct esync *esync ); -void esync_reset_event( struct esync *esync ); -void esync_abandon_mutexes( struct thread *thread ); diff --git a/server/event.c b/server/event.c index b93a9960ad2..7f2b2c6a370 100644 --- a/server/event.c +++ b/server/event.c @@ -35,8 +35,6 @@ #include "thread.h" #include "request.h" #include "security.h" -#include "esync.h" -#include "fsync.h" static const WCHAR event_name[] = {'E','v','e','n','t'}; @@ -58,17 +56,15 @@ struct event struct list kernel_object; /* list of kernel object pointers */ int manual_reset; /* is it a manual reset event? 
*/ int signaled; /* event has been signaled */ - int esync_fd; /* esync file descriptor */ - unsigned int fsync_idx; + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void event_dump( struct object *obj, int verbose ); static int event_signaled( struct object *obj, struct wait_queue_entry *entry ); static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ); -static int event_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int event_get_fsync_idx( struct object *obj, enum fsync_type *type ); static int event_signal( struct object *obj, unsigned int access); static struct list *event_get_kernel_obj_list( struct object *obj ); +static struct fast_sync *event_get_fast_sync( struct object *obj ); static void event_destroy( struct object *obj ); static const struct object_ops event_ops = @@ -79,8 +75,6 @@ static const struct object_ops event_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ event_signaled, /* signaled */ - event_get_esync_fd, /* get_esync_fd */ - event_get_fsync_idx, /* get_fsync_idx */ event_satisfied, /* satisfied */ event_signal, /* signal */ no_get_fd, /* get_fd */ @@ -93,6 +87,7 @@ static const struct object_ops event_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ event_get_kernel_obj_list, /* get_kernel_obj_list */ + event_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ event_destroy /* destroy */ }; @@ -115,10 +110,13 @@ struct type_descr keyed_event_type = struct keyed_event { struct object obj; /* object header */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void keyed_event_dump( struct object *obj, int verbose ); static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *entry ); +static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ); +static void keyed_event_destroy( struct object *obj ); static const struct object_ops keyed_event_ops 
= { @@ -128,8 +126,6 @@ static const struct object_ops keyed_event_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ keyed_event_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -142,8 +138,9 @@ static const struct object_ops keyed_event_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + keyed_event_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ - no_destroy /* destroy */ + keyed_event_destroy /* destroy */ }; @@ -161,13 +158,7 @@ struct event *create_event( struct object *root, const struct unicode_str *name, list_init( &event->kernel_object ); event->manual_reset = manual_reset; event->signaled = initial_state; - event->fsync_idx = 0; - - if (do_fsync()) - event->fsync_idx = fsync_alloc_shm( initial_state, 0 ); - - if (do_esync()) - event->esync_fd = esync_create_fd( initial_state, 0 ); + event->fast_sync = NULL; } } return event; @@ -175,14 +166,6 @@ struct event *create_event( struct object *root, const struct unicode_str *name, struct event *get_event_obj( struct process *process, obj_handle_t handle, unsigned int access ) { - struct object *obj; - - if (do_fsync() && (obj = get_handle_obj( process, handle, access, &fsync_ops))) - return (struct event *)obj; /* even though it's not an event */ - - if (do_esync() && (obj = get_handle_obj( process, handle, access, &esync_ops))) - return (struct event *)obj; /* even though it's not an event */ - return (struct event *)get_handle_obj( process, handle, access, &event_ops ); } @@ -193,49 +176,20 @@ static void pulse_event( struct event *event ) wake_up( &event->obj, !event->manual_reset ); event->signaled = 0; - if (do_fsync()) - fsync_clear( &event->obj ); } void set_event( struct event *event ) { - if (do_fsync() && event->obj.ops == &fsync_ops) - { - fsync_set_event( (struct fsync *)event ); - 
return; - } - - if (do_esync() && event->obj.ops == &esync_ops) - { - esync_set_event( (struct esync *)event ); - return; - } - event->signaled = 1; /* wake up all waiters if manual reset, a single one otherwise */ wake_up( &event->obj, !event->manual_reset ); + fast_set_event( event->fast_sync ); } void reset_event( struct event *event ) { - if (do_fsync() && event->obj.ops == &fsync_ops) - { - fsync_reset_event( (struct fsync *)event ); - return; - } - - if (do_esync() && event->obj.ops == &esync_ops) - { - esync_reset_event( (struct esync *)event ); - return; - } event->signaled = 0; - - if (do_fsync()) - fsync_clear( &event->obj ); - - if (do_esync()) - esync_clear( event->esync_fd ); + fast_reset_event( event->fast_sync ); } static void event_dump( struct object *obj, int verbose ) @@ -253,20 +207,6 @@ static int event_signaled( struct object *obj, struct wait_queue_entry *entry ) return event->signaled; } -static int event_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct event *event = (struct event *)obj; - *type = event->manual_reset ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; - return event->esync_fd; -} - -static unsigned int event_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct event *event = (struct event *)obj; - *type = FSYNC_MANUAL_SERVER; - return event->fsync_idx; -} - static void event_satisfied( struct object *obj, struct wait_queue_entry *entry ) { struct event *event = (struct event *)obj; @@ -295,13 +235,24 @@ static struct list *event_get_kernel_obj_list( struct object *obj ) return &event->kernel_object; } +static struct fast_sync *event_get_fast_sync( struct object *obj ) +{ + struct event *event = (struct event *)obj; + + if (!event->fast_sync) + { + enum fast_sync_type type = event->manual_reset ? 
FAST_SYNC_MANUAL_EVENT : FAST_SYNC_AUTO_EVENT; + event->fast_sync = fast_create_event( type, event->signaled ); + } + if (event->fast_sync) grab_object( event->fast_sync ); + return event->fast_sync; +} + static void event_destroy( struct object *obj ) { struct event *event = (struct event *)obj; - if (do_esync()) - close( event->esync_fd ); - if (event->fsync_idx) fsync_free_shm_idx( event->fsync_idx ); + if (event->fast_sync) release_object( event->fast_sync ); } struct keyed_event *create_keyed_event( struct object *root, const struct unicode_str *name, @@ -314,6 +265,7 @@ struct keyed_event *create_keyed_event( struct object *root, const struct unicod if (get_error() != STATUS_OBJECT_NAME_EXISTS) { /* initialize it if it didn't already exist */ + event->fast_sync = NULL; } } return event; @@ -357,6 +309,23 @@ static int keyed_event_signaled( struct object *obj, struct wait_queue_entry *en return 0; } +static struct fast_sync *keyed_event_get_fast_sync( struct object *obj ) +{ + struct keyed_event *event = (struct keyed_event *)obj; + + if (!event->fast_sync) + event->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, 1 ); + if (event->fast_sync) grab_object( event->fast_sync ); + return event->fast_sync; +} + +static void keyed_event_destroy( struct object *obj ) +{ + struct keyed_event *event = (struct keyed_event *)obj; + + if (event->fast_sync) release_object( event->fast_sync ); +} + /* create an event */ DECL_HANDLER(create_event) { diff --git a/server/fast_sync.c b/server/fast_sync.c new file mode 100644 index 00000000000..fed6eb90639 --- /dev/null +++ b/server/fast_sync.c @@ -0,0 +1,434 @@ +/* + * Fast synchronization primitives + * + * Copyright (C) 2021-2022 Elizabeth Figura for CodeWeavers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later 
version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + */ + +#include "config.h" + +#include +#include +#include +#include + +#include "ntstatus.h" +#define WIN32_NO_STATUS +#include "winternl.h" + +#include "file.h" +#include "handle.h" +#include "request.h" +#include "thread.h" + +#ifdef HAVE_LINUX_NTSYNC_H + +#include +#include +#include +#include +#include + +struct linux_device +{ + struct object obj; /* object header */ + struct fd *fd; /* fd for unix fd */ +}; + +static struct linux_device *linux_device_object; + +static void linux_device_dump( struct object *obj, int verbose ); +static struct fd *linux_device_get_fd( struct object *obj ); +static void linux_device_destroy( struct object *obj ); +static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ); + +static const struct object_ops linux_device_ops = +{ + sizeof(struct linux_device), /* size */ + &no_type, /* type */ + linux_device_dump, /* dump */ + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ + NULL, /* satisfied */ + no_signal, /* signal */ + linux_device_get_fd, /* get_fd */ + default_map_access, /* map_access */ + default_get_sd, /* get_sd */ + default_set_sd, /* set_sd */ + no_get_full_name, /* get_full_name */ + no_lookup_name, /* lookup_name */ + no_link_name, /* link_name */ + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + linux_device_destroy /* destroy */ +}; + +static const struct 
fd_ops fast_sync_fd_ops = +{ + default_fd_get_poll_events, /* get_poll_events */ + default_poll_event, /* poll_event */ + fast_sync_get_fd_type, /* get_fd_type */ + no_fd_read, /* read */ + no_fd_write, /* write */ + no_fd_flush, /* flush */ + no_fd_get_file_info, /* get_file_info */ + no_fd_get_volume_info, /* get_volume_info */ + no_fd_ioctl, /* ioctl */ + default_fd_cancel_async, /* cancel_async */ + no_fd_queue_async, /* queue_async */ + default_fd_reselect_async /* reselect_async */ +}; + +static void linux_device_dump( struct object *obj, int verbose ) +{ + struct linux_device *device = (struct linux_device *)obj; + assert( obj->ops == &linux_device_ops ); + fprintf( stderr, "Fast synchronization device fd=%p\n", device->fd ); +} + +static struct fd *linux_device_get_fd( struct object *obj ) +{ + struct linux_device *device = (struct linux_device *)obj; + return (struct fd *)grab_object( device->fd ); +} + +static void linux_device_destroy( struct object *obj ) +{ + struct linux_device *device = (struct linux_device *)obj; + assert( obj->ops == &linux_device_ops ); + if (device->fd) release_object( device->fd ); + linux_device_object = NULL; +} + +static enum server_fd_type fast_sync_get_fd_type( struct fd *fd ) +{ + return FD_TYPE_FILE; +} + +static struct linux_device *get_linux_device(void) +{ + struct linux_device *device; + static int initialized; + int unix_fd; + + if (initialized) + { + if (linux_device_object) + grab_object( linux_device_object ); + else + set_error( STATUS_NOT_IMPLEMENTED ); + return linux_device_object; + } + + if (getenv( "WINE_DISABLE_FAST_SYNC" ) && atoi( getenv( "WINE_DISABLE_FAST_SYNC" ) )) + { + static int once; + set_error( STATUS_NOT_IMPLEMENTED ); + if (!once++) fprintf(stderr, "ntsync is explicitly disabled.\n"); + initialized = 1; + return NULL; + } + + unix_fd = open( "/dev/ntsync", O_CLOEXEC | O_RDONLY ); + if (unix_fd == -1) + { + static int once; + file_set_error(); + if (!once++) fprintf(stderr, "Cannot open 
/dev/ntsync: %s\n", strerror(errno)); + initialized = 1; + return NULL; + } + + if (!(device = alloc_object( &linux_device_ops ))) + { + close( unix_fd ); + set_error( STATUS_NO_MEMORY ); + initialized = 1; + return NULL; + } + + if (!(device->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &device->obj, 0 ))) + { + release_object( device ); + initialized = 1; + return NULL; + } + + fprintf( stderr, "wine: using fast synchronization.\n" ); + linux_device_object = device; + initialized = 1; + return device; +} + +struct fast_sync +{ + struct object obj; + enum fast_sync_type type; + struct fd *fd; +}; + +static void linux_obj_dump( struct object *obj, int verbose ); +static void linux_obj_destroy( struct object *obj ); +static struct fd *linux_obj_get_fd( struct object *obj ); + +static const struct object_ops linux_obj_ops = +{ + sizeof(struct fast_sync), /* size */ + &no_type, /* type */ + linux_obj_dump, /* dump */ + no_add_queue, /* add_queue */ + NULL, /* remove_queue */ + NULL, /* signaled */ + NULL, /* satisfied */ + no_signal, /* signal */ + linux_obj_get_fd, /* get_fd */ + default_map_access, /* map_access */ + default_get_sd, /* get_sd */ + default_set_sd, /* set_sd */ + no_get_full_name, /* get_full_name */ + no_lookup_name, /* lookup_name */ + no_link_name, /* link_name */ + NULL, /* unlink_name */ + no_open_file, /* open_file */ + no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ + no_close_handle, /* close_handle */ + linux_obj_destroy /* destroy */ +}; + +static void linux_obj_dump( struct object *obj, int verbose ) +{ + struct fast_sync *fast_sync = (struct fast_sync *)obj; + assert( obj->ops == &linux_obj_ops ); + fprintf( stderr, "Fast synchronization object type=%u fd=%p\n", fast_sync->type, fast_sync->fd ); +} + +static void linux_obj_destroy( struct object *obj ) +{ + struct fast_sync *fast_sync = (struct fast_sync *)obj; + assert( obj->ops == &linux_obj_ops ); + if (fast_sync->fd) release_object( 
fast_sync->fd ); +} + +static struct fd *linux_obj_get_fd( struct object *obj ) +{ + struct fast_sync *fast_sync = (struct fast_sync *)obj; + assert( obj->ops == &linux_obj_ops ); + return (struct fd *)grab_object( fast_sync->fd ); +} + +static struct fast_sync *create_fast_sync( enum fast_sync_type type, int unix_fd ) +{ + struct fast_sync *fast_sync; + + if (!(fast_sync = alloc_object( &linux_obj_ops ))) + { + close( unix_fd ); + return NULL; + } + + fast_sync->type = type; + + if (!(fast_sync->fd = create_anonymous_fd( &fast_sync_fd_ops, unix_fd, &fast_sync->obj, 0 ))) + { + release_object( fast_sync ); + return NULL; + } + + return fast_sync; +} + +struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ) +{ + struct ntsync_event_args args = {0}; + struct linux_device *device; + + if (!(device = get_linux_device())) return NULL; + + args.signaled = signaled; + switch (type) + { + case FAST_SYNC_AUTO_EVENT: + case FAST_SYNC_AUTO_SERVER: + args.manual = 0; + break; + + case FAST_SYNC_MANUAL_EVENT: + case FAST_SYNC_MANUAL_SERVER: + case FAST_SYNC_QUEUE: + args.manual = 1; + break; + + case FAST_SYNC_MUTEX: + case FAST_SYNC_SEMAPHORE: + assert(0); + break; + } + if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_EVENT, &args ) < 0) + { + file_set_error(); + release_object( device ); + return NULL; + } + release_object( device ); + + return create_fast_sync( type, args.event ); +} + +struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ) +{ + struct ntsync_sem_args args = {0}; + struct linux_device *device; + + if (!(device = get_linux_device())) return NULL; + + args.count = count; + args.max = max; + if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_SEM, &args ) < 0) + { + file_set_error(); + release_object( device ); + return NULL; + } + + release_object( device ); + + return create_fast_sync( FAST_SYNC_SEMAPHORE, args.sem ); +} + +struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ) 
+{ + struct ntsync_mutex_args args = {0}; + struct linux_device *device; + + if (!(device = get_linux_device())) return NULL; + + args.owner = owner; + args.count = count; + if (ioctl( get_unix_fd( device->fd ), NTSYNC_IOC_CREATE_MUTEX, &args ) < 0) + { + file_set_error(); + release_object( device ); + return NULL; + } + + release_object( device ); + + return create_fast_sync( FAST_SYNC_MUTEX, args.mutex ); +} + +void fast_set_event( struct fast_sync *fast_sync ) +{ + __u32 count; + + if (!fast_sync) return; + + if (debug_level) fprintf( stderr, "fast_set_event %p\n", fast_sync->fd ); + + ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_SET, &count ); +} + +void fast_reset_event( struct fast_sync *fast_sync ) +{ + __u32 count; + + if (!fast_sync) return; + + if (debug_level) fprintf( stderr, "fast_reset_event %p\n", fast_sync->fd ); + + ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_EVENT_RESET, &count ); +} + +void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ) +{ + ioctl( get_unix_fd( fast_sync->fd ), NTSYNC_IOC_MUTEX_KILL, &tid ); +} + +#else + +struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ) +{ + set_error( STATUS_NOT_IMPLEMENTED ); + return NULL; +} + +struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ) +{ + set_error( STATUS_NOT_IMPLEMENTED ); + return NULL; +} + +struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ) +{ + set_error( STATUS_NOT_IMPLEMENTED ); + return NULL; +} + +void fast_set_event( struct fast_sync *fast_sync ) +{ +} + +void fast_reset_event( struct fast_sync *obj ) +{ +} + +void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ) +{ +} + +#endif + +DECL_HANDLER(get_linux_sync_device) +{ +#ifdef HAVE_LINUX_NTSYNC_H + struct linux_device *device; + + if ((device = get_linux_device())) + { + reply->handle = alloc_handle( current->process, device, 0, 0 ); + release_object( device ); + } +#else + set_error( 
STATUS_NOT_IMPLEMENTED ); +#endif +} + +DECL_HANDLER(get_linux_sync_obj) +{ +#ifdef HAVE_LINUX_NTSYNC_H + struct object *obj; + + if ((obj = get_handle_obj( current->process, req->handle, 0, NULL ))) + { + struct fast_sync *fast_sync; + + if ((fast_sync = obj->ops->get_fast_sync( obj ))) + { + reply->handle = alloc_handle( current->process, fast_sync, 0, 0 ); + reply->type = fast_sync->type; + reply->access = get_handle_access( current->process, req->handle ); + release_object( fast_sync ); + } + release_object( obj ); + } +#else + set_error( STATUS_NOT_IMPLEMENTED ); +#endif +} diff --git a/server/fd.c b/server/fd.c index 85676910e3f..12319ce5839 100644 --- a/server/fd.c +++ b/server/fd.c @@ -96,8 +96,6 @@ #include "handle.h" #include "process.h" #include "request.h" -#include "esync.h" -#include "fsync.h" #include "winternl.h" #include "winioctl.h" @@ -158,8 +156,7 @@ struct fd struct completion *completion; /* completion object attached to this fd */ apc_param_t comp_key; /* completion key to set in completion events */ unsigned int comp_flags; /* completion flags */ - int esync_fd; /* esync file descriptor */ - unsigned int fsync_idx; /* fsync shm index */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void fd_dump( struct object *obj, int verbose ); @@ -173,8 +170,6 @@ static const struct object_ops fd_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -187,6 +182,7 @@ static const struct object_ops fd_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ fd_destroy /* destroy */ }; @@ -216,8 +212,6 @@ static const struct object_ops device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - 
NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -230,6 +224,7 @@ static const struct object_ops device_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ device_destroy /* destroy */ }; @@ -258,8 +253,6 @@ static const struct object_ops inode_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -272,6 +265,7 @@ static const struct object_ops inode_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ inode_destroy /* destroy */ }; @@ -302,8 +296,6 @@ static const struct object_ops file_lock_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ file_lock_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -316,6 +308,7 @@ static const struct object_ops file_lock_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; @@ -1578,9 +1571,7 @@ static void fd_destroy( struct object *obj ) free( fd->unix_name ); } - if (do_esync()) - close( fd->esync_fd ); - if (fd->fsync_idx) fsync_free_shm_idx( fd->fsync_idx ); + if (fd->fast_sync) release_object( fd->fast_sync ); } /* check if the desired access is possible without violating */ @@ -1699,20 +1690,13 @@ static struct fd *alloc_fd_object(void) fd->poll_index = -1; fd->completion = NULL; fd->comp_flags = 0; - fd->esync_fd = -1; - fd->fsync_idx = 0; + fd->fast_sync = NULL; init_async_queue( 
&fd->read_q ); init_async_queue( &fd->write_q ); init_async_queue( &fd->wait_q ); list_init( &fd->inode_entry ); list_init( &fd->locks ); - if (do_esync()) - fd->esync_fd = esync_create_fd( 1, 0 ); - - if (do_fsync()) - fd->fsync_idx = fsync_alloc_shm( 1, 0 ); - if ((fd->poll_index = add_poll_user( fd )) == -1) { release_object( fd ); @@ -1747,21 +1731,14 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use fd->poll_index = -1; fd->completion = NULL; fd->comp_flags = 0; + fd->fast_sync = NULL; fd->no_fd_status = STATUS_BAD_DEVICE_TYPE; - fd->esync_fd = -1; - fd->fsync_idx = 0; init_async_queue( &fd->read_q ); init_async_queue( &fd->write_q ); init_async_queue( &fd->wait_q ); list_init( &fd->inode_entry ); list_init( &fd->locks ); - if (do_fsync()) - fd->fsync_idx = fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - fd->esync_fd = esync_create_fd( 0, 0 ); - return fd; } @@ -2239,13 +2216,15 @@ void set_fd_signaled( struct fd *fd, int signaled ) { if (fd->comp_flags & FILE_SKIP_SET_EVENT_ON_HANDLE) return; fd->signaled = signaled; - if (signaled) wake_up( fd->user, 0 ); - - if (do_fsync() && !signaled) - fsync_clear( fd->user ); - - if (do_esync() && !signaled) - esync_clear( fd->esync_fd ); + if (signaled) + { + wake_up( fd->user, 0 ); + fast_set_event( fd->fast_sync ); + } + else + { + fast_reset_event( fd->fast_sync ); + } } /* check if events are pending and if yes return which one(s) */ @@ -2271,21 +2250,16 @@ int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ) return ret; } -int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ) +struct fast_sync *default_fd_get_fast_sync( struct object *obj ) { struct fd *fd = get_obj_fd( obj ); - int ret = fd->esync_fd; - *type = ESYNC_MANUAL_SERVER; - release_object( fd ); - return ret; -} + struct fast_sync *ret; -unsigned int default_fd_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct fd *fd = get_obj_fd( obj ); - unsigned int ret 
= fd->fsync_idx; - *type = FSYNC_MANUAL_SERVER; + if (!fd->fast_sync) + fd->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, fd->signaled ); + ret = fd->fast_sync; release_object( fd ); + if (ret) grab_object( ret ); return ret; } diff --git a/server/file.c b/server/file.c index 3d9bb46c590..be184f9d1eb 100644 --- a/server/file.c +++ b/server/file.c @@ -123,8 +123,6 @@ static const struct object_ops file_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - default_fd_get_esync_fd, /* get_esync_fd */ - default_fd_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ file_get_fd, /* get_fd */ @@ -137,6 +135,7 @@ static const struct object_ops file_ops = NULL, /* unlink_name */ file_open_file, /* open_file */ file_get_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ file_destroy /* destroy */ }; diff --git a/server/file.h b/server/file.h index 8f0957ec8d5..93d531a9f25 100644 --- a/server/file.h +++ b/server/file.h @@ -110,8 +110,7 @@ extern char *dup_fd_name( struct fd *root, const char *name ) __WINE_DEALLOC(fre extern void get_nt_name( struct fd *fd, struct unicode_str *name ); extern int default_fd_signaled( struct object *obj, struct wait_queue_entry *entry ); -extern int default_fd_get_esync_fd( struct object *obj, enum esync_type *type ); -extern unsigned int default_fd_get_fsync_idx( struct object *obj, enum fsync_type *type ); +extern struct fast_sync *default_fd_get_fast_sync( struct object *obj ); extern int default_fd_get_poll_events( struct fd *fd ); extern void default_poll_event( struct fd *fd, int event ); extern void fd_cancel_async( struct fd *fd, struct async *async ); diff --git a/server/fsync.c b/server/fsync.c deleted file mode 100644 index dc50aa0a1f3..00000000000 --- a/server/fsync.c +++ /dev/null @@ -1,630 +0,0 @@ -/* - * futex-based synchronization objects - * - * Copyright 
(C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef HAVE_SYS_SYSCALL_H -# include -#endif -#include - -#include "ntstatus.h" -#define WIN32_NO_STATUS -#include "windef.h" -#include "winternl.h" - -#include "handle.h" -#include "request.h" -#include "fsync.h" - -#include "pshpack4.h" -#include "poppack.h" - -#ifndef __NR_futex_waitv -#define __NR_futex_waitv 449 -#endif - -int do_fsync(void) -{ -#ifdef __linux__ - static int do_fsync_cached = -1; - - if (do_fsync_cached == -1) - { - syscall( __NR_futex_waitv, 0, 0, 0, 0, 0); - do_fsync_cached = getenv("WINEFSYNC") && atoi(getenv("WINEFSYNC")) && errno != ENOSYS; - } - - return do_fsync_cached; -#else - return 0; -#endif -} - -static char shm_name[29]; -static int shm_fd; -static off_t shm_size; -static void **shm_addrs; -static int shm_addrs_size; /* length of the allocated shm_addrs array */ - -static int is_fsync_initialized; - -static uint64_t *shm_idx_free_map; -static uint32_t shm_idx_free_map_size; /* uint64_t word count */ -static uint32_t shm_idx_free_search_start_hint; - -#define BITS_IN_FREE_MAP_WORD (8 * sizeof(*shm_idx_free_map)) - -static void 
shm_cleanup(void) -{ - close( shm_fd ); - if (shm_unlink( shm_name ) == -1) - perror( "shm_unlink" ); -} - -void fsync_init(void) -{ - struct stat st; - - if (fstat( config_dir_fd, &st ) == -1) - fatal_error( "cannot stat config dir\n" ); - - if (st.st_ino != (unsigned long)st.st_ino) - sprintf( shm_name, "/wine-%lx%08lx-fsync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino ); - else - sprintf( shm_name, "/wine-%lx-fsync", (unsigned long)st.st_ino ); - - if (!shm_unlink( shm_name )) - fprintf( stderr, "fsync: warning: a previous shm file %s was not properly removed\n", shm_name ); - - shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 ); - if (shm_fd == -1) - perror( "shm_open" ); - - shm_addrs = calloc( 128, sizeof(shm_addrs[0]) ); - shm_addrs_size = 128; - - shm_size = FSYNC_SHM_PAGE_SIZE; - if (ftruncate( shm_fd, shm_size ) == -1) - perror( "ftruncate" ); - - is_fsync_initialized = 1; - - fprintf( stderr, "fsync: up and running.\n" ); - - shm_idx_free_map_size = 256; - shm_idx_free_map = malloc( shm_idx_free_map_size * sizeof(*shm_idx_free_map) ); - memset( shm_idx_free_map, 0xff, shm_idx_free_map_size * sizeof(*shm_idx_free_map) ); - shm_idx_free_map[0] &= ~(uint64_t)1; /* Avoid allocating shm_index 0. 
*/ - - atexit( shm_cleanup ); -} - -static struct list mutex_list = LIST_INIT(mutex_list); - -struct fsync -{ - struct object obj; - unsigned int shm_idx; - enum fsync_type type; - struct list mutex_entry; -}; - -static void fsync_dump( struct object *obj, int verbose ); -static unsigned int fsync_get_fsync_idx( struct object *obj, enum fsync_type *type ); -static unsigned int fsync_map_access( struct object *obj, unsigned int access ); -static void fsync_destroy( struct object *obj ); - -const struct object_ops fsync_ops = -{ - sizeof(struct fsync), /* size */ - &no_type, /* type */ - fsync_dump, /* dump */ - no_add_queue, /* add_queue */ - NULL, /* remove_queue */ - NULL, /* signaled */ - NULL, /* get_esync_fd */ - fsync_get_fsync_idx, /* get_fsync_idx */ - NULL, /* satisfied */ - no_signal, /* signal */ - no_get_fd, /* get_fd */ - fsync_map_access, /* map_access */ - default_get_sd, /* get_sd */ - default_set_sd, /* set_sd */ - default_get_full_name, /* get_full_name */ - no_lookup_name, /* lookup_name */ - directory_link_name, /* link_name */ - default_unlink_name, /* unlink_name */ - no_open_file, /* open_file */ - no_kernel_obj_list, /* get_kernel_obj_list */ - no_close_handle, /* close_handle */ - fsync_destroy /* destroy */ -}; - -static void fsync_dump( struct object *obj, int verbose ) -{ - struct fsync *fsync = (struct fsync *)obj; - assert( obj->ops == &fsync_ops ); - fprintf( stderr, "fsync idx=%d\n", fsync->shm_idx ); -} - -static unsigned int fsync_get_fsync_idx( struct object *obj, enum fsync_type *type) -{ - struct fsync *fsync = (struct fsync *)obj; - *type = fsync->type; - return fsync->shm_idx; -} - -static unsigned int fsync_map_access( struct object *obj, unsigned int access ) -{ - /* Sync objects have the same flags. 
*/ - if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE; - if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE; - if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE; - if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE; - return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL); -} - -static void fsync_destroy( struct object *obj ) -{ - struct fsync *fsync = (struct fsync *)obj; - if (fsync->type == FSYNC_MUTEX) - list_remove( &fsync->mutex_entry ); - fsync_free_shm_idx( fsync->shm_idx ); -} - -static void *get_shm( unsigned int idx ) -{ - int entry = (idx * 16) / FSYNC_SHM_PAGE_SIZE; - int offset = (idx * 16) % FSYNC_SHM_PAGE_SIZE; - - if (entry >= shm_addrs_size) - { - int new_size = max(shm_addrs_size * 2, entry + 1); - - if (!(shm_addrs = realloc( shm_addrs, new_size * sizeof(shm_addrs[0]) ))) - fprintf( stderr, "fsync: couldn't expand shm_addrs array to size %d\n", entry + 1 ); - - memset( shm_addrs + shm_addrs_size, 0, (new_size - shm_addrs_size) * sizeof(shm_addrs[0]) ); - - shm_addrs_size = new_size; - } - - if (!shm_addrs[entry]) - { - void *addr = mmap( NULL, FSYNC_SHM_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, - (off_t)entry * FSYNC_SHM_PAGE_SIZE ); - if (addr == (void *)-1) - { - fprintf( stderr, "fsync: failed to map page %d (offset %#zx): ", - entry, (size_t)entry * FSYNC_SHM_PAGE_SIZE ); - perror( "mmap" ); - } - - if (debug_level) - fprintf( stderr, "fsync: Mapping page %d at %p.\n", entry, addr ); - - if (__sync_val_compare_and_swap( &shm_addrs[entry], 0, addr )) - munmap( addr, FSYNC_SHM_PAGE_SIZE ); /* someone beat us to it */ - } - - return (void *)((unsigned long)shm_addrs[entry] + offset); -} - -static int alloc_shm_idx_from_word( unsigned int word_index ) -{ - int ret; - - if (!shm_idx_free_map[word_index]) return 0; - - ret = __builtin_ctzll( shm_idx_free_map[word_index] ); - 
shm_idx_free_map[word_index] &= ~((uint64_t)1 << ret); - shm_idx_free_search_start_hint = shm_idx_free_map[word_index] ? word_index : word_index + 1; - return word_index * BITS_IN_FREE_MAP_WORD + ret; -} - -unsigned int fsync_alloc_shm( int low, int high ) -{ -#ifdef __linux__ - unsigned int i; - int shm_idx; - int *shm; - - /* this is arguably a bit of a hack, but we need some way to prevent - * allocating shm for the master socket */ - if (!is_fsync_initialized) - return 0; - - /* shm_idx_free_search_start_hint is always at the first word with a free index or before that. */ - for (i = shm_idx_free_search_start_hint; i < shm_idx_free_map_size; ++i) - if ((shm_idx = alloc_shm_idx_from_word( i ))) break; - - if (!shm_idx) - { - uint32_t old_size, new_size; - uint64_t *new_alloc; - - old_size = shm_idx_free_map_size; - new_size = old_size + 256; - new_alloc = realloc( shm_idx_free_map, new_size * sizeof(*new_alloc) ); - if (!new_alloc) - { - fprintf( stderr, "fsync: couldn't expand shm_idx_free_map to size %zd.", - new_size * sizeof(*new_alloc) ); - return 0; - } - memset( new_alloc + old_size, 0xff, (new_size - old_size) * sizeof(*new_alloc) ); - shm_idx_free_map = new_alloc; - shm_idx_free_map_size = new_size; - shm_idx = alloc_shm_idx_from_word( old_size ); - } - - while (shm_idx * 16 >= shm_size) - { - /* Better expand the shm section. */ - shm_size += FSYNC_SHM_PAGE_SIZE; - if (ftruncate( shm_fd, shm_size ) == -1) - { - fprintf( stderr, "fsync: couldn't expand %s to size %jd: ", - shm_name, shm_size ); - perror( "ftruncate" ); - } - } - - shm = get_shm( shm_idx ); - assert(shm); - shm[0] = low; - shm[1] = high; - shm[2] = 1; /* Reference count. */ - shm[3] = 0; /* Last reference process id. 
*/ - - return shm_idx; -#else - return 0; -#endif -} - -void fsync_free_shm_idx( int shm_idx ) -{ - unsigned int idx; - uint64_t mask; - int *shm; - - assert( shm_idx ); - assert( shm_idx < shm_idx_free_map_size * BITS_IN_FREE_MAP_WORD ); - - shm = get_shm( shm_idx ); - if (shm[2] <= 0) - { - fprintf( stderr, "wineserver: fsync err: shm refcount is %d.\n", shm[2] ); - return; - } - - if (__atomic_sub_fetch( &shm[2], 1, __ATOMIC_SEQ_CST )) - { - /* Sync object is still referenced in a process. */ - return; - } - - idx = shm_idx / BITS_IN_FREE_MAP_WORD; - mask = (uint64_t)1 << (shm_idx % BITS_IN_FREE_MAP_WORD); - assert( !(shm_idx_free_map[idx] & mask) ); - shm_idx_free_map[idx] |= mask; - if (idx < shm_idx_free_search_start_hint) - shm_idx_free_search_start_hint = idx; -} - -/* Try to cleanup the shared mem indices locked by the wait on the killed processes. - * This is not fully reliable but should avoid leaking the majority of indices on - * process kill. */ -void fsync_cleanup_process_shm_indices( process_id_t id ) -{ - uint64_t free_word; - unsigned int i, j; - void *shmbase; - int *shm; - - for (i = 0; i < shm_idx_free_map_size; ++i) - { - free_word = shm_idx_free_map[i]; - if (free_word == ~(uint64_t)0) continue; - shmbase = get_shm( i * BITS_IN_FREE_MAP_WORD ); - for (j = !i; j < BITS_IN_FREE_MAP_WORD; ++j) - { - shm = (int *)((char *)shmbase + j * 16); - if (!(free_word & ((uint64_t)1 << j)) && shm[3] == id - && __atomic_load_n( &shm[2], __ATOMIC_SEQ_CST ) == 1) - fsync_free_shm_idx( i * BITS_IN_FREE_MAP_WORD + j ); - } - } -} - -static int type_matches( enum fsync_type type1, enum fsync_type type2 ) -{ - return (type1 == type2) || - ((type1 == FSYNC_AUTO_EVENT || type1 == FSYNC_MANUAL_EVENT) && - (type2 == FSYNC_AUTO_EVENT || type2 == FSYNC_MANUAL_EVENT)); -} - -struct fsync *create_fsync( struct object *root, const struct unicode_str *name, - unsigned int attr, int low, int high, enum fsync_type type, - const struct security_descriptor *sd ) -{ -#ifdef 
__linux__ - struct fsync *fsync; - - if ((fsync = create_named_object( root, &fsync_ops, name, attr, sd ))) - { - if (get_error() != STATUS_OBJECT_NAME_EXISTS) - { - /* initialize it if it didn't already exist */ - - /* Initialize the shared memory portion. We want to do this on the - * server side to avoid a potential though unlikely race whereby - * the same object is opened and used between the time it's created - * and the time its shared memory portion is initialized. */ - - fsync->shm_idx = fsync_alloc_shm( low, high ); - fsync->type = type; - if (type == FSYNC_MUTEX) - list_add_tail( &mutex_list, &fsync->mutex_entry ); - } - else - { - /* validate the type */ - if (!type_matches( type, fsync->type )) - { - release_object( &fsync->obj ); - set_error( STATUS_OBJECT_TYPE_MISMATCH ); - return NULL; - } - } - } - - return fsync; -#else - set_error( STATUS_NOT_IMPLEMENTED ); - return NULL; -#endif -} - -static inline int futex_wake( int *addr, int val ) -{ - return syscall( __NR_futex, addr, 1, val, NULL, 0, 0 ); -} - -/* shm layout for events or event-like objects. 
*/ -struct fsync_event -{ - int signaled; - int unused; - int ref; - int last_pid; -}; - -void fsync_wake_futex( unsigned int shm_idx ) -{ - struct fsync_event *event; - - if (debug_level) - fprintf( stderr, "fsync_wake_futex: index %u\n", shm_idx ); - - if (!shm_idx) - return; - - event = get_shm( shm_idx ); - if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) - futex_wake( &event->signaled, INT_MAX ); -} - -void fsync_wake_up( struct object *obj ) -{ - enum fsync_type type; - - if (debug_level) - fprintf( stderr, "fsync_wake_up: object %p\n", obj ); - - if (obj->ops->get_fsync_idx) - fsync_wake_futex( obj->ops->get_fsync_idx( obj, &type ) ); -} - -void fsync_clear_futex( unsigned int shm_idx ) -{ - struct fsync_event *event; - - if (debug_level) - fprintf( stderr, "fsync_clear_futex: index %u\n", shm_idx ); - - if (!shm_idx) - return; - - event = get_shm( shm_idx ); - __atomic_store_n( &event->signaled, 0, __ATOMIC_SEQ_CST ); -} - -void fsync_clear( struct object *obj ) -{ - enum fsync_type type; - - if (debug_level) - fprintf( stderr, "fsync_clear: object %p\n", obj ); - - if (obj->ops->get_fsync_idx) - fsync_clear_futex( obj->ops->get_fsync_idx( obj, &type ) ); -} - -void fsync_set_event( struct fsync *fsync ) -{ - struct fsync_event *event = get_shm( fsync->shm_idx ); - assert( fsync->obj.ops == &fsync_ops ); - - if (!__atomic_exchange_n( &event->signaled, 1, __ATOMIC_SEQ_CST )) - futex_wake( &event->signaled, INT_MAX ); -} - -void fsync_reset_event( struct fsync *fsync ) -{ - struct fsync_event *event = get_shm( fsync->shm_idx ); - assert( fsync->obj.ops == &fsync_ops ); - - __atomic_store_n( &event->signaled, 0, __ATOMIC_SEQ_CST ); -} - -struct mutex -{ - int tid; - int count; /* recursion count */ -}; - -void fsync_abandon_mutexes( struct thread *thread ) -{ - struct fsync *fsync; - - LIST_FOR_EACH_ENTRY( fsync, &mutex_list, struct fsync, mutex_entry ) - { - struct mutex *mutex = get_shm( fsync->shm_idx ); - - if (mutex->tid == thread->id) - 
{ - if (debug_level) - fprintf( stderr, "fsync_abandon_mutexes() idx=%d\n", fsync->shm_idx ); - mutex->tid = ~0; - mutex->count = 0; - futex_wake( &mutex->tid, INT_MAX ); - } - } -} - -DECL_HANDLER(create_fsync) -{ - struct fsync *fsync; - struct unicode_str name; - struct object *root; - const struct security_descriptor *sd; - const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root ); - - if (!do_fsync()) - { - set_error( STATUS_NOT_IMPLEMENTED ); - return; - } - - if (!objattr) return; - - if ((fsync = create_fsync( root, &name, objattr->attributes, req->low, - req->high, req->type, sd ))) - { - if (get_error() == STATUS_OBJECT_NAME_EXISTS) - reply->handle = alloc_handle( current->process, fsync, req->access, objattr->attributes ); - else - reply->handle = alloc_handle_no_access_check( current->process, fsync, - req->access, objattr->attributes ); - - reply->shm_idx = fsync->shm_idx; - reply->type = fsync->type; - release_object( fsync ); - } - - if (root) release_object( root ); -} - -DECL_HANDLER(open_fsync) -{ - struct unicode_str name = get_req_unicode_str(); - - reply->handle = open_object( current->process, req->rootdir, req->access, - &fsync_ops, &name, req->attributes ); - - if (reply->handle) - { - struct fsync *fsync; - - if (!(fsync = (struct fsync *)get_handle_obj( current->process, reply->handle, - 0, &fsync_ops ))) - return; - - if (!type_matches( req->type, fsync->type )) - { - set_error( STATUS_OBJECT_TYPE_MISMATCH ); - release_object( fsync ); - return; - } - - reply->type = fsync->type; - reply->shm_idx = fsync->shm_idx; - release_object( fsync ); - } -} - -/* Retrieve the index of a shm section which will be signaled by the server. 
*/ -DECL_HANDLER(get_fsync_idx) -{ - struct object *obj; - enum fsync_type type; - - if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL ))) - return; - - if (obj->ops->get_fsync_idx) - { - int *shm; - - reply->shm_idx = obj->ops->get_fsync_idx( obj, &type ); - reply->type = type; - shm = get_shm( reply->shm_idx ); - __atomic_add_fetch( &shm[2], 1, __ATOMIC_SEQ_CST ); - } - else - { - if (debug_level) - { - fprintf( stderr, "%04x: fsync: can't wait on object: ", current->id ); - obj->ops->dump( obj, 0 ); - } - set_error( STATUS_NOT_IMPLEMENTED ); - } - - release_object( obj ); -} - -DECL_HANDLER(get_fsync_apc_idx) -{ - reply->shm_idx = current->fsync_apc_idx; -} - -DECL_HANDLER(fsync_free_shm_idx) -{ - if (!req->shm_idx || req->shm_idx >= shm_idx_free_map_size * BITS_IN_FREE_MAP_WORD) - { - set_error( STATUS_INVALID_PARAMETER ); - return; - } - fsync_free_shm_idx( req->shm_idx ); -} diff --git a/server/fsync.h b/server/fsync.h deleted file mode 100644 index d4bd889a7f8..00000000000 --- a/server/fsync.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * futex-based synchronization objects - * - * Copyright (C) 2018 Zebediah Figura - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA - */ - -extern int do_fsync(void); -extern void fsync_init(void); -extern unsigned int fsync_alloc_shm( int low, int high ); -extern void fsync_free_shm_idx( int shm_idx ); -extern void fsync_wake_futex( unsigned int shm_idx ); -extern void fsync_clear_futex( unsigned int shm_idx ); -extern void fsync_wake_up( struct object *obj ); -extern void fsync_clear( struct object *obj ); - -struct fsync; - -extern const struct object_ops fsync_ops; -extern void fsync_set_event( struct fsync *fsync ); -extern void fsync_reset_event( struct fsync *fsync ); -extern void fsync_abandon_mutexes( struct thread *thread ); -extern void fsync_cleanup_process_shm_indices( process_id_t id ); diff --git a/server/handle.c b/server/handle.c index 2f47442d1f8..26cc66cd34b 100644 --- a/server/handle.c +++ b/server/handle.c @@ -126,8 +126,6 @@ static const struct object_ops handle_table_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -140,6 +138,7 @@ static const struct object_ops handle_table_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ handle_table_destroy /* destroy */ }; diff --git a/server/hook.c b/server/hook.c index 24eb27434db..fde45b40842 100644 --- a/server/hook.c +++ b/server/hook.c @@ -80,8 +80,6 @@ static const struct object_ops hook_table_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -94,6 +92,7 @@ static 
const struct object_ops hook_table_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ hook_table_destroy /* destroy */ }; diff --git a/server/mailslot.c b/server/mailslot.c index 41fb020aaf0..d9807b4ad23 100644 --- a/server/mailslot.c +++ b/server/mailslot.c @@ -74,8 +74,6 @@ static const struct object_ops mailslot_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ mailslot_get_fd, /* get_fd */ @@ -88,6 +86,7 @@ static const struct object_ops mailslot_ops = default_unlink_name, /* unlink_name */ mailslot_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mailslot_destroy /* destroy */ }; @@ -135,8 +134,6 @@ static const struct object_ops mail_writer_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ mail_writer_get_fd, /* get_fd */ @@ -149,6 +146,7 @@ static const struct object_ops mail_writer_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mail_writer_destroy /* destroy */ }; @@ -200,8 +198,6 @@ static const struct object_ops mailslot_device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -214,6 +210,7 @@ static const struct object_ops mailslot_device_ops = default_unlink_name, /* unlink_name */ mailslot_device_open_file, /* open_file */ 
no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mailslot_device_destroy /* destroy */ }; @@ -232,8 +229,6 @@ static const struct object_ops mailslot_device_file_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ mailslot_device_file_get_fd, /* get_fd */ @@ -246,6 +241,7 @@ static const struct object_ops mailslot_device_file_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mailslot_device_file_destroy /* destroy */ }; diff --git a/server/main.c b/server/main.c index d0a0a4879b5..34595404863 100644 --- a/server/main.c +++ b/server/main.c @@ -35,8 +35,6 @@ #include "request.h" #include "unicode.h" #include "security.h" -#include "esync.h" -#include "fsync.h" /* command-line options */ int debug_level = 0; @@ -232,15 +230,6 @@ int main( int argc, char *argv[] ) sock_init(); open_master_socket(); - if (do_fsync()) - fsync_init(); - - if (do_esync()) - esync_init(); - - if (!do_fsync() && !do_esync()) - fprintf( stderr, "wineserver: using server-side synchronization.\n" ); - if (debug_level) fprintf( stderr, "wineserver: starting (pid=%ld)\n", (long) getpid() ); set_current_time(); init_signals(); diff --git a/server/mapping.c b/server/mapping.c index d6c6c46d7c4..b5d925ca615 100644 --- a/server/mapping.c +++ b/server/mapping.c @@ -72,8 +72,6 @@ static const struct object_ops ranges_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -86,6 +84,7 @@ static const struct object_ops ranges_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ 
no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ ranges_destroy /* destroy */ }; @@ -110,8 +109,6 @@ static const struct object_ops shared_map_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -124,6 +121,7 @@ static const struct object_ops shared_map_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ shared_map_destroy /* destroy */ }; @@ -188,8 +186,6 @@ static const struct object_ops mapping_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ mapping_get_fd, /* get_fd */ @@ -202,6 +198,7 @@ static const struct object_ops mapping_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ mapping_get_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mapping_destroy /* destroy */ }; diff --git a/server/mutex.c b/server/mutex.c index 2503d12057f..167c236e014 100644 --- a/server/mutex.c +++ b/server/mutex.c @@ -38,6 +38,8 @@ static const WCHAR mutex_name[] = {'M','u','t','a','n','t'}; +static struct list fast_mutexes = LIST_INIT(fast_mutexes); + struct type_descr mutex_type = { { mutex_name, sizeof(mutex_name) }, /* name */ @@ -57,6 +59,8 @@ struct mutex unsigned int count; /* recursion count */ int abandoned; /* has it been abandoned? 
*/ struct list entry; /* entry in owner thread mutex list */ + struct list fast_mutexes_entry; /* entry in fast_mutexes list */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void mutex_dump( struct object *obj, int verbose ); @@ -64,6 +68,7 @@ static int mutex_signaled( struct object *obj, struct wait_queue_entry *entry ); static void mutex_satisfied( struct object *obj, struct wait_queue_entry *entry ); static void mutex_destroy( struct object *obj ); static int mutex_signal( struct object *obj, unsigned int access ); +static struct fast_sync *mutex_get_fast_sync( struct object *obj ); static const struct object_ops mutex_ops = { @@ -73,8 +78,6 @@ static const struct object_ops mutex_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ mutex_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ mutex_satisfied, /* satisfied */ mutex_signal, /* signal */ no_get_fd, /* get_fd */ @@ -87,6 +90,7 @@ static const struct object_ops mutex_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + mutex_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ mutex_destroy /* destroy */ }; @@ -129,6 +133,7 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str mutex->owner = NULL; mutex->abandoned = 0; if (owned) do_grab( mutex, current ); + mutex->fast_sync = NULL; } } return mutex; @@ -136,16 +141,22 @@ static struct mutex *create_mutex( struct object *root, const struct unicode_str void abandon_mutexes( struct thread *thread ) { + struct mutex *mutex; struct list *ptr; while ((ptr = list_head( &thread->mutex_list )) != NULL) { - struct mutex *mutex = LIST_ENTRY( ptr, struct mutex, entry ); + mutex = LIST_ENTRY( ptr, struct mutex, entry ); assert( mutex->owner == thread ); mutex->count = 0; mutex->abandoned = 1; do_release( mutex ); } + + LIST_FOR_EACH_ENTRY(mutex, &fast_mutexes, struct mutex, 
fast_mutexes_entry) + { + fast_abandon_mutex( thread->id, mutex->fast_sync ); + } } static void mutex_dump( struct object *obj, int verbose ) @@ -191,14 +202,34 @@ static int mutex_signal( struct object *obj, unsigned int access ) return 1; } +static struct fast_sync *mutex_get_fast_sync( struct object *obj ) +{ + struct mutex *mutex = (struct mutex *)obj; + + if (!mutex->fast_sync) + { + mutex->fast_sync = fast_create_mutex( mutex->owner ? mutex->owner->id : 0, mutex->count ); + if (mutex->fast_sync) list_add_tail( &fast_mutexes, &mutex->fast_mutexes_entry ); + } + if (mutex->fast_sync) grab_object( mutex->fast_sync ); + return mutex->fast_sync; +} + static void mutex_destroy( struct object *obj ) { struct mutex *mutex = (struct mutex *)obj; assert( obj->ops == &mutex_ops ); - if (!mutex->count) return; - mutex->count = 0; - do_release( mutex ); + if (mutex->count) + { + mutex->count = 0; + do_release( mutex ); + } + if (mutex->fast_sync) + { + release_object( mutex->fast_sync ); + list_remove( &mutex->fast_mutexes_entry ); + } } /* create a mutex */ diff --git a/server/named_pipe.c b/server/named_pipe.c index 25299747dae..d7eb0e9dabf 100644 --- a/server/named_pipe.c +++ b/server/named_pipe.c @@ -122,8 +122,6 @@ static const struct object_ops named_pipe_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -136,6 +134,7 @@ static const struct object_ops named_pipe_ops = default_unlink_name, /* unlink_name */ named_pipe_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ named_pipe_destroy /* destroy */ }; @@ -172,8 +171,6 @@ static const struct object_ops pipe_server_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - default_fd_get_esync_fd, /* get_esync_fd */ - 
default_fd_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ pipe_end_get_fd, /* get_fd */ @@ -186,6 +183,7 @@ static const struct object_ops pipe_server_ops = NULL, /* unlink_name */ pipe_server_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ async_close_obj_handle, /* close_handle */ pipe_server_destroy /* destroy */ }; @@ -218,8 +216,6 @@ static const struct object_ops pipe_client_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - default_fd_get_esync_fd, /* get_esync_fd */ - default_fd_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ pipe_end_get_fd, /* get_fd */ @@ -232,6 +228,7 @@ static const struct object_ops pipe_client_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ async_close_obj_handle, /* close_handle */ pipe_end_destroy /* destroy */ }; @@ -267,8 +264,6 @@ static const struct object_ops named_pipe_device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -281,6 +276,7 @@ static const struct object_ops named_pipe_device_ops = default_unlink_name, /* unlink_name */ named_pipe_device_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ named_pipe_device_destroy /* destroy */ }; @@ -301,8 +297,6 @@ static const struct object_ops named_pipe_device_file_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ named_pipe_device_file_get_fd, /* 
get_fd */ @@ -315,6 +309,7 @@ static const struct object_ops named_pipe_device_file_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ named_pipe_device_file_close_handle, /* close_handle */ named_pipe_device_file_destroy /* destroy */ }; diff --git a/server/object.c b/server/object.c index 29f1ea96129..33fc18cd0df 100644 --- a/server/object.c +++ b/server/object.c @@ -538,6 +538,12 @@ struct fd *no_get_fd( struct object *obj ) return NULL; } +struct fast_sync *no_get_fast_sync( struct object *obj ) +{ + set_error( STATUS_OBJECT_TYPE_MISMATCH ); + return NULL; +} + unsigned int default_map_access( struct object *obj, unsigned int access ) { return map_access( access, &obj->ops->type->mapping ); diff --git a/server/object.h b/server/object.h index 3b405e36db0..fe8690351c8 100644 --- a/server/object.h +++ b/server/object.h @@ -42,6 +42,7 @@ struct async; struct async_queue; struct winstation; struct object_type; +struct fast_sync; struct unicode_str @@ -78,10 +79,6 @@ struct object_ops void (*remove_queue)(struct object *,struct wait_queue_entry *); /* is object signaled? 
*/ int (*signaled)(struct object *,struct wait_queue_entry *); - /* return the esync fd for this object */ - int (*get_esync_fd)(struct object *, enum esync_type *type); - /* return the fsync shm idx for this object */ - unsigned int (*get_fsync_idx)(struct object *, enum fsync_type *type); /* wait satisfied */ void (*satisfied)(struct object *,struct wait_queue_entry *); /* signal an object */ @@ -107,6 +104,8 @@ struct object_ops unsigned int options); /* return list of kernel objects */ struct list *(*get_kernel_obj_list)(struct object *); + /* get a client-waitable fast-synchronization handle to this object */ + struct fast_sync *(*get_fast_sync)(struct object *); /* close a handle to this object */ int (*close_handle)(struct object *,struct process *,obj_handle_t); /* destroy on refcount == 0 */ @@ -226,6 +225,17 @@ extern void reset_event( struct event *event ); extern void abandon_mutexes( struct thread *thread ); +/* fast-synchronization functions */ + +extern struct fast_sync *fast_create_event( enum fast_sync_type type, int signaled ); +extern struct fast_sync *fast_create_semaphore( unsigned int count, unsigned int max ); +extern struct fast_sync *fast_create_mutex( thread_id_t owner, unsigned int count ); +extern void fast_set_event( struct fast_sync *obj ); +extern void fast_reset_event( struct fast_sync *obj ); +extern void fast_abandon_mutex( thread_id_t tid, struct fast_sync *fast_sync ); + +extern struct fast_sync *no_get_fast_sync( struct object *obj ); + /* serial functions */ int get_serial_async_timeout(struct object *obj, int type, int count); diff --git a/server/process.c b/server/process.c index 2e46f71f25d..3738416d4af 100644 --- a/server/process.c +++ b/server/process.c @@ -63,8 +63,6 @@ #include "request.h" #include "user.h" #include "security.h" -#include "esync.h" -#include "fsync.h" /* process object */ @@ -96,9 +94,8 @@ static unsigned int process_map_access( struct object *obj, unsigned int access static struct security_descriptor 
*process_get_sd( struct object *obj ); static void process_poll_event( struct fd *fd, int event ); static struct list *process_get_kernel_obj_list( struct object *obj ); +static struct fast_sync *process_get_fast_sync( struct object *obj ); static void process_destroy( struct object *obj ); -static int process_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int process_get_fsync_idx( struct object *obj, enum fsync_type *type ); static void terminate_process( struct process *process, struct thread *skip, int exit_code ); static void set_process_affinity( struct process *process, affinity_t affinity ); @@ -110,8 +107,6 @@ static const struct object_ops process_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ process_signaled, /* signaled */ - process_get_esync_fd, /* get_esync_fd */ - process_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -124,6 +119,7 @@ static const struct object_ops process_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ process_get_kernel_obj_list, /* get_kernel_obj_list */ + process_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ process_destroy /* destroy */ }; @@ -163,8 +159,6 @@ static const struct object_ops startup_info_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ startup_info_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -177,6 +171,7 @@ static const struct object_ops startup_info_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ startup_info_destroy /* destroy */ }; @@ -199,6 +194,7 @@ struct type_descr job_type = static void job_dump( struct object *obj, int verbose ); static int job_signaled( struct object *obj, struct 
wait_queue_entry *entry ); +static struct fast_sync *job_get_fast_sync( struct object *obj ); static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ); static void job_destroy( struct object *obj ); @@ -216,6 +212,7 @@ struct job struct job *parent; struct list parent_job_entry; /* list entry for parent job */ struct list child_job_list; /* list of child jobs */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static const struct object_ops job_ops = @@ -226,8 +223,6 @@ static const struct object_ops job_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ job_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -240,6 +235,7 @@ static const struct object_ops job_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + job_get_fast_sync, /* get_fast_sync */ job_close_handle, /* close_handle */ job_destroy /* destroy */ }; @@ -264,6 +260,7 @@ static struct job *create_job_object( struct object *root, const struct unicode_ job->completion_port = NULL; job->completion_key = 0; job->parent = NULL; + job->fast_sync = NULL; } } return job; @@ -420,6 +417,17 @@ static void terminate_job( struct job *job, int exit_code ) job->terminating = 0; job->signaled = 1; wake_up( &job->obj, 0 ); + fast_set_event( job->fast_sync ); +} + +static struct fast_sync *job_get_fast_sync( struct object *obj ) +{ + struct job *job = (struct job *)obj; + + if (!job->fast_sync) + job->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, job->signaled ); + if (job->fast_sync) grab_object( job->fast_sync ); + return job->fast_sync; } static int job_close_handle( struct object *obj, struct process *process, obj_handle_t handle ) @@ -450,6 +458,8 @@ static void job_destroy( struct object *obj ) list_remove( &job->parent_job_entry ); release_object( 
job->parent ); } + + if (job->fast_sync) release_object( job->fast_sync ); } static void job_dump( struct object *obj, int verbose ) @@ -693,9 +703,8 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla process->rawinput_device_count = 0; process->rawinput_mouse = NULL; process->rawinput_kbd = NULL; + process->fast_sync = NULL; memset( &process->image_info, 0, sizeof(process->image_info) ); - process->esync_fd = -1; - process->fsync_idx = 0; process->cpu_override.cpu_count = 0; list_init( &process->kernel_object ); list_init( &process->thread_list ); @@ -753,12 +762,6 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla if (!token_assign_label( process->token, &high_label_sid )) goto error; - if (do_fsync()) - process->fsync_idx = fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - process->esync_fd = esync_create_fd( 0, 0 ); - set_fd_events( process->msg_fd, POLLIN ); /* start listening to events */ return process; @@ -806,12 +809,8 @@ static void process_destroy( struct object *obj ) free( process->rawinput_devices ); free( process->dir_cache ); free( process->image ); - if (do_esync()) close( process->esync_fd ); - if (process->fsync_idx) - { - fsync_cleanup_process_shm_indices( process->id ); - fsync_free_shm_idx( process->fsync_idx ); - } + + if (process->fast_sync) release_object( process->fast_sync ); } /* dump a process on stdout for debugging purposes */ @@ -829,20 +828,6 @@ static int process_signaled( struct object *obj, struct wait_queue_entry *entry return !process->running_threads; } -static int process_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct process *process = (struct process *)obj; - *type = ESYNC_MANUAL_SERVER; - return process->esync_fd; -} - -static unsigned int process_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct process *process = (struct process *)obj; - *type = FSYNC_MANUAL_SERVER; - return process->fsync_idx; -} - static unsigned 
int process_map_access( struct object *obj, unsigned int access ) { access = default_map_access( obj, access ); @@ -857,6 +842,16 @@ static struct list *process_get_kernel_obj_list( struct object *obj ) return &process->kernel_object; } +static struct fast_sync *process_get_fast_sync( struct object *obj ) +{ + struct process *process = (struct process *)obj; + + if (!process->fast_sync) + process->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, !process->running_threads ); + if (process->fast_sync) grab_object( process->fast_sync ); + return process->fast_sync; +} + static struct security_descriptor *process_get_sd( struct object *obj ) { static struct security_descriptor *process_default_sd; @@ -1021,6 +1016,7 @@ static void process_killed( struct process *process ) release_job_process( process ); start_sigkill_timer( process ); wake_up( &process->obj, 0 ); + fast_set_event( process->fast_sync ); } /* add a thread to a process running threads list */ diff --git a/server/process.h b/server/process.h index 112aa5d5062..3a0bc5c6db6 100644 --- a/server/process.h +++ b/server/process.h @@ -85,8 +85,7 @@ struct process const struct rawinput_device *rawinput_kbd; /* rawinput keyboard device, if any */ struct list kernel_object; /* list of kernel object pointers */ pe_image_info_t image_info; /* main exe image info */ - int esync_fd; /* esync file descriptor (signaled on exit) */ - unsigned int fsync_idx; + struct fast_sync *fast_sync; /* fast synchronization object */ struct cpu_topology_override cpu_override; /* Overridden CPUs to host CPUs mapping. */ unsigned char wine_cpu_id_from_host[64]; /* Host to overridden CPU mapping. 
*/ }; diff --git a/server/protocol.def b/server/protocol.def index f52d2cf3b49..036e37f6851 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -4092,116 +4092,46 @@ typedef union obj_handle_t handle; /* next thread handle */ @END -enum esync_type -{ - ESYNC_SEMAPHORE = 1, - ESYNC_AUTO_EVENT, - ESYNC_MANUAL_EVENT, - ESYNC_MUTEX, - ESYNC_AUTO_SERVER, - ESYNC_MANUAL_SERVER, - ESYNC_QUEUE, -}; - -/* Create a new eventfd-based synchronization object */ -@REQ(create_esync) - unsigned int access; /* wanted access rights */ - int initval; /* initial value */ - int type; /* type of esync object */ - int max; /* maximum count on a semaphore */ - VARARG(objattr,object_attributes); /* object attributes */ -@REPLY - obj_handle_t handle; /* handle to the object */ - int type; /* actual type (may be different for events) */ - unsigned int shm_idx; -@END - -@REQ(open_esync) - unsigned int access; /* wanted access rights */ - unsigned int attributes; /* object attributes */ - obj_handle_t rootdir; /* root directory */ - int type; /* type of esync object (above) */ - VARARG(name,unicode_str); /* object name */ -@REPLY - obj_handle_t handle; /* handle to the event */ - int type; /* type of esync object (above) */ - unsigned int shm_idx; /* this object's index into the shm section */ -@END - -/* Retrieve the esync fd for an object. */ -@REQ(get_esync_fd) - obj_handle_t handle; /* handle to the object */ -@REPLY - int type; - unsigned int shm_idx; -@END -/* Notify the server that we are doing a message wait or done with one. */ -@REQ(esync_msgwait) - int in_msgwait; /* are we in a message wait? */ -@END - -/* Retrieve the fd to wait on for user APCs. 
*/ -@REQ(get_esync_apc_fd) -@END - -#define FSYNC_SHM_PAGE_SIZE 0x10000 - -enum fsync_type +enum fast_sync_type { - FSYNC_SEMAPHORE = 1, - FSYNC_AUTO_EVENT, - FSYNC_MANUAL_EVENT, - FSYNC_MUTEX, - FSYNC_AUTO_SERVER, - FSYNC_MANUAL_SERVER, - FSYNC_QUEUE, + FAST_SYNC_SEMAPHORE = 1, + FAST_SYNC_MUTEX, + FAST_SYNC_AUTO_EVENT, + FAST_SYNC_MANUAL_EVENT, + FAST_SYNC_AUTO_SERVER, + FAST_SYNC_MANUAL_SERVER, + FAST_SYNC_QUEUE, }; -/* Create a new futex-based synchronization object */ -@REQ(create_fsync) - unsigned int access; /* wanted access rights */ - int low; /* initial value of low word */ - int high; /* initial value of high word */ - int type; /* type of fsync object */ - VARARG(objattr,object_attributes); /* object attributes */ -@REPLY - obj_handle_t handle; /* handle to the object */ - int type; /* type of fsync object */ - unsigned int shm_idx; /* this object's index into the shm section */ -@END - -/* Open an fsync object */ -@REQ(open_fsync) - unsigned int access; /* wanted access rights */ - unsigned int attributes; /* object attributes */ - obj_handle_t rootdir; /* root directory */ - int type; /* type of fsync object */ - VARARG(name,unicode_str); /* object name */ +/* Obtain a handle to the fast synchronization device object */ +@REQ(get_linux_sync_device) @REPLY - obj_handle_t handle; /* handle to the event */ - int type; /* type of fsync object */ - unsigned int shm_idx; /* this object's index into the shm section */ + obj_handle_t handle; /* handle to the device */ @END -/* Retrieve the shm index for an object. 
*/ -@REQ(get_fsync_idx) - obj_handle_t handle; /* handle to the object */ +/* Get the fast synchronization object associated with the given handle */ +@REQ(get_linux_sync_obj) + obj_handle_t handle; /* handle to the object */ @REPLY - int type; - unsigned int shm_idx; + obj_handle_t handle; /* handle to the fast synchronization object */ + int type; /* object type */ + unsigned int access; /* handle access rights */ @END -@REQ(fsync_msgwait) - int in_msgwait; /* are we in a message wait? */ +/* Begin a client-side wait on a message queue */ +@REQ(fast_select_queue) + obj_handle_t handle; /* handle to the queue */ @END -@REQ(get_fsync_apc_idx) -@REPLY - unsigned int shm_idx; +/* End a client-side wait on a message queue */ +@REQ(fast_unselect_queue) + obj_handle_t handle; /* handle to the queue */ + int signaled; /* was the queue signaled? */ @END -@REQ(fsync_free_shm_idx) - unsigned int shm_idx; +/* Get an event handle to be used for thread alerts with fast synchronization */ +@REQ(get_fast_alert_event) @REPLY + obj_handle_t handle; /* handle to the event */ @END diff --git a/server/queue.c b/server/queue.c index 87949f82da2..3fd7e620414 100644 --- a/server/queue.c +++ b/server/queue.c @@ -42,8 +42,6 @@ #include "process.h" #include "request.h" #include "user.h" -#include "esync.h" -#include "fsync.h" #define WM_NCMOUSEFIRST WM_NCMOUSEMOVE #define WM_NCMOUSELAST (WM_NCMOUSEFIRST+(WM_MOUSELAST-WM_MOUSEFIRST)) @@ -139,10 +137,8 @@ struct msg_queue timeout_t last_get_msg; /* time of last get message call */ int keystate_lock; /* owns an input keystate lock */ const queue_shm_t *shared; /* thread queue shared memory ptr */ - int esync_fd; /* esync file descriptor (signalled on message) */ - int esync_in_msgwait; /* our thread is currently waiting on us */ - unsigned int fsync_idx; - int fsync_in_msgwait; /* our thread is currently waiting on us */ + struct fast_sync *fast_sync; /* fast synchronization object */ + int in_fast_wait; /* are we in a client-side wait? 
*/ }; struct hotkey @@ -159,9 +155,8 @@ static void msg_queue_dump( struct object *obj, int verbose ); static int msg_queue_add_queue( struct object *obj, struct wait_queue_entry *entry ); static void msg_queue_remove_queue( struct object *obj, struct wait_queue_entry *entry ); static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int msg_queue_get_fsync_idx( struct object *obj, enum fsync_type *type ); static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ); +static struct fast_sync *msg_queue_get_fast_sync( struct object *obj ); static void msg_queue_destroy( struct object *obj ); static void msg_queue_poll_event( struct fd *fd, int event ); static void thread_input_dump( struct object *obj, int verbose ); @@ -176,8 +171,6 @@ static const struct object_ops msg_queue_ops = msg_queue_add_queue, /* add_queue */ msg_queue_remove_queue, /* remove_queue */ msg_queue_signaled, /* signaled */ - msg_queue_get_esync_fd, /* get_esync_fd */ - msg_queue_get_fsync_idx, /* get_fsync_idx */ msg_queue_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -190,6 +183,7 @@ static const struct object_ops msg_queue_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + msg_queue_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ msg_queue_destroy /* destroy */ }; @@ -215,8 +209,6 @@ static const struct object_ops thread_input_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -229,6 +221,7 @@ static const struct object_ops thread_input_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync 
*/ no_close_handle, /* close_handle */ thread_input_destroy /* destroy */ }; @@ -348,21 +341,14 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_ queue->last_get_msg = current_time; queue->keystate_lock = 0; queue->shared = thread->queue_shared; - queue->esync_fd = -1; - queue->esync_in_msgwait = 0; - queue->fsync_idx = 0; - queue->fsync_in_msgwait = 0; + queue->fast_sync = NULL; + queue->in_fast_wait = 0; list_init( &queue->send_result ); list_init( &queue->callback_result ); list_init( &queue->pending_timers ); list_init( &queue->expired_timers ); for (i = 0; i < NB_MSG_KINDS; i++) list_init( &queue->msg_list[i] ); - if (do_fsync()) - queue->fsync_idx = fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - queue->esync_fd = esync_create_fd( 0, 0 ); SHARED_WRITE_BEGIN( queue, queue_shm_t ) { @@ -706,7 +692,11 @@ static inline void set_queue_bits( struct msg_queue *queue, unsigned int bits ) } SHARED_WRITE_END - if (is_signaled( queue )) wake_up( &queue->obj, 0 ); + if (is_signaled( queue )) + { + wake_up( &queue->obj, 0 ); + fast_set_event( queue->fast_sync ); + } } /* clear some queue bits */ @@ -720,11 +710,8 @@ static inline void clear_queue_bits( struct msg_queue *queue, unsigned int bits queue->keystate_lock = 0; } - if (do_fsync() && !is_signaled( queue )) - fsync_clear( &queue->obj ); - - if (do_esync() && !is_signaled( queue )) - esync_clear( queue->esync_fd ); + if (!is_signaled( queue )) + fast_reset_event( queue->fast_sync ); SHARED_WRITE_BEGIN( queue, queue_shm_t ) { @@ -1253,11 +1240,8 @@ static int is_queue_hung( struct msg_queue *queue ) return 0; /* thread is waiting on queue -> not hung */ } - if (do_fsync() && queue->fsync_in_msgwait) - return 0; /* thread is waiting on queue in absentia -> not hung */ - - if (do_esync() && queue->esync_in_msgwait) - return 0; /* thread is waiting on queue in absentia -> not hung */ + if (queue->in_fast_wait) + return 0; /* thread is waiting on queue in absentia -> not hung */ return 1; 
} @@ -1314,32 +1298,29 @@ static int msg_queue_signaled( struct object *obj, struct wait_queue_entry *entr return ret || is_signaled( queue ); } -static int msg_queue_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct msg_queue *queue = (struct msg_queue *)obj; - *type = ESYNC_QUEUE; - return queue->esync_fd; -} - -static unsigned int msg_queue_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct msg_queue *queue = (struct msg_queue *)obj; - *type = FSYNC_QUEUE; - return queue->fsync_idx; -} - static void msg_queue_satisfied( struct object *obj, struct wait_queue_entry *entry ) { struct msg_queue *queue = (struct msg_queue *)obj; queue->wake_mask = 0; queue->changed_mask = 0; + fast_reset_event( queue->fast_sync ); SHARED_WRITE_BEGIN( queue, queue_shm_t ) { shared->wake_mask = queue->wake_mask; shared->changed_mask = queue->changed_mask; } - SHARED_WRITE_END + SHARED_WRITE_END; +} + +static struct fast_sync *msg_queue_get_fast_sync( struct object *obj ) +{ + struct msg_queue *queue = (struct msg_queue *)obj; + + if (!queue->fast_sync) + queue->fast_sync = fast_create_event( FAST_SYNC_QUEUE, is_signaled( queue ) ); + if (queue->fast_sync) grab_object( queue->fast_sync ); + return queue->fast_sync; } static void cleanup_msg_queue( struct msg_queue *queue ) @@ -1384,14 +1365,13 @@ static void cleanup_msg_queue( struct msg_queue *queue ) if (queue->hooks) release_object( queue->hooks ); if (queue->fd) release_object( queue->fd ); queue->destroyed = 1; - if (do_esync()) close( queue->esync_fd ); + if (queue->fast_sync) release_object( queue->fast_sync ); } static void msg_queue_destroy( struct object *obj ) { struct msg_queue *queue = (struct msg_queue *)obj; if (!queue->destroyed) cleanup_msg_queue( queue ); - if (queue->fsync_idx) fsync_free_shm_idx( queue->fsync_idx ); } /* free the message queue of a thread at thread exit */ @@ -1412,6 +1392,7 @@ static void msg_queue_poll_event( struct fd *fd, int event ) if (event & (POLLERR | 
POLLHUP)) set_fd_events( fd, -1 ); else set_fd_events( queue->fd, 0 ); wake_up( &queue->obj, 0 ); + fast_set_event( queue->fast_sync ); } static void thread_input_dump( struct object *obj, int verbose ) @@ -2962,15 +2943,15 @@ DECL_HANDLER(set_queue_mask) shared->changed_mask = queue->changed_mask; } SHARED_WRITE_END + + fast_reset_event( queue->fast_sync ); + } + else + { + wake_up( &queue->obj, 0 ); + fast_set_event( queue->fast_sync ); } - else wake_up( &queue->obj, 0 ); } - - if (do_fsync() && !is_signaled( queue )) - fsync_clear( &queue->obj ); - - if (do_esync() && !is_signaled( queue )) - esync_clear( queue->esync_fd ); } } @@ -2985,17 +2966,14 @@ DECL_HANDLER(get_queue_status) reply->changed_bits = queue->changed_bits; queue->changed_bits &= ~req->clear_bits; - if (do_fsync() && !is_signaled( queue )) - fsync_clear( &queue->obj ); - - if (do_esync() && !is_signaled( queue )) - esync_clear( queue->esync_fd ); SHARED_WRITE_BEGIN( queue, queue_shm_t ) { shared->changed_bits = queue->changed_bits; } SHARED_WRITE_END + if (!is_signaled( queue )) + fast_reset_event( queue->fast_sync ); } else reply->wake_bits = reply->changed_bits = 0; } @@ -3183,6 +3161,9 @@ DECL_HANDLER(get_message) } SHARED_WRITE_END + if (!is_signaled( queue )) + fast_reset_event( queue->fast_sync ); + /* then check for posted messages */ if ((filter & QS_POSTMESSAGE) && get_posted_message( queue, get_win, req->get_first, req->get_last, req->flags, reply )) @@ -3251,12 +3232,8 @@ DECL_HANDLER(get_message) set_error( STATUS_PENDING ); /* FIXME */ - if (do_fsync() && !is_signaled( queue )) - fsync_clear( &queue->obj ); - - if (do_esync() && !is_signaled( queue )) - esync_clear( queue->esync_fd ); + fast_reset_event( queue->fast_sync ); } @@ -4016,32 +3993,54 @@ DECL_HANDLER(update_rawinput_devices) process->rawinput_kbd = find_rawinput_device( process, 1, 6 ); } -DECL_HANDLER(esync_msgwait) +DECL_HANDLER(fast_select_queue) { - struct msg_queue *queue = get_current_queue(); + struct msg_queue 
*queue; - if (!queue) return; - queue->esync_in_msgwait = req->in_msgwait; + if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle, + SYNCHRONIZE, &msg_queue_ops ))) + return; + /* a thread can only wait on its own queue */ + if (current->queue != queue || queue->in_fast_wait) + { + set_error( STATUS_ACCESS_DENIED ); + } + else + { + if (current->process->idle_event && !(queue->wake_mask & QS_SMRESULT)) + set_event( current->process->idle_event ); - if (current->process->idle_event && !(queue->wake_mask & QS_SMRESULT)) - set_event( current->process->idle_event ); + if (queue->fd) + set_fd_events( queue->fd, POLLIN ); - /* and start/stop waiting on the driver */ - if (queue->fd) - set_fd_events( queue->fd, req->in_msgwait ? POLLIN : 0 ); + queue->in_fast_wait = 1; + } + + release_object( queue ); } -DECL_HANDLER(fsync_msgwait) +DECL_HANDLER(fast_unselect_queue) { - struct msg_queue *queue = get_current_queue(); + struct msg_queue *queue; - if (!queue) return; - queue->fsync_in_msgwait = req->in_msgwait; + if (!(queue = (struct msg_queue *)get_handle_obj( current->process, req->handle, + SYNCHRONIZE, &msg_queue_ops ))) + return; + + if (current->queue != queue || !queue->in_fast_wait) + { + set_error( STATUS_ACCESS_DENIED ); + } + else + { + if (queue->fd) + set_fd_events( queue->fd, 0 ); - if (current->process->idle_event && !(queue->wake_mask & QS_SMRESULT)) - set_event( current->process->idle_event ); + if (req->signaled) + msg_queue_satisfied( &queue->obj, NULL ); - /* and start/stop waiting on the driver */ - if (queue->fd) - set_fd_events( queue->fd, req->in_msgwait ? 
POLLIN : 0 ); + queue->in_fast_wait = 0; + } + + release_object( queue ); } diff --git a/server/registry.c b/server/registry.c index c8de2376989..c51315206e8 100644 --- a/server/registry.c +++ b/server/registry.c @@ -181,8 +181,6 @@ static const struct object_ops key_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -195,6 +193,7 @@ static const struct object_ops key_ops = key_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ key_close_handle, /* close_handle */ key_destroy /* destroy */ }; diff --git a/server/request.c b/server/request.c index 343e1a92e0e..8c50f993d25 100644 --- a/server/request.c +++ b/server/request.c @@ -90,8 +90,6 @@ static const struct object_ops master_socket_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -104,6 +102,7 @@ static const struct object_ops master_socket_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ master_socket_destroy /* destroy */ }; diff --git a/server/request.h b/server/request.h index ea9654fcf73..5bbdc99eff1 100644 --- a/server/request.h +++ b/server/request.h @@ -409,17 +409,11 @@ DECL_HANDLER(terminate_job); DECL_HANDLER(suspend_process); DECL_HANDLER(resume_process); DECL_HANDLER(get_next_thread); -DECL_HANDLER(create_esync); -DECL_HANDLER(open_esync); -DECL_HANDLER(get_esync_fd); -DECL_HANDLER(esync_msgwait); -DECL_HANDLER(get_esync_apc_fd); -DECL_HANDLER(create_fsync); -DECL_HANDLER(open_fsync); -DECL_HANDLER(get_fsync_idx); -DECL_HANDLER(fsync_msgwait); 
-DECL_HANDLER(get_fsync_apc_idx); -DECL_HANDLER(fsync_free_shm_idx); +DECL_HANDLER(get_linux_sync_device); +DECL_HANDLER(get_linux_sync_obj); +DECL_HANDLER(fast_select_queue); +DECL_HANDLER(fast_unselect_queue); +DECL_HANDLER(get_fast_alert_event); #ifdef WANT_REQUEST_HANDLERS @@ -716,17 +710,11 @@ static const req_handler req_handlers[REQ_NB_REQUESTS] = (req_handler)req_suspend_process, (req_handler)req_resume_process, (req_handler)req_get_next_thread, - (req_handler)req_create_esync, - (req_handler)req_open_esync, - (req_handler)req_get_esync_fd, - (req_handler)req_esync_msgwait, - (req_handler)req_get_esync_apc_fd, - (req_handler)req_create_fsync, - (req_handler)req_open_fsync, - (req_handler)req_get_fsync_idx, - (req_handler)req_fsync_msgwait, - (req_handler)req_get_fsync_apc_idx, - (req_handler)req_fsync_free_shm_idx, + (req_handler)req_get_linux_sync_device, + (req_handler)req_get_linux_sync_obj, + (req_handler)req_fast_select_queue, + (req_handler)req_fast_unselect_queue, + (req_handler)req_get_fast_alert_event, }; C_ASSERT( sizeof(abstime_t) == 8 ); @@ -2409,63 +2397,23 @@ C_ASSERT( FIELD_OFFSET(struct get_next_thread_request, flags) == 28 ); C_ASSERT( sizeof(struct get_next_thread_request) == 32 ); C_ASSERT( FIELD_OFFSET(struct get_next_thread_reply, handle) == 8 ); C_ASSERT( sizeof(struct get_next_thread_reply) == 16 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_request, access) == 12 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_request, initval) == 16 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_request, type) == 20 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_request, max) == 24 ); -C_ASSERT( sizeof(struct create_esync_request) == 32 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_reply, handle) == 8 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_reply, type) == 12 ); -C_ASSERT( FIELD_OFFSET(struct create_esync_reply, shm_idx) == 16 ); -C_ASSERT( sizeof(struct create_esync_reply) == 24 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_request, access) 
== 12 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_request, attributes) == 16 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_request, rootdir) == 20 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_request, type) == 24 ); -C_ASSERT( sizeof(struct open_esync_request) == 32 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_reply, handle) == 8 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_reply, type) == 12 ); -C_ASSERT( FIELD_OFFSET(struct open_esync_reply, shm_idx) == 16 ); -C_ASSERT( sizeof(struct open_esync_reply) == 24 ); -C_ASSERT( FIELD_OFFSET(struct get_esync_fd_request, handle) == 12 ); -C_ASSERT( sizeof(struct get_esync_fd_request) == 16 ); -C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, type) == 8 ); -C_ASSERT( FIELD_OFFSET(struct get_esync_fd_reply, shm_idx) == 12 ); -C_ASSERT( sizeof(struct get_esync_fd_reply) == 16 ); -C_ASSERT( FIELD_OFFSET(struct esync_msgwait_request, in_msgwait) == 12 ); -C_ASSERT( sizeof(struct esync_msgwait_request) == 16 ); -C_ASSERT( sizeof(struct get_esync_apc_fd_request) == 16 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_request, access) == 12 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_request, low) == 16 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_request, high) == 20 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_request, type) == 24 ); -C_ASSERT( sizeof(struct create_fsync_request) == 32 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_reply, handle) == 8 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_reply, type) == 12 ); -C_ASSERT( FIELD_OFFSET(struct create_fsync_reply, shm_idx) == 16 ); -C_ASSERT( sizeof(struct create_fsync_reply) == 24 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_request, access) == 12 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_request, attributes) == 16 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_request, rootdir) == 20 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_request, type) == 24 ); -C_ASSERT( sizeof(struct open_fsync_request) == 32 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_reply, handle) == 8 
); -C_ASSERT( FIELD_OFFSET(struct open_fsync_reply, type) == 12 ); -C_ASSERT( FIELD_OFFSET(struct open_fsync_reply, shm_idx) == 16 ); -C_ASSERT( sizeof(struct open_fsync_reply) == 24 ); -C_ASSERT( FIELD_OFFSET(struct get_fsync_idx_request, handle) == 12 ); -C_ASSERT( sizeof(struct get_fsync_idx_request) == 16 ); -C_ASSERT( FIELD_OFFSET(struct get_fsync_idx_reply, type) == 8 ); -C_ASSERT( FIELD_OFFSET(struct get_fsync_idx_reply, shm_idx) == 12 ); -C_ASSERT( sizeof(struct get_fsync_idx_reply) == 16 ); -C_ASSERT( FIELD_OFFSET(struct fsync_msgwait_request, in_msgwait) == 12 ); -C_ASSERT( sizeof(struct fsync_msgwait_request) == 16 ); -C_ASSERT( sizeof(struct get_fsync_apc_idx_request) == 16 ); -C_ASSERT( FIELD_OFFSET(struct get_fsync_apc_idx_reply, shm_idx) == 8 ); -C_ASSERT( sizeof(struct get_fsync_apc_idx_reply) == 16 ); -C_ASSERT( FIELD_OFFSET(struct fsync_free_shm_idx_request, shm_idx) == 12 ); -C_ASSERT( sizeof(struct fsync_free_shm_idx_request) == 16 ); -C_ASSERT( sizeof(struct fsync_free_shm_idx_reply) == 8 ); +C_ASSERT( sizeof(struct get_linux_sync_device_request) == 16 ); +C_ASSERT( FIELD_OFFSET(struct get_linux_sync_device_reply, handle) == 8 ); +C_ASSERT( sizeof(struct get_linux_sync_device_reply) == 16 ); +C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_request, handle) == 12 ); +C_ASSERT( sizeof(struct get_linux_sync_obj_request) == 16 ); +C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, handle) == 8 ); +C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, type) == 12 ); +C_ASSERT( FIELD_OFFSET(struct get_linux_sync_obj_reply, access) == 16 ); +C_ASSERT( sizeof(struct get_linux_sync_obj_reply) == 24 ); +C_ASSERT( FIELD_OFFSET(struct fast_select_queue_request, handle) == 12 ); +C_ASSERT( sizeof(struct fast_select_queue_request) == 16 ); +C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, handle) == 12 ); +C_ASSERT( FIELD_OFFSET(struct fast_unselect_queue_request, signaled) == 16 ); +C_ASSERT( sizeof(struct fast_unselect_queue_request) == 
24 ); +C_ASSERT( sizeof(struct get_fast_alert_event_request) == 16 ); +C_ASSERT( FIELD_OFFSET(struct get_fast_alert_event_reply, handle) == 8 ); +C_ASSERT( sizeof(struct get_fast_alert_event_reply) == 16 ); #endif /* WANT_REQUEST_HANDLERS */ diff --git a/server/semaphore.c b/server/semaphore.c index d354892c224..99409198d68 100644 --- a/server/semaphore.c +++ b/server/semaphore.c @@ -55,12 +55,15 @@ struct semaphore struct object obj; /* object header */ unsigned int count; /* current count */ unsigned int max; /* maximum possible count */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void semaphore_dump( struct object *obj, int verbose ); static int semaphore_signaled( struct object *obj, struct wait_queue_entry *entry ); static void semaphore_satisfied( struct object *obj, struct wait_queue_entry *entry ); static int semaphore_signal( struct object *obj, unsigned int access ); +static struct fast_sync *semaphore_get_fast_sync( struct object *obj ); +static void semaphore_destroy( struct object *obj ); static const struct object_ops semaphore_ops = { @@ -70,8 +73,6 @@ static const struct object_ops semaphore_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ semaphore_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ semaphore_satisfied, /* satisfied */ semaphore_signal, /* signal */ no_get_fd, /* get_fd */ @@ -84,8 +85,9 @@ static const struct object_ops semaphore_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + semaphore_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ - no_destroy /* destroy */ + semaphore_destroy /* destroy */ }; @@ -107,6 +109,7 @@ static struct semaphore *create_semaphore( struct object *root, const struct uni /* initialize it if it didn't already exist */ sem->count = initial; sem->max = max; + sem->fast_sync = NULL; } } return sem; @@ -169,6 +172,23 @@ static int 
semaphore_signal( struct object *obj, unsigned int access ) return release_semaphore( sem, 1, NULL ); } +static struct fast_sync *semaphore_get_fast_sync( struct object *obj ) +{ + struct semaphore *semaphore = (struct semaphore *)obj; + + if (!semaphore->fast_sync) + semaphore->fast_sync = fast_create_semaphore( semaphore->count, semaphore->max ); + if (semaphore->fast_sync) grab_object( semaphore->fast_sync ); + return semaphore->fast_sync; +} + +static void semaphore_destroy( struct object *obj ) +{ + struct semaphore *semaphore = (struct semaphore *)obj; + + if (semaphore->fast_sync) release_object( semaphore->fast_sync ); +} + /* create a semaphore */ DECL_HANDLER(create_semaphore) { diff --git a/server/serial.c b/server/serial.c index 1915d00a977..5c210d10a80 100644 --- a/server/serial.c +++ b/server/serial.c @@ -85,8 +85,6 @@ static const struct object_ops serial_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ serial_get_fd, /* get_fd */ @@ -99,6 +97,7 @@ static const struct object_ops serial_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ serial_destroy /* destroy */ }; diff --git a/server/signal.c b/server/signal.c index 802b7f936b9..e5def3dc899 100644 --- a/server/signal.c +++ b/server/signal.c @@ -62,8 +62,6 @@ static const struct object_ops handler_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -76,6 +74,7 @@ static const struct object_ops handler_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ 
no_close_handle, /* close_handle */ handler_destroy /* destroy */ }; diff --git a/server/sock.c b/server/sock.c index beb6b2921b3..83dfa29239b 100644 --- a/server/sock.c +++ b/server/sock.c @@ -471,8 +471,6 @@ static const struct object_ops sock_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ sock_get_fd, /* get_fd */ @@ -485,6 +483,7 @@ static const struct object_ops sock_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + default_fd_get_fast_sync, /* get_fast_sync */ sock_close_handle, /* close_handle */ sock_destroy /* destroy */ }; @@ -3604,8 +3603,6 @@ static const struct object_ops ifchange_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ ifchange_get_fd, /* get_fd */ @@ -3618,6 +3615,7 @@ static const struct object_ops ifchange_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ ifchange_destroy /* destroy */ }; @@ -3827,8 +3825,6 @@ static const struct object_ops socket_device_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -3841,6 +3837,7 @@ static const struct object_ops socket_device_ops = default_unlink_name, /* unlink_name */ socket_device_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; diff --git a/server/symlink.c b/server/symlink.c index 47098fe5823..4a7cf68f269 100644 --- 
a/server/symlink.c +++ b/server/symlink.c @@ -71,8 +71,6 @@ static const struct object_ops symlink_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -85,6 +83,7 @@ static const struct object_ops symlink_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ symlink_destroy /* destroy */ }; diff --git a/server/thread.c b/server/thread.c index 71c00fb6e35..2ded8ae6c68 100644 --- a/server/thread.c +++ b/server/thread.c @@ -54,8 +54,6 @@ #include "user.h" #include "security.h" #include "unicode.h" -#include "esync.h" -#include "fsync.h" /* thread queues */ @@ -102,8 +100,6 @@ static const struct object_ops thread_apc_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ thread_apc_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -116,6 +112,7 @@ static const struct object_ops thread_apc_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ thread_apc_destroy /* destroy */ }; @@ -146,8 +143,6 @@ static const struct object_ops context_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ context_signaled, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -160,6 +155,7 @@ static const struct object_ops context_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; 
@@ -184,11 +180,10 @@ struct type_descr thread_type = static void dump_thread( struct object *obj, int verbose ); static int thread_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int thread_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int thread_get_fsync_idx( struct object *obj, enum fsync_type *type ); static unsigned int thread_map_access( struct object *obj, unsigned int access ); static void thread_poll_event( struct fd *fd, int event ); static struct list *thread_get_kernel_obj_list( struct object *obj ); +static struct fast_sync *thread_get_fast_sync( struct object *obj ); static void destroy_thread( struct object *obj ); static const struct object_ops thread_ops = @@ -199,8 +194,6 @@ static const struct object_ops thread_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ thread_signaled, /* signaled */ - thread_get_esync_fd, /* get_esync_fd */ - thread_get_fsync_idx, /* get_fsync_idx */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -213,6 +206,7 @@ static const struct object_ops thread_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ thread_get_kernel_obj_list, /* get_kernel_obj_list */ + thread_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ destroy_thread /* destroy */ }; @@ -268,9 +262,6 @@ static inline void init_thread_structure( struct thread *thread ) thread->context = NULL; thread->teb = 0; thread->entry_point = 0; - thread->esync_fd = -1; - thread->esync_apc_fd = -1; - thread->fsync_idx = 0; thread->system_regs = 0; thread->queue = NULL; thread->wait = NULL; @@ -291,6 +282,8 @@ static inline void init_thread_structure( struct thread *thread ) thread->token = NULL; thread->desc = NULL; thread->desc_len = 0; + thread->fast_sync = NULL; + thread->fast_alert_event = NULL; thread->queue_shared_mapping = NULL; thread->queue_shared = NULL; thread->input_shared_mapping = NULL; @@ -484,20 +477,6 @@ struct thread 
*create_thread( int fd, struct process *process, const struct secu } } - thread->fsync_idx = 0; - - if (do_fsync()) - { - thread->fsync_idx = fsync_alloc_shm( 0, 0 ); - thread->fsync_apc_idx = fsync_alloc_shm( 0, 0 ); - } - - if (do_esync()) - { - thread->esync_fd = esync_create_fd( 0, 0 ); - thread->esync_apc_fd = esync_create_fd( 0, 0 ); - } - set_fd_events( thread->request_fd, POLLIN ); /* start listening to events */ add_process_thread( thread->process, thread ); return thread; @@ -522,6 +501,16 @@ static struct list *thread_get_kernel_obj_list( struct object *obj ) return &thread->kernel_object; } +static struct fast_sync *thread_get_fast_sync( struct object *obj ) +{ + struct thread *thread = (struct thread *)obj; + + if (!thread->fast_sync) + thread->fast_sync = fast_create_event( FAST_SYNC_MANUAL_SERVER, thread->state == TERMINATED ); + if (thread->fast_sync) grab_object( thread->fast_sync ); + return thread->fast_sync; +} + /* cleanup everything that is no longer needed by a dead thread */ /* used by destroy_thread and kill_thread */ static void cleanup_thread( struct thread *thread ) @@ -582,13 +571,8 @@ static void destroy_thread( struct object *obj ) if (thread->id) free_ptid( thread->id ); if (thread->token) release_object( thread->token ); - if (do_esync()) - close( thread->esync_fd ); - if (thread->fsync_idx) - { - fsync_free_shm_idx( thread->fsync_idx ); - fsync_free_shm_idx( thread->fsync_apc_idx ); - } + if (thread->fast_sync) release_object( thread->fast_sync ); + if (thread->fast_alert_event) release_object( thread->fast_alert_event ); } /* dump a thread on stdout for debugging purposes */ @@ -607,20 +591,6 @@ static int thread_signaled( struct object *obj, struct wait_queue_entry *entry ) return (mythread->state == TERMINATED); } -static int thread_get_esync_fd( struct object *obj, enum esync_type *type ) -{ - struct thread *thread = (struct thread *)obj; - *type = ESYNC_MANUAL_SERVER; - return thread->esync_fd; -} - -static unsigned int 
thread_get_fsync_idx( struct object *obj, enum fsync_type *type ) -{ - struct thread *thread = (struct thread *)obj; - *type = FSYNC_MANUAL_SERVER; - return thread->fsync_idx; -} - static unsigned int thread_map_access( struct object *obj, unsigned int access ) { access = default_map_access( obj, access ); @@ -1295,12 +1265,6 @@ void wake_up( struct object *obj, int max ) struct list *ptr; int ret; - if (do_fsync()) - fsync_wake_up( obj ); - - if (do_esync()) - esync_wake_up( obj ); - LIST_FOR_EACH( ptr, &obj->wait_queue ) { struct wait_queue_entry *entry = LIST_ENTRY( ptr, struct wait_queue_entry, entry ); @@ -1388,11 +1352,8 @@ static int queue_apc( struct process *process, struct thread *thread, struct thr { wake_thread( thread ); - if (do_fsync() && queue == &thread->user_apc) - fsync_wake_futex( thread->fsync_apc_idx ); - - if (do_esync() && queue == &thread->user_apc) - esync_wake_fd( thread->esync_apc_fd ); + if (apc->call.type == APC_USER && thread->fast_alert_event) + set_event( thread->fast_alert_event ); } return 1; @@ -1425,6 +1386,8 @@ void thread_cancel_apc( struct thread *thread, struct object *owner, enum apc_ty apc->executed = 1; wake_up( &apc->obj, 0 ); release_object( apc ); + if (list_empty( &thread->user_apc ) && thread->fast_alert_event) + reset_event( thread->fast_alert_event ); return; } } @@ -1439,14 +1402,10 @@ static struct thread_apc *thread_dequeue_apc( struct thread *thread, int system { apc = LIST_ENTRY( ptr, struct thread_apc, entry ); list_remove( ptr ); - } - - if (do_fsync() && list_empty( &thread->system_apc ) && list_empty( &thread->user_apc )) - fsync_clear_futex( thread->fsync_apc_idx ); - - if (do_esync() && list_empty( &thread->system_apc ) && list_empty( &thread->user_apc )) - esync_clear( thread->esync_apc_fd ); + if (list_empty( &thread->user_apc ) && thread->fast_alert_event) + reset_event( thread->fast_alert_event ); + } return apc; } @@ -1542,10 +1501,7 @@ void kill_thread( struct thread *thread, int violent_death ) } 
kill_console_processes( thread, 0 ); abandon_mutexes( thread ); - if (do_fsync()) - fsync_abandon_mutexes( thread ); - if (do_esync()) - esync_abandon_mutexes( thread ); + fast_set_event( thread->fast_sync ); wake_up( &thread->obj, 0 ); if (violent_death) send_thread_signal( thread, SIGQUIT ); cleanup_thread( thread ); @@ -2316,3 +2272,12 @@ DECL_HANDLER(get_next_thread) set_error( STATUS_NO_MORE_ENTRIES ); release_object( process ); } + +DECL_HANDLER(get_fast_alert_event) +{ + if (!current->fast_alert_event) + current->fast_alert_event = create_event( NULL, NULL, 0, 1, !list_empty( ¤t->user_apc ), NULL ); + + if (current->fast_alert_event) + reply->handle = alloc_handle( current->process, current->fast_alert_event, SYNCHRONIZE, 0 ); +} diff --git a/server/thread.h b/server/thread.h index 416b01db318..d0f9efce675 100644 --- a/server/thread.h +++ b/server/thread.h @@ -55,10 +55,6 @@ struct thread struct process *process; thread_id_t id; /* thread id */ struct list mutex_list; /* list of currently owned mutexes */ - int esync_fd; /* esync file descriptor (signalled on exit) */ - int esync_apc_fd; /* esync apc fd (signalled when APCs are present) */ - unsigned int fsync_idx; - unsigned int fsync_apc_idx; unsigned int system_regs; /* which system regs have been set */ struct msg_queue *queue; /* message queue */ struct thread_wait *wait; /* current wait condition if sleeping */ @@ -100,6 +96,8 @@ struct thread struct object *input_shared_mapping; /* thread input shared memory mapping */ input_shm_t *input_shared; /* thread input shared memory ptr */ struct completion_wait *completion_wait; /* completion port wait object the thread is associated with */ + struct fast_sync *fast_sync; /* fast synchronization object */ + struct event *fast_alert_event; /* fast synchronization alert event */ }; extern struct thread *current; diff --git a/server/timer.c b/server/timer.c index 884ace9376f..aee64de93f1 100644 --- a/server/timer.c +++ b/server/timer.c @@ -35,8 +35,6 @@ 
#include "file.h" #include "handle.h" #include "request.h" -#include "esync.h" -#include "fsync.h" static const WCHAR timer_name[] = {'T','i','m','e','r'}; @@ -63,15 +61,13 @@ struct timer struct thread *thread; /* thread that set the APC function */ client_ptr_t callback; /* callback APC function */ client_ptr_t arg; /* callback argument */ - int esync_fd; /* esync file descriptor */ - unsigned int fsync_idx; /* fsync shm index */ + struct fast_sync *fast_sync; /* fast synchronization object */ }; static void timer_dump( struct object *obj, int verbose ); static int timer_signaled( struct object *obj, struct wait_queue_entry *entry ); -static int timer_get_esync_fd( struct object *obj, enum esync_type *type ); -static unsigned int timer_get_fsync_idx( struct object *obj, enum fsync_type *type ); static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ); +static struct fast_sync *timer_get_fast_sync( struct object *obj ); static void timer_destroy( struct object *obj ); static const struct object_ops timer_ops = @@ -82,8 +78,6 @@ static const struct object_ops timer_ops = add_queue, /* add_queue */ remove_queue, /* remove_queue */ timer_signaled, /* signaled */ - timer_get_esync_fd, /* get_esync_fd */ - timer_get_fsync_idx, /* get_fsync_idx */ timer_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -96,6 +90,7 @@ static const struct object_ops timer_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + timer_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ timer_destroy /* destroy */ }; @@ -118,14 +113,8 @@ static struct timer *create_timer( struct object *root, const struct unicode_str timer->period = 0; timer->timeout = NULL; timer->thread = NULL; - timer->esync_fd = -1; - timer->fsync_idx = 0; - if (do_fsync()) - timer->fsync_idx = fsync_alloc_shm( 0, 0 ); - - if (do_esync()) - timer->esync_fd = esync_create_fd( 0, 0 ); + 
timer->fast_sync = NULL; } } return timer; @@ -167,6 +156,7 @@ static void timer_callback( void *private ) /* wake up waiters */ timer->signaled = 1; wake_up( &timer->obj, 0 ); + fast_set_event( timer->fast_sync ); } /* cancel a running timer */ @@ -198,11 +188,7 @@ static int set_timer( struct timer *timer, timeout_t expire, unsigned int period period = 0; /* period doesn't make any sense for a manual timer */ timer->signaled = 0; - if (do_fsync()) - fsync_clear( &timer->obj ); - - if (do_esync()) - esync_clear( timer->esync_fd ); + fast_reset_event( timer->fast_sync ); } timer->when = (expire <= 0) ? expire - monotonic_time : max( expire, current_time ); timer->period = period; @@ -230,25 +216,24 @@ static int timer_signaled( struct object *obj, struct wait_queue_entry *entry ) return timer->signaled; } -static int timer_get_esync_fd( struct object *obj, enum esync_type *type ) +static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ) { struct timer *timer = (struct timer *)obj; - *type = timer->manual ? ESYNC_MANUAL_SERVER : ESYNC_AUTO_SERVER; - return timer->esync_fd; + assert( obj->ops == &timer_ops ); + if (!timer->manual) timer->signaled = 0; } -static unsigned int timer_get_fsync_idx( struct object *obj, enum fsync_type *type ) +static struct fast_sync *timer_get_fast_sync( struct object *obj ) { struct timer *timer = (struct timer *)obj; - *type = timer->manual ? FSYNC_MANUAL_SERVER : FSYNC_AUTO_SERVER; - return timer->fsync_idx; -} -static void timer_satisfied( struct object *obj, struct wait_queue_entry *entry ) -{ - struct timer *timer = (struct timer *)obj; - assert( obj->ops == &timer_ops ); - if (!timer->manual) timer->signaled = 0; + if (!timer->fast_sync) + { + enum fast_sync_type type = timer->manual ? 
FAST_SYNC_MANUAL_SERVER : FAST_SYNC_AUTO_SERVER; + timer->fast_sync = fast_create_event( type, timer->signaled ); + } + if (timer->fast_sync) grab_object( timer->fast_sync ); + return timer->fast_sync; } static void timer_destroy( struct object *obj ) @@ -258,8 +243,7 @@ static void timer_destroy( struct object *obj ) if (timer->timeout) remove_timeout_user( timer->timeout ); if (timer->thread) release_object( timer->thread ); - if (do_esync()) close( timer->esync_fd ); - if (timer->fsync_idx) fsync_free_shm_idx( timer->fsync_idx ); + if (timer->fast_sync) release_object( timer->fast_sync ); } /* create a timer */ diff --git a/server/token.c b/server/token.c index ec2d7539262..a4a707b55a4 100644 --- a/server/token.c +++ b/server/token.c @@ -148,8 +148,6 @@ static const struct object_ops token_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -162,6 +160,7 @@ static const struct object_ops token_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ token_destroy /* destroy */ }; diff --git a/server/trace.c b/server/trace.c index abb2182ecfc..8c49073ecd2 100644 --- a/server/trace.c +++ b/server/trace.c @@ -4839,118 +4839,57 @@ static void dump_get_next_thread_reply( const struct get_next_thread_reply *req fprintf( stderr, " handle=%04x", req->handle ); } -static void dump_create_esync_request( const struct create_esync_request *req ) +static void dump_get_linux_sync_device_request( const struct get_linux_sync_device_request *req ) { - fprintf( stderr, " access=%08x", req->access ); - fprintf( stderr, ", initval=%d", req->initval ); - fprintf( stderr, ", type=%d", req->type ); - fprintf( stderr, ", max=%d", req->max ); - dump_varargs_object_attributes( ", objattr=", cur_size ); } 
-static void dump_create_esync_reply( const struct create_esync_reply *req ) +static void dump_get_linux_sync_device_reply( const struct get_linux_sync_device_reply *req ) { fprintf( stderr, " handle=%04x", req->handle ); - fprintf( stderr, ", type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); -} - -static void dump_open_esync_request( const struct open_esync_request *req ) -{ - fprintf( stderr, " access=%08x", req->access ); - fprintf( stderr, ", attributes=%08x", req->attributes ); - fprintf( stderr, ", rootdir=%04x", req->rootdir ); - fprintf( stderr, ", type=%d", req->type ); - dump_varargs_unicode_str( ", name=", cur_size ); } -static void dump_open_esync_reply( const struct open_esync_reply *req ) +static void dump_get_linux_sync_obj_request( const struct get_linux_sync_obj_request *req ) { fprintf( stderr, " handle=%04x", req->handle ); - fprintf( stderr, ", type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); -} - -static void dump_get_esync_fd_request( const struct get_esync_fd_request *req ) -{ - fprintf( stderr, " handle=%04x", req->handle ); -} - -static void dump_get_esync_fd_reply( const struct get_esync_fd_reply *req ) -{ - fprintf( stderr, " type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); -} - -static void dump_esync_msgwait_request( const struct esync_msgwait_request *req ) -{ - fprintf( stderr, " in_msgwait=%d", req->in_msgwait ); -} - -static void dump_get_esync_apc_fd_request( const struct get_esync_apc_fd_request *req ) -{ } -static void dump_create_fsync_request( const struct create_fsync_request *req ) -{ - fprintf( stderr, " access=%08x", req->access ); - fprintf( stderr, ", low=%d", req->low ); - fprintf( stderr, ", high=%d", req->high ); - fprintf( stderr, ", type=%d", req->type ); - dump_varargs_object_attributes( ", objattr=", cur_size ); -} - -static void dump_create_fsync_reply( const struct create_fsync_reply *req ) +static void dump_get_linux_sync_obj_reply( 
const struct get_linux_sync_obj_reply *req ) { fprintf( stderr, " handle=%04x", req->handle ); fprintf( stderr, ", type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); -} - -static void dump_open_fsync_request( const struct open_fsync_request *req ) -{ - fprintf( stderr, " access=%08x", req->access ); - fprintf( stderr, ", attributes=%08x", req->attributes ); - fprintf( stderr, ", rootdir=%04x", req->rootdir ); - fprintf( stderr, ", type=%d", req->type ); - dump_varargs_unicode_str( ", name=", cur_size ); + fprintf( stderr, ", access=%08x", req->access ); } -static void dump_open_fsync_reply( const struct open_fsync_reply *req ) +static void dump_fast_select_queue_request( const struct fast_select_queue_request *req ) { fprintf( stderr, " handle=%04x", req->handle ); - fprintf( stderr, ", type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); } -static void dump_get_fsync_idx_request( const struct get_fsync_idx_request *req ) +static void dump_fast_unselect_queue_request( const struct fast_unselect_queue_request *req ) { fprintf( stderr, " handle=%04x", req->handle ); + fprintf( stderr, ", signaled=%d", req->signaled ); } -static void dump_get_fsync_idx_reply( const struct get_fsync_idx_reply *req ) -{ - fprintf( stderr, " type=%d", req->type ); - fprintf( stderr, ", shm_idx=%08x", req->shm_idx ); -} - -static void dump_fsync_msgwait_request( const struct fsync_msgwait_request *req ) +static void dump_set_keyboard_repeat_request( const struct set_keyboard_repeat_request *req ) { - fprintf( stderr, " in_msgwait=%d", req->in_msgwait ); + fprintf( stderr, " enable=%d", req->enable ); + fprintf( stderr, ", delay=%d", req->delay ); + fprintf( stderr, ", period=%d", req->period ); } -static void dump_get_fsync_apc_idx_request( const struct get_fsync_apc_idx_request *req ) +static void dump_set_keyboard_repeat_reply( const struct set_keyboard_repeat_reply *req ) { + fprintf( stderr, " enable=%d", req->enable ); } -static void 
dump_get_fsync_apc_idx_reply( const struct get_fsync_apc_idx_reply *req ) +static void dump_get_fast_alert_event_request( const struct get_fast_alert_event_request *req ) { - fprintf( stderr, " shm_idx=%08x", req->shm_idx ); } -static void dump_fsync_free_shm_idx_request( const struct fsync_free_shm_idx_request *req ) +static void dump_get_fast_alert_event_reply( const struct get_fast_alert_event_reply *req ) { - fprintf( stderr, " shm_idx=%08x", req->shm_idx ); + fprintf( stderr, " handle=%04x", req->handle ); } static const dump_func req_dumpers[REQ_NB_REQUESTS] = { @@ -5244,17 +5183,11 @@ static const dump_func req_dumpers[REQ_NB_REQUESTS] = { (dump_func)dump_suspend_process_request, (dump_func)dump_resume_process_request, (dump_func)dump_get_next_thread_request, - (dump_func)dump_create_esync_request, - (dump_func)dump_open_esync_request, - (dump_func)dump_get_esync_fd_request, - (dump_func)dump_esync_msgwait_request, - (dump_func)dump_get_esync_apc_fd_request, - (dump_func)dump_create_fsync_request, - (dump_func)dump_open_fsync_request, - (dump_func)dump_get_fsync_idx_request, - (dump_func)dump_fsync_msgwait_request, - (dump_func)dump_get_fsync_apc_idx_request, - (dump_func)dump_fsync_free_shm_idx_request, + (dump_func)dump_get_linux_sync_device_request, + (dump_func)dump_get_linux_sync_obj_request, + (dump_func)dump_fast_select_queue_request, + (dump_func)dump_fast_unselect_queue_request, + (dump_func)dump_get_fast_alert_event_request, }; static const dump_func reply_dumpers[REQ_NB_REQUESTS] = { @@ -5548,17 +5481,9 @@ static const dump_func reply_dumpers[REQ_NB_REQUESTS] = { NULL, NULL, (dump_func)dump_get_next_thread_reply, - (dump_func)dump_create_esync_reply, - (dump_func)dump_open_esync_reply, - (dump_func)dump_get_esync_fd_reply, - NULL, - NULL, - (dump_func)dump_create_fsync_reply, - (dump_func)dump_open_fsync_reply, - (dump_func)dump_get_fsync_idx_reply, - NULL, - (dump_func)dump_get_fsync_apc_idx_reply, - NULL, + 
(dump_func)dump_get_linux_sync_device_reply, + (dump_func)dump_get_linux_sync_obj_reply, + (dump_func)dump_get_fast_alert_event_reply, }; static const char * const req_names[REQ_NB_REQUESTS] = { @@ -5852,17 +5777,11 @@ static const char * const req_names[REQ_NB_REQUESTS] = { "suspend_process", "resume_process", "get_next_thread", - "create_esync", - "open_esync", - "get_esync_fd", - "esync_msgwait", - "get_esync_apc_fd", - "create_fsync", - "open_fsync", - "get_fsync_idx", - "fsync_msgwait", - "get_fsync_apc_idx", - "fsync_free_shm_idx", + "get_linux_sync_device", + "get_linux_sync_obj", + "fast_select_queue", + "fast_unselect_queue", + "get_fast_alert_event", }; static const struct diff --git a/server/window.c b/server/window.c index 184d4e2e212..7125919771c 100644 --- a/server/window.c +++ b/server/window.c @@ -107,8 +107,6 @@ static const struct object_ops window_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -121,6 +119,7 @@ static const struct object_ops window_ops = NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ no_close_handle, /* close_handle */ window_destroy /* destroy */ }; diff --git a/server/winstation.c b/server/winstation.c index 875e87ed6b2..960e3078cd3 100644 --- a/server/winstation.c +++ b/server/winstation.c @@ -76,8 +76,6 @@ static const struct object_ops winstation_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -90,6 +88,7 @@ static const struct object_ops winstation_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ 
winstation_close_handle, /* close_handle */ winstation_destroy /* destroy */ }; @@ -118,8 +117,6 @@ static const struct object_ops desktop_ops = no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ - NULL, /* get_esync_fd */ - NULL, /* get_fsync_idx */ NULL, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ @@ -132,6 +129,7 @@ static const struct object_ops desktop_ops = default_unlink_name, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ + no_get_fast_sync, /* get_fast_sync */ desktop_close_handle, /* close_handle */ desktop_destroy /* destroy */ }; -- 2.47.0