diff --git a/src/common/kevent.c b/src/common/kevent.c
index 79602420..51f347b9 100644
--- a/src/common/kevent.c
+++ b/src/common/kevent.c
@@ -331,9 +331,11 @@ kevent_copyin(struct kqueue *kq, const struct kevent changelist[], int nchanges,
 
 #ifndef _WIN32
 static void
-kevent_release_kq_mutex(void *kq)
+kevent_release_kq_mutex(void *arg)
 {
-    kqueue_unlock((struct kqueue *)kq);
+    struct kqueue *kq = arg;
+    dbg_printf("Unlocking kq=%p due to cancellation", kq);
+    kqueue_unlock(kq);
 }
 #endif
 
@@ -367,7 +369,7 @@ kevent(int kqfd,
     if (!changelist) changelist = null_kev;
 
 #ifndef _WIN32
-    prev_cancel_state = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &prev_cancel_state);
 #endif
     /*
      * Grab the global mutex. This prevents
@@ -439,8 +441,10 @@ kevent(int kqfd,
          */
 #ifndef _WIN32
         (void)pthread_setcancelstate(prev_cancel_state, NULL);
-        if (prev_cancel_state == PTHREAD_CANCEL_ENABLE)
+        if (prev_cancel_state == PTHREAD_CANCEL_ENABLE) {
+            dbg_printf("Checking for deferred cancellations");
             pthread_testcancel();
+        }
 #endif
         rv = kqops.kevent_wait(kq, nevents, timeout);
 #ifndef _WIN32
@@ -482,16 +486,23 @@ kevent(int kqfd,
 
 out:
 #ifndef _WIN32
-    pthread_cleanup_pop(0);
+    /*
+     * Test for cancellations first, so we don't
+     * double unlock the kqueue.
+     */
+    pthread_setcancelstate(prev_cancel_state, NULL);
+    if (prev_cancel_state == PTHREAD_CANCEL_ENABLE) {
+        dbg_printf("Checking for deferred cancellations");
+        pthread_testcancel();
+    }
 #endif
-    kqueue_unlock(kq);
-    dbg_printf("--- END kevent %u ret %d ---", myid, rv);
 
 #ifndef _WIN32
-    pthread_setcancelstate(prev_cancel_state, NULL);
-    if (prev_cancel_state == PTHREAD_CANCEL_ENABLE)
-        pthread_testcancel();
+    pthread_cleanup_pop(0);
 #endif
+    kqueue_unlock(kq);
+    dbg_printf("--- END kevent %u ret %d ---", myid, rv);
+
 
     return (rv);
 }
diff --git a/src/common/kqueue.c b/src/common/kqueue.c
index b154720a..c532258d 100644
--- a/src/common/kqueue.c
+++ b/src/common/kqueue.c
@@ -190,6 +190,8 @@ libkqueue_parent_fork(void)
     if (!libkqueue_fork_cleanup_active)
         return;
 
+    dbg_puts("resuming execution in parent");
+
     tracing_mutex_unlock(&kq_mtx);
 }
 
@@ -359,7 +361,7 @@ kqueue(void)
 #endif
 
 #ifndef _WIN32
-    prev_cancel_state = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &prev_cancel_state);
 #endif
     kq = calloc(1, sizeof(*kq));
     if (kq == NULL)
diff --git a/src/common/private.h b/src/common/private.h
index e516ae13..6e4b9fec 100644
--- a/src/common/private.h
+++ b/src/common/private.h
@@ -78,10 +78,10 @@ struct evfilt_data;
  *
  */
 #ifndef LIST_FOREACH_SAFE
-#define LIST_FOREACH_SAFE(var, head, field, tvar) \
-    for ((var) = LIST_FIRST((head)); \
-        (var) && ((tvar) = LIST_NEXT((var), field), 1); \
-        (var) = (tvar))
+#define LIST_FOREACH_SAFE(var, head, field, tvar)        \
+    for ((var) = LIST_FIRST((head));                     \
+         (var) && ((tvar) = LIST_NEXT((var), field), 1); \
+         (var) = (tvar))
 #endif
 
 /** Convenience macros
@@ -666,8 +666,16 @@ extern unsigned int kq_cnt;
  * kqueue internal API
  */
 #define kqueue_mutex_assert(kq, state) tracing_mutex_assert(&(kq)->kq_mtx, state)
-#define kqueue_lock(kq)                tracing_mutex_lock(&(kq)->kq_mtx)
-#define kqueue_unlock(kq)              tracing_mutex_unlock(&(kq)->kq_mtx)
+
+#define kqueue_lock(kq) do { \
+        dbg_printf("locking kq=%p", kq); \
+        tracing_mutex_lock(&(kq)->kq_mtx); \
+    } while(0)
+
+#define kqueue_unlock(kq) do { \
+        dbg_printf("unlocking kq=%p", kq); \
+        tracing_mutex_unlock(&(kq)->kq_mtx); \
+    } while(0)
 
 /*
  * knote internal API
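Aside on the kevent.c hunks above: they apply the standard POSIX recipe for making a blocking call safe against deferred cancellation. Cancellation stays disabled while the kqueue mutex is taken, the cleanup handler (kevent_release_kq_mutex) is armed before cancellation is re-enabled around the blocking wait, and pthread_testcancel() only runs while the handler is armed, so a deferred cancel releases the lock exactly once. A minimal sketch of the same pattern against a plain mutex and condition variable; the names release_lock and wait_cancellation_safe are illustrative, not libkqueue API:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

/* Cleanup handler: runs only if the thread is cancelled while it is
 * armed, so the cancellation path unlocks exactly once. */
static void
release_lock(void *arg)
{
    pthread_mutex_unlock(arg);
}

static void
wait_cancellation_safe(void)
{
    int prev;

    /* Keep cancellation deferred while acquiring the lock */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &prev);
    pthread_mutex_lock(&lock);

    pthread_cleanup_push(release_lock, &lock);

    /* Handler armed: re-enable cancellation and act on any cancel
     * queued while it was disabled. */
    pthread_setcancelstate(prev, NULL);
    if (prev == PTHREAD_CANCEL_ENABLE)
        pthread_testcancel();

    /* Blocking cancellation point; if cancelled here, the mutex is
     * re-acquired and then dropped by release_lock().  (A real caller
     * would loop on a predicate.) */
    pthread_cond_wait(&cond, &lock);

    /* Back under the lock: defer cancellation again while cleaning up */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);

    /* Exit path, mirroring the diff: test for a deferred cancel while
     * the handler is still armed, so a cancel unlocks via the handler
     * and we never double-unlock. */
    pthread_setcancelstate(prev, NULL);
    if (prev == PTHREAD_CANCEL_ENABLE)
        pthread_testcancel();

    /* Not cancelled: disarm the handler without running it, then do
     * the single normal-path unlock ourselves. */
    pthread_cleanup_pop(0);
    pthread_mutex_unlock(&lock);
}

The exit-path ordering is the point of the kevent() rework: testing for a pending cancel before pthread_cleanup_pop(0) means the unlock happens either in the handler (cancelled) or inline (not cancelled), never both.
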
diff --git a/test/libkqueue.c b/test/libkqueue.c
index 5e71d7e2..2d84b503 100644
--- a/test/libkqueue.c
+++ b/test/libkqueue.c
@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #include "common.h"
+#include <pthread.h>
 
 #ifdef EVFILT_LIBKQUEUE
 static void
@@ -52,10 +53,87 @@ test_libkqueue_version_str(struct test_context *ctx)
     }
 }
 
+static void *
+test_libkqueue_fork_no_hang_thread(void *arg)
+{
+    struct test_context *ctx = arg;
+    struct kevent receipt;
+
+    /*
+     * We shouldn't ever wait for 10 seconds...
+     */
+    if (kevent(ctx->kqfd, NULL, 0, &receipt, 1, &(struct timespec){ .tv_sec = 10 }) > 0) {
+        printf("Failed waiting...\n");
+        die("kevent - waiting");
+    }
+
+    printf("Shouldn't have hit timeout, expected to be cancelled\n");
+    die("kevent - timeout");
+
+    return NULL;
+}
+
+static void
+test_libkqueue_fork_no_hang(struct test_context *ctx)
+{
+    pthread_t thread;
+    time_t start, end;
+    pid_t child;
+
+    start = time(NULL);
+
+    /*
+     * Create a new thread
+     */
+    if (pthread_create(&thread, NULL, test_libkqueue_fork_no_hang_thread, ctx) != 0)
+        die("pthread_create");
+
+    printf("Created test_libkqueue_fork_no_hang_thread [%u]\n", (unsigned int)thread);
+
+    /*
+     * We don't know when the thread will start
+     * listening on the kqueue, so we just
+     * deschedule ourselves for 10ms and hope...
+     */
+    nanosleep(&(struct timespec){ .tv_nsec = 10000000 }, NULL);
+
+    /*
+     * Test that we can fork... The child exits
+     * immediately; we're just checking that we
+     * _can_ fork().
+     */
+#if 0
+    child = fork();
+    if (child == 0) {
+        testing_end_quiet();
+        exit(EXIT_SUCCESS);
+    }
+
+    printf("Forked child [%u]\n", (unsigned int)child);
+#endif
+
+    /*
+     * This also tests proper behaviour of kqueues
+     * on cancellation.
+     */
+    if (pthread_cancel(thread) != 0)
+        die("pthread_cancel");
+
+    /* Hangs here if cancellation leaked the kqueue lock */
+    pthread_join(thread, NULL);
+
+    end = time(NULL);
+    if ((end - start) > 5) {
+        printf("Thread hung instead of being cancelled\n");
+        die("kevent");
+    }
+}
+
 void
 test_evfilt_libkqueue(struct test_context *ctx)
 {
     test(libkqueue_version, ctx);
     test(libkqueue_version_str, ctx);
+    test(libkqueue_fork_no_hang, ctx);
 }
 #endif
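For context, the scenario the new test exercises can be reproduced as a standalone program: cancel a thread blocked in kevent() and confirm the process is not left deadlocked on the kqueue's internal mutex. A rough sketch under the assumption that the emulated kevent() wait is a cancellation point (which the kevent.c changes above make safe); this program is illustrative and not part of the test suite:

#include <sys/event.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static int kqfd;

static void *
waiter(void *arg)
{
    struct kevent receipt;
    struct timespec timeout = { .tv_sec = 10 };

    (void)arg;

    /* Blocks here; with the fix, cancellation inside kevent() must
     * release the kqueue's internal lock via the cleanup handler. */
    kevent(kqfd, NULL, 0, &receipt, 1, &timeout);
    return NULL;
}

int
main(void)
{
    pthread_t thread;

    if ((kqfd = kqueue()) < 0) {
        perror("kqueue");
        return EXIT_FAILURE;
    }
    if (pthread_create(&thread, NULL, waiter, NULL) != 0) {
        fprintf(stderr, "pthread_create failed\n");
        return EXIT_FAILURE;
    }

    /* Give the waiter a moment to block inside kevent() */
    nanosleep(&(struct timespec){ .tv_nsec = 10000000 }, NULL);

    pthread_cancel(thread);
    pthread_join(thread, NULL);

    /* Without the cleanup handler, the cancelled thread could exit
     * still holding the kqueue mutex, and teardown (or any further
     * kevent() call on the same kqueue) would block forever. */
    close(kqfd);
    printf("no hang: kqueue still usable after cancellation\n");
    return EXIT_SUCCESS;
}

The test's 5-second elapsed-time check after pthread_join() is a coarse detector for exactly that hang: a correctly cancelled waiter returns almost immediately, while a leaked lock keeps the thread alive until its 10-second kevent() timeout fires.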