@@ -38,34 +38,6 @@
 #include <sys/eventfd.h>
 #endif
 
-#if UV__KQUEUE_EVFILT_USER
-static uv_once_t kqueue_runtime_detection_guard = UV_ONCE_INIT;
-static int kqueue_evfilt_user_support = 1;
-
-
-static void uv__kqueue_runtime_detection(void) {
-  int kq;
-  struct kevent ev[2];
-  struct timespec timeout = {0, 0};
-
-  /* Perform the runtime detection to ensure that kqueue with
-   * EVFILT_USER actually works. */
-  kq = kqueue();
-  EV_SET(ev, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
-         EV_ADD | EV_CLEAR, 0, 0, 0);
-  EV_SET(ev + 1, UV__KQUEUE_EVFILT_USER_IDENT, EVFILT_USER,
-         0, NOTE_TRIGGER, 0, 0);
-  if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
-      ev[0].filter != EVFILT_USER ||
-      ev[0].ident != UV__KQUEUE_EVFILT_USER_IDENT ||
-      ev[0].flags & EV_ERROR)
-    /* If we wind up here, we can assume that EVFILT_USER is defined but
-     * broken on the current system. */
-    kqueue_evfilt_user_support = 0;
-  uv__close(kq);
-}
-#endif
-
 static void uv__async_send(uv_loop_t* loop);
 static int uv__async_start(uv_loop_t* loop);
 static void uv__cpu_relax(void);
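
The block removed above probed, at runtime, whether kqueue's EVFILT_USER
actually works, since some kernels define the filter but ship it broken. A
minimal standalone version of that probe might look like the sketch below;
the identifier 42 is an arbitrary stand-in for UV__KQUEUE_EVFILT_USER_IDENT,
and failure handling is reduced to a printed message.

    /* Probe sketch: register an EVFILT_USER event on a throwaway kqueue,
     * trigger it in the same kevent() call, and verify the event fires. */
    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void) {
      int kq;
      struct kevent ev[2];
      struct timespec timeout = {0, 0};

      kq = kqueue();
      if (kq == -1)
        abort();
      /* The first changelist entry registers the user event; the second
       * triggers it with NOTE_TRIGGER. */
      EV_SET(&ev[0], 42, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
      EV_SET(&ev[1], 42, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
      if (kevent(kq, ev, 2, ev, 1, &timeout) < 1 ||
          ev[0].filter != EVFILT_USER ||
          ev[0].ident != 42 ||
          (ev[0].flags & EV_ERROR))
        printf("EVFILT_USER is defined but broken on this system\n");
      else
        printf("EVFILT_USER works\n");
      close(kq);
      return 0;
    }
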
@@ -158,23 +130,16 @@ void uv__async_close(uv_async_t* handle) {
 
 
 static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
-#ifndef __linux__
   char buf[1024];
   ssize_t r;
-#endif
   struct uv__queue queue;
   struct uv__queue* q;
   uv_async_t* h;
   _Atomic int *pending;
 
   assert(w == &loop->async_io_watcher);
 
-#ifndef __linux__
-#if UV__KQUEUE_EVFILT_USER
-  for (;!kqueue_evfilt_user_support;) {
-#else
   for (;;) {
-#endif
     r = read(w->fd, buf, sizeof(buf));
 
     if (r == sizeof(buf))
@@ -191,7 +156,6 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
 
     abort();
   }
-#endif  /* !__linux__ */
 
   uv__queue_move(&loop->async_handles, &queue);
   while (!uv__queue_empty(&queue)) {
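
For context, the loop that survives above drains the wakeup fd completely,
so any number of uv_async_send() calls between polls collapse into a single
pass over the handle queue. A self-contained sketch of that drain pattern,
with a hypothetical nonblocking `wakeup_fd` standing in for
loop->async_io_watcher.fd:

    /* Drain sketch: read until the fd is empty, coalescing wakeups. */
    #include <errno.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void drain_wakeups(int wakeup_fd) {
      char buf[1024];
      ssize_t r;

      for (;;) {
        r = read(wakeup_fd, buf, sizeof(buf));
        if (r == sizeof(buf))
          continue;  /* buffer filled; more bytes may be pending */
        if (r != -1)
          break;     /* short read: fd fully drained */
        if (errno == EAGAIN || errno == EWOULDBLOCK)
          break;     /* nothing left to read */
        if (errno == EINTR)
          continue;  /* interrupted by a signal; retry */
        abort();     /* any other error is a bug */
      }
    }
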
@@ -215,58 +179,34 @@ static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
 
 
 static void uv__async_send(uv_loop_t* loop) {
+  const void* buf;
+  ssize_t len;
   int fd;
-  ssize_t r;
-#ifdef __linux__
-  uint64_t val;
-
-  fd = loop->async_io_watcher.fd;  /* eventfd */
-  for (val = 1; /* empty */; val = 1) {
-    r = write(fd, &val, sizeof(uint64_t));
-    if (r < 0) {
-      /* When EAGAIN occurs, the eventfd counter hits the maximum value of the unsigned 64-bit.
-       * We need to first drain the eventfd and then write again.
-       *
-       * Check out https://man7.org/linux/man-pages/man2/eventfd.2.html for details.
-       */
-      if (errno == EAGAIN) {
-        /* It's ready to retry. */
-        if (read(fd, &val, sizeof(uint64_t)) > 0 || errno == EAGAIN) {
-          continue;
-        }
-      }
-      /* Unknown error occurs. */
-      break;
-    }
-    return;
-  }
-#else
-#if UV__KQUEUE_EVFILT_USER
-  struct kevent ev;
-
-  if (kqueue_evfilt_user_support) {
-    fd = loop->async_io_watcher.fd;  /* magic number for EVFILT_USER */
-    EV_SET(&ev, fd, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
-    r = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
-    if (r == 0)
-      return;
-    else
-      abort();
+  int r;
+
+  buf = "";
+  len = 1;
+  fd = loop->async_wfd;
+
+#if defined(__linux__)
+  if (fd == -1) {
+    static const uint64_t val = 1;
+    buf = &val;
+    len = sizeof(val);
+    fd = loop->async_io_watcher.fd;  /* eventfd */
   }
 #endif
 
-  fd = loop->async_wfd;  /* write end of the pipe */
   do
-    r = write(fd, "x", 1);
+    r = write(fd, buf, len);
   while (r == -1 && errno == EINTR);
 
-  if (r == 1)
+  if (r == len)
     return;
 
   if (r == -1)
     if (errno == EAGAIN || errno == EWOULDBLOCK)
       return;
-#endif
 
   abort();
 }
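
The restored uv__async_send leans on eventfd semantics on Linux: a write
adds its value to a 64-bit counter, so a nonblocking write of 1 either
succeeds or fails with EAGAIN once the counter is saturated, and a saturated
counter still means a wakeup is pending. That is why the EAGAIN/EWOULDBLOCK
branch above can simply return. A minimal sketch of the idea, assuming Linux
and a throwaway eventfd rather than libuv's loop state:

    /* Wakeup sketch: write 1 to an eventfd; treat EAGAIN as "already
     * pending", since the counter only saturates and never loses wakeups. */
    #include <sys/eventfd.h>
    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void wakeup(int efd) {
      static const uint64_t val = 1;
      ssize_t r;

      do
        r = write(efd, &val, sizeof(val));
      while (r == -1 && errno == EINTR);

      if (r == sizeof(val))
        return;
      if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
        return;  /* counter full: a wakeup is already pending */
      abort();
    }

    int main(void) {
      int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
      if (efd == -1)
        abort();
      wakeup(efd);
      wakeup(efd);  /* coalesces with the first wakeup */
      close(efd);
      return 0;
    }
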
@@ -275,9 +215,6 @@ static void uv__async_send(uv_loop_t* loop) {
 static int uv__async_start(uv_loop_t* loop) {
   int pipefd[2];
   int err;
-#if UV__KQUEUE_EVFILT_USER
-  struct kevent ev;
-#endif
 
   if (loop->async_io_watcher.fd != -1)
     return 0;
@@ -289,36 +226,6 @@ static int uv__async_start(uv_loop_t* loop) {
 
   pipefd[0] = err;
   pipefd[1] = -1;
-#elif UV__KQUEUE_EVFILT_USER
-  uv_once(&kqueue_runtime_detection_guard, uv__kqueue_runtime_detection);
-  if (kqueue_evfilt_user_support) {
-    /* In order not to break the generic pattern of I/O polling, a valid
-     * file descriptor is required to take up a room in loop->watchers,
-     * thus we create one for that, but this fd will not be actually used,
-     * it's just a placeholder and magic number which is going to be closed
-     * during the cleanup, as other FDs. */
-    err = uv__open_cloexec("/dev/null", O_RDONLY);
-    if (err < 0)
-      return err;
-
-    pipefd[0] = err;
-    pipefd[1] = -1;
-
-    /* When using EVFILT_USER event to wake up the kqueue, this event must be
-     * registered beforehand. Otherwise, calling kevent() to issue an
-     * unregistered EVFILT_USER event will get an ENOENT.
-     * Since uv__async_send() may happen before uv__io_poll() with multi-threads,
-     * we can't defer this registration of EVFILT_USER event as we did for other
-     * events, but must perform it right away. */
-    EV_SET(&ev, err, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
-    err = kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL);
-    if (err < 0)
-      return UV__ERR(errno);
-  } else {
-    err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
-    if (err < 0)
-      return err;
-  }
 #else
   err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
   if (err < 0)
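
With the EVFILT_USER branch gone, the non-Linux path above always falls back
to a nonblocking pipe: the read end is polled for POLLIN and the write end
becomes loop->async_wfd. A rough equivalent of what
uv__make_pipe(pipefd, UV_NONBLOCK_PIPE) sets up, assuming a platform that
provides pipe2():

    /* Fallback sketch: a nonblocking, close-on-exec wakeup pipe.
     * pipefd[0] is watched for POLLIN; pipefd[1] is written to. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    static int make_wakeup_pipe(int pipefd[2]) {
      if (pipe2(pipefd, O_NONBLOCK | O_CLOEXEC))
        return -1;
      return 0;
    }
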
@@ -329,13 +236,6 @@ static int uv__async_start(uv_loop_t* loop) {
   uv__io_start(loop, &loop->async_io_watcher, POLLIN);
   loop->async_wfd = pipefd[1];
 
-#if UV__KQUEUE_EVFILT_USER
-  /* Prevent the EVFILT_USER event from being added to kqueue redundantly
-   * and mistakenly later in uv__io_poll(). */
-  if (kqueue_evfilt_user_support)
-    loop->async_io_watcher.events = loop->async_io_watcher.pevents;
-#endif
-
   return 0;
 }
 