
#include "uv.h"
#include "internal.h"

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>  /* snprintf() */
#include <assert.h>
#include <stdlib.h>
4040
static void uv__async_send(uv_loop_t* loop);
static int uv__async_start(uv_loop_t* loop);
static void uv__cpu_relax(void);
4344
4445
4546int uv_async_init (uv_loop_t * loop , uv_async_t * handle , uv_async_cb async_cb ) {
@@ -61,19 +62,26 @@ int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
6162
6263
6364int uv_async_send (uv_async_t * handle ) {
65+ _Atomic int * pending ;
66+ int expected ;
67+
68+ pending = (_Atomic int * ) & handle -> pending ;
69+
6470 /* Do a cheap read first. */
65- if (ACCESS_ONCE ( int , handle -> pending ) != 0 )
71+ if (atomic_load_explicit ( pending , memory_order_relaxed ) != 0 )
6672 return 0 ;
6773
6874 /* Tell the other thread we're busy with the handle. */
69- if (cmpxchgi (& handle -> pending , 0 , 1 ) != 0 )
75+ expected = 0 ;
76+ if (!atomic_compare_exchange_strong (pending , & expected , 1 ))
7077 return 0 ;
7178
7279 /* Wake up the other thread's event loop. */
7380 uv__async_send (handle -> loop );
7481
7582 /* Tell the other thread we're done. */
76- if (cmpxchgi (& handle -> pending , 1 , 2 ) != 1 )
83+ expected = 1 ;
84+ if (!atomic_compare_exchange_strong (pending , & expected , 2 ))
7785 abort ();
7886
7987 return 0 ;
@@ -82,8 +90,11 @@ int uv_async_send(uv_async_t* handle) {
8290
8391/* Only call this from the event loop thread. */
8492static int uv__async_spin (uv_async_t * handle ) {
93+ _Atomic int * pending ;
94+ int expected ;
8595 int i ;
86- int rc ;
96+
97+ pending = (_Atomic int * ) & handle -> pending ;
8798
8899 for (;;) {
89100 /* 997 is not completely chosen at random. It's a prime number, acyclical
@@ -94,13 +105,14 @@ static int uv__async_spin(uv_async_t* handle) {
94105 * rc=1 -- handle is pending, other thread is still working with it.
95106 * rc=2 -- handle is pending, other thread is done.
96107 */
97- rc = cmpxchgi (& handle -> pending , 2 , 0 );
108+ expected = 2 ;
109+ atomic_compare_exchange_strong (pending , & expected , 0 );
98110
99- if (rc != 1 )
100- return rc ;
111+ if (expected != 1 )
112+ return expected ;
101113
102114 /* Other thread is busy with this handle, spin until it's done. */
103- cpu_relax ();
115+ uv__cpu_relax ();
104116 }
105117
106118 /* Yield the CPU. We may have preempted the other thread while it's
@@ -251,3 +263,16 @@ void uv__async_stop(uv_loop_t* loop) {
251263 uv__close (loop -> async_io_watcher .fd );
252264 loop -> async_io_watcher .fd = -1 ;
253265}
266+
267+
/* Busy-wait hint executed inside spin loops. On architectures with a
 * dedicated spin-wait instruction this lowers power draw and yields
 * pipeline resources to an SMT sibling; elsewhere it compiles to a
 * plain compiler barrier or to nothing at all. */
static void uv__cpu_relax(void) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__ ("rep; nop" ::: "memory");  /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
  __asm__ __volatile__ ("yield" ::: "memory");
#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
  __asm volatile ("" : : : "memory");
#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
  __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
0 commit comments