@@ -284,81 +284,43 @@ else version( LDC )
         static assert(0);
     }
 
-    template _passAsSizeT(T)
+    private template _AtomicType(T)
     {
-        // LLVM currently does not support atomic load/store for pointers, thus
-        // we have to manually cast them to size_t.
-        static if (is(T P == U*, U)) // pointer
-        {
-            enum _passAsSizeT = true;
-        }
-        else static if (is(T == interface) || is(T == class))
-        {
-            enum _passAsSizeT = true;
-        }
+        static if (T.sizeof == ubyte.sizeof)
+            alias _AtomicType = ubyte;
+        else static if (T.sizeof == ushort.sizeof)
+            alias _AtomicType = ushort;
+        else static if (T.sizeof == uint.sizeof)
+            alias _AtomicType = uint;
+        else static if (T.sizeof == ulong.sizeof)
+            alias _AtomicType = ulong;
         else
-        {
-            enum _passAsSizeT = false;
-        }
+            static assert(is(_AtomicType!T),
+                "Cannot atomically load/store type of size " ~ T.sizeof.stringof);
     }
 
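For illustration (not part of the patch): on a typical 64-bit target, where pointers and class references are 8 bytes, the new helper resolves as follows. These asserts assume module-internal access, since the template is private:

    static assert(is(_AtomicType!bool   == ubyte));  // 1 byte
    static assert(is(_AtomicType!short  == ushort)); // 2 bytes
    static assert(is(_AtomicType!float  == uint));   // 4 bytes
    static assert(is(_AtomicType!(int*) == ulong));  // 8 bytes on 64-bit
    // An 80-bit real (padded to 12/16 bytes) matches no branch and triggers
    // the static assert with the "Cannot atomically load/store" message.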
-    HeadUnshared!(T) atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val )
+    // This could also handle floating-point types; the constraint is only there
+    // to avoid ambiguities with the "general" floating-point definition from the
+    // upstream runtime below.
+    HeadUnshared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref const shared T val)
         if (!__traits(isFloating, T))
     {
+        alias Int = _AtomicType!T;
         enum ordering = _ordering!(ms == MemoryOrder.acq ? MemoryOrder.seq : ms);
-        static if (_passAsSizeT!T)
-        {
-            return cast(HeadUnshared!(T))cast(void*)llvm_atomic_load!(size_t)(cast(shared(size_t)*)&val, ordering);
-        }
-        else static if (T.sizeof == bool.sizeof)
-        {
-            return cast(HeadUnshared!(T))llvm_atomic_load!(ubyte)(cast(shared(ubyte)*)&val, ordering);
-        }
-        else
-        {
-            return cast(HeadUnshared!(T))llvm_atomic_load!(T)(&val, ordering);
-        }
-    }
 
-    void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval )
-        if (__traits(isFloating, T))
-    {
-        static if (T.sizeof == int.sizeof)
-        {
-            static assert(is(T : float));
-            auto ptrVal = cast(shared int*)&val;
-            auto ptrNewval = cast(int*)&newval;
-            atomicStore!(ms)(*ptrVal, *ptrNewval);
-        }
-        else static if (T.sizeof == long.sizeof)
-        {
-            static assert(is(T : double));
-            auto ptrVal = cast(shared long*)&val;
-            auto ptrNewval = cast(long*)&newval;
-            atomicStore!(ms)(*ptrVal, *ptrNewval);
-        }
-        else
-        {
-            static assert(0, "Cannot atomically store 80-bit reals.");
-        }
+        auto asInt = llvm_atomic_load!Int(cast(shared(Int)*)cast(void*)&val, ordering);
+        return *cast(HeadUnshared!T*)&asInt;
     }
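A brief usage sketch (ordinary user code, not part of the patch): a bool load previously took the dedicated `T.sizeof == bool.sizeof` branch deleted above and is now covered uniformly via `_AtomicType!bool == ubyte`:

    import core.atomic : atomicLoad;

    shared bool ready;
    shared int  payload;

    int consume()
    {
        // Spin until the producer publishes, then read the value. Both loads
        // go through the integer-reinterpret path (as ubyte and uint).
        while (!atomicLoad(ready)) {}   // defaults to MemoryOrder.seq
        return atomicLoad(payload);
    }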
 
     void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V1)( ref shared T val, V1 newval )
-        if (!__traits(isFloating, T) && __traits(compiles, mixin("val = newval")))
+        if (__traits(compiles, mixin("val = newval")))
     {
+        alias Int = _AtomicType!T;
         enum ordering = _ordering!(ms == MemoryOrder.rel ? MemoryOrder.seq : ms);
-        static if (_passAsSizeT!T)
-        {
-            llvm_atomic_store!(size_t)(cast(size_t)newval, cast(shared(size_t)*)&val, ordering);
-        }
-        else static if (T.sizeof == bool.sizeof)
-        {
-            llvm_atomic_store!(ubyte)(newval, cast(shared(ubyte)*)&val, ordering);
-        }
-        else
-        {
-            llvm_atomic_store!(T)(cast(T)newval, &val, ordering);
-        }
+
+        auto target = cast(shared(Int)*)cast(void*)&val;
+        auto newPtr = cast(Int*)&newval;
+        llvm_atomic_store!Int(*newPtr, target, ordering);
     }
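With `!__traits(isFloating, T)` dropped from the constraint, the float/double overload deleted above is subsumed: a float store is simply reinterpreted as a uint store. A sketch under that assumption (hypothetical variable and function names):

    import core.atomic : atomicStore;

    shared float temperature;

    void publish(float f)
    {
        // Stored via _AtomicType!float == uint: the four float bytes are
        // written with a single atomic 32-bit store.
        atomicStore(temperature, f);
    }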
 
     void atomicFence() nothrow