|
10 | 10 |
|
11 | 11 | // TODO: Stop manually mangling this name. Need C++ namespaces to get the exact mangling. |
12 | 12 |
|
// Diff hunk ("-" = old, "+" = new) of the IMPL macro. IMPL emits a _CLC_DEF
// definition of a SPIR-V atomic-min entry point under its manually constructed
// Itanium-mangled name (see the TODO above about hand-mangling).
// Mangling breakdown: _Z18 hard-codes the 18-character length of NAME — both
// visible callers pass __spirv_AtomicSMin / __spirv_AtomicUMin, which are
// exactly 18 chars. "P" = pointer; "U3" + AS_MANGLED (e.g. U3AS1) appears to be
// the Itanium vendor-extended qualifier encoding the OpenCL address space —
// confirm against the Itanium ABI mangling rules. The N5__spv...E tail mangles
// the __spv::Scope::Flag and __spv::MemorySemanticsMask::Flag enum parameters.
// Change in the new version: the fixed "PU3" is split into "P" plus a new
// AS_PREFIX macro argument, so callers can pass an empty AS_PREFIX/AS/AS_MANGLED
// and instantiate a generic (no address-space qualifier) overload as well.
// Note: scope and semantics are accepted but ignored; the body lowers directly
// to a PREFIX##__sync_fetch_and_##SUFFIX builtin/helper call.
13 | | -#define IMPL(TYPE, TYPE_MANGLED, AS, AS_MANGLED, NAME, PREFIX, SUFFIX) \ |
14 | | - _CLC_DEF TYPE \ |
15 | | - _Z18##NAME##PU3##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \ |
16 | | - volatile AS TYPE *p, enum Scope scope, \ |
17 | | - enum MemorySemanticsMask semantics, TYPE val) { \ |
| 13 | +#define IMPL(TYPE, TYPE_MANGLED, AS_PREFIX, AS, AS_MANGLED, NAME, PREFIX, \ |
| 14 | + SUFFIX) \ |
| 15 | + _CLC_DEF TYPE \ |
| 16 | + _Z18##NAME##P##AS_PREFIX##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \ |
| 17 | + volatile AS TYPE *p, enum Scope scope, \ |
| 18 | + enum MemorySemanticsMask semantics, TYPE val) { \ |
| 19 | + return PREFIX##__sync_fetch_and_##SUFFIX(p, val); \ |
19 | 20 | }
20 | 21 |
|
// 32-bit instantiations. Old form passed (TYPE, TYPE_MANGLED, AS, AS_MANGLED,
// NAME, PREFIX, SUFFIX); the new form inserts AS_PREFIX ("U3") before AS.
// TYPE_MANGLED follows Itanium codes: i = int, j = unsigned int.
// AS1/AS3 presumably name the global/local OpenCL address spaces in the
// mangling — matches the AS argument on each line.
// The two new lines with empty AS_PREFIX/AS/AS_MANGLED add generic
// (unqualified-pointer) overloads that did not exist before.
21 | | -IMPL(int, i, global, AS1, __spirv_AtomicSMin, , min)
22 | | -IMPL(unsigned int, j, global, AS1, __spirv_AtomicUMin, , umin)
23 | | -IMPL(int, i, local, AS3, __spirv_AtomicSMin, , min)
24 | | -IMPL(unsigned int, j, local, AS3, __spirv_AtomicUMin, , umin)
| 22 | +IMPL(int, i, U3, global, AS1, __spirv_AtomicSMin, , min)
| 23 | +IMPL(unsigned int, j, U3, global, AS1, __spirv_AtomicUMin, , umin)
| 24 | +IMPL(int, i, U3, local, AS3, __spirv_AtomicSMin, , min)
| 25 | +IMPL(unsigned int, j, U3, local, AS3, __spirv_AtomicUMin, , umin)
| 26 | +IMPL(int, i, , , , __spirv_AtomicSMin, , min)
| 27 | +IMPL(unsigned int, j, , , , __spirv_AtomicUMin, , umin)
25 | 28 |
|
// 64-bit support is gated on the cl_khr_int64_extended_atomics extension.
// These prototypes declare the out-of-line helpers the 64-bit IMPL
// instantiations below lower to (via PREFIX=__clc, SUFFIX=*_8). The two "+"
// lines add address-space-generic variants to back the new generic overloads.
// NOTE(review): the signed-min helpers are declared returning unsigned long
// while taking long* — looks intentional to match the existing local/global
// declarations, but worth confirming against the helpers' definitions.
26 | 29 | #ifdef cl_khr_int64_extended_atomics
27 | 30 | unsigned long __clc__sync_fetch_and_min_local_8(volatile local long *, long);
28 | 31 | unsigned long __clc__sync_fetch_and_min_global_8(volatile global long *, long);
| 32 | +unsigned long __clc__sync_fetch_and_min_8(volatile long *, long);
29 | 33 | unsigned long __clc__sync_fetch_and_umin_local_8(volatile local unsigned long *, unsigned long);
30 | 34 | unsigned long __clc__sync_fetch_and_umin_global_8(volatile global unsigned long *, unsigned long);
| 35 | +unsigned long __clc__sync_fetch_and_umin_8(volatile unsigned long *, unsigned long);
31 | 36 |
|
// 64-bit instantiations, mirroring the 32-bit set above but routed through the
// __clc__sync_fetch_and_*_8 helpers declared in this #ifdef block
// (PREFIX=__clc, SUFFIX selects the matching helper name).
// TYPE_MANGLED Itanium codes: l = long, m = unsigned long.
// As with the 32-bit set, the two new empty-address-space lines add generic
// overloads, lowering to the new suffix-less *_8 helpers.
32 | | -IMPL(long, l, global, AS1, __spirv_AtomicSMin, __clc, min_global_8)
33 | | -IMPL(unsigned long, m, global, AS1, __spirv_AtomicUMin, __clc, umin_global_8)
34 | | -IMPL(long, l, local, AS3, __spirv_AtomicSMin, __clc, min_local_8)
35 | | -IMPL(unsigned long, m, local, AS3, __spirv_AtomicUMin, __clc, umin_local_8)
| 37 | +IMPL(long, l, U3, global, AS1, __spirv_AtomicSMin, __clc, min_global_8)
| 38 | +IMPL(unsigned long, m, U3, global, AS1, __spirv_AtomicUMin, __clc, umin_global_8)
| 39 | +IMPL(long, l, U3, local, AS3, __spirv_AtomicSMin, __clc, min_local_8)
| 40 | +IMPL(unsigned long, m, U3, local, AS3, __spirv_AtomicUMin, __clc, umin_local_8)
| 41 | +IMPL(long, l, , , , __spirv_AtomicSMin, __clc, min_8)
| 42 | +IMPL(unsigned long, m, , , , __spirv_AtomicUMin, __clc, umin_8)
36 | 43 | #endif
37 | 44 | #undef IMPL
0 commit comments