// assembly-output: ptx-linker
// compile-flags: --crate-type cdylib
// only-nvptx64
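// The compiletest headers above build this file as a `cdylib` via
// `ptx-linker`, compare the emitted PTX against the CHECK lines below,
// and run the test only on the nvptx64 target.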

#![feature(abi_ptx, core_intrinsics)]
#![no_std]

use core::intrinsics::*;

// aux-build: breakpoint-panic-handler.rs
extern crate breakpoint_panic_handler;
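// The aux crate supplies the `#[panic_handler]` item that every `#![no_std]`
// artifact needs in order to link. A minimal sketch of such a handler (the
// actual aux source may differ):
//
//     #[panic_handler]
//     fn panic(_info: &core::panic::PanicInfo) -> ! {
//         unsafe {
//             core::intrinsics::breakpoint();
//             core::hint::unreachable_unchecked()
//         }
//     }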

// Currently, the LLVM NVPTX backend can only emit atomic instructions with
// `relaxed` (the PTX default) ordering, but it's also useful to make sure
// the backend won't fail with other orderings. Apparently, the backend
// doesn't support fences either; as a workaround, `llvm.nvvm.membar.*`
// could work (sketched below), and perhaps, in the long run, all the
// atomic operations should rather be provided by `core::arch::nvptx`.

// Also, the PTX ISA doesn't have atomic `load`, `store`, or `nand`.

// FIXME(denzp): add tests for `core::sync::atomic::*`
// (a rough sketch is at the end of this file).

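// If a fence is ever needed before proper backend support lands, a
// hypothetical workaround (not exercised by this test) would be to emit
// the PTX barrier directly with the era's unstable inline-assembly syntax:
//
//     #![feature(asm)]
//
//     unsafe fn membar_device() {
//         // `membar.gl;` orders memory accesses at device-wide scope.
//         asm!("membar.gl;" ::: "memory" : "volatile");
//     }
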
#[no_mangle]
pub unsafe extern "ptx-kernel" fn atomics_kernel(a: *mut u32) {
    // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.and.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_and(a, 1);
    atomic_and_relaxed(a, 1);

    // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
    // CHECK: atom.global.cas.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1, 2;
    atomic_cxchg(a, 1, 2);
    atomic_cxchg_relaxed(a, 1, 2);

    // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.max.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_max(a, 1);
    atomic_max_relaxed(a, 1);

    // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.min.s32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_min(a, 1);
    atomic_min_relaxed(a, 1);

    // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.or.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_or(a, 1);
    atomic_or_relaxed(a, 1);

    // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.max.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_umax(a, 1);
    atomic_umax_relaxed(a, 1);

    // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.min.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_umin(a, 1);
    atomic_umin_relaxed(a, 1);

    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_xadd(a, 1);
    atomic_xadd_relaxed(a, 1);

    // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.exch.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_xchg(a, 1);
    atomic_xchg_relaxed(a, 1);

    // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    // CHECK: atom.global.xor.b32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], 1;
    atomic_xor(a, 1);
    atomic_xor_relaxed(a, 1);

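    // PTX has no atomic subtraction: as the CHECK lines below show, the
    // backend lowers `xsub` by negating the operand and issuing an
    // `atom.add` instead.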
    // CHECK: mov.u32 %[[sub_0_arg:r[0-9]+]], 100;
    // CHECK: neg.s32 temp, %[[sub_0_arg]];
    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
    atomic_xsub(a, 100);

    // CHECK: mov.u32 %[[sub_1_arg:r[0-9]+]], 200;
    // CHECK: neg.s32 temp, %[[sub_1_arg]];
    // CHECK: atom.global.add.u32 %{{r[0-9]+}}, [%{{rd[0-9]+}}], temp;
    atomic_xsub_relaxed(a, 200);
}
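
// A future test for `core::sync::atomic::*` (see the FIXME above) might
// look roughly like the sketch below; the kernel name and the exact
// CHECK expectations are illustrative only:
//
//     use core::sync::atomic::{AtomicU32, Ordering};
//
//     #[no_mangle]
//     pub unsafe extern "ptx-kernel" fn sync_atomics_kernel(a: &AtomicU32) {
//         a.fetch_and(1, Ordering::Relaxed);
//     }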