|
39 | 39 | #define FULL_COMPILER_ATOMIC_SUPPORT |
40 | 40 | #endif |
41 | 41 |
|
| 42 | +#if defined(__clang_major__) |
| 43 | +#define CORRECT_COMPILER_ATOMIC_SUPPORT |
| 44 | +#elif defined(__GNUC__) && (__riscv_xlen <= 32 || __GNUC__ > 13) |
| 45 | +#define CORRECT_COMPILER_ATOMIC_SUPPORT |
| 46 | +#endif |
| 47 | + |
42 | 48 | template<size_t byte_size> |
43 | 49 | struct Atomic::PlatformAdd { |
44 | 50 | template<typename D, typename I> |
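
The guard added above trusts Clang unconditionally, and GCC only on RV32 (`__riscv_xlen <= 32`) or from GCC 14 onward. A standalone probe like the following (illustrative only, assumes a RISC-V toolchain; not part of the patch) shows which 4-byte cmpxchg path a given compiler would select under the same predicate:

    #include <cstdio>

    // Mirrors the patch's guard; this probe program itself is hypothetical.
    #if defined(__clang_major__)
    #define CORRECT_COMPILER_ATOMIC_SUPPORT
    #elif defined(__GNUC__) && (__riscv_xlen <= 32 || __GNUC__ > 13)
    #define CORRECT_COMPILER_ATOMIC_SUPPORT
    #endif

    int main() {
    #ifdef CORRECT_COMPILER_ATOMIC_SUPPORT
      std::puts("builtin __atomic_compare_exchange is trusted for 4-byte cmpxchg");
    #else
      std::puts("hand-written LR/SC specialization will be used for 4-byte cmpxchg");
    #endif
      return 0;
    }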
@@ -114,6 +120,44 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest __attribute__(( |
114 | 120 | } |
115 | 121 | #endif |
116 | 122 |
|
| 123 | +#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT |
| 124 | +// The implementation of `__atomic_compare_exchange` lacks sign extension
| 125 | +// in GCC 13 and lower when used with 32-bit unsigned integers on RV64,
| 126 | +// so we implement it manually here.
| 127 | +// GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=114130. |
| 128 | +// See also JDK-8326936. |
| 129 | +template<> |
| 130 | +template<typename T> |
| 131 | +inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest __attribute__((unused)), |
| 132 | + T compare_value, |
| 133 | + T exchange_value, |
| 134 | + atomic_memory_order order) const { |
| 135 | + STATIC_ASSERT(4 == sizeof(T)); |
| 136 | + |
| 137 | + int32_t old_value; |
| 138 | + uint64_t rc_temp; |
| 139 | + |
| 140 | + if (order != memory_order_relaxed) { |
| 141 | + FULL_MEM_BARRIER; |
| 142 | + } |
| 143 | + |
| 144 | + __asm__ __volatile__ ( |
| 145 | +    "1: lr.w %0, %2 \n\t"      // load-reserve *dest; lr.w sign-extends the word
| 146 | +    " bne %0, %3, 2f \n\t"     // mismatch with compare_value: done, return old value
| 147 | +    " sc.w %1, %4, %2 \n\t"    // store-conditional; %1 != 0 if the reservation was lost
| 148 | +    " bnez %1, 1b \n\t"        // reservation lost: retry the loop
| 149 | +    "2: \n\t"
| 150 | + : /*%0*/"=&r" (old_value), /*%1*/"=&r" (rc_temp), /*%2*/"+A" (*dest) |
| 151 | + : /*%3*/"r" ((int64_t)(int32_t)compare_value), /*%4*/"r" (exchange_value) |
| 152 | + : "memory" ); |
| 153 | + |
| 154 | + if (order != memory_order_relaxed) { |
| 155 | + FULL_MEM_BARRIER; |
| 156 | + } |
| 157 | + return (T)old_value; |
| 158 | +} |
| 159 | +#endif |
| 160 | + |
117 | 161 | template<size_t byte_size> |
118 | 162 | template<typename T> |
119 | 163 | inline T Atomic::PlatformXchg<byte_size>::operator()(T volatile* dest, |
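
The crux of the cmpxchg workaround above is that `lr.w` sign-extends the loaded word into a 64-bit register, so the compare value must be sign-extended the same way before the `bne`; that is what the `(int64_t)(int32_t)compare_value` cast guarantees, and what affected GCC versions fail to do when bit 31 of an unsigned 32-bit value is set. A standalone distillation of the same loop (hypothetical helper name, assumes an RV64 toolchain):

    #include <cstdint>

    // Sketch of the patch's LR/SC loop outside HotSpot; cmpxchg_u32 is an
    // illustrative name. Returns the old value; it equals compare_value
    // iff the exchange happened.
    static inline uint32_t cmpxchg_u32(volatile uint32_t* dest,
                                       uint32_t compare_value,
                                       uint32_t exchange_value) {
      int32_t  old_value;
      uint64_t rc;
      __asm__ __volatile__ (
        "1: lr.w %0, %2      \n\t"   // load-reserve, sign-extends into %0
        "   bne  %0, %3, 2f  \n\t"   // mismatch: give up, return old value
        "   sc.w %1, %4, %2  \n\t"   // store-conditional, %1 != 0 on failure
        "   bnez %1, 1b      \n\t"   // reservation lost: retry
        "2:                  \n\t"
        : "=&r" (old_value), "=&r" (rc), "+A" (*dest)
        : "r" ((int64_t)(int32_t)compare_value), "r" (exchange_value)
        : "memory");
      return (uint32_t)old_value;
    }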
@@ -151,6 +195,10 @@ inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T volatile* dest __attri |
151 | 195 | STATIC_ASSERT(byte_size >= 4); |
152 | 196 | #endif |
153 | 197 |
|
| 198 | +#ifndef CORRECT_COMPILER_ATOMIC_SUPPORT |
| 199 | + STATIC_ASSERT(byte_size != 4); // 4-byte cmpxchg must use the hand-written specialization above
| 200 | +#endif |
| 201 | + |
154 | 202 | STATIC_ASSERT(byte_size == sizeof(T)); |
155 | 203 | if (order != memory_order_relaxed) { |
156 | 204 | FULL_MEM_BARRIER; |
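
With the workaround active, the generic template must never be instantiated for the 4-byte case, which the added `STATIC_ASSERT(byte_size != 4)` enforces at compile time. For reference, the builtin-based shape the generic path relies on looks roughly like this (a sketch under the usual `__atomic_compare_exchange` semantics, not the exact HotSpot body):

    #include <cstdint>

    // __atomic_compare_exchange writes the value it observed back into
    // `expected`, which is how the old value is returned to the caller.
    template <typename T>
    T cmpxchg_builtin(volatile T* dest, T compare_value, T exchange_value) {
      T expected = compare_value;
      __atomic_compare_exchange(dest, &expected, &exchange_value,
                                /*weak=*/false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
      return expected;  // == compare_value iff the CAS succeeded
    }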
@@ -187,5 +235,6 @@ struct Atomic::PlatformOrderedStore<byte_size, RELEASE_X_FENCE> |
187 | 235 | }; |
188 | 236 |
|
189 | 237 | #undef FULL_COMPILER_ATOMIC_SUPPORT |
| 238 | +#undef CORRECT_COMPILER_ATOMIC_SUPPORT |
190 | 239 |
|
191 | 240 | #endif // OS_CPU_LINUX_RISCV_ATOMIC_LINUX_RISCV_HPP |
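
As a usage note, call sites reach the corrected 4-byte path transparently through HotSpot's portable `Atomic::cmpxchg` wrapper. A hypothetical call site (illustrative field and values, with bit 31 set to exercise the case GCC 13 and lower compiled incorrectly):

    #include <cstdint>
    #include "runtime/atomic.hpp"   // HotSpot's Atomic::cmpxchg (in-tree include)

    // _state and try_claim are illustrative, not part of the patch.
    static volatile uint32_t _state = 0x80000000u;

    bool try_claim() {
      uint32_t witness = Atomic::cmpxchg(&_state, 0x80000000u, 1u);
      return witness == 0x80000000u;   // true iff the CAS installed the 1
    }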