-
Notifications
You must be signed in to change notification settings - Fork 46
/
atomic.h
259 lines (241 loc) · 7.67 KB
/
atomic.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
/* Copyright 2019 SiFive, Inc */
/* SPDX-License-Identifier: Apache-2.0 */
#ifndef METAL__ATOMIC_H
#define METAL__ATOMIC_H
#include <stdint.h>
#include <metal/compiler.h>
/* Storage type for metal atomics: a volatile 32-bit word, matching the
 * operand width of the "amo*.w" instructions used by the accessors below. */
typedef volatile int32_t metal_atomic_t;

/* Declare an atomic variable, placed in the dedicated ".data.atomics"
 * section — presumably so the linker script can locate all atomics in
 * memory that supports AMO operations (TODO: confirm against the linker
 * script). Uses the documented `__attribute__` spelling rather than the
 * nonstandard `__attribute` alias. */
#define METAL_ATOMIC_DECLARE(name)                                             \
    __attribute__((section(".data.atomics"))) metal_atomic_t name
/* mcause exception code for a Store/AMO access fault (RISC-V privileged
 * architecture exception-code table). */
#define _METAL_STORE_AMO_ACCESS_FAULT 7

/* This macro stores the memory address in mtval like a normal store/amo access
 * fault, triggers a trap, and then if execution returns, returns 0 as an
 * arbitrary choice.
 *
 * Fix: the expansion previously hard-coded the identifier `a` instead of
 * using the macro parameter `addr`; it only compiled because every call
 * site happens to name its argument `a`. */
#define _METAL_TRAP_AMO_ACCESS(addr)                                           \
    __asm__("csrw mtval, %[atomic]" ::[atomic] "r"(addr));                     \
    _metal_trap(_METAL_STORE_AMO_ACCESS_FAULT);                                \
    return 0;
/*!
 * @brief Check if the platform supports atomic operations
 *
 * @return 1 if atomic operations are supported, 0 if not
 */
__inline__ int32_t metal_atomic_available(void) {
    /* Support is a compile-time fact: the toolchain predefines
     * __riscv_atomic when the target has the atomic (A) extension. */
#ifdef __riscv_atomic
    const int32_t has_amo = 1;
#else
    const int32_t has_amo = 0;
#endif
    return has_amo;
}
/*!
 * @brief Atomically add to a metal_atomic_t, returning the pre-add value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param increment Amount added to the stored value
 *
 * @return The value held by *a before the addition
 */
__inline__ int32_t metal_atomic_add(metal_atomic_t *a, int32_t increment) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amoadd.w fetches the word at *a, writes back the sum, and yields the
     * original value as one atomic operation. */
    __asm__ volatile("amoadd.w %[prev], %[amount], (%[target])"
                     : [prev] "=r"(previous)
                     : [amount] "r"(increment), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically bitwise-AND a metal_atomic_t, returning the pre-AND value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param mask Bitmask ANDed into the stored value
 *
 * @return The value held by *a before the AND
 */
__inline__ int32_t metal_atomic_and(metal_atomic_t *a, int32_t mask) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amoand.w fetches the word at *a, writes back (old & mask), and
     * yields the original value as one atomic operation. */
    __asm__ volatile("amoand.w %[prev], %[bits], (%[target])"
                     : [prev] "=r"(previous)
                     : [bits] "r"(mask), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically bitwise-OR a metal_atomic_t, returning the pre-OR value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param mask Bitmask ORed into the stored value
 *
 * @return The value held by *a before the OR
 */
__inline__ int32_t metal_atomic_or(metal_atomic_t *a, int32_t mask) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amoor.w fetches the word at *a, writes back (old | mask), and yields
     * the original value as one atomic operation. */
    __asm__ volatile("amoor.w %[prev], %[bits], (%[target])"
                     : [prev] "=r"(previous)
                     : [bits] "r"(mask), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically swap a metal_atomic_t, returning the previous value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param new_value Value stored into the metal_atomic_t
 *
 * @return The value held by *a before the swap
 */
__inline__ int32_t metal_atomic_swap(metal_atomic_t *a, int32_t new_value) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amoswap.w exchanges new_value with the word at *a and yields the
     * original value as one atomic operation. */
    __asm__ volatile("amoswap.w %[prev], %[incoming], (%[target])"
                     : [prev] "=r"(previous)
                     : [incoming] "r"(new_value), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically bitwise-XOR a metal_atomic_t, returning the pre-XOR value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param mask Bitmask XORed into the stored value
 *
 * @return The value held by *a before the XOR
 */
__inline__ int32_t metal_atomic_xor(metal_atomic_t *a, int32_t mask) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amoxor.w fetches the word at *a, writes back (old ^ mask), and
     * yields the original value as one atomic operation. */
    __asm__ volatile("amoxor.w %[prev], %[bits], (%[target])"
                     : [prev] "=r"(previous)
                     : [bits] "r"(mask), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically set a memory location to the (signed) greater of its
 * current value and a comparison value, returning the previous value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param compare Value compared (as signed) against the stored value
 *
 * @return The value held by *a before the operation
 */
__inline__ int32_t metal_atomic_max(metal_atomic_t *a, int32_t compare) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amomax.w writes back the signed maximum of the word at *a and
     * compare, and yields the original value as one atomic operation. */
    __asm__ volatile("amomax.w %[prev], %[bound], (%[target])"
                     : [prev] "=r"(previous)
                     : [bound] "r"(compare), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically set the value of a memory location to the (unsigned)
 * greater of its current value or a value to compare it with.
 *
 * If atomics are not supported on the platform, this function will trap with
 * a Store/AMO access fault.
 *
 * @param a The pointer to the value to swap
 * @param compare the value to compare with the value in memory
 *
 * @return The previous value of the metal_atomic_t
 */
__inline__ uint32_t metal_atomic_max_u(metal_atomic_t *a, uint32_t compare) {
#ifdef __riscv_atomic
    /* amomaxu.w compares unsigned and the function returns uint32_t, so
     * hold the fetched value in an unsigned variable (previously int32_t,
     * forcing an implicit signed->unsigned conversion on return). */
    uint32_t old;
    __asm__ volatile("amomaxu.w %[old], %[compare], (%[atomic])"
                     : [old] "=r"(old)
                     : [compare] "r"(compare), [atomic] "r"(a)
                     : "memory");
    return old;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically set a memory location to the (signed) lesser of its
 * current value and a comparison value, returning the previous value
 *
 * Traps with a Store/AMO access fault if the platform does not implement
 * atomic operations.
 *
 * @param a Pointer to the atomic word to modify
 * @param compare Value compared (as signed) against the stored value
 *
 * @return The value held by *a before the operation
 */
__inline__ int32_t metal_atomic_min(metal_atomic_t *a, int32_t compare) {
#ifdef __riscv_atomic
    int32_t previous;
    /* amomin.w writes back the signed minimum of the word at *a and
     * compare, and yields the original value as one atomic operation. */
    __asm__ volatile("amomin.w %[prev], %[bound], (%[target])"
                     : [prev] "=r"(previous)
                     : [bound] "r"(compare), [target] "r"(a)
                     : "memory");
    return previous;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
/*!
 * @brief Atomically set the value of a memory location to the (unsigned) lesser
 * of its current value or a value to compare it with.
 *
 * If atomics are not supported on the platform, this function will trap with
 * a Store/AMO access fault.
 *
 * @param a The pointer to the value to swap
 * @param compare the value to compare with the value in memory
 *
 * @return The previous value of the metal_atomic_t
 */
__inline__ uint32_t metal_atomic_min_u(metal_atomic_t *a, uint32_t compare) {
#ifdef __riscv_atomic
    /* amominu.w compares unsigned and the function returns uint32_t, so
     * hold the fetched value in an unsigned variable (previously int32_t,
     * forcing an implicit signed->unsigned conversion on return). */
    uint32_t old;
    __asm__ volatile("amominu.w %[old], %[compare], (%[atomic])"
                     : [old] "=r"(old)
                     : [compare] "r"(compare), [atomic] "r"(a)
                     : "memory");
    return old;
#else
    _METAL_TRAP_AMO_ACCESS(a);
#endif
}
#endif /* METAL__ATOMIC_H */