26
26
// uint64_t fetch_add_8 (full-barrier): atomically { old = *x0; *x0 += x1; }
// In:  x0 = ptr, x1 = addend
// Out: x0 = old value
// Clobbers: x2, x8, w9
.globl aarch64_atomic_fetch_add_8_default_impl
.align 5
aarch64_atomic_fetch_add_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        ldaddal x2, x1, [x0]            // NOTE(review): operand order is Xs, Xt — verify; intended: old -> x2
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   x2, [x0]                // load-acquire exclusive: x2 = old
        add     x8, x2, x1              // x8 = old + addend
        stlxr   w9, x8, [x0]            // store-release exclusive; w9 = 0 on success
        cbnz    w9, 0b                  // lost reservation — retry
#endif
        dmb     ish                     // full barrier after the update
        mov     x0, x2                  // return old value
        ret
37
41
38
42
// uint32_t fetch_add_4 (full-barrier): atomically { old = *x0; *x0 += w1; }
// In:  x0 = ptr, w1 = addend
// Out: w0 = old value
// Clobbers: w2, w8, w9
.globl aarch64_atomic_fetch_add_4_default_impl
.align 5
aarch64_atomic_fetch_add_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        ldaddal w1, w2, [x0]            // LSE: w2 = old, *[x0] += w1, acquire+release
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   w2, [x0]                // load-acquire exclusive: w2 = old
        add     w8, w2, w1              // w8 = old + addend
        stlxr   w9, w8, [x0]            // store-release exclusive; w9 = 0 on success
        cbnz    w9, 0b                  // lost reservation — retry
#endif
        dmb     ish                     // full barrier after the update
        mov     w0, w2                  // return old value
        ret
49
57
50
58
// uint64_t fetch_add_8 (relaxed): atomically { old = *x0; *x0 += x1; }
// No ordering: plain exclusives, no barriers.
// In:  x0 = ptr, x1 = addend
// Out: x0 = old value
// Clobbers: x2, x8, w9
.globl aarch64_atomic_fetch_add_8_relaxed_default_impl   // .globl (not ". global") for consistency with siblings
.align 5
aarch64_atomic_fetch_add_8_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        ldadd   x1, x2, [x0]            // LSE relaxed: x2 = old, *[x0] += x1
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    x2, [x0]                // plain exclusive load: x2 = old
        add     x8, x2, x1              // x8 = old + addend
        stxr    w9, x8, [x0]            // plain exclusive store; w9 = 0 on success
        cbnz    w9, 0b                  // lost reservation — retry
#endif
        mov     x0, x2                  // return old value
        ret
60
72
61
73
// uint32_t fetch_add_4 (relaxed): atomically { old = *x0; *x0 += w1; }
// No ordering: plain exclusives, no barriers.
// In:  x0 = ptr, w1 = addend
// Out: w0 = old value
// Clobbers: w2, w8, w9
.globl aarch64_atomic_fetch_add_4_relaxed_default_impl   // .globl (not ". global") for consistency with siblings
.align 5
aarch64_atomic_fetch_add_4_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        ldadd   w1, w2, [x0]            // LSE relaxed: w2 = old, *[x0] += w1
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    w2, [x0]                // plain exclusive load: w2 = old
        add     w8, w2, w1              // w8 = old + addend
        stxr    w9, w8, [x0]            // plain exclusive store; w9 = 0 on success
        cbnz    w9, 0b                  // lost reservation — retry
#endif
        mov     w0, w2                  // return old value
        ret
71
87
72
88
// uint32_t xchg_4 (full-barrier): atomically { old = *x0; *x0 = w1; }
// In:  x0 = ptr, w1 = new value
// Out: w0 = old value
// Clobbers: w2, w8
.globl aarch64_atomic_xchg_4_default_impl
.align 5
aarch64_atomic_xchg_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        swpal   w1, w2, [x0]            // LSE: swap, w2 = old, acquire+release
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   w2, [x0]                // load-acquire exclusive: w2 = old
        stlxr   w8, w1, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
        dmb     ish                     // full barrier after the swap
        mov     w0, w2                  // return old value
        ret
82
102
83
103
// uint64_t xchg_8 (full-barrier): atomically { old = *x0; *x0 = x1; }
// In:  x0 = ptr, x1 = new value
// Out: x0 = old value
// Clobbers: x2, w8
.globl aarch64_atomic_xchg_8_default_impl
.align 5
aarch64_atomic_xchg_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        swpal   x1, x2, [x0]            // LSE: swap, x2 = old, acquire+release
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   x2, [x0]                // load-acquire exclusive: x2 = old
        stlxr   w8, x1, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
        dmb     ish                     // full barrier after the swap
        mov     x0, x2                  // return old value
        ret
93
117
94
118
// uint8_t cmpxchg_1 (full-barrier): atomically { old = *x0; if (old == w1 & 0xff) *x0 = w2; }
// In:  x0 = ptr, w1 = expected (low 8 bits), w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_1_default_impl
.align 5
aarch64_atomic_cmpxchg_1_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casalb  w3, w2, [x0]            // LSE byte CAS, acquire+release; w3 = old
#else
        dmb     ish                     // full barrier before the LL/SC sequence
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxrb   w3, [x0]                // exclusive byte load: w3 = old (zero-extended)
        eor     w8, w3, w1              // compare only the low 8 bits:
        tst     x8, #0xff               //   (old ^ expected) & 0xff == 0 ?
        b.ne    1f                      // mismatch — bail out, return old
        stxrb   w8, w2, [x0]            // exclusive byte store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      dmb     ish                     // trailing barrier shared by both paths
        mov     w0, w3                  // return old value
        ret
108
137
109
138
// uint32_t cmpxchg_4 (full-barrier): atomically { old = *x0; if (old == w1) *x0 = w2; }
// In:  x0 = ptr, w1 = expected, w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_4_default_impl
.align 5
aarch64_atomic_cmpxchg_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casal   w3, w2, [x0]            // LSE CAS, acquire+release; w3 = old
#else
        dmb     ish                     // full barrier before the LL/SC sequence
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    w3, [x0]                // exclusive load: w3 = old
        cmp     w3, w1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stxr    w8, w2, [x0]            // exclusive store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      dmb     ish                     // trailing barrier shared by both paths
        mov     w0, w3                  // return old value
        ret
122
156
123
157
// uint64_t cmpxchg_8 (full-barrier): atomically { old = *x0; if (old == x1) *x0 = x2; }
// In:  x0 = ptr, x1 = expected, x2 = new value
// Out: x0 = old value
// Clobbers: x3, w8, flags
.globl aarch64_atomic_cmpxchg_8_default_impl
.align 5
aarch64_atomic_cmpxchg_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casal   x3, x2, [x0]            // LSE CAS, acquire+release; x3 = old
#else
        dmb     ish                     // full barrier before the LL/SC sequence
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    x3, [x0]                // exclusive load: x3 = old
        cmp     x3, x1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stxr    w8, x2, [x0]            // exclusive store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      dmb     ish                     // trailing barrier shared by both paths
        mov     x0, x3                  // return old value
        ret
136
175
137
176
// uint32_t cmpxchg_4 (release): atomically { old = *x0; if (old == w1) *x0 = w2; }
// Release-only ordering: store side uses release, no acquire on the load.
// In:  x0 = ptr, w1 = expected, w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_4_release_default_impl
.align 5
aarch64_atomic_cmpxchg_4_release_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casl    w3, w2, [x0]            // LSE CAS, release; w3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    w3, [x0]                // plain exclusive load: w3 = old
        cmp     w3, w1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stlxr   w8, w2, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     w0, w3                  // return old value
        ret
148
192
149
193
// uint64_t cmpxchg_8 (release): atomically { old = *x0; if (old == x1) *x0 = x2; }
// Release-only ordering: store side uses release, no acquire on the load.
// In:  x0 = ptr, x1 = expected, x2 = new value
// Out: x0 = old value
// Clobbers: x3, w8, flags
.globl aarch64_atomic_cmpxchg_8_release_default_impl
.align 5
aarch64_atomic_cmpxchg_8_release_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casl    x3, x2, [x0]            // LSE CAS, release; x3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    x3, [x0]                // plain exclusive load: x3 = old
        cmp     x3, x1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stlxr   w8, x2, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     x0, x3                  // return old value
        ret
160
209
161
210
// uint32_t cmpxchg_4 (seq-cst): atomically { old = *x0; if (old == w1) *x0 = w2; }
// Acquire on the load + release on the store; no extra dmb.
// In:  x0 = ptr, w1 = expected, w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_4_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_4_seq_cst_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casal   w3, w2, [x0]            // LSE CAS, acquire+release; w3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   w3, [x0]                // load-acquire exclusive: w3 = old
        cmp     w3, w1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stlxr   w8, w2, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     w0, w3                  // return old value
        ret
172
226
173
227
// uint64_t cmpxchg_8 (seq-cst): atomically { old = *x0; if (old == x1) *x0 = x2; }
// Acquire on the load + release on the store; no extra dmb.
// In:  x0 = ptr, x1 = expected, x2 = new value
// Out: x0 = old value
// Clobbers: x3, w8, flags
.globl aarch64_atomic_cmpxchg_8_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_8_seq_cst_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casal   x3, x2, [x0]            // LSE CAS, acquire+release; x3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldaxr   x3, [x0]                // load-acquire exclusive: x3 = old
        cmp     x3, x1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stlxr   w8, x2, [x0]            // store-release exclusive; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     x0, x3                  // return old value
        ret
184
243
185
244
// uint8_t cmpxchg_1 (relaxed): atomically { old = *x0; if (old == w1 & 0xff) *x0 = w2; }
// No ordering: plain exclusives, no barriers.
// In:  x0 = ptr, w1 = expected (low 8 bits), w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_1_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_1_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        casb    w3, w2, [x0]            // LSE relaxed byte CAS; w3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxrb   w3, [x0]                // exclusive byte load: w3 = old (zero-extended)
        eor     w8, w3, w1              // compare only the low 8 bits:
        tst     x8, #0xff               //   (old ^ expected) & 0xff == 0 ?
        b.ne    1f                      // mismatch — bail out, return old
        stxrb   w8, w2, [x0]            // exclusive byte store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     w0, w3                  // return old value
        ret
197
261
198
262
// uint32_t cmpxchg_4 (relaxed): atomically { old = *x0; if (old == w1) *x0 = w2; }
// No ordering: plain exclusives, no barriers.
// In:  x0 = ptr, w1 = expected, w2 = new value
// Out: w0 = old value
// Clobbers: w3 (x3), w8, flags
.globl aarch64_atomic_cmpxchg_4_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_4_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        cas     w3, w2, [x0]            // LSE relaxed CAS; w3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    w3, [x0]                // plain exclusive load: w3 = old
        cmp     w3, w1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stxr    w8, w2, [x0]            // plain exclusive store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     w0, w3                  // return old value
        ret
209
278
210
279
// uint64_t cmpxchg_8 (relaxed): atomically { old = *x0; if (old == x1) *x0 = x2; }
// No ordering: plain exclusives, no barriers.
// In:  x0 = ptr, x1 = expected, x2 = new value
// Out: x0 = old value
// Clobbers: x3, w8, flags
.globl aarch64_atomic_cmpxchg_8_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_8_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
        mov     x3, x1                  // CAS mutates the compare register — keep x1 intact
        cas     x3, x2, [x0]            // LSE relaxed CAS; x3 = old
#else
        prfm    pstl1strm, [x0]         // prefetch line for store (streaming)
0:      ldxr    x3, [x0]                // plain exclusive load: x3 = old
        cmp     x3, x1                  // old == expected ?
        b.ne    1f                      // mismatch — bail out, return old
        stxr    w8, x2, [x0]            // plain exclusive store; w8 = 0 on success
        cbnz    w8, 0b                  // lost reservation — retry
#endif
1:      mov     x0, x3                  // return old value
        ret
0 commit comments