@@ -62,7 +62,12 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         // Realign src
         let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
         // This will read (but won't use) bytes out of bound.
+        // cfg needed because not all targets will have atomic loads that can be lowered
+        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
+        #[cfg(target_has_atomic_load_store = "ptr")]
         let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
+        #[cfg(not(target_has_atomic_load_store = "ptr"))]
+        let mut prev_word = core::ptr::read_volatile(src_aligned);

         while dest_usize < dest_end {
             src_aligned = src_aligned.add(1);
@@ -155,7 +160,12 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         // Realign src_aligned
         let mut src_aligned = (src as usize & !WORD_MASK) as *mut usize;
         // This will read (but won't use) bytes out of bound.
+        // cfg needed because not all targets will have atomic loads that can be lowered
+        // (e.g. BPF, MSP430), or provided by an external library (e.g. RV32I)
+        #[cfg(target_has_atomic_load_store = "ptr")]
         let mut prev_word = core::intrinsics::atomic_load_unordered(src_aligned);
+        #[cfg(not(target_has_atomic_load_store = "ptr"))]
+        let mut prev_word = core::ptr::read_volatile(src_aligned);

         while dest_start < dest_usize {
             src_aligned = src_aligned.sub(1);
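
For reference, a minimal sketch (not part of this patch) of how the cfg split duplicated in both hunks could be factored into a shared helper. `load_aligned_word` is a hypothetical name, and the sketch assumes a nightly toolchain with the `core_intrinsics` feature; the two arms simply mirror the loads chosen in the diff above.

```rust
// Hypothetical helper: pick the word-load strategy once, use it from both
// copy_forward and copy_backward.

#[cfg(target_has_atomic_load_store = "ptr")]
#[inline(always)]
unsafe fn load_aligned_word(src: *const usize) -> usize {
    // Targets that can lower (or link) pointer-sized atomic loads use the
    // unordered atomic load, as in the diff above.
    core::intrinsics::atomic_load_unordered(src)
}

#[cfg(not(target_has_atomic_load_store = "ptr"))]
#[inline(always)]
unsafe fn load_aligned_word(src: *const usize) -> usize {
    // Fallback for targets without usable atomic loads (e.g. BPF, MSP430,
    // RV32I): a volatile read keeps the access from being elided or split.
    core::ptr::read_volatile(src)
}
```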