@@ -606,30 +606,30 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
 
 /* Lock-free loads and stores don't need assembler - just aligned accesses */
 /* Silly ordering of `T volatile` is because T can be `void *` */
-#define DO_MBED_LOCKFREE_LOADSTORE(T, fn_suffix) \
-MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const volatile *valuePtr) \
+#define DO_MBED_LOCKFREE_LOADSTORE(T, V, fn_suffix) \
+MBED_FORCEINLINE T core_util_atomic_load_##fn_suffix(T const V *valuePtr) \
 { \
     T value = *valuePtr; \
     MBED_BARRIER(); \
     return value; \
 } \
 \
-MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const volatile *valuePtr, mbed_memory_order order) \
+MBED_FORCEINLINE T core_util_atomic_load_explicit_##fn_suffix(T const V *valuePtr, mbed_memory_order order) \
 { \
     MBED_CHECK_LOAD_ORDER(order); \
     T value = *valuePtr; \
     MBED_ACQUIRE_BARRIER(order); \
     return value; \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T volatile *valuePtr, T value) \
+MBED_FORCEINLINE void core_util_atomic_store_##fn_suffix(T V *valuePtr, T value) \
 { \
     MBED_BARRIER(); \
     *valuePtr = value; \
     MBED_BARRIER(); \
 } \
 \
-MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T volatile *valuePtr, T value, mbed_memory_order order) \
+MBED_FORCEINLINE void core_util_atomic_store_explicit_##fn_suffix(T V *valuePtr, T value, mbed_memory_order order) \
 { \
     MBED_CHECK_STORE_ORDER(order); \
     MBED_RELEASE_BARRIER(order); \
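
As a reading aid between hunks: the sketch below is not part of the patch, it is simply what one expansion of the revised macro produces, written out by hand for `DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)` and abbreviated to the non-explicit pair, with comments added. With the `V` parameter left empty, the same expansion yields the non-volatile overloads used on the C++ side.

MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(uint8_t const volatile *valuePtr)
{
    uint8_t value = *valuePtr;  /* single aligned access - naturally atomic, per the comment above */
    MBED_BARRIER();             /* keeps the load from being reordered with later code */
    return value;
}

MBED_FORCEINLINE void core_util_atomic_store_u8(uint8_t volatile *valuePtr, uint8_t value)
{
    MBED_BARRIER();             /* no reordering into the store from before it... */
    *valuePtr = value;
    MBED_BARRIER();             /* ...or from after it */
}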
@@ -651,15 +651,51 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_at
     flagPtr->_flag = false;
     MBED_SEQ_CST_BARRIER(order);
 }
-DO_MBED_LOCKFREE_LOADSTORE(uint8_t, u8)
-DO_MBED_LOCKFREE_LOADSTORE(uint16_t, u16)
-DO_MBED_LOCKFREE_LOADSTORE(uint32_t, u32)
-DO_MBED_LOCKFREE_LOADSTORE(int8_t, s8)
-DO_MBED_LOCKFREE_LOADSTORE(int16_t, s16)
-DO_MBED_LOCKFREE_LOADSTORE(int32_t, s32)
-DO_MBED_LOCKFREE_LOADSTORE(bool, bool)
-DO_MBED_LOCKFREE_LOADSTORE(void *, ptr)
 
+#ifdef __cplusplus
+// Temporarily turn off extern "C", so we can provide non-volatile load/store
+// overloads for efficiency. All these functions are static inline, so this has
+// no linkage effect - it just permits the overloads.
+} // extern "C"
+
+// For efficiency it's worth having non-volatile overloads
+MBED_FORCEINLINE void core_util_atomic_flag_clear(core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+    MBED_BARRIER();
+}
+
+MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(core_util_atomic_flag *flagPtr, mbed_memory_order order)
+{
+    MBED_RELEASE_BARRIER(order);
+    flagPtr->_flag = false;
+    MBED_SEQ_CST_BARRIER(order);
+}
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t,, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t,, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t,, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t,, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t,, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t,, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool,, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *,, ptr)
+
+#endif
+
+DO_MBED_LOCKFREE_LOADSTORE(uint8_t, volatile, u8)
+DO_MBED_LOCKFREE_LOADSTORE(uint16_t, volatile, u16)
+DO_MBED_LOCKFREE_LOADSTORE(uint32_t, volatile, u32)
+DO_MBED_LOCKFREE_LOADSTORE(int8_t, volatile, s8)
+DO_MBED_LOCKFREE_LOADSTORE(int16_t, volatile, s16)
+DO_MBED_LOCKFREE_LOADSTORE(int32_t, volatile, s32)
+DO_MBED_LOCKFREE_LOADSTORE(bool, volatile, bool)
+DO_MBED_LOCKFREE_LOADSTORE(void *, volatile, ptr)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 /******************** GENERIC VARIANTS - SIGNED, BOOL, POINTERS ****************/
 
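
The extern "C" suspension above is the load-bearing trick in this hunk: an overload set may contain at most one function with C language linkage, but since all of these functions are static inline, toggling the linkage specification changes no symbols; it merely lets C++ overload resolution see both the volatile and non-volatile variants. A minimal standalone sketch of the same pattern (all identifiers here are invented for illustration):

/* mini_atomic.h - illustrative only, not part of the patch */
#ifdef __cplusplus
extern "C" {
#endif

/* C-visible variant, callable from both C and C++ */
static inline int read_flag(const volatile int *p)
{
    return *p;
}

#ifdef __cplusplus
} /* suspend extern "C": no symbols change, overloading becomes legal */

/* C++-only overload for non-volatile objects */
static inline int read_flag(const int *p)
{
    return *p;
}

extern "C" { /* resume C linkage for the declarations that follow */
#endif

#ifdef __cplusplus
}
#endif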
@@ -906,7 +942,19 @@ inline T core_util_atomic_load(const volatile T *valuePtr)
 } \
 \
 template<> \
+inline T core_util_atomic_load(const T *valuePtr) \
+{ \
+    return core_util_atomic_load_##fn_suffix(valuePtr); \
+} \
+\
+template<> \
 inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
+{ \
+    return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
+} \
+\
+template<> \
+inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 }
@@ -917,12 +965,24 @@ inline T *core_util_atomic_load(T *const volatile *valuePtr)
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }
 
+template<typename T>
+inline T *core_util_atomic_load(T *const *valuePtr)
+{
+    return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
+}
+
 template<typename T>
 inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
 }
 
+template<typename T>
+inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
+{
+    return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
+}
+
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_LOAD_TEMPLATE(uint32_t, u32)
@@ -941,7 +1001,19 @@ inline void core_util_atomic_store(volatile T *valuePtr, T val)
 } \
 \
 template<> \
+inline void core_util_atomic_store(T *valuePtr, T val) \
+{ \
+    core_util_atomic_store_##fn_suffix(valuePtr, val); \
+} \
+\
+template<> \
 inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
+{ \
+    core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
+} \
+\
+template<> \
+inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 }
@@ -952,12 +1024,24 @@ inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }
 
+template<typename T>
+inline void core_util_atomic_store(T **valuePtr, T *val)
+{
+    core_util_atomic_store_ptr((void **) valuePtr, val);
+}
+
 template<typename T>
 inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
 {
     core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
 }
 
+template<typename T>
+inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
+{
+    core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
+}
+
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint8_t, u8)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint16_t, u16)
 DO_MBED_ATOMIC_STORE_TEMPLATE(uint32_t, u32)
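
A store-side usage sketch to close (type and function names invented; assumes the templates above and mbed's mbed_memory_order enum): the pointer templates type-erase through void * and back, and the explicit variants thread the memory order down to the _explicit_ptr primitive.

struct Message;          /* hypothetical payload type */
static Message *mailbox; /* plain, non-volatile shared pointer */

void publish(Message *m)
{
    /* resolves to core_util_atomic_store_explicit(Message **, Message *, order),
     * which forwards as core_util_atomic_store_explicit_ptr((void **) &mailbox, m, order) */
    core_util_atomic_store_explicit(&mailbox, m, mbed_memory_order_release);
}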