diff --git a/core/include/rmutex.h b/core/include/rmutex.h
index 3fc6585782ef..7a3daa7146fa 100644
--- a/core/include/rmutex.h
+++ b/core/include/rmutex.h
@@ -24,11 +24,6 @@
 #define RMUTEX_H
 
 #include <stdint.h>
-#ifdef __cplusplus
-#include "c11_atomics_compat.hpp"
-#else
-#include <stdatomic.h>
-#endif
 
 #include "mutex.h"
 #include "sched.h"
@@ -62,14 +57,14 @@ typedef struct rmutex_t {
      * atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
      * @internal
      */
-    atomic_int_least16_t owner;
+    kernel_pid_t owner;
 } rmutex_t;
 
 /**
  * @brief Static initializer for rmutex_t.
  * @details This initializer is preferable to rmutex_init().
  */
-#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
+#define RMUTEX_INIT { MUTEX_INIT, 0, KERNEL_PID_UNDEF }
 
 /**
  * @brief Initializes a recursive mutex object.
diff --git a/core/rmutex.c b/core/rmutex.c
index f967049258c2..4eebca0bb479 100644
--- a/core/rmutex.c
+++ b/core/rmutex.c
@@ -24,9 +24,10 @@
 #include <stdio.h>
 #include <inttypes.h>
 
+#include "assert.h"
+#include "atomic_utils.h"
 #include "rmutex.h"
 #include "thread.h"
-#include "assert.h"
 
 #define ENABLE_DEBUG 0
 #include "debug.h"
@@ -78,7 +79,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
      */
 
     /* ensure that owner is read atomically, since I need a consistent value */
-    owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
+    owner = atomic_load_kernel_pid(&rmutex->owner);
 
     DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n",
           thread_getpid(), owner);
@@ -104,8 +105,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
     DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());
 
     /* ensure that owner is written atomically, since others need a consistent value */
-    atomic_store_explicit(&rmutex->owner, thread_getpid(),
-                          memory_order_relaxed);
+    atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
 
     DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());
 
@@ -127,8 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)
 
 void rmutex_unlock(rmutex_t *rmutex)
 {
-    assert(atomic_load_explicit(&rmutex->owner,
-                                memory_order_relaxed) == thread_getpid());
+    /* ensure that owner is read atomically, since I need a consistent value */
+    assert(atomic_load_kernel_pid(&rmutex->owner) == thread_getpid());
     assert(rmutex->refcount > 0);
 
     DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
@@ -143,8 +143,7 @@ void rmutex_unlock(rmutex_t *rmutex)
         DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());
 
         /* ensure that owner is written only once */
-        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
-                              memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, KERNEL_PID_UNDEF);
 
         DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());
 
diff --git a/sys/xtimer/xtimer.c b/sys/xtimer/xtimer.c
index 3b3541bdfb5c..0381bd368ff7 100644
--- a/sys/xtimer/xtimer.c
+++ b/sys/xtimer/xtimer.c
@@ -23,6 +23,7 @@
 #include <stdint.h>
 
 #include "xtimer.h"
+#include "atomic_utils.h"
 #include "msg.h"
 #include "mutex.h"
 #include "rmutex.h"
@@ -224,8 +225,7 @@ int xtimer_rmutex_lock_timeout(rmutex_t *rmutex, uint64_t timeout)
         return 0;
     }
     if (xtimer_mutex_lock_timeout(&rmutex->mutex, timeout) == 0) {
-        atomic_store_explicit(&rmutex->owner,
-                              thread_getpid(), memory_order_relaxed);
+        atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
         rmutex->refcount++;
         return 0;
     }
diff --git a/sys/ztimer/util.c b/sys/ztimer/util.c
index 822b82a5be0b..be0716422b56 100644
--- a/sys/ztimer/util.c
+++ b/sys/ztimer/util.c
@@ -23,6 +23,7 @@
 #include <assert.h>
 #include <stdint.h>
 
+#include "atomic_utils.h"
 #include "irq.h"
 #include "mutex.h"
#include "rmutex.h" @@ -190,8 +191,7 @@ int ztimer_rmutex_lock_timeout(ztimer_clock_t *clock, rmutex_t *rmutex, return 0; } if (ztimer_mutex_lock_timeout(clock, &rmutex->mutex, timeout) == 0) { - atomic_store_explicit(&rmutex->owner, - thread_getpid(), memory_order_relaxed); + atomic_store_kernel_pid(&rmutex->owner, thread_getpid()); rmutex->refcount++; return 0; } diff --git a/sys/ztimer64/util.c b/sys/ztimer64/util.c index 3e36accde5b7..1d1e8ff5db5d 100644 --- a/sys/ztimer64/util.c +++ b/sys/ztimer64/util.c @@ -23,6 +23,7 @@ #include #include +#include "atomic_utils.h" #include "irq.h" #include "mutex.h" #include "rmutex.h" @@ -183,8 +184,7 @@ int ztimer64_rmutex_lock_until(ztimer64_clock_t *clock, rmutex_t *rmutex, return 0; } if (ztimer64_mutex_lock_until(clock, &rmutex->mutex, target) == 0) { - atomic_store_explicit(&rmutex->owner, - thread_getpid(), memory_order_relaxed); + atomic_store_kernel_pid(&rmutex->owner, thread_getpid()); rmutex->refcount++; return 0; } diff --git a/tests/xtimer_rmutex_lock_timeout/main.c b/tests/xtimer_rmutex_lock_timeout/main.c index 8cac5b0fda0b..35ad64e51e9f 100644 --- a/tests/xtimer_rmutex_lock_timeout/main.c +++ b/tests/xtimer_rmutex_lock_timeout/main.c @@ -20,11 +20,13 @@ #include #include + +#include "atomic_utils.h" +#include "irq.h" +#include "msg.h" #include "shell.h" -#include "xtimer.h" #include "thread.h" -#include "msg.h" -#include "irq.h" +#include "xtimer.h" /* XTIMER_SHIFT can be undefined when using xtimer_on_ztimer on boards * incompatible with xtimers tick conversion, e.g. the waspmote-pro @@ -155,8 +157,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_unlocked(int argc, if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) { /* rmutex has to be locked once */ - if (atomic_load_explicit(&test_rmutex.owner, - memory_order_relaxed) == thread_getpid() && + if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() && test_rmutex.refcount == 1 && mutex_trylock(&test_rmutex.mutex) == 0) { puts("OK"); @@ -209,8 +210,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_locked(int argc, } else { /* rmutex has to be locked once */ - if (atomic_load_explicit(&test_rmutex.owner, - memory_order_relaxed) == second_t_pid && + if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid && test_rmutex.refcount == 1 && mutex_trylock(&test_rmutex.mutex) == 0) { puts("OK"); @@ -261,8 +261,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_low_prio_thread( if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) { /* rmutex has to be locked once */ - if (atomic_load_explicit(&test_rmutex.owner, - memory_order_relaxed) == thread_getpid() && + if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() && test_rmutex.refcount == 1 && mutex_trylock(&test_rmutex.mutex) == 0) { puts("OK"); @@ -317,8 +316,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_locked(int argc, } else { /* rmutex has to be locked once */ - if (atomic_load_explicit(&test_rmutex.owner, - memory_order_relaxed) == second_t_pid && + if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid && test_rmutex.refcount == 1 && mutex_trylock(&test_rmutex.mutex) == 0) { puts("OK"); @@ -355,8 +353,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_unlocked(int argc, if (xtimer_rmutex_lock_timeout(&test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) { /* rmutex has to be locked once */ - if (atomic_load_explicit(&test_rmutex.owner, - memory_order_relaxed) == thread_getpid() && + if (atomic_load_kernel_pid(&test_rmutex.owner) == 
thread_getpid() && test_rmutex.refcount == 1 && mutex_trylock(&test_rmutex.mutex) == 0) { puts("OK");
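
Note for reviewers: the invariant this patch settles on is a plain `kernel_pid_t` owner field that is only ever read or written through the type-matched helpers from `atomic_utils.h`, replacing the C11 `atomic_*_explicit()` calls and letting `rmutex.h` drop its `<stdatomic.h>` / `c11_atomics_compat.hpp` dependency. Below is a minimal sketch of the resulting access pattern; it assumes only the APIs already visible in the diff (`atomic_load_kernel_pid()`, `atomic_store_kernel_pid()`, `RMUTEX_INIT`, the `mutex`/`owner`/`refcount` fields) plus core's `mutex_lock()` and `thread_getpid()`, and is an illustration, not part of the patch:

```c
#include <stdbool.h>

#include "atomic_utils.h"
#include "mutex.h"
#include "rmutex.h"
#include "thread.h"

static rmutex_t demo = RMUTEX_INIT;

/* Readers can race with the holder, so every cross-thread access to
 * `owner` goes through the tear-free accessors instead of a plain
 * load or store of the 16-bit PID. */
static bool current_thread_is_owner(void)
{
    return atomic_load_kernel_pid(&demo.owner) == thread_getpid();
}

static void acquire_first_time(void)
{
    mutex_lock(&demo.mutex);
    /* Only the new holder writes `owner`; a concurrent reader sees
     * either the old or the new PID, never a torn value. */
    atomic_store_kernel_pid(&demo.owner, thread_getpid());
    demo.refcount++; /* refcount is only ever touched by the holder */
}
```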