Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

core/rmutex: use atomic utils #16919

Merged
merged 2 commits into from
Feb 4, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 2 additions & 7 deletions core/include/rmutex.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,6 @@
#define RMUTEX_H

#include <stdint.h>
#ifdef __cplusplus
#include "c11_atomics_compat.hpp"
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The cross language issues with C11 atomics are not unique to rust, btw. C++ also has its own atomics, which are inherently incompatible. The c11_atomics_compat.hpp could be dropped if we would move away from C11 atomics in-tree.

But I guess I should try to improve the ROM costs of the atomic utils first. There is one issue with GCC not optimizing unused variables across memory barriers - which LLVM just does if the variable could (at least in theory) be allocated in registers, which LLVM reasons is not memory and thus not affected by memory barriers. I won't get past this issue, but GCC's optimizer generally freaks out when it sees memory barriers or volatile and stops optimizing things that are not affected. (E.g. I often saw pointer arithmetic done for every access, even though the memory the pointer points to was marked as volatile, not the pointer itself.) Making the atomic utils consistently perform as well as or better than C11 atomics in every aspect will make them easy to sell.

#else
#include <stdatomic.h>
#endif

#include "mutex.h"
#include "sched.h"
Expand Down Expand Up @@ -62,14 +57,14 @@ typedef struct rmutex_t {
* atomic_int_least16_t is used. Note @ref kernel_pid_t is an int16
* @internal
*/
atomic_int_least16_t owner;
kernel_pid_t owner;
} rmutex_t;

/**
* @brief Static initializer for rmutex_t.
* @details This initializer is preferable to rmutex_init().
*/
#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
#define RMUTEX_INIT { MUTEX_INIT, 0, KERNEL_PID_UNDEF }

/**
* @brief Initializes a recursive mutex object.
Expand Down
15 changes: 7 additions & 8 deletions core/rmutex.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,9 +24,10 @@
#include <stdio.h>
#include <inttypes.h>

#include "assert.h"
#include "atomic_utils.h"
#include "rmutex.h"
#include "thread.h"
#include "assert.h"

#define ENABLE_DEBUG 0
#include "debug.h"
Expand Down Expand Up @@ -78,7 +79,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
*/

/* ensure that owner is read atomically, since I need a consistent value */
owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
owner = atomic_load_kernel_pid(&rmutex->owner);
DEBUG("rmutex %" PRIi16 " : mutex held by %" PRIi16 " \n",
thread_getpid(), owner);

Expand All @@ -104,8 +105,7 @@ static int _lock(rmutex_t *rmutex, int trylock)
DEBUG("rmutex %" PRIi16 " : setting the owner\n", thread_getpid());

/* ensure that owner is written atomically, since others need a consistent value */
atomic_store_explicit(&rmutex->owner, thread_getpid(),
memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());

DEBUG("rmutex %" PRIi16 " : increasing refs\n", thread_getpid());

Expand All @@ -127,8 +127,8 @@ int rmutex_trylock(rmutex_t *rmutex)

void rmutex_unlock(rmutex_t *rmutex)
{
assert(atomic_load_explicit(&rmutex->owner,
memory_order_relaxed) == thread_getpid());
/* ensure that owner is read atomically, since I need a consistent value */
assert(atomic_load_kernel_pid(&rmutex->owner) == thread_getpid());
assert(rmutex->refcount > 0);

DEBUG("rmutex %" PRIi16 " : decrementing refs refs\n", thread_getpid());
Expand All @@ -143,8 +143,7 @@ void rmutex_unlock(rmutex_t *rmutex)
DEBUG("rmutex %" PRIi16 " : resetting owner\n", thread_getpid());

/* ensure that owner is written only once */
atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF,
memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, KERNEL_PID_UNDEF);

DEBUG("rmutex %" PRIi16 " : releasing mutex\n", thread_getpid());

Expand Down
23 changes: 23 additions & 0 deletions sys/include/atomic_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,8 @@
#include <stdint.h>

#include "irq.h"
#include "sched.h"

#include "atomic_utils_arch.h"

#ifdef __cplusplus
Expand Down Expand Up @@ -263,6 +265,16 @@ static inline uintptr_t atomic_load_uintptr(const volatile uintptr_t *var) {
static inline void * atomic_load_ptr(void **ptr_addr) {
return (void *)atomic_load_uintptr((const volatile uintptr_t *)ptr_addr);
}
/**
 * @brief Load a `kernel_pid_t` atomically
*
* @param[in] var Variable to load atomically
* @return The value stored in @p var
*/
static inline kernel_pid_t atomic_load_kernel_pid(const volatile kernel_pid_t *var)
{
return atomic_load_u16((const volatile uint16_t *)var);
}
/** @} */

/**
Expand Down Expand Up @@ -321,6 +333,17 @@ static inline void atomic_store_uintptr(volatile uintptr_t *dest, uintptr_t val)
static inline void atomic_store_ptr(void **dest, const void *val) {
atomic_store_uintptr((volatile uintptr_t *)dest, (uintptr_t)val);
}
/**
 * @brief Store a `kernel_pid_t` atomically
*
* @param[out] dest Location to atomically write the new value to
* @param[in] val Value to write
*/
static inline void atomic_store_kernel_pid(volatile kernel_pid_t *dest,
kernel_pid_t val)
{
atomic_store_u16((volatile uint16_t *)dest, (uint16_t)val);
}
/** @} */

/**
Expand Down
4 changes: 2 additions & 2 deletions sys/xtimer/xtimer.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include <string.h>

#include "xtimer.h"
#include "atomic_utils.h"
#include "msg.h"
#include "mutex.h"
#include "rmutex.h"
Expand Down Expand Up @@ -224,8 +225,7 @@ int xtimer_rmutex_lock_timeout(rmutex_t *rmutex, uint64_t timeout)
return 0;
}
if (xtimer_mutex_lock_timeout(&rmutex->mutex, timeout) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}
Expand Down
4 changes: 2 additions & 2 deletions sys/ztimer/util.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include <assert.h>
#include <errno.h>

#include "atomic_utils.h"
#include "irq.h"
#include "mutex.h"
#include "rmutex.h"
Expand Down Expand Up @@ -190,8 +191,7 @@ int ztimer_rmutex_lock_timeout(ztimer_clock_t *clock, rmutex_t *rmutex,
return 0;
}
if (ztimer_mutex_lock_timeout(clock, &rmutex->mutex, timeout) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}
Expand Down
4 changes: 2 additions & 2 deletions sys/ztimer64/util.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include <assert.h>
#include <errno.h>

#include "atomic_utils.h"
#include "irq.h"
#include "mutex.h"
#include "rmutex.h"
Expand Down Expand Up @@ -183,8 +184,7 @@ int ztimer64_rmutex_lock_until(ztimer64_clock_t *clock, rmutex_t *rmutex,
return 0;
}
if (ztimer64_mutex_lock_until(clock, &rmutex->mutex, target) == 0) {
atomic_store_explicit(&rmutex->owner,
thread_getpid(), memory_order_relaxed);
atomic_store_kernel_pid(&rmutex->owner, thread_getpid());
rmutex->refcount++;
return 0;
}
Expand Down
23 changes: 10 additions & 13 deletions tests/xtimer_rmutex_lock_timeout/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,13 @@

#include <stdio.h>
#include <stdlib.h>

#include "atomic_utils.h"
#include "irq.h"
#include "msg.h"
#include "shell.h"
#include "xtimer.h"
#include "thread.h"
#include "msg.h"
#include "irq.h"
#include "xtimer.h"

/* XTIMER_SHIFT can be undefined when using xtimer_on_ztimer on boards
* incompatible with xtimers tick conversion, e.g. the waspmote-pro
Expand Down Expand Up @@ -155,8 +157,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_unlocked(int argc,

if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -209,8 +210,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_long_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -261,8 +261,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_low_prio_thread(

if (xtimer_rmutex_lock_timeout(&test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -317,8 +316,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -355,8 +353,7 @@ static int cmd_test_xtimer_rmutex_lock_timeout_short_unlocked(int argc,

if (xtimer_rmutex_lock_timeout(&test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down
23 changes: 10 additions & 13 deletions tests/ztimer_rmutex_lock_timeout/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,13 @@

#include <stdio.h>
#include <stdlib.h>

#include "atomic_utils.h"
#include "irq.h"
#include "msg.h"
#include "shell.h"
#include "ztimer.h"
#include "thread.h"
#include "msg.h"
#include "irq.h"
#include "ztimer.h"

/* timeout at one millisecond (1000 us) to make sure it does not spin. */
#define LONG_RMUTEX_TIMEOUT 1000
Expand Down Expand Up @@ -148,8 +150,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_unlocked(int argc,

if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -202,8 +203,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_long_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -254,8 +254,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_low_prio_thread(

if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, LONG_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -310,8 +309,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_locked(int argc,
}
else {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == second_t_pid &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == second_t_pid &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down Expand Up @@ -348,8 +346,7 @@ static int cmd_test_ztimer_rmutex_lock_timeout_short_unlocked(int argc,

if (ztimer_rmutex_lock_timeout(ZTIMER_USEC, &test_rmutex, SHORT_RMUTEX_TIMEOUT) == 0) {
/* rmutex has to be locked once */
if (atomic_load_explicit(&test_rmutex.owner,
memory_order_relaxed) == thread_getpid() &&
if (atomic_load_kernel_pid(&test_rmutex.owner) == thread_getpid() &&
test_rmutex.refcount == 1 &&
mutex_trylock(&test_rmutex.mutex) == 0) {
puts("OK");
Expand Down