i#3544 RV64: Fixed atomic functions and macros #6089

Merged (5 commits) on Jun 9, 2023
84 changes: 44 additions & 40 deletions core/arch/atomic_exports.h
@@ -733,7 +733,8 @@ atomic_dec_becomes_zero(volatile int *var)
# define ATOMIC_1BYTE_READ(addr_src, addr_res) \
do { \
/* Fence for acquire semantics. */ \
-__asm__ __volatile__("lbu %0, 0(%1)\n\t" \
+__asm__ __volatile__("lbu t0, 0(%0)\n\t" \
+"sb t0, 0(%1)\n\t" \
"fence iorw,iorw" \
: \
: "r"(addr_src), "r"(addr_res) \
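Note on the fix above: the old line 'lbu %0, 0(%1)' listed no output operands, so %0 and %1 were bound to the inputs (addr_src, addr_res); it read from the result address, clobbered an input register behind the compiler's back, and never stored the byte to addr_res. The replacement loads through t0 and stores to the destination before the fence; the 4- and 8-byte read macros below receive the same treatment. For comparison only, a minimal sketch of such a one-byte acquire read using the GCC/Clang __atomic builtins (the helper name is invented for illustration and is not part of the patch):

    #include <stdint.h>

    /* Copy one byte from *addr_src to *addr_res with (at least) acquire
     * ordering; the macro's trailing "fence iorw,iorw" is stronger than this. */
    static inline void
    atomic_1byte_read_sketch(const volatile uint8_t *addr_src, uint8_t *addr_res)
    {
        *addr_res = __atomic_load_n(addr_src, __ATOMIC_ACQUIRE);
    }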
@@ -779,7 +780,8 @@ atomic_dec_becomes_zero(volatile int *var)
ASSERT(ALIGNED(addr_src, 4)); \
ASSERT(ALIGNED(addr_res, 4)); \
/* Fence for acquire semantics. */ \
-__asm__ __volatile__("lw %0, 0(%1)\n\t" \
+__asm__ __volatile__("lw t0, 0(%0)\n\t" \
+"sw t0, 0(%1)\n\t" \
"fence iorw,iorw" \
: \
: "r"(addr_src), "r"(addr_res) \
@@ -799,9 +801,9 @@ atomic_dec_becomes_zero(volatile int *var)
ASSERT_CURIOSITY(!hot_patch); \
/* Fence for release semantics. */ \
__asm__ __volatile__("fence iorw,iorw\n\t" \
-"sd %0, 0(%1)\n\t" \
+"sd %1, 0(%0)\n\t" \
: \
-: "r"(value), "r"(target) \
+: "r"(target), "r"(value) \
: "memory"); \
} while (0)
# define ATOMIC_8BYTE_WRITE ATOMIC_8BYTE_ALIGNED_WRITE
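The sd operand swap in this hunk is a consistency change rather than a bug fix: both the old and the new forms store value to 0(target); the new form simply lists target first, matching the other macros, and the release ordering comes from the fence issued before the store. For comparison only, a hedged builtin-based sketch of an aligned 8-byte release store (the name and the int64_t type are illustrative, not from the patch):

    #include <stdint.h>

    /* Release-store an aligned 64-bit value, comparable to the
     * "fence iorw,iorw" followed by "sd" in ATOMIC_8BYTE_ALIGNED_WRITE. */
    static inline void
    atomic_8byte_write_sketch(volatile int64_t *target, int64_t value)
    {
        __atomic_store_n(target, value, __ATOMIC_RELEASE);
    }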
@@ -813,24 +815,25 @@ atomic_dec_becomes_zero(volatile int *var)
*/ \
ASSERT(ALIGNED(addr_src, 8)); \
ASSERT(ALIGNED(addr_res, 8)); \
-__asm__ __volatile__("ld %0, 0(%1)\n\t" \
+__asm__ __volatile__("ld t0, 0(%0)\n\t" \
+"sd t0, 0(%1)\n\t" \
"fence iorw,iorw" \
: \
: "r"(addr_src), "r"(addr_res) \
: "memory"); \
} while (0)

-# define ADDI_suffix(sfx, var, val, align) \
-do { \
-/* Page 53 of Volume I: RISC-V Unprivileged ISA V20191213 \
-* requires that var address is naturally aligned. \
-*/ \
-ASSERT(ALIGNED(var, align)); \
-__asm__ __volatile__("li t0, " #val "\n\t" \
-"amoadd." sfx ".aqrl zero, t0, 0(%0)\n\t" \
-: \
-: "r"(var) \
-: "t0", "memory"); \
+# define ADDI_suffix(sfx, var, val, align) \
+do { \
+/* Page 53 of Volume I: RISC-V Unprivileged ISA V20191213 \
+* requires that var address is naturally aligned. \
+*/ \
+ASSERT(ALIGNED(var, align)); \
+__asm__ __volatile__("li t0, " #val "\n\t" \
+"amoadd." sfx ".aqrl zero, t0, (%0)\n\t" \
+: \
+: "r"(var) \
+: "t0", "memory"); \
} while (0)

static inline void
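The operand change from 0(%0) to (%0) here, and in the amoadd/amoswap uses further down, reflects that RISC-V AMO instructions have no immediate offset field: the canonical assembly form is a bare register address, and not every assembler accepts an explicit zero offset for them. As an illustration only, the same add-and-discard operation via the __atomic builtins (the helper name is invented; __ATOMIC_SEQ_CST is used as the closest builtin ordering to the .aqrl bits):

    /* Atomically add val to *var and discard the previous value, comparable
     * to "li t0, val" followed by "amoadd.w.aqrl zero, t0, (var)". */
    static inline void
    atomic_add_discard_sketch(volatile int *var, int val)
    {
        (void)__atomic_fetch_add(var, val, __ATOMIC_SEQ_CST);
    }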
@@ -869,7 +872,7 @@ ATOMIC_ADD_int(volatile int *var, int val)
* naturally aligned.
*/
ASSERT(ALIGNED(var, 4));
-__asm__ __volatile__("amoadd.w.aqrl zero, %1, 0(%0)\n\t"
+__asm__ __volatile__("amoadd.w.aqrl zero, %1, (%0)\n\t"
:
: "r"(var), "r"(val)
: "memory");
@@ -882,7 +885,7 @@ ATOMIC_ADD_int64(volatile int64 *var, int64 val)
* naturally aligned.
*/
ASSERT(ALIGNED(var, 8));
-__asm__ __volatile__("amoadd.d.aqrl zero, %1, 0(%0)\n\t"
+__asm__ __volatile__("amoadd.d.aqrl zero, %1, (%0)\n\t"
:
: "r"(var), "r"(val)
: "memory");
@@ -893,12 +896,12 @@ ATOMIC_ADD_int64(volatile int64 *var, int64 val)
static inline int
atomic_add_exchange_int(volatile int *var, int val)
{
-int res;
+int res = 0;
/* Page 53 of Volume I: RISC-V Unprivileged ISA V20191213 requires that var address is
* naturally aligned.
*/
ASSERT(ALIGNED(var, 4));
-__asm__ __volatile__("amoadd.w.aqrl %0, %2, 0(%1)\n\t"
+__asm__ __volatile__("amoadd.w.aqrl %0, %2, (%1)\n\t"
: "=r"(res)
: "r"(var), "r"(val)
: "memory");
@@ -908,12 +911,12 @@ atomic_add_exchange_int(volatile int *var, int val)
static inline int64
atomic_add_exchange_int64(volatile int64 *var, int64 val)
{
-int64 res;
+int64 res = 0;
/* Page 53 of Volume I: RISC-V Unprivileged ISA V20191213 requires that var address is
* naturally aligned.
*/
ASSERT(ALIGNED(var, 8));
-__asm__ __volatile__("amoadd.d.aqrl %0, %2, 0(%1)\n\t"
+__asm__ __volatile__("amoadd.d.aqrl %0, %2, (%1)\n\t"
: "=r"(res)
: "r"(var), "r"(val)
: "memory");
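In both add-exchange routines, amoadd.{w,d}.aqrl writes the value that memory held before the addition into %0 (res); a caller that wants the post-add value has to add the operand back in afterwards (the functions' return statements fall outside the visible hunks). A builtin-based illustration of that old-value semantics, with an invented helper name:

    /* __atomic_fetch_add, like the rd result of amoadd, yields the value
     * *var held before the addition. */
    static inline int
    fetch_add_sketch(volatile int *var, int val)
    {
        int old = __atomic_fetch_add(var, val, __ATOMIC_SEQ_CST);
        return old + val; /* the post-add value, if that is what is wanted */
    }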
@@ -932,15 +935,15 @@ atomic_compare_exchange_int(volatile int *var, int compare, int exchange)
ASSERT(ALIGNED(var, 4));
/* This code is based on GCC's atomic_compare_exchange_strong().
* The fence ensures no re-ordering beyond the start of the lr/sc loop.
-* The lr/sc use acquire ordering to ensure that the full loop has
-* sequentially consistent memory order semantics.
+* Provides acquire/release semantics for LR/SC sequences by setting the
+* aq and rl bits on lr and sc respectively.
*/
__asm__ __volatile__("fence iorw,ow\n\t"
-"1: lr.w.aq t0, (%1)\n\t"
-" bne t0, %2, 2f\n\t"
-" sc.w.aq %0, %3, (%1)\n\t"
-" bnez %0, 1b\n\t"
-"2:"
+"1: lr.w.aq t0, (%1)\n\t"
+" bne t0, %2, 2f\n\t"
+" sc.w.rl %0, %3, (%1)\n\t"
+" bnez %0, 1b\n\t"
+"2:\n\t"
: "+r"(res)
: "r"(var), "r"(compare), "r"(exchange)
: "t0", "memory");
@@ -957,15 +960,15 @@ atomic_compare_exchange_int64(volatile int64 *var, int64 compare, int64 exchange
ASSERT(ALIGNED(var, 8));
/* This code is based on GCC's atomic_compare_exchange_strong().
* The fence ensures no re-ordering beyond the start of the lr/sc loop.
-* The lr/sc use acquire ordering to ensure that the full loop has
-* sequentially consistent memory order semantics.
+* Provides acquire/release semantics for LR/SC sequences by setting the
+* aq and rl bits on lr and sc respectively.
*/
__asm__ __volatile__("fence iorw,ow\n\t"
-"1: lr.d.aq t0, (%1)\n\t"
-" bne t0, %2, 2f\n\t"
-" sc.d.aq %0, %3, (%1)\n\t"
-" bnez %0, 1b\n\t"
-"2:"
+"1: lr.d.aq t0, (%1)\n\t"
+" bne t0, %2, 2f\n\t"
+" sc.d.rl %0, %3, (%1)\n\t"
+" bnez %0, 1b\n\t"
+"2:\n\t"
: "+r"(res)
: "r"(var), "r"(compare), "r"(exchange)
: "t0", "memory");
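Both compare-exchange loops now follow the canonical RISC-V LR/SC pairing: lr.{w,d}.aq supplies the acquire half, sc.{w,d}.rl (previously .aq, which gives no release ordering) supplies the release half, and a nonzero SC result restarts the loop, as the rewritten comments state. For comparison only, a sketch of a strong CAS with the same acquire/release intent expressed through the __atomic builtins (illustrative, not the project's interface):

    #include <stdbool.h>

    /* Returns true iff *var equaled compare and has been replaced by exchange. */
    static inline bool
    cas_int_sketch(volatile int *var, int compare, int exchange)
    {
        return __atomic_compare_exchange_n(var, &compare, exchange,
                                           false /* strong */,
                                           __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    }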
@@ -977,6 +980,7 @@ atomic_compare_exchange_int64(volatile int64 *var, int64 compare, int64 exchange
# define ATOMIC_COMPARE_EXCHANGE_int64(var, compare, exchange) \
atomic_compare_exchange_int64(var, compare, exchange)

+/* exchanges *var with newval and returns original *var */
static inline int
atomic_exchange_int(volatile int *var, int newval)
{
@@ -985,7 +989,7 @@ atomic_exchange_int(volatile int *var, int newval)
* is naturally aligned.
*/
ASSERT(ALIGNED(var, 4));
-__asm__ __volatile__("amoswap.w.aqrl %0, %2, 0(%1)\n\t"
+__asm__ __volatile__("amoswap.w.aqrl %0, %2, (%1)\n\t"
: "=r"(res)
: "r"(var), "r"(newval)
: "memory");
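amoswap.w.aqrl swaps newval into *var and leaves the previous contents in %0, which is exactly what the comment added just above atomic_exchange_int documents. An equivalent expressed with the builtins, for illustration only:

    /* Swap newval into *var and return the prior value. */
    static inline int
    exchange_int_sketch(volatile int *var, int newval)
    {
        return __atomic_exchange_n(var, newval, __ATOMIC_SEQ_CST);
    }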
@@ -1154,15 +1158,15 @@ atomic_add_exchange_int64(volatile int64 *var, int64 value)
static inline int
atomic_aligned_read_int(volatile int *var)
{
-int temp;
+int temp = 0;
ATOMIC_4BYTE_ALIGNED_READ(var, &temp);
return temp;
}

static inline bool
atomic_read_bool(volatile bool *var)
{
-bool temp;
+bool temp = 0;
ATOMIC_1BYTE_READ(var, &temp);
return temp;
}
@@ -1171,7 +1175,7 @@ atomic_read_bool(volatile bool *var)
static inline int64
atomic_aligned_read_int64(volatile int64 *var)
{
-int64 temp;
+int64 temp = 0;
ATOMIC_8BYTE_ALIGNED_READ(var, &temp);
return temp;
}