Removed memory order argument from atomic_fence.
The code uses memory_order_seq_cst at all call sites of atomic_fence,
so remove the argument and simplify the implementation a bit. Also rename
the function so that the memory order it implements is apparent from its name.

Signed-off-by: Andrey Semashev <andrey.semashev@gmail.com>
Lastique committed Nov 25, 2021
1 parent de51c60 commit 8feefce
Showing 3 changed files with 12 additions and 19 deletions.
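
Editor's illustration (not part of the commit): the interface change amounts to fixing the memory order at compile time. A simplified sketch of the before/after shape; the actual definitions and call sites follow in the diffs below, and the real implementation also has an x86-specific branch.

    #include <atomic>

    // Old interface (removed by this commit): the order was a runtime argument.
    static inline void atomic_fence(std::memory_order order) {
        std::atomic_thread_fence(order);
    }

    // New interface: the only order ever requested is baked into the name.
    static inline void atomic_fence_seq_cst() {
        std::atomic_thread_fence(std::memory_order_seq_cst);
    }

    int main() {
        atomic_fence(std::memory_order_seq_cst); // every existing call looked like this
        atomic_fence_seq_cst();                  // and now becomes this
    }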
17 changes: 5 additions & 12 deletions include/oneapi/tbb/detail/_machine.h
@@ -76,22 +76,15 @@ using std::this_thread::yield;
#endif

//--------------------------------------------------------------------------------------------------
-// atomic_fence implementation
+// atomic_fence_seq_cst implementation
//--------------------------------------------------------------------------------------------------

-static inline void atomic_fence(std::memory_order order) {
+static inline void atomic_fence_seq_cst() {
#if (__TBB_x86_64 || __TBB_x86_32) && defined(__GNUC__) && __GNUC__ < 11
-    if (order == std::memory_order_seq_cst)
-    {
-        unsigned char dummy = 0u;
-        __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) :: "memory");
-    }
-    else if (order != std::memory_order_relaxed)
-    {
-        __asm__ __volatile__ ("" ::: "memory");
-    }
+    unsigned char dummy = 0u;
+    __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) :: "memory");
#else
-    std::atomic_thread_fence(order);
+    std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}
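
Editor's aside on the GCC branch above (not part of the commit): on x86, any lock-prefixed read-modify-write acts as a full memory barrier, so a locked not on a private stack byte gives the same ordering guarantee as a sequentially consistent fence, and the "memory" clobber stops the compiler from reordering across it. A minimal standalone sketch of the same technique, using an illustrative name and simplified architecture checks rather than TBB's macros:

    #include <atomic>

    static inline void full_fence_sketch() {
    #if (defined(__x86_64__) || defined(__i386__)) && defined(__GNUC__)
        // Locked RMW on a private byte: a full hardware barrier on x86 with no
        // other visible effect; the "memory" clobber also constrains the compiler.
        unsigned char dummy = 0u;
        __asm__ __volatile__ ("lock; notb %0" : "+m" (dummy) :: "memory");
    #else
        // Portable path: a sequentially consistent fence from the standard library.
        std::atomic_thread_fence(std::memory_order_seq_cst);
    #endif
    }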

4 changes: 2 additions & 2 deletions src/tbb/arena.h
@@ -494,7 +494,7 @@ void arena::advertise_new_work() {
};

if( work_type == work_enqueued ) {
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
if ( my_market->my_num_workers_soft_limit.load(std::memory_order_acquire) == 0 &&
my_global_concurrency_mode.load(std::memory_order_acquire) == false )
@@ -508,7 +508,7 @@
// Starvation resistant tasks require concurrency, so missed wakeups are unacceptable.
}
else if( work_type == wakeup ) {
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
}

// Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences.
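
Editor's note (not part of the commit): the fences in advertise_new_work() implement the producer half of a store-buffering (Dekker-style) handshake: publish the new work, issue a full fence, and only then check whether anyone might be sleeping, so that at least one of the two sides is guaranteed to observe the other. A minimal sketch of that shape, using illustrative flags rather than TBB's actual arena/market state:

    #include <atomic>

    std::atomic<bool> work_available{false};   // illustrative stand-ins only
    std::atomic<bool> worker_asleep{false};

    // Producer side, roughly the shape of advertise_new_work():
    void publish_work_sketch() {
        work_available.store(true, std::memory_order_relaxed);  // publish the work
        std::atomic_thread_fence(std::memory_order_seq_cst);    // atomic_fence_seq_cst()
        if (worker_asleep.load(std::memory_order_relaxed)) {
            // Wake the sleeping worker. Without the full fence, this load could
            // be satisfied before the store above became globally visible, and
            // the producer and a sleeping worker could miss each other entirely.
        }
    }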
10 changes: 5 additions & 5 deletions src/tbb/concurrent_monitor.h
@@ -220,7 +220,7 @@ class concurrent_monitor_base {

// Prepare wait guarantees Write Read memory barrier.
// In C++ only full fence covers this type of barrier.
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
}

//! Commit wait if event count has not changed; otherwise, cancel wait.
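
Editor's note (not part of the commit): the "Write Read memory barrier" mentioned in prepare_wait above is a StoreLoad barrier, and this is the waiter's half of the handshake sketched after the arena.h hunk: prepare_wait publishes the thread as a waiter, the full fence follows, and commit_wait then re-reads the condition. A matching sketch of the waiter side, again with illustrative names:

    #include <atomic>

    std::atomic<bool> work_available{false};   // same illustrative flags as before
    std::atomic<bool> worker_asleep{false};

    // Waiter side, roughly the prepare_wait / commit_wait shape:
    void wait_sketch() {
        worker_asleep.store(true, std::memory_order_relaxed);   // prepare_wait: register as a waiter
        std::atomic_thread_fence(std::memory_order_seq_cst);    // atomic_fence_seq_cst()
        if (work_available.load(std::memory_order_relaxed)) {   // commit_wait: re-check the condition
            worker_asleep.store(false, std::memory_order_relaxed);  // cancel the wait and run instead
        } else {
            // Block for real. With full fences on both sides, either this waiter
            // already saw the work and did not block, or the notifier is
            // guaranteed to see worker_asleep == true and will wake it.
        }
    }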
@@ -272,7 +272,7 @@ class concurrent_monitor_base {

//! Notify one thread about the event
void notify_one() {
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
notify_one_relaxed();
}

@@ -301,7 +301,7 @@ class concurrent_monitor_base {

//! Notify all waiting threads of the event
void notify_all() {
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
notify_all_relaxed();
}

@@ -337,7 +337,7 @@ class concurrent_monitor_base {
//! Notify waiting threads of the event that satisfies the given predicate
template <typename P>
void notify( const P& predicate ) {
-        atomic_fence(std::memory_order_seq_cst);
+        atomic_fence_seq_cst();
notify_relaxed( predicate );
}

@@ -409,7 +409,7 @@ class concurrent_monitor_base {

//! Abort any sleeping threads at the time of the call
void abort_all() {
-        atomic_fence( std::memory_order_seq_cst );
+        atomic_fence_seq_cst();
abort_all_relaxed();
}
