@@ -29,6 +29,7 @@
 
 #include "cppstdlib/type_traits.hpp"
 #include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/prefetch.inline.hpp"
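Every hunk below follows the same pattern: the fields `_invisible_epoch`, `_resize_lock_owner`, and `_size_limit_reached` move from plain members accessed through `AtomicAccess::` free functions (or raw loads and stores) to an `Atomic<T>` member type from the newly included `runtime/atomic.hpp`, whose member functions name their memory ordering. As a rough model only — the real HotSpot wrapper differs in detail — the interface the diff relies on can be sketched over `std::atomic`:

```cpp
#include <atomic>

// Hypothetical model of the Atomic<T> field wrapper; the member names are
// taken from the diff, the implementation is only a sketch over std::atomic.
template <typename T>
class Atomic {
  std::atomic<T> _value;

 public:
  Atomic(T v = T()) : _value(v) {}

  T    load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v)    { _value.store(v, std::memory_order_relaxed); }

  T    load_acquire() const  { return _value.load(std::memory_order_acquire); }
  void release_store(T v)    { _value.store(v, std::memory_order_release); }

  // Release store followed by a full fence: unlike a plain release store,
  // this also orders the store before any subsequent loads.
  void release_store_fence(T v) {
    _value.store(v, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
};
```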
@@ -221,8 +222,8 @@ inline ConcurrentHashTable<CONFIG, MT>::
   _cs_context(GlobalCounter::critical_section_begin(_thread))
 {
   // This version is published now.
-  if (AtomicAccess::load_acquire(&_cht->_invisible_epoch) != nullptr) {
-    AtomicAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)nullptr);
+  if (_cht->_invisible_epoch.load_acquire() != nullptr) {
+    _cht->_invisible_epoch.release_store_fence(nullptr);
   }
 }
 
@@ -282,16 +283,16 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   write_synchonize_on_visible_epoch(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   OrderAccess::fence(); // Prevent below load from floating up.
   // If no reader saw this version we can skip write_synchronize.
-  if (AtomicAccess::load_acquire(&_invisible_epoch) == thread) {
+  if (_invisible_epoch.load_acquire() == thread) {
     return;
   }
-  assert(_invisible_epoch == nullptr, "Two thread doing bulk operations");
+  assert(_invisible_epoch.load_relaxed() == nullptr, "Two thread doing bulk operations");
   // We set this/next version that we are synchronizing for to not published.
   // A reader will zero this flag if it reads this/next version.
-  AtomicAccess::release_store(&_invisible_epoch, thread);
+  _invisible_epoch.release_store(thread);
   GlobalCounter::write_synchronize();
 }
 
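Taken together with the `critical_section_begin` hunk above, these two hunks implement a small handshake: a writer publishes its identity in `_invisible_epoch`, and any reader entering a critical section zeroes the flag with `release_store_fence`. If the flag still holds the writer on the next bulk operation, no reader can have seen that version, so the expensive `GlobalCounter::write_synchronize()` is skipped. A self-contained model of the protocol, with `Thread` reduced to an opaque type and the grace-period wait stubbed out:

```cpp
#include <atomic>
#include <cassert>

// Self-contained model of the invisible-epoch handshake. Thread is reduced
// to an opaque type and GlobalCounter::write_synchronize() to a stub.
struct Thread {};

static std::atomic<Thread*> invisible_epoch{nullptr};

static void write_synchronize() { /* wait for a reader grace period; elided */ }

// Reader side (the critical_section_begin hunk above): entering a critical
// section zeroes the flag, telling writers this version has been seen.
void reader_enter() {
  if (invisible_epoch.load(std::memory_order_acquire) != nullptr) {
    invisible_epoch.store(nullptr, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst); // release_store_fence
  }
}

// Writer side: if the flag still holds this thread, no reader entered since
// the last publication and the grace-period wait can be skipped.
void writer_synchronize(Thread* self) {
  std::atomic_thread_fence(std::memory_order_seq_cst); // keep the load below
  if (invisible_epoch.load(std::memory_order_acquire) == self) {
    return;
  }
  assert(invisible_epoch.load(std::memory_order_relaxed) == nullptr);
  invisible_epoch.store(self, std::memory_order_release);
  write_synchronize();
}
```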
@@ -300,17 +301,17 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   try_resize_lock(Thread* locker)
 {
   if (_resize_lock->try_lock()) {
-    if (_resize_lock_owner != nullptr) {
-      assert(locker != _resize_lock_owner, "Already own lock");
+    if (_resize_lock_owner.load_relaxed() != nullptr) {
+      assert(locker != _resize_lock_owner.load_relaxed(), "Already own lock");
       // We got mutex but internal state is locked.
       _resize_lock->unlock();
       return false;
     }
   } else {
     return false;
   }
-  _invisible_epoch = nullptr;
-  _resize_lock_owner = locker;
+  _invisible_epoch.store_relaxed(nullptr);
+  _resize_lock_owner.store_relaxed(locker);
   return true;
 }
 
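The relaxed orderings here are deliberate: `_resize_lock_owner` and `_invisible_epoch` are only written while `_resize_lock` is held, so the mutex already provides the synchronization. Wrapping the fields in `Atomic<T>` mainly turns the concurrent reads in the asserts into well-defined loads rather than data races. A sketch of the owner-tracking pattern, with standard C++ primitives standing in for HotSpot's `Mutex` and `Atomic<T>`:

```cpp
#include <atomic>
#include <cassert>
#include <mutex>

// Sketch of the owner-tracking pattern. The owner field is only written with
// the mutex held; relaxed atomics merely make the cross-thread reads in
// asserts well-defined loads instead of data races.
struct ResizeLock {
  std::mutex mtx;
  std::atomic<void*> owner{nullptr};

  bool try_lock(void* self) {
    if (!mtx.try_lock()) {
      return false;
    }
    if (owner.load(std::memory_order_relaxed) != nullptr) {
      // Got the mutex, but the internal state is still logically locked
      // (the holder dropped the mutex across a safepoint).
      mtx.unlock();
      return false;
    }
    owner.store(self, std::memory_order_relaxed);
    return true;
  }

  void unlock(void* self) {
    assert(owner.load(std::memory_order_relaxed) == self && "Not unlocked by locker");
    owner.store(nullptr, std::memory_order_relaxed);
    mtx.unlock();
  }
};
```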
@@ -326,26 +327,26 @@ inline void ConcurrentHashTable<CONFIG, MT>::
     _resize_lock->lock_without_safepoint_check();
     // If holder of lock dropped mutex for safepoint mutex might be unlocked,
     // and _resize_lock_owner will contain the owner.
-    if (_resize_lock_owner != nullptr) {
-      assert(locker != _resize_lock_owner, "Already own lock");
+    if (_resize_lock_owner.load_relaxed() != nullptr) {
+      assert(locker != _resize_lock_owner.load_relaxed(), "Already own lock");
       // We got mutex but internal state is locked.
       _resize_lock->unlock();
       yield.wait();
     } else {
       break;
     }
   } while (true);
-  _resize_lock_owner = locker;
-  _invisible_epoch = nullptr;
+  _resize_lock_owner.store_relaxed(locker);
+  _invisible_epoch.store_relaxed(nullptr);
 }
 
 template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   unlock_resize_lock(Thread* locker)
 {
-  _invisible_epoch = nullptr;
-  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
-  _resize_lock_owner = nullptr;
+  _invisible_epoch.store_relaxed(nullptr);
+  assert(locker == _resize_lock_owner.load_relaxed(), "Not unlocked by locker.");
+  _resize_lock_owner.store_relaxed(nullptr);
   _resize_lock->unlock();
 }
 
@@ -477,8 +478,8 @@ inline void ConcurrentHashTable<CONFIG, MT>::
 {
   // Here we have resize lock so table is SMR safe, and there is no new
   // table. Can do this in parallel if we want.
-  assert((is_mt && _resize_lock_owner != nullptr) ||
-         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
+  assert((is_mt && _resize_lock_owner.load_relaxed() != nullptr) ||
+         (!is_mt && _resize_lock_owner.load_relaxed() == thread), "Re-size lock not held");
   Node* ndel_stack[StackBufferSize];
   InternalTable* table = get_table();
   assert(start_idx < stop_idx, "Must be");
@@ -696,7 +697,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   if (!try_resize_lock(thread)) {
     return false;
   }
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   if (_table->_log2_size == _log2_start_size ||
       _table->_log2_size <= log2_size) {
     unlock_resize_lock(thread);
@@ -710,10 +711,10 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   internal_shrink_epilog(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
 
   InternalTable* old_table = set_table_from_new();
-  _size_limit_reached = false;
+  _size_limit_reached.store_relaxed(false);
   unlock_resize_lock(thread);
 #ifdef ASSERT
   for (size_t i = 0; i < old_table->_size; i++) {
@@ -767,13 +768,13 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_shrink(Thread* thread, size_t log2_size)
 {
   if (!internal_shrink_prolog(thread, log2_size)) {
-    assert(_resize_lock_owner != thread, "Re-size lock held");
+    assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
     return false;
   }
-  assert(_resize_lock_owner == thread, "Should be locked by me");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked by me");
   internal_shrink_range(thread, 0, _new_table->_size);
   internal_shrink_epilog(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }
 
@@ -787,7 +788,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::
   delete _table;
   // Create and publish a new table
   InternalTable* table = new InternalTable(log2_size);
-  _size_limit_reached = (log2_size == _log2_size_limit);
+  _size_limit_reached.store_relaxed(log2_size == _log2_size_limit);
   AtomicAccess::release_store(&_table, table);
 }
 
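Note that the table pointer itself is untouched by this change: `_table` is still published with `AtomicAccess::release_store`, pairing with an acquire load in `get_table()` so that a reader observing the new pointer also observes the fully initialized table. The underlying release/acquire publication pattern, in standard C++ with `InternalTable` reduced to a stub:

```cpp
#include <atomic>
#include <cstddef>

// Release/acquire publication: the pattern behind
// AtomicAccess::release_store(&_table, table) and the acquire load in
// get_table(). InternalTable is reduced to a stub.
struct InternalTable {
  std::size_t _size;  // buckets elided
};

static std::atomic<InternalTable*> table_ptr{nullptr};

void publish(InternalTable* t) {
  // Every store initializing *t happens-before the release store below...
  table_ptr.store(t, std::memory_order_release);
}

InternalTable* get_table() {
  // ...and is therefore visible to any thread whose acquire load returns t.
  return table_ptr.load(std::memory_order_acquire);
}
```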
@@ -812,7 +813,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   }
 
   _new_table = new InternalTable(_table->_log2_size + 1);
-  _size_limit_reached = _new_table->_log2_size == _log2_size_limit;
+  _size_limit_reached.store_relaxed(_new_table->_log2_size == _log2_size_limit);
 
   return true;
 }
@@ -821,7 +822,7 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   internal_grow_epilog(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Should be locked");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked");
 
   InternalTable* old_table = set_table_from_new();
   unlock_resize_lock(thread);
@@ -840,13 +841,13 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_grow(Thread* thread, size_t log2_size)
 {
   if (!internal_grow_prolog(thread, log2_size)) {
-    assert(_resize_lock_owner != thread, "Re-size lock held");
+    assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
     return false;
   }
-  assert(_resize_lock_owner == thread, "Should be locked by me");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked by me");
   internal_grow_range(thread, 0, _table->_size);
   internal_grow_epilog(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }
 
@@ -961,7 +962,7 @@ template <typename FUNC>
 inline void ConcurrentHashTable<CONFIG, MT>::
   do_scan_locked(Thread* thread, FUNC& scan_f)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   // We can do a critical section over the entire loop but that would block
   // updates for a long time. Instead we choose to block resizes.
   InternalTable* table = get_table();
@@ -1020,7 +1021,7 @@ ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bo
   _resize_lock = new Mutex(rank, "ConcurrentHashTableResize_lock");
   _table = new InternalTable(log2size);
   assert(log2size_limit >= log2size, "bad ergo");
-  _size_limit_reached = _table->_log2_size == _log2_size_limit;
+  _size_limit_reached.store_relaxed(_table->_log2_size == _log2_size_limit);
 }
 
 template <typename CONFIG, MemTag MT>
@@ -1139,11 +1140,11 @@ inline void ConcurrentHashTable<CONFIG, MT>::
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
          "must be outside a safepoint");
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   lock_resize_lock(thread);
   do_scan_locked(thread, scan_f);
   unlock_resize_lock(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
 }
 
 template <typename CONFIG, MemTag MT>
@@ -1205,7 +1206,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   }
   do_bulk_delete_locked(thread, eval_f, del_f);
   unlock_resize_lock(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }
 