
Commit 3f33eaa

Author: Kim Barrett
8373649: Convert simple AtomicAccess usage in ConcurrentHashTable to use Atomic<T>
Reviewed-by: tschatzl, iwalulya
1 parent 1748737 commit 3f33eaa

File tree

3 files changed: +58 -55 lines
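
The conversion is mechanical: fields declared volatile T and accessed through AtomicAccess:: free functions become Atomic<T> members accessed through member functions that name their memory ordering (load_relaxed(), store_relaxed(), load_acquire(), release_store(), release_store_fence(), fetch_then_add()). As a rough sketch of the before/after pattern (hypothetical ExampleBefore/ExampleAfter classes; only the AtomicAccess and Atomic<T> APIs are taken from the diff below):

// Before: a plain volatile field; ordering comes from AtomicAccess free
// functions, and a bare read is an implicitly relaxed load.
class ExampleBefore {
  volatile bool _flag;
 public:
  bool flag() { return _flag; }
  void publish() { AtomicAccess::release_store(&_flag, true); }
};

// After: an Atomic<T> member; every access goes through a member function
// that states its ordering, so no bare reads or writes remain.
class ExampleAfter {
  Atomic<bool> _flag;
 public:
  bool flag() { return _flag.load_relaxed(); }
  void publish() { _flag.release_store(true); }
};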

src/hotspot/share/utilities/concurrentHashTable.hpp

Lines changed: 6 additions & 5 deletions
@@ -26,6 +26,7 @@
 #define SHARE_UTILITIES_CONCURRENTHASHTABLE_HPP

 #include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/mutex.hpp"
 #include "utilities/globalCounter.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -246,7 +247,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
   const size_t _log2_start_size; // Start size.
   const size_t _grow_hint; // Number of linked items

-  volatile bool _size_limit_reached;
+  Atomic<bool> _size_limit_reached;

   // We serialize resizers and other bulk operations which do not support
   // concurrent resize with this lock.
@@ -255,7 +256,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
   // taking the mutex after a safepoint this bool is the actual state. After
   // acquiring the mutex you must check if this is already locked. If so you
   // must drop the mutex until the real lock holder grabs the mutex.
-  volatile Thread* _resize_lock_owner;
+  Atomic<Thread*> _resize_lock_owner;

   // Return true if lock mutex/state succeeded.
   bool try_resize_lock(Thread* locker);
@@ -273,7 +274,7 @@ class ConcurrentHashTable : public CHeapObj<MT> {
   // this field keep tracks if a version of the hash-table was ever been seen.
   // We the working thread pointer as tag for debugging. The _invisible_epoch
   // can only be used by the owner of _resize_lock.
-  volatile Thread* _invisible_epoch;
+  Atomic<Thread*> _invisible_epoch;

   // Scoped critical section, which also handles the invisible epochs.
   // An invisible epoch/version do not need a write_synchronize().
@@ -435,11 +436,11 @@ class ConcurrentHashTable : public CHeapObj<MT> {
   size_t get_size_log2(Thread* thread);
   static size_t get_node_size() { return sizeof(Node); }
   static size_t get_dynamic_node_size(size_t value_size);
-  bool is_max_size_reached() { return _size_limit_reached; }
+  bool is_max_size_reached() { return _size_limit_reached.load_relaxed(); }

   // This means no paused bucket resize operation is going to resume
   // on this table.
-  bool is_safepoint_safe() { return _resize_lock_owner == nullptr; }
+  bool is_safepoint_safe() { return _resize_lock_owner.load_relaxed() == nullptr; }

   // Re-size operations.
   bool shrink(Thread* thread, size_t size_limit_log2 = 0);
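
For readers outside HotSpot, the accessor change above maps onto the standard-C++ idiom sketched below: a racy read of a shared flag needs atomicity but no ordering, so it becomes an explicitly relaxed atomic load. This sketch uses std::atomic rather than HotSpot's Atomic<T>, and the class name is illustrative:

#include <atomic>

class SizeLimitedTable {
  std::atomic<bool> _size_limit_reached{false};
 public:
  // Analogue of is_max_size_reached(): the flag is read concurrently with
  // writers, so the load must be atomic, but no ordering is required.
  bool is_max_size_reached() const {
    return _size_limit_reached.load(std::memory_order_relaxed);
  }
  void set_limit_reached() {
    _size_limit_reached.store(true, std::memory_order_relaxed);
  }
};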

src/hotspot/share/utilities/concurrentHashTable.inline.hpp

Lines changed: 37 additions & 36 deletions
@@ -29,6 +29,7 @@

 #include "cppstdlib/type_traits.hpp"
 #include "memory/allocation.inline.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/prefetch.inline.hpp"
@@ -221,8 +222,8 @@ inline ConcurrentHashTable<CONFIG, MT>::
     _cs_context(GlobalCounter::critical_section_begin(_thread))
 {
   // This version is published now.
-  if (AtomicAccess::load_acquire(&_cht->_invisible_epoch) != nullptr) {
-    AtomicAccess::release_store_fence(&_cht->_invisible_epoch, (Thread*)nullptr);
+  if (_cht->_invisible_epoch.load_acquire() != nullptr) {
+    _cht->_invisible_epoch.release_store_fence(nullptr);
   }
 }

@@ -282,16 +283,16 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   write_synchonize_on_visible_epoch(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   OrderAccess::fence(); // Prevent below load from floating up.
   // If no reader saw this version we can skip write_synchronize.
-  if (AtomicAccess::load_acquire(&_invisible_epoch) == thread) {
+  if (_invisible_epoch.load_acquire() == thread) {
     return;
   }
-  assert(_invisible_epoch == nullptr, "Two thread doing bulk operations");
+  assert(_invisible_epoch.load_relaxed() == nullptr, "Two thread doing bulk operations");
   // We set this/next version that we are synchronizing for to not published.
   // A reader will zero this flag if it reads this/next version.
-  AtomicAccess::release_store(&_invisible_epoch, thread);
+  _invisible_epoch.release_store(thread);
   GlobalCounter::write_synchronize();
 }

@@ -300,17 +301,17 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   try_resize_lock(Thread* locker)
 {
   if (_resize_lock->try_lock()) {
-    if (_resize_lock_owner != nullptr) {
-      assert(locker != _resize_lock_owner, "Already own lock");
+    if (_resize_lock_owner.load_relaxed() != nullptr) {
+      assert(locker != _resize_lock_owner.load_relaxed(), "Already own lock");
       // We got mutex but internal state is locked.
       _resize_lock->unlock();
       return false;
     }
   } else {
     return false;
   }
-  _invisible_epoch = nullptr;
-  _resize_lock_owner = locker;
+  _invisible_epoch.store_relaxed(nullptr);
+  _resize_lock_owner.store_relaxed(locker);
   return true;
 }

@@ -326,26 +327,26 @@ inline void ConcurrentHashTable<CONFIG, MT>::
     _resize_lock->lock_without_safepoint_check();
     // If holder of lock dropped mutex for safepoint mutex might be unlocked,
     // and _resize_lock_owner will contain the owner.
-    if (_resize_lock_owner != nullptr) {
-      assert(locker != _resize_lock_owner, "Already own lock");
+    if (_resize_lock_owner.load_relaxed() != nullptr) {
+      assert(locker != _resize_lock_owner.load_relaxed(), "Already own lock");
       // We got mutex but internal state is locked.
       _resize_lock->unlock();
       yield.wait();
     } else {
       break;
     }
   } while(true);
-  _resize_lock_owner = locker;
-  _invisible_epoch = nullptr;
+  _resize_lock_owner.store_relaxed(locker);
+  _invisible_epoch.store_relaxed(nullptr);
 }

 template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   unlock_resize_lock(Thread* locker)
 {
-  _invisible_epoch = nullptr;
-  assert(locker == _resize_lock_owner, "Not unlocked by locker.");
-  _resize_lock_owner = nullptr;
+  _invisible_epoch.store_relaxed(nullptr);
+  assert(locker == _resize_lock_owner.load_relaxed(), "Not unlocked by locker.");
+  _resize_lock_owner.store_relaxed(nullptr);
   _resize_lock->unlock();
 }

@@ -477,8 +478,8 @@ inline void ConcurrentHashTable<CONFIG, MT>::
 {
   // Here we have resize lock so table is SMR safe, and there is no new
   // table. Can do this in parallel if we want.
-  assert((is_mt && _resize_lock_owner != nullptr) ||
-         (!is_mt && _resize_lock_owner == thread), "Re-size lock not held");
+  assert((is_mt && _resize_lock_owner.load_relaxed() != nullptr) ||
+         (!is_mt && _resize_lock_owner.load_relaxed() == thread), "Re-size lock not held");
   Node* ndel_stack[StackBufferSize];
   InternalTable* table = get_table();
   assert(start_idx < stop_idx, "Must be");
@@ -696,7 +697,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   if (!try_resize_lock(thread)) {
     return false;
   }
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   if (_table->_log2_size == _log2_start_size ||
       _table->_log2_size <= log2_size) {
     unlock_resize_lock(thread);
@@ -710,10 +711,10 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   internal_shrink_epilog(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");

   InternalTable* old_table = set_table_from_new();
-  _size_limit_reached = false;
+  _size_limit_reached.store_relaxed(false);
   unlock_resize_lock(thread);
 #ifdef ASSERT
   for (size_t i = 0; i < old_table->_size; i++) {
@@ -767,13 +768,13 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_shrink(Thread* thread, size_t log2_size)
 {
   if (!internal_shrink_prolog(thread, log2_size)) {
-    assert(_resize_lock_owner != thread, "Re-size lock held");
+    assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
     return false;
   }
-  assert(_resize_lock_owner == thread, "Should be locked by me");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked by me");
   internal_shrink_range(thread, 0, _new_table->_size);
   internal_shrink_epilog(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }

@@ -787,7 +788,7 @@ inline void ConcurrentHashTable<CONFIG, MT>::
   delete _table;
   // Create and publish a new table
   InternalTable* table = new InternalTable(log2_size);
-  _size_limit_reached = (log2_size == _log2_size_limit);
+  _size_limit_reached.store_relaxed(log2_size == _log2_size_limit);
   AtomicAccess::release_store(&_table, table);
 }

@@ -812,7 +813,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   }

   _new_table = new InternalTable(_table->_log2_size + 1);
-  _size_limit_reached = _new_table->_log2_size == _log2_size_limit;
+  _size_limit_reached.store_relaxed(_new_table->_log2_size == _log2_size_limit);

   return true;
 }
@@ -821,7 +822,7 @@ template <typename CONFIG, MemTag MT>
 inline void ConcurrentHashTable<CONFIG, MT>::
   internal_grow_epilog(Thread* thread)
 {
-  assert(_resize_lock_owner == thread, "Should be locked");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked");

   InternalTable* old_table = set_table_from_new();
   unlock_resize_lock(thread);
@@ -840,13 +841,13 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_grow(Thread* thread, size_t log2_size)
 {
   if (!internal_grow_prolog(thread, log2_size)) {
-    assert(_resize_lock_owner != thread, "Re-size lock held");
+    assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
     return false;
   }
-  assert(_resize_lock_owner == thread, "Should be locked by me");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Should be locked by me");
   internal_grow_range(thread, 0, _table->_size);
   internal_grow_epilog(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }

@@ -961,7 +962,7 @@ template <typename FUNC>
 inline void ConcurrentHashTable<CONFIG, MT>::
   do_scan_locked(Thread* thread, FUNC& scan_f)
 {
-  assert(_resize_lock_owner == thread, "Re-size lock not held");
+  assert(_resize_lock_owner.load_relaxed() == thread, "Re-size lock not held");
   // We can do a critical section over the entire loop but that would block
   // updates for a long time. Instead we choose to block resizes.
   InternalTable* table = get_table();
@@ -1020,7 +1021,7 @@ ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bo
   _resize_lock = new Mutex(rank, "ConcurrentHashTableResize_lock");
   _table = new InternalTable(log2size);
   assert(log2size_limit >= log2size, "bad ergo");
-  _size_limit_reached = _table->_log2_size == _log2_size_limit;
+  _size_limit_reached.store_relaxed(_table->_log2_size == _log2_size_limit);
 }

 template <typename CONFIG, MemTag MT>
@@ -1139,11 +1140,11 @@ inline void ConcurrentHashTable<CONFIG, MT>::
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
          "must be outside a safepoint");
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   lock_resize_lock(thread);
   do_scan_locked(thread, scan_f);
   unlock_resize_lock(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
 }

 template <typename CONFIG, MemTag MT>
@@ -1205,7 +1206,7 @@ inline bool ConcurrentHashTable<CONFIG, MT>::
   }
   do_bulk_delete_locked(thread, eval_f, del_f);
   unlock_resize_lock(thread);
-  assert(_resize_lock_owner != thread, "Re-size lock held");
+  assert(_resize_lock_owner.load_relaxed() != thread, "Re-size lock held");
   return true;
 }
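
The orderings kept in this file pair up as a publish/observe protocol around _invisible_epoch: the resizer tags a version with a release store, readers acquire-load the tag and zero it, and the resizer can skip write_synchronize() only if its tag survived untouched. A standard-C++ sketch of that pairing, with illustrative names (release_store_fence corresponds roughly to a release store followed by a full fence):

#include <atomic>

std::atomic<const void*> invisible_epoch{nullptr};

// Resizer: tag the version being synchronized as not yet published.
void tag_invisible(const void* self) {
  invisible_epoch.store(self, std::memory_order_release);
}

// Reader: publish the version by zeroing a visible tag
// (analogue of the load_acquire + release_store_fence in the constructor above).
void observe_version() {
  if (invisible_epoch.load(std::memory_order_acquire) != nullptr) {
    invisible_epoch.store(nullptr, std::memory_order_release);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }
}

// Resizer: if the tag is still ours, no reader saw this version and the
// expensive global synchronization can be skipped.
bool can_skip_sync(const void* self) {
  return invisible_epoch.load(std::memory_order_acquire) == self;
}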

src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp

Lines changed: 15 additions & 14 deletions
@@ -27,6 +27,7 @@

 // No concurrentHashTableTasks.hpp

+#include "runtime/atomic.hpp"
 #include "runtime/atomicAccess.hpp"
 #include "utilities/concurrentHashTable.inline.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -41,7 +42,7 @@ class ConcurrentHashTable<CONFIG, MT>::BucketsOperation {
   ConcurrentHashTable<CONFIG, MT>* _cht;

   class InternalTableClaimer {
-    volatile size_t _next;
+    Atomic<size_t> _next;
     size_t _limit;
     size_t _size;

@@ -56,14 +57,14 @@ class ConcurrentHashTable<CONFIG, MT>::BucketsOperation {

     void set(size_t claim_size, InternalTable* table) {
       assert(table != nullptr, "precondition");
-      _next = 0;
+      _next.store_relaxed(0);
       _limit = table->_size;
       _size = MIN2(claim_size, _limit);
     }

     bool claim(size_t* start, size_t* stop) {
-      if (AtomicAccess::load(&_next) < _limit) {
-        size_t claimed = AtomicAccess::fetch_then_add(&_next, _size);
+      if (_next.load_relaxed() < _limit) {
+        size_t claimed = _next.fetch_then_add(_size);
         if (claimed < _limit) {
           *start = claimed;
           *stop = MIN2(claimed + _size, _limit);
@@ -78,7 +79,7 @@ class ConcurrentHashTable<CONFIG, MT>::BucketsOperation {
     }

     bool have_more_work() {
-      return AtomicAccess::load_acquire(&_next) >= _limit;
+      return _next.load_acquire() >= _limit;
     }
   };

@@ -108,21 +109,21 @@ class ConcurrentHashTable<CONFIG, MT>::BucketsOperation {
   }

   void thread_owns_resize_lock(Thread* thread) {
-    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() == thread,
            "Should be locked by me");
     assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
            "Operations lock not held");
   }
   void thread_owns_only_state_lock(Thread* thread) {
-    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() == thread,
            "Should be locked by me");
     assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
            "Operations lock held");
   }
   void thread_do_not_own_resize_lock(Thread* thread) {
     assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
            "Operations lock held");
-    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != thread,
            "Should not be locked by me");
   }

@@ -169,15 +170,15 @@ class ConcurrentHashTable<CONFIG, MT>::BulkDeleteTask :
   template <typename EVALUATE_FUNC, typename DELETE_FUNC>
   bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
     size_t start, stop;
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     if (!this->claim(&start, &stop)) {
       return false;
     }
     BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
                                                       eval_f, del_f,
                                                       BucketsOperation::_is_mt);
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     return true;
   }
@@ -210,13 +211,13 @@ class ConcurrentHashTable<CONFIG, MT>::GrowTask :
   // Re-sizes a portion of the table. Returns true if there is more work.
   bool do_task(Thread* thread) {
     size_t start, stop;
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     if (!this->claim(&start, &stop)) {
       return false;
     }
     BucketsOperation::_cht->internal_grow_range(thread, start, stop);
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     return true;
   }
@@ -253,13 +254,13 @@ class ConcurrentHashTable<CONFIG, MT>::StatisticsTask :
   template <typename VALUE_SIZE_FUNC>
   bool do_task(Thread* thread, VALUE_SIZE_FUNC& sz) {
     size_t start, stop;
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     if (!this->claim(&start, &stop)) {
       return false;
     }
     BucketsOperation::_cht->internal_statistics_range(thread, start, stop, sz, _summary, _literal_bytes);
-    assert(BucketsOperation::_cht->_resize_lock_owner != nullptr,
+    assert(BucketsOperation::_cht->_resize_lock_owner.load_relaxed() != nullptr,
            "Should be locked");
     return true;
   }
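
The InternalTableClaimer above is a standard chunked work-claiming counter: each worker reserves its next range of buckets with a fetch-then-add, which returns the counter's value from before the addition. A self-contained standard-C++ sketch of the same technique, with illustrative names (std::atomic's fetch_add plays the role of fetch_then_add):

#include <algorithm>
#include <atomic>
#include <cstddef>

class ChunkClaimer {
  std::atomic<size_t> _next{0};  // next unclaimed bucket index
  size_t _limit;                 // one past the last valid index
  size_t _chunk;                 // indices handed out per claim
 public:
  ChunkClaimer(size_t limit, size_t chunk)
    : _limit(limit), _chunk(std::min(chunk, limit)) {}

  // Reserve [*start, *stop); returns false once the range is exhausted.
  bool claim(size_t* start, size_t* stop) {
    if (_next.load(std::memory_order_relaxed) >= _limit) return false;
    size_t claimed = _next.fetch_add(_chunk, std::memory_order_relaxed);
    if (claimed >= _limit) return false;  // another worker took the rest
    *start = claimed;
    *stop  = std::min(claimed + _chunk, _limit);
    return true;
  }
};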
