diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
index b9d64059680a5..8347f0dd2c3f3 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp
@@ -44,6 +44,9 @@ class ShenandoahYoungHeuristics : public ShenandoahGenerationalHeuristics {

   bool should_start_gc() override;

+  // Young collections can never unload classes
+  bool can_unload_classes() override { return false; }
+
   size_t bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed);

 private:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index cee8727a3f4de..b3942b684c870 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -670,7 +670,7 @@ void ShenandoahConcurrentGC::op_init_mark() {

   if (heap->mode()->is_generational()) {
     if (_generation->is_global()) {
-      heap->old_generation()->cancel_gc();
+      heap->old_generation()->abandon_gc();
     }

     {
@@ -696,18 +696,17 @@ void ShenandoahConcurrentGC::op_init_mark() {

   start_mark();

-  if (_do_old_gc_bootstrap) {
-    shenandoah_assert_generational();
-    // Update region state for both young and old regions
-    ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
-    ShenandoahInitMarkUpdateRegionStateClosure cl;
-    heap->parallel_heap_region_iterate(&cl);
-    heap->old_generation()->ref_processor()->reset_thread_locals();
-  } else {
-    // Update region state for only young regions
+  {
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
     ShenandoahInitMarkUpdateRegionStateClosure cl;
-    _generation->parallel_heap_region_iterate(&cl);
+    if (_do_old_gc_bootstrap) {
+      // Update region state for both young and old regions
+      shenandoah_assert_generational();
+      heap->parallel_heap_region_iterate(&cl);
+    } else {
+      // Update region state for only current generation regions
+      _generation->parallel_heap_region_iterate(&cl);
+    }
   }

   // Weak reference processing
@@ -1098,7 +1097,7 @@ void ShenandoahConcurrentGC::op_init_update_refs() {
   }
 }

-void ShenandoahConcurrentGC::op_update_refs() {
+void ShenandoahConcurrentGC::op_update_refs() const {
   ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
 }

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
index 54d43416fdb3a..4fe863e856f1a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
@@ -118,7 +118,7 @@ class ShenandoahConcurrentGC : public ShenandoahGC {
   void op_cleanup_early();
   void op_evacuate();
   void op_init_update_refs();
-  void op_update_refs();
+  void op_update_refs() const;
   void op_update_thread_roots();
   void op_final_update_refs();

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index cd079d29afe39..963a159fb11b7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -162,10 +162,11 @@ void ShenandoahDegenGC::op_degenerated() {
       } else {
         if (_generation->is_concurrent_mark_in_progress()) {
           // We want to allow old generation marking to be punctuated by young collections
-          // (even if they have degenerated). If this is a global cycle, we'd have cancelled
+          // (even if they have degenerated). If this is a global cycle, we'd have abandoned
           // the entire old gc before coming into this switch. Note that cancel_marking on
-          // the generation does NOT abandon incomplete SATB buffers as cancel_concurrent_mark does.
-          // We need to separate out the old pointers which is done below.
+          // the young generation does NOT abandon incomplete SATB buffers in the old generation
+          // as cancel_concurrent_mark does. We need to separate out the old pointers which
+          // is done below.
           _generation->cancel_marking();
         }

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index 06cf132f94606..52c92fa0a3833 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -104,7 +104,7 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {

   virtual ShenandoahHeuristics* heuristics() const { return _heuristics; }

-  ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
+  ShenandoahReferenceProcessor* ref_processor() const { return _ref_processor; }

   virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode);

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
index ece4150f577e8..ca386eef9bab6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
@@ -170,19 +170,18 @@ ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread:
 }

 ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_explicit_gc(ShenandoahGCRequest &request) const {
-  ShenandoahHeuristics* global_heuristics = _heap->global_generation()->heuristics();
-  request.generation = _heap->global_generation();
-  global_heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause));
-  global_heuristics->record_requested_gc();
+  ShenandoahHeuristics* heuristics = request.generation->heuristics();
+  heuristics->log_trigger("GC request (%s)", GCCause::to_string(request.cause));
+  heuristics->record_requested_gc();

   if (ShenandoahCollectorPolicy::should_run_full_gc(request.cause)) {
-    return stw_full;;
-  } else {
-    // Unload and clean up everything. Note that this is an _explicit_ request and so does not use
-    // the same `should_unload_classes` call as the regulator's concurrent gc request.
-    _heap->set_unload_classes(global_heuristics->can_unload_classes());
-    return concurrent_normal;
-  }
+    return stw_full;
+  }
+
+  // Unload and clean up everything. Note that this is an _explicit_ request and so does not use
+  // the same `should_unload_classes` call as the regulator's concurrent gc request.
+  _heap->set_unload_classes(heuristics->can_unload_classes());
+  return concurrent_normal;
 }

 ShenandoahGenerationalControlThread::GCMode ShenandoahGenerationalControlThread::prepare_for_concurrent_gc(const ShenandoahGCRequest &request) const {
@@ -407,15 +406,11 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(const She
       return;
     }
     case ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP:
+      // Configure the young generation for bootstrapping the old mark
+      young_generation->prepare_for_bootstrap(old_generation);
       old_generation->transition_to(ShenandoahOldGeneration::BOOTSTRAPPING);
     case ShenandoahOldGeneration::BOOTSTRAPPING: {
-      // Configure the young generation's concurrent mark to put objects in
-      // old regions into the concurrent mark queues associated with the old
-      // generation. The young cycle will run as normal except that rather than
-      // ignore old references it will mark and enqueue them in the old concurrent
-      // task queues but it will not traverse them.
       set_gc_mode(bootstrapping_old);
-      young_generation->set_old_gen_task_queues(old_generation->task_queues());
       service_concurrent_cycle(young_generation, request.cause, true);
       _heap->process_gc_stats();
       if (_heap->cancelled_gc()) {
@@ -561,10 +556,10 @@ void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahGen
       }
     }
   } else {
-    assert(generation->is_global(), "If not young, must be GLOBAL");
-    assert(!do_old_gc_bootstrap, "Do not bootstrap with GLOBAL GC");
+    assert(generation->is_global(), "If not young, must be Global");
+    assert(!do_old_gc_bootstrap, "Do not bootstrap with Global GC");
     if (_heap->cancelled_gc()) {
-      msg = "At end of Interrupted Concurrent GLOBAL GC";
+      msg = "At end of Interrupted Concurrent Global GC";
     } else {
       // We only record GC results if GC was successful
       msg = "At end of Concurrent Global GC";
@@ -716,16 +711,7 @@ bool ShenandoahGenerationalControlThread::preempt_old_marking(ShenandoahGenerati
   return generation->is_young() && _allow_old_preemption.try_unset();
 }

-void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
-  // For normal requested GCs (System.gc) we want to block the caller. However,
-  // for whitebox requested GC, we want to initiate the GC and return immediately.
-  // The whitebox caller thread will arrange for itself to wait until the GC notifies
-  // it that has reached the requested breakpoint (phase in the GC).
-  if (cause == GCCause::_wb_breakpoint) {
-    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
-    return;
-  }
-
+void ShenandoahGenerationalControlThread::wait_for_gc_cycle(GCCause::Cause cause, ShenandoahGeneration* generation) {
   // Make sure we have at least one complete GC cycle before unblocking
   // from the explicit GC request.
   //
@@ -740,12 +726,27 @@ void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cau
   const size_t required_gc_id = current_gc_id + 1;
   while (current_gc_id < required_gc_id && !should_terminate()) {
     // Make requests to run a global cycle until at least one is completed
-    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
+    notify_control_thread(cause, generation);
     ml.wait();
     current_gc_id = get_gc_id();
   }
 }

+void ShenandoahGenerationalControlThread::handle_requested_gc(GCCause::Cause cause) {
+  // For normal requested GCs (System.gc) we want to block the caller. However,
+  // for whitebox requested GC, we want to initiate the GC and return immediately.
+  // The whitebox caller thread will arrange for itself to wait until the GC notifies
+  // it that it has reached the requested breakpoint (phase in the GC).
+  if (cause == GCCause::_wb_breakpoint) {
+    notify_control_thread(cause, ShenandoahHeap::heap()->global_generation());
+    return;
+  }
+
+  ShenandoahGeneration* generation = cause == GCCause::_wb_young_gc
+      ? ShenandoahHeap::heap()->young_generation()
+      : ShenandoahHeap::heap()->global_generation();
+  wait_for_gc_cycle(cause, generation);
+}
+
 void ShenandoahGenerationalControlThread::notify_gc_waiters() {
   MonitorLocker ml(&_gc_waiters_lock);
   ml.notify_all();
@@ -771,8 +772,9 @@ void ShenandoahGenerationalControlThread::set_gc_mode(GCMode new_mode) {

 void ShenandoahGenerationalControlThread::set_gc_mode(MonitorLocker& ml, GCMode new_mode) {
   if (_gc_mode != new_mode) {
-    log_debug(gc, thread)("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
-    EventMark event("Control thread transition from: %s, to %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
+    FormatBuffer<> msg("Transition from: %s to: %s", gc_mode_name(_gc_mode), gc_mode_name(new_mode));
+    log_debug(gc, thread)("%s", msg.buffer());
+    Events::log(this, "%s", msg.buffer());
     _gc_mode = new_mode;
     ml.notify_all();
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
index b7dbedd5e8461..ccd11e81b3c25 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
@@ -98,6 +98,9 @@ class ShenandoahGenerationalControlThread: public ShenandoahController {
   // Return true if the request to start a concurrent GC for the given generation succeeded.
   bool request_concurrent_gc(ShenandoahGeneration* generation);

+  // Visible for white box API to start an old cycle
+  void wait_for_gc_cycle(GCCause::Cause cause, ShenandoahGeneration* generation);
+
   // Returns the current state of the control thread
   GCMode gc_mode() const {
     return _gc_mode;
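[Editorial note] The new wait_for_gc_cycle above isolates a handshake that handle_requested_gc used to inline: the requesting thread repeatedly notifies the control thread and blocks on the waiters' monitor until the global gc id advances past the id observed at request time. A minimal sketch of the same pattern outside HotSpot follows; the mutex, condition variable, gc-id counter and notify hook are simplified stand-ins, not the real HotSpot API:

  #include <condition_variable>
  #include <cstddef>
  #include <mutex>

  // Simplified stand-ins for the control thread's shared state (illustrative only).
  static std::mutex gc_waiters_lock;
  static std::condition_variable gc_waiters;
  static std::size_t gc_id = 0;        // bumped by the collector when a cycle completes
  static bool terminating = false;

  // Block the caller until at least one full GC cycle completes after the request.
  void wait_for_gc_cycle(void (*notify_control_thread)()) {
    std::unique_lock<std::mutex> ml(gc_waiters_lock);
    const std::size_t required_gc_id = gc_id + 1;  // one *complete* cycle from now
    while (gc_id < required_gc_id && !terminating) {
      notify_control_thread();  // re-request; an earlier cycle may have been preempted
      gc_waiters.wait(ml);      // woken on every cycle completion
    }
  }

Requiring current_gc_id + 1, rather than "any progress", is what guarantees the caller observes a cycle that started no earlier than its own request.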
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
index 78672ee10a533..073b743c58e1b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
@@ -58,8 +58,11 @@ void ShenandoahGenerationalFullGC::prepare() {
   // No need for old_gen->increase_used() as this was done when plabs were allocated.
   heap->reset_generation_reserves();

+  // If we were bootstrapping, we don't need that configuration anymore
+  heap->young_generation()->clear_bootstrap_configuration();
+
   // Full GC supersedes any marking or coalescing in old generation.
-  heap->old_generation()->cancel_gc();
+  heap->old_generation()->abandon_gc();
 }

 void ShenandoahGenerationalFullGC::handle_completion(ShenandoahHeap* heap) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index f887cc9064e82..2da4c5ee23dce 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -127,6 +127,11 @@ void ShenandoahGenerationalHeap::post_initialize_heuristics() {
   _old_generation->post_initialize(this);
 }

+bool ShenandoahGenerationalHeap::start_old_collection() {
+  static_cast<ShenandoahGenerationalControlThread*>(_control_thread)->wait_for_gc_cycle(GCCause::_shenandoah_concurrent_gc, old_generation());
+  return true;
+}
+
 void ShenandoahGenerationalHeap::initialize_serviceability() {
   assert(mode()->is_generational(), "Only for the generational mode");
   _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this);
@@ -938,6 +943,17 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {

 void ShenandoahGenerationalHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
+
+  ShenandoahReferenceProcessor* old_ref_processor = generation->ref_processor()->get_old_generation_ref_processor();
+  if (old_ref_processor != nullptr) {
+    // Discovered lists may have young references with old referents. These references will be
+    // processed at the end of old marking. We need to update them.
+    assert(generation->is_young(), "We should only have old discovered lists in a young collection");
+    ShenandoahPhaseTimings::Phase phase = concurrent ? ShenandoahPhaseTimings::conc_weak_refs : ShenandoahPhaseTimings::degen_gc_weakrefs;
+    old_ref_processor->heal_discovered_lists(phase, workers(), concurrent);
+  }
+
   const uint nworkers = workers()->active_workers();
   ShenandoahRegionChunkIterator work_list(nworkers);
   if (concurrent) {
@@ -1026,9 +1042,8 @@ void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {

 void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
   shenandoah_assert_heaplocked_or_safepoint();
-  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
-  // transient state. Otherwise, these actions have no effect.
-  reset_generation_reserves();
+
+  complete_cycle();

   if (!old_generation()->is_parsable()) {
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_coalesce_and_fill);
@@ -1037,6 +1052,8 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() {
 }

 void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
+  complete_cycle();
+
   if (!old_generation()->is_parsable()) {
     // Class unloading may render the card offsets unusable, so we must rebuild them before
     // the next remembered set scan. We _could_ let the control thread do this sometime after
@@ -1047,6 +1064,17 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() {
     // throw off the heuristics.
     entry_global_coalesce_and_fill();
   }
+
 }

+void ShenandoahGenerationalHeap::complete_cycle() {
+  if (young_generation()->is_bootstrap_cycle()) {
+    // Once the bootstrap cycle is completed, the young generation is no longer obliged to mark old
+    young_generation()->clear_bootstrap_configuration();
+  }
+
+  // In case degeneration interrupted concurrent evacuation or update references, we need to clean up
+  // transient state. Otherwise, these actions have no effect.
+  reset_generation_reserves();
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
index 736026916f7e4..4685d6664feff 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
@@ -43,6 +43,8 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap {
   void initialize_heuristics() override;
   void post_initialize_heuristics() override;

+  bool start_old_collection();
+
   static ShenandoahGenerationalHeap* heap() {
     assert(ShenandoahCardBarrier, "Should have card barrier to use genenrational heap");
     CollectedHeap* heap = Universe::heap();
@@ -142,6 +144,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap {
   void complete_degenerated_cycle();
   void complete_concurrent_cycle();
 private:
+  void complete_cycle();
   void initialize_controller() override;
   void entry_global_coalesce_and_fill();

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index a44a831ef3df0..a39197bfdea39 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -57,10 +57,6 @@ void ShenandoahOldGC::op_final_mark() {
   _mark.finish_mark();
   assert(!heap->cancelled_gc(), "STW mark cannot OOM");

-  // Old collection is complete, the young generation no longer needs this
-  // reference to the old concurrent mark so clean it up.
-  heap->young_generation()->set_old_gen_task_queues(nullptr);
-
   // We need to do this because weak root cleaning reports the number of dead handles
   JvmtiTagMap::set_needs_cleaning();

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index c7cf013d034f2..14822e198bcc4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -293,7 +293,7 @@ void ShenandoahOldGeneration::cancel_marking() {
   ShenandoahGeneration::cancel_marking();
 }

-void ShenandoahOldGeneration::cancel_gc() {
+void ShenandoahOldGeneration::abandon_gc() {
   shenandoah_assert_safepoint();
   if (is_idle()) {
 #ifdef ASSERT
@@ -305,8 +305,6 @@ void ShenandoahOldGeneration::cancel_gc() {
     cancel_marking();
     // Stop tracking old regions
     abandon_collection_candidates();
-    // Remove old generation access to young generation mark queues
-    ShenandoahHeap::heap()->young_generation()->set_old_gen_task_queues(nullptr);
     // Transition to IDLE now.
     transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
   }
@@ -446,8 +444,9 @@ const char* ShenandoahOldGeneration::state_name(State state) {

 void ShenandoahOldGeneration::transition_to(State new_state) {
   if (_state != new_state) {
-    log_debug(gc, thread)("Old generation transition from %s to %s", state_name(_state), state_name(new_state));
-    EventMark event("Old was %s, now is %s", state_name(_state), state_name(new_state));
+    FormatBuffer<> msg("Old was %s, now is %s", state_name(_state), state_name(new_state));
+    log_debug(gc, thread)("%s", msg.buffer());
+    Events::log(Thread::current(), "%s", msg.buffer());
     validate_transition(new_state);
     _state = new_state;
   }
@@ -531,10 +530,11 @@ void ShenandoahOldGeneration::validate_transition(State new_state) {
       assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
       assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot bootstrap with mixed collection candidates");
       assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot still be making old regions parsable.");
+      assert(heap->young_generation()->is_bootstrap_cycle(), "Young generation needs old mark queues.");
       break;
     case MARKING:
       assert(_state == BOOTSTRAPPING, "Must have finished bootstrapping before marking, state is '%s'", state_name(_state));
-      assert(heap->young_generation()->old_gen_task_queues() != nullptr, "Young generation needs old mark queues.");
+      assert(!heap->young_generation()->is_bootstrap_cycle(), "Young generation is done with bootstrapping");
       assert(heap->is_concurrent_old_mark_in_progress(), "Should be marking old now.");
       break;
     case EVACUATING_AFTER_GLOBAL:
@@ -552,7 +552,6 @@ void ShenandoahOldGeneration::validate_transition(State new_state) {
 bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
-  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
   assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
   assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
   assert(!_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index 90c1458ac9755..b4c42cd2bf26d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -225,8 +225,8 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
   void record_success_concurrent(bool abbreviated) override;
   void cancel_marking() override;

-  // Cancels old gc and transitions to the idle state
-  void cancel_gc();
+  // Abandons all old gc state and transitions to the idle state
+  void abandon_gc();

   // The SATB barrier will be "enabled" until old marking completes. This means it is
   // possible for an entire young collection cycle to execute while the SATB barrier is enabled.
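[Editorial note] The validate_transition changes above replace raw old_gen_task_queues() checks with the young generation's is_bootstrap_cycle() predicate, tightening the old generation's state machine around bootstrapping. A compressed sketch of the two amended transition checks; the state and field names follow the patch, but this code is illustrative, not HotSpot source:

  #include <cassert>

  // Illustrative subset of ShenandoahOldGeneration::State from the patch.
  enum class OldState { WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING };

  // Input mirroring the assert conditions in validate_transition().
  struct YoungView {
    bool is_bootstrap_cycle;  // young still holds old mark queues / old ref processor
  };

  void validate_transition(OldState from, OldState to, const YoungView& young) {
    switch (to) {
      case OldState::BOOTSTRAPPING:
        // prepare_for_bootstrap() must already have wired young to old's queues.
        assert(from == OldState::WAITING_FOR_BOOTSTRAP && young.is_bootstrap_cycle);
        break;
      case OldState::MARKING:
        // complete_cycle() clears the bootstrap wiring before old marking begins.
        assert(from == OldState::BOOTSTRAPPING && !young.is_bootstrap_cycle);
        break;
      default:
        break;
    }
  }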
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index 7187431c8f806..cbad1d38144bf 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -179,6 +179,49 @@ static void soft_reference_update_clock() {
   java_lang_ref_SoftReference::set_clock(now);
 }

+template <typename CallbackT>
+class ShenandoahReferenceProcessorTask : public WorkerTask {
+private:
+  bool const _concurrent;
+  ShenandoahPhaseTimings::Phase const _phase;
+  ShenandoahRefProcThreadLocal* const _ref_proc_thread_locals;
+  CallbackT _callback;
+  volatile uint _iterate_discovered_list_id;
+
+public:
+  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent,
+                                   ShenandoahRefProcThreadLocal* ref_proc_thread_locals, CallbackT callback) :
+    WorkerTask("ShenandoahReferenceProcessorTask"),
+    _concurrent(concurrent),
+    _phase(phase),
+    _ref_proc_thread_locals(ref_proc_thread_locals),
+    _callback(callback),
+    _iterate_discovered_list_id(0) {
+  }
+
+  virtual void work(uint worker_id) {
+    if (_concurrent) {
+      ShenandoahConcurrentWorkerSession worker_session(worker_id);
+      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id, true);
+      do_work();
+    } else {
+      ShenandoahParallelWorkerSession worker_session(worker_id);
+      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id, true);
+      do_work();
+    }
+  }
+
+  void do_work() {
+    const uint max_workers = ShenandoahHeap::heap()->max_workers();
+    uint worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
+    while (worker_id < max_workers) {
+      ShenandoahRefProcThreadLocal& ref_proc_data = _ref_proc_thread_locals[worker_id];
+      _callback(ref_proc_data, worker_id);
+      worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
+    }
+  }
+};
+
 ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
   _discovered_list(nullptr),
   _encountered_count(),
@@ -196,6 +239,30 @@ void ShenandoahRefProcThreadLocal::reset() {
   }
 }

+template <typename T>
+void ShenandoahRefProcThreadLocal::heal_discovered_list() {
+  if (_discovered_list == nullptr) {
+    return;
+  }
+
+  T* list = reinterpret_cast<T*>(&_discovered_list);
+  while (list != nullptr) {
+    const oop discovered_ref = CompressedOops::decode(*list);
+    const oop reference = lrb(discovered_ref);
+    if (discovered_ref != reference) {
+      // Update our list with the forwarded object
+      set_oop_field(list, reference);
+    }
+
+    // Discovered list terminates with a self-loop
+    const oop discovered = lrb(reference_discovered<T>(reference));
+    if (reference == discovered) {
+      break;
+    }
+    list = reference_discovered_addr<T>(reference);
+  }
+}
+
 template <typename T>
 T* ShenandoahRefProcThreadLocal::discovered_list_addr() {
   return reinterpret_cast<T*>(&_discovered_list);
@@ -228,8 +295,8 @@ ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(ShenandoahGeneration*
   _ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
   _pending_list(nullptr),
   _pending_list_tail(&_pending_list),
-  _iterate_discovered_list_id(0U),
-  _generation(generation) {
+  _generation(generation),
+  _old_generation_ref_processor(nullptr) {
   for (size_t i = 0; i < max_workers; i++) {
     _ref_proc_thread_locals[i].reset();
   }
@@ -259,6 +326,20 @@ void ShenandoahReferenceProcessor::set_soft_reference_policy(bool clear) {
   _soft_reference_policy->setup();
 }

+void ShenandoahReferenceProcessor::heal_discovered_lists(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
+  ShenandoahReferenceProcessorTask heal_lists_task(phase, concurrent, _ref_proc_thread_locals,
+    [&](ShenandoahRefProcThreadLocal& ref_proc_data, uint worker_id) {
+      if (UseCompressedOops) {
+        ref_proc_data.heal_discovered_list<narrowOop>();
+      } else {
+        ref_proc_data.heal_discovered_list<oop>();
+      }
+    }
+  );
+  workers->run_task(&heal_lists_task);
+}
+
+
 template <typename T>
 bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, ReferenceType type) const {
   if (type == REF_FINAL) {
@@ -294,7 +375,6 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType
   T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(reference);
   T heap_oop = RawAccess<>::oop_load(referent_addr);
   oop referent = CompressedOops::decode(heap_oop);
-  ShenandoahHeap* heap = ShenandoahHeap::heap();

   if (is_inactive<T>(reference, referent, type)) {
     log_trace(gc,ref)("Reference inactive: " PTR_FORMAT, p2i(reference));
@@ -312,6 +392,12 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType
   }

   if (!_generation->contains(referent)) {
+    if (_old_generation_ref_processor != nullptr) {
+      log_trace(gc,ref)("Discovered reference for old: " PTR_FORMAT, p2i(reference));
+      _old_generation_ref_processor->discover_reference(reference, type);
+      return true;
+    }
+
     log_trace(gc,ref)("Referent outside of active generation: " PTR_FORMAT, p2i(referent));
     return false;
   }
@@ -453,12 +539,9 @@ oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
 }

 template <typename T>
-T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type, uint worker_id) {
+T* ShenandoahReferenceProcessor::keep(oop reference, ReferenceType type) {
   log_trace(gc, ref)("Enqueued Reference: " PTR_FORMAT " (%s)", p2i(reference), reference_type_name(type));

-  // Update statistics
-  _ref_proc_thread_locals[worker_id].inc_enqueued(type);
-
   // Make reference inactive
   make_inactive<T>(reference, type);
@@ -488,7 +571,11 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
     if (should_drop<T>(reference, type)) {
       set_oop_field(p, drop<T>(reference, type));
     } else {
-      p = keep<T>(reference, type, worker_id);
+      // Update statistics
+      refproc_data.inc_enqueued(type);
+
+      // Keep this reference on the list and make it inactive
+      p = keep<T>(reference, type);
     }

     const oop discovered = lrb(reference_discovered<T>(reference));
@@ -516,53 +603,18 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
   }
 }

-void ShenandoahReferenceProcessor::work() {
-  // Process discovered references
-  uint max_workers = ShenandoahHeap::heap()->max_workers();
-  uint worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
-  while (worker_id < max_workers) {
-    if (UseCompressedOops) {
-      process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
-    } else {
-      process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
-    }
-    worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
-  }
-}
-
-class ShenandoahReferenceProcessorTask : public WorkerTask {
-private:
-  bool const _concurrent;
-  ShenandoahPhaseTimings::Phase const _phase;
-  ShenandoahReferenceProcessor* const _reference_processor;
-
-public:
-  ShenandoahReferenceProcessorTask(ShenandoahPhaseTimings::Phase phase, bool concurrent, ShenandoahReferenceProcessor* reference_processor) :
-    WorkerTask("ShenandoahReferenceProcessorTask"),
-    _concurrent(concurrent),
-    _phase(phase),
-    _reference_processor(reference_processor) {
-  }
+void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {

-  virtual void work(uint worker_id) {
-    if (_concurrent) {
-      ShenandoahConcurrentWorkerSession worker_session(worker_id);
-      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
-      _reference_processor->work();
+  auto process_refs = [&](ShenandoahRefProcThreadLocal& ref_proc_data, uint worker_id) {
+    if (UseCompressedOops) {
+      process_references<narrowOop>(ref_proc_data, worker_id);
     } else {
-      ShenandoahParallelWorkerSession worker_session(worker_id);
-      ShenandoahWorkerTimingsTracker x(_phase, ShenandoahPhaseTimings::WeakRefProc, worker_id);
-      _reference_processor->work();
+      process_references<oop>(ref_proc_data, worker_id);
     }
-  }
-};
-
-void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
-
-  AtomicAccess::release_store_fence(&_iterate_discovered_list_id, 0U);
+  };

   // Process discovered lists
-  ShenandoahReferenceProcessorTask task(phase, concurrent, this);
+  ShenandoahReferenceProcessorTask task(phase, concurrent, _ref_proc_thread_locals, process_refs);
   workers->run_task(&task);

   // Update SoftReference clock
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.hpp
index be11a364ab7c5..1c1668f88ff19 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.hpp
@@ -97,6 +97,9 @@ class ShenandoahRefProcThreadLocal : public CHeapObj<mtGC> {
     _mark_closure = mark_closure;
   }

+  template <typename T>
+  void heal_discovered_list();
+
   template <typename T>
   T* discovered_list_addr();
   template <typename T>
@@ -136,12 +139,12 @@ class ShenandoahReferenceProcessor : public ReferenceDiscoverer {
   oop _pending_list;
   void* _pending_list_tail; // T*

-  volatile uint _iterate_discovered_list_id;
-
   ReferenceProcessorStats _stats;

   ShenandoahGeneration* _generation;

+  ShenandoahReferenceProcessor* _old_generation_ref_processor;
+
   template <typename T>
   bool is_inactive(oop reference, oop referent, ReferenceType type) const;
   bool is_strongly_live(oop referent) const;
@@ -161,7 +164,7 @@ class ShenandoahReferenceProcessor : public ReferenceDiscoverer {
   template <typename T>
   oop drop(oop reference, ReferenceType type);
   template <typename T>
-  T* keep(oop reference, ReferenceType type, uint worker_id);
+  T* keep(oop reference, ReferenceType type);

   template <typename T>
   void process_references(ShenandoahRefProcThreadLocal& refproc_data, uint worker_id);
@@ -181,13 +184,44 @@ class ShenandoahReferenceProcessor : public ReferenceDiscoverer {

   void set_soft_reference_policy(bool clear);

+  void set_old_generation_ref_processor(ShenandoahReferenceProcessor* ref_processor) {
+    _old_generation_ref_processor = ref_processor;
+  }
+
+  void clear_old_generation_ref_processor() {
+    _old_generation_ref_processor = nullptr;
+  }
+
+  ShenandoahReferenceProcessor* get_old_generation_ref_processor() const {
+    return _old_generation_ref_processor;
+  }
+
+  // The generational mode for Shenandoah will collect _referents_ for the generation
+  // being collected. For example, if we have a young reference pointing to an old
+  // referent, that young reference will be processed after we finish marking the old
+  // generation. This presents a problem for discovery.
+  //
+  // When the young mark _encounters_ a young reference with an old referent, it
+  // cannot "discover" it because old marking hasn't finished. However, if it does not
+  // discover it, the old referent will be strongly marked. This will prevent the
+  // old generation from clearing the referent (if it even reaches it again during
+  // old marking).
+  //
+  // To solve this, we let young reference processing discover the old reference
+  // by having it use the old generation reference processor to discover it. This means
+  // the old reference processor can have a discovered list that contains young
+  // weak references. If any of these young references reside in a region that is collected,
+  // old reference processing will crash when it processes this young reference. Therefore,
+  // we have this method to traverse the discovered lists after young evacuation is
+  // complete. It will replace any forwarded entries in the discovered list with the
+  // forwardee.
+  void heal_discovered_lists(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent);
+
   bool discover_reference(oop obj, ReferenceType type) override;

   void process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent);

-  const ReferenceProcessorStats& reference_process_stats() { return _stats; }
-
-  void work();
+  const ReferenceProcessorStats& reference_process_stats() const { return _stats; }

   void abandon_partial_discovery();
 };
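[Editorial note] The comment above describes the healing walk concretely: each thread-local discovered list is a chain threaded through the Reference.discovered field, terminated by a self-loop, and after a young evacuation some entries may be stale forwarded copies. A self-contained model of that traversal; Node, resolve() and the field layout are illustrative stand-ins for oops and the load-reference barrier, not HotSpot code:

  // Illustrative model: a "discovered" chain terminated by a self-loop,
  // where evacuation may have left forwarded copies behind.
  struct Node {
    Node* forwardee;   // non-null once evacuated (the LRB would resolve this)
    Node* discovered;  // next entry; points to itself at the end of the list
  };

  static Node* resolve(Node* n) {  // stand-in for the load-reference barrier
    return n->forwardee != nullptr ? n->forwardee : n;
  }

  // Mirror of the healing walk: update each link to the forwarded copy.
  void heal_discovered_list(Node** list_head) {
    Node** slot = list_head;
    while (*slot != nullptr) {
      Node* ref = resolve(*slot);
      if (ref != *slot) {
        *slot = ref;                        // replace stale entry with forwardee
      }
      Node* next = resolve(ref->discovered);
      if (next == ref) {
        break;                              // self-loop terminates the list
      }
      slot = &ref->discovered;
    }
  }

The self-loop terminator is why the walk compares the resolved discovered field against the reference itself instead of checking for null.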
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
index f00ce16136f13..541b013d04fe6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
@@ -27,6 +27,7 @@
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
+#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"

@@ -39,15 +40,31 @@ ShenandoahYoungGeneration::ShenandoahYoungGeneration(uint max_queues) :
 void ShenandoahYoungGeneration::set_concurrent_mark_in_progress(bool in_progress) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   heap->set_concurrent_young_mark_in_progress(in_progress);
-  if (is_bootstrap_cycle() && in_progress && !heap->is_prepare_for_old_mark_in_progress()) {
-    // This is not a bug. When the bootstrapping marking phase is complete,
-    // the old generation marking is still in progress, unless it's not.
-    // In the case that old-gen preparation for mixed evacuation has been
-    // preempted, we do not want to set concurrent old mark to be in progress.
-    heap->set_concurrent_old_mark_in_progress(in_progress);
+  if (is_bootstrap_cycle() && in_progress) {
+    // The start of concurrent mark for young is also the start of the concurrent mark for old
+    assert(!heap->is_prepare_for_old_mark_in_progress(), "Filling old regions must be complete before bootstrap");
+    heap->set_concurrent_old_mark_in_progress(true);
   }
 }

+// A bootstrap cycle will run as a normal young cycle, except that rather than
+// ignore old references it will mark and enqueue them in the old concurrent
+// task queues, but it will not traverse them. Similarly, we must configure
+// the young ref processor to have the old ref processor discover old weak
+// references.
+void ShenandoahYoungGeneration::prepare_for_bootstrap(ShenandoahGeneration* generation) {
+  assert(generation->is_old(), "Need old generation to prepare for bootstrap");
+  ShenandoahReferenceProcessor* old_ref_processor = generation->ref_processor();
+  _old_gen_task_queues = generation->task_queues();
+  ref_processor()->set_old_generation_ref_processor(old_ref_processor);
+  old_ref_processor->reset_thread_locals();
+}
+
+void ShenandoahYoungGeneration::clear_bootstrap_configuration() {
+  _old_gen_task_queues = nullptr;
+  ref_processor()->clear_old_generation_ref_processor();
+}
+
 bool ShenandoahYoungGeneration::contains(ShenandoahAffiliation affiliation) const {
   return affiliation == YOUNG_GENERATION;
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
index 930c5ff174737..76d50fe09de4c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
@@ -60,19 +60,23 @@ class ShenandoahYoungGeneration : public ShenandoahGeneration {
   bool contains(oop obj) const override;

   void reserve_task_queues(uint workers) override;
-  void set_old_gen_task_queues(ShenandoahObjToScanQueueSet* old_gen_queues) {
-    _old_gen_task_queues = old_gen_queues;
-  }
+
   ShenandoahObjToScanQueueSet* old_gen_task_queues() const override { return _old_gen_task_queues; }

   // Returns true if the young generation is configured to enqueue old
   // oops for the old generation mark queues.
-  bool is_bootstrap_cycle() {
+  bool is_bootstrap_cycle() const {
     return _old_gen_task_queues != nullptr;
   }

+  // Take a reference to the old task queues and reference processor
+  void prepare_for_bootstrap(ShenandoahGeneration* generation);
+
+  // Clear references to old gen marking
+  void clear_bootstrap_configuration();
+
   size_t bytes_allocated_since_gc_start() const override;
   size_t used() const override;
   size_t used_regions() const override;
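[Editorial note] prepare_for_bootstrap and clear_bootstrap_configuration above are a strict pair: the control thread wires young to old before the bootstrap young cycle, and complete_cycle (or full GC) unwires it once the bootstrap obligation ends, with is_bootstrap_cycle() derived from the queue pointer. A condensed model of that wiring, with the types reduced to the two fields involved (this is not the real class layout):

  struct OldGeneration { void* task_queues; void* ref_processor; };

  // Condensed model of the young generation's bootstrap wiring.
  struct YoungGeneration {
    void* old_gen_task_queues = nullptr;
    void* old_generation_ref_processor = nullptr;

    bool is_bootstrap_cycle() const { return old_gen_task_queues != nullptr; }

    void prepare_for_bootstrap(OldGeneration& old_gen) {
      old_gen_task_queues = old_gen.task_queues;             // mark old refs into old queues
      old_generation_ref_processor = old_gen.ref_processor;  // discover old weak refs there
    }

    void clear_bootstrap_configuration() {  // both fields must be cleared together
      old_gen_task_queues = nullptr;
      old_generation_ref_processor = nullptr;
    }
  };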
diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp
index 1c2f5527ff96e..44205eb259254 100644
--- a/src/hotspot/share/prims/whitebox.cpp
+++ b/src/hotspot/share/prims/whitebox.cpp
@@ -118,6 +118,10 @@
 #if INCLUDE_SERIALGC
 #include "gc/serial/serialHeap.hpp"
 #endif // INCLUDE_SERIALGC
+#if INCLUDE_SHENANDOAHGC
+#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#endif // INCLUDE_SHENANDOAHGC
 #if INCLUDE_ZGC
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zHeap.inline.hpp"
@@ -701,6 +705,46 @@ WB_END

 #endif // INCLUDE_G1GC

+#if INCLUDE_SHENANDOAHGC
+
+WB_ENTRY(jint, WB_ShenandoahRegionSize(JNIEnv* env, jobject o))
+  if (UseShenandoahGC) {
+    return ShenandoahHeapRegion::region_size_bytes_jint();
+  }
+THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_ShenandoahRegionSize: Shenandoah GC is not enabled");
+WB_END
+
+WB_ENTRY(jint, WB_ShenandoahRegionCount(JNIEnv* env, jobject o))
+  if (UseShenandoahGC) {
+    return static_cast<jint>(ShenandoahHeap::heap()->num_regions());
+  }
+THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_ShenandoahRegionCount: Shenandoah GC is not enabled");
+WB_END
+
+WB_ENTRY(jint, WB_ShenandoahRegionIndex(JNIEnv* env, jobject o, jobject obj))
+  if (UseShenandoahGC) {
+    oop resolved = JNIHandles::resolve(obj);
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    if (heap->is_in(resolved)) {
+      return static_cast<jint>(heap->heap_region_containing(resolved)->index());
+    }
+    return -1;
+  }
+THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_ShenandoahRegionIndex: Shenandoah GC is not enabled");
+WB_END
+
+WB_ENTRY(jboolean, WB_ShenandoahOldGC(JNIEnv* env, jobject o))
+  if (UseShenandoahGC) {
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      return ShenandoahGenerationalHeap::heap()->start_old_collection();
+    }
+    return false;
+  }
+THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_ShenandoahOldGC: Shenandoah GC is not enabled");
+WB_END
+
+#endif // INCLUDE_SHENANDOAHGC
+
 // Alloc memory using the test memory tag so that we can use that to see if
 // NMT picks it up correctly
 WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
@@ -2890,6 +2934,12 @@ static JNINativeMethod methods[] = {
   {CC"g1MemoryNodeIds",    CC"()[I",                  (void*)&WB_G1MemoryNodeIds },
   {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
 #endif // INCLUDE_G1GC
+#if INCLUDE_SHENANDOAHGC
+  {CC"shenandoahRegionSize",  CC"()I",                   (void*)&WB_ShenandoahRegionSize },
+  {CC"shenandoahRegionCount", CC"()I",                   (void*)&WB_ShenandoahRegionCount },
+  {CC"shenandoahRegionIndex", CC"(Ljava/lang/Object;)I", (void*)&WB_ShenandoahRegionIndex },
+  {CC"shenandoahOldGC",       CC"()Z",                   (void*)&WB_ShenandoahOldGC },
+#endif
   {CC"NMTMalloc",           CC"(J)J",                  (void*)&WB_NMTMalloc          },
   {CC"NMTMallocWithPseudoStack", CC"(JI)J",            (void*)&WB_NMTMallocWithPseudoStack},
   {CC"NMTMallocWithPseudoStackAndType", CC"(JII)J",    (void*)&WB_NMTMallocWithPseudoStackAndType},
diff --git a/test/hotspot/jtreg/gc/shenandoah/generational/TestGenerationalReferenceProcessing.java b/test/hotspot/jtreg/gc/shenandoah/generational/TestGenerationalReferenceProcessing.java
new file mode 100644
index 0000000000000..8b06975b2c021
--- /dev/null
+++ b/test/hotspot/jtreg/gc/shenandoah/generational/TestGenerationalReferenceProcessing.java
@@ -0,0 +1,250 @@
+package gc.shenandoah.generational;
+
+import java.lang.ref.Reference;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
+import java.util.*;
+import java.util.function.Supplier;
+
+import jdk.test.whitebox.WhiteBox;
+
+/*
+ * @test id=young
+ * @requires vm.gc.Shenandoah
+ * @summary Confirm that young non-strong references are collected.
+ * @library /testlibrary /test/lib /
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:.
+ *      -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *      -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
+ *      -XX:ShenandoahGenerationalMinTenuringAge=1 -XX:ShenandoahGenerationalMaxTenuringAge=1
+ *      -XX:ShenandoahLearningSteps=0 -XX:ShenandoahIgnoreOldGrowthBelowPercentage=100
+ *      -XX:-UseCompressedOops
+ *      -Xmx128M -Xms128M -ea
+ *      gc.shenandoah.generational.TestGenerationalReferenceProcessing young
+ */
+
+/*
+ * @test id=old
+ * @requires vm.gc.Shenandoah
+ * @summary Confirm that young non-strong references are collected.
+ * @library /testlibrary /test/lib /
+ * @build jdk.test.whitebox.WhiteBox
+ * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:.
+ *      -XX:+IgnoreUnrecognizedVMOptions
+ *      -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *      -XX:+UnlockExperimentalVMOptions
+ *      -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational
+ *      -XX:ShenandoahGenerationalMinTenuringAge=1 -XX:ShenandoahGenerationalMaxTenuringAge=1
+ *      -XX:ShenandoahLearningSteps=0 -XX:ShenandoahIgnoreOldGrowthBelowPercentage=100
+ *      -XX:-UseCompressedOops
+ *      -Xmx128M -Xms128M -ea
+ *      gc.shenandoah.generational.TestGenerationalReferenceProcessing old
+ */
+public class TestGenerationalReferenceProcessing {
+    static final int OLD = 0;
+    static final int YOUNG = 1;
+
+    private static final WhiteBox WB = WhiteBox.getWhiteBox();
+
+    private static class LeakedObject {}
+
+    private static final int REGION_SIZE = WB.shenandoahRegionSize();
+    private static final int REGION_COUNT = WB.shenandoahRegionCount();
+    private static final int OBJECT_SIZE = (int) WB.getObjectSize(new LeakedObject());
+
+    // We don't want to fill too much of the heap, or the heuristics will trigger GCs instead of our test
+    private static final int REGIONS_TO_FILL = REGION_COUNT / 12;
+    private static final int OBJECTS_PER_REGION = REGION_SIZE / OBJECT_SIZE / 2;
+    private static final int OBJECT_COUNT = OBJECTS_PER_REGION * REGIONS_TO_FILL;
+
+    private static final List<WeakReference<LeakedObject>> WEAK_REFS = new ArrayList<>(OBJECT_COUNT);
+    private static final List<LeakedObject> REFERENTS = new ArrayList<>(OBJECT_COUNT);
+    private static final ReferenceQueue<LeakedObject> refQueue = new ReferenceQueue<>();
+
+    private static final int MINIMUM_CROSS_GENERATIONAL_REFERENCE_COUNT = 50;
+
+    static class ReferenceClassifier {
+        private final Object[][] references;
+
+        ReferenceClassifier() {
+            references = new Object[][]{
+                {new HashSet<WeakReference<LeakedObject>>(), new HashSet<WeakReference<LeakedObject>>()},
+                {new HashSet<WeakReference<LeakedObject>>(), new HashSet<WeakReference<LeakedObject>>()}
+            };
+        }
+
+        void classify() {
+            clear();
+
+            for (int j = 0; j < TestGenerationalReferenceProcessing.WEAK_REFS.size(); ++j) {
+                var weakRef = TestGenerationalReferenceProcessing.WEAK_REFS.get(j);
+                var referent = weakRef.get();
+                if (referent != null) {
+                    int row = WB.isObjectInOldGen(weakRef) ? OLD : YOUNG;
+                    int column = WB.isObjectInOldGen(referent) ? OLD : YOUNG;
+                    getReferences(row, column).add(weakRef);
+                }
+            }
+        }
+
+        private void clear() {
+            getReferences(OLD, OLD).clear();
+            getReferences(OLD, YOUNG).clear();
+            getReferences(YOUNG, OLD).clear();
+            getReferences(YOUNG, YOUNG).clear();
+        }
+
+        HashSet<WeakReference<LeakedObject>> getReferences(int reference, int referent) {
+            assert (reference == OLD || reference == YOUNG);
+            assert (referent == OLD || referent == YOUNG);
+            return (HashSet<WeakReference<LeakedObject>>) references[reference][referent];
+        }
+
+        @Override
+        public String toString() {
+            return String.format("OO: %d, OY: %d, YO: %d, YY: %d",
+                    getReferences(OLD, OLD).size(), getReferences(OLD, YOUNG).size(),
+                    getReferences(YOUNG, OLD).size(), getReferences(YOUNG, YOUNG).size());
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 1) {
+            System.out.println("Call with generation to test: young|old");
+            return;
+        }
+
+        if ("young".equals(args[0])) {
+            testCollectCrossGenerationalReferents(OLD, YOUNG);
+        } else if ("old".equals(args[0])) {
+            testCollectCrossGenerationalReferents(YOUNG, OLD);
+        }
+    }
+
+    private static String name(int generation) {
+        return generation == OLD ? "old" : "young";
+    }
+
+    private static void testCollectCrossGenerationalReferents(int referenceGen, int referentGen) {
+        ReferenceClassifier classifier = new ReferenceClassifier();
+
+        useMemoryUntil(() -> {
+            classifier.classify();
+            return classifier.getReferences(referenceGen, referentGen).size() > MINIMUM_CROSS_GENERATIONAL_REFERENCE_COUNT;
+        });
+
+        assert !classifier.getReferences(referenceGen, referentGen).isEmpty() : "Conditions for test not met: " + classifier;
+
+        System.out.println("Before clearing all referents: " + classifier);
+        drainReferenceQueueAndClearReferents();
+
+        if (referentGen == YOUNG) {
+            WB.youngGC();
+        } else {
+            // Print addresses of old references before old GC.
+            var oldToOld = classifier.getReferences(referentGen, referentGen);
+            printReferences(OLD, OLD, oldToOld);
+            WB.shenandoahOldGC();
+        }
+
+        int cleared = removeClearedWeakReferences();
+        classifier.classify();
+        System.out.println("After " + name(referentGen) + " GC, cleared: " + cleared + ", referents: " + classifier);
+
+        assertReferencesCleared(referentGen, referentGen, classifier);
+        assertReferencesCleared(referenceGen, referentGen, classifier);
+    }
+
+    private static void assertReferencesCleared(int referenceGen, int referentGen, ReferenceClassifier classifier) {
+        var references = classifier.getReferences(referenceGen, referentGen);
+        if (references.isEmpty()) {
+            return;
+        }
+
+        // Addresses here could be relocated and may not match logs from old gen collection
+        printReferences(referenceGen, referentGen, references);
+        throw new AssertionError(name(referenceGen) + " to " + name(referentGen) + " referents should have been cleared");
+    }
+
+    private static void printReferences(int referenceGen, int referentGen, HashSet<WeakReference<LeakedObject>> references) {
+        final int max_references = 10;
+        int references_shown = 0;
+        for (var reference : references) {
+            if (references_shown > max_references) {
+                break;
+            }
+
+            ++references_shown;
+            System.out.printf("reference: 0x%x in %s refers to 0x%x in %s\n",
+                    WB.getObjectAddress(reference), name(referenceGen),
+                    WB.getObjectAddress(reference.get()), name(referentGen));
+        }
+    }
+
+    private static int removeClearedWeakReferences() {
+        int cleared = 0;
+        Reference<? extends LeakedObject> weak;
+        while ((weak = refQueue.poll()) != null) {
+            WEAK_REFS.remove(weak);
+            ++cleared;
+        }
+        return cleared;
+    }
+
+    private static void drainReferenceQueueAndClearReferents() {
+        // Drain the reference queue of any incidental weak references from outside the test
+        while (refQueue.poll() != null);
+
+        // Make all our referents unreachable now
+        REFERENTS.clear();
+    }
+
+    private static void useMemoryUntil(Supplier<Boolean> exitCondition) {
+        // This is not an exact science here. We want to create weak references
+        // with referents in a different region. We also don't want to allocate
+        // everything up front, or else they will all end up in old together, and
+        // we won't get a good mix of cross generational pointers.
+        for (int i = 0; i < REGIONS_TO_FILL; i += 4) {
+            allocateReferents(2);
+            allocateReferences(2);
+
+            WB.youngGC();
+            if (exitCondition.get()) {
+                break;
+            }
+        }
+    }
+
+    private static void allocateReferents(int regions) {
+        for (int j = 0; j < regions; j++) {
+            for (int i = 0; i < OBJECTS_PER_REGION; ++i) {
+                var leakedObject = new LeakedObject();
+                REFERENTS.add(leakedObject);
+                byte[] garbage = new byte[OBJECT_SIZE];
+                garbage[i % garbage.length] = (byte) i;
+            }
+        }
+    }
+
+    private static void allocateReferences(int regions) {
+        // Fill up regions that are equal parts garbage and references.
+        // We want to create cross region references to increase the chances
+        // of cross generational references.
+        int referentCount = REFERENTS.size() - 1;
+        for (int j = 0; j < regions; j++) {
+            for (int i = 0; i < OBJECTS_PER_REGION; ++i) {
+                var leakedObject = REFERENTS.get(referentCount - i);
+                var ref = new WeakReference<>(leakedObject, refQueue);
+                WEAK_REFS.add(ref);
+                byte[] garbage = new byte[OBJECT_SIZE];
+                garbage[i % garbage.length] = (byte) i;
+            }
+        }
+    }
+}
diff --git a/test/lib/jdk/test/whitebox/WhiteBox.java b/test/lib/jdk/test/whitebox/WhiteBox.java
index cc570caef7c1e..215948a87921e 100644
--- a/test/lib/jdk/test/whitebox/WhiteBox.java
+++ b/test/lib/jdk/test/whitebox/WhiteBox.java
@@ -315,6 +315,13 @@ public Object[] parseCommandLine(String commandline, char delim, Dia
    */
   public native long[] g1GetMixedGCInfo(int liveness);

+  // Shenandoah
+
+  public native int shenandoahRegionSize();
+  public native int shenandoahRegionCount();
+  public native int shenandoahRegionIndex(Object o);
+  public native boolean shenandoahOldGC();
+
   // NMT
   public native long NMTMalloc(long size);
   public native void NMTFree(long mem);