From ef5f75478f5908fd3a6bc6a927d6a453adfcc719 Mon Sep 17 00:00:00 2001
From: William Kemper
Date: Thu, 23 Oct 2025 15:32:26 -0700
Subject: [PATCH] Use tags to control log volume, rename methods in collection
 set, general cleanup

---
 .../heuristics/shenandoahOldHeuristics.cpp    | 10 +++++--
 .../gc/shenandoah/shenandoahCollectionSet.cpp |  6 ++--
 .../gc/shenandoah/shenandoahCollectionSet.hpp | 12 ++++----
 .../shenandoahCollectionSet.inline.hpp        |  6 ++--
 .../gc/shenandoah/shenandoahGeneration.cpp    | 30 +++++++++++--------
 .../gc/shenandoah/shenandoahGeneration.hpp    |  2 +-
 .../shenandoah/shenandoahGenerationalHeap.cpp | 21 ++++++-------
 .../share/gc/shenandoah/shenandoahHeap.cpp    |  2 +-
 .../share/gc/shenandoah/shenandoahOldGC.cpp   |  6 ----
 .../gc/shenandoah/shenandoahOldGeneration.cpp |  5 ++++
 .../share/gc/shenandoah/shenandoahTrace.cpp   |  6 ++--
 .../gc/shenandoah/shenandoah_globals.hpp      | 16 +++++-----
 12 files changed, 65 insertions(+), 57 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index 2361a50e76dc5..e963bcc35bb47 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -141,7 +141,7 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
     // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
     // to decrease the capacity of the fragmented memory by the scaled loss.
-    size_t live_data_for_evacuation = r->get_live_data_bytes();
+    const size_t live_data_for_evacuation = r->get_live_data_bytes();
     size_t lost_available = r->free();

     if ((lost_available > 0) && (excess_fragmented_available > 0)) {
@@ -169,7 +169,9 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
       // We were not able to account for the lost free memory within fragmented memory, so we need to take this
       // allocation out of unfragmented memory. Unfragmented memory does not need to account for loss of free.
       if (live_data_for_evacuation > unfragmented_available) {
-        // There is not room to evacuate this region or any that come after it in within the candidates array.
+        // There is no room to evacuate this region or any that come after it within the candidates array.
+        log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)",
+                            unfragmented_available, live_data_for_evacuation, r->index());
         break;
       } else {
         unfragmented_available -= live_data_for_evacuation;
@@ -187,7 +189,9 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
         evacuation_need = 0;
       }
       if (evacuation_need > unfragmented_available) {
-        // There is not room to evacuate this region or any that come after it in within the candidates array.
+        // There is no room to evacuate this region or any that come after it within the candidates array.
+ log_debug(gc, cset)("Not enough unfragmented memory (%zu) to hold evacuees (%zu) from region: (%zu)", + unfragmented_available, live_data_for_evacuation, r->index()); break; } else { unfragmented_available -= evacuation_need; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp index 745d45ace1e25..e58a7f4079608 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp @@ -225,9 +225,9 @@ void ShenandoahCollectionSet::summarize(size_t total_garbage, size_t immediate_g count()); if (garbage() > 0) { - const size_t young_evac_bytes = get_young_bytes_reserved_for_evacuation(); - const size_t promote_evac_bytes = get_young_bytes_to_be_promoted(); - const size_t old_evac_bytes = get_old_bytes_reserved_for_evacuation(); + const size_t young_evac_bytes = get_live_bytes_in_untenurable_regions(); + const size_t promote_evac_bytes = get_live_bytes_in_tenurable_regions(); + const size_t old_evac_bytes = get_live_bytes_in_old_regions(); const size_t total_evac_bytes = young_evac_bytes + promote_evac_bytes + old_evac_bytes; ls.print_cr("Evacuation Targets: " "YOUNG: " PROPERFMT ", " "PROMOTE: " PROPERFMT ", " "OLD: " PROPERFMT ", " "TOTAL: " PROPERFMT, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp index d4a590a3d89a6..a1b77baa2d3cd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp @@ -109,14 +109,14 @@ class ShenandoahCollectionSet : public CHeapObj { // Prints a summary of the collection set when gc+ergo=info void summarize(size_t total_garbage, size_t immediate_garbage, size_t immediate_regions) const; - // Returns the amount of live bytes in young regions in the collection set. It is not known how many of these bytes will be promoted. - inline size_t get_young_bytes_reserved_for_evacuation() const; + // Returns the amount of live bytes in young regions with an age below the tenuring threshold. + inline size_t get_live_bytes_in_untenurable_regions() const; // Returns the amount of live bytes in old regions in the collection set. - inline size_t get_old_bytes_reserved_for_evacuation() const; + inline size_t get_live_bytes_in_old_regions() const; - // Returns the amount of live bytes in young regions with an age above the tenuring threshold. - inline size_t get_young_bytes_to_be_promoted() const; + // Returns the amount of live bytes in young regions with an age at or above the tenuring threshold. + inline size_t get_live_bytes_in_tenurable_regions() const; // Returns the amount of free bytes in young regions in the collection set. 
   size_t get_young_available_bytes_collected() const { return _young_available_bytes_collected; }
@@ -125,7 +125,7 @@ class ShenandoahCollectionSet : public CHeapObj<mtGC> {
   inline size_t get_old_garbage() const;

   bool is_preselected(size_t region_idx) {
-    assert(_preselected_regions != nullptr, "Missing etsablish after abandon");
+    assert(_preselected_regions != nullptr, "Missing establish after abandon");
     return _preselected_regions[region_idx];
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
index 4adcec4fbb552..3ff5f2f81d70b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.inline.hpp
@@ -54,15 +54,15 @@ bool ShenandoahCollectionSet::is_in_loc(void* p) const {
   return _biased_cset_map[index] == 1;
 }

-size_t ShenandoahCollectionSet::get_old_bytes_reserved_for_evacuation() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_old_regions() const {
   return _old_bytes_to_evacuate;
 }

-size_t ShenandoahCollectionSet::get_young_bytes_reserved_for_evacuation() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_untenurable_regions() const {
   return _young_bytes_to_evacuate - _young_bytes_to_promote;
 }

-size_t ShenandoahCollectionSet::get_young_bytes_to_be_promoted() const {
+size_t ShenandoahCollectionSet::get_live_bytes_in_tenurable_regions() const {
   return _young_bytes_to_promote;
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 0c55613efcc23..f82b62d30a54c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -382,11 +382,11 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
   // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
   // to young-gen.
-  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
   ShenandoahOldGeneration* const old_generation = heap->old_generation();
   ShenandoahYoungGeneration* const young_generation = heap->young_generation();

-  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
+  const size_t old_evacuated = collection_set->get_live_bytes_in_old_regions();
   size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * double(old_evacuated));
   size_t old_evacuation_reserve = old_generation->get_evacuation_reserve();
@@ -399,14 +399,15 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap,
     // Leave old_evac_reserve as previously configured
   } else if (old_evacuated_committed < old_evacuation_reserve) {
     // This happens if the old-gen collection consumes less than full budget.
+ log_debug(gc, cset)("Shrinking old evac reserve to match old_evac_commited: " PROPERFMT, PROPERFMTARGS(old_evacuated_committed)); old_evacuation_reserve = old_evacuated_committed; old_generation->set_evacuation_reserve(old_evacuation_reserve); } - size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted(); + size_t young_advance_promoted = collection_set->get_live_bytes_in_tenurable_regions(); size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * double(young_advance_promoted)); - size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation(); + size_t young_evacuated = collection_set->get_live_bytes_in_untenurable_regions(); size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * double(young_evacuated)); size_t total_young_available = young_generation->available_with_reserve(); @@ -524,7 +525,7 @@ inline void assert_no_in_place_promotions() { // that this allows us to more accurately budget memory to hold the results of evacuation. Memory for evacuation // of aged regions must be reserved in the old generation. Memory for evacuation of all other regions must be // reserved in the young generation. -size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { +size_t ShenandoahGeneration::select_aged_regions(const size_t old_promotion_reserve) { // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle. assert_no_in_place_promotions(); @@ -537,7 +538,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { const size_t pip_used_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahGenerationalMinPIPUsage) / 100; - size_t old_consumed = 0; size_t promo_potential = 0; size_t candidates = 0; @@ -560,7 +560,7 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { } if (heap->is_tenurable(r)) { if ((r->garbage() < old_garbage_threshold) && (r->used() > pip_used_threshold)) { - // We prefer to promote this region in place because is has a small amount of garbage and a large usage. + // We prefer to promote this region in place because it has a small amount of garbage and a large usage. HeapWord* tams = ctx->top_at_mark_start(r); HeapWord* original_top = r->top(); if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) { @@ -620,17 +620,21 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // Note that we keep going even if one region is excluded from selection. // Subsequent regions may be selected if they have smaller live data. } + + log_info(gc, ergo)("Promotion potential of aged regions with sufficient garbage: " PROPERFMT, PROPERFMTARGS(promo_potential)); + // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions // that qualify to be promoted by evacuation. 
+  size_t old_consumed = 0;
   if (candidates > 0) {
     size_t selected_regions = 0;
     size_t selected_live = 0;
     QuickSort::sort(sorted_regions, candidates, compare_by_aged_live);
     for (size_t i = 0; i < candidates; i++) {
       ShenandoahHeapRegion* const region = sorted_regions[i]._region;
-      size_t region_live_data = sorted_regions[i]._live_data;
-      size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
-      if (old_consumed + promotion_need <= old_available) {
+      const size_t region_live_data = sorted_regions[i]._live_data;
+      const size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
+      if (old_consumed + promotion_need <= old_promotion_reserve) {
         old_consumed += promotion_need;
         candidate_regions_for_promotion_by_copy[region->index()] = true;
         selected_regions++;
@@ -644,9 +648,9 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) {
       // We keep going even if one region is excluded from selection because we need to accumulate all eligible
       // regions that are not preselected into promo_potential
     }
-    log_debug(gc)("Preselected %zu regions containing %zu live bytes,"
-                  " consuming: %zu of budgeted: %zu",
-                  selected_regions, selected_live, old_consumed, old_available);
+    log_debug(gc, ergo)("Preselected %zu regions containing " PROPERFMT " live data,"
+                        " consuming: " PROPERFMT " of budgeted: " PROPERFMT,
+                        selected_regions, PROPERFMTARGS(selected_live), PROPERFMTARGS(old_consumed), PROPERFMTARGS(old_promotion_reserve));
   }

   heap->old_generation()->set_pad_for_promote_in_place(promote_in_place_pad);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index d2e25176c1fce..b926a5a0913c9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -97,7 +97,7 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
   // regions, which are marked in the preselected_regions() indicator
   // array of the heap's collection set, which should be initialized
   // to false.
-  size_t select_aged_regions(size_t old_available);
+  size_t select_aged_regions(size_t old_promotion_reserve);

   size_t available(size_t capacity) const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index bc653b030a8ca..b008592dbf631 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -110,7 +110,6 @@ void ShenandoahGenerationalHeap::initialize_heuristics() {
   _generation_sizer.heap_size_changed(max_capacity());
   size_t initial_capacity_young = _generation_sizer.max_young_size();
   size_t max_capacity_young = _generation_sizer.max_young_size();
-  size_t initial_capacity_old = max_capacity() - max_capacity_young;
   size_t max_capacity_old = max_capacity() - initial_capacity_young;

   _young_generation = new ShenandoahYoungGeneration(max_workers(), max_capacity_young);
@@ -267,6 +266,7 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, Shena
       // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
       // where abundance is defined as >= ShenGenHeap::plab_min_size(). In the former case, we try shrinking the
       // desired PLAB size to the minimum and retry PLAB allocation to avoid cascading of shared memory allocations.
+      // Shrinking the desired PLAB size may allow us to eke out a small PLAB while staying beneath evacuation reserve.
       if (plab->words_remaining() < plab_min_size()) {
         ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
         copy = allocate_from_plab(thread, size, is_promotion);
@@ -436,9 +436,8 @@ inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread,
 // Establish a new PLAB and allocate size HeapWords within it.
 HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
-  // New object should fit the PLAB size
   assert(mode()->is_generational(), "PLABs only relevant to generational GC");
+  const size_t plab_min_size = this->plab_min_size();

   // PLABs are aligned to card boundaries to avoid synchronization with concurrent
   // allocations in other PLABs.
@@ -451,23 +450,24 @@ HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, si
   }

   // Expand aggressively, doubling at each refill in this epoch, ceiling at plab_max_size()
-  size_t future_size = MIN2(cur_size * 2, plab_max_size());
+  const size_t future_size = MIN2(cur_size * 2, plab_max_size());

   // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
   // are card multiples.)
   assert(is_aligned(future_size, CardTable::card_size_in_words()),
          "Card multiple by construction, future_size: %zu"
-         ", card_size: %zu, cur_size: %zu, max: %zu",
-         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());
+         ", card_size: %u, cur_size: %zu, max: %zu",
+         future_size, CardTable::card_size_in_words(), cur_size, plab_max_size());

   // Record new heuristic value even if we take any shortcut. This captures
   // the case when moderately-sized objects always take a shortcut. At some point,
   // heuristics should catch up with them. Note that the requested cur_size may
   // not be honored, but we remember that this is the preferred size.
-  log_debug(gc, free)("Set new PLAB size: %zu", future_size);
+  log_debug(gc, plab)("Set next PLAB refill size: %zu bytes", future_size * HeapWordSize);
   ShenandoahThreadLocalData::set_plab_size(thread, future_size);
+
   if (cur_size < size) {
     // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
     // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
-    log_debug(gc, free)("Current PLAB size (%zu) is too small for %zu", cur_size, size);
+    log_debug(gc, plab)("Current PLAB size (%zu) is too small for %zu", cur_size * HeapWordSize, size * HeapWordSize);
     return nullptr;
   }
@@ -553,6 +553,7 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
   ShenandoahThreadLocalData::reset_plab_promoted(thread);
   ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
   if (not_promoted > 0) {
+    log_debug(gc, plab)("Retire PLAB, unexpend unpromoted: %zu", not_promoted * HeapWordSize);
     old_generation()->unexpend_promoted(not_promoted);
   }
   const size_t original_waste = plab->waste();
@@ -564,8 +565,8 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
   if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
     // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
     // safely walk the region backing the plab.
- log_debug(gc)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, - plab->waste() - original_waste, p2i(top)); + log_debug(gc, plab)("retire_plab() is registering remnant of size %zu at " PTR_FORMAT, + (plab->waste() - original_waste) * HeapWordSize, p2i(top)); // No lock is necessary because the PLAB memory is aligned on card boundaries. old_generation()->card_scan()->register_object_without_lock(top); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index cb22c794d8554..10c86c12d1d42 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1264,7 +1264,7 @@ void ShenandoahHeap::evacuate_collection_set(ShenandoahGeneration* generation, b void ShenandoahHeap::concurrent_prepare_for_update_refs() { { - // Java threads take this lock while they are being attached and added to the list of thread. + // Java threads take this lock while they are being attached and added to the list of threads. // If another thread holds this lock before we update the gc state, it will receive a stale // gc state, but they will have been added to the list of java threads and so will be corrected // by the following handshake. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 3e9f3a490df9f..d980a9e3e0c21 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -69,12 +69,6 @@ void ShenandoahOldGC::op_final_mark() { heap->set_unload_classes(false); heap->prepare_concurrent_roots(); - // Believe verification following old-gen concurrent mark needs to be different than verification following - // young-gen concurrent mark, so am commenting this out for now: - // if (ShenandoahVerify) { - // heap->verifier()->verify_after_concmark(); - // } - if (VerifyAfterGC) { Universe::verify(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index ac3107eb396ec..cb5e9f4b0267d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -299,6 +299,8 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc if (can_promote(actual_size)) { // Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach. // When we retire this plab, we'll unexpend what we don't really use. + log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu", + actual_size, get_promoted_expended(), get_promoted_reserve()); expend_promoted(actual_size); ShenandoahThreadLocalData::enable_plab_promotions(thread); ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size); @@ -306,9 +308,12 @@ ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAlloc // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations. ShenandoahThreadLocalData::disable_plab_promotions(thread); ShenandoahThreadLocalData::set_plab_actual_size(thread, 0); + log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? 
%s", + actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions())); } } else if (req.is_promotion()) { // Shared promotion. + log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size); expend_promoted(actual_size); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp index a786f8ae216b3..bbb44348355b6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahTrace.cpp @@ -37,9 +37,9 @@ void ShenandoahTracer::report_evacuation_info(const ShenandoahCollectionSet* cse e.set_cSetRegions(cset->count()); e.set_cSetUsedBefore(cset->used()); e.set_cSetUsedAfter(cset->live()); - e.set_collectedOld(cset->get_old_bytes_reserved_for_evacuation()); - e.set_collectedPromoted(cset->get_young_bytes_to_be_promoted()); - e.set_collectedYoung(cset->get_young_bytes_reserved_for_evacuation()); + e.set_collectedOld(cset->get_live_bytes_in_old_regions()); + e.set_collectedPromoted(cset->get_live_bytes_in_tenurable_regions()); + e.set_collectedYoung(cset->get_live_bytes_in_untenurable_regions()); e.set_regionsPromotedHumongous(regions_promoted_humongous); e.set_regionsPromotedRegular(regions_promoted_regular); e.set_regularPromotedGarbage(regular_promoted_garbage); diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp index d1531c51236f5..8bd59beb93b7a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp @@ -387,13 +387,13 @@ \ product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL, \ "The maximum proportion of evacuation from old-gen memory, " \ - "expressed as a percentage. The default value 75 denotes that no" \ - "more than 75% of the collection set evacuation workload may be " \ - "towards evacuation of old-gen heap regions. This limits both the"\ - "promotion of aged regions and the compaction of existing old " \ - "regions. A value of 75 denotes that the total evacuation work" \ - "may increase to up to four times the young gen evacuation work." \ - "A larger value allows quicker promotion and allows" \ + "expressed as a percentage. The default value 75 denotes that " \ + "no more than 75% of the collection set evacuation workload may " \ + "be towards evacuation of old-gen heap regions. This limits both "\ + "the promotion of aged regions and the compaction of existing " \ + "old regions. A value of 75 denotes that the total evacuation " \ + "work may increase to up to four times the young gen evacuation " \ + "work. A larger value allows quicker promotion and allows " \ "a smaller number of mixed evacuations to process " \ "the entire list of old-gen collection candidates at the cost " \ "of an increased disruption of the normal cadence of young-gen " \ @@ -401,7 +401,7 @@ "focus entirely on old-gen memory, allowing no young-gen " \ "regions to be collected, likely resulting in subsequent " \ "allocation failures because the allocation pool is not " \ - "replenished. A value of 0 allows a mixed evacuation to" \ + "replenished. A value of 0 allows a mixed evacuation to " \ "focus entirely on young-gen memory, allowing no old-gen " \ "regions to be collected, likely resulting in subsequent " \ "promotion failures and triggering of stop-the-world full GC " \