Commit

Add GC heap hard limit for 32 bit
gbalykov committed Apr 14, 2024
1 parent f84d33c commit 82d6a05
Showing 2 changed files with 130 additions and 23 deletions.
151 changes: 128 additions & 23 deletions src/coreclr/gc/gc.cpp
@@ -1919,7 +1919,16 @@ uint8_t* gc_heap::pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignm
#endif //BACKGROUND_GC && !USE_REGIONS

// This is always power of 2.
#ifdef HOST_64BIT
const size_t min_segment_size_hard_limit = 1024*1024*16;
#else //HOST_64BIT
const size_t min_segment_size_hard_limit = 1024*1024*4;
#endif //HOST_64BIT

#ifndef HOST_64BIT
// Max size of the heap hard limit (2^31) so that it can be aligned and rounded up to a power of 2 without overflowing
const size_t max_heap_hard_limit = (size_t)2 * (size_t)1024 * (size_t)1024 * (size_t)1024;
#endif //!HOST_64BIT

inline
size_t align_on_segment_hard_limit (size_t add)
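A quick aside on why the 32-bit cap above is 2^31: segment and limit sizes get rounded up to a power of two, and 2^31 is the largest power of two a 32-bit size_t can hold. The sketch below illustrates this with a simplified round-up helper written only for this example (it is not the GC's internal round_up_power2).

    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for a "round up to power of 2" helper, written only for
    // this illustration; it returns 0 when the result does not fit in 32 bits.
    static uint32_t round_up_power2_32 (uint32_t value)
    {
        uint32_t result = 1;
        while (result < value)
        {
            uint32_t next = result << 1;
            if (next == 0)      // doubling 2^31 needs 2^32, which does not fit
                return 0;
            result = next;
        }
        return result;
    }

    int main ()
    {
        // 1.5Gb rounds up to 2Gb (2^31) - still representable in a 32-bit size_t.
        assert (round_up_power2_32 (0x60000000u) == 0x80000000u);

        // Anything above 2Gb would need 2^32 and overflows, hence the 2Gb cap.
        assert (round_up_power2_32 (0x80000001u) == 0);
        return 0;
    }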
@@ -7278,10 +7287,6 @@ bool gc_heap::virtual_commit (void* address, size_t size, int bucket, int h_numb
*
* Note : We never commit into free directly, so bucket != recorded_committed_free_bucket
*/
#ifndef HOST_64BIT
assert (heap_hard_limit == 0);
#endif //!HOST_64BIT

assert(0 <= bucket && bucket < recorded_committed_bucket_counts);
assert(bucket < total_oh_count || h_number == -1);
assert(bucket != recorded_committed_free_bucket);
@@ -7385,10 +7390,6 @@ bool gc_heap::virtual_decommit (void* address, size_t size, int bucket, int h_nu
* Case 2: This is for bookkeeping - the bucket will be recorded_committed_bookkeeping_bucket, and the h_number will be -1
* Case 3: This is for free - the bucket will be recorded_committed_free_bucket, and the h_number will be -1
*/
#ifndef HOST_64BIT
assert (heap_hard_limit == 0);
#endif //!HOST_64BIT

assert(0 <= bucket && bucket < recorded_committed_bucket_counts);
assert(bucket < total_oh_count || h_number == -1);

@@ -14167,6 +14168,11 @@ HRESULT gc_heap::initialize_gc (size_t soh_segment_size,
return E_OUTOFMEMORY;
if (use_large_pages_p)
{
#ifndef HOST_64BIT
// Large pages are not supported on 32bit
assert (false);
#endif //!HOST_64BIT

if (heap_hard_limit_oh[soh])
{
heap_hard_limit_oh[soh] = soh_segment_size * number_of_heaps;
@@ -20774,12 +20780,12 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_before_oom);
full_compact_gc_p = true;
}
else if ((current_total_committed * 10) >= (heap_hard_limit * 9))
else if (((uint64_t)current_total_committed * (uint64_t)10) >= ((uint64_t)heap_hard_limit * (uint64_t)9))
{
size_t loh_frag = get_total_gen_fragmentation (loh_generation);

// If the LOH frag is >= 1/8 it's worth compacting it
if ((loh_frag * 8) >= heap_hard_limit)
if (loh_frag >= heap_hard_limit / 8)
{
dprintf (GTC_LOG, ("loh frag: %zd > 1/8 of limit %zd", loh_frag, (heap_hard_limit / 8)));
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_frag);
@@ -20790,7 +20796,7 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
// If there's not much fragmentation but it looks like it'll be productive to
// collect LOH, do that.
size_t est_loh_reclaim = get_total_gen_estimated_reclaim (loh_generation);
if ((est_loh_reclaim * 8) >= heap_hard_limit)
if (est_loh_reclaim >= heap_hard_limit / 8)
{
gc_data_global.gen_to_condemn_reasons.set_condition(gen_joined_limit_loh_reclaim);
full_compact_gc_p = true;
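The two changes in this hunk are both overflow fixes for a 32-bit size_t: the 90%-of-limit check now multiplies in 64 bits, and the 1/8-of-limit checks divide the limit instead of multiplying the fragmentation or reclaim estimate by 8. A minimal sketch of the failure mode and both fixes, using made-up sizes (400Mb limit, 416Mb committed):

    #include <cassert>
    #include <cstdint>

    int main ()
    {
        // Pretend size_t is 32 bits wide, as on a 32-bit host.
        typedef uint32_t size32_t;

        size32_t heap_hard_limit         = 0x19000000u; // 400Mb limit (made-up value)
        size32_t current_total_committed = 0x1A000000u; // 416Mb committed (made-up value)

        // Broken on 32 bit: committed * 10 wraps around, so the
        // "committed >= 90% of limit" check comes out false here.
        bool wrong = (size32_t)(current_total_committed * 10u) >= (size32_t)(heap_hard_limit * 9u);

        // Fix 1: widen to 64 bits before multiplying.
        bool right = ((uint64_t)current_total_committed * (uint64_t)10) >= ((uint64_t)heap_hard_limit * (uint64_t)9);

        // Fix 2 (used for the 1/8 checks): divide the limit instead of
        // multiplying the other side, which cannot overflow.
        size32_t loh_frag = 0x04000000u;                // 64Mb fragmentation (made-up value)
        bool frag_check = loh_frag >= heap_hard_limit / 8;

        assert (right);      // 416Mb really is above 90% of 400Mb
        assert (!wrong);     // ...but the 32-bit multiplication says otherwise
        assert (frag_check); // 64Mb is above 1/8 of 400Mb (50Mb)
        return 0;
    }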
@@ -24369,7 +24375,7 @@ heap_segment* gc_heap::unlink_first_rw_region (int gen_idx)
assert (!heap_segment_read_only_p (region));
dprintf (REGIONS_LOG, ("unlink_first_rw_region on heap: %d gen: %d region: %p", heap_number, gen_idx, heap_segment_mem (region)));

#if defined(_DEBUG) && defined(HOST_64BIT)
#if defined(_DEBUG)
#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
#endif //!COMMITTED_BYTES_SHADOW
@@ -24384,7 +24390,7 @@ heap_segment* gc_heap::unlink_first_rw_region (int gen_idx)
g_heaps[old_heap]->committed_by_oh_per_heap[old_oh] -= committed;
check_commit_cs.Leave();
}
#endif // _DEBUG && HOST_64BIT
#endif // _DEBUG

set_heap_for_contained_basic_regions (region, nullptr);

@@ -24412,7 +24418,7 @@ void gc_heap::thread_rw_region_front (int gen_idx, heap_segment* region)
}
dprintf (REGIONS_LOG, ("thread_rw_region_front on heap: %d gen: %d region: %p", heap_number, gen_idx, heap_segment_mem (region)));

#if defined(_DEBUG) && defined(HOST_64BIT)
#if defined(_DEBUG)
#ifndef COMMITTED_BYTES_SHADOW
if (heap_hard_limit)
#endif //!COMMITTED_BYTES_SHADOW
@@ -24427,7 +24433,7 @@ void gc_heap::thread_rw_region_front (int gen_idx, heap_segment* region)
g_heaps[new_heap]->committed_by_oh_per_heap[new_oh] += committed;
check_commit_cs.Leave();
}
#endif // _DEBUG && HOST_64BIT
#endif // _DEBUG

set_heap_for_contained_basic_regions (region, this);
}
@@ -43369,6 +43375,13 @@ void gc_heap::init_static_data()
);
#endif //MULTIPLE_HEAPS

if (heap_hard_limit)
{
size_t gen1_max_size_seg = soh_segment_size / 2;
dprintf (GTC_LOG, ("limit gen1 max %zd->%zd", gen1_max_size, gen1_max_size_seg));
gen1_max_size = min (gen1_max_size, gen1_max_size_seg);
}

size_t gen1_max_size_config = (size_t)GCConfig::GetGCGen1MaxBudget();

if (gen1_max_size_config)
@@ -48480,6 +48493,11 @@ HRESULT GCHeap::Initialize()
{
if (gc_heap::heap_hard_limit)
{
#ifndef HOST_64BIT
// Regions are not supported on 32bit
assert(false);
#endif //!HOST_64BIT

if (gc_heap::heap_hard_limit_oh[soh])
{
gc_heap::regions_range = gc_heap::heap_hard_limit;
@@ -48514,12 +48532,32 @@ HRESULT GCHeap::Initialize()
{
if (gc_heap::heap_hard_limit_oh[soh])
{
// On 32bit we have the following guarantees:
// 0 <= seg_size_from_config <= 1Gb (from max_heap_hard_limit/2)
// 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb (from gc_heap::compute_hard_limit_from_heap_limits)
// 0 <= heap_hard_limit_oh[loh] <= 1Gb or < 2Gb
// 0 <= heap_hard_limit_oh[poh] <= 1Gb or < 2Gb
// 0 <= large_seg_size <= 1Gb or <= 2Gb (alignment and round up)
// 0 <= pin_seg_size <= 1Gb or <= 2Gb (alignment and round up)
// 0 <= soh_segment_size + large_seg_size + pin_seg_size <= 4Gb
// Overflow past 4Gb is ok, because an allocation of size 0 will fail
large_seg_size = max (gc_heap::adjust_segment_size_hard_limit (gc_heap::heap_hard_limit_oh[loh], nhp), seg_size_from_config);
pin_seg_size = max (gc_heap::adjust_segment_size_hard_limit (gc_heap::heap_hard_limit_oh[poh], nhp), seg_size_from_config);
}
else
{
// On 32bit we have the following guarantees:
// 0 <= heap_hard_limit <= 1Gb (from gc_heap::compute_hard_limit)
// 0 <= soh_segment_size <= 1Gb
// 0 <= large_seg_size <= 1Gb
// 0 <= pin_seg_size <= 1Gb
// 0 <= soh_segment_size + large_seg_size + pin_seg_size <= 3Gb
#ifdef HOST_64BIT
large_seg_size = gc_heap::use_large_pages_p ? gc_heap::soh_segment_size : gc_heap::soh_segment_size * 2;
#else //HOST_64BIT
assert (!gc_heap::use_large_pages_p);
large_seg_size = gc_heap::soh_segment_size;
#endif //HOST_64BIT
pin_seg_size = large_seg_size;
}
if (gc_heap::use_large_pages_p)
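A small worked example of the worst case that the 32-bit bounds above allow (all values made up): one segment size rounds up to 2Gb and the other two to 1Gb each, so the 32-bit sum wraps to 0 - which is acceptable because, as the comment notes, an allocation of size 0 simply fails instead of silently reserving too little.

    #include <cassert>
    #include <cstdint>

    int main ()
    {
        // Worst case permitted by the bounds quoted above (made-up values):
        // the soh segment rounds up to 2Gb, the loh and poh segments to 1Gb each.
        uint32_t soh_segment_size = 0x80000000u; // 2Gb
        uint32_t large_seg_size   = 0x40000000u; // 1Gb
        uint32_t pin_seg_size     = 0x40000000u; // 1Gb

        // On a 32-bit size_t the total wraps around to 0 ...
        uint32_t total_reserve = soh_segment_size + large_seg_size + pin_seg_size;
        assert (total_reserve == 0);

        // ... which is fine: a request to reserve 0 bytes fails, so initialization
        // reports out-of-memory instead of continuing with an undersized reservation.
        bool reserve_ok = (total_reserve != 0); // stand-in for the real reservation call
        assert (!reserve_ok);
        return 0;
    }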
@@ -52683,16 +52721,45 @@ int GCHeap::RefreshMemoryLimit()
return gc_heap::refresh_memory_limit();
}

bool gc_heap::compute_hard_limit_from_heap_limits()
{
#ifndef HOST_64BIT
// need to consider overflows:
if (! ((heap_hard_limit_oh[soh] < max_heap_hard_limit && heap_hard_limit_oh[loh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[poh] <= max_heap_hard_limit / 2)
|| (heap_hard_limit_oh[soh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[loh] < max_heap_hard_limit && heap_hard_limit_oh[poh] <= max_heap_hard_limit / 2)
|| (heap_hard_limit_oh[soh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[loh] <= max_heap_hard_limit / 2 && heap_hard_limit_oh[poh] < max_heap_hard_limit)))
{
return false;
}
#endif //!HOST_64BIT

heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
return true;
}

// On 32bit we have the following guarantees for the limits:
// 1) heap-specific limits:
// 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb
// a) 0 <= heap_hard_limit_oh[soh] < 2Gb, 0 <= heap_hard_limit_oh[loh] <= 1Gb, 0 <= heap_hard_limit_oh[poh] <= 1Gb
// b) 0 <= heap_hard_limit_oh[soh] <= 1Gb, 0 <= heap_hard_limit_oh[loh] < 2Gb, 0 <= heap_hard_limit_oh[poh] <= 1Gb
// c) 0 <= heap_hard_limit_oh[soh] <= 1Gb, 0 <= heap_hard_limit_oh[loh] <= 1Gb, 0 <= heap_hard_limit_oh[poh] < 2Gb
// 2) same limit for all heaps:
// 0 <= heap_hard_limit <= 1Gb
//
// These ranges guarantee that the calculation of soh_segment_size, loh_segment_size and poh_segment_size (with alignment and rounding up) won't overflow,
// and neither will the calculation of their sum (overflow to 0 is allowed, because an allocation of size 0 will fail later).
bool gc_heap::compute_hard_limit()
{
heap_hard_limit_oh[soh] = 0;
#ifdef HOST_64BIT

heap_hard_limit = (size_t)GCConfig::GetGCHeapHardLimit();
heap_hard_limit_oh[soh] = (size_t)GCConfig::GetGCHeapHardLimitSOH();
heap_hard_limit_oh[loh] = (size_t)GCConfig::GetGCHeapHardLimitLOH();
heap_hard_limit_oh[poh] = (size_t)GCConfig::GetGCHeapHardLimitPOH();

#ifdef HOST_64BIT
use_large_pages_p = GCConfig::GetGCLargePages();
#endif //HOST_64BIT

if (heap_hard_limit_oh[soh] || heap_hard_limit_oh[loh] || heap_hard_limit_oh[poh])
{
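To make the overflow check in compute_hard_limit_from_heap_limits above concrete: it lets at most one of the three per-object-heap limits exceed 1Gb (staying strictly below 2Gb) while the other two stay at or below 1Gb, so the 32-bit sum heap_hard_limit stays strictly below 4Gb and cannot wrap. A minimal sketch with made-up limit values:

    #include <cassert>
    #include <cstdint>

    static const uint32_t max_heap_hard_limit = 0x80000000u; // 2Gb, as on 32-bit hosts

    // Same shape as the check in compute_hard_limit_from_heap_limits: at most one
    // limit may be in (1Gb, 2Gb), the other two must stay <= 1Gb.
    static bool limits_fit_32bit (uint32_t soh, uint32_t loh, uint32_t poh)
    {
        return (soh <  max_heap_hard_limit     && loh <= max_heap_hard_limit / 2 && poh <= max_heap_hard_limit / 2)
            || (soh <= max_heap_hard_limit / 2 && loh <  max_heap_hard_limit     && poh <= max_heap_hard_limit / 2)
            || (soh <= max_heap_hard_limit / 2 && loh <= max_heap_hard_limit / 2 && poh <  max_heap_hard_limit);
    }

    int main ()
    {
        // 1.5Gb + 1Gb + 1Gb = 3.5Gb < 4Gb: accepted, and the 32-bit sum is exact.
        assert (limits_fit_32bit (0x60000000u, 0x40000000u, 0x40000000u));
        assert ((uint32_t)(0x60000000u + 0x40000000u + 0x40000000u) == 0xE0000000u);

        // Two limits above 1Gb are rejected - their sum could reach or exceed 4Gb
        // and wrap around in a 32-bit size_t.
        assert (!limits_fit_32bit (0x60000000u, 0x60000000u, 0x40000000u));
        return 0;
    }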
@@ -52704,8 +52771,10 @@ bool gc_heap::compute_hard_limit()
{
return false;
}
heap_hard_limit = heap_hard_limit_oh[soh] +
heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];
if (!compute_hard_limit_from_heap_limits())
{
return false;
}
}
else
{
@@ -52733,9 +52802,22 @@ bool gc_heap::compute_hard_limit()
heap_hard_limit_oh[soh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_soh / (uint64_t)100);
heap_hard_limit_oh[loh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_loh / (uint64_t)100);
heap_hard_limit_oh[poh] = (size_t)(total_physical_mem * (uint64_t)percent_of_mem_poh / (uint64_t)100);
heap_hard_limit = heap_hard_limit_oh[soh] +
heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh];

if (!compute_hard_limit_from_heap_limits())
{
return false;
}
}
#ifndef HOST_64BIT
else
{
// need to consider overflows
if (heap_hard_limit > max_heap_hard_limit / 2)
{
return false;
}
}
#endif //!HOST_64BIT
}

if (heap_hard_limit_oh[soh] && (!heap_hard_limit_oh[poh]) && (!use_large_pages_p))
@@ -52749,9 +52831,17 @@ bool gc_heap::compute_hard_limit()
if ((percent_of_mem > 0) && (percent_of_mem < 100))
{
heap_hard_limit = (size_t)(total_physical_mem * (uint64_t)percent_of_mem / (uint64_t)100);

#ifndef HOST_64BIT
// need to consider overflows
if (heap_hard_limit > max_heap_hard_limit / 2)
{
return false;
}
#endif //!HOST_64BIT
}
}
#endif //HOST_64BIT

return true;
}

@@ -52776,12 +52866,12 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin
}
}
}
#endif //HOST_64BIT

if (heap_hard_limit && (heap_hard_limit < new_current_total_committed))
{
return false;
}
#endif //HOST_64BIT

#ifdef USE_REGIONS
{
@@ -52800,9 +52890,24 @@ bool gc_heap::compute_memory_settings(bool is_initialization, uint32_t& nhp, uin
seg_size_from_config = (size_t)GCConfig::GetSegmentSize();
if (seg_size_from_config)
{
seg_size_from_config = adjust_segment_size_hard_limit_va (seg_size_from_config);
seg_size_from_config = use_large_pages_p ? align_on_segment_hard_limit (seg_size_from_config) :
#ifdef HOST_64BIT
round_up_power2 (seg_size_from_config);
#else //HOST_64BIT
round_down_power2 (seg_size_from_config);
seg_size_from_config = min (seg_size_from_config, max_heap_hard_limit / 2);
#endif //HOST_64BIT
}

// On 32bit we have the following guarantees:
// 0 <= seg_size_from_config <= 1Gb (from max_heap_hard_limit/2)
// a) heap-specific limits:
// 0 <= (heap_hard_limit = heap_hard_limit_oh[soh] + heap_hard_limit_oh[loh] + heap_hard_limit_oh[poh]) < 4Gb (from gc_heap::compute_hard_limit_from_heap_limits)
// 0 <= heap_hard_limit_oh[soh] <= 1Gb or < 2Gb
// 0 <= soh_segment_size <= 1Gb or <= 2Gb (alignment and round up)
// b) same limit for all heaps:
// 0 <= heap_hard_limit <= 1Gb
// 0 <= soh_segment_size <= 1Gb
size_t limit_to_check = (heap_hard_limit_oh[soh] ? heap_hard_limit_oh[soh] : heap_hard_limit);
soh_segment_size = max (adjust_segment_size_hard_limit (limit_to_check, nhp), seg_size_from_config);
}
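A sketch of what the 32-bit branch above does to a user-configured segment size, using a made-up config value and a simplified round-down helper (not the GC's round_down_power2): the size is rounded down to a power of two and clamped to 1Gb (max_heap_hard_limit / 2), so taking the max with the limit-derived segment size keeps soh_segment_size within the bounds listed in the comment.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Simplified stand-in for round_down_power2, written only for this example.
    static uint32_t round_down_power2_32 (uint32_t value)
    {
        uint32_t result = 1;
        while ((uint64_t)result * 2 <= value)
            result *= 2;
        return result;
    }

    int main ()
    {
        const uint32_t max_heap_hard_limit = 0x80000000u; // 2Gb

        // Made-up user config: a 2.25Gb segment size request.
        uint32_t seg_size_from_config = 0x90000000u;

        // 32-bit path: round *down* (not up) and clamp to 1Gb.
        seg_size_from_config = round_down_power2_32 (seg_size_from_config);
        seg_size_from_config = std::min (seg_size_from_config, max_heap_hard_limit / 2);
        assert (seg_size_from_config == 0x40000000u); // 1Gb

        // soh_segment_size = max (limit-derived size, config size) therefore stays
        // within the 1Gb / 2Gb bounds described in the comment above.
        uint32_t limit_derived = 0x20000000u;          // 512Mb, made-up value
        uint32_t soh_segment_size = std::max (limit_derived, seg_size_from_config);
        assert (soh_segment_size == 0x40000000u);
        return 0;
    }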
2 changes: 2 additions & 0 deletions src/coreclr/gc/gcpriv.h
Expand Up @@ -3339,6 +3339,8 @@ class gc_heap

PER_HEAP_ISOLATED_METHOD BOOL dt_high_memory_load_p();

PER_HEAP_ISOLATED_METHOD bool compute_hard_limit_from_heap_limits();

PER_HEAP_ISOLATED_METHOD bool compute_hard_limit();

PER_HEAP_ISOLATED_METHOD bool compute_memory_settings(bool is_initialization, uint32_t& nhp, uint32_t nhp_from_config, size_t& seg_size_from_config,
