Delay accounting of frees to when the GC runs
gbaraldi committed Oct 23, 2023
1 parent 42f291d commit b6d008f
Showing 1 changed file with 18 additions and 19 deletions.
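In outline: each thread now only accumulates the bytes it frees into its per-thread free_acc counter, and the global heap-size estimate is charged for those frees once, when a collection actually runs, instead of eagerly whenever the accumulator crosses 16 KiB (the threshold in the removed code). A minimal standalone sketch of the pattern, using C11 atomics — the names thread_free_acc, global_heap_size, accum_free, and gc_account_frees are illustrative, not Julia's actual identifiers:

#include <stdatomic.h>
#include <stdint.h>

// Illustrative state; the real code keeps these in jl_ptls_t and gc_heap_stats.
static _Atomic uint64_t thread_free_acc;   // bytes freed since the last GC
static _Atomic uint64_t global_heap_size;  // runtime-wide heap-size estimate

// Free path: only bump the per-thread accumulator; never touch the
// global estimate here (this is the "delay" in the commit title).
static void accum_free(uint64_t sz)
{
    atomic_store_explicit(&thread_free_acc,
        atomic_load_explicit(&thread_free_acc, memory_order_relaxed) + sz,
        memory_order_relaxed);
}

// GC path: fold the accumulated frees into the global estimate once,
// then reset the accumulator for the next cycle.
static void gc_account_frees(void)
{
    uint64_t freed = atomic_exchange_explicit(&thread_free_acc, 0,
                                              memory_order_relaxed);
    atomic_fetch_sub_explicit(&global_heap_size, freed, memory_order_relaxed);
}

The old code flushed free_acc into heap_size mid-cycle, so the estimate the GC heuristics read could shrink between collections; after this change the subtraction happens only at collection time (see the last hunk below).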
37 changes: 18 additions & 19 deletions src/gc.c
@@ -7,6 +7,7 @@
 #ifdef __GLIBC__
 #include <malloc.h> // for malloc_trim
 #endif
+#include <stdbool.h>

@vtjnash (Member) commented on Oct 23, 2023:

I am somewhat reluctant to start adding the stdbool header: it looks like C++, but it seems unspecified whether it is ABI-compatible with it (and the header will be deprecated in C23, when these symbols become reserved keywords instead).

@gbaraldi (Author, Member) replied on Oct 26, 2023:

I can use ints for now, then.
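For reference, a sketch of what that int-based variant would look like — hypothetical toy names, not the committed code (which kept bool):

#include <stdint.h>

static uint64_t heap_size;

/* A plain int flag needs no <stdbool.h>: any nonzero value reads as true,
 * and int has the same layout under both C and C++ compilers. */
static void combine_counts(uint64_t alloc_acc, int update_heap)
{
    if (update_heap)
        heap_size += alloc_acc;
}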


 #ifdef __cplusplus
 extern "C" {
@@ -704,6 +705,7 @@ static uint64_t gc_end_time = 0;
 static int thrash_counter = 0;
 static int thrashing = 0;
 // global variables for GC stats
+static uint64_t freed_in_runtime = 0;

 // Resetting the object to a young object, this is used when marking the
 // finalizer list to collect them the next time because the object is very
@@ -1023,13 +1025,7 @@ STATIC_INLINE void jl_batch_accum_heap_size(jl_ptls_t ptls, uint64_t sz)
 
 STATIC_INLINE void jl_batch_accum_free_size(jl_ptls_t ptls, uint64_t sz)
 {
-    uint64_t free_acc = jl_atomic_load_relaxed(&ptls->gc_num.free_acc) + sz;
-    if (free_acc < 16*1024)
-        jl_atomic_store_relaxed(&ptls->gc_num.free_acc, free_acc);
-    else {
-        jl_atomic_fetch_add_relaxed(&gc_heap_stats.heap_size, -free_acc);
-        jl_atomic_store_relaxed(&ptls->gc_num.free_acc, 0);
-    }
+    jl_atomic_store_relaxed(&ptls->gc_num.free_acc, jl_atomic_load_relaxed(&ptls->gc_num.free_acc) + sz);
 }

 // big value list
@@ -1170,8 +1166,8 @@ void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT
         jl_atomic_load_relaxed(&ptls->gc_num.allocd) + sz);
     jl_batch_accum_heap_size(ptls, sz);
 }
-
-static void combine_thread_gc_counts(jl_gc_num_t *dest) JL_NOTSAFEPOINT
+// Only safe to update the heap inside the GC
+static void combine_thread_gc_counts(jl_gc_num_t *dest, bool update_heap) JL_NOTSAFEPOINT
 {
     int gc_n_threads;
     jl_ptls_t* gc_all_tls_states;
@@ -1185,13 +1181,14 @@ static void combine_thread_gc_counts(jl_gc_num_t *dest) JL_NOTSAFEPOINT
             dest->realloc += jl_atomic_load_relaxed(&ptls->gc_num.realloc);
             dest->poolalloc += jl_atomic_load_relaxed(&ptls->gc_num.poolalloc);
             dest->bigalloc += jl_atomic_load_relaxed(&ptls->gc_num.bigalloc);
-            uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_num.alloc_acc);
-            uint64_t free_acc = jl_atomic_load_relaxed(&ptls->gc_num.free_acc);
             dest->freed += jl_atomic_load_relaxed(&ptls->gc_num.free_acc);
-            int64_t diff = alloc_acc - free_acc;
-            jl_atomic_store_relaxed(&gc_heap_stats.heap_size, diff + jl_atomic_load_relaxed(&gc_heap_stats.heap_size));
-            jl_atomic_store_relaxed(&ptls->gc_num.alloc_acc, 0);
-            jl_atomic_store_relaxed(&ptls->gc_num.free_acc, 0);
+            if (update_heap) {
+                uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_num.alloc_acc);
+                freed_in_runtime = jl_atomic_load_relaxed(&ptls->gc_num.free_acc);
+                jl_atomic_store_relaxed(&gc_heap_stats.heap_size, alloc_acc + jl_atomic_load_relaxed(&gc_heap_stats.heap_size));
+                jl_atomic_store_relaxed(&ptls->gc_num.alloc_acc, 0);
+                jl_atomic_store_relaxed(&ptls->gc_num.free_acc, 0);
+            }
         }
     }
 }
@@ -1219,7 +1216,7 @@ static int64_t inc_live_bytes(int64_t inc) JL_NOTSAFEPOINT
 
 void jl_gc_reset_alloc_count(void) JL_NOTSAFEPOINT
 {
-    combine_thread_gc_counts(&gc_num);
+    combine_thread_gc_counts(&gc_num, false);
     inc_live_bytes(gc_num.deferred_alloc + gc_num.allocd);
     gc_num.allocd = 0;
     gc_num.deferred_alloc = 0;
@@ -3156,7 +3153,7 @@ JL_DLLEXPORT int jl_gc_is_enabled(void)
 JL_DLLEXPORT void jl_gc_get_total_bytes(int64_t *bytes) JL_NOTSAFEPOINT
 {
     jl_gc_num_t num = gc_num;
-    combine_thread_gc_counts(&num);
+    combine_thread_gc_counts(&num, false);
     // Sync this logic with `base/util.jl:GC_Diff`
     *bytes = (num.total_allocd + num.deferred_alloc + num.allocd);
 }
@@ -3169,7 +3166,7 @@ JL_DLLEXPORT uint64_t jl_gc_total_hrtime(void)
 JL_DLLEXPORT jl_gc_num_t jl_gc_num(void)
 {
     jl_gc_num_t num = gc_num;
-    combine_thread_gc_counts(&num);
+    combine_thread_gc_counts(&num, false);
     return num;
 }

@@ -3219,7 +3216,7 @@ size_t jl_maxrss(void);
 // Only one thread should be running in this function
 static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
 {
-    combine_thread_gc_counts(&gc_num);
+    combine_thread_gc_counts(&gc_num, true);
 
     // We separate the update of the graph from the update of live_bytes here
     // so that the sweep shows a downward trend in memory usage.
@@ -3404,6 +3401,8 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection)
         gc_num.last_incremental_sweep = gc_end_time;
     }
 
+    jl_atomic_store_relaxed(&gc_heap_stats.heap_size, jl_atomic_load_relaxed(&gc_heap_stats.heap_size) - freed_in_runtime);
+    freed_in_runtime = 0;
     size_t heap_size = jl_atomic_load_relaxed(&gc_heap_stats.heap_size);
     double target_allocs = 0.0;
     double min_interval = default_collect_interval;
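To see the behavioral difference the last hunk produces, here is a toy simulation of both accounting policies under assumed numbers (the 16 KiB threshold comes from the removed code; everything else — sizes, counts, names — is invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t heap_eager = 100 << 20, heap_delayed = 100 << 20; // 100 MiB start
    uint64_t acc_eager = 0, acc_delayed = 0;

    for (int i = 0; i < 1000; i++) {       // 1000 frees of 32 KiB each
        uint64_t sz = 32 * 1024;
        acc_eager += sz;
        if (acc_eager >= 16 * 1024) {      // old policy: flush past 16 KiB
            heap_eager -= acc_eager;
            acc_eager = 0;
        }
        acc_delayed += sz;                 // new policy: just accumulate
    }
    printf("before GC: eager=%llu delayed=%llu\n",
           (unsigned long long)heap_eager, (unsigned long long)heap_delayed);

    heap_delayed -= acc_delayed;           // new policy: settle at GC time
    acc_delayed = 0;
    printf("after GC:  eager=%llu delayed=%llu\n",
           (unsigned long long)heap_eager, (unsigned long long)heap_delayed);
}

Both policies converge to the same heap_size once a collection runs; they differ only in what the heuristics observe between collections.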
