
Commit 46e8327

d-netto authored and RAI CI (GitHub Action Automation) committed
reduce contention on page metadata lists during the sweeping phase (JuliaLang#52943)

**EDIT**: fixes JuliaLang#52937 by decreasing the contention on the page lists and only waking GC threads up if there is a sufficiently large number of pages to sweep.

Seems to address the regression from the MWE of JuliaLang#52937:

- master:

```
../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=1
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24841 │     818 │        78 │        740 │           44 │             10088 │       96 │          3 │
│ median  │      24881 │     834 │        83 │        751 │           45 │             10738 │       97 │          3 │
│ maximum │      25002 │     891 │        87 │        803 │           48 │             11074 │      112 │          4 │
│ stdev   │         78 │      29 │         4 │         26 │            1 │               393 │        7 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=8
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      29113 │    5200 │        68 │       5130 │           12 │              9724 │       95 │         18 │
│ median  │      29354 │    5274 │        69 │       5204 │           12 │             10456 │       96 │         18 │
│ maximum │      29472 │    5333 │        70 │       5264 │           14 │             11913 │       97 │         18 │
│ stdev   │        138 │      54 │         1 │         55 │            1 │               937 │        1 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

- PR:

```
../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=1
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24475 │     761 │        77 │        681 │           40 │              9499 │       94 │          3 │
│ median  │      24845 │     775 │        80 │        698 │           43 │             10793 │       97 │          3 │
│ maximum │      25128 │     811 │        85 │        726 │           47 │             12820 │      113 │          3 │
│ stdev   │        240 │      22 │         3 │         21 │            3 │              1236 │        8 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=8
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24709 │     679 │        70 │        609 │           11 │              9981 │       95 │          3 │
│ median  │      24869 │     702 │        70 │        631 │           12 │             10705 │       96 │          3 │
│ maximum │      24911 │     708 │        72 │        638 │           13 │             10820 │       98 │          3 │
│ stdev   │         79 │      12 │         1 │         12 │            1 │               401 │        1 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

Also, performance on `objarray.jl` (an example of a benchmark in which sweeping parallelizes well under the current implementation) seems fine:

- master:

```
../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=1
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      19301 │   10792 │      7485 │       3307 │         1651 │               196 │     4519 │         56 │
│ median  │      21415 │   12646 │      9094 │       3551 │         1985 │               241 │     6576 │         59 │
│ maximum │      21873 │   13118 │      9353 │       3765 │         2781 │               330 │     8793 │         60 │
│ stdev   │       1009 │     932 │       757 │        190 │          449 │                50 │     1537 │          2 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=8
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      13135 │    4377 │      3350 │       1007 │          491 │               231 │     6062 │         33 │
│ median  │      13164 │    4540 │      3370 │       1177 │          669 │               256 │     6383 │         35 │
│ maximum │      13525 │    4859 │      3675 │       1184 │          748 │               320 │     7528 │         36 │
│ stdev   │        183 │     189 │       146 │         77 │          129 │                42 │      584 │          1 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

- PR:

```
../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=1
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      19642 │   10931 │      7566 │       3365 │         1653 │               204 │     5688 │         56 │
│ median  │      21441 │   12717 │      8948 │       3770 │         1796 │               217 │     6972 │         59 │
│ maximum │      23494 │   14643 │     10576 │       4067 │         2513 │               248 │     8229 │         62 │
│ stdev   │       1408 │    1339 │      1079 │        267 │          393 │                19 │      965 │          2 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=8
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      13365 │    4544 │      3389 │       1104 │          516 │               255 │     6349 │         34 │
│ median  │      13445 │    4624 │      3404 │       1233 │          578 │               275 │     6385 │         34 │
│ maximum │      14413 │    5278 │      3837 │       1441 │          753 │               300 │     7547 │         37 │
│ stdev   │        442 │     303 │       194 │        121 │           89 │                18 │      522 │          1 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```
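
For readers skimming the diffs below, the heart of the change is a simple work-threshold gate before waking sweeper threads. A minimal standalone sketch of that idea, assuming Julia's 16 KiB GC pages (`GC_PAGE_SZ` may differ per build) and using illustrative names, not the commit's actual functions:

```c
#include <stdbool.h>
#include <stdio.h>

#define GC_PAGE_SZ (16 * 1024)                                 /* assumption: 16 KiB pool pages */
#define PARALLEL_SWEEP_THRESHOLD (4 * (1 << 20) / GC_PAGE_SZ)  /* 4 MiB -> 256 pages */

/* hypothetical gate for illustration only */
static bool should_wake_gc_threads(int n_pages_needing_scan)
{
    /* waking and synchronizing sweeper threads has a fixed cost; below
     * the threshold the master thread sweeps faster alone */
    return n_pages_needing_scan >= PARALLEL_SWEEP_THRESHOLD;
}

int main(void)
{
    printf("%d\n", should_wake_gc_threads(100));  /* 0: sweep serially */
    printf("%d\n", should_wake_gc_threads(1000)); /* 1: wake GC threads */
    return 0;
}
```
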
1 parent ea18314 commit 46e8327

File tree

3 files changed: +154 -18 lines

src/gc.c (+108 -16)

```diff
@@ -21,8 +21,8 @@ int jl_n_sweepthreads;
 _Atomic(int) gc_n_threads_marking;
 // Number of threads sweeping
 _Atomic(int) gc_n_threads_sweeping;
-// Temporary for the `ptls->page_metadata_allocd` used during parallel sweeping
-_Atomic(jl_gc_page_stack_t *) gc_allocd_scratch;
+// Temporary for the `ptls->page_metadata_allocd` used during parallel sweeping (padded to avoid false sharing)
+_Atomic(jl_gc_padded_page_stack_t *) gc_allocd_scratch;
 // `tid` of mutator thread that triggered GC
 _Atomic(int) gc_master_tid;
 // `tid` of first GC thread
```

```diff
@@ -1593,8 +1593,72 @@ static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_
     pg->nfree = nfree;
 }
 
-void gc_sweep_wake_all(void)
+// pre-scan pages to check whether there are enough pages so that it's worth parallelizing
+// also sweeps pages that don't need to be linearly scanned
+int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
 {
+    // 4MB worth of pages is worth parallelizing
+    const int n_pages_worth_parallel_sweep = (int)(4 * (1 << 20) / GC_PAGE_SZ);
+    int n_pages_to_scan = 0;
+    gc_page_profiler_serializer_t serializer = gc_page_serializer_create();
+    for (int t_i = 0; t_i < gc_n_threads; t_i++) {
+        jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+        if (ptls2 == NULL) {
+            continue;
+        }
+        jl_gc_page_stack_t *dest = &new_gc_allocd_scratch[ptls2->tid].stack;
+        jl_gc_page_stack_t tmp;
+        jl_gc_pagemeta_t *tail = NULL;
+        memset(&tmp, 0, sizeof(tmp));
+        while (1) {
+            jl_gc_pagemeta_t *pg = pop_lf_back_nosync(&ptls2->page_metadata_allocd);
+            if (pg == NULL) {
+                break;
+            }
+            int should_scan = 1;
+            if (!pg->has_marked) {
+                should_scan = 0;
+            }
+            if (!current_sweep_full && !pg->has_young) {
+                assert(!prev_sweep_full || pg->prev_nold >= pg->nold);
+                if (!prev_sweep_full || pg->prev_nold == pg->nold) {
+                    should_scan = 0;
+                }
+            }
+            if (should_scan) {
+                if (tail == NULL) {
+                    tail = pg;
+                }
+                n_pages_to_scan++;
+                push_lf_back_nosync(&tmp, pg);
+            }
+            else {
+                gc_sweep_pool_page(&serializer, dest, &ptls2->page_metadata_buffered, pg);
+            }
+            if (n_pages_to_scan >= n_pages_worth_parallel_sweep) {
+                break;
+            }
+        }
+        if (tail != NULL) {
+            tail->next = jl_atomic_load_relaxed(&ptls2->page_metadata_allocd.bottom);
+        }
+        ptls2->page_metadata_allocd = tmp;
+        if (n_pages_to_scan >= n_pages_worth_parallel_sweep) {
+            break;
+        }
+    }
+    gc_page_serializer_destroy(&serializer);
+    return n_pages_to_scan >= n_pages_worth_parallel_sweep;
+}
+
+// wake up all threads to sweep the pages
+void gc_sweep_wake_all(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
+{
+    int parallel_sweep_worthwhile = gc_sweep_prescan(ptls, new_gc_allocd_scratch);
+    jl_atomic_store(&gc_allocd_scratch, new_gc_allocd_scratch);
+    if (!parallel_sweep_worthwhile) {
+        return;
+    }
     uv_mutex_lock(&gc_threads_lock);
     for (int i = gc_first_tid; i < gc_first_tid + jl_n_markthreads; i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[i];
```

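The subtlest part of `gc_sweep_prescan` is the pointer surgery at the end: pages needing a linear scan are popped into `tmp` (which reverses their order), and when the prescan stops early the unvisited remainder of the original list is spliced back after `tail`, the first page that was pushed. A toy standalone illustration of that splice (illustrative types, not Julia's):

```c
#include <stdio.h>

typedef struct pg { int id; struct pg *next; } pg_t;

int main(void)
{
    pg_t c = {3, NULL}, b = {2, &c}, a = {1, &b}; // original list: 1 -> 2 -> 3
    pg_t *list = &a;
    pg_t *tmp = NULL, *tail = NULL;
    // pop 1 and 2 into tmp (stack order reverses them: 2 -> 1)
    for (int i = 0; i < 2; i++) {
        pg_t *pg = list;
        list = list->next;
        pg->next = tmp;
        tmp = pg;
        if (tail == NULL)
            tail = pg; // remember the first page pushed: it ends up last in tmp
    }
    tail->next = list; // splice the unscanned remainder (3) after it
    for (pg_t *p = tmp; p != NULL; p = p->next)
        printf("%d ", p->id); // prints: 2 1 3
    printf("\n");
    return 0;
}
```
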
```diff
@@ -1604,6 +1668,7 @@ void gc_sweep_wake_all(void)
     uv_mutex_unlock(&gc_threads_lock);
 }
 
+// wait for all threads to finish sweeping
 void gc_sweep_wait_for_all(void)
 {
     jl_atomic_store(&gc_allocd_scratch, NULL);
```

```diff
@@ -1612,36 +1677,58 @@
     }
 }
 
-void gc_sweep_pool_parallel(void)
+// sweep all pools
+void gc_sweep_pool_parallel(jl_ptls_t ptls)
 {
     jl_atomic_fetch_add(&gc_n_threads_sweeping, 1);
-    jl_gc_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch);
+    jl_gc_padded_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch);
     if (allocd_scratch != NULL) {
         gc_page_profiler_serializer_t serializer = gc_page_serializer_create();
         while (1) {
             int found_pg = 0;
+            // sequentially walk the threads and sweep the pages
             for (int t_i = 0; t_i < gc_n_threads; t_i++) {
                 jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+                // skip foreign threads that already exited
                 if (ptls2 == NULL) {
                     continue;
                 }
-                jl_gc_page_stack_t *allocd = &allocd_scratch[t_i];
-                jl_gc_pagemeta_t *pg = pop_lf_back(&ptls2->page_metadata_allocd);
+                jl_gc_page_stack_t *dest = &allocd_scratch[ptls2->tid].stack;
+                jl_gc_pagemeta_t *pg = try_pop_lf_back(&ptls2->page_metadata_allocd);
+                // failed steal attempt
                 if (pg == NULL) {
                     continue;
                 }
-                gc_sweep_pool_page(&serializer, allocd, &ptls2->page_metadata_buffered, pg);
+                gc_sweep_pool_page(&serializer, dest, &ptls2->page_metadata_buffered, pg);
                 found_pg = 1;
             }
             if (!found_pg) {
-                break;
+                // check for termination
+                int no_more_work = 1;
+                for (int t_i = 0; t_i < gc_n_threads; t_i++) {
+                    jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+                    // skip foreign threads that already exited
+                    if (ptls2 == NULL) {
+                        continue;
+                    }
+                    jl_gc_pagemeta_t *pg = jl_atomic_load_relaxed(&ptls2->page_metadata_allocd.bottom);
+                    if (pg != NULL) {
+                        no_more_work = 0;
+                        break;
+                    }
+                }
+                if (no_more_work) {
+                    break;
+                }
             }
+            jl_cpu_pause();
         }
         gc_page_serializer_destroy(&serializer);
     }
     jl_atomic_fetch_add(&gc_n_threads_sweeping, -1);
 }
 
+// free all pages (i.e. through `madvise` on Linux) that were lazily freed
 void gc_free_pages(void)
 {
     while (1) {
```

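Note the termination protocol above: a failed `try_pop_lf_back` is no longer proof that work is done, since it can give up after a bounded number of CAS attempts under contention. A sweeper therefore only exits after observing every thread's list empty. A standalone sketch of that check using C11 atomics (toy types and names, not Julia's):

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct node { struct node *next; } node_t;
typedef struct { _Atomic(node_t *) bottom; } work_stack_t;

/* a worker may only stop sweeping once every producer stack has been
 * observed empty; a failed bounded pop alone proves nothing */
static int no_more_work(work_stack_t *stacks, int n)
{
    for (int i = 0; i < n; i++) {
        if (atomic_load_explicit(&stacks[i].bottom, memory_order_relaxed) != NULL)
            return 0; // someone still has pages: keep trying to steal
    }
    return 1; // all lists observed empty: safe to stop
}
```
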
```diff
@@ -1666,7 +1753,7 @@ static void gc_sweep_pool(void)
 
     // allocate enough space to hold the end of the free list chain
     // for every thread and pool size
-    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) alloca(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
+    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) malloc_s(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
 
     // update metadata of pages that were pointed to by freelist or newpages from a pool
     // i.e. pages being the current allocation target
```

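The switch from `alloca` to `malloc_s` matters because `pfl` scales with both the thread count and the number of size pools. A back-of-the-envelope sketch, assuming `JL_GC_N_POOLS` is 49 as in recent Julia sources (the exact constant may differ):

```c
#include <stdio.h>

#define JL_GC_N_POOLS 49 /* assumption */

int main(void)
{
    for (int n_threads = 8; n_threads <= 4096; n_threads *= 8) {
        size_t bytes = (size_t)n_threads * JL_GC_N_POOLS * sizeof(void **);
        printf("%4d threads -> %zu bytes of scratch\n", n_threads, bytes);
    }
    /* at thousands of threads this reaches megabytes: fine on the heap,
     * dangerous on the stack */
    return 0;
}
```
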
```diff
@@ -1708,17 +1795,18 @@
     }
 
     // the actual sweeping
-    jl_gc_page_stack_t *tmp = (jl_gc_page_stack_t *)alloca(n_threads * sizeof(jl_gc_page_stack_t));
-    memset(tmp, 0, n_threads * sizeof(jl_gc_page_stack_t));
-    jl_atomic_store(&gc_allocd_scratch, tmp);
-    gc_sweep_wake_all();
-    gc_sweep_pool_parallel();
+    jl_gc_padded_page_stack_t *new_gc_allocd_scratch = (jl_gc_padded_page_stack_t *) malloc_s(n_threads * sizeof(jl_gc_padded_page_stack_t));
+    memset(new_gc_allocd_scratch, 0, n_threads * sizeof(jl_gc_padded_page_stack_t));
+    jl_ptls_t ptls = jl_current_task->ptls;
+    gc_sweep_wake_all(ptls, new_gc_allocd_scratch);
+    gc_sweep_pool_parallel(ptls);
     gc_sweep_wait_for_all();
 
+    // reset half-pages pointers
     for (int t_i = 0; t_i < n_threads; t_i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[t_i];
         if (ptls2 != NULL) {
-            ptls2->page_metadata_allocd = tmp[t_i];
+            ptls2->page_metadata_allocd = new_gc_allocd_scratch[t_i].stack;
             for (int i = 0; i < JL_GC_N_POOLS; i++) {
                 jl_gc_pool_t *p = &ptls2->heap.norm_pools[i];
                 p->newpages = NULL;
```

```diff
@@ -1756,6 +1844,10 @@
         }
     }
 
+    // cleanup
+    free(pfl);
+    free(new_gc_allocd_scratch);
+
 #ifdef _P64 // only enable concurrent sweeping on 64bit
     // wake thread up to sweep concurrently
     if (jl_n_sweepthreads > 0) {
```

src/gc.h (+45 -1)

```diff
@@ -195,6 +195,23 @@ extern jl_gc_page_stack_t global_page_pool_freed;
 // in the sweeping phase, which also doesn't push a node into the
 // same stack after it's popped
 
+STATIC_INLINE void push_lf_back_nosync(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt) JL_NOTSAFEPOINT
+{
+    jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+    elt->next = old_back;
+    jl_atomic_store_relaxed(&pool->bottom, elt);
+}
+
+STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back_nosync(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
+{
+    jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+    if (old_back == NULL) {
+        return NULL;
+    }
+    jl_atomic_store_relaxed(&pool->bottom, old_back->next);
+    return old_back;
+}
+
 STATIC_INLINE void push_lf_back(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt) JL_NOTSAFEPOINT
 {
     while (1) {
```

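The `_nosync` variants use only relaxed atomics, so they are safe solely while a single thread owns the stack, as during the prescan before any GC thread has been woken. A hypothetical usage sketch in the context of the definitions above (`move_all_nosync` is not part of the patch):

```c
// hypothetical helper: drain `src` into `dst` during a single-threaded
// phase; relaxed loads/stores suffice because no other thread can
// observe or mutate either stack concurrently
STATIC_INLINE void move_all_nosync(jl_gc_page_stack_t *dst, jl_gc_page_stack_t *src)
{
    jl_gc_pagemeta_t *pg;
    while ((pg = pop_lf_back_nosync(src)) != NULL) {
        push_lf_back_nosync(dst, pg);
    }
}
```
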
```diff
@@ -207,6 +224,23 @@ STATIC_INLINE void push_lf_back(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt)
     }
 }
 
+#define MAX_POP_ATTEMPTS (1 << 10)
+
+STATIC_INLINE jl_gc_pagemeta_t *try_pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
+{
+    for (int i = 0; i < MAX_POP_ATTEMPTS; i++) {
+        jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+        if (old_back == NULL) {
+            return NULL;
+        }
+        if (jl_atomic_cmpswap(&pool->bottom, &old_back, old_back->next)) {
+            return old_back;
+        }
+        jl_cpu_pause();
+    }
+    return NULL;
+}
+
 STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
 {
     while (1) {
```

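`try_pop_lf_back` is the contention-reduction workhorse: unlike `pop_lf_back`, which retries its CAS forever, it gives up after `MAX_POP_ATTEMPTS` so the caller can treat the pop as a failed steal and move on to another thread's list. A standalone C11 analogue (toy types; reading `old->next` is safe here only because, as the comment above these helpers notes, nothing is pushed back onto a stack during the sweeping phase):

```c
#include <stdatomic.h>
#include <stddef.h>

typedef struct tnode { struct tnode *next; } tnode_t;
typedef struct { _Atomic(tnode_t *) bottom; } tstack_t;

#define MAX_POP_ATTEMPTS (1 << 10)

static tnode_t *try_pop_back(tstack_t *s)
{
    for (int i = 0; i < MAX_POP_ATTEMPTS; i++) {
        tnode_t *old = atomic_load_explicit(&s->bottom, memory_order_relaxed);
        if (old == NULL)
            return NULL; // genuinely empty
        // on success, `bottom` now points past the popped node
        if (atomic_compare_exchange_weak(&s->bottom, &old, old->next))
            return old;
        // lost the race: another thread popped first; retry a bounded
        // number of times, then report failure as a "failed steal"
    }
    return NULL;
}
```
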
```diff
@@ -220,6 +254,16 @@ STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFE
         jl_cpu_pause();
     }
 }
+typedef struct {
+    jl_gc_page_stack_t stack;
+    // pad to 128 bytes to avoid false-sharing
+#ifdef _P64
+    void *_pad[15];
+#else
+    void *_pad[31];
+#endif
+} jl_gc_padded_page_stack_t;
+static_assert(sizeof(jl_gc_padded_page_stack_t) == 128, "jl_gc_padded_page_stack_t is not 128 bytes");
 
 typedef struct {
     _Atomic(size_t) n_freed_objs;
```

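The pad sizes fall out of simple arithmetic: `jl_gc_page_stack_t` holds a single pointer-sized atomic, so 8 + 15×8 = 128 bytes on 64-bit and 4 + 31×4 = 128 bytes on 32-bit. 128 bytes spans two 64-byte cache lines, which also defends against adjacent-line prefetching. A standalone sketch verifying the layout (toy names; assumes the stack is one pointer wide):

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

typedef struct { _Atomic(void *) bottom; } toy_page_stack_t;

typedef struct {
    toy_page_stack_t stack;
#if UINTPTR_MAX == 0xffffffffffffffffULL /* 64-bit: 8 + 15*8 = 128 */
    void *_pad[15];
#else                                    /* 32-bit: 4 + 31*4 = 128 */
    void *_pad[31];
#endif
} toy_padded_page_stack_t;

static_assert(sizeof(toy_padded_page_stack_t) == 128,
              "padding must round the struct up to two cache lines");
```
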
```diff
@@ -461,7 +505,7 @@ void gc_mark_finlist(jl_gc_markqueue_t *mq, arraylist_t *list, size_t start) JL_
 void gc_mark_loop_serial_(jl_ptls_t ptls, jl_gc_markqueue_t *mq);
 void gc_mark_loop_serial(jl_ptls_t ptls);
 void gc_mark_loop_parallel(jl_ptls_t ptls, int master);
-void gc_sweep_pool_parallel(void);
+void gc_sweep_pool_parallel(jl_ptls_t ptls);
 void gc_free_pages(void);
 void sweep_stack_pools(void);
 void jl_gc_debug_init(void);
```

src/partr.c (+1 -1)

```diff
@@ -143,7 +143,7 @@ void jl_parallel_gc_threadfun(void *arg)
             gc_mark_loop_parallel(ptls, 0);
         }
         if (may_sweep(ptls)) { // not an else!
-            gc_sweep_pool_parallel();
+            gc_sweep_pool_parallel(ptls);
             jl_atomic_fetch_add(&ptls->gc_sweeps_requested, -1);
         }
     }
```
