Commit 40f65bd

d-netto authored and RAI CI (GitHub Action Automation) committed
reduce contention on page metadata lists during the sweeping phase (JuliaLang#52943)
**EDIT**: fixes JuliaLang#52937 by decreasing the contention on the page lists and by waking the GC threads only when there is a sufficiently large number of pages to sweep. This seems to address the regression from the MWE of JuliaLang#52937:

- master:

```
../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=1
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24841 │     818 │        78 │        740 │           44 │             10088 │       96 │          3 │
│ median  │      24881 │     834 │        83 │        751 │           45 │             10738 │       97 │          3 │
│ maximum │      25002 │     891 │        87 │        803 │           48 │             11074 │      112 │          4 │
│ stdev   │         78 │      29 │         4 │         26 │            1 │               393 │        7 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=8
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      29113 │    5200 │        68 │       5130 │           12 │              9724 │       95 │         18 │
│ median  │      29354 │    5274 │        69 │       5204 │           12 │             10456 │       96 │         18 │
│ maximum │      29472 │    5333 │        70 │       5264 │           14 │             11913 │       97 │         18 │
│ stdev   │        138 │      54 │         1 │         55 │            1 │               937 │        1 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

- PR:

```
../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=1
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24475 │     761 │        77 │        681 │           40 │              9499 │       94 │          3 │
│ median  │      24845 │     775 │        80 │        698 │           43 │             10793 │       97 │          3 │
│ maximum │      25128 │     811 │        85 │        726 │           47 │             12820 │      113 │          3 │
│ stdev   │        240 │      22 │         3 │         21 │            3 │              1236 │        8 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl serial obj_arrays issue-52937 -n5 --gcthreads=8
bench = "issue-52937.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      24709 │     679 │        70 │        609 │           11 │              9981 │       95 │          3 │
│ median  │      24869 │     702 │        70 │        631 │           12 │             10705 │       96 │          3 │
│ maximum │      24911 │     708 │        72 │        638 │           13 │             10820 │       98 │          3 │
│ stdev   │         79 │      12 │         1 │         12 │            1 │               401 │        1 │          0 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

Also, performance on `objarray.jl` (an example of a benchmark in which sweeping parallelizes well with the current implementation) seems fine:

- master:

```
../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=1
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      19301 │   10792 │      7485 │       3307 │         1651 │               196 │     4519 │         56 │
│ median  │      21415 │   12646 │      9094 │       3551 │         1985 │               241 │     6576 │         59 │
│ maximum │      21873 │   13118 │      9353 │       3765 │         2781 │               330 │     8793 │         60 │
│ stdev   │       1009 │     932 │       757 │        190 │          449 │                50 │     1537 │          2 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=8
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      13135 │    4377 │      3350 │       1007 │          491 │               231 │     6062 │         33 │
│ median  │      13164 │    4540 │      3370 │       1177 │          669 │               256 │     6383 │         35 │
│ maximum │      13525 │    4859 │      3675 │       1184 │          748 │               320 │     7528 │         36 │
│ stdev   │        183 │     189 │       146 │         77 │          129 │                42 │      584 │          1 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```

- PR:

```
../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=1
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      19642 │   10931 │      7566 │       3365 │         1653 │               204 │     5688 │         56 │
│ median  │      21441 │   12717 │      8948 │       3770 │         1796 │               217 │     6972 │         59 │
│ maximum │      23494 │   14643 │     10576 │       4067 │         2513 │               248 │     8229 │         62 │
│ stdev   │       1408 │    1339 │      1079 │        267 │          393 │                19 │      965 │          2 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘

../julia-master/julia --project=. run_benchmarks.jl multithreaded bigarrays -n5 --gcthreads=8
bench = "objarray.jl"
┌─────────┬────────────┬─────────┬───────────┬────────────┬──────────────┬───────────────────┬──────────┬────────────┐
│         │ total time │ gc time │ mark time │ sweep time │ max GC pause │ time to safepoint │ max heap │ percent gc │
│         │ ms         │ ms      │ ms        │ ms         │ ms           │ us                │ MB       │ %          │
├─────────┼────────────┼─────────┼───────────┼────────────┼──────────────┼───────────────────┼──────────┼────────────┤
│ minimum │      13365 │    4544 │      3389 │       1104 │          516 │               255 │     6349 │         34 │
│ median  │      13445 │    4624 │      3404 │       1233 │          578 │               275 │     6385 │         34 │
│ maximum │      14413 │    5278 │      3837 │       1441 │          753 │               300 │     7547 │         37 │
│ stdev   │        442 │     303 │       194 │        121 │           89 │                18 │      522 │          1 │
└─────────┴────────────┴─────────┴───────────┴────────────┴──────────────┴───────────────────┴──────────┴────────────┘
```
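For context, the wake-up cutoff used by the new prescan is easy to work out by hand: the GC threads are woken only once roughly 4 MB worth of pages need a linear scan. A minimal standalone sketch of that arithmetic follows; the 16 KiB value for `GC_PAGE_SZ` is an assumption for illustration, not taken from this commit, and may differ across Julia builds.

```c
#include <stdio.h>

// Assumed page size (16 KiB) for illustration only.
#define GC_PAGE_SZ (1 << 14)

int main(void)
{
    // same expression as in gc_sweep_prescan below: 4MB worth of pages
    const int n_pages_worth_parallel_sweep = (int)(4 * (1 << 20) / GC_PAGE_SZ);
    // 4 MiB / 16 KiB = 256 pages; below that, the requesting thread sweeps alone
    printf("parallel sweep once >= %d pages need scanning\n",
           n_pages_worth_parallel_sweep);
    return 0;
}
```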
1 parent: 1cafd8a

3 files changed: +154 -18 lines changed

src/gc.c: +108 -16
```diff
@@ -21,8 +21,8 @@ int jl_n_sweepthreads;
 _Atomic(int) gc_n_threads_marking;
 // Number of threads sweeping
 _Atomic(int) gc_n_threads_sweeping;
-// Temporary for the `ptls->page_metadata_allocd` used during parallel sweeping
-_Atomic(jl_gc_page_stack_t *) gc_allocd_scratch;
+// Temporary for the `ptls->page_metadata_allocd` used during parallel sweeping (padded to avoid false sharing)
+_Atomic(jl_gc_padded_page_stack_t *) gc_allocd_scratch;
 // `tid` of mutator thread that triggered GC
 _Atomic(int) gc_master_tid;
 // `tid` of first GC thread
@@ -1586,8 +1586,72 @@ static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_NOTSAFEPOINT
     pg->nfree = nfree;
 }
 
-void gc_sweep_wake_all(void)
+// pre-scan pages to check whether there are enough pages so that's worth parallelizing
+// also sweeps pages that don't need to be linearly scanned
+int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
 {
+    // 4MB worth of pages is worth parallelizing
+    const int n_pages_worth_parallel_sweep = (int)(4 * (1 << 20) / GC_PAGE_SZ);
+    int n_pages_to_scan = 0;
+    gc_page_profiler_serializer_t serializer = gc_page_serializer_create();
+    for (int t_i = 0; t_i < gc_n_threads; t_i++) {
+        jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+        if (ptls2 == NULL) {
+            continue;
+        }
+        jl_gc_page_stack_t *dest = &new_gc_allocd_scratch[ptls2->tid].stack;
+        jl_gc_page_stack_t tmp;
+        jl_gc_pagemeta_t *tail = NULL;
+        memset(&tmp, 0, sizeof(tmp));
+        while (1) {
+            jl_gc_pagemeta_t *pg = pop_lf_back_nosync(&ptls2->page_metadata_allocd);
+            if (pg == NULL) {
+                break;
+            }
+            int should_scan = 1;
+            if (!pg->has_marked) {
+                should_scan = 0;
+            }
+            if (!current_sweep_full && !pg->has_young) {
+                assert(!prev_sweep_full || pg->prev_nold >= pg->nold);
+                if (!prev_sweep_full || pg->prev_nold == pg->nold) {
+                    should_scan = 0;
+                }
+            }
+            if (should_scan) {
+                if (tail == NULL) {
+                    tail = pg;
+                }
+                n_pages_to_scan++;
+                push_lf_back_nosync(&tmp, pg);
+            }
+            else {
+                gc_sweep_pool_page(&serializer, dest, &ptls2->page_metadata_buffered, pg);
+            }
+            if (n_pages_to_scan >= n_pages_worth_parallel_sweep) {
+                break;
+            }
+        }
+        if (tail != NULL) {
+            tail->next = jl_atomic_load_relaxed(&ptls2->page_metadata_allocd.bottom);
+        }
+        ptls2->page_metadata_allocd = tmp;
+        if (n_pages_to_scan >= n_pages_worth_parallel_sweep) {
+            break;
+        }
+    }
+    gc_page_serializer_destroy(&serializer);
+    return n_pages_to_scan >= n_pages_worth_parallel_sweep;
+}
+
+// wake up all threads to sweep the pages
+void gc_sweep_wake_all(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch)
+{
+    int parallel_sweep_worthwhile = gc_sweep_prescan(ptls, new_gc_allocd_scratch);
+    jl_atomic_store(&gc_allocd_scratch, new_gc_allocd_scratch);
+    if (!parallel_sweep_worthwhile) {
+        return;
+    }
     uv_mutex_lock(&gc_threads_lock);
     for (int i = gc_first_tid; i < gc_first_tid + jl_n_markthreads; i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[i];
@@ -1597,6 +1661,7 @@ void gc_sweep_wake_all(void)
     uv_mutex_unlock(&gc_threads_lock);
 }
 
+// wait for all threads to finish sweeping
 void gc_sweep_wait_for_all(void)
 {
     jl_atomic_store(&gc_allocd_scratch, NULL);
@@ -1605,36 +1670,58 @@
     }
 }
 
-void gc_sweep_pool_parallel(void)
+// sweep all pools
+void gc_sweep_pool_parallel(jl_ptls_t ptls)
 {
     jl_atomic_fetch_add(&gc_n_threads_sweeping, 1);
-    jl_gc_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch);
+    jl_gc_padded_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch);
     if (allocd_scratch != NULL) {
         gc_page_profiler_serializer_t serializer = gc_page_serializer_create();
         while (1) {
             int found_pg = 0;
+            // sequentially walk the threads and sweep the pages
             for (int t_i = 0; t_i < gc_n_threads; t_i++) {
                 jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+                // skip foreign threads that already exited
                 if (ptls2 == NULL) {
                     continue;
                 }
-                jl_gc_page_stack_t *allocd = &allocd_scratch[t_i];
-                jl_gc_pagemeta_t *pg = pop_lf_back(&ptls2->page_metadata_allocd);
+                jl_gc_page_stack_t *dest = &allocd_scratch[ptls2->tid].stack;
+                jl_gc_pagemeta_t *pg = try_pop_lf_back(&ptls2->page_metadata_allocd);
+                // failed steal attempt
                 if (pg == NULL) {
                     continue;
                 }
-                gc_sweep_pool_page(&serializer, allocd, &ptls2->page_metadata_buffered, pg);
+                gc_sweep_pool_page(&serializer, dest, &ptls2->page_metadata_buffered, pg);
                 found_pg = 1;
             }
             if (!found_pg) {
-                break;
+                // check for termination
+                int no_more_work = 1;
+                for (int t_i = 0; t_i < gc_n_threads; t_i++) {
+                    jl_ptls_t ptls2 = gc_all_tls_states[t_i];
+                    // skip foreign threads that already exited
+                    if (ptls2 == NULL) {
+                        continue;
+                    }
+                    jl_gc_pagemeta_t *pg = jl_atomic_load_relaxed(&ptls2->page_metadata_allocd.bottom);
+                    if (pg != NULL) {
+                        no_more_work = 0;
+                        break;
+                    }
+                }
+                if (no_more_work) {
+                    break;
+                }
             }
+            jl_cpu_pause();
         }
         gc_page_serializer_destroy(&serializer);
     }
     jl_atomic_fetch_add(&gc_n_threads_sweeping, -1);
 }
 
+// free all pages (i.e. through `madvise` on Linux) that were lazily freed
 void gc_free_pages(void)
 {
     while (1) {
@@ -1659,7 +1746,7 @@ static void gc_sweep_pool(void)
 
     // allocate enough space to hold the end of the free list chain
     // for every thread and pool size
-    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) alloca(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
+    jl_taggedvalue_t ***pfl = (jl_taggedvalue_t ***) malloc_s(n_threads * JL_GC_N_POOLS * sizeof(jl_taggedvalue_t**));
 
     // update metadata of pages that were pointed to by freelist or newpages from a pool
     // i.e. pages being the current allocation target
@@ -1701,17 +1788,18 @@
     }
 
     // the actual sweeping
-    jl_gc_page_stack_t *tmp = (jl_gc_page_stack_t *)alloca(n_threads * sizeof(jl_gc_page_stack_t));
-    memset(tmp, 0, n_threads * sizeof(jl_gc_page_stack_t));
-    jl_atomic_store(&gc_allocd_scratch, tmp);
-    gc_sweep_wake_all();
-    gc_sweep_pool_parallel();
+    jl_gc_padded_page_stack_t *new_gc_allocd_scratch = (jl_gc_padded_page_stack_t *) malloc_s(n_threads * sizeof(jl_gc_padded_page_stack_t));
+    memset(new_gc_allocd_scratch, 0, n_threads * sizeof(jl_gc_padded_page_stack_t));
+    jl_ptls_t ptls = jl_current_task->ptls;
+    gc_sweep_wake_all(ptls, new_gc_allocd_scratch);
+    gc_sweep_pool_parallel(ptls);
     gc_sweep_wait_for_all();
 
+    // reset half-pages pointers
     for (int t_i = 0; t_i < n_threads; t_i++) {
         jl_ptls_t ptls2 = gc_all_tls_states[t_i];
         if (ptls2 != NULL) {
-            ptls2->page_metadata_allocd = tmp[t_i];
+            ptls2->page_metadata_allocd = new_gc_allocd_scratch[t_i].stack;
             for (int i = 0; i < JL_GC_N_POOLS; i++) {
                 jl_gc_pool_t *p = &ptls2->heap.norm_pools[i];
                 p->newpages = NULL;
@@ -1749,6 +1837,10 @@
         }
     }
 
+    // cleanup
+    free(pfl);
+    free(new_gc_allocd_scratch);
+
 #ifdef _P64 // only enable concurrent sweeping on 64bit
     // wake thread up to sweep concurrently
     if (jl_n_sweepthreads > 0) {
```
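Condensing the hunks above, the sweep entry point now follows a prescan-then-maybe-wake protocol. The outline below is a sketch of that control flow, not a verbatim excerpt; free-list fix-up, the half-pages reset, and the concurrent-sweep branch are elided, and `gc_sweep_pool_outline` is a stand-in name.

```c
// Sketch of gc_sweep_pool()'s orchestration after this commit.
void gc_sweep_pool_outline(void)
{
    // one 128-byte padded destination stack per thread
    jl_gc_padded_page_stack_t *scratch = (jl_gc_padded_page_stack_t *)
        malloc_s(n_threads * sizeof(jl_gc_padded_page_stack_t));
    memset(scratch, 0, n_threads * sizeof(jl_gc_padded_page_stack_t));
    jl_ptls_t ptls = jl_current_task->ptls;
    // prescan sweeps trivial pages inline and wakes the GC threads
    // only if enough scan-worthy pages remain
    gc_sweep_wake_all(ptls, scratch);
    // the requesting thread always participates in the sweep
    gc_sweep_pool_parallel(ptls);
    // publish a NULL scratch pointer and wait for sweepers to drain
    gc_sweep_wait_for_all();
    free(scratch);
}
```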

src/gc.h: +45 -1
```diff
@@ -195,6 +195,23 @@ extern jl_gc_page_stack_t global_page_pool_freed;
 // in the sweeping phase, which also doesn't push a node into the
 // same stack after it's popped
 
+STATIC_INLINE void push_lf_back_nosync(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt) JL_NOTSAFEPOINT
+{
+    jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+    elt->next = old_back;
+    jl_atomic_store_relaxed(&pool->bottom, elt);
+}
+
+STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back_nosync(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
+{
+    jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+    if (old_back == NULL) {
+        return NULL;
+    }
+    jl_atomic_store_relaxed(&pool->bottom, old_back->next);
+    return old_back;
+}
+
 STATIC_INLINE void push_lf_back(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt) JL_NOTSAFEPOINT
 {
     while (1) {
@@ -207,6 +224,23 @@ STATIC_INLINE void push_lf_back(jl_gc_page_stack_t *pool, jl_gc_pagemeta_t *elt)
     }
 }
 
+#define MAX_POP_ATTEMPTS (1 << 10)
+
+STATIC_INLINE jl_gc_pagemeta_t *try_pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
+{
+    for (int i = 0; i < MAX_POP_ATTEMPTS; i++) {
+        jl_gc_pagemeta_t *old_back = jl_atomic_load_relaxed(&pool->bottom);
+        if (old_back == NULL) {
+            return NULL;
+        }
+        if (jl_atomic_cmpswap(&pool->bottom, &old_back, old_back->next)) {
+            return old_back;
+        }
+        jl_cpu_pause();
+    }
+    return NULL;
+}
+
 STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
 {
     while (1) {
```
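Unlike `pop_lf_back`, `try_pop_lf_back` above bounds its CAS retries: a sweeper that keeps losing the race on one thread's page list gives up after `MAX_POP_ATTEMPTS` and moves on to steal from another list, which is where the contention reduction comes from. Below is a self-contained C11 model of the same pattern; the names are simplified stand-ins, and the real code uses Julia's `jl_atomic_*` wrappers plus sweep-phase invariants to sidestep ABA issues.

```c
#include <stdatomic.h>
#include <stddef.h>

// Model of a Treiber-stack pop that gives up after a fixed number of
// failed CAS attempts instead of spinning until it wins.
typedef struct node { struct node *next; } node_t;
typedef struct { _Atomic(node_t *) bottom; } lf_stack_t;

#define MAX_POP_ATTEMPTS (1 << 10)

static node_t *try_pop(lf_stack_t *s)
{
    for (int i = 0; i < MAX_POP_ATTEMPTS; i++) {
        node_t *old_back = atomic_load_explicit(&s->bottom, memory_order_relaxed);
        if (old_back == NULL)
            return NULL; // stack is empty
        if (atomic_compare_exchange_weak(&s->bottom, &old_back, old_back->next))
            return old_back; // won the race
        // lost the race: another thread popped first; retry
    }
    return NULL; // caller treats this like "empty" and steals elsewhere
}

int main(void)
{
    node_t n = { .next = NULL };
    lf_stack_t s;
    atomic_store(&s.bottom, &n);
    return try_pop(&s) == &n ? 0 : 1;
}
```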
```diff
@@ -220,6 +254,16 @@ STATIC_INLINE jl_gc_pagemeta_t *pop_lf_back(jl_gc_page_stack_t *pool) JL_NOTSAFEPOINT
         jl_cpu_pause();
     }
 }
+typedef struct {
+    jl_gc_page_stack_t stack;
+    // pad to 128 bytes to avoid false-sharing
+#ifdef _P64
+    void *_pad[15];
+#else
+    void *_pad[31];
+#endif
+} jl_gc_padded_page_stack_t;
+static_assert(sizeof(jl_gc_padded_page_stack_t) == 128, "jl_gc_padded_page_stack_t is not 128 bytes");
 
 typedef struct {
     _Atomic(size_t) n_freed_objs;
@@ -461,7 +505,7 @@ void gc_mark_finlist(jl_gc_markqueue_t *mq, arraylist_t *list, size_t start) JL_NOTSAFEPOINT
 void gc_mark_loop_serial_(jl_ptls_t ptls, jl_gc_markqueue_t *mq);
 void gc_mark_loop_serial(jl_ptls_t ptls);
 void gc_mark_loop_parallel(jl_ptls_t ptls, int master);
-void gc_sweep_pool_parallel(void);
+void gc_sweep_pool_parallel(jl_ptls_t ptls);
 void gc_free_pages(void);
 void sweep_stack_pools(void);
 void jl_gc_debug_init(void);
```
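The 128-byte slots in `jl_gc_padded_page_stack_t` keep each thread's destination-stack head on its own cache lines, so one sweeper pushing a page does not invalidate the line holding a neighbor's stack head. The following standalone program illustrates the same layout trick; the types are stand-ins, not the Julia definitions, and 128 bytes is assumed to cover two typical 64-byte cache lines.

```c
#include <assert.h>
#include <stdatomic.h>

// Stand-in for jl_gc_pagemeta_t; only its address matters here.
typedef struct page_meta page_meta_t;

// One stack head per slot, padded out to 128 bytes, mirroring
// jl_gc_padded_page_stack_t; adjacent slots never share a cache line.
typedef struct {
    _Atomic(page_meta_t *) bottom;
    char _pad[128 - sizeof(_Atomic(page_meta_t *))];
} padded_stack_t;

static_assert(sizeof(padded_stack_t) == 128, "slot must span whole cache lines");

int main(void)
{
    // an array of slots places each thread's `bottom` 128 bytes apart
    padded_stack_t per_thread[8];
    return (char *)&per_thread[1] - (char *)&per_thread[0] == 128 ? 0 : 1;
}
```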

src/partr.c: +1 -1
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ void jl_parallel_gc_threadfun(void *arg)
143143
gc_mark_loop_parallel(ptls, 0);
144144
}
145145
if (may_sweep(ptls)) { // not an else!
146-
gc_sweep_pool_parallel();
146+
gc_sweep_pool_parallel(ptls);
147147
jl_atomic_fetch_add(&ptls->gc_sweeps_requested, -1);
148148
}
149149
}
