@@ -480,6 +480,7 @@ typedef enum {
     GPR_FLAG_HAVE_FINALIZE = 0x4000,
     GPR_FLAG_IMMEDIATE_MARK = 0x8000,
     GPR_FLAG_FULL_MARK = 0x10000,
+    GPR_FLAG_COMPACT = 0x20000,
 
     GPR_DEFAULT_REASON =
         (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
@@ -634,6 +635,8 @@ typedef struct rb_heap_struct {
     struct heap_page *using_page;
     struct list_head pages;
     struct heap_page *sweeping_page; /* iterator for .pages */
+    struct heap_page *compact_cursor;
+    VALUE moved_list;
 #if GC_ENABLE_INCREMENTAL_MARK
     struct heap_page *pooled_pages;
 #endif
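These two fields are the whole state the compactor needs: `compact_cursor` walks the page list tail-to-head while sweeping walks head-to-tail, and `moved_list` accumulates the T_MOVED forwarding slots that `gc_move` creates. A minimal sketch of the cursor invariant, with hypothetical stand-in types (nothing below is from gc.c):

/* Sketch: compaction is "on" only while the backward-moving cursor is
 * non-NULL; both the sweep code and try_move clear it when the two
 * cursors meet. `struct page` stands in for struct heap_page. */
struct page;

struct heap_sketch {
    struct page *sweeping_page;  /* advances head -> tail during sweep */
    struct page *compact_cursor; /* advances tail -> head during compaction */
};

static int is_compacting(const struct heap_sketch *heap)
{
    return heap->compact_cursor != NULL;
}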
@@ -667,6 +670,7 @@ typedef struct rb_objspace {
     unsigned int gc_stressful : 1;
     unsigned int has_hook : 1;
     unsigned int during_minor_gc : 1;
+    unsigned int compact : 1;
 #if GC_ENABLE_INCREMENTAL_MARK
     unsigned int during_incremental_marking : 1;
 #endif
@@ -4178,17 +4182,71 @@ gc_setup_mark_bits(struct heap_page *page)
     memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
 }
 
+static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
+static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list);
+
+static short
+try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page, VALUE vp)
+{
+    struct heap_page *cursor = heap->compact_cursor;
+
+    while (1) {
+        bits_t *mark_bits = cursor->mark_bits;
+        RVALUE *p = cursor->start;
+        RVALUE *offset = p - NUM_IN_PAGE(p);
+
+        /* Find an object to move and move it. Movable objects must be
+         * marked, so we iterate using the marking bitmap */
+        for (size_t i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
+            bits_t bits = mark_bits[i];
+
+            if (bits) {
+                p = offset + i * BITS_BITLENGTH;
+
+                do {
+                    if (bits & 1) {
+                        if (gc_is_moveable_obj(objspace, (VALUE)p)) {
+                            heap->moved_list = gc_move(objspace, (VALUE)p, vp, heap->moved_list);
+                            return 1;
+                        }
+                    }
+                    p++;
+                    bits >>= 1;
+                } while (bits);
+            }
+        }
+
+        struct heap_page *next;
+
+        next = list_prev(&heap->pages, cursor, page_node);
+
+        // Cursors have met, let's quit
+        if (next == sweep_page) {
+            break;
+        } else {
+            heap->compact_cursor = next;
+            cursor = next;
+        }
+    }
+
+    return 0;
+}
+
 static inline int
 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
 {
     int i;
-    int empty_slots = 0, freed_slots = 0, final_slots = 0;
+    int empty_slots = 0, freed_slots = 0, final_slots = 0, moved_slots = 0;
     RVALUE *p, *pend, *offset;
     bits_t *bits, bitset;
 
     gc_report(2, objspace, "page_sweep: start.\n");
 
     sweep_page->flags.before_sweep = FALSE;
+    if (heap->compact_cursor && sweep_page == heap->compact_cursor) {
+        /* The compaction cursor and sweep page met, so we need to quit compacting */
+        heap->compact_cursor = NULL;
+    }
 
     p = sweep_page->start; pend = p + sweep_page->total_slots;
     offset = p - NUM_IN_PAGE(p);
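The inner loop of `try_move` above scans one bitmap word at a time and visits only marked slots, computing each slot's address as `offset + i * BITS_BITLENGTH` plus the bit position. A standalone illustration of that bit-scanning pattern, using hypothetical names rather than gc.c's `bits_t`/`RVALUE`:

#include <stdint.h>
#include <stdio.h>

/* Mirror of try_move's `bits & 1` / `bits >>= 1` walk: for each set bit
 * in a mark word, compute the slot index it corresponds to. */
#define BITS_PER_WORD 64

static void visit_marked_slots(uint64_t word, size_t word_index)
{
    size_t slot = word_index * BITS_PER_WORD;
    while (word) {
        if (word & 1) {
            printf("slot %zu is marked\n", slot);
        }
        slot++;
        word >>= 1;
    }
}

int main(void)
{
    visit_marked_slots(0x5, 0);  /* slots 0 and 2 */
    visit_marked_slots(0x1, 2);  /* slot 128 */
    return 0;
}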
@@ -4220,20 +4278,51 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
             }
             else {
                 (void)VALGRIND_MAKE_MEM_UNDEFINED((void *)p, sizeof(RVALUE));
-                heap_page_add_freeobj(objspace, sweep_page, vp);
+
+                if (heap->compact_cursor) {
+                    /* If try_move couldn't compact anything, it means
+                     * the cursors have met and there are no objects left that
+                     * /can/ be compacted, so we need to quit. */
+                    if (try_move(objspace, heap, sweep_page, vp)) {
+                        moved_slots++;
+                    } else {
+                        heap->compact_cursor = NULL;
+                        heap_page_add_freeobj(objspace, sweep_page, vp);
+                    }
+                } else {
+                    heap_page_add_freeobj(objspace, sweep_page, vp);
+                }
+
                 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
-                freed_slots++;
-                asan_poison_object(vp);
+                freed_slots++;
             }
             break;
         }
 
         /* minor cases */
+        case T_MOVED:
+            if (!objspace->flags.during_compacting) {
+                /* When compaction is combined with sweeping, some of the swept pages
+                 * will have T_MOVED objects on them. These need to be kept alive
+                 * until references are finished updating. Once references are finished
+                 * updating, `gc_unlink_moved_list` will clear the T_MOVED slots
+                 * and release them back to the heap. If there are T_MOVED slots
+                 * in the heap and we're _not_ compacting, then it's a bug. */
+                rb_bug("T_MOVED shouldn't be on the heap unless compacting\n");
+            }
+            break;
         case T_ZOMBIE:
             /* already counted */
             break;
         case T_NONE:
-            empty_slots++; /* already freed */
+            if (heap->compact_cursor) {
+                if (try_move(objspace, heap, sweep_page, vp)) {
+                    moved_slots++;
+                } else {
+                    heap->compact_cursor = NULL;
+                }
+            }
+            empty_slots++; /* already freed */
             break;
         }
     }
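The T_MOVED case above is the forwarding-slot half of compaction: after `gc_move` relocates an object, the old slot is left behind as a T_MOVED record pointing at the new location, and it must survive until `gc_update_references` has rewritten every stale pointer. A toy model of forwarding-pointer fix-up (all names below are hypothetical, not gc.c's):

#include <assert.h>
#include <stddef.h>

/* A slot either holds a live object or a forwarding record left behind
 * by a move (the analogue of a T_MOVED slot). */
struct slot {
    int moved;            /* nonzero: this slot only forwards */
    struct slot *forward; /* new home of the object, if moved */
    int payload;
};

/* What reference updating must do for every stored pointer: chase the
 * forwarding record to the object's new address. */
static struct slot *update_ref(struct slot *ref)
{
    return ref->moved ? ref->forward : ref;
}

int main(void)
{
    struct slot dst = { 0, NULL, 42 };  /* object after the move */
    struct slot src = { 1, &dst, 0 };   /* old slot, now forwarding */
    struct slot *ref = &src;            /* stale reference */

    ref = update_ref(ref);
    assert(ref == &dst && ref->payload == 42);
    return 0;
}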
@@ -4257,7 +4346,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
               (int)sweep_page->total_slots,
               freed_slots, empty_slots, final_slots);
 
-    sweep_page->free_slots = freed_slots + empty_slots;
+    sweep_page->free_slots = (freed_slots + empty_slots) - moved_slots;
     objspace->profile.total_freed_objects += freed_slots;
     heap_pages_final_slots += final_slots;
     sweep_page->final_slots += final_slots;
@@ -4271,7 +4360,7 @@ gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_
 
     gc_report(2, objspace, "page_sweep: end.\n");
 
-    return freed_slots + empty_slots;
+    return (freed_slots + empty_slots) - moved_slots;
 }
 
 /* allocate additional minimum page to work */
@@ -4456,6 +4545,29 @@ gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
     gc_exit(objspace, "sweep_continue");
 }
 
+static void
+gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
+{
+    heap->compact_cursor = list_tail(&heap->pages, struct heap_page, page_node);
+    objspace->profile.compact_count++;
+    heap->moved_list = Qfalse;
+}
+
+static void gc_update_references(rb_objspace_t *objspace);
+static void gc_unlink_moved_list(rb_objspace_t *objspace, VALUE moved_list_head);
+
+static void
+gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
+{
+    heap->compact_cursor = NULL;
+    gc_update_references(objspace);
+    rb_clear_constant_cache();
+    gc_unlink_moved_list(objspace, heap->moved_list);
+    heap->moved_list = Qfalse;
+    objspace->flags.compact = 0;
+    objspace->flags.during_compacting = FALSE;
+}
+
 static void
 gc_sweep(rb_objspace_t *objspace)
 {
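`gc_compact_finish` encodes a strict ordering: references are updated (and the constant cache, which can hold raw object addresses, is cleared) before `gc_unlink_moved_list` releases the forwarding slots; otherwise a not-yet-updated pointer could still target a freed slot. A sketch of what the unlink step plausibly does, with a hypothetical node type since the real list is threaded through `VALUE heap->moved_list`:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a T_MOVED slot on the moved list. */
struct moved {
    struct moved *next;
};

/* Walk the list built during sweep and return each forwarding slot to
 * the heap; free() stands in for handing the slot back to a freelist. */
static void unlink_moved_list(struct moved *head)
{
    while (head) {
        struct moved *next = head->next;
        free(head);
        head = next;
    }
}

int main(void)
{
    struct moved *b = malloc(sizeof *b);
    struct moved *a = malloc(sizeof *a);
    a->next = b;
    b->next = NULL;
    unlink_moved_list(a);
    puts("moved list released");
    return 0;
}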
@@ -4468,7 +4580,13 @@ gc_sweep(rb_objspace_t *objspace)
     gc_prof_sweep_timer_start(objspace);
 #endif
     gc_sweep_start(objspace);
+    if (objspace->flags.compact) {
+        gc_compact_start(objspace, heap_eden);
+    }
     gc_sweep_rest(objspace);
+    if (objspace->flags.compact) {
+        gc_compact_finish(objspace, heap_eden);
+    }
 #if !GC_ENABLE_LAZY_SWEEP
     gc_prof_sweep_timer_stop(objspace);
 #endif
@@ -7283,6 +7401,8 @@ gc_start(rb_objspace_t *objspace, int reason)
 
     /* reason may be clobbered, later, so keep set immediate_sweep here */
     objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
+    objspace->flags.compact = !!((unsigned)reason & GPR_FLAG_COMPACT);
+    objspace->flags.during_compacting = TRUE;
 
     if (!heap_allocated_pages) return FALSE; /* heap is not ready */
     if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -7544,17 +7664,22 @@ garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
 }
 
 static VALUE
-gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
+gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
 {
     rb_objspace_t *objspace = &rb_objspace;
     int reason = GPR_FLAG_FULL_MARK |
                  GPR_FLAG_IMMEDIATE_MARK |
                  GPR_FLAG_IMMEDIATE_SWEEP |
                  GPR_FLAG_METHOD;
 
-    if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
-    if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
-    if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
+    /* For now, compact implies full mark / sweep, so ignore other flags */
+    if (RTEST(compact)) {
+        reason |= GPR_FLAG_COMPACT;
+    } else {
+        if (!RTEST(full_mark))       reason &= ~GPR_FLAG_FULL_MARK;
+        if (!RTEST(immediate_mark))  reason &= ~GPR_FLAG_IMMEDIATE_MARK;
+        if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
+    }
 
     garbage_collect(objspace, reason);
     gc_finalize_deferred(objspace);
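With the extra parameter, a truthy `compact` simply adds GPR_FLAG_COMPACT on top of the forced full-mark reason; the Ruby-level signature that feeds this function isn't shown in the diff. A self-contained check of the resulting mask, re-declaring only the two flag values visible in this diff:

#include <assert.h>

/* Values copied from the GPR_FLAG enum at the top of this diff; the
 * other reason bits are omitted because the diff doesn't show them. */
enum {
    GPR_FLAG_FULL_MARK = 0x10000,
    GPR_FLAG_COMPACT   = 0x20000
};

int main(void)
{
    unsigned reason = GPR_FLAG_FULL_MARK;   /* always set for compaction */
    reason |= GPR_FLAG_COMPACT;             /* the RTEST(compact) branch */

    assert(reason == 0x30000);
    assert(reason & GPR_FLAG_COMPACT);      /* gc_start latches flags.compact */
    return 0;
}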