@@ -323,6 +323,24 @@ JL_DLLEXPORT void jl_gc_prepare_to_collect(void)
323323 errno = last_errno ;
324324}
325325
// Pin `obj` so that MMTk will not move it during collection.
// Returns the value reported by mmtk_pin_object — presumably nonzero if the
// object was newly pinned (vs. already pinned); TODO confirm against the MMTk API.
JL_DLLEXPORT unsigned char jl_gc_pin_object(void *obj) {
    return mmtk_pin_object(obj);
}
330+ JL_DLLEXPORT void jl_gc_notify_thread_yield (jl_ptls_t ptls , void * ctx ) {
331+ if (ctx == NULL ) {
332+ // Save the context for the thread as it was running at the time of the call
333+ int r = getcontext (& ptls -> gc_tls .ctx_at_the_time_gc_started );
334+ if (r == -1 ) {
335+ jl_safe_printf ("Failed to save context for conservative scanning\n" );
336+ abort ();
337+ }
338+ return ;
339+ }
340+ memcpy (& ptls -> gc_tls .ctx_at_the_time_gc_started , ctx , sizeof (ucontext_t ));
341+ }
342+
343+
326344// ========================================================================= //
327345// GC Statistics
328346// ========================================================================= //
@@ -847,24 +865,25 @@ STATIC_INLINE void* mmtk_immix_alloc_fast(MMTkMutatorContext* mutator, size_t si
847865 return bump_alloc_fast (mutator , (uintptr_t * )& allocator -> cursor , (intptr_t )allocator -> limit , size , align , offset , 0 );
848866}
849867
850- inline void mmtk_immix_post_alloc_slow (MMTkMutatorContext * mutator , void * obj , size_t size ) {
851- mmtk_post_alloc (mutator , obj , size , 0 );
852- }
853-
// Fast-path post-allocation hook for immix-space objects.
// When the binding is configured with valid-object (VO) bits (required for
// conservative scanning / moving support), mark the new object as valid by
// setting its bit in the VO side-metadata table. Otherwise this is a no-op.
STATIC_INLINE void mmtk_immix_post_alloc_fast(MMTkMutatorContext* mutator, void* obj, size_t size) {
    if (MMTK_NEEDS_VO_BIT) {
        mmtk_set_side_metadata(MMTK_SIDE_VO_BIT_BASE_ADDRESS, obj);
    }
}
858873
// Fast-path bump-pointer allocation from this mutator's immortal space.
// Delegates to the shared bump_alloc_fast helper using the immortal allocator's
// cursor/limit. The trailing `1` presumably selects the immortal slow path on
// overflow (the immix variant passes 0) — TODO confirm against bump_alloc_fast.
STATIC_INLINE void* mmtk_immortal_alloc_fast(MMTkMutatorContext* mutator, size_t size, size_t align, size_t offset) {
    BumpAllocator* allocator = &mutator->allocators.bump_pointer[MMTK_IMMORTAL_BUMP_ALLOCATOR];
    return bump_alloc_fast(mutator, (uintptr_t*)&allocator->cursor, (uintptr_t)allocator->limit, size, align, offset, 1);
}
863878
// Fast-path post-allocation hook for immortal-space objects.
// With an object barrier in use, set the object's log bit so the first write
// into it takes the write-barrier slow path; when VO bits are enabled, also
// mark the object valid in the VO side-metadata table.
STATIC_INLINE void mmtk_immortal_post_alloc_fast(MMTkMutatorContext* mutator, void* obj, size_t size) {
    if (MMTK_NEEDS_WRITE_BARRIER == MMTK_OBJECT_BARRIER) {
        mmtk_set_side_metadata(MMTK_SIDE_LOG_BIT_BASE_ADDRESS, obj);
    }

    if (MMTK_NEEDS_VO_BIT) {
        mmtk_set_side_metadata(MMTK_SIDE_VO_BIT_BASE_ADDRESS, obj);
    }
}
869888
870889JL_DLLEXPORT jl_value_t * jl_mmtk_gc_alloc_default (jl_ptls_t ptls , int osize , size_t align , void * ty )
@@ -1042,6 +1061,16 @@ jl_value_t *jl_gc_permobj(size_t sz, void *ty) JL_NOTSAFEPOINT
10421061 return jl_valueof (o );
10431062}
10441063
1064+ jl_value_t * jl_gc_permsymbol (size_t sz ) JL_NOTSAFEPOINT
1065+ {
1066+ jl_taggedvalue_t * tag = (jl_taggedvalue_t * )jl_gc_perm_alloc (sz , 0 , sizeof (void * ), 0 );
1067+ jl_value_t * sym = jl_valueof (tag );
1068+ jl_ptls_t ptls = jl_current_task -> ptls ;
1069+ jl_set_typetagof (sym , jl_symbol_tag , 0 ); // We need to set symbol tag. The GC tag doesnt matter.
1070+ mmtk_immortal_post_alloc_fast (& ptls -> gc_tls .mmtk_mutator , sym , sz );
1071+ return sym ;
1072+ }
1073+
10451074JL_DLLEXPORT void * jl_gc_managed_malloc (size_t sz )
10461075{
10471076 jl_ptls_t ptls = jl_current_task -> ptls ;
@@ -1079,6 +1108,47 @@ void jl_gc_notify_image_load(const char* img_data, size_t len)
10791108 mmtk_set_vm_space ((void * )img_data , len );
10801109}
10811110
// Notify MMTk that `len` bytes at `img_data` were allocated for a system image,
// so the region gets immortal-space post-allocation treatment (side metadata).
void jl_gc_notify_image_alloc(const char* img_data, size_t len)
{
    mmtk_immortal_region_post_alloc((void*)img_data, len);
}
1115+
1116+ // ========================================================================= //
1117+ // Write Barriers
1118+ // ========================================================================= //
1119+
1120+ extern const void * MMTK_SIDE_LOG_BIT_BASE_ADDRESS ;
1121+ extern const void * MMTK_SIDE_VO_BIT_BASE_ADDRESS ;
1122+
1123+ // Directly call into MMTk for write barrier (debugging only)
1124+ STATIC_INLINE void mmtk_gc_wb_full (const void * parent , const void * ptr ) JL_NOTSAFEPOINT
1125+ {
1126+ jl_task_t * ct = jl_current_task ;
1127+ jl_ptls_t ptls = ct -> ptls ;
1128+ mmtk_object_reference_write_post (& ptls -> gc_tls .mmtk_mutator , parent , ptr );
1129+ }
1130+
1131+ // Inlined fastpath
1132+ STATIC_INLINE void mmtk_gc_wb_fast (const void * parent , const void * ptr ) JL_NOTSAFEPOINT
1133+ {
1134+ if (MMTK_NEEDS_WRITE_BARRIER == MMTK_OBJECT_BARRIER ) {
1135+ intptr_t addr = (intptr_t ) (void * ) parent ;
1136+ uint8_t * meta_addr = (uint8_t * ) (MMTK_SIDE_LOG_BIT_BASE_ADDRESS ) + (addr >> 6 );
1137+ intptr_t shift = (addr >> 3 ) & 0b111 ;
1138+ uint8_t byte_val = * meta_addr ;
1139+ if (((byte_val >> shift ) & 1 ) == 1 ) {
1140+ jl_task_t * ct = jl_current_task ;
1141+ jl_ptls_t ptls = ct -> ptls ;
1142+ mmtk_object_reference_write_slow (& ptls -> gc_tls .mmtk_mutator , parent , ptr );
1143+ }
1144+ }
1145+ }
1146+
// Default write barrier used by the runtime: dispatches to the inlined fast path.
STATIC_INLINE void mmtk_gc_wb(const void *parent, const void *ptr) JL_NOTSAFEPOINT
{
    mmtk_gc_wb_fast(parent, ptr);
}
1151+
10821152// ========================================================================= //
10831153// Code specific to stock that is not supported by MMTk
10841154// ========================================================================= //
@@ -1208,6 +1278,51 @@ JL_DLLEXPORT jl_value_t *jl_gc_internal_obj_base_ptr(void *p)
12081278 return NULL ;
12091279}
12101280
// This macro currently uses malloc instead of alloca because this function exits
// after pushing the roots onto the gc_preserve_stack, which means the preserve_begin
// function's stack frame is destroyed (together with its alloca'd variables). Once we
// support lowering this code inside the same function that makes the
// preserve_begin/preserve_end calls, we should be able to simply use allocas.
// Note also that we use a separate stack for GC preserve roots to avoid calling free
// on a frame that was allocated with alloca instead of malloc, which could otherwise
// happen depending on the order in which JL_GC_POP() and jl_gc_preserve_end_hook() occur.
1288+
// Push a malloc'd GC-preserve frame with `n` zeroed root slots onto
// jl_p_gcpreserve_stack and point `rts_var` at the slot array.
// Frame layout matches a jl_gcframe_t: [0] = encoded nroots, [1] = prev frame,
// [2..] = the n root slots. Fixes vs. the original: the malloc result is
// checked before any pointer arithmetic (adding 2 to a NULL pointer is UB,
// and the subsequent stores would crash), and the whole thing is wrapped in
// do { } while (0) so it behaves as a single statement and keeps its
// temporary out of the caller's scope.
#define JL_GC_PUSHARGS_PRESERVE_ROOT_OBJS(rts_var, n)                         \
    do {                                                                      \
        void **gcp_frame = (void**)malloc(((n) + 2) * sizeof(jl_value_t*));   \
        if (gcp_frame == NULL) {                                              \
            jl_safe_printf("Failed to allocate a GC preserve frame\n");       \
            abort();                                                          \
        }                                                                     \
        gcp_frame[0] = (void*)JL_GC_ENCODE_PUSHARGS(n);                       \
        gcp_frame[1] = jl_p_gcpreserve_stack;                                 \
        rts_var = (jl_value_t**)&gcp_frame[2];                                \
        memset((void*)rts_var, 0, (n) * sizeof(jl_value_t*));                 \
        jl_p_gcpreserve_stack = (jl_gcframe_t*)gcp_frame;                     \
    } while (0)
// Pop and free the top GC-preserve frame, if any.
// Wrapped in do { } while (0) so the macro acts as one statement and the
// `gcp_curr` temporary stays scoped to the expansion — the original declared
// `curr` directly at the expansion site, which shadows/conflicts with caller
// variables and makes the macro unusable twice in one scope.
#define JL_GC_POP_PRESERVE_ROOT_OBJS()                                   \
    do {                                                                 \
        jl_gcframe_t *gcp_curr = jl_p_gcpreserve_stack;                  \
        if (gcp_curr) {                                                  \
            jl_p_gcpreserve_stack = gcp_curr->prev;                      \
            free(gcp_curr);                                              \
        }                                                                \
    } while (0)
1302+
1303+ // Add each argument as a tpin root object.
1304+ // However, we cannot use JL_GC_PUSH and JL_GC_POP since the slots should live
1305+ // beyond this function. Instead, we maintain a tpin stack by mallocing/freeing
1306+ // the frames for each of the preserve regions we encounter
1307+ JL_DLLEXPORT void jl_gc_preserve_begin_hook (int n , ...) JL_NOTSAFEPOINT
1308+ {
1309+ jl_value_t * * frame ;
1310+ JL_GC_PUSHARGS_PRESERVE_ROOT_OBJS (frame , n );
1311+ if (n == 0 ) return ;
1312+
1313+ va_list args ;
1314+ va_start (args , n );
1315+ for (int i = 0 ; i < n ; i ++ ) {
1316+ frame [i ] = va_arg (args , jl_value_t * );
1317+ }
1318+ va_end (args );
1319+ }
1320+
// Pop (and free) the preserve frame pushed by the matching
// jl_gc_preserve_begin_hook() call.
JL_DLLEXPORT void jl_gc_preserve_end_hook(void) JL_NOTSAFEPOINT
{
    JL_GC_POP_PRESERVE_ROOT_OBJS();
}
1325+
12111326#ifdef __cplusplus
12121327}
12131328#endif
0 commit comments