@@ -92,7 +92,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   mi_collect_t collect = *((mi_collect_t*)arg_collect);
   _mi_page_free_collect(page, collect >= MI_FORCE);
   if (mi_page_all_free(page)) {
-    // no more used blocks, free the page. 
+    // no more used blocks, free the page.
     // note: this will free retired pages as well.
     _mi_page_free(page, pq, collect >= MI_FORCE);
   }
@@ -133,15 +133,15 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
     // if all memory is freed by now, all segments should be freed.
     _mi_abandoned_reclaim_all(heap, &heap->tld->segments);
   }
-  
+
   // if abandoning, mark all pages to no longer add to delayed_free
   if (collect == MI_ABANDON) {
     mi_heap_visit_pages(heap, &mi_heap_page_never_delayed_free, NULL, NULL);
   }
 
-  // free thread delayed blocks.
+  // free all current thread delayed blocks.
   // (if abandoning, after this there are no more thread-delayed references into the pages.)
-  _mi_heap_delayed_free(heap);
+  _mi_heap_delayed_free_all(heap);
 
   // collect retired pages
   _mi_heap_collect_retired(heap, force);
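For context, this forced-collection path is what runs behind the public collection entry points. A minimal sketch, assuming only the documented `mi_collect` API from `mimalloc.h`:

```c
#include <mimalloc.h>

int main(void) {
  void* p = mi_malloc(1024);
  mi_free(p);
  // Force a full collect: with this change the collect path frees the
  // thread's delayed blocks via _mi_heap_delayed_free_all and then
  // returns retired pages and free segments.
  mi_collect(true /* force */);
  return 0;
}
```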
@@ -200,13 +200,14 @@ mi_heap_t* mi_heap_get_backing(void) {
   return bheap;
 }
 
-mi_heap_t* mi_heap_new(void) {
+mi_decl_nodiscard mi_heap_t* mi_heap_new_in_arena(mi_arena_id_t arena_id) {
   mi_heap_t* bheap = mi_heap_get_backing();
   mi_heap_t* heap = mi_heap_malloc_tp(bheap, mi_heap_t);  // todo: OS allocate in secure mode?
   if (heap == NULL) return NULL;
   _mi_memcpy_aligned(heap, &_mi_heap_empty, sizeof(mi_heap_t));
   heap->tld = bheap->tld;
   heap->thread_id = _mi_thread_id();
+  heap->arena_id = arena_id;
   _mi_random_split(&bheap->random, &heap->random);
   heap->cookie = _mi_heap_random_next(heap) | 1;
   heap->keys[0] = _mi_heap_random_next(heap);
@@ -218,6 +219,14 @@ mi_heap_t* mi_heap_new(void) {
   return heap;
 }
 
+mi_decl_nodiscard mi_heap_t* mi_heap_new(void) {
+  return mi_heap_new_in_arena(_mi_arena_id_none());
+}
+
+bool _mi_heap_memid_is_suitable(mi_heap_t* heap, size_t memid) {
+  return _mi_arena_memid_is_suitable(memid, heap->arena_id);
+}
+
 uintptr_t _mi_heap_random_next(mi_heap_t* heap) {
   return _mi_random_next(&heap->random);
 }
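With this hunk, `mi_heap_new` becomes a thin wrapper and the new `mi_heap_new_in_arena` binds a heap to a specific arena id. A hedged usage sketch, assuming the companion `mi_reserve_os_memory_ex` API from the same arena-id change set (its exact signature is an assumption here):

```c
#include <mimalloc.h>
#include <stdio.h>

int main(void) {
  // Reserve 64 MiB as an exclusive arena and retrieve its id
  // (assumed signature: size, commit, allow_large, exclusive, out id).
  mi_arena_id_t arena_id;
  if (mi_reserve_os_memory_ex(64 * 1024 * 1024, /*commit*/ false,
                              /*allow_large*/ false, /*exclusive*/ true,
                              &arena_id) != 0) {
    return 1;
  }
  // Allocations from this heap should now be served from that arena only,
  // which _mi_heap_memid_is_suitable checks when reusing memory.
  mi_heap_t* heap = mi_heap_new_in_arena(arena_id);
  void* p = mi_heap_malloc(heap, 128);
  printf("allocated %p from an arena-bound heap\n", p);
  mi_heap_delete(heap);  // migrate remaining pages back to the default heap
  return 0;
}
```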
@@ -251,7 +260,7 @@ static void mi_heap_free(mi_heap_t* heap) {
   // remove ourselves from the thread local heaps list
   // linear search but we expect the number of heaps to be relatively small
   mi_heap_t* prev = NULL;
-  mi_heap_t* curr = heap->tld->heaps; 
+  mi_heap_t* curr = heap->tld->heaps;
   while (curr != heap && curr != NULL) {
     prev = curr;
     curr = curr->next;
@@ -338,7 +347,20 @@ void mi_heap_destroy(mi_heap_t* heap) {
   }
 }
 
-
+void _mi_heap_destroy_all(void) {
+  mi_heap_t* bheap = mi_heap_get_backing();
+  mi_heap_t* curr = bheap->tld->heaps;
+  while (curr != NULL) {
+    mi_heap_t* next = curr->next;
+    if (curr->no_reclaim) {
+      mi_heap_destroy(curr);
+    }
+    else {
+      _mi_heap_destroy_pages(curr);
+    }
+    curr = next;
+  }
+}
 
 /* -----------------------------------------------------------
   Safe Heap delete
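One design note on the new `_mi_heap_destroy_all`: `curr->next` is read into `next` before the heap is torn down, because destroying the heap can free or reset the very structure `curr` points at. A small self-contained sketch of the same unlink-while-destroy pattern (hypothetical `node_t`, purely illustrative, not mimalloc code):

```c
#include <stdlib.h>

typedef struct node_s {
  struct node_s* next;
} node_t;

// Destroys every node in the list; the next pointer is saved first so the
// walk never dereferences memory that free() has already released.
static void destroy_all(node_t* head) {
  node_t* curr = head;
  while (curr != NULL) {
    node_t* next = curr->next;  // read before curr is freed
    free(curr);
    curr = next;
  }
}

int main(void) {
  node_t* head = NULL;
  for (int i = 0; i < 3; i++) {
    node_t* n = (node_t*)malloc(sizeof(node_t));
    n->next = head; head = n;
  }
  destroy_all(head);
  return 0;
}
```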
@@ -350,9 +372,9 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   if (from==NULL || from->page_count == 0) return;
 
   // reduce the size of the delayed frees
-  _mi_heap_delayed_free(from);
-  
-  // transfer all pages by appending the queues; this will set a new heap field 
+  _mi_heap_delayed_free_partial(from);
+
+  // transfer all pages by appending the queues; this will set a new heap field
   // so threads may do delayed frees in either heap for a while.
   // note: appending waits for each page to not be in the `MI_DELAYED_FREEING` state
   // so after this only the new heap will get delayed frees
@@ -365,17 +387,17 @@ static void mi_heap_absorb(mi_heap_t* heap, mi_heap_t* from) {
   }
   mi_assert_internal(from->page_count == 0);
 
-  // and do outstanding delayed frees in the `from` heap  
+  // and do outstanding delayed frees in the `from` heap
   // note: be careful here as the `heap` field in all those pages no longer point to `from`,
-  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a 
+  // turns out to be ok as `_mi_heap_delayed_free` only visits the list and calls a
   // the regular `_mi_free_delayed_block` which is safe.
-  _mi_heap_delayed_free(from);
+  _mi_heap_delayed_free_all(from);
   #if !defined(_MSC_VER) || (_MSC_VER > 1900) // somehow the following line gives an error in VS2015, issue #353
   mi_assert_internal(mi_atomic_load_ptr_relaxed(mi_block_t,&from->thread_delayed_free) == NULL);
   #endif
 
   // and reset the `from` heap
-  mi_heap_reset_pages(from);  
+  mi_heap_reset_pages(from);
 }
 
 // Safe delete a heap without freeing any still allocated blocks in that heap.
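This absorb path backs the public `mi_heap_delete`: live pages migrate to the backing heap, so blocks allocated from the deleted heap stay valid and can be freed later through the normal free path. A minimal sketch using the documented public API:

```c
#include <mimalloc.h>
#include <string.h>

int main(void) {
  mi_heap_t* heap = mi_heap_new();
  char* s = (char*)mi_heap_malloc(heap, 32);
  strcpy(s, "still valid after delete");
  // Delete the heap without freeing its blocks: its pages are absorbed
  // by the default heap, so `s` remains usable and is freed normally.
  mi_heap_delete(heap);
  mi_free(s);
  return 0;
}
```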
@@ -421,7 +443,7 @@ static mi_heap_t* mi_heap_of_block(const void* p) {
   mi_segment_t* segment = _mi_ptr_segment(p);
   bool valid = (_mi_ptr_cookie(segment) == segment->cookie);
   mi_assert_internal(valid);
-  if (mi_unlikely(!valid)) return NULL;
+  if mi_unlikely(!valid) return NULL;
   return mi_page_heap(_mi_segment_page_of(segment, p));
 }
 
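The `if mi_unlikely(!valid)` spelling only compiles because the hint macro expands to a fully parenthesized expression, so the `if` needs no extra parentheses of its own. A hedged sketch of such a macro (hypothetical `my_unlikely`; mimalloc's actual definition lives in its headers and may differ):

```c
#include <stdbool.h>
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#define my_unlikely(x)  (__builtin_expect(!!(x), false))
#else
#define my_unlikely(x)  (x)
#endif

static const char* check(const void* p) {
  // Expands to: if ((__builtin_expect(!!(p == NULL), false))) return "null";
  if my_unlikely(p == NULL) return "null";
  return "ok";
}

int main(void) {
  printf("%s\n", check(NULL));
  return 0;
}
```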
@@ -543,7 +565,7 @@ static bool mi_heap_visit_areas_page(mi_heap_t* heap, mi_page_queue_t* pq, mi_pa
   xarea.area.reserved = page->reserved * bsize;
   xarea.area.committed = page->capacity * bsize;
   xarea.area.blocks = _mi_page_start(_mi_page_segment(page), page, NULL);
-  xarea.area.used = page->used * bsize;
+  xarea.area.used = page->used; // number of blocks in use (#553)
   xarea.area.block_size = ubsize;
   xarea.area.full_block_size = bsize;
   return fun(heap, &xarea, arg);
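This last hunk changes `area.used` from a byte count to a block count (issue #553), so a visitor written against the public `mi_heap_visit_blocks` API should multiply by `full_block_size` to estimate bytes. A hedged sketch, assuming the documented visitor signature:

```c
#include <mimalloc.h>
#include <stdio.h>

// Per-area callback: with this change, area->used counts blocks in use,
// so used bytes are approximated as used * full_block_size.
static bool count_used(const mi_heap_t* heap, const mi_heap_area_t* area,
                       void* block, size_t block_size, void* arg) {
  (void)heap; (void)block; (void)block_size;
  if (area != NULL) {
    *(size_t*)arg += area->used * area->full_block_size;
  }
  return true;  // continue visiting
}

int main(void) {
  void* p = mi_malloc(1000);
  size_t used_bytes = 0;
  // visit_blocks = false: the callback fires once per area with block == NULL.
  mi_heap_visit_blocks(mi_heap_get_default(), false, &count_used, &used_bytes);
  printf("approx. used bytes: %zu (p=%p)\n", used_bytes, p);
  mi_free(p);
  return 0;
}
```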