Upgrade to mimalloc v2.2.2 #5508

Open · wants to merge 8 commits into main
7 changes: 4 additions & 3 deletions Makefile
@@ -2176,13 +2176,13 @@ ifdef USE_MIMALLOC
compat/mimalloc/bitmap.o \
compat/mimalloc/heap.o \
compat/mimalloc/init.o \
compat/mimalloc/libc.o \
compat/mimalloc/options.o \
compat/mimalloc/os.o \
compat/mimalloc/page.o \
compat/mimalloc/random.o \
compat/mimalloc/prim/windows/prim.o \
compat/mimalloc/prim/prim.o \
compat/mimalloc/segment.o \
compat/mimalloc/segment-cache.o \
compat/mimalloc/segment-map.o \
compat/mimalloc/stats.o

@@ -2201,7 +2201,8 @@ $(MIMALLOC_OBJS): COMPAT_CFLAGS += \
-Wno-pedantic \
-Wno-declaration-after-statement \
-Wno-old-style-definition \
-Wno-missing-prototypes
-Wno-missing-prototypes \
-Wno-implicit-function-declaration
endif
endif

1 change: 1 addition & 0 deletions compat/.gitattributes
@@ -1 +1,2 @@
/zlib-uncompress2.c whitespace=-indent-with-non-tab,-trailing-space
/mimalloc/**/* whitespace=-trailing-space
2 changes: 1 addition & 1 deletion compat/mimalloc/LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen
Copyright (c) 2018-2025 Microsoft Corporation, Daan Leijen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
193 changes: 129 additions & 64 deletions compat/mimalloc/alloc-aligned.c
@@ -15,68 +15,107 @@ terms of the MIT license. A copy of the license can be found in the file
// Aligned Allocation
// ------------------------------------------------------

// Fallback primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
mi_assert_internal(size <= PTRDIFF_MAX);
mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
static bool mi_malloc_is_naturally_aligned( size_t size, size_t alignment ) {
// objects up to `MI_MAX_ALIGN_GUARANTEE` are allocated aligned to their size (see `segment.c:_mi_segment_page_start`).
mi_assert_internal(_mi_is_power_of_two(alignment) && (alignment > 0));
if (alignment > size) return false;
if (alignment <= MI_MAX_ALIGN_SIZE) return true;
const size_t bsize = mi_good_size(size);
return (bsize <= MI_MAX_ALIGN_GUARANTEE && (bsize & (alignment-1)) == 0);
}
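
// Illustrative note (not part of the upstream change): for a power-of-two
// alignment, `bsize & (alignment-1)` equals `bsize % alignment`, so the final
// check above is a mask test. Assuming MI_MAX_ALIGN_SIZE == 16 and mi_good_size
// returning its argument for these sizes: size 64 with alignment 32 is
// naturally aligned (64 & 31 == 0), while size 48 with alignment 32 is not
// (48 & 31 == 16) and must take the over-allocation path.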

const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
const size_t padsize = size + MI_PADDING_SIZE;
#if MI_GUARDED
static mi_decl_restrict void* mi_heap_malloc_guarded_aligned(mi_heap_t* heap, size_t size, size_t alignment, bool zero) mi_attr_noexcept {
// use over-allocation for guarded blocks
mi_assert_internal(alignment > 0 && alignment < MI_BLOCK_ALIGNMENT_MAX);
const size_t oversize = size + alignment - 1;
void* base = _mi_heap_malloc_guarded(heap, oversize, zero);
void* p = mi_align_up_ptr(base, alignment);
mi_track_align(base, p, (uint8_t*)p - (uint8_t*)base, size);
mi_assert_internal(mi_usable_size(p) >= size);
mi_assert_internal(_mi_is_aligned(p, alignment));
return p;
}
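
// A minimal standalone sketch of the same over-allocate-and-round-up technique
// in plain C (illustrative only; mimalloc's real path goes through
// _mi_heap_malloc_guarded and mi_align_up_ptr above):
//
//   #include <stdint.h>
//   #include <stdlib.h>
//
//   static void* aligned_sketch(size_t size, size_t alignment) {  // alignment must be a power of two
//     uint8_t* base = malloc(size + alignment - 1);               // over-allocate by alignment-1
//     if (base == NULL) return NULL;
//     return (void*)(((uintptr_t)base + alignment - 1) & ~(uintptr_t)(alignment - 1));
//   }
//
// Note the sketch loses track of the original `base`; mimalloc instead recovers
// the block start from the page metadata so the aligned pointer can be freed directly.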

// use regular allocation if it is guaranteed to fit the alignment constraints
if (offset==0 && alignment<=padsize && padsize<=MI_MAX_ALIGN_GUARANTEE && (padsize&align_mask)==0) {
void* p = _mi_heap_malloc_zero(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
return p;
}
static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
const size_t rate = heap->guarded_sample_rate;
// only write if `rate!=0` so we don't write to the constant `_mi_heap_empty`
if (rate != 0) { heap->guarded_sample_rate = 0; }
void* p = _mi_heap_malloc_zero(heap, size, zero);
if (rate != 0) { heap->guarded_sample_rate = rate; }
return p;
}
#else
static void* mi_heap_malloc_zero_no_guarded(mi_heap_t* heap, size_t size, bool zero) {
return _mi_heap_malloc_zero(heap, size, zero);
}
#endif

// Fallback aligned allocation that over-allocates -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_overalloc(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
mi_assert_internal(size <= (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE));
mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));

void* p;
size_t oversize;
if mi_unlikely(alignment > MI_ALIGNMENT_MAX) {
if mi_unlikely(alignment > MI_BLOCK_ALIGNMENT_MAX) {
// use OS allocation for very large alignment and allocate inside a huge page (dedicated segment with 1 page)
// This can support alignments >= MI_SEGMENT_SIZE by ensuring the object can be aligned at a point in the
// first (and single) page such that the segment info is `MI_SEGMENT_SIZE` bytes before it (so it can be found by aligning the pointer down)
if mi_unlikely(offset != 0) {
// todo: cannot support offset alignment for very large alignments yet
#if MI_DEBUG > 0
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation with a very large alignment cannot be used with an alignment offset (size %zu, alignment %zu, offset %zu)\n", size, alignment, offset);
#endif
#endif
return NULL;
}
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
// note: no guarded allocation is used here as a non-zero alignment is passed
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
// zero afterwards as only the area from aligned_p onwards may be committed!
if (p == NULL) return NULL;
}
else {
// otherwise over-allocate
oversize = size + alignment - 1;
p = _mi_heap_malloc_zero(heap, oversize, zero);
oversize = (size < MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : size) + alignment - 1; // adjust for size <= 16; with size 0 and alignment 64k we would otherwise allocate a 64k block and return a pointer just beyond it.
p = mi_heap_malloc_zero_no_guarded(heap, oversize, zero);
if (p == NULL) return NULL;
}
mi_page_t* page = _mi_ptr_page(p);

// .. and align within the allocation
const uintptr_t align_mask = alignment - 1; // for any x, `(x & align_mask) == (x % alignment)`
const uintptr_t poffset = ((uintptr_t)p + offset) & align_mask;
const uintptr_t adjust = (poffset == 0 ? 0 : alignment - poffset);
mi_assert_internal(adjust < alignment);
void* aligned_p = (void*)((uintptr_t)p + adjust);
if (aligned_p != p) {
mi_page_t* page = _mi_ptr_page(p);
mi_page_set_has_aligned(page, true);
#if MI_GUARDED
// set tag to aligned so mi_usable_size works with guard pages
if (adjust >= sizeof(mi_block_t)) {
mi_block_t* const block = (mi_block_t*)p;
block->next = MI_BLOCK_TAG_ALIGNED;
}
#endif
_mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
}
// todo: expand padding if overallocated ?

mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
mi_assert_internal(mi_page_usable_block_size(page) >= adjust + size);
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
mi_assert_internal(mi_usable_size(aligned_p)>=size);
mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
#if MI_DEBUG > 1
mi_page_t* const apage = _mi_ptr_page(aligned_p);
void* unalign_p = _mi_page_ptr_unalign(apage, aligned_p);
mi_assert_internal(p == unalign_p);
#endif

// now zero the block if needed
if (alignment > MI_ALIGNMENT_MAX) {
// for the tracker, on huge aligned allocations only from the start of the large block is defined
if (alignment > MI_BLOCK_ALIGNMENT_MAX) {
// for the tracker, on huge aligned allocations only the memory from the start of the large block is defined
mi_track_mem_undefined(aligned_p, size);
if (zero) {
_mi_memzero_aligned(aligned_p, mi_usable_size(aligned_p));
@@ -85,10 +124,47 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*

if (p != aligned_p) {
mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
#if MI_GUARDED
mi_track_mem_defined(p, sizeof(mi_block_t));
#endif
}
return aligned_p;
}
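
// Worked example for the adjust computation above (illustrative): with
// p == 0x1008, offset == 0 and alignment == 0x40, align_mask == 0x3f, so
// poffset == 0x1008 & 0x3f == 0x08, adjust == 0x40 - 0x08 == 0x38, and
// aligned_p == 0x1008 + 0x38 == 0x1040, the first 64-byte boundary after p.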

// Generic primitive aligned allocation -- split out for better codegen
static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_generic(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
mi_assert_internal(alignment != 0 && _mi_is_power_of_two(alignment));
// we don't allocate more than MI_MAX_ALLOC_SIZE (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
if mi_unlikely(size > (MI_MAX_ALLOC_SIZE - MI_PADDING_SIZE)) {
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
}

// use regular allocation if it is guaranteed to fit the alignment constraints.
// this is important to try as the fast path in `mi_heap_malloc_zero_aligned` only works when there exists
// a page with the right block size, and if we always used the over-alloc fallback that would never happen.
if (offset == 0 && mi_malloc_is_naturally_aligned(size,alignment)) {
void* p = mi_heap_malloc_zero_no_guarded(heap, size, zero);
mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
const bool is_aligned_or_null = (((uintptr_t)p) & (alignment-1))==0;
if mi_likely(is_aligned_or_null) {
return p;
}
else {
// this should never happen if the `mi_malloc_is_naturally_aligned` check is correct.
mi_assert(false);
mi_free(p);
}
}

// fall back to over-allocation
return mi_heap_malloc_zero_aligned_at_overalloc(heap,size,alignment,offset,zero);
}


// Primitive aligned allocation
static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t size, const size_t alignment, const size_t offset, const bool zero) mi_attr_noexcept
{
@@ -100,33 +176,35 @@ static void* mi_heap_malloc_zero_aligned_at(mi_heap_t* const heap, const size_t
return NULL;
}

if mi_unlikely(size > PTRDIFF_MAX) { // we don't allocate more than PTRDIFF_MAX (see <https://sourceware.org/ml/libc-announce/2019/msg00001.html>)
#if MI_DEBUG > 0
_mi_error_message(EOVERFLOW, "aligned allocation request is too large (size %zu, alignment %zu)\n", size, alignment);
#endif
return NULL;
#if MI_GUARDED
if (offset==0 && alignment < MI_BLOCK_ALIGNMENT_MAX && mi_heap_malloc_use_guarded(heap,size)) {
return mi_heap_malloc_guarded_aligned(heap, size, alignment, zero);
}
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
const size_t padsize = size + MI_PADDING_SIZE; // note: cannot overflow due to earlier size > PTRDIFF_MAX check
#endif

// try first if there happens to be a small block available with just the right alignment
if mi_likely(padsize <= MI_SMALL_SIZE_MAX && alignment <= padsize) {
if mi_likely(size <= MI_SMALL_SIZE_MAX && alignment <= size) {
const uintptr_t align_mask = alignment-1; // for any x, `(x & align_mask) == (x % alignment)`
const size_t padsize = size + MI_PADDING_SIZE;
mi_page_t* page = _mi_heap_get_free_small_page(heap, padsize);
const bool is_aligned = (((uintptr_t)page->free+offset) & align_mask)==0;
if mi_likely(page->free != NULL && is_aligned)
{
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc, size);
#endif
void* p = _mi_page_malloc(heap, page, padsize, zero); // TODO: inline _mi_page_malloc
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
mi_track_malloc(p,size,zero);
return p;
if mi_likely(page->free != NULL) {
const bool is_aligned = (((uintptr_t)page->free + offset) & align_mask)==0;
if mi_likely(is_aligned)
{
#if MI_STAT>1
mi_heap_stat_increase(heap, malloc_requested, size);
#endif
void* p = (zero ? _mi_page_malloc_zeroed(heap,page,padsize) : _mi_page_malloc(heap,page,padsize)); // call specific page malloc for better codegen
mi_assert_internal(p != NULL);
mi_assert_internal(((uintptr_t)p + offset) % alignment == 0);
mi_track_malloc(p,size,zero);
return p;
}
}
}
// fallback
return mi_heap_malloc_zero_aligned_at_fallback(heap, size, alignment, offset, zero);

// fallback to generic aligned allocation
return mi_heap_malloc_zero_aligned_at_generic(heap, size, alignment, offset, zero);
}
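
// Note on the small fast path above (illustrative): it only pays off when the
// page's next free block already sits on the requested boundary. For example,
// with alignment == 16 and offset == 0, `page->free` is returned directly when
// ((uintptr_t)page->free & 15) == 0; otherwise control falls through to
// mi_heap_malloc_zero_aligned_at_generic.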


@@ -139,27 +217,12 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned_at(mi_heap_t* he
}

mi_decl_nodiscard mi_decl_restrict void* mi_heap_malloc_aligned(mi_heap_t* heap, size_t size, size_t alignment) mi_attr_noexcept {
if mi_unlikely(alignment == 0 || !_mi_is_power_of_two(alignment)) return NULL;
#if !MI_PADDING
// without padding, any small sized allocation is naturally aligned (see also `_mi_segment_page_start`)
if mi_likely(_mi_is_power_of_two(size) && size >= alignment && size <= MI_SMALL_SIZE_MAX)
#else
// with padding, we can only guarantee this for fixed alignments
if mi_likely((alignment == sizeof(void*) || (alignment == MI_MAX_ALIGN_SIZE && size > (MI_MAX_ALIGN_SIZE/2)))
&& size <= MI_SMALL_SIZE_MAX)
#endif
{
// fast path for common alignment and size
return mi_heap_malloc_small(heap, size);
}
else {
return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}
return mi_heap_malloc_aligned_at(heap, size, alignment, 0);
}
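
// Usage sketch (hypothetical caller, not part of this diff): the public
// wrappers above reduce aligned allocation to mi_heap_malloc_aligned_at with
// offset 0, e.g.
//
//   void* p = mi_malloc_aligned(200, 64);   // >= 200 usable bytes, 64-byte aligned
//   assert(((uintptr_t)p & 63) == 0 && mi_usable_size(p) >= 200);
//   mi_free(p);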

// ensure a definition is emitted
#if defined(__cplusplus)
static void* _mi_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
void* _mi_extern_heap_malloc_aligned = (void*)&mi_heap_malloc_aligned;
#endif

// ------------------------------------------------------
@@ -227,9 +290,9 @@ static void* mi_heap_realloc_zero_aligned_at(mi_heap_t* heap, void* p, size_t ne
void* newp = mi_heap_malloc_aligned_at(heap,newsize,alignment,offset);
if (newp != NULL) {
if (zero && newsize > size) {
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
_mi_memzero((uint8_t*)newp + start, newsize - start);
// also set last word in the previous allocation to zero to ensure any padding is zero-initialized
size_t start = (size >= sizeof(intptr_t) ? size - sizeof(intptr_t) : 0);
_mi_memzero((uint8_t*)newp + start, newsize - start);
}
_mi_memcpy_aligned(newp, p, (newsize > size ? size : newsize));
mi_free(p); // only free if successful
@@ -296,3 +359,5 @@ mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
}

