diff --git a/src/snmalloc/mem/corealloc.h b/src/snmalloc/mem/corealloc.h
index 58763aa64..1a8ce98aa 100644
--- a/src/snmalloc/mem/corealloc.h
+++ b/src/snmalloc/mem/corealloc.h
@@ -642,7 +642,7 @@ namespace snmalloc
           Allocator* alloc,
           smallsizeclass_t sizeclass,
           freelist::Iter<>* fl,
-          size_t size) -> void* {
+          size_t size) SNMALLOC_FAST_PATH_LAMBDA {
           return alloc->small_refill(sizeclass, *fl, size);
         },
         this,
@@ -673,9 +673,9 @@ namespace snmalloc
       }
 
       return self->handle_message_queue(
-        [](Allocator* self, size_t size) -> void* {
+        [](Allocator* self, size_t size) SNMALLOC_FAST_PATH_LAMBDA {
           return CheckInit::check_init(
-            [self, size]() {
+            [self, size]() SNMALLOC_FAST_PATH_LAMBDA {
               if (size > bits::one_at_bit(bits::BITS - 1))
               {
                 // Cannot allocate something that is more that half the size of
@@ -728,7 +728,7 @@ namespace snmalloc
 
               return Conts::failure(size);
             },
-            [](Allocator* a, size_t size) {
+            [](Allocator* a, size_t size) SNMALLOC_FAST_PATH_LAMBDA {
               return alloc_not_small(size, a);
             },
             size);
@@ -812,7 +812,7 @@ namespace snmalloc
       smallsizeclass_t sizeclass, freelist::Iter<>& fast_free_list, size_t size)
     {
       return CheckInit::check_init(
-        [this, size, sizeclass, &fast_free_list]() -> void* {
+        [this, size, sizeclass, &fast_free_list]() SNMALLOC_FAST_PATH_LAMBDA {
           size_t rsize = sizeclass_to_size(sizeclass);
 
           // No existing free list get a new slab.
@@ -862,7 +862,7 @@ namespace snmalloc
           auto r = finish_alloc(p, size);
           return ticker.check_tick(r);
         },
-        [](Allocator* a, size_t size) {
+        [](Allocator* a, size_t size) SNMALLOC_FAST_PATH_LAMBDA {
           return a->small_alloc(size);
         },
         size);
diff --git a/src/snmalloc/mem/freelist_queue.h b/src/snmalloc/mem/freelist_queue.h
index b58a7bdb2..452033249 100644
--- a/src/snmalloc/mem/freelist_queue.h
+++ b/src/snmalloc/mem/freelist_queue.h
@@ -104,13 +104,17 @@ namespace snmalloc
       invariant();
       freelist::Object::atomic_store_null(last, Key, Key_tweak);
 
-      // The following non-linearisable effect is normally benign,
-      // but could lead to a remote list become completely detached
-      // during a fork in a multi-threaded process. This would lead
-      // to a memory leak, which is probably the least of your problems
-      // if you forked in during a deallocation.
-      PreventFork pf;
-      snmalloc::UNUSED(pf);
+      // // The following non-linearisable effect is normally benign,
+      // // but could lead to a remote list become completely detached
+      // // during a fork in a multi-threaded process. This would lead
+      // // to a memory leak, which is probably the least of your problems
+      // // if you forked in during a deallocation. We can prevent this
+      // // with the following code, but it is not currently enabled as it
+      // // has negative performance impact.
+      // // An alternative would be to reset the queue on the child postfork
+      // // handler to ensure that the queue has not been blackholed.
+      // PreventFork pf;
+      // snmalloc::UNUSED(pf);
 
       // Exchange needs to be acq_rel.
       // * It needs to be a release, so nullptr in next is visible.
diff --git a/src/snmalloc/mem/remotecache.h b/src/snmalloc/mem/remotecache.h
index 3d5ed70b8..045ab141b 100644
--- a/src/snmalloc/mem/remotecache.h
+++ b/src/snmalloc/mem/remotecache.h
@@ -268,7 +268,7 @@ namespace snmalloc
         entropy,
         [this](
           RemoteAllocator::alloc_id_t target_id,
-          capptr::Alloc msg) {
+          capptr::Alloc msg) SNMALLOC_FAST_PATH_LAMBDA {
           forward(target_id, msg);
         });
     }
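
Illustrative note (not part of the diff): the first group of hunks swaps the plain "-> void*" trailing return types on the slow-path continuation lambdas for SNMALLOC_FAST_PATH_LAMBDA, which presumably marks the lambda call operators for aggressive inlining on the allocation fast path. The sketch below shows one way such a lambda attribute macro can be spelled on GCC/Clang. FAST_PATH_LAMBDA and the dispatch helper are hypothetical stand-ins for illustration only; snmalloc's actual macro is defined in its own headers and may expand differently.

#include <cstdio>

// Hypothetical stand-in for an always-inline lambda marker. It is assumed
// here that the compiler accepts a GNU attribute between a lambda's
// parameter list and its body (GCC/Clang do); on other compilers it
// expands to nothing.
#if defined(__GNUC__) || defined(__clang__)
#  define FAST_PATH_LAMBDA __attribute__((always_inline))
#else
#  define FAST_PATH_LAMBDA
#endif

// A tiny analogue of the continuation pattern in the diff: the lambda is
// handed to a generic dispatcher, and the attribute asks the compiler to
// inline its call operator rather than leave an out-of-line call on the
// fast path.
template<typename F>
static int dispatch(F f, int size)
{
  return f(size);
}

int main()
{
  int r = dispatch(
    [](int size) FAST_PATH_LAMBDA { return size * 2; }, 21);
  printf("%d\n", r);
  return 0;
}

The attribute only changes inlining behaviour, not the lambda's deduced return type, which is why the diff can drop the explicit "-> void*" annotations without changing what the continuations return.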