diff --git a/src/snmalloc/aal/address.h b/src/snmalloc/aal/address.h
index 2a9d614ae..d10474c27 100644
--- a/src/snmalloc/aal/address.h
+++ b/src/snmalloc/aal/address.h
@@ -290,4 +290,14 @@ namespace snmalloc
     return static_cast<size_t>(a - pointer_align_down<alignment>(a));
   }
 
+  /**
+   * Convert an address_t to a pointer. The returned pointer should never be
+   * followed. On CHERI, following this pointer will result in a capability
+   * violation.
+   */
+  template<typename T>
+  SNMALLOC_FAST_PATH_INLINE T* useless_ptr_from_addr(address_t p)
+  {
+    return reinterpret_cast<T*>(static_cast<uintptr_t>(p));
+  }
 } // namespace snmalloc
diff --git a/src/snmalloc/ds_core/seqset.h b/src/snmalloc/ds_core/seqset.h
index ef854958c..600ec07df 100644
--- a/src/snmalloc/ds_core/seqset.h
+++ b/src/snmalloc/ds_core/seqset.h
@@ -83,6 +83,12 @@ namespace snmalloc
 #endif
     }
 
+  public:
+    /**
+     * Empty queue
+     */
+    constexpr SeqSet() = default;
+
     /**
      * Check for empty
      */
@@ -95,12 +101,6 @@ namespace snmalloc
       return head.next == &head;
     }
 
-  public:
-    /**
-     * Empty queue
-     */
-    constexpr SeqSet() = default;
-
     /**
      * Remove an element from the queue
      *
diff --git a/src/snmalloc/mem/corealloc.h b/src/snmalloc/mem/corealloc.h
index 93df53e66..4b5b9828f 100644
--- a/src/snmalloc/mem/corealloc.h
+++ b/src/snmalloc/mem/corealloc.h
@@ -61,6 +61,12 @@ namespace snmalloc
       uint16_t length = 0;
     } alloc_classes[NUM_SMALL_SIZECLASSES]{};
 
+    /**
+     * The set of all slabs and large allocations
+     * from this allocator that are full or almost full.
+     */
+    SeqSet<BackendSlabMetadata> laden{};
+
     /**
      * Local entropy source and current version of keys for
      * this thread
@@ -420,6 +426,9 @@ namespace snmalloc
       UNUSED(size);
 #endif
 
+      // Remove from set of fully used slabs.
+      meta->node.remove();
+
       Config::Backend::dealloc_chunk(
         get_backend_local_state(), *meta, p, size);
 
@@ -436,6 +445,9 @@ namespace snmalloc
         // Wake slab up.
         meta->set_not_sleeping(sizeclass);
 
+        // Remove from set of fully used slabs.
+        meta->node.remove();
+
         alloc_classes[sizeclass].available.insert(meta);
         alloc_classes[sizeclass].length++;
 
@@ -744,6 +756,10 @@ namespace snmalloc
         alloc_classes[sizeclass].length++;
         sl.insert(meta);
       }
+      else
+      {
+        laden.insert(meta);
+      }
 
       auto r = finish_alloc(p, sizeclass);
       return ticker.check_tick(r);
@@ -794,7 +810,8 @@ namespace snmalloc
       }
 
       // Set meta slab to empty.
-      meta->initialise(sizeclass);
+      meta->initialise(
+        sizeclass, address_cast(slab), entropy.get_free_list_key());
 
       // Build a free list for the slab
       alloc_new_list(slab, meta, rsize, slab_size, entropy);
@@ -811,6 +828,10 @@ namespace snmalloc
         alloc_classes[sizeclass].length++;
         alloc_classes[sizeclass].available.insert(meta);
       }
+      else
+      {
+        laden.insert(meta);
+      }
 
       auto r = finish_alloc(p, sizeclass);
       return ticker.check_tick(r);
@@ -864,6 +885,14 @@ namespace snmalloc
         dealloc_local_slabs(sizeclass);
       }
 
+      laden.iterate([this, domesticate](
+                      BackendSlabMetadata* meta) SNMALLOC_FAST_PATH_LAMBDA {
+        if (!meta->is_large())
+        {
+          meta->free_queue.validate(entropy.get_free_list_key(), domesticate);
+        }
+      });
+
       return posted;
     }
@@ -883,7 +912,7 @@ namespace snmalloc
       c->remote_allocator = public_state();
 
       // Set up remote cache.
-      c->remote_dealloc_cache.init();
+      c->remote_dealloc_cache.init(entropy.get_free_list_key());
     }
 
     /**
@@ -892,28 +921,46 @@ namespace snmalloc
      */
     bool debug_is_empty_impl(bool* result)
     {
-      auto test = [&result](auto& queue, smallsizeclass_t size_class) {
-        queue.iterate([&result, size_class](auto slab_metadata) {
+      auto& key = entropy.get_free_list_key();
+
+      auto error = [&result, &key](auto slab_metadata) {
+        auto slab_interior = slab_metadata->get_slab_interior(key);
+        const PagemapEntry& entry =
+          Config::Backend::get_metaentry(slab_interior);
+        SNMALLOC_ASSERT(slab_metadata == entry.get_slab_metadata());
+        auto size_class = entry.get_sizeclass();
+        auto slab_size = sizeclass_full_to_slab_size(size_class);
+        auto slab_start = bits::align_down(slab_interior, slab_size);
+
+        if (result != nullptr)
+          *result = false;
+        else
+          report_fatal_error(
+            "debug_is_empty: found non-empty allocator: size={} on "
+            "slab_start {}",
+            sizeclass_full_to_size(size_class),
+            slab_start);
+      };
+
+      auto test = [&error](auto& queue) {
+        queue.iterate([&error](auto slab_metadata) {
           if (slab_metadata->needed() != 0)
           {
-            if (result != nullptr)
-              *result = false;
-            else
-              report_fatal_error(
-                "debug_is_empty: found non-empty allocator: size={} ({})",
-                sizeclass_to_size(size_class),
-                size_class);
+            error(slab_metadata);
           }
         });
       };
 
       bool sent_something = flush(true);
 
-      smallsizeclass_t size_class = 0;
       for (auto& alloc_class : alloc_classes)
       {
-        test(alloc_class.available, size_class);
-        size_class++;
+        test(alloc_class.available);
+      }
+
+      if (!laden.is_empty())
+      {
+        error(laden.peek());
       }
 
       // Place the static stub message on the queue.
diff --git a/src/snmalloc/mem/freelist.h b/src/snmalloc/mem/freelist.h
index 335f12881..2830f9b8f 100644
--- a/src/snmalloc/mem/freelist.h
+++ b/src/snmalloc/mem/freelist.h
@@ -115,7 +115,6 @@ namespace snmalloc
      class T
      {
        template<
-         bool,
          bool,
          SNMALLOC_CONCEPT(capptr::IsBound),
          SNMALLOC_CONCEPT(capptr::IsBound)>
@@ -220,7 +219,6 @@ namespace snmalloc
        return reinterpret_cast<Object::T<BQueue>*>(ptr);
      }
 
-    private:
      /**
       * Involutive encryption with raw pointers
       */
@@ -247,7 +245,6 @@ namespace snmalloc
        }
      }
 
-    public:
      /**
       * Encode next. We perform two convenient little bits of type-level
       * sleight of hand here:
@@ -506,7 +503,6 @@ namespace snmalloc
    */
   template<
     bool RANDOM,
-    bool INIT = true,
     SNMALLOC_CONCEPT(capptr::IsBound) BView = capptr::bounds::Alloc,
     SNMALLOC_CONCEPT(capptr::IsBound) BQueue = capptr::bounds::AllocWild>
   class Builder
@@ -532,7 +528,7 @@ namespace snmalloc
      // This enables branch free enqueuing.
      std::array<void*, LENGTH> end{nullptr};
 
-     Object::BQueuePtr<BQueue>* cast_end(uint32_t ix)
+     [[nodiscard]] Object::BQueuePtr<BQueue>* cast_end(uint32_t ix) const
      {
        return reinterpret_cast<Object::BQueuePtr<BQueue>*>(end[ix]);
      }
@@ -542,7 +538,7 @@ namespace snmalloc
        end[ix] = reinterpret_cast<void*>(p);
      }
 
-     Object::BHeadPtr<void, BQueue> cast_head(uint32_t ix)
+     [[nodiscard]] Object::BHeadPtr<void, BQueue> cast_head(uint32_t ix) const
      {
        return Object::BHeadPtr<void, BQueue>::unsafe_from(
          static_cast<Object::T<BQueue>*>(head[ix]));
      }
@@ -551,13 +547,7 @@ namespace snmalloc
      std::array<uint16_t, LENGTH> length{};
 
    public:
-     constexpr Builder()
-     {
-       if (INIT)
-       {
-         init();
-       }
-     }
+     constexpr Builder() = default;
 
      /**
       * Checks if the builder contains any elements.
@@ -629,8 +619,8 @@ namespace snmalloc
       * and is thus subject to encoding if the next_object pointers
       * encoded.
       */
-     Object::BHeadPtr<void, BQueue>
-     read_head(uint32_t index, const FreeListKey& key)
+     [[nodiscard]] Object::BHeadPtr<void, BQueue>
+     read_head(uint32_t index, const FreeListKey& key) const
      {
        return Object::decode_next(
          address_cast(&head[index]), cast_head(index), key);
@@ -688,7 +678,7 @@ namespace snmalloc
      /**
       * Set the builder to a not building state.
       */
-     constexpr void init()
+     constexpr void init(address_t slab, const FreeListKey& key)
      {
        for (size_t i = 0; i < LENGTH; i++)
        {
@@ -697,6 +687,16 @@ namespace snmalloc
          {
            length[i] = 0;
          }
+
+         // Head is not live when a builder is initialised.
+         // We use this slot to store a pointer into the slab for the
+         // allocations. This establishes the invariant that head is
+         // always a (possibly encoded) pointer into the slab, and thus
+         // the free list builder always knows which slab it refers to.
+         head[i] = Object::code_next(
+           address_cast(&head[i]),
+           useless_ptr_from_addr<Object::T<BQueue>>(slab),
+           key);
        }
      }
@@ -718,7 +718,7 @@ namespace snmalloc
        // empty, but you are not allowed to call this in the empty case.
        auto last = Object::BHeadPtr<void, BQueue>::unsafe_from(
          Object::from_next_ptr(cast_end(0)));
-       init();
+       init(address_cast(head[0]), key);
 
        return {first, last};
      }
diff --git a/src/snmalloc/mem/localalloc.h b/src/snmalloc/mem/localalloc.h
index 82d75dabd..f2827be88 100644
--- a/src/snmalloc/mem/localalloc.h
+++ b/src/snmalloc/mem/localalloc.h
@@ -199,7 +199,11 @@ namespace snmalloc
 
       // Initialise meta data for a successful large allocation.
       if (meta != nullptr)
-        meta->initialise_large();
+      {
+        meta->initialise_large(
+          address_cast(chunk), local_cache.entropy.get_free_list_key());
+        core_alloc->laden.insert(meta);
+      }
 
       if (zero_mem == YesZero && chunk.unsafe_ptr() != nullptr)
       {
diff --git a/src/snmalloc/mem/metadata.h b/src/snmalloc/mem/metadata.h
index fabf85611..9dc21eb13 100644
--- a/src/snmalloc/mem/metadata.h
+++ b/src/snmalloc/mem/metadata.h
@@ -440,12 +440,13 @@ namespace snmalloc
     /**
      * Initialise FrontendSlabMetadata for a slab.
      */
-    void initialise(smallsizeclass_t sizeclass)
+    void initialise(
+      smallsizeclass_t sizeclass, address_t slab, const FreeListKey& key)
     {
       static_assert(
         std::is_base_of<FrontendSlabMetadata, BackendType>::value,
         "Template should be a subclass of FrontendSlabMetadata");
-      free_queue.init();
+      free_queue.init(slab, key);
       // Set up meta data as if the entire slab has been turned into a free
       // list. This means we don't have to check for special cases where we have
       // returned all the elements, but this is a slab that is still being bump
@@ -461,10 +462,10 @@ namespace snmalloc
      *
      * Set needed so immediately moves to slow path.
      */
-    void initialise_large()
+    void initialise_large(address_t slab, const FreeListKey& key)
     {
       // We will push to this just to make the fast path clean.
-      free_queue.init();
+      free_queue.init(slab, key);
 
       // Flag to detect that it is a large alloc on the slow path
       large_ = true;
@@ -579,6 +580,13 @@ namespace snmalloc
 
       return {p, !sleeping};
     }
+
+    // Returns an address somewhere in the slab. May not be the
+    // start of the slab.
+    [[nodiscard]] address_t get_slab_interior(const FreeListKey& key) const
+    {
+      return address_cast(free_queue.read_head(0, key));
+    }
   };
 
   /**
diff --git a/src/snmalloc/mem/remotecache.h b/src/snmalloc/mem/remotecache.h
index a415a1daa..90e1eee55 100644
--- a/src/snmalloc/mem/remotecache.h
+++ b/src/snmalloc/mem/remotecache.h
@@ -17,7 +17,7 @@ namespace snmalloc
    */
   struct RemoteDeallocCache
   {
-    std::array<freelist::Builder<false, false>, REMOTE_SLOTS> list;
+    std::array<freelist::Builder<false>, REMOTE_SLOTS> list;
 
     /**
      * The total amount of memory we are waiting for before we will dispatch
@@ -165,14 +165,16 @@ namespace snmalloc
      * Must be called before anything else to ensure actually initialised
      * not just zero init.
      */
-    void init()
+    void init(const FreeListKey& key)
     {
 #ifndef NDEBUG
       initialised = true;
 #endif
       for (auto& l : list)
       {
-        l.init();
+        // We do not need to initialise with a particular slab, so pass
+        // a null address.
+        l.init(0, key);
       }
       capacity = REMOTE_CACHE;
     }
diff --git a/src/test/func/statistics/stats.cc b/src/test/func/statistics/stats.cc
index 2de3e2d9b..c8db1cad7 100644
--- a/src/test/func/statistics/stats.cc
+++ b/src/test/func/statistics/stats.cc
@@ -1,16 +1,27 @@
-#include <snmalloc/snmalloc.h>
-
+#ifdef SNMALLOC_PASS_THROUGH // This test depends on snmalloc internals
 int main()
 {
-#ifndef SNMALLOC_PASS_THROUGH // This test depends on snmalloc internals
+  return 0;
+}
+#else
+#  include <iostream>
+#  include <snmalloc/snmalloc.h>
+#  include <vector>
+
+template<size_t size>
+void debug_check_empty_1()
+{
+  std::cout << "debug_check_empty_1 " << size << std::endl;
   snmalloc::Alloc& a = snmalloc::ThreadAlloc::get();
   bool result;
 
-  auto r = a.alloc(16);
+  auto r = a.alloc(size);
 
   snmalloc::debug_check_empty(&result);
   if (result != false)
   {
+    std::cout << "debug_check_empty failed to detect leaked memory: " << size
+              << std::endl;
     abort();
   }
 
@@ -19,14 +30,17 @@ int main()
   a.dealloc(r);
 
   snmalloc::debug_check_empty(&result);
   if (result != true)
   {
+    std::cout << "debug_check_empty failed to say empty: " << size << std::endl;
     abort();
   }
 
-  r = a.alloc(16);
+  r = a.alloc(size);
   snmalloc::debug_check_empty(&result);
   if (result != false)
   {
+    std::cout << "debug_check_empty failed to detect leaked memory: " << size
+              << std::endl;
     abort();
   }
@@ -35,7 +49,70 @@ int main()
   snmalloc::debug_check_empty(&result);
   if (result != true)
   {
+    std::cout << "debug_check_empty failed to say empty: " << size << std::endl;
     abort();
   }
-#endif
 }
+
+template<size_t size>
+void debug_check_empty_2()
+{
+  std::cout << "debug_check_empty_2 " << size << std::endl;
+  snmalloc::Alloc& a = snmalloc::ThreadAlloc::get();
+  bool result;
+  std::vector<void*> allocs;
+  // 1GB of allocations
+  size_t count = snmalloc::bits::min<size_t>(2048, 1024 * 1024 * 1024 / size);
+
+  for (size_t i = 0; i < count; i++)
+  {
+    if (i % (count / 16) == 0)
+    {
+      std::cout << "." << std::flush;
+    }
+    auto r = a.alloc(size);
+    allocs.push_back(r);
+    snmalloc::debug_check_empty(&result);
+    if (result != false)
+    {
+      std::cout << "False empty after " << i << " allocations of " << size
+                << std::endl;
+      abort();
+    }
+  }
+  std::cout << std::endl;
+
+  for (size_t i = 0; i < count; i++)
+  {
+    if (i % (count / 16) == 0)
+    {
+      std::cout << "." << std::flush;
+    }
+    snmalloc::debug_check_empty(&result);
+    if (result != false)
+    {
+      std::cout << "False empty after " << i << " deallocations of " << size
+                << std::endl;
+      abort();
+    }
+    a.dealloc(allocs[i]);
+  }
+  std::cout << std::endl;
+  snmalloc::debug_check_empty();
+}
+
+int main()
+{
+  debug_check_empty_1<16>();
+  debug_check_empty_1<16384>();
+  debug_check_empty_1<65536>();
+  debug_check_empty_1<1024 * 1024 * 32>();
+
+  debug_check_empty_2<32>();
+  debug_check_empty_2<16384>();
+  debug_check_empty_2<65535>();
+  debug_check_empty_2<1024 * 1024 * 32>();
+
+  return 0;
+}
+#endif
\ No newline at end of file
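
Note on the core invariant this patch introduces: an empty free-list Builder's head slot, which is otherwise unused, now stores an encoded pointer into its slab, so the slab metadata can always name the slab it belongs to (see get_slab_interior above). Below is a minimal, self-contained sketch of that idea. The Key, code and Builder types here are hypothetical stand-ins, not snmalloc's real ones: the actual patch uses Object::code_next/decode_next with a FreeListKey, whereas this sketch substitutes a simple XOR mix that is merely illustrative.

  #include <cassert>
  #include <cstdint>
  #include <iostream>

  // Hypothetical stand-in for snmalloc's FreeListKey.
  struct Key
  {
    uintptr_t key;
  };

  // Involutive obfuscation: applying it twice returns the original value.
  // Mixing in the slot's own address binds the encoding to that slot.
  static uintptr_t code(uintptr_t slot, uintptr_t value, const Key& k)
  {
    return value ^ slot ^ k.key;
  }

  struct Builder
  {
    void* head = nullptr;

    // As in the patch: when the builder is initialised, the head slot is
    // not live, so an encoded pointer into the slab is stashed there. The
    // builder therefore always knows which slab it refers to, even while
    // it holds no free objects.
    void init(uintptr_t slab, const Key& k)
    {
      head = reinterpret_cast<void*>(
        code(reinterpret_cast<uintptr_t>(&head), slab, k));
    }

    // Decoding head recovers an address somewhere inside the slab,
    // mirroring FrontendSlabMetadata::get_slab_interior.
    uintptr_t slab_interior(const Key& k) const
    {
      return code(
        reinterpret_cast<uintptr_t>(&head),
        reinterpret_cast<uintptr_t>(head),
        k);
    }
  };

  int main()
  {
    Key k{0x9e3779b9u};
    alignas(64) char slab[64];
    Builder b;
    b.init(reinterpret_cast<uintptr_t>(slab), k);
    assert(b.slab_interior(k) == reinterpret_cast<uintptr_t>(slab));
    std::cout << "recovered slab at " << b.slab_interior(k) << "\n";
  }

This is why debug_is_empty_impl can take a laden slab's metadata, recover an interior address from the (empty or partial) free queue, look up the PagemapEntry for that address, and report the offending size class and slab start without storing an extra back-pointer in the metadata.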