diff --git a/std/experimental/allocator/building_blocks/free_list.d b/std/experimental/allocator/building_blocks/free_list.d
index fae0fda7233..d6f81b8c929 100644
--- a/std/experimental/allocator/building_blocks/free_list.d
+++ b/std/experimental/allocator/building_blocks/free_list.d
@@ -760,6 +760,7 @@ struct SharedFreeList(ParentAllocator,
         "Maximum size must accommodate a pointer.");
 
     import core.atomic : atomicOp, cas;
+    import core.internal.spinlock : SpinLock;
 
     static if (minSize != chooseAtRuntime)
     {
@@ -854,6 +855,10 @@ struct SharedFreeList(ParentAllocator,
             assert(nodes);
             atomicOp!("-=")(nodes, 1);
         }
+        private void resetNodes() shared
+        {
+            nodes = 0;
+        }
         private bool nodesFull() shared
         {
             return nodes >= approxMaxLength;
@@ -863,6 +868,7 @@ struct SharedFreeList(ParentAllocator,
     {
         private static void incNodes() { }
         private static void decNodes() { }
+        private static void resetNodes() { }
         private enum bool nodesFull = false;
     }
 
@@ -924,6 +930,8 @@ struct SharedFreeList(ParentAllocator,
     mixin(forwardToMember("parent", "expand"));
 
+    private SpinLock lock;
+
     private struct Node { Node* next; }
     static assert(ParentAllocator.alignment >= Node.alignof);
     private Node* _root;
 
@@ -958,18 +966,22 @@ struct SharedFreeList(ParentAllocator,
         assert(bytes < size_t.max / 2);
         if (!freeListEligible(bytes)) return parent.allocate(bytes);
         if (maxSize != unbounded) bytes = max;
-        // Pop off the freelist
-        shared Node* oldRoot = void, next = void;
-        do
+
+        // Try to pop off the freelist
+        lock.lock();
+        if (!_root)
         {
-            oldRoot = _root; // atomic load
-            if (!oldRoot) return allocateFresh(bytes);
-            next = oldRoot.next; // atomic load
+            lock.unlock();
+            return allocateFresh(bytes);
+        }
+        else
+        {
+            auto oldRoot = _root;
+            _root = _root.next;
+            decNodes();
+            lock.unlock();
+            return (cast(ubyte*) oldRoot)[0 .. bytes];
         }
-        while (!cas(&_root, oldRoot, next));
-        // great, snatched the root
-        decNodes();
-        return (cast(ubyte*) oldRoot)[0 .. bytes];
     }
 
     private void[] allocateFresh(const size_t bytes) shared
@@ -984,14 +996,11 @@ struct SharedFreeList(ParentAllocator,
         if (!nodesFull && freeListEligible(b.length))
         {
             auto newRoot = cast(shared Node*) b.ptr;
-            shared Node* oldRoot;
-            do
-            {
-                oldRoot = _root;
-                newRoot.next = oldRoot;
-            }
-            while (!cas(&_root, oldRoot, newRoot));
+            lock.lock();
+            newRoot.next = _root;
+            _root = newRoot;
             incNodes();
+            lock.unlock();
             return true;
         }
         static if (hasMember!(ParentAllocator, "deallocate"))
@@ -1004,6 +1013,8 @@ struct SharedFreeList(ParentAllocator,
     bool deallocateAll() shared
    {
        bool result = false;
+        lock.lock();
+        scope(exit) lock.unlock();
         static if (hasMember!(ParentAllocator, "deallocateAll"))
         {
             result = parent.deallocateAll();
@@ -1011,13 +1022,16 @@ struct SharedFreeList(ParentAllocator,
         else static if (hasMember!(ParentAllocator, "deallocate"))
         {
             result = true;
-            for (auto n = _root; n; n = n.next)
+            for (auto n = _root; n;)
             {
+                auto tmp = n.next;
                 if (!parent.deallocate((cast(ubyte*)n)[0 .. max]))
                     result = false;
+                n = tmp;
             }
         }
         _root = null;
+        resetNodes();
         return result;
     }
 }
@@ -1058,6 +1072,18 @@ unittest
     }
 }
 
+unittest
+{
+    import std.experimental.allocator.mallocator : Mallocator;
+    static shared SharedFreeList!(Mallocator, 64, 128, 10) a;
+    auto b = a.allocate(100);
+    a.deallocate(b);
+    assert(a.nodes == 1);
+    b = [];
+    a.deallocateAll();
+    assert(a.nodes == 0);
+}
+
 unittest
 {
     import std.experimental.allocator.mallocator : Mallocator;
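
For review convenience: the change swaps the lock-free CAS loops in `allocate`/`deallocate` for a single `SpinLock` guarding `_root`, which also lets `deallocateAll` walk and reset the list without racing concurrent pops. Below is a minimal sketch of that locking discipline, not part of the patch: `ToyFreeList`, `pop`, and `push` are hypothetical names, and the toy lock merely stands in for druntime's `core.internal.spinlock.SpinLock`.

```d
import core.atomic : atomicStore, cas;

// Sketch only: the head-of-list discipline the patch adopts.
// Usage: declare `shared ToyFreeList fl;` then call fl.push/fl.pop.
struct ToyFreeList
{
    private struct Node { Node* next; }

    private Node* _root;
    private bool _locked;

    private void lock() shared
    {
        // Spin until we flip _locked from false to true.
        while (!cas(&_locked, false, true)) {}
    }

    private void unlock() shared
    {
        atomicStore(_locked, false);
    }

    // Mirrors the patched allocate(): lock, unlink the head, unlock.
    void* pop() shared
    {
        lock();
        scope(exit) unlock();
        auto n = _root;
        if (n) _root = n.next;
        return cast(void*) n;
    }

    // Mirrors the patched deallocate(): lock, link at the head, unlock.
    void push(void* p) shared
    {
        auto n = cast(shared(Node)*) p;
        lock();
        scope(exit) unlock();
        n.next = _root;
        _root = n;
    }
}
```

With the lock held, `pop` can safely read both `_root` and `_root.next`. The old CAS loop read `oldRoot.next` with no protection against `oldRoot` being popped, freed, and reused by another thread between the loads and the `cas` — the classic ABA hazard for lock-free stacks — and it gave `deallocateAll` no way to traverse the list safely.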
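
And a hypothetical concurrent smoke test, also not part of the patch, exercising the same API that the new unittest above checks single-threaded; the thread count, iteration count, and request size are arbitrary choices:

```d
unittest
{
    import core.thread : Thread;
    import std.experimental.allocator.building_blocks.free_list : SharedFreeList;
    import std.experimental.allocator.mallocator : Mallocator;

    static shared SharedFreeList!(Mallocator, 64, 128) list;

    // Each worker repeatedly pops a block off the shared list and pushes
    // it back; under the spinlock every update of _root is serialized.
    void worker()
    {
        foreach (i; 0 .. 1000)
        {
            auto blk = list.allocate(96);
            assert(blk.length >= 96);
            list.deallocate(blk);
        }
    }

    Thread[4] threads;
    foreach (ref t; threads) t = new Thread(&worker);
    foreach (t; threads) t.start();
    foreach (t; threads) t.join();

    // Drain whatever accumulated on the free list in one locked sweep.
    list.deallocateAll();
}
```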