From 1adaf7a17af7099ded6cb5bcf6cacd93f2ef9d2f Mon Sep 17 00:00:00 2001 From: Kevin Schoedel <67607049+kpschoedel@users.noreply.github.com> Date: Fri, 19 Nov 2021 11:58:50 -0500 Subject: [PATCH] Add heap allocation to lib/support/Pool.h (#11698) #### Problem We have too many pool allocators. Previous PRs (#11428, #11487) transitionally use `BitMapObjectPool` where previously `System::ObjectPool` had been used, but this lost the ability to configure heap allocation. #### Change overview - Add a heap allocator (from #9590) - Add allocation selection (from #11371) - Use this for `System::Timer` (complementing #11487) - Factor out common code. - Use a heap-allocated list to track heap-allocated objects. - More unit tests. Co-authored-by: Zang MingJie Co-authored-by: C Freeman A future PR will use this for Inet pools (complementing #11428); that is not done here because it would conflict with other Inet changes under way. #### Testing Added heap versions of unit tests in TestPool. (A future PR will add `System::Object`-style statistics and re-unify most of these tests.) CI should show `.bss` decreases corresponding to increases in #11487. --- src/lib/support/Pool.cpp | 67 ++++++- src/lib/support/Pool.h | 281 ++++++++++++++++++++++++---- src/lib/support/tests/TestPool.cpp | 284 +++++++++++++++++++++++++---- src/system/SystemTimer.cpp | 2 +- src/system/SystemTimer.h | 2 +- 5 files changed, 564 insertions(+), 72 deletions(-) diff --git a/src/lib/support/Pool.cpp b/src/lib/support/Pool.cpp index 5e4af82f3e6e46..ff8789e3c0a956 100644 --- a/src/lib/support/Pool.cpp +++ b/src/lib/support/Pool.cpp @@ -23,6 +23,8 @@ namespace chip { +namespace internal { + StaticAllocatorBitmap::StaticAllocatorBitmap(void * storage, std::atomic * usage, size_t capacity, size_t elementSize) : StaticAllocatorBase(capacity), @@ -46,7 +48,7 @@ void * StaticAllocatorBitmap::Allocate() { if (usage.compare_exchange_strong(value, value | (kBit1 << offset))) { - mAllocated++; + IncreaseUsage(); return At(word * kBitChunkSize + offset); } else @@ -70,7 +72,7 @@ void StaticAllocatorBitmap::Deallocate(void * element) auto value = mUsage[word].fetch_and(~(kBit1 << offset)); nlASSERT((value & (kBit1 << offset)) != 0); // assert fail when free an unused slot - mAllocated--; + DecreaseUsage(); } size_t StaticAllocatorBitmap::IndexOf(void * element) @@ -83,7 +85,7 @@ size_t StaticAllocatorBitmap::IndexOf(void * element) return index; } -bool StaticAllocatorBitmap::ForEachActiveObjectInner(void * context, Lambda lambda) +bool StaticAllocatorBitmap::ForEachActiveObjectInner(void * context, bool lambda(void * context, void * object)) { for (size_t word = 0; word * kBitChunkSize < Capacity(); ++word) { @@ -101,4 +103,63 @@ bool StaticAllocatorBitmap::ForEachActiveObjectInner(void * context, Lambda lamb return true; } +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +HeapObjectListNode * HeapObjectList::FindNode(void * object) const +{ + for (HeapObjectListNode * p = mNext; p != this; p = p->mNext) + { + if (p->mObject == object) + { + return p; + } + } + return nullptr; +} + +using Lambda = bool (*)(void *, void *); +bool HeapObjectList::ForEachNode(void * context, bool lambda(void * context, void * object)) +{ + ++mIterationDepth; + bool result = true; + bool anyReleased = false; + HeapObjectListNode * p = mNext; + while (p != this) + { + if (p->mObject != nullptr) + { + if (!lambda(context, p->mObject)) + { + result = false; + break; + } + } + if (p->mObject == nullptr) + { + anyReleased = true; + } + p = p->mNext; + } + --mIterationDepth; + if 
(mIterationDepth == 0 && anyReleased) + { + // Remove nodes for released objects. + p = mNext; + while (p != this) + { + HeapObjectListNode * next = p->mNext; + if (p->mObject == nullptr) + { + p->Remove(); + delete p; + } + p = next; + } + } + return result; +} + +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +} // namespace internal } // namespace chip diff --git a/src/lib/support/Pool.h b/src/lib/support/Pool.h index f9fe67aaa8d735..47df3efd9ef729 100644 --- a/src/lib/support/Pool.h +++ b/src/lib/support/Pool.h @@ -17,36 +17,57 @@ */ /** - * @file - * Defines a memory pool class BitMapObjectPool. + * Defines memory pool classes. */ #pragma once -#include +#include + #include #include #include #include #include +#include namespace chip { -class StaticAllocatorBase +namespace internal { + +class Statistics { public: - StaticAllocatorBase(size_t capacity) : mAllocated(0), mCapacity(capacity) {} + Statistics() : mAllocated(0), mHighWaterMark(0) {} - size_t Capacity() const { return mCapacity; } size_t Allocated() const { return mAllocated; } - bool Exhausted() const { return mAllocated == mCapacity; } + size_t HighWaterMark() const { return mHighWaterMark; } + void IncreaseUsage() + { + if (++mAllocated > mHighWaterMark) + { + mHighWaterMark = mAllocated; + } + } + void DecreaseUsage() { --mAllocated; } protected: size_t mAllocated; + size_t mHighWaterMark; +}; + +class StaticAllocatorBase : public Statistics +{ +public: + StaticAllocatorBase(size_t capacity) : mCapacity(capacity) {} + size_t Capacity() const { return mCapacity; } + bool Exhausted() const { return mAllocated == mCapacity; } + +protected: const size_t mCapacity; }; -class StaticAllocatorBitmap : public StaticAllocatorBase +class StaticAllocatorBitmap : public internal::StaticAllocatorBase { protected: /** @@ -67,8 +88,7 @@ class StaticAllocatorBitmap : public StaticAllocatorBase void * At(size_t index) { return static_cast(mElements) + mElementSize * index; } size_t IndexOf(void * element); - using Lambda = bool (*)(void *, void *); - bool ForEachActiveObjectInner(void * context, Lambda lambda); + bool ForEachActiveObjectInner(void * context, bool lambda(void * context, void * object)); private: void * mElements; @@ -76,20 +96,111 @@ class StaticAllocatorBitmap : public StaticAllocatorBase std::atomic * mUsage; }; +template +class PoolCommon +{ +public: + template + void ResetObject(T * element, Args &&... 
args) + { + element->~T(); + new (element) T(std::forward(args)...); + } +}; + +template +class LambdaProxy +{ +public: + LambdaProxy(Function && function) : mFunction(std::move(function)) {} + static bool Call(void * context, void * target) + { + return static_cast(context)->mFunction(static_cast(target)); + } + +private: + Function mFunction; +}; + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +struct HeapObjectListNode +{ + void Remove() + { + mNext->mPrev = mPrev; + mPrev->mNext = mNext; + } + + void * mObject; + HeapObjectListNode * mNext; + HeapObjectListNode * mPrev; +}; + +struct HeapObjectList : HeapObjectListNode +{ + HeapObjectList() : mIterationDepth(0) { mNext = mPrev = this; } + + void Append(HeapObjectListNode * node) + { + node->mNext = this; + node->mPrev = mPrev; + mPrev->mNext = node; + mPrev = node; + } + + HeapObjectListNode * FindNode(void * object) const; + + bool ForEachNode(void * context, bool lambda(void * context, void * object)); + + size_t mIterationDepth; +}; + +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +} // namespace internal + +/** + * @class ObjectPool + * + * Depending on build configuration, ObjectPool is either a fixed-size static pool or a heap-allocated pool. + * + * @tparam T Type of element to be allocated. + * @tparam N Number of elements in the pool, in the fixed-size case. + * + * @fn CreateObject + * @memberof ObjectPool + * + * Create an object from the pool. Forwards its arguments to construct a T. + * + * @fn ReleaseObject + * @memberof ObjectPool + * @param object Pointer to object to release (or return to the pool). Its destructor runs. + * + * @fn ForEachActiveObject + * @memberof ObjectPool + * @param visitor A function that takes a T* and returns true to continue iterating or false to stop iterating. + * @returns false if a visitor call returned false, true otherwise. + * + * Iteration may be nested. ReleaseObject() can be called during iteration, on the current object or any other. + * CreateObject() can be called, but it is undefined whether or not a newly created object will be visited. + */ + /** - * @brief - * A class template used for allocating Objects. + * A class template used for allocating objects from a fixed-size static pool. * - * @tparam T a subclass of element to be allocated. + * @tparam T type of element to be allocated. * @tparam N a positive integer max number of elements the pool provides. */ template -class BitMapObjectPool : public StaticAllocatorBitmap +class BitMapObjectPool : public internal::StaticAllocatorBitmap, public internal::PoolCommon { public: BitMapObjectPool() : StaticAllocatorBitmap(mData.mMemory, mUsage, N, sizeof(T)) {} - - static size_t Size() { return N; } + ~BitMapObjectPool() + { + // ReleaseAll(); + } template T * CreateObject(Args &&... args) @@ -110,12 +221,7 @@ class BitMapObjectPool : public StaticAllocatorBitmap Deallocate(element); } - template - void ResetObject(T * element, Args &&... 
args) - { - element->~T(); - new (element) T(std::forward(args)...); - } + void ReleaseAll() { ForEachActiveObjectInner(this, ReleaseObject); } /** * @brief @@ -131,24 +237,16 @@ class BitMapObjectPool : public StaticAllocatorBitmap template bool ForEachActiveObject(Function && function) { - LambdaProxy proxy(std::forward(function)); - return ForEachActiveObjectInner(&proxy, &LambdaProxy::Call); + internal::LambdaProxy proxy(std::forward(function)); + return ForEachActiveObjectInner(&proxy, &internal::LambdaProxy::Call); } private: - template - class LambdaProxy + static bool ReleaseObject(void * context, void * object) { - public: - LambdaProxy(Function && function) : mFunction(std::move(function)) {} - static bool Call(void * context, void * target) - { - return static_cast(context)->mFunction(static_cast(target)); - } - - private: - Function mFunction; - }; + static_cast(context)->ReleaseObject(static_cast(object)); + return true; + } std::atomic mUsage[(N + kBitChunkSize - 1) / kBitChunkSize]; union Data @@ -160,4 +258,115 @@ class BitMapObjectPool : public StaticAllocatorBitmap } mData; }; +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +/** + * A class template used for allocating objects from the heap. + * + * @tparam T type to be allocated. + */ +template +class HeapObjectPool : public internal::Statistics, public internal::PoolCommon +{ +public: + HeapObjectPool() {} + ~HeapObjectPool() + { + // TODO(#11880): Release all active objects (or verify that none are active) when destroying the pool. + // ReleaseAll(); + } + + template + T * CreateObject(Args &&... args) + { + T * object = new T(std::forward(args)...); + if (object != nullptr) + { + auto node = new internal::HeapObjectListNode(); + if (node != nullptr) + { + node->mObject = object; + mObjects.Append(node); + IncreaseUsage(); + return object; + } + } + return nullptr; + } + + void ReleaseObject(T * object) + { + if (object != nullptr) + { + internal::HeapObjectListNode * node = mObjects.FindNode(object); + if (node != nullptr) + { + // Note that the node is not removed here; that is deferred until the end of the next pool iteration. 
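+                // (Deferring the removal keeps the list structure intact for any
+                // ForEachNode() call that is currently in progress; ForEachNode()
+                // deletes the emptied nodes once the outermost iteration finishes.)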
+ node->mObject = nullptr; + delete object; + DecreaseUsage(); + } + } + } + + void ReleaseAll() { mObjects.ForEachNode(this, ReleaseObject); } + + /** + * @brief + * Run a functor for each active object in the pool + * + * @param function The functor of type `bool (*)(T*)`, return false to break the iteration + * @return bool Returns false if broke during iteration + */ + template + bool ForEachActiveObject(Function && function) + { + // return ForEachNode([function](void *object) { return function(static_cast(object)); }); + internal::LambdaProxy proxy(std::forward(function)); + return mObjects.ForEachNode(&proxy, &internal::LambdaProxy::Call); + } + +private: + static bool ReleaseObject(void * context, void * object) + { + static_cast(context)->ReleaseObject(static_cast(object)); + return true; + } + + internal::HeapObjectList mObjects; +}; + +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +template +using ObjectPool = HeapObjectPool; +#else // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +template +using ObjectPool = BitMapObjectPool; +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +enum class ObjectPoolMem +{ + kStatic, +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + kDynamic +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +}; + +template +class MemTypeObjectPool; + +template +class MemTypeObjectPool : public BitMapObjectPool +{ +}; + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +template +class MemTypeObjectPool : public HeapObjectPool +{ +}; +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + } // namespace chip diff --git a/src/lib/support/tests/TestPool.cpp b/src/lib/support/tests/TestPool.cpp index 57366ff2b3ce8f..613bb87d00d578 100644 --- a/src/lib/support/tests/TestPool.cpp +++ b/src/lib/support/tests/TestPool.cpp @@ -32,8 +32,8 @@ namespace chip { -template -size_t GetNumObjectsInUse(BitMapObjectPool & pool) +template +size_t GetNumObjectsInUse(POOL & pool) { size_t count = 0; pool.ForEachActiveObject([&count](void *) { @@ -49,21 +49,65 @@ namespace { using namespace chip; +template void TestReleaseNull(nlTestSuite * inSuite, void * inContext) { - constexpr const size_t size = 10; - BitMapObjectPool pool; + MemTypeObjectPool pool; pool.ReleaseObject(nullptr); NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == 0); NL_TEST_ASSERT(inSuite, pool.Allocated() == 0); } +void TestReleaseNullStatic(nlTestSuite * inSuite, void * inContext) +{ + TestReleaseNull(inSuite, inContext); +} + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +void TestReleaseNullDynamic(nlTestSuite * inSuite, void * inContext) +{ + TestReleaseNull(inSuite, inContext); +} +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +template void TestCreateReleaseObject(nlTestSuite * inSuite, void * inContext) { - constexpr const size_t size = 100; - BitMapObjectPool pool; - uint32_t * obj[size]; - for (size_t i = 0; i < pool.Size(); ++i) + MemTypeObjectPool pool; + uint32_t * obj[N]; + + NL_TEST_ASSERT(inSuite, pool.Allocated() == 0); + for (int t = 0; t < 2; ++t) + { + pool.ReleaseAll(); + NL_TEST_ASSERT(inSuite, pool.Allocated() == 0); + + for (size_t i = 0; i < N; ++i) + { + obj[i] = pool.CreateObject(); + NL_TEST_ASSERT(inSuite, obj[i] != nullptr); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == i + 1); + NL_TEST_ASSERT(inSuite, pool.Allocated() == i + 1); + } + } + + for (size_t i = 0; i < N; ++i) + { + pool.ReleaseObject(obj[i]); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == N - i - 1); + NL_TEST_ASSERT(inSuite, pool.Allocated() == N - i - 1); + } +} + +void TestCreateReleaseObjectStatic(nlTestSuite * inSuite, void * 
inContext) +{ + constexpr const size_t kSize = 100; + TestCreateReleaseObject(inSuite, inContext); + + MemTypeObjectPool pool; + uint32_t * obj[kSize]; + + for (size_t i = 0; i < kSize; ++i) { obj[i] = pool.CreateObject(); NL_TEST_ASSERT(inSuite, obj[i] != nullptr); @@ -73,33 +117,34 @@ void TestCreateReleaseObject(nlTestSuite * inSuite, void * inContext) uint32_t * fail = pool.CreateObject(); NL_TEST_ASSERT(inSuite, fail == nullptr); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size); - NL_TEST_ASSERT(inSuite, pool.Allocated() == size); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize); NL_TEST_ASSERT(inSuite, pool.Exhausted()); pool.ReleaseObject(obj[55]); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size - 1); - NL_TEST_ASSERT(inSuite, pool.Allocated() == size - 1); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize - 1); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize - 1); NL_TEST_ASSERT(inSuite, !pool.Exhausted()); NL_TEST_ASSERT(inSuite, obj[55] == pool.CreateObject()); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size); - NL_TEST_ASSERT(inSuite, pool.Allocated() == size); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize); NL_TEST_ASSERT(inSuite, pool.Exhausted()); fail = pool.CreateObject(); NL_TEST_ASSERT(inSuite, fail == nullptr); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size); - NL_TEST_ASSERT(inSuite, pool.Allocated() == size); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize); NL_TEST_ASSERT(inSuite, pool.Exhausted()); +} - for (size_t i = 0; i < pool.Size(); ++i) - { - pool.ReleaseObject(obj[i]); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size - i - 1); - NL_TEST_ASSERT(inSuite, pool.Allocated() == size - i - 1); - } +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +void TestCreateReleaseObjectDynamic(nlTestSuite * inSuite, void * inContext) +{ + TestCreateReleaseObject(inSuite, inContext); } +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +template void TestCreateReleaseStruct(nlTestSuite * inSuite, void * inContext) { struct S @@ -108,26 +153,190 @@ void TestCreateReleaseStruct(nlTestSuite * inSuite, void * inContext) ~S() { mSet.erase(this); } std::set & mSet; }; - std::set objs1; - constexpr const size_t size = 100; - BitMapObjectPool pool; - S * objs2[size]; - for (size_t i = 0; i < pool.Size(); ++i) + constexpr const size_t kSize = 100; + MemTypeObjectPool pool; + + S * objs2[kSize]; + for (size_t i = 0; i < kSize; ++i) { objs2[i] = pool.CreateObject(objs1); NL_TEST_ASSERT(inSuite, objs2[i] != nullptr); + NL_TEST_ASSERT(inSuite, pool.Allocated() == i + 1); NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == i + 1); NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == objs1.size()); } - for (size_t i = 0; i < pool.Size(); ++i) + for (size_t i = 0; i < kSize; ++i) { pool.ReleaseObject(objs2[i]); - NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == size - i - 1); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize - i - 1); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize - i - 1); NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == objs1.size()); } + + // Verify that ReleaseAll() calls the destructors. 
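+    // (Each S removes itself from objs1 in its destructor, so objs1 draining to
+    // empty after ReleaseAll() below is the evidence that every destructor ran.)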
+ for (size_t i = 0; i < kSize; ++i) + { + objs2[i] = pool.CreateObject(objs1); + } + NL_TEST_ASSERT(inSuite, objs1.size() == kSize); + NL_TEST_ASSERT(inSuite, pool.Allocated() == kSize); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == kSize); + printf("allocated = %zu\n", pool.Allocated()); + printf("highwater = %zu\n", pool.HighWaterMark()); + + pool.ReleaseAll(); + printf("allocated = %zu\n", pool.Allocated()); + printf("highwater = %zu\n", pool.HighWaterMark()); + NL_TEST_ASSERT(inSuite, objs1.size() == 0); + NL_TEST_ASSERT(inSuite, GetNumObjectsInUse(pool) == 0); + NL_TEST_ASSERT(inSuite, pool.Allocated() == 0); + NL_TEST_ASSERT(inSuite, pool.HighWaterMark() == kSize); +} + +void TestCreateReleaseStructStatic(nlTestSuite * inSuite, void * inContext) +{ + TestCreateReleaseStruct(inSuite, inContext); +} + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +void TestCreateReleaseStructDynamic(nlTestSuite * inSuite, void * inContext) +{ + TestCreateReleaseStruct(inSuite, inContext); +} +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP + +template +void TestForEachActiveObject(nlTestSuite * inSuite, void * inContext) +{ + struct S + { + S(size_t id) : mId(id) {} + size_t mId; + }; + + constexpr size_t kSize = 50; + S * objArray[kSize]; + std::set objIds; + + MemTypeObjectPool pool; + + for (size_t i = 0; i < kSize; ++i) + { + objArray[i] = pool.CreateObject(i); + NL_TEST_ASSERT(inSuite, objArray[i] != nullptr); + NL_TEST_ASSERT(inSuite, objArray[i]->mId == i); + objIds.insert(i); + } + + // Verify that iteration visits all objects. + size_t count = 0; + size_t sum = 0; + pool.ForEachActiveObject([&](S * object) { + NL_TEST_ASSERT(inSuite, object != nullptr); + NL_TEST_ASSERT(inSuite, objIds.count(object->mId) == 1); + objIds.erase(object->mId); + ++count; + sum += object->mId; + return true; + }); + NL_TEST_ASSERT(inSuite, count == kSize); + NL_TEST_ASSERT(inSuite, sum == kSize * (kSize - 1) / 2); + NL_TEST_ASSERT(inSuite, objIds.size() == 0); + + // Verify that returning false stops iterating. + count = 0; + pool.ForEachActiveObject([&](S * object) { + objIds.insert(object->mId); + return ++count != kSize / 2; + }); + NL_TEST_ASSERT(inSuite, count == kSize / 2); + NL_TEST_ASSERT(inSuite, objIds.size() == kSize / 2); + + // Verify that iteration can be nested. 
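+    // (objIds still holds the kSize / 2 ids recorded when the previous iteration
+    // stopped early; for each matching outer object the inner iteration should
+    // visit the other kSize - 1 objects, giving the expected count below.)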
+ count = 0; + pool.ForEachActiveObject([&](S * outer) { + if (objIds.count(outer->mId) == 1) + { + pool.ForEachActiveObject([&](S * inner) { + if (inner == outer) + { + objIds.erase(inner->mId); + } + else + { + ++count; + } + return true; + }); + } + return true; + }); + NL_TEST_ASSERT(inSuite, count == (kSize - 1) * kSize / 2); + NL_TEST_ASSERT(inSuite, objIds.size() == 0); + + count = 0; + pool.ForEachActiveObject([&](S * object) { + ++count; + if ((object->mId % 2) == 0) + { + objArray[object->mId] = nullptr; + pool.ReleaseObject(object); + } + else + { + objIds.insert(object->mId); + } + return true; + }); + NL_TEST_ASSERT(inSuite, count == kSize); + NL_TEST_ASSERT(inSuite, objIds.size() == kSize / 2); + for (size_t i = 0; i < kSize; ++i) + { + if ((i % 2) == 0) + { + NL_TEST_ASSERT(inSuite, objArray[i] == nullptr); + } + else + { + NL_TEST_ASSERT(inSuite, objArray[i] != nullptr); + NL_TEST_ASSERT(inSuite, objArray[i]->mId == i); + } + } + + count = 0; + pool.ForEachActiveObject([&](S * object) { + ++count; + if ((object->mId % 2) == 1) + { + size_t id = object->mId - 1; + NL_TEST_ASSERT(inSuite, objArray[id] == nullptr); + objArray[id] = pool.CreateObject(id); + NL_TEST_ASSERT(inSuite, objArray[id] != nullptr); + } + return true; + }); + for (size_t i = 0; i < kSize; ++i) + { + NL_TEST_ASSERT(inSuite, objArray[i] != nullptr); + NL_TEST_ASSERT(inSuite, objArray[i]->mId == i); + } + NL_TEST_ASSERT(inSuite, count >= kSize / 2); + NL_TEST_ASSERT(inSuite, count <= kSize); +} + +void TestForEachActiveObjectStatic(nlTestSuite * inSuite, void * inContext) +{ + TestForEachActiveObject(inSuite, inContext); +} + +#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP +void TestForEachActiveObjectDynamic(nlTestSuite * inSuite, void * inContext) +{ + TestForEachActiveObject(inSuite, inContext); } +#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP int Setup(void * inContext) { @@ -145,8 +354,21 @@ int Teardown(void * inContext) /** * Test Suite. It lists all the test functions. 
 */
-static const nlTest sTests[] = { NL_TEST_DEF_FN(TestReleaseNull), NL_TEST_DEF_FN(TestCreateReleaseObject),
-                                 NL_TEST_DEF_FN(TestCreateReleaseStruct), NL_TEST_SENTINEL() };
+static const nlTest sTests[] = {
+    // clang-format off
+    NL_TEST_DEF_FN(TestReleaseNullStatic),
+    NL_TEST_DEF_FN(TestCreateReleaseObjectStatic),
+    NL_TEST_DEF_FN(TestCreateReleaseStructStatic),
+    NL_TEST_DEF_FN(TestForEachActiveObjectStatic),
+#if CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
+    NL_TEST_DEF_FN(TestReleaseNullDynamic),
+    NL_TEST_DEF_FN(TestCreateReleaseObjectDynamic),
+    NL_TEST_DEF_FN(TestCreateReleaseStructDynamic),
+    NL_TEST_DEF_FN(TestForEachActiveObjectDynamic),
+#endif // CHIP_SYSTEM_CONFIG_POOL_USE_HEAP
+    NL_TEST_SENTINEL()
+    // clang-format on
+};

 int TestPool()
 {
diff --git a/src/system/SystemTimer.cpp b/src/system/SystemTimer.cpp
index d9a1600802a106..7e4a606099e5ea 100644
--- a/src/system/SystemTimer.cpp
+++ b/src/system/SystemTimer.cpp
@@ -74,7 +74,7 @@ namespace System {
 *******************************************************************************
 */

-BitMapObjectPool<Timer, CHIP_SYSTEM_CONFIG_NUM_TIMERS> Timer::sPool;
+chip::ObjectPool<Timer, CHIP_SYSTEM_CONFIG_NUM_TIMERS> Timer::sPool;

 Stats::count_t Timer::mNumInUse = 0;
 Stats::count_t Timer::mHighWatermark = 0;

diff --git a/src/system/SystemTimer.h b/src/system/SystemTimer.h
index 390307ba97f2d8..91efa2a0cfcdca 100644
--- a/src/system/SystemTimer.h
+++ b/src/system/SystemTimer.h
@@ -229,7 +229,7 @@ class DLL_EXPORT Timer
 private:
     friend class LayerImplLwIP;

-    static BitMapObjectPool<Timer, CHIP_SYSTEM_CONFIG_NUM_TIMERS> sPool;
+    static chip::ObjectPool<Timer, CHIP_SYSTEM_CONFIG_NUM_TIMERS> sPool;

     static Stats::count_t mNumInUse;
     static Stats::count_t mHighWatermark;
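#### Appendix: usage sketch (not part of the patch)

For reviewers who want to see the new alias in one place, here is a minimal, illustrative sketch of the `ObjectPool` API added in `Pool.h` above. The `Widget` type, the pool size of 8, and `Example()` are invented for illustration; only `CreateObject`, `ReleaseObject`, and `ForEachActiveObject` come from the patch.

```cpp
#include <lib/support/Pool.h>

struct Widget
{
    Widget(int id) : mId(id) {}
    int mId;
};

// Resolves to BitMapObjectPool<Widget, 8> when CHIP_SYSTEM_CONFIG_POOL_USE_HEAP is 0,
// and to HeapObjectPool<Widget> (the size parameter is ignored) when it is 1.
chip::ObjectPool<Widget, 8> gWidgets;

void Example()
{
    // Arguments are forwarded to Widget's constructor; nullptr is returned if the
    // static pool is exhausted or the heap allocation fails.
    Widget * w = gWidgets.CreateObject(42);

    // The visitor returns true to continue iterating and false to stop early.
    gWidgets.ForEachActiveObject([](Widget * widget) { return widget->mId != 42; });

    // Runs ~Widget() and returns the slot (or heap memory) to the pool.
    gWidgets.ReleaseObject(w);
}
```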