Revert "[scudo] Make local cache be agnostic to the type of node in f… #68626

Merged (1 commit, Oct 9, 2023)
1 change: 0 additions & 1 deletion compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -56,7 +56,6 @@ if(ANDROID)
endif()

set(SCUDO_HEADERS
allocator_common.h
allocator_config.h
atomic_helpers.h
bytemap.h
85 changes: 0 additions & 85 deletions compiler-rt/lib/scudo/standalone/allocator_common.h

This file was deleted.
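For context: this header, added by the change now being reverted, held standalone templated versions of TransferBatch and BatchGroup that the primaries consumed as TransferBatch<ThisT> and BatchGroup<ThisT> (see the typedefs removed from primary32.h below). A rough sketch of its likely shape, assuming it mirrored what this revert re-adds to local_cache.h; this is a reconstruction for orientation, not the verbatim deleted header:

// Hypothetical reconstruction of the deleted allocator_common.h.
namespace scudo {

template <class SizeClassAllocator> struct TransferBatch {
  typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
  typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
  static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
  // setFromArray / appendFromArray / moveToArray / getCount / clear / ...,
  // with the same semantics as the struct re-added to local_cache.h below.
  TransferBatch *Next;

private:
  CompactPtrT Batch[MaxNumCached];
  u16 Count;
};

template <class SizeClassAllocator> struct BatchGroup {
  BatchGroup *Next;
  uptr CompactPtrGroupBase;
  u16 MaxCachedPerBatch;
  uptr PushedBlocks;
  uptr BytesInBGAtLastCheckpoint;
  SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
};

} // namespace scudo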

110 changes: 91 additions & 19 deletions compiler-rt/lib/scudo/standalone/local_cache.h
@@ -22,6 +22,74 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;

struct TransferBatch {
static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
void setFromArray(CompactPtrT *Array, u16 N) {
DCHECK_LE(N, MaxNumCached);
Count = N;
memcpy(Batch, Array, sizeof(Batch[0]) * Count);
}
void appendFromArray(CompactPtrT *Array, u16 N) {
DCHECK_LE(N, MaxNumCached - Count);
memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
// u16 will be promoted to int by arithmetic type conversion.
Count = static_cast<u16>(Count + N);
}
void appendFromTransferBatch(TransferBatch *B, u16 N) {
DCHECK_LE(N, MaxNumCached - Count);
DCHECK_GE(B->Count, N);
// Append from the back of `B`.
memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
// u16 will be promoted to int by arithmetic type conversion.
Count = static_cast<u16>(Count + N);
B->Count = static_cast<u16>(B->Count - N);
}
void clear() { Count = 0; }
void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
Batch[Count++] = P;
}
void copyToArray(CompactPtrT *Array) const {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
}
u16 getCount() const { return Count; }
bool isEmpty() const { return Count == 0U; }
CompactPtrT get(u16 I) const {
DCHECK_LE(I, Count);
return Batch[I];
}
static u16 getMaxCached(uptr Size) {
return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
}
TransferBatch *Next;

private:
CompactPtrT Batch[MaxNumCached];
u16 Count;
};

// A BatchGroup is used to collect blocks. Each group has a group id to
// identify the group kind of contained blocks.
struct BatchGroup {
// `Next` is used by IntrusiveList.
BatchGroup *Next;
// The compact base address of each group
uptr CompactPtrGroupBase;
// Cache value of TransferBatch::getMaxCached()
u16 MaxCachedPerBatch;
// Number of blocks pushed into this group. This is an increment-only
// counter.
uptr PushedBlocks;
// This is used to track how many bytes are not in-use since last time we
// tried to release pages.
uptr BytesInBGAtLastCheckpoint;
// Blocks are managed by TransferBatch in a list.
SinglyLinkedList<TransferBatch> Batches;
};

static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
"BatchGroup uses the same class size as TransferBatch");

void init(GlobalStats *S, SizeClassAllocator *A) {
DCHECK(isEmpty());
Stats.init();
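The one subtle operation in the re-added TransferBatch is appendFromTransferBatch, which steals the last N entries from the tail of the source batch. A self-contained toy demonstrating just that semantics (toy types and capacity, not the scudo build):

#include <cstdint>
#include <cstring>

using u16 = std::uint16_t;
using CompactPtrT = std::uint32_t;

struct ToyBatch {
  static const u16 MaxNumCached = 8;
  u16 Count = 0;
  CompactPtrT Batch[MaxNumCached];

  // Same tail-stealing semantics as TransferBatch::appendFromTransferBatch.
  void appendFromTransferBatch(ToyBatch *B, u16 N) {
    std::memcpy(Batch + Count, B->Batch + (B->Count - N),
                sizeof(Batch[0]) * N);
    Count = static_cast<u16>(Count + N);
    B->Count = static_cast<u16>(B->Count - N);
  }
};

int main() {
  ToyBatch Src, Dst;
  for (CompactPtrT P = 1; P <= 6; ++P)
    Src.Batch[Src.Count++] = P;
  Dst.appendFromTransferBatch(&Src, 2); // Dst takes {5, 6}; Src keeps {1..4}
  return (Dst.Count == 2 && Src.Count == 4) ? 0 : 1;
}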
@@ -83,7 +151,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}

void drain() {
// Drain BatchClassId last as it may be needed while draining normal blocks.
// Drain BatchClassId last as createBatch can refill it.
for (uptr I = 0; I < NumClasses; ++I) {
if (I == BatchClassId)
continue;
@@ -95,11 +163,19 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
DCHECK(isEmpty());
}

void *getBatchClassBlock() {
void *B = allocate(BatchClassId);
TransferBatch *createBatch(uptr ClassId, void *B) {
if (ClassId != BatchClassId)
B = allocate(BatchClassId);
if (UNLIKELY(!B))
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
return B;
return reinterpret_cast<TransferBatch *>(B);
}

BatchGroup *createGroup() {
void *Ptr = allocate(BatchClassId);
if (UNLIKELY(!Ptr))
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
return reinterpret_cast<BatchGroup *>(Ptr);
}

LocalStats &getStats() { return Stats; }
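Note the asymmetry restored in createBatch: for a normal size class it allocates a fresh BatchClassId block to serve as the batch header, while for BatchClassId itself the caller-supplied block B is reused in place, which avoids recursing into the batch class. A minimal toy of that contract (stand-in types and a fixed pool; the real allocate() and the call sites in pushBlocksImpl appear later in this diff):

#include <cassert>
#include <cstdint>

using uptr = std::uintptr_t;

constexpr uptr BatchClassId = 0;
static char Pool[64]; // stand-in for a freshly allocated BatchClassId block

// Toy model of createBatch above: the caller's block becomes the batch
// header only for the batch class itself.
void *createBatch(uptr ClassId, void *B) {
  if (ClassId != BatchClassId)
    B = Pool; // allocate(BatchClassId) in the real code
  assert(B && "no block available for a batch header");
  return B;
}

int main() {
  char Block[64];
  // Normal class: the caller's block is ignored; a batch-class block is used.
  assert(createBatch(/*ClassId=*/3, Block) == Pool);
  // Batch class: the pushed block itself becomes the header.
  assert(createBatch(BatchClassId, Block) == Block);
  return 0;
}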
@@ -127,11 +203,6 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Str->append(" No block is cached.\n");
}

static u16 getMaxCached(uptr Size) {
return Min(SizeClassMap::MaxNumCachedHint,
SizeClassMap::getMaxCachedHint(Size));
}

private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
@@ -140,7 +211,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
u16 MaxCount;
// Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
};
PerClass PerClassArray[NumClasses] = {};
LocalStats Stats;
@@ -157,7 +228,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
for (uptr I = 0; I < NumClasses; I++) {
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
if (I != BatchClassId) {
P->ClassSize = Size;
} else {
@@ -175,14 +246,15 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {

NOINLINE bool refill(PerClass *C, uptr ClassId) {
initCacheMaybe(C);

// TODO(chiahungduan): Pass the max number cached for each size class.
const u16 NumBlocksRefilled =
Allocator->popBlocks(this, ClassId, C->Chunks);
DCHECK_LE(NumBlocksRefilled,
getMaxCached(SizeClassAllocator::getSizeByClassId(ClassId)));
C->Count += NumBlocksRefilled;
return NumBlocksRefilled != 0;
TransferBatch *B = Allocator->popBatch(this, ClassId);
if (UNLIKELY(!B))
return false;
DCHECK_GT(B->getCount(), 0);
C->Count = B->getCount();
B->copyToArray(C->Chunks);
B->clear();
destroyBatch(ClassId, B);
return true;
}

NOINLINE void drain(PerClass *C, uptr ClassId) {
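Taken together, the restored refill() is a three-step round trip: pop a whole TransferBatch from the primary, bulk-copy its compact pointers into the per-class cache, and recycle the batch header. A self-contained toy of that flow (stand-in types; destroyBatch is called by the real refill but is not shown in this diff, so it appears here only in comments):

#include <cstdint>
#include <cstring>

using u16 = std::uint16_t;
using CompactPtrT = std::uint32_t;

struct ToyBatch {
  u16 Count;
  CompactPtrT Batch[8];
  u16 getCount() const { return Count; }
  void copyToArray(CompactPtrT *Array) const {
    std::memcpy(Array, Batch, sizeof(Batch[0]) * Count);
  }
  void clear() { Count = 0; }
};

static ToyBatch FreeBatch = {3, {10, 20, 30}};

ToyBatch *popBatch() { return FreeBatch.Count ? &FreeBatch : nullptr; }

bool refill(CompactPtrT *Chunks, u16 &Count) {
  ToyBatch *B = popBatch(); // detach a whole batch from the primary
  if (!B)
    return false;           // the primary has no blocks for this class
  Count = B->getCount();    // adopt the batch's count wholesale
  B->copyToArray(Chunks);   // one memcpy into the per-class array
  B->clear();               // the real code now recycles the header block
  return true;              // via destroyBatch(ClassId, B)
}

int main() {
  CompactPtrT Chunks[8];
  u16 Count = 0;
  return (refill(Chunks, Count) && Count == 3 && Chunks[2] == 30) ? 0 : 1;
}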
41 changes: 12 additions & 29 deletions compiler-rt/lib/scudo/standalone/primary32.h
@@ -9,7 +9,6 @@
#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_

#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
@@ -54,11 +53,8 @@ template <typename Config> class SizeClassAllocator32 {
"");
typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef TransferBatch<ThisT> TransferBatch;
typedef BatchGroup<ThisT> BatchGroup;

static_assert(sizeof(BatchGroup) <= sizeof(TransferBatch),
"BatchGroup uses the same class size as TransferBatch");
typedef typename CacheT::TransferBatch TransferBatch;
typedef typename CacheT::BatchGroup BatchGroup;

static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
@@ -191,21 +187,6 @@ template <typename Config> class SizeClassAllocator32 {
return BlockSize > PageSize;
}

u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray) {
TransferBatch *B = popBatch(C, ClassId);
if (!B)
return 0;

const u16 Count = B->getCount();
DCHECK_GT(Count, 0U);
B->moveToArray(ToArray);

if (ClassId != SizeClassMap::BatchClassId)
C->deallocate(SizeClassMap::BatchClassId, B);

return Count;
}

TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
@@ -539,8 +520,8 @@ template <typename Config> class SizeClassAllocator32 {
// from `CreateGroup` in `pushBlocksImpl`
BG->PushedBlocks = 1;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch =
CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
BG->MaxCachedPerBatch = TransferBatch::getMaxCached(
getSizeByClassId(SizeClassMap::BatchClassId));

Sci->FreeListInfo.BlockList.push_front(BG);
}
@@ -619,17 +600,17 @@ template <typename Config> class SizeClassAllocator32 {
DCHECK_GT(Size, 0U);

auto CreateGroup = [&](uptr CompactPtrGroupBase) {
BatchGroup *BG = reinterpret_cast<BatchGroup *>(C->getBatchClassBlock());
BatchGroup *BG = C->createGroup();
BG->Batches.clear();
TransferBatch *TB =
reinterpret_cast<TransferBatch *>(C->getBatchClassBlock());
TransferBatch *TB = C->createBatch(ClassId, nullptr);
TB->clear();

BG->CompactPtrGroupBase = CompactPtrGroupBase;
BG->Batches.push_front(TB);
BG->PushedBlocks = 0;
BG->BytesInBGAtLastCheckpoint = 0;
BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
BG->MaxCachedPerBatch =
TransferBatch::getMaxCached(getSizeByClassId(ClassId));

return BG;
};
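Both allocations in CreateGroup are carved from the batch class: createGroup() and createBatch(ClassId, nullptr) each return a block sized for BatchClassId, i.e. sizeof(TransferBatch). That is what the static_assert re-added to local_cache.h guards. A rough layout argument with assumed field sizes (CompactPtrT taken as u32 and MaxNumCached as 13 purely for illustration; the batch array alone makes TransferBatch the larger type):

#include <cstdint>

using u16 = std::uint16_t;
using uptr = std::uintptr_t;
using CompactPtrT = std::uint32_t;

struct ToyTransferBatch {
  ToyTransferBatch *Next;
  CompactPtrT Batch[13]; // assumed MaxNumCached
  u16 Count;
};

struct ToyBatchGroup {
  ToyBatchGroup *Next;
  uptr CompactPtrGroupBase;
  u16 MaxCachedPerBatch;
  uptr PushedBlocks;
  uptr BytesInBGAtLastCheckpoint;
  void *BatchesHead; // stand-in for the SinglyLinkedList header
};

static_assert(sizeof(ToyBatchGroup) <= sizeof(ToyTransferBatch),
              "mirrors the BatchGroup/TransferBatch assertion");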
@@ -644,7 +625,9 @@ template <typename Config> class SizeClassAllocator32 {
u16 UnusedSlots =
static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
if (UnusedSlots == 0) {
CurBatch = reinterpret_cast<TransferBatch *>(C->getBatchClassBlock());
CurBatch = C->createBatch(
ClassId,
reinterpret_cast<void *>(decompactPtr(ClassId, Array[I])));
CurBatch->clear();
Batches.push_front(CurBatch);
UnusedSlots = BG->MaxCachedPerBatch;
@@ -792,7 +775,7 @@ template <typename Config> class SizeClassAllocator32 {
}

const uptr Size = getSizeByClassId(ClassId);
const u16 MaxCount = CacheT::getMaxCached(Size);
const u16 MaxCount = TransferBatch::getMaxCached(Size);
DCHECK_GT(MaxCount, 0U);
// The maximum number of blocks we should carve in the region is dictated
// by the maximum number of batches we want to fill, and the amount of
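One last sizing note: TransferBatch::getMaxCached clamps the per-size-class hint to the batch capacity, and the per-class cache then holds twice that many entries (both P->MaxCount and the Chunks array use a factor of 2). A toy check of the clamping with illustrative numbers (scudo's real SizeClassMap derives both hints from its size-class configuration):

#include <algorithm>
#include <cstdint>

using u16 = std::uint16_t;
using uptr = std::uintptr_t;

// Illustrative stand-ins for SizeClassMap's hints.
constexpr u16 MaxNumCachedHint = 14;
u16 getMaxCachedHint(uptr Size) {
  // Toy rule: cache fewer entries for larger blocks.
  return static_cast<u16>(std::max<uptr>(1, 1024 / Size));
}

// Mirrors TransferBatch::getMaxCached above.
u16 getMaxCached(uptr Size) {
  return std::min(MaxNumCachedHint, getMaxCachedHint(Size));
}

// getMaxCached(32)  == 14 -> capped by the batch capacity
// getMaxCached(256) == 4  -> capped by the per-size hint
// The local cache then sizes its array as 2 * getMaxCached(Size).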