diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index 748530820cd64..0aea7b8f2fb9a 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -57,6 +57,10 @@ BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
 // Disable the quarantine code.
 BASE_OPTIONAL(const bool, QuarantineDisabled, false)
 
+// If set to true, malloc_usable_size returns the exact size of the allocation.
+// If set to false, it returns the total available size of the allocation.
+BASE_OPTIONAL(const bool, ExactUsableSize, true)
+
 // PRIMARY_REQUIRED_TYPE(NAME)
 //
 // SizeClassMap to use with the Primary.
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 329ec4596482b..ffe9554203241 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -706,19 +706,26 @@ class Allocator {
         if (!getChunkFromBlock(Block, &Chunk, &Header) &&
             !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
           return;
-      } else {
-        if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
-          return;
+      } else if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header)) {
+        return;
       }
-      if (Header.State == Chunk::State::Allocated) {
-        uptr TaggedChunk = Chunk;
-        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
-          TaggedChunk = untagPointer(TaggedChunk);
-        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
-          TaggedChunk = loadTag(Chunk);
-        Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
-                 Arg);
+
+      if (Header.State != Chunk::State::Allocated)
+        return;
+
+      uptr TaggedChunk = Chunk;
+      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+        TaggedChunk = untagPointer(TaggedChunk);
+      uptr Size;
+      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load()))) {
+        TaggedChunk = loadTag(Chunk);
+        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+      } else if (AllocatorConfig::getExactUsableSize()) {
+        Size = getSize(reinterpret_cast<void *>(Chunk), &Header);
+      } else {
+        Size = getUsableSize(reinterpret_cast<void *>(Chunk), &Header);
       }
+      Callback(TaggedChunk, Size, Arg);
     };
     Primary.iterateOverBlocks(Lambda);
     Secondary.iterateOverBlocks(Lambda);
@@ -759,16 +766,50 @@ class Allocator {
     return false;
   }
 
-  // Return the usable size for a given chunk. Technically we lie, as we just
-  // report the actual size of a chunk. This is done to counteract code actively
-  // writing past the end of a chunk (like sqlite3) when the usable size allows
-  // for it, which then forces realloc to copy the usable size of a chunk as
-  // opposed to its actual size.
+  ALWAYS_INLINE uptr getUsableSize(const void *Ptr,
+                                   Chunk::UnpackedHeader *Header) {
+    void *BlockBegin = getBlockBegin(Ptr, Header);
+    if (LIKELY(Header->ClassId)) {
+      return SizeClassMap::getSizeByClassId(Header->ClassId) -
+             (reinterpret_cast<uptr>(Ptr) - reinterpret_cast<uptr>(BlockBegin));
+    }
+
+    uptr UntaggedPtr = reinterpret_cast<uptr>(Ptr);
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
+      UntaggedPtr = untagPointer(UntaggedPtr);
+      BlockBegin = untagPointer(BlockBegin);
+    }
+    return SecondaryT::getBlockEnd(BlockBegin) - UntaggedPtr;
+  }
+
+  // Return the usable size for a given chunk. If MTE is enabled or if the
+  // ExactUsableSize config parameter is true, we report the exact size of
+  // the original allocation. Otherwise, we return the total usable size of
+  // the chunk, which may be larger than what was requested.
   uptr getUsableSize(const void *Ptr) {
     if (UNLIKELY(!Ptr))
       return 0;
 
-    return getAllocSize(Ptr);
+    if (AllocatorConfig::getExactUsableSize() ||
+        UNLIKELY(useMemoryTagging<AllocatorConfig>(Primary.Options.load())))
+      return getAllocSize(Ptr);
+
+    initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+      return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+    Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+
+    // Getting the alloc size of a chunk only makes sense if it's allocated.
+    if (UNLIKELY(Header.State != Chunk::State::Allocated))
+      reportInvalidChunkState(AllocatorAction::Sizing, Ptr);
+
+    return getUsableSize(Ptr, &Header);
   }
 
   uptr getAllocSize(const void *Ptr) {
@@ -951,6 +992,19 @@ class Allocator {
                MemorySize, 2, 16);
   }
 
+  uptr getBlockBeginTestOnly(const void *Ptr) {
+    Chunk::UnpackedHeader Header;
+    Chunk::loadHeader(Cookie, Ptr, &Header);
+    DCHECK(Header.State == Chunk::State::Allocated);
+
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+      Ptr = untagPointer(const_cast<void *>(Ptr));
+    void *Begin = getBlockBegin(Ptr, &Header);
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
+      Begin = untagPointer(Begin);
+    return reinterpret_cast<uptr>(Begin);
+  }
+
 private:
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 5fdfd1e7c55cc..4837ac96b9b26 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -1152,6 +1152,248 @@ TEST(ScudoCombinedTest, QuarantineDisabled) {
   EXPECT_EQ(Stats.find("Stats: Quarantine"), std::string::npos);
 }
 
+struct UsableSizeClassConfig {
+  static const scudo::uptr NumBits = 1;
+  static const scudo::uptr MinSizeLog = 10;
+  static const scudo::uptr MidSizeLog = 10;
+  static const scudo::uptr MaxSizeLog = 13;
+  static const scudo::u16 MaxNumCachedHint = 8;
+  static const scudo::uptr MaxBytesCachedLog = 12;
+  static const scudo::uptr SizeDelta = 0;
+};
+
+struct TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = false;
+  static const bool QuarantineDisabled = true;
+
+  template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // In order to properly test the usable size, this Primary config has
+    // four real size classes: 1024, 2048, 4096, 8192.
+    using SizeClassMap = scudo::FixedSizeClassMap<UsableSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = 21U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+
+template <class AllocatorT> void VerifyExactUsableSize(AllocatorT &Allocator) {
+  // Scan through all sizes up to 10000, then some larger sizes.
+  for (scudo::uptr Size = 1; Size < 10000; Size++) {
+    void *P = Allocator.allocate(Size, Origin);
+    EXPECT_EQ(Size, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << Size;
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify that aligned allocations also return the exact size allocated.
+  const scudo::uptr AllocSize = 313;
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(AllocSize, Origin, 1U << Align);
+    EXPECT_EQ(AllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << AllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+
+  // Verify an explicitly large allocation.
+  const scudo::uptr LargeAllocSize = 1000000;
+  void *P = Allocator.allocate(LargeAllocSize, Origin);
+  EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P));
+  Allocator.deallocate(P, Origin);
+
+  // Now do the same for aligned large allocations.
+  for (scudo::uptr Align = 1; Align <= 8; Align++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Align);
+    EXPECT_EQ(LargeAllocSize, Allocator.getUsableSize(P))
+        << "Failed usable size at allocation size " << LargeAllocSize
+        << " at align " << (1U << Align);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+template <class AllocatorT>
+void VerifyIterateOverUsableSize(AllocatorT &Allocator) {
+  // This does not verify whether the size is the exact allocation size or
+  // the size of the size class. Instead, verify that the size matches the
+  // usable size and assume the other tests have verified getUsableSize.
+  std::unordered_map<void *, size_t> Pointers;
+  Pointers.insert({Allocator.allocate(128, Origin), 0U});
+  Pointers.insert({Allocator.allocate(128, Origin, 32), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000, Origin, 64), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin), 0U});
+  Pointers.insert({Allocator.allocate(8000, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 128), 0U});
+  Pointers.insert({Allocator.allocate(2000205, Origin, 256), 0U});
+
+  Allocator.disable();
+  Allocator.iterateOverChunks(
+      0, static_cast<scudo::uptr>(SCUDO_MMAP_RANGE_SIZE - 1),
+      [](uintptr_t Base, size_t Size, void *Arg) {
+        std::unordered_map<void *, size_t> *Pointers =
+            reinterpret_cast<std::unordered_map<void *, size_t> *>(Arg);
+        (*Pointers)[reinterpret_cast<void *>(Base)] = Size;
+      },
+      reinterpret_cast<void *>(&Pointers));
+  Allocator.enable();
+
+  for (auto [Ptr, IterateSize] : Pointers) {
+    EXPECT_NE(0U, IterateSize)
+        << "Pointer " << Ptr << " not found in iterateOverChunks call.";
+    EXPECT_EQ(IterateSize, Allocator.getUsableSize(Ptr))
+        << "Pointer " << Ptr
+        << " mismatch between iterate size and usable size.";
+    Allocator.deallocate(Ptr, Origin);
+  }
+}
+
+TEST(ScudoCombinedTest, ExactUsableSize) {
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+struct TestExactUsableSizeMTEConfig : TestExactUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, ExactUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestExactUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+template <class AllocatorT>
+void VerifyUsableSizePrimary(AllocatorT &Allocator) {
+  std::vector<scudo::uptr> SizeClasses = {1024U, 2048U, 4096U, 8192U};
+  for (size_t I = 0; I < SizeClasses.size(); I++) {
+    scudo::uptr SizeClass = SizeClasses[I];
+    scudo::uptr StartSize;
+    if (I == 0)
+      StartSize = 1;
+    else
+      StartSize = SizeClasses[I - 1];
+    scudo::uptr UsableSize = SizeClass - scudo::Chunk::getHeaderSize();
+    for (scudo::uptr Size = StartSize; Size < UsableSize; Size++) {
+      void *P = Allocator.allocate(Size, Origin);
+      EXPECT_EQ(UsableSize, Allocator.getUsableSize(P))
+          << "Failed usable size at allocation size " << Size
+          << " for size class " << SizeClass;
+      memset(P, 0xff, UsableSize);
+      EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+                reinterpret_cast<scudo::uptr>(P) + UsableSize);
+      Allocator.deallocate(P, Origin);
+    }
+
+    StartSize = UsableSize + 1;
+  }
+
+  std::vector<scudo::uptr> Alignments = {32U, 128U};
+  for (size_t I = 0; I < SizeClasses.size(); I++) {
+    scudo::uptr SizeClass = SizeClasses[I];
+    scudo::uptr AllocSize;
+    if (I == 0)
+      AllocSize = 1;
+    else
+      AllocSize = SizeClasses[I - 1] + 1;
+
+    for (auto Alignment : Alignments) {
+      void *P = Allocator.allocate(AllocSize, Origin, Alignment);
+      scudo::uptr UsableSize = Allocator.getUsableSize(P);
+      memset(P, 0xff, UsableSize);
+      EXPECT_EQ(Allocator.getBlockBeginTestOnly(P) + SizeClass,
+                reinterpret_cast<scudo::uptr>(P) + UsableSize)
+          << "Failed usable size at allocation size " << AllocSize
+          << " for size class " << SizeClass << " at alignment " << Alignment;
+      Allocator.deallocate(P, Origin);
+    }
+  }
+}
+
+template <class AllocatorT>
+void VerifyUsableSizeSecondary(AllocatorT &Allocator) {
+  const scudo::uptr LargeAllocSize = 996780;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  void *P = Allocator.allocate(LargeAllocSize, Origin);
+  scudo::uptr UsableSize = Allocator.getUsableSize(P);
+  memset(P, 0xff, UsableSize);
+  // Assumes that the secondary always rounds up allocations to a page
+  // boundary.
+  EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+                           PageSize),
+            reinterpret_cast<scudo::uptr>(P) + UsableSize);
+  Allocator.deallocate(P, Origin);
+
+  // Check aligned allocations now.
+  for (scudo::uptr Alignment = 1; Alignment <= 8; Alignment++) {
+    void *P = Allocator.allocate(LargeAllocSize, Origin, 1U << Alignment);
+    scudo::uptr UsableSize = Allocator.getUsableSize(P);
+    EXPECT_EQ(scudo::roundUp(reinterpret_cast<scudo::uptr>(P) + LargeAllocSize,
+                             PageSize),
+              reinterpret_cast<scudo::uptr>(P) + UsableSize)
+        << "Failed usable size at allocation size " << LargeAllocSize
+        << " at alignment " << (1U << Alignment);
+    Allocator.deallocate(P, Origin);
+  }
+}
+
+struct TestFullUsableSizeConfig : TestExactUsableSizeConfig {
+  static const bool ExactUsableSize = false;
+};
+
+TEST(ScudoCombinedTest, FullUsableSize) {
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  VerifyUsableSizePrimary(*Allocator);
+  VerifyUsableSizeSecondary(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
+struct TestFullUsableSizeMTEConfig : TestFullUsableSizeConfig {
+  static const bool MaySupportMemoryTagging = true;
+};
+
+TEST(ScudoCombinedTest, FullUsableSizeMTE) {
+  if (!scudo::archSupportsMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    TEST_SKIP("Only supported on systems that can enable MTE.");
+
+  scudo::enableSystemMemoryTaggingTestOnly();
+
+  using AllocatorT = scudo::Allocator<TestFullUsableSizeMTEConfig>;
+  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
+
+  // When MTE is enabled, you get exact sizes.
+  VerifyExactUsableSize(*Allocator);
+  VerifyIterateOverUsableSize(*Allocator);
+}
+
 // Verify that no special quarantine blocks appear in iterateOverChunks.
 TEST(ScudoCombinedTest, QuarantineIterateOverChunks) {
   using AllocatorT = TestAllocator;
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
index 612317b3c3293..9e5d0658e5ed5 100644
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -588,8 +588,13 @@ TEST_F(ScudoWrappersCTest, MallocInfo) {
   EXPECT_EQ(errno, 0);
   fclose(F);
   EXPECT_EQ(strncmp(Buffer, "
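Note (editor's sketch, not part of the patch): the snippet below illustrates how a config opts out of the new ExactUsableSize default and what getUsableSize then reports. It reuses TestExactUsableSizeConfig and the 1024-byte size class from the tests above; ExampleFullUsableSizeConfig and ExampleUsableSize are hypothetical names, and the code assumes the same headers and helpers available to combined_test.cpp.

// Editor's sketch (hypothetical names, not part of the patch): mirrors the
// TestFullUsableSizeConfig pattern above to contrast the two behaviors.
struct ExampleFullUsableSizeConfig : TestExactUsableSizeConfig {
  // Opt out of the default so getUsableSize reports the whole block space.
  static const bool ExactUsableSize = false;
};

void ExampleUsableSize() {
  using AllocatorT = scudo::Allocator<ExampleFullUsableSizeConfig>;
  auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());

  // 1000 bytes plus the chunk header fits the 1024-byte size class of
  // UsableSizeClassConfig.
  void *P = Allocator->allocate(1000, scudo::Chunk::Origin::Malloc);

  // Under the default (ExactUsableSize == true) this would report 1000; with
  // the override above it reports 1024 - scudo::Chunk::getHeaderSize(), i.e.
  // the space remaining in the backing block.
  scudo::uptr Usable = Allocator->getUsableSize(P);

  // Writing Usable bytes stays within the block: P + Usable lines up with
  // getBlockBeginTestOnly(P) + 1024, as the FullUsableSize test checks.
  memset(P, 0, Usable);

  Allocator->deallocate(P, scudo::Chunk::Origin::Malloc);
}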