Calculate table size of FastLRUCache more accurately (#10235)
Summary:
Calculate the required size of the hash table in FastLRUCache more accurately.

Pull Request resolved: #10235

Test Plan: ``make -j24 check``

Reviewed By: gitbw95

Differential Revision: D37460546

Pulled By: guidotag

fbshipit-source-id: 7945128d6f002832f8ed922ef0151919f4350854
Guido Tagliavini Ponce authored and facebook-github-bot committed Jun 28, 2022
1 parent a1eb02f commit c6055cb
Showing 3 changed files with 117 additions and 6 deletions.
24 changes: 18 additions & 6 deletions cache/fast_lru_cache.cc
@@ -192,8 +192,7 @@ LRUCacheShard::LRUCacheShard(size_t capacity, size_t estimated_value_size,
     : capacity_(capacity),
       strict_capacity_limit_(strict_capacity_limit),
       table_(
-          CalcHashBits(capacity, estimated_value_size, metadata_charge_policy) +
-          static_cast<uint8_t>(ceil(log2(1.0 / kLoadFactor)))),
+          CalcHashBits(capacity, estimated_value_size, metadata_charge_policy)),
       usage_(0),
       lru_usage_(0) {
   set_metadata_charge_policy(metadata_charge_policy);
@@ -295,16 +294,29 @@ void LRUCacheShard::EvictFromLRU(size_t charge,
   }
 }
 
-uint8_t LRUCacheShard::CalcHashBits(
-    size_t capacity, size_t estimated_value_size,
+size_t LRUCacheShard::CalcEstimatedHandleCharge(
+    size_t estimated_value_size,
     CacheMetadataChargePolicy metadata_charge_policy) {
   LRUHandle h;
   h.CalcTotalCharge(estimated_value_size, metadata_charge_policy);
-  size_t num_entries = capacity / h.total_charge;
+  return h.total_charge;
+}
+
+uint8_t LRUCacheShard::CalcHashBits(
+    size_t capacity, size_t estimated_value_size,
+    CacheMetadataChargePolicy metadata_charge_policy) {
+  size_t handle_charge =
+      CalcEstimatedHandleCharge(estimated_value_size, metadata_charge_policy);
+  size_t num_entries =
+      static_cast<size_t>(capacity / (kLoadFactor * handle_charge));
 
   // Compute the ceiling of log2(num_entries). If num_entries == 0, return 0.
   uint8_t num_hash_bits = 0;
-  while (num_entries >>= 1) {
+  size_t num_entries_copy = num_entries;
+  while (num_entries_copy >>= 1) {
     ++num_hash_bits;
   }
+  num_hash_bits += size_t{1} << num_hash_bits < num_entries ? 1 : 0;
   return num_hash_bits;
 }

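Note on the sizing change: the old constructor sized the table as 2^(floor(log2(capacity / handle_charge)) + ceil(log2(1 / kLoadFactor))), while the new CalcHashBits folds the load factor into the entry count first and then rounds up, so the table becomes the smallest power of two with at least capacity / (kLoadFactor * handle_charge) slots. A self-contained sketch of that arithmetic (the kLoadFactor and handle_charge values below are illustrative, not the constants FastLRUCache actually uses):

```cpp
// Sketch of the new sizing logic. kLoadFactor and handle_charge here are
// made-up example values, not the constants used by FastLRUCache.
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr double kLoadFactor = 0.35;  // assumed target load factor

// Ceiling of log2(n), returning 0 when n == 0 (mirrors the CalcHashBits loop).
uint8_t CeilLog2(size_t n) {
  uint8_t bits = 0;
  size_t copy = n;
  while (copy >>= 1) {
    ++bits;
  }
  if ((size_t{1} << bits) < n) {
    ++bits;
  }
  return bits;
}

int main() {
  size_t capacity = 1024;     // bytes the shard may hold
  size_t handle_charge = 64;  // assumed bytes per handle (value + metadata)
  // Entries the table must accommodate at the target load factor.
  size_t num_entries =
      static_cast<size_t>(capacity / (kLoadFactor * handle_charge));
  uint8_t hash_bits = CeilLog2(num_entries);
  // 1024 / (0.35 * 64) ~= 45 entries -> 6 hash bits, i.e. a 64-slot table,
  // since 2^5 = 32 < 45 <= 64 = 2^6.
  std::cout << "entries: " << num_entries
            << ", hash bits: " << static_cast<int>(hash_bits)
            << ", table slots: " << (size_t{1} << hash_bits) << "\n";
  return 0;
}
```

Taking the ceiling ensures the table never ends up smaller than the expected occupancy at the target load factor.
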
10 changes: 10 additions & 0 deletions cache/fast_lru_cache.h
@@ -25,6 +25,9 @@ namespace ROCKSDB_NAMESPACE {
 
 namespace fast_lru_cache {
 
+// Forward declaration of friend class.
+class FastLRUCacheTest;
+
 // LRU cache implementation using an open-address hash table.
 //
 // Every slot in the hash table is an LRUHandle. Because handles can be
@@ -365,6 +368,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
 
  private:
   friend class LRUCache;
+  friend class FastLRUCacheTest;
+
   void LRU_Remove(LRUHandle* e);
   void LRU_Insert(LRUHandle* e);
 
@@ -374,6 +379,11 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // holding the mutex_.
   void EvictFromLRU(size_t charge, autovector<LRUHandle>* deleted);
 
+  // Returns the charge of a single handle.
+  static size_t CalcEstimatedHandleCharge(
+      size_t estimated_value_size,
+      CacheMetadataChargePolicy metadata_charge_policy);
+
   // Returns the number of bits used to hash an element in the hash
   // table.
   static uint8_t CalcHashBits(size_t capacity, size_t estimated_value_size,
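For readers unfamiliar with the pattern, the friend declarations above exist solely so the new test can call the private static helpers. A minimal sketch of the pattern with made-up names (WidgetCache, CalcSlots), not RocksDB's:

```cpp
#include <cassert>
#include <cstddef>

// Forward declaration so the cache class can name the test as a friend.
class WidgetCacheTest;

class WidgetCache {
 private:
  friend class WidgetCacheTest;  // lets the test reach private members

  // Private static helper the test wants to exercise directly.
  static size_t CalcSlots(size_t capacity) { return capacity / 8; }
};

class WidgetCacheTest {
 public:
  // Friendship lets the test call the private static helper directly,
  // without exposing it in the class's public API.
  static void Run() { assert(WidgetCache::CalcSlots(64) == 8); }
};

int main() {
  WidgetCacheTest::Run();
  return 0;
}
```

The alternative of making the helpers public would widen the cache's interface just for testing, so friendship keeps them private.
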
89 changes: 89 additions & 0 deletions cache/lru_cache_test.cc
@@ -206,6 +206,7 @@ TEST_F(LRUCacheTest, EntriesWithPriority) {
   ValidateLRUList({"e", "f", "g", "Z", "d"}, 2);
 }
 
+namespace fast_lru_cache {
 // TODO(guido) Consolidate the following FastLRUCache tests with
 // that of LRUCache.
 class FastLRUCacheTest : public testing::Test {
Expand Down Expand Up @@ -238,6 +239,38 @@ class FastLRUCacheTest : public testing::Test {

Status Insert(char key, size_t len) { return Insert(std::string(len, key)); }

size_t CalcEstimatedHandleChargeWrapper(
size_t estimated_value_size,
CacheMetadataChargePolicy metadata_charge_policy) {
return fast_lru_cache::LRUCacheShard::CalcEstimatedHandleCharge(
estimated_value_size, metadata_charge_policy);
}

uint8_t CalcHashBitsWrapper(
size_t capacity, size_t estimated_value_size,
CacheMetadataChargePolicy metadata_charge_policy) {
return fast_lru_cache::LRUCacheShard::CalcHashBits(
capacity, estimated_value_size, metadata_charge_policy);
}

// Maximum number of items that a shard can hold.
double CalcMaxOccupancy(size_t capacity, size_t estimated_value_size,
CacheMetadataChargePolicy metadata_charge_policy) {
size_t handle_charge =
fast_lru_cache::LRUCacheShard::CalcEstimatedHandleCharge(
estimated_value_size, metadata_charge_policy);
return capacity / (fast_lru_cache::kLoadFactor * handle_charge);
}

bool TableSizeIsAppropriate(uint8_t hash_bits, double max_occupancy) {
if (hash_bits == 0) {
return max_occupancy <= 1;
} else {
return (1 << hash_bits >= max_occupancy) &&
(1 << (hash_bits - 1) <= max_occupancy);
}
}

private:
fast_lru_cache::LRUCacheShard* cache_ = nullptr;
};
@@ -253,6 +286,62 @@ TEST_F(FastLRUCacheTest, ValidateKeySize) {
   EXPECT_NOK(Insert('f', 0));
 }
 
+TEST_F(FastLRUCacheTest, CalcHashBitsTest) {
+  size_t capacity = 1024;
+  size_t estimated_value_size = 1;
+  CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
+  double max_occupancy =
+      CalcMaxOccupancy(capacity, estimated_value_size, metadata_charge_policy);
+  uint8_t hash_bits = CalcHashBitsWrapper(capacity, estimated_value_size,
+                                          metadata_charge_policy);
+  EXPECT_TRUE(TableSizeIsAppropriate(hash_bits, max_occupancy));
+
+  capacity = 1024;
+  estimated_value_size = 1;
+  metadata_charge_policy = kFullChargeCacheMetadata;
+  max_occupancy =
+      CalcMaxOccupancy(capacity, estimated_value_size, metadata_charge_policy);
+  hash_bits = CalcHashBitsWrapper(capacity, estimated_value_size,
+                                  metadata_charge_policy);
+  EXPECT_TRUE(TableSizeIsAppropriate(hash_bits, max_occupancy));
+
+  // No elements fit in cache.
+  capacity = 0;
+  estimated_value_size = 1;
+  metadata_charge_policy = kDontChargeCacheMetadata;
+  hash_bits = CalcHashBitsWrapper(capacity, estimated_value_size,
+                                  metadata_charge_policy);
+  EXPECT_TRUE(TableSizeIsAppropriate(hash_bits, 0 /* max_occupancy */));
+
+  // Set the capacity just below a single handle. Because the load factor is <
+  // 100% at least one handle will fit in the table.
+  estimated_value_size = 1;
+  size_t handle_charge = CalcEstimatedHandleChargeWrapper(
+      8192 /* estimated_value_size */, kDontChargeCacheMetadata);
+  capacity = handle_charge - 1;
+  // The load factor should be bounded away from 100%.
+  assert(static_cast<size_t>(capacity / fast_lru_cache::kLoadFactor) >
+         handle_charge);
+  metadata_charge_policy = kDontChargeCacheMetadata;
+  max_occupancy =
+      CalcMaxOccupancy(capacity, estimated_value_size, metadata_charge_policy);
+  hash_bits = CalcHashBitsWrapper(capacity, estimated_value_size,
+                                  metadata_charge_policy);
+  EXPECT_TRUE(TableSizeIsAppropriate(hash_bits, max_occupancy));
+
+  // Large capacity.
+  capacity = 31924172;
+  estimated_value_size = 321;
+  metadata_charge_policy = kFullChargeCacheMetadata;
+  max_occupancy =
+      CalcMaxOccupancy(capacity, estimated_value_size, metadata_charge_policy);
+  hash_bits = CalcHashBitsWrapper(capacity, estimated_value_size,
+                                  metadata_charge_policy);
+  EXPECT_TRUE(TableSizeIsAppropriate(hash_bits, max_occupancy));
+}
+
+}  // namespace fast_lru_cache
+
 class TestSecondaryCache : public SecondaryCache {
  public:
   // Specifies what action to take on a lookup for a particular key
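The test's TableSizeIsAppropriate helper encodes the property that 2^hash_bits is the smallest power of two at or above the expected occupancy. A standalone check of that property for integer occupancies (CeilLog2 below mirrors the CalcHashBits loop; none of this is RocksDB code):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Ceiling of log2(n), returning 0 for n == 0, as in CalcHashBits.
uint8_t CeilLog2(size_t n) {
  uint8_t bits = 0;
  size_t copy = n;
  while (copy >>= 1) {
    ++bits;
  }
  if ((size_t{1} << bits) < n) {
    ++bits;
  }
  return bits;
}

// Same predicate the test uses: the table (2^hash_bits slots) must fit the
// expected occupancy, and half the table must not already be enough.
bool TableSizeIsAppropriate(uint8_t hash_bits, double max_occupancy) {
  if (hash_bits == 0) {
    return max_occupancy <= 1;
  }
  return static_cast<double>(size_t{1} << hash_bits) >= max_occupancy &&
         static_cast<double>(size_t{1} << (hash_bits - 1)) <= max_occupancy;
}

int main() {
  // Exhaustively check small occupancies: the ceiling-of-log2 choice always
  // yields the smallest power of two that satisfies the predicate.
  for (size_t occupancy = 0; occupancy <= (size_t{1} << 16); ++occupancy) {
    assert(TableSizeIsAppropriate(CeilLog2(occupancy),
                                  static_cast<double>(occupancy)));
  }
  return 0;
}
```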
