Skip to content

Commit

Permalink
HBASE-28189 Fix the miss count in one of CombinedBlockCache getBlock …
Browse files Browse the repository at this point in the history
…implementations (#5506)

Signed-off-by: Peter Somogyi <psomogyi@apache.org>
  • Loading branch information
wchevreuil committed Nov 9, 2023
1 parent 4227452 commit 43a1090
Show file tree
Hide file tree
Showing 2 changed files with 101 additions and 9 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and
Expand All @@ -36,6 +38,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
protected final BlockCache l2Cache;
protected final CombinedCacheStats combinedCacheStats;

private static final Logger LOG = LoggerFactory.getLogger(CombinedBlockCache.class);

public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) {
this.l1Cache = l1Cache;
this.l2Cache = l2Cache;
Expand Down Expand Up @@ -75,17 +79,59 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
// NOTE(review): this span is a rendered diff without +/- markers. The two comment lines
// below and the "if (!existInL1 ...)" stats block further down appear to be PRE-change
// lines retained by the diff view, interleaved with the new implementation — verify
// against the actual commit before treating this span as compilable code.
// We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting
// passed always.
Cacheable block = null;
// We don't know the block type. We should try to get it on one of the caches only,
// but not both otherwise we'll over compute on misses. Here we check if the key is on L1,
// if so, call getBlock on L1 and that will compute the hit. Otherwise, we'll try to get it from
// L2 and whatever happens, we'll update the stats there.
boolean existInL1 = l1Cache.containsBlock(cacheKey);
// NOTE(review): the next three lines look like the pre-change miss accounting that this
// commit removes (it double-counted the containsBlock probe as an L1 miss).
if (!existInL1 && updateCacheMetrics && !repeat) {
// If the block does not exist in L1, the containsBlock should be counted as one miss.
l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
// if we know it's in L1, just delegate call to l1 and return it
if (existInL1) {
// Metrics are deferred (updateCacheMetrics=false here) so the hit/miss is recorded
// exactly once, on the tier chosen below.
block = l1Cache.getBlock(cacheKey, caching, repeat, false);
} else {
block = l2Cache.getBlock(cacheKey, caching, repeat, false);
}
if (updateCacheMetrics) {
boolean metaBlock = isMetaBlock(cacheKey.getBlockType());
if (metaBlock) {
// Meta blocks are expected in L1; finding one only in L2 is anomalous, so warn
// but still attribute the hit to the tier that actually served it.
if (!existInL1 && block != null) {
LOG.warn("Cache key {} had block type {}, but was found in L2 cache.", cacheKey,
cacheKey.getBlockType());
updateBlockMetrics(block, cacheKey, l2Cache, caching);
} else {
updateBlockMetrics(block, cacheKey, l1Cache, caching);
}
} else {
// Data blocks are expected in L2; the mirror-image anomaly check for L1.
if (existInL1) {
LOG.warn("Cache key {} had block type {}, but was found in L1 cache.", cacheKey,
cacheKey.getBlockType());
updateBlockMetrics(block, cacheKey, l1Cache, caching);
} else {
updateBlockMetrics(block, cacheKey, l2Cache, caching);
}
}
}
return block;
}

/**
 * Records the outcome of a single lookup on the given cache tier's stats: a hit when a
 * block was returned, a miss otherwise.
 * @param block   the lookup result; {@code null} means the block was not found
 * @param key     the key that was looked up (supplies primary/block-type tags)
 * @param cache   the tier (L1 or L2) whose stats should absorb this outcome
 * @param caching whether the caller asked for the block to be cached on read
 */
private void updateBlockMetrics(Cacheable block, BlockCacheKey key, BlockCache cache,
  boolean caching) {
  boolean found = block != null;
  if (found) {
    cache.getStats().hit(caching, key.isPrimary(), key.getBlockType());
  } else {
    cache.getStats().miss(caching, key.isPrimary(), key.getBlockType());
  }
}

return existInL1
? l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics)
: l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
/**
 * Looks up a block whose type is known, delegating directly to the tier that owns that
 * type: meta blocks go to L1, everything else to L2. Metrics handling is passed through
 * to the chosen tier.
 */
private Cacheable getBlockWithType(BlockCacheKey cacheKey, boolean caching, boolean repeat,
  boolean updateCacheMetrics) {
  BlockCache tier = isMetaBlock(cacheKey.getBlockType()) ? l1Cache : l2Cache;
  return tier.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,16 @@

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
import static org.junit.Assert.assertEquals;

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache.CombinedCacheStats;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Assert;
import org.junit.ClassRule;
Expand Down Expand Up @@ -110,11 +114,53 @@ public void testCombinedCacheStats() {

@Test
public void testMultiThreadGetAndEvictBlock() throws Exception {
  // Drive the shared LRU multi-thread get/evict harness against a combined (L1+L2) cache.
  TestLruBlockCache.testMultiThreadGetAndEvictBlockInternal(createCombinedBlockCache());
}

@Test
public void testCombinedBlockCacheStatsWithDataBlockType() throws Exception {
// DATA blocks are served from the L2 tier, so the single miss should land on L2 (0 on L1).
testCombinedBlockCacheStats(BlockType.DATA, 0, 1);
}

@Test
public void testCombinedBlockCacheStatsWithMetaBlockType() throws Exception {
// META blocks are served from the L1 tier, so the single miss should land on L1 (0 on L2).
testCombinedBlockCacheStats(BlockType.META, 1, 0);
}

@Test
public void testCombinedBlockCacheStatsWithNoBlockType() throws Exception {
// A key with no block type is presumably treated as non-meta and routed to L2 — the
// expected counts (0 L1 / 1 L2) encode that; confirm against isMetaBlock(null) handling.
testCombinedBlockCacheStats(null, 0, 1);
}

/**
 * Builds a {@link CombinedBlockCache} backed by an offheap bucket cache for the tests in
 * this class.
 * @return the factory-created cache, asserted to be a CombinedBlockCache
 */
private CombinedBlockCache createCombinedBlockCache() {
  Configuration conf = UTIL.getConfiguration();
  // Force a bucket-cache L2 so BlockCacheFactory produces the combined (L1+L2) cache.
  conf.set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
  conf.setInt(BUCKET_CACHE_SIZE_KEY, 32);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
  Assert.assertTrue(blockCache instanceof CombinedBlockCache);
  // Fix: the diff-view residue line invoking testMultiThreadGetAndEvictBlockInternal here
  // is removed — a factory must only construct; running an unrelated multi-thread stress
  // test as a side effect would slow and pollute every stats test that calls this helper.
  return (CombinedBlockCache) blockCache;
}

/**
 * Caches one block, verifies a warm read is not a miss, evicts it, then verifies the cold
 * read registers exactly one combined miss attributed to the expected tier.
 * @param type           block type under test ({@code null} exercises the unknown-type path)
 * @param expectedL1Miss misses expected on the first-level cache after the cold read
 * @param expectedL2Miss misses expected on the second-level cache after the cold read
 */
public void testCombinedBlockCacheStats(BlockType type, int expectedL1Miss, int expectedL2Miss)
  throws Exception {
  CombinedBlockCache cache = createCombinedBlockCache();
  BlockCacheKey cacheKey = new BlockCacheKey("key1", 0, false, type);
  int payloadSize = 100;
  int totalLength = HConstants.HFILEBLOCK_HEADER_SIZE + payloadSize;
  byte[] backing = new byte[totalLength];
  HFileContext fileContext = new HFileContextBuilder().build();
  // The block itself always carries a concrete type; only the KEY may omit it.
  HFileBlock block = new HFileBlock(type != null ? type : BlockType.DATA, payloadSize,
    payloadSize, -1, ByteBuff.wrap(ByteBuffer.wrap(backing, 0, payloadSize)),
    HFileBlock.FILL_HEADER, -1, 52, -1, fileContext, HEAP);
  cache.cacheBlock(cacheKey, block);
  cache.getBlock(cacheKey, true, false, true);
  assertEquals(0, cache.getStats().getMissCount());
  cache.evictBlock(cacheKey);
  cache.getBlock(cacheKey, true, false, true);
  // Exactly one miss combined, and it must be attributed to the expected tier only.
  assertEquals(1, cache.getStats().getMissCount());
  assertEquals(expectedL1Miss, cache.getFirstLevelCache().getStats().getMissCount());
  assertEquals(expectedL2Miss, cache.getSecondLevelCache().getStats().getMissCount());
}

}

0 comments on commit 43a1090

Please sign in to comment.