diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 5220f2d82b23..75269e571816 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -632,4 +632,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSource {
   String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
   String SCANNER_LEASE_EXPIRED_COUNT_DESC =
     "Count of scanners which were expired due to scanner lease timeout";
+  String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
+  String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching completed for this region.";
 }
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 3115603aabfc..4d8a028d89b1 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
    */
   long getStoreFileSize();
 
+  /**
+   * Gets the ratio (from 0 to 1) of this region's store file data that is currently cached.
+   */
+  float getCurrentRegionCacheRatio();
+
   /**
    * Get the total number of read requests that have been issued against this region
    */
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 0c20456e8cb6..92ecaa580885 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -233,6 +233,10 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
         this.regionWrapper.getNumReferenceFiles());
       mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
         MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize());
+      mrb.addGauge(
+        Interns.info(regionNamePrefix + MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
+          MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
+        this.regionWrapper.getCurrentRegionCacheRatio());
       mrb.addCounter(
         Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
           MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 3fe116a11a77..2c8205085d1e 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -116,6 +116,11 @@ public long getStoreFileSize() {
       return 0;
     }
 
+    @Override
+    public float getCurrentRegionCacheRatio() {
+      return 0;
+    }
+
     @Override
     public long getReadRequestCount() {
       return 0;
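A note on the value this gauge reports: despite the word "percentage" in the description string, it is the cached-bytes to store-file-bytes ratio computed in MetricsRegionWrapperImpl further down in this patch, and is only rendered as a percentage in the UI. A minimal, self-contained sketch of that computation (the class, helper, and values below are illustrative, not part of this patch):

```java
// Hypothetical helper mirroring how the currentRegionCacheRatio gauge value is
// derived: cached bytes over total store file bytes, guarded for empty regions.
public final class CacheRatioSketch {
  static float cacheRatio(long cachedBytes, long storeFileBytes) {
    // The wrapper leaves the ratio at 0 when the region has no store file data.
    return storeFileBytes > 0 ? (float) cachedBytes / storeFileBytes : 0.0f;
  }

  public static void main(String[] args) {
    // A region with 512 MB of store files, 128 MB of it cached, reports 0.25.
    System.out.println(cacheRatio(128L << 20, 512L << 20));
  }
}
```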
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 0df4d2763b0b..e77318437e04 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -38,6 +38,7 @@
     org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
     org.apache.hadoop.hbase.client.RegionReplicaUtil;
     org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper;
+    org.apache.hadoop.util.StringUtils;
     org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 </%import>
 <%if (onlineRegions != null && onlineRegions.size() > 0) %>
@@ -172,6 +173,7 @@
             <th>Bloom Size</th>
             <th>Data Locality</th>
             <th>Len Of Biggest Cell</th>
+            <th>% Cached</th>
@@ -237,6 +239,7 @@
                 <td><% bloomSizeStr %></td>
                 <td><% load.getDataLocality() %></td>
                 <td><% String.format("%,1d", lenOfBiggestCellInRegion) %></td>
+                <td><% StringUtils.formatPercent(load.getCurrentRegionCachedRatio(), 2) %></td>
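The template passes the raw ratio to Hadoop's StringUtils.formatPercent, which expects a fraction between 0 and 1 plus a decimal-place count, so no conversion is needed in the jamon code. A small standalone illustration (class name is made up):

```java
import org.apache.hadoop.util.StringUtils;

public class FormatPercentSketch {
  public static void main(String[] args) {
    // A cached ratio of 0.25 rendered with two decimal places, as the RS UI does.
    System.out.println(StringUtils.formatPercent(0.25, 2)); // prints "25.00%"
  }
}
```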
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index ed9e7dee5c11..5b11035ebe73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -154,21 +154,16 @@ default boolean isMetaBlock(BlockType blockType) {
   /**
    * Notifies the cache implementation that the given file has been fully cached (all its blocks
    * made into the cache).
-   * @param fileName the file that has been completely cached.
+   * @param fileName        the file that has been completely cached.
+   * @param totalBlockCount the total number of blocks cached for this file.
+   * @param dataBlockCount  the number of DATA type blocks cached for this file.
+   * @param size            the total size, in bytes, of the cached blocks.
    */
   default void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount,
     long size) {
     // noop
   }
 
-  /**
-   * Notifies the cache implementation that the given file had a block evicted
-   * @param fileName the file had a block evicted.
-   */
-  default void notifyFileBlockEvicted(String fileName) {
-    // noop
-  }
-
   /**
    * Checks whether there's enough space left in the cache to accommodate the passed block. This
    * method may not be overridden by all implementing classes. In such cases, the returned Optional
@@ -230,4 +225,14 @@ default Optional<Integer> getBlockSize(BlockCacheKey key) {
   default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return Optional.empty();
   }
+
+  /**
+   * Returns an Optional containing a map of region encoded names to the amount of data, in bytes,
+   * currently cached for each region.
+   * @return an empty optional if this method is not supported; otherwise the optional contains
+   *         the per-region cached sizes.
+   */
+  default Optional<Map<String, Long>> getRegionCachedInfo() {
+    return Optional.empty();
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
index 1cfdc5868be7..bf22d38e373b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -31,6 +32,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
   private final long offset;
   private BlockType blockType;
   private final boolean isPrimaryReplicaBlock;
+  private Path filePath;
 
   /**
    * Construct a new BlockCacheKey
@@ -49,6 +51,14 @@ public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica,
     this.blockType = blockType;
   }
 
+  public BlockCacheKey(Path hfilePath, long offset, boolean isPrimaryReplica, BlockType blockType) {
+    this.filePath = hfilePath;
+    this.isPrimaryReplicaBlock = isPrimaryReplica;
+    this.hfileName = hfilePath.getName();
+    this.offset = offset;
+    this.blockType = blockType;
+  }
+
   @Override
   public int hashCode() {
     return hfileName.hashCode() * 127 + (int) (offset ^ (offset >>> 32));
@@ -102,4 +112,9 @@ public BlockType getBlockType() {
   public void setBlockType(BlockType blockType) {
     this.blockType = blockType;
   }
+
+  public Path getFilePath() {
+    return filePath;
+  }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 2af219475980..3a2d4ccc25df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -429,6 +429,11 @@ public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
     return this.l2Cache.getFullyCachedFiles();
   }
 
+  @Override
+  public Optional<Map<String, Long>> getRegionCachedInfo() {
+    return l2Cache.getRegionCachedInfo();
+  }
+
   @Override
   public void setMaxSize(long size) {
     this.l1Cache.setMaxSize(size);
@@ -457,12 +462,6 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount,
   }
 
-  @Override
-  public void notifyFileBlockEvicted(String fileName) {
-    l1Cache.notifyFileBlockEvicted(fileName);
-    l1Cache.notifyFileBlockEvicted(fileName);
-  }
-
   @Override
   public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
     if (isMetaBlock(block.getBlockType())) {
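Because getRegionCachedInfo() returns an Optional that is empty for cache implementations without per-region tracking, callers should unwrap it defensively rather than assume a map is present. A hedged sketch of the calling convention (the helper class and method names are hypothetical):

```java
import java.util.Map;
import java.util.Optional;

final class RegionCacheInfoSketch {
  // Returns the cached byte count for a region, or 0 when the cache
  // implementation does not expose per-region information at all.
  static long cachedBytes(Optional<Map<String, Long>> info, String encodedRegionName) {
    return info.map(m -> m.getOrDefault(encodedRegionName, 0L)).orElse(0L);
  }
}
```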
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index caf875a89d6f..ace662414f40 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -1290,7 +1290,7 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final boolean cacheBlock,
     // from doing).
     BlockCacheKey cacheKey =
-      new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType);
+      new BlockCacheKey(path, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType);
     Attributes attributes = Attributes.of(BLOCK_CACHE_KEY_KEY, cacheKey.toString());
 
     boolean cacheable = cacheBlock && cacheIfCompactionsOff();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index c8111522c659..643f3d8d93d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -160,7 +160,7 @@ public class BucketCache implements BlockCache, HeapSize {
    * Map of region -> total size of the region prefetched on this region server. This is the total
    * size of hFiles for this region prefetched on this region server
    */
-  final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();
+  final Map<String, Long> regionCachedSize = new ConcurrentHashMap<>();
 
   private BucketCachePersister cachePersister;
 
@@ -334,7 +334,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
         fullyCachedFiles.clear();
         backingMapValidated.set(true);
         bucketAllocator = new BucketAllocator(capacity, bucketSizes);
-        regionCachedSizeMap.clear();
+        regionCachedSize.clear();
       }
     } else {
       bucketAllocator = new BucketAllocator(capacity, bucketSizes);
@@ -606,7 +606,7 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
         // the cache map state might differ from the actual cache. If we reach this block,
         // we should remove the cache key entry from the backing map
         backingMap.remove(key);
-        removeFileFromPrefetch(key.getHfileName());
+        fileNotFullyCached(key.getHfileName());
         LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
       } catch (IOException ioex) {
         LOG.error("Failed reading block " + key + " from bucket cache", ioex);
@@ -631,7 +631,7 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber,
     if (decrementBlockNumber) {
       this.blockNumber.decrement();
       if (ioEngine.isPersistent()) {
-        removeFileFromPrefetch(cacheKey.getHfileName());
+        fileNotFullyCached(cacheKey.getHfileName());
       }
     }
     if (evictedByEvictionProcess) {
@@ -642,6 +642,42 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber,
     }
   }
 
+  private void fileNotFullyCached(String hfileName) {
+    // Update the regionCachedSize map before removing the file from fullyCachedFiles
+    if (fullyCachedFiles.containsKey(hfileName)) {
+      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
+      String regionEncodedName = regionEntry.getFirst();
+      long filePrefetchSize = regionEntry.getSecond();
+      LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
+      regionCachedSize.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize);
+      // If all the blocks for a region are evicted from the cache, remove the entry for that region
+      if (
+        regionCachedSize.containsKey(regionEncodedName)
+          && regionCachedSize.get(regionEncodedName) == 0
+      ) {
+        regionCachedSize.remove(regionEncodedName);
+      }
+    }
+    fullyCachedFiles.remove(hfileName);
+  }
+
+  public void fileCacheCompleted(Path filePath, long size) {
+    Pair<String, Long> pair = new Pair<>();
+    // sets the region name
+    String regionName = filePath.getParent().getParent().getName();
+    pair.setFirst(regionName);
+    pair.setSecond(size);
+    fullyCachedFiles.put(filePath.getName(), pair);
+  }
+
+  private void updateRegionCachedSize(Path filePath, long cachedSize) {
+    if (filePath != null) {
+      String regionName = filePath.getParent().getParent().getName();
+      regionCachedSize.merge(regionName, cachedSize,
+        (previousSize, newBlockSize) -> previousSize + newBlockSize);
+    }
+  }
+
   /**
    * Free the {{@link BucketEntry} actually,which could only be invoked when the
    * {@link BucketEntry#refCnt} becoming 0.
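fileNotFullyCached() follows a subtract-then-prune pattern on the per-region counter. The same bookkeeping distilled into a standalone sketch; as a design note, ConcurrentMap.remove(key, value) prunes a zeroed entry atomically and would avoid the separate containsKey/get check used above (names and values here are illustrative):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class RegionSizeBookkeepingSketch {
  final ConcurrentMap<String, Long> regionCachedSize = new ConcurrentHashMap<>();

  void onFileEvicted(String region, long fileBytes) {
    // Subtract the evicted file's bytes from its region's running total.
    regionCachedSize.computeIfPresent(region, (r, total) -> total - fileBytes);
    // Drop the entry once it reaches zero, so fully evicted regions stop being reported.
    regionCachedSize.remove(region, 0L);
  }
}
```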
@@ -1074,6 +1110,7 @@ public void run() {
   protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
     BucketEntry previousEntry = backingMap.put(key, bucketEntry);
     blocksByHFile.add(key);
+    updateRegionCachedSize(key.getFilePath(), bucketEntry.getLength());
     if (previousEntry != null && previousEntry != bucketEntry) {
       previousEntry.withWriteLock(offsetLock, () -> {
         blockEvicted(key, previousEntry, false, false);
@@ -1295,8 +1332,9 @@ public boolean isCachePersistent() {
     return ioEngine.isPersistent() && persistencePath != null;
   }
 
-  public Map<String, Long> getRegionCachedInfo() {
-    return Collections.unmodifiableMap(regionCachedSizeMap);
+  @Override
+  public Optional<Map<String, Long>> getRegionCachedInfo() {
+    return Optional.of(Collections.unmodifiableMap(regionCachedSize));
   }
 
   /**
@@ -1333,17 +1371,17 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
   }
 
   private void updateRegionSizeMapWhileRetrievingFromFile() {
-    // Update the regionCachedSizeMap with the region size while restarting the region server
+    // Update the regionCachedSize map with the region size while restarting the region server
     if (LOG.isDebugEnabled()) {
       LOG.debug("Updating region size map after retrieving cached file list");
       dumpPrefetchList();
     }
-    regionCachedSizeMap.clear();
+    regionCachedSize.clear();
     fullyCachedFiles.forEach((hFileName, hFileSize) -> {
       // Get the region name for each file
       String regionEncodedName = hFileSize.getFirst();
       long cachedFileSize = hFileSize.getSecond();
-      regionCachedSizeMap.merge(regionEncodedName, cachedFileSize,
+      regionCachedSize.merge(regionEncodedName, cachedFileSize,
         (oldpf, fileSize) -> oldpf + fileSize);
     });
   }
@@ -1448,7 +1486,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOException {
         } catch (IOException e1) {
           LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey());
           evictBlock(keyEntry.getKey());
-          removeFileFromPrefetch(keyEntry.getKey().getHfileName());
+          fileNotFullyCached(keyEntry.getKey().getHfileName());
         }
       }
       backingMapValidated.set(true);
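Both putIntoBackingMap() and the retrieve-from-file path accumulate per-region sizes with Map.merge, which inserts the value on first sight of a key and combines on subsequent calls. A minimal demonstration of that accumulation (values are made up):

```java
import java.util.HashMap;
import java.util.Map;

class MergeSketch {
  public static void main(String[] args) {
    Map<String, Long> regionCachedSize = new HashMap<>();
    regionCachedSize.merge("region-a", 64L, Long::sum); // first file: total is 64
    regionCachedSize.merge("region-a", 32L, Long::sum); // second file: total is 96
    System.out.println(regionCachedSize.get("region-a")); // prints 96
  }
}
```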
@@ -1505,7 +1543,7 @@ private void disableCache() {
       this.backingMap.clear();
       this.blocksByHFile.clear();
       this.fullyCachedFiles.clear();
-      this.regionCachedSizeMap.clear();
+      this.regionCachedSize.clear();
     }
   }
 
@@ -1605,7 +1643,7 @@ protected String getAlgorithm() {
    */
   @Override
   public int evictBlocksByHfileName(String hfileName) {
-    removeFileFromPrefetch(hfileName);
+    fileNotFullyCached(hfileName);
     Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName);
     int numEvicted = 0;
     for (BlockCacheKey key : keySet) {
@@ -2089,11 +2127,6 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount,
     }
   }
 
-  @Override
-  public void notifyFileBlockEvicted(String fileName) {
-    removeFileFromPrefetch(fileName);
-  }
-
   @Override
   public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
     long currentUsed = bucketAllocator.getUsedSize();
@@ -2122,34 +2155,4 @@ public Optional<Integer> getBlockSize(BlockCacheKey key) {
     }
 
   }
-
-  private void removeFileFromPrefetch(String hfileName) {
-    // Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted
-    if (fullyCachedFiles.containsKey(hfileName)) {
-      Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
-      String regionEncodedName = regionEntry.getFirst();
-      long filePrefetchSize = regionEntry.getSecond();
-      LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
-      regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize);
-      // If all the blocks for a region are evicted from the cache, remove the entry for that region
-      if (
-        regionCachedSizeMap.containsKey(regionEncodedName)
-          && regionCachedSizeMap.get(regionEncodedName) == 0
-      ) {
-        regionCachedSizeMap.remove(regionEncodedName);
-      }
-    }
-    fullyCachedFiles.remove(hfileName);
-  }
-
-  public void fileCacheCompleted(Path filePath, long size) {
-    Pair<String, Long> pair = new Pair<>();
-    // sets the region name
-    String regionName = filePath.getParent().getParent().getName();
-    pair.setFirst(regionName);
-    pair.setSecond(size);
-    fullyCachedFiles.put(filePath.getName(), pair);
-    regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + fileSize);
-  }
-
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 2b219898869a..c50d964ca503 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1517,11 +1517,15 @@ private ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
         serverLoad.addCoprocessors(coprocessorBuilder.setName(coprocessor).build());
       }
     }
-    computeIfPersistentBucketCache(bc -> {
-      bc.getRegionCachedInfo().forEach((regionName, prefetchSize) -> {
-        serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, unitMB));
+
+    getBlockCache().ifPresent(cache -> {
+      cache.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
+        regionCachedInfo.forEach((regionName, prefetchSize) -> {
+          serverLoad.putRegionCachedInfo(regionName, roundSize(prefetchSize, unitMB));
+        });
       });
     });
+
     serverLoad.setReportStartTime(reportStartTime);
     serverLoad.setReportEndTime(reportEndTime);
     if (this.infoServer != null) {
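The rewritten buildServerLoad() block reads as: if a block cache is present, and it tracks per-region sizes, report each region. A self-contained sketch of the same nested-Optional flow, with the protobuf builder call replaced by a hypothetical stub:

```java
import java.util.Map;
import java.util.Optional;

class ServerLoadReportSketch {
  // Stand-in for serverLoad.putRegionCachedInfo(...) in buildServerLoad().
  static void putRegionCachedInfo(String region, long sizeMb) {
    System.out.println(region + " -> " + sizeMb + " MB");
  }

  static void report(Optional<Map<String, Long>> regionCachedInfo, long unitMb) {
    // No-op when the cache does not expose per-region sizes.
    regionCachedInfo.ifPresent(info ->
      info.forEach((region, bytes) -> putRegionCachedInfo(region, bytes / unitMb)));
  }
}
```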
@@ -1904,13 +1908,14 @@ RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
     int totalStaticBloomSizeKB = roundSize(totalStaticBloomSize, unitKB);
     int regionSizeMB = roundSize(totalRegionSize, unitMB);
     final MutableFloat currentRegionCachedRatio = new MutableFloat(0.0f);
-    computeIfPersistentBucketCache(bc -> {
-      if (bc.getRegionCachedInfo().containsKey(regionEncodedName)) {
-        currentRegionCachedRatio.setValue(regionSizeMB == 0
-          ? 0.0f
-          : (float) roundSize(bc.getRegionCachedInfo().get(regionEncodedName), unitMB)
-            / regionSizeMB);
-      }
+    getBlockCache().ifPresent(bc -> {
+      bc.getRegionCachedInfo().ifPresent(regionCachedInfo -> {
+        if (regionCachedInfo.containsKey(regionEncodedName)) {
+          currentRegionCachedRatio.setValue(regionSizeMB == 0
+            ? 0.0f
+            : (float) roundSize(regionCachedInfo.get(regionEncodedName), unitMB) / regionSizeMB);
+        }
+      });
     });
     HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 1402512fdc32..bce961e8f279 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -26,6 +26,7 @@
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
@@ -62,6 +63,8 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable {
 
   private ScheduledFuture<?> regionMetricsUpdateTask;
 
+  private float currentRegionCacheRatio;
+
   public MetricsRegionWrapperImpl(HRegion region) {
     this.region = region;
     this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor();
@@ -121,6 +124,10 @@ public long getStoreFileSize() {
     return storeFileSize;
   }
 
+  public float getCurrentRegionCacheRatio() {
+    return currentRegionCacheRatio;
+  }
+
   @Override
   public long getStoreRefCount() {
     return storeRefCount;
@@ -310,7 +317,15 @@ public void run() {
           readsOnlyFromMemstore.put(store.getColumnFamilyName(), tempVal);
         }
       }
-
+      MutableLong regionCachedAmount = new MutableLong(0);
+      region.getBlockCache().getRegionCachedInfo().ifPresent(regionCacheRatio -> regionCachedAmount
+        .addAndGet(regionCacheRatio.getOrDefault(region.getRegionInfo().getEncodedName(), 0L)));
+      if (tempStoreFileSize > 0) {
+        LOG.debug("Region {} has cached {} bytes out of a total of {}",
+          region.getRegionInfo().getEncodedName(), regionCachedAmount.getValue(),
+          tempStoreFileSize);
+        currentRegionCacheRatio = regionCachedAmount.floatValue() / tempStoreFileSize;
+      }
       numStoreFiles = tempNumStoreFiles;
       storeRefCount = tempStoreRefCount;
       maxCompactedStoreFileRefCount = tempMaxCompactedStoreFileRefCount;
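The metrics chore accumulates into a commons-lang3 MutableLong because locals captured by the ifPresent lambda must be effectively final, so a plain long cannot be updated from inside it. A minimal illustration of the holder pattern (the region name and size below are made up):

```java
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import org.apache.commons.lang3.mutable.MutableLong;

class MutableHolderSketch {
  public static void main(String[] args) {
    Optional<Map<String, Long>> info = Optional.of(Collections.singletonMap("region-a", 42L));
    // The mutable holder is written from inside the lambda; the reference
    // itself stays effectively final, which is all the compiler requires.
    MutableLong cached = new MutableLong(0);
    info.ifPresent(m -> cached.add(m.getOrDefault("region-a", 0L)));
    System.out.println(cached.longValue()); // prints 42
  }
}
```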
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
index addea8297dfd..db8f2213d0c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetchWithBucketCache.java
@@ -32,6 +32,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
+import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -185,6 +186,30 @@ public void testPrefetchDoesntInterruptInMemoryOnCapacity() throws Exception {
     assertTrue(bc.getStats().getEvictedCount() > 200);
   }
 
+  @Test
+  public void testPrefetchMetricProgress() throws Exception {
+    conf.setLong(BUCKET_CACHE_SIZE_KEY, 200);
+    blockCache = BlockCacheFactory.createBlockCache(conf);
+    cacheConf = new CacheConfig(conf, blockCache);
+    Path storeFile = writeStoreFile("testPrefetchMetricProgress", 100);
+    // Prefetches the file blocks
+    LOG.debug("First read should prefetch the blocks.");
+    readStoreFile(storeFile);
+    String regionName = storeFile.getParent().getParent().getName();
+    BucketCache bc = BucketCache.getBucketCacheFromCacheConfig(cacheConf).get();
+    MutableLong regionCachedSize = new MutableLong(0);
+    // Our file should have 6 DATA blocks. We should wait for all of them to be cached
+    Waiter.waitFor(conf, 300, () -> {
+      if (bc.getBackingMap().size() > 0) {
+        long currentSize = bc.getRegionCachedInfo().get().get(regionName);
+        assertTrue(regionCachedSize.getValue() <= currentSize);
+        LOG.debug("Logging progress of region caching: {}", currentSize);
+        regionCachedSize.setValue(currentSize);
+      }
+      return bc.getBackingMap().size() == 6;
+    });
+  }
+
   private void readStoreFile(Path storeFilePath) throws Exception {
     readStoreFile(storeFilePath, (r, o) -> {
       HFileBlock block = null;
@@ -216,6 +241,7 @@ private void readStoreFile(Path storeFilePath,
       Thread.sleep(1000);
     }
     long offset = 0;
+    long sizeForDataBlocks = 0;
     while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
       HFileBlock block = readFunction.apply(reader, offset);
       BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset);
@@ -276,5 +302,4 @@ public static KeyValue.Type generateKeyType(Random rand) {
       return keyType;
     }
   }
-
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
index a99212cb9de4..0995b0faee05 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
@@ -82,6 +82,11 @@ public long getStoreFileSize() {
     return 104;
   }
 
+  @Override
+  public float getCurrentRegionCacheRatio() {
+    return 0;
+  }
+
   @Override
   public long getReadRequestCount() {
     return 105;
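The new test asserts monotonic progress: each poll of the region's cached size must be at least the previous observation, until all six DATA blocks are in the backing map. The same idea distilled into plain Java, with a LongSupplier standing in for the bucket cache (poll count and sleep interval are arbitrary):

```java
import java.util.function.LongSupplier;

class MonotonicProgressSketch {
  static void assertMonotonic(LongSupplier cachedSize, int polls) throws InterruptedException {
    long previous = 0;
    for (int i = 0; i < polls; i++) {
      long current = cachedSize.getAsLong();
      // Caching may stall, but the reported size must never move backwards.
      if (current < previous) {
        throw new AssertionError("cached size regressed: " + current + " < " + previous);
      }
      previous = current;
      Thread.sleep(50); // crude poll interval, for illustration only
    }
  }
}
```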