Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HBASE-28657 Backport HBASE-28246 Expose region cached size over JMX metrics… #5983

Merged
merged 1 commit into from
Jun 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -632,4 +632,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount";
String SCANNER_LEASE_EXPIRED_COUNT_DESC =
"Count of scanners which were expired due to scanner lease timeout";
String CURRENT_REGION_CACHE_RATIO = "currentRegionCacheRatio";
String CURRENT_REGION_CACHE_RATIO_DESC = "The percentage of caching completed for this region.";
}
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,11 @@ public interface MetricsRegionWrapper {
*/
long getStoreFileSize();

/**
 * Gets the current cache % ratio for this region.
 * @return the ratio of this region's data currently resident in the block cache; presumably a
 *         value in the 0.0–1.0 range — confirm against implementations.
 */
float getCurrentRegionCacheRatio();

/**
* Get the total number of read requests that have been issued against this region
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,10 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
this.regionWrapper.getNumReferenceFiles());
mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE,
MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize());
mrb.addGauge(
Interns.info(regionNamePrefix + MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO,
MetricsRegionServerSource.CURRENT_REGION_CACHE_RATIO_DESC),
this.regionWrapper.getCurrentRegionCacheRatio());
mrb.addCounter(
Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT,
MetricsRegionSource.COMPACTIONS_COMPLETED_DESC),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,11 @@ public long getStoreFileSize() {
return 0;
}

// Stub implementation: always reports a 0 cache ratio for this region.
@Override
public float getCurrentRegionCacheRatio() {
return 0;
}

@Override
public long getReadRequestCount() {
return 0;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.client.RegionReplicaUtil;
org.apache.hadoop.hbase.regionserver.MetricsRegionWrapper;
org.apache.hadoop.util.StringUtils;
org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
</%import>
<%if (onlineRegions != null && onlineRegions.size() > 0) %>
Expand Down Expand Up @@ -172,6 +173,7 @@
<th>Bloom Size</th>
<th>Data Locality</th>
<th>Len Of Biggest Cell</th>
<th>% Cached</th>
</tr>
</thead>

Expand Down Expand Up @@ -237,6 +239,7 @@
<td><% bloomSizeStr %></td>
<td><% load.getDataLocality() %></td>
<td><% String.format("%,1d", lenOfBiggestCellInRegion) %></td>
<td><% StringUtils.formatPercent(load.getCurrentRegionCachedRatio(), 2) %></td>
</%if>
</tr>
</%for>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -154,21 +154,16 @@ default boolean isMetaBlock(BlockType blockType) {
/**
 * Notifies the cache implementation that the given file has been fully cached (all its blocks
 * made into the cache).
 * @param fileName        the file that has been completely cached.
 * @param totalBlockCount the total of blocks cached for this file.
 * @param dataBlockCount  number of DATA block type cached.
 * @param size            the size, in bytes, cached.
 */
default void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int dataBlockCount,
long size) {
// noop — default for cache implementations that do not track per-file caching completion.
}

/**
* Notifies the cache implementation that the given file had a block evicted
* @param fileName the file had a block evicted.
*/
default void notifyFileBlockEvicted(String fileName) {
// noop
}

/**
* Checks whether there's enough space left in the cache to accommodate the passed block. This
* method may not be overridden by all implementing classes. In such cases, the returned Optional
Expand Down Expand Up @@ -230,4 +225,14 @@ default Optional<Integer> getBlockSize(BlockCacheKey key) {
default Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return Optional.empty();
}

/**
 * Returns an Optional containing a map of regions and the percentage of how much of it has been
 * cached so far.
 * NOTE(review): implementations in this change (e.g. BucketCache's regionCachedSize map) appear
 * to store the cached size in bytes per region rather than a percentage — confirm and align the
 * wording here.
 * @return empty optional if this method is not supported, otherwise the returned optional
 *         contains a map of current regions caching percentage.
 */
default Optional<Map<String, Long>> getRegionCachedInfo() {
return Optional.empty();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.yetus.audience.InterfaceAudience;
Expand All @@ -31,6 +32,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable {
private final long offset;
private BlockType blockType;
private final boolean isPrimaryReplicaBlock;
private Path filePath;

/**
* Construct a new BlockCacheKey
Expand All @@ -49,6 +51,14 @@ public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica,
this.blockType = blockType;
}

/**
 * Construct a new BlockCacheKey from the file's {@link Path}, retaining the path so later
 * consumers (e.g. per-region cached-size accounting) can resolve the region directory from the
 * file's location.
 * @param hfilePath path of the hfile this block belongs to; its file name becomes the key name.
 * @param offset offset of the block within the file.
 * @param isPrimaryReplica whether the block belongs to the primary region replica.
 * @param blockType the type of this block.
 */
public BlockCacheKey(Path hfilePath, long offset, boolean isPrimaryReplica, BlockType blockType) {
this.filePath = hfilePath;
this.isPrimaryReplicaBlock = isPrimaryReplica;
this.hfileName = hfilePath.getName();
this.offset = offset;
this.blockType = blockType;
}

@Override
public int hashCode() {
return hfileName.hashCode() * 127 + (int) (offset ^ (offset >>> 32));
Expand Down Expand Up @@ -102,4 +112,9 @@ public BlockType getBlockType() {
public void setBlockType(BlockType blockType) {
this.blockType = blockType;
}

/**
 * @return the full {@link Path} of the hfile, or null when this key was built with the
 *         String-based constructor (only the Path-based constructor sets it).
 */
public Path getFilePath() {
return filePath;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -429,6 +429,11 @@ public Optional<Map<String, Pair<String, Long>>> getFullyCachedFiles() {
return this.l2Cache.getFullyCachedFiles();
}

// Delegates to the L2 (bucket) cache, which is where per-region cached sizes are tracked.
@Override
public Optional<Map<String, Long>> getRegionCachedInfo() {
return l2Cache.getRegionCachedInfo();
}

@Override
public void setMaxSize(long size) {
this.l1Cache.setMaxSize(size);
Expand Down Expand Up @@ -457,12 +462,6 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d

}

/**
 * Notifies both cache tiers that a block of the given file was evicted.
 * Bug fix: the original body called {@code l1Cache.notifyFileBlockEvicted} twice and never
 * notified {@code l2Cache}, leaving the L2 fully-cached-files accounting stale.
 * @param fileName the file that had a block evicted.
 */
@Override
public void notifyFileBlockEvicted(String fileName) {
  l1Cache.notifyFileBlockEvicted(fileName);
  l2Cache.notifyFileBlockEvicted(fileName);
}

@Override
public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
if (isMetaBlock(block.getBlockType())) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1290,7 +1290,7 @@ public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final bo
// from doing).

BlockCacheKey cacheKey =
new BlockCacheKey(name, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType);
new BlockCacheKey(path, dataBlockOffset, this.isPrimaryReplicaReader(), expectedBlockType);
Attributes attributes = Attributes.of(BLOCK_CACHE_KEY_KEY, cacheKey.toString());

boolean cacheable = cacheBlock && cacheIfCompactionsOff();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@ public class BucketCache implements BlockCache, HeapSize {
* Map of region -> total size of the region prefetched on this region server. This is the total
* size of hFiles for this region prefetched on this region server
*/
final Map<String, Long> regionCachedSizeMap = new ConcurrentHashMap<>();
final Map<String, Long> regionCachedSize = new ConcurrentHashMap<>();

private BucketCachePersister cachePersister;

Expand Down Expand Up @@ -334,7 +334,7 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck
fullyCachedFiles.clear();
backingMapValidated.set(true);
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
regionCachedSizeMap.clear();
regionCachedSize.clear();
}
} else {
bucketAllocator = new BucketAllocator(capacity, bucketSizes);
Expand Down Expand Up @@ -606,7 +606,7 @@ public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat,
// the cache map state might differ from the actual cache. If we reach this block,
// we should remove the cache key entry from the backing map
backingMap.remove(key);
removeFileFromPrefetch(key.getHfileName());
fileNotFullyCached(key.getHfileName());
LOG.debug("Failed to fetch block for cache key: {}.", key, hioex);
} catch (IOException ioex) {
LOG.error("Failed reading block " + key + " from bucket cache", ioex);
Expand All @@ -631,7 +631,7 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre
if (decrementBlockNumber) {
this.blockNumber.decrement();
if (ioEngine.isPersistent()) {
removeFileFromPrefetch(cacheKey.getHfileName());
fileNotFullyCached(cacheKey.getHfileName());
}
}
if (evictedByEvictionProcess) {
Expand All @@ -642,6 +642,42 @@ void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decre
}
}

/**
 * Marks the given file as no longer fully cached (a block was evicted or failed to load):
 * removes it from {@code fullyCachedFiles} and subtracts its size from the owning region's
 * total in {@code regionCachedSize}, dropping the region entry once it reaches zero.
 * @param hfileName name of the hfile that is no longer fully cached.
 */
private void fileNotFullyCached(String hfileName) {
  // Update the region cached-size accounting before removing the file from fullyCachedFiles.
  Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
  if (regionEntry != null) {
    String regionEncodedName = regionEntry.getFirst();
    long filePrefetchSize = regionEntry.getSecond();
    LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
    // Subtract atomically. Returning null from computeIfPresent removes the mapping, which
    // replaces the original's racy containsKey/get/remove sequence (a non-atomic check-then-act
    // on a ConcurrentHashMap) for dropping a region whose cached size reaches exactly zero.
    regionCachedSize.computeIfPresent(regionEncodedName, (rn, pf) -> {
      long remaining = pf - filePrefetchSize;
      return remaining == 0 ? null : remaining;
    });
  }
  fullyCachedFiles.remove(hfileName);
}

/**
 * Records the given file as fully cached: keyed by file name in {@code fullyCachedFiles},
 * together with the region it belongs to and the size cached.
 * @param filePath path of the fully cached hfile; the region name is taken from the grandparent
 *                 directory — presumably the .../region/family/file hfile layout, confirm.
 * @param size the size, in bytes, cached for this file.
 */
public void fileCacheCompleted(Path filePath, long size) {
Pair<String, Long> pair = new Pair<>();
// sets the region name
String regionName = filePath.getParent().getParent().getName();
pair.setFirst(regionName);
pair.setSecond(size);
fullyCachedFiles.put(filePath.getName(), pair);
}

/**
 * Adds {@code cachedSize} bytes to the running cached-size total of the region owning
 * {@code filePath}. A null path (key built from a bare file name) is ignored.
 */
private void updateRegionCachedSize(Path filePath, long cachedSize) {
  if (filePath == null) {
    return;
  }
  // The region's encoded name is the grandparent directory of the hfile.
  String region = filePath.getParent().getParent().getName();
  regionCachedSize.merge(region, cachedSize, Long::sum);
}

/**
* Free the {{@link BucketEntry} actually,which could only be invoked when the
* {@link BucketEntry#refCnt} becoming 0.
Expand Down Expand Up @@ -1074,6 +1110,7 @@ public void run() {
protected void putIntoBackingMap(BlockCacheKey key, BucketEntry bucketEntry) {
BucketEntry previousEntry = backingMap.put(key, bucketEntry);
blocksByHFile.add(key);
updateRegionCachedSize(key.getFilePath(), bucketEntry.getLength());
if (previousEntry != null && previousEntry != bucketEntry) {
previousEntry.withWriteLock(offsetLock, () -> {
blockEvicted(key, previousEntry, false, false);
Expand Down Expand Up @@ -1295,8 +1332,9 @@ public boolean isCachePersistent() {
return ioEngine.isPersistent() && persistencePath != null;
}

public Map<String, Long> getRegionCachedInfo() {
return Collections.unmodifiableMap(regionCachedSizeMap);
// Exposes an unmodifiable view of the per-region cached sizes tracked by this bucket cache.
@Override
public Optional<Map<String, Long>> getRegionCachedInfo() {
return Optional.of(Collections.unmodifiableMap(regionCachedSize));
}

/**
Expand Down Expand Up @@ -1333,17 +1371,17 @@ private void retrieveFromFile(int[] bucketSizes) throws IOException {
}

private void updateRegionSizeMapWhileRetrievingFromFile() {
// Update the regionCachedSizeMap with the region size while restarting the region server
// Update the regionCachedSize with the region size while restarting the region server
if (LOG.isDebugEnabled()) {
LOG.debug("Updating region size map after retrieving cached file list");
dumpPrefetchList();
}
regionCachedSizeMap.clear();
regionCachedSize.clear();
fullyCachedFiles.forEach((hFileName, hFileSize) -> {
// Get the region name for each file
String regionEncodedName = hFileSize.getFirst();
long cachedFileSize = hFileSize.getSecond();
regionCachedSizeMap.merge(regionEncodedName, cachedFileSize,
regionCachedSize.merge(regionEncodedName, cachedFileSize,
(oldpf, fileSize) -> oldpf + fileSize);
});
}
Expand Down Expand Up @@ -1448,7 +1486,7 @@ private void parsePB(BucketCacheProtos.BucketCacheEntry proto) throws IOExceptio
} catch (IOException e1) {
LOG.debug("Check for key {} failed. Evicting.", keyEntry.getKey());
evictBlock(keyEntry.getKey());
removeFileFromPrefetch(keyEntry.getKey().getHfileName());
fileNotFullyCached(keyEntry.getKey().getHfileName());
}
}
backingMapValidated.set(true);
Expand Down Expand Up @@ -1505,7 +1543,7 @@ private void disableCache() {
this.backingMap.clear();
this.blocksByHFile.clear();
this.fullyCachedFiles.clear();
this.regionCachedSizeMap.clear();
this.regionCachedSize.clear();
}
}

Expand Down Expand Up @@ -1605,7 +1643,7 @@ protected String getAlgorithm() {
*/
@Override
public int evictBlocksByHfileName(String hfileName) {
removeFileFromPrefetch(hfileName);
fileNotFullyCached(hfileName);
Set<BlockCacheKey> keySet = getAllCacheKeysForFile(hfileName);
int numEvicted = 0;
for (BlockCacheKey key : keySet) {
Expand Down Expand Up @@ -2089,11 +2127,6 @@ public void notifyFileCachingCompleted(Path fileName, int totalBlockCount, int d
}
}

// A block eviction means the file can no longer be considered fully cached, so drop it from
// the fully-cached bookkeeping.
@Override
public void notifyFileBlockEvicted(String fileName) {
removeFileFromPrefetch(fileName);
}

@Override
public Optional<Boolean> blockFitsIntoTheCache(HFileBlock block) {
long currentUsed = bucketAllocator.getUsedSize();
Expand Down Expand Up @@ -2122,34 +2155,4 @@ public Optional<Integer> getBlockSize(BlockCacheKey key) {
}

}

/**
 * Removes the file from the fully-cached bookkeeping and subtracts its size from the owning
 * region's prefetched-size total, dropping the region entry once it reaches zero.
 * NOTE(review): near-duplicate of fileNotFullyCached earlier in this file, which operates on
 * the renamed regionCachedSize map — presumably only one of the two definitions should remain;
 * confirm against the full file.
 * @param hfileName name of the hfile to remove from the prefetch accounting.
 */
private void removeFileFromPrefetch(String hfileName) {
// Update the regionPrefetchedSizeMap before removing the file from prefetchCompleted
if (fullyCachedFiles.containsKey(hfileName)) {
Pair<String, Long> regionEntry = fullyCachedFiles.get(hfileName);
String regionEncodedName = regionEntry.getFirst();
long filePrefetchSize = regionEntry.getSecond();
LOG.debug("Removing file {} for region {}", hfileName, regionEncodedName);
regionCachedSizeMap.computeIfPresent(regionEncodedName, (rn, pf) -> pf - filePrefetchSize);
// If all the blocks for a region are evicted from the cache, remove the entry for that region
if (
regionCachedSizeMap.containsKey(regionEncodedName)
&& regionCachedSizeMap.get(regionEncodedName) == 0
) {
regionCachedSizeMap.remove(regionEncodedName);
}
}
fullyCachedFiles.remove(hfileName);
}

/**
 * Records the given file as fully cached and merges its size into the per-region total.
 * NOTE(review): near-duplicate of fileCacheCompleted defined earlier in this file; this variant
 * additionally merges into regionCachedSizeMap. Only one definition can survive — confirm
 * against the full file.
 * @param filePath path of the fully cached hfile.
 * @param size the size, in bytes, cached for this file.
 */
public void fileCacheCompleted(Path filePath, long size) {
Pair<String, Long> pair = new Pair<>();
// sets the region name
String regionName = filePath.getParent().getParent().getName();
pair.setFirst(regionName);
pair.setSecond(size);
fullyCachedFiles.put(filePath.getName(), pair);
regionCachedSizeMap.merge(regionName, size, (oldpf, fileSize) -> oldpf + fileSize);
}
}

}
Loading