From fe6bc46ee5ee154fdc7978bf023db542f618683e Mon Sep 17 00:00:00 2001 From: Janardhan Hungund Date: Mon, 22 Apr 2024 14:59:32 +0530 Subject: [PATCH 1/2] HBASE-28535: Add a region-server wide key to enable data-tiering. The data-tiering feature should not be enabled by default, since it targets specific use-cases. Hence, introduce a system-wide configuration to enable the feature. Avoid the data-tiering code paths when this system-wide configuration is not enabled. Change-Id: I308119884c42173cfef3b23c360a842c7f516977 --- .../hbase/io/hfile/HFilePreadReader.java | 10 +- .../hbase/io/hfile/bucket/BucketCache.java | 21 +- .../regionserver/DataTieringManager.java | 27 +- .../hbase/regionserver/HRegionServer.java | 4 +- .../regionserver/TestDataTieringManager.java | 235 ++++++++++-------- 5 files changed, 171 insertions(+), 126 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index 7631dc78c3a3..a44af9cc2044 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.regionserver.DataTieringManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,8 +41,13 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c final MutableBoolean shouldCache = new MutableBoolean(true); - // Initialize HFileInfo object with metadata for caching decisions - fileInfo.initMetaAndIndex(this); + DataTieringManager dataTieringManager = DataTieringManager.getInstance(); + if (dataTieringManager != null) { + // Initialize HFileInfo object with metadata for caching decisions. + // Initialize the metadata only if data-tiering is enabled. + // If not, the metadata will be initialized later. + fileInfo.initMetaAndIndex(this); + } cacheConf.getBlockCache().ifPresent(cache -> { Optional result = cache.shouldCacheFile(fileInfo, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 5a9c7795a334..00b085db11ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -985,11 +985,10 @@ void freeSpace(final String why) { // Check the list of files to determine the cold files which can be readily evicted. Map coldFiles = null; - try { - DataTieringManager dataTieringManager = DataTieringManager.getInstance(); + + DataTieringManager dataTieringManager = DataTieringManager.getInstance(); + if (dataTieringManager != null) { coldFiles = dataTieringManager.getColdFilesList(); - } catch (IllegalStateException e) { - LOG.warn("Data Tiering Manager is not set. 
Ignore time-based block evictions."); } // Scan entire map putting bucket entry into appropriate bucket entry // group @@ -2195,17 +2194,19 @@ public Optional blockFitsIntoTheCache(HFileBlock block) { @Override public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration conf) { String fileName = hFileInfo.getHFileContext().getHFileName(); - try { - DataTieringManager dataTieringManager = DataTieringManager.getInstance(); + DataTieringManager dataTieringManager = DataTieringManager.getInstance(); + if (dataTieringManager != null) { if (!dataTieringManager.isHotData(hFileInfo, conf)) { LOG.debug("Data tiering is enabled for file: '{}' and it is not hot data", fileName); return Optional.of(false); + } else { + LOG.debug("Data tiering is enabled for file: '{}' and it is hot data", fileName); } - } catch (IllegalStateException e) { - LOG.error("Error while getting DataTieringManager instance: {}", e.getMessage()); + } else { + LOG.debug("Data tiering feature is not enabled. " + + " The file: '{}' will be loaded if not already loaded", fileName); } - - // if we don't have the file in fullyCachedFiles, we should cache it + // if we don't have the file in fullyCachedFiles, we should cache it. return Optional.of(!fullyCachedFiles.containsKey(fileName)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java index 6c699e77c2f5..084e2b2489e0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java @@ -45,6 +45,8 @@ @InterfaceAudience.Private public class DataTieringManager { private static final Logger LOG = LoggerFactory.getLogger(DataTieringManager.class); + public static final String DATA_TIERING_ENABLED_KEY = "hbase.hstore.datatiering.enable"; + public static final boolean DEFAULT_DATA_TIERING_ENABLED = false; // disabled by default public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type"; public static final String DATATIERING_HOT_DATA_AGE_KEY = "hbase.hstore.datatiering.hot.age.millis"; @@ -61,25 +63,25 @@ private DataTieringManager(Map onlineRegions) { * Initializes the DataTieringManager instance with the provided map of online regions. * @param onlineRegions A map containing online regions. */ - public static synchronized void instantiate(Map onlineRegions) { - if (instance == null) { - instance = new DataTieringManager(onlineRegions); - LOG.info("DataTieringManager instantiated successfully."); + public static synchronized void instantiate(Configuration conf, + Map onlineRegions) { + if (isDataTieringFeatureEnabled(conf)) { + if (instance == null) { + instance = new DataTieringManager(onlineRegions); + LOG.info("DataTieringManager instantiated successfully."); + } else { + LOG.warn("DataTieringManager is already instantiated."); + } } else { - LOG.warn("DataTieringManager is already instantiated."); + LOG.info("Data-Tiering feature is not enabled."); } } /** * Retrieves the instance of DataTieringManager. * @return The instance of DataTieringManager. - * @throws IllegalStateException if DataTieringManager has not been instantiated. */ public static synchronized DataTieringManager getInstance() { - if (instance == null) { - throw new IllegalStateException( - "DataTieringManager has not been instantiated. 
Call instantiate() first."); - } return instance; } @@ -308,4 +310,9 @@ public Map getColdFilesList() { } return coldFiles; } + + public static boolean isDataTieringFeatureEnabled(Configuration conf) { + return conf.getBoolean(DataTieringManager.DATA_TIERING_ENABLED_KEY, + DataTieringManager.DEFAULT_DATA_TIERING_ENABLED); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 88679a6eb6cd..722b13cbb467 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -534,7 +534,9 @@ public HRegionServer(final Configuration conf) throws IOException { regionServerAccounting = new RegionServerAccounting(conf); blockCache = BlockCacheFactory.createBlockCache(conf); - DataTieringManager.instantiate(onlineRegions); + if (DataTieringManager.isDataTieringFeatureEnabled(conf)) { + DataTieringManager.instantiate(conf, onlineRegions); + } mobFileCache = new MobFileCache(conf); rsSnapshotVerifier = new RSSnapshotVerifier(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java index f758bb11c5b3..714333955be7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java @@ -109,10 +109,11 @@ public static void setupBeforeClass() throws Exception { defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); defaultConf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); defaultConf.setLong(BUCKET_CACHE_SIZE_KEY, 32); + defaultConf.setBoolean(DataTieringManager.DATA_TIERING_ENABLED_KEY, true); fs = HFileSystem.get(defaultConf); blockCache = BlockCacheFactory.createBlockCache(defaultConf); cacheConf = new CacheConfig(defaultConf, blockCache); - DataTieringManager.instantiate(testOnlineRegions); + DataTieringManager.instantiate(defaultConf, testOnlineRegions); setupOnlineRegions(); dataTieringManager = DataTieringManager.getInstance(); } @@ -263,47 +264,56 @@ public void testPickColdDataFiles() { @Test public void testBlockEvictions() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 3; - int writerQLen = 64; + int writeThreads = 1; + int writerQLen = 4; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); - - // Create three Cache keys with cold data files and a block with hot data. - // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file. - Set cacheKeys = new HashSet<>(); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); - - // Create dummy data to be cached and fill the cache completely. 
- CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); - - int blocksIter = 0; - for (BlockCacheKey key : cacheKeys) { - bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); - // Ensure that the block is persisted to the file. - Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); - } + // disable any prefetch in parallel to test execution + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); - // Verify that the bucket cache contains 3 blocks. - assertEquals(3, bucketCache.getBackingMap().keySet().size()); + try { + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with cold data files and a block with hot data. + // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file. + Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(key))); + } - // Add an additional block into cache with hot data which should trigger the eviction - BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); - CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + // Verify that the bucket cache contains 3 blocks. + assertEquals(3, bucketCache.getBackingMap().keySet().size()); - bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(newKey))); + // Add an additional block into cache with hot data which should trigger the eviction + BlockCacheKey newKey = + new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); - // Verify that the bucket cache now contains 2 hot blocks blocks only. - // Both cold blocks of 8KB will be evicted to make room for 1 block of 8KB + an additional - // space. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); + + // Verify that the bucket cache now contains 2 hot blocks blocks only. + // Both cold blocks of 8KB will be evicted to make room for 1 block of 8KB + an additional + // space. 
+ validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + } finally { + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); + } } /* @@ -313,45 +323,54 @@ public void testBlockEvictions() throws Exception { @Test public void testBlockEvictionsAllColdBlocks() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 3; - int writerQLen = 64; + int writeThreads = 1; + int writerQLen = 4; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); - - // Create three Cache keys with three cold data blocks. - // hStoreFiles.get(3) is a cold data file. - Set cacheKeys = new HashSet<>(); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA)); - - // Create dummy data to be cached and fill the cache completely. - CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); - - int blocksIter = 0; - for (BlockCacheKey key : cacheKeys) { - bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); - // Ensure that the block is persisted to the file. - Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); - } + // disable any prefetch in parallel to test execution + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); + + try { + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with three cold data blocks. + // hStoreFiles.get(3) is a cold data file. + Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(key))); + } - // Verify that the bucket cache contains 3 blocks. - assertEquals(3, bucketCache.getBackingMap().keySet().size()); + // Verify that the bucket cache contains 3 blocks. 
+ assertEquals(3, bucketCache.getBackingMap().keySet().size()); - // Add an additional block into cache with hot data which should trigger the eviction - BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); - CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + // Add an additional block into cache with hot data which should trigger the eviction + BlockCacheKey newKey = + new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); - bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(newKey))); + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); - // Verify that the bucket cache now contains 1 cold block and a newly added hot block. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); + // Verify that the bucket cache now contains 1 cold block and a newly added hot block. + validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); + } finally { + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); + } } /* @@ -360,46 +379,56 @@ public void testBlockEvictionsAllColdBlocks() throws Exception { @Test public void testBlockEvictionsHotBlocks() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 3; - int writerQLen = 64; + int writeThreads = 1; + int writerQLen = 4; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); - - // Create three Cache keys with two hot data blocks and one cold data block - // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file. - Set cacheKeys = new HashSet<>(); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); - - // Create dummy data to be cached and fill the cache completely. - CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); - - int blocksIter = 0; - for (BlockCacheKey key : cacheKeys) { - bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); - // Ensure that the block is persisted to the file. - Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); - } + // disable any prefetch in parallel to test execution + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); - // Verify that the bucket cache contains 3 blocks. 
- assertEquals(3, bucketCache.getBackingMap().keySet().size()); + try { + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, + 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", + DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with two hot data blocks and one cold data block + // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file. + Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(key))); + } + + // Verify that the bucket cache contains 3 blocks. + assertEquals(3, bucketCache.getBackingMap().keySet().size()); - // Add an additional block which should evict the only cold block with an additional hot block. - BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); - CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + // Add an additional block which should evict the only cold block with an additional hot + // block. + BlockCacheKey newKey = + new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); - bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(newKey))); + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); - // Verify that the bucket cache now contains 2 hot blocks. - // Only one of the older hot blocks is retained and other one is the newly added hot block. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + // Verify that the bucket cache now contains 2 hot blocks. + // Only one of the older hot blocks is retained and other one is the newly added hot block. + validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + } finally { + defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); + } } private void validateBlocks(Set keys, int expectedTotalKeys, int expectedHotBlocks, From 1ad9ce52ca4e91fc18e54088b4b6e61ddfa75073 Mon Sep 17 00:00:00 2001 From: Janardhan Hungund Date: Mon, 29 Apr 2024 22:12:04 +0530 Subject: [PATCH 2/2] HBASE-28535: Addressed the review comments. 
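The key review outcomes in this revision: the gate moves to a region-server-wide name, "hbase.regionserver.datatiering.enable" (still off by default), instantiate() now returns a boolean instead of throwing, and getInstance() simply returns null when no instance exists. A minimal sketch of enabling the gate before instantiation (illustrative only; the HBaseConfiguration usage and the onlineRegions map are assumed from the surrounding region-server context, not added by this patch):

    Configuration conf = HBaseConfiguration.create();
    // The feature is off by default (DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false).
    conf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true);
    // instantiate() reports success rather than throwing: true only when the
    // gate is enabled and no instance exists yet.
    boolean created = DataTieringManager.instantiate(conf, onlineRegions);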
Change-Id: I95ec8691f5d511a8bd452c1492de7ff1222980b6 --- .../hbase/io/hfile/HFilePreadReader.java | 10 +- .../hbase/io/hfile/bucket/BucketCache.java | 15 +- .../regionserver/DataTieringManager.java | 39 +-- .../hbase/regionserver/HRegionServer.java | 7 +- .../regionserver/TestDataTieringManager.java | 246 ++++++++++-------- 5 files changed, 176 insertions(+), 141 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java index a44af9cc2044..7631dc78c3a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePreadReader.java @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.regionserver.DataTieringManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,13 +40,8 @@ public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig c final MutableBoolean shouldCache = new MutableBoolean(true); - DataTieringManager dataTieringManager = DataTieringManager.getInstance(); - if (dataTieringManager != null) { - // Initialize HFileInfo object with metadata for caching decisions. - // Initialize the metadata only if the data-tiering is enabled. - // If not, the metadata will be initialized later. - fileInfo.initMetaAndIndex(this); - } + // Initialize HFileInfo object with metadata for caching decisions + fileInfo.initMetaAndIndex(this); cacheConf.getBlockCache().ifPresent(cache -> { Optional result = cache.shouldCacheFile(fileInfo, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 00b085db11ce..0b53d0479902 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -2195,18 +2195,11 @@ public Optional blockFitsIntoTheCache(HFileBlock block) { public Optional shouldCacheFile(HFileInfo hFileInfo, Configuration conf) { String fileName = hFileInfo.getHFileContext().getHFileName(); DataTieringManager dataTieringManager = DataTieringManager.getInstance(); - if (dataTieringManager != null) { - if (!dataTieringManager.isHotData(hFileInfo, conf)) { - LOG.debug("Data tiering is enabled for file: '{}' and it is not hot data", fileName); - return Optional.of(false); - } else { - LOG.debug("Data tiering is enabled for file: '{}' and it is hot data", fileName); - } - } else { - LOG.debug("Data tiering feature is not enabled. " - + " The file: '{}' will be loaded if not already loaded", fileName); + if (dataTieringManager != null && !dataTieringManager.isHotData(hFileInfo, conf)) { + LOG.debug("Data tiering is enabled for file: '{}' and it is not hot data", fileName); + return Optional.of(false); } - // if we don't have the file in fullyCachedFiles, we should cache it. 
+ // if we don't have the file in fullyCachedFiles, we should cache it return Optional.of(!fullyCachedFiles.containsKey(fileName)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java index 084e2b2489e0..952b4d4938d7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java @@ -45,8 +45,9 @@ @InterfaceAudience.Private public class DataTieringManager { private static final Logger LOG = LoggerFactory.getLogger(DataTieringManager.class); - public static final String DATA_TIERING_ENABLED_KEY = "hbase.hstore.datatiering.enable"; - public static final boolean DEFAULT_DATA_TIERING_ENABLED = false; // disabled by default + public static final String GLOBAL_DATA_TIERING_ENABLED_KEY = + "hbase.regionserver.datatiering.enable"; + public static final boolean DEFAULT_GLOBAL_DATA_TIERING_ENABLED = false; // disabled by default public static final String DATATIERING_KEY = "hbase.hstore.datatiering.type"; public static final String DATATIERING_HOT_DATA_AGE_KEY = "hbase.hstore.datatiering.hot.age.millis"; @@ -60,26 +61,27 @@ private DataTieringManager(Map onlineRegions) { } /** - * Initializes the DataTieringManager instance with the provided map of online regions. + * Initializes the DataTieringManager instance with the provided map of online regions, only if + * the configuration "hbase.regionserver.datatiering.enable" is enabled. + * @param conf Configuration object. * @param onlineRegions A map containing online regions. + * @return True if the instance is instantiated successfully, false otherwise. */ - public static synchronized void instantiate(Configuration conf, + public static synchronized boolean instantiate(Configuration conf, Map onlineRegions) { - if (isDataTieringFeatureEnabled(conf)) { - if (instance == null) { - instance = new DataTieringManager(onlineRegions); - LOG.info("DataTieringManager instantiated successfully."); - } else { - LOG.warn("DataTieringManager is already instantiated."); - } + if (isDataTieringFeatureEnabled(conf) && instance == null) { + instance = new DataTieringManager(onlineRegions); + LOG.info("DataTieringManager instantiated successfully."); + return true; } else { - LOG.info("Data-Tiering feature is not enabled."); + LOG.warn("DataTieringManager not instantiated: the feature is disabled or an instance already exists."); } + return false; } /** * Retrieves the instance of DataTieringManager. - * @return The instance of DataTieringManager. + * @return The DataTieringManager instance if instantiated, null otherwise. */ public static synchronized DataTieringManager getInstance() { return instance; @@ -311,8 +313,13 @@ public Map getColdFilesList() { return coldFiles; } - public static boolean isDataTieringFeatureEnabled(Configuration conf) { - return conf.getBoolean(DataTieringManager.DATA_TIERING_ENABLED_KEY, - DataTieringManager.DEFAULT_DATA_TIERING_ENABLED); + private static boolean isDataTieringFeatureEnabled(Configuration conf) { + return conf.getBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, + DataTieringManager.DEFAULT_GLOBAL_DATA_TIERING_ENABLED); + } +
+ // Resets the instance to null. To be used only for testing. + public static void resetForTestingOnly() { + instance = null; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 722b13cbb467..2e510b5981ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -534,9 +534,10 @@ public HRegionServer(final Configuration conf) throws IOException { regionServerAccounting = new RegionServerAccounting(conf); blockCache = BlockCacheFactory.createBlockCache(conf); - if (DataTieringManager.isDataTieringFeatureEnabled(conf)) { - DataTieringManager.instantiate(conf, onlineRegions); - } + // The call below instantiates the DataTieringManager only when + // the configuration "hbase.regionserver.datatiering.enable" is set to true. + DataTieringManager.instantiate(conf, onlineRegions); + mobFileCache = new MobFileCache(conf); rsSnapshotVerifier = new RSSnapshotVerifier(conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java index 714333955be7..f999a73c4732 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java @@ -20,6 +20,8 @@ import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY; import static org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.DEFAULT_ERROR_TOLERATION_DURATION; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -64,6 +66,8 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This class is used to test the functionality of the DataTieringManager.
@@ -91,6 +95,7 @@ public class TestDataTieringManager { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDataTieringManager.class); + private static final Logger LOG = LoggerFactory.getLogger(TestDataTieringManager.class); private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); private static Configuration defaultConf; private static FileSystem fs; @@ -109,11 +114,11 @@ public static void setupBeforeClass() throws Exception { defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); defaultConf.setStrings(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap"); defaultConf.setLong(BUCKET_CACHE_SIZE_KEY, 32); - defaultConf.setBoolean(DataTieringManager.DATA_TIERING_ENABLED_KEY, true); + defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true); fs = HFileSystem.get(defaultConf); blockCache = BlockCacheFactory.createBlockCache(defaultConf); cacheConf = new CacheConfig(defaultConf, blockCache); - DataTieringManager.instantiate(defaultConf, testOnlineRegions); + assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions)); setupOnlineRegions(); dataTieringManager = DataTieringManager.getInstance(); } @@ -264,56 +269,47 @@ public void testPickColdDataFiles() { @Test public void testBlockEvictions() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 1; - int writerQLen = 4; + int writeThreads = 3; + int writerQLen = 64; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // disable any prefetch in parallel to test execution - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); - - try { - // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); - - // Create three Cache keys with cold data files and a block with hot data. - // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file. - Set cacheKeys = new HashSet<>(); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); - - // Create dummy data to be cached and fill the cache completely. - CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); - - int blocksIter = 0; - for (BlockCacheKey key : cacheKeys) { - bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); - // Ensure that the block is persisted to the file. - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(key))); - } + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes, + writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with cold data files and a block with hot data. + // hStoreFiles.get(3) is a cold data file, while hStoreFiles.get(0) is a hot file. 
+ Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); + } - // Verify that the bucket cache contains 3 blocks. - assertEquals(3, bucketCache.getBackingMap().keySet().size()); + // Verify that the bucket cache contains 3 blocks. + assertEquals(3, bucketCache.getBackingMap().keySet().size()); - // Add an additional block into cache with hot data which should trigger the eviction - BlockCacheKey newKey = - new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); - CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + // Add an additional block into cache with hot data which should trigger the eviction + BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); - bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(newKey))); + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); - // Verify that the bucket cache now contains 2 hot blocks blocks only. - // Both cold blocks of 8KB will be evicted to make room for 1 block of 8KB + an additional - // space. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); - } finally { - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); - } + // Verify that the bucket cache now contains 2 hot blocks blocks only. + // Both cold blocks of 8KB will be evicted to make room for 1 block of 8KB + an additional + // space. + validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); } /* @@ -323,54 +319,45 @@ public void testBlockEvictions() throws Exception { @Test public void testBlockEvictionsAllColdBlocks() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 1; - int writerQLen = 4; + int writeThreads = 3; + int writerQLen = 64; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // disable any prefetch in parallel to test execution - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); - - try { - // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); - - // Create three Cache keys with three cold data blocks. - // hStoreFiles.get(3) is a cold data file. 
- Set cacheKeys = new HashSet<>(); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); - cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA)); - - // Create dummy data to be cached and fill the cache completely. - CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes, + writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with three cold data blocks. + // hStoreFiles.get(3) is a cold data file. + Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 16384, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); + } - int blocksIter = 0; - for (BlockCacheKey key : cacheKeys) { - bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); - // Ensure that the block is persisted to the file. - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(key))); - } + // Verify that the bucket cache contains 3 blocks. + assertEquals(3, bucketCache.getBackingMap().keySet().size()); - // Verify that the bucket cache contains 3 blocks. - assertEquals(3, bucketCache.getBackingMap().keySet().size()); + // Add an additional block into cache with hot data which should trigger the eviction + BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); - // Add an additional block into cache with hot data which should trigger the eviction - BlockCacheKey newKey = - new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); - CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); - bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); - Waiter.waitFor(defaultConf, 10000, 100, - () -> (bucketCache.getBackingMap().containsKey(newKey))); - - // Verify that the bucket cache now contains 1 cold block and a newly added hot block. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); - } finally { - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); - } + // Verify that the bucket cache now contains 1 cold block and a newly added hot block. 
+ validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); } /* @@ -379,22 +366,72 @@ public void testBlockEvictionsAllColdBlocks() throws Exception { @Test public void testBlockEvictionsHotBlocks() throws Exception { long capacitySize = 40 * 1024; - int writeThreads = 1; - int writerQLen = 4; + int writeThreads = 3; + int writerQLen = 64; int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; - // disable any prefetch in parallel to test execution - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, false); + // Setup: Create a bucket cache with lower capacity + BucketCache bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes, + writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + + // Create three Cache keys with two hot data blocks and one cold data block + // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file. + Set cacheKeys = new HashSet<>(); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA)); + cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); + + // Create dummy data to be cached and fill the cache completely. + CacheTestUtils.HFileBlockPair[] blocks = CacheTestUtils.generateHFileBlocks(8192, 3); + + int blocksIter = 0; + for (BlockCacheKey key : cacheKeys) { + bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); + // Ensure that the block is persisted to the file. + Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(key))); + } + + // Verify that the bucket cache contains 3 blocks. + assertEquals(3, bucketCache.getBackingMap().keySet().size()); + // Add an additional block which should evict the only cold block with an additional hot block. + BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); + CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); + + bucketCache.cacheBlock(newKey, newBlock[0].getBlock()); + Waiter.waitFor(defaultConf, 10000, 100, + () -> (bucketCache.getBackingMap().containsKey(newKey))); + + // Verify that the bucket cache now contains 2 hot blocks. + // Only one of the older hot blocks is retained and other one is the newly added hot block. + validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + } + + @Test + public void testFeatureKeyDisabled() throws Exception { + DataTieringManager.resetForTestingOnly(); + defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, false); try { + assertFalse(DataTieringManager.instantiate(defaultConf, testOnlineRegions)); + // Verify that the DataaTieringManager instance is not instantiated in the + // instantiate call above. + assertNull(DataTieringManager.getInstance()); + + // Also validate that data temperature is not honoured. 
+ long capacitySize = 40 * 1024; + int writeThreads = 3; + int writerQLen = 64; + int[] bucketSizes = new int[] { 8 * 1024 + 1024 }; + // Setup: Create a bucket cache with lower capacity - BucketCache bucketCache = new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, - 8192, bucketSizes, writeThreads, writerQLen, testDir + "/bucket.persistence", - DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); + BucketCache bucketCache = + new BucketCache("file:" + testDir + "/bucket.cache", capacitySize, 8192, bucketSizes, + writeThreads, writerQLen, null, DEFAULT_ERROR_TOLERATION_DURATION, defaultConf); // Create three Cache keys with two hot data blocks and one cold data block // hStoreFiles.get(0) is a hot data file and hStoreFiles.get(3) is a cold data file. - Set cacheKeys = new HashSet<>(); + List cacheKeys = new ArrayList<>(); cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 0, true, BlockType.DATA)); cacheKeys.add(new BlockCacheKey(hStoreFiles.get(0).getPath(), 8192, true, BlockType.DATA)); cacheKeys.add(new BlockCacheKey(hStoreFiles.get(3).getPath(), 0, true, BlockType.DATA)); @@ -404,6 +441,7 @@ public void testBlockEvictionsHotBlocks() throws Exception { int blocksIter = 0; for (BlockCacheKey key : cacheKeys) { + LOG.info("Adding {}", key); bucketCache.cacheBlock(key, blocks[blocksIter++].getBlock()); // Ensure that the block is persisted to the file. Waiter.waitFor(defaultConf, 10000, 100, @@ -413,8 +451,7 @@ public void testBlockEvictionsHotBlocks() throws Exception { // Verify that the bucket cache contains 3 blocks. assertEquals(3, bucketCache.getBackingMap().keySet().size()); - // Add an additional block which should evict the only cold block with an additional hot - // block. + // Add an additional hot block, which triggers eviction. BlockCacheKey newKey = new BlockCacheKey(hStoreFiles.get(2).getPath(), 0, true, BlockType.DATA); CacheTestUtils.HFileBlockPair[] newBlock = CacheTestUtils.generateHFileBlocks(8192, 1); @@ -423,11 +460,14 @@ public void testBlockEvictionsHotBlocks() throws Exception { Waiter.waitFor(defaultConf, 10000, 100, () -> (bucketCache.getBackingMap().containsKey(newKey))); - // Verify that the bucket cache now contains 2 hot blocks. - // Only one of the older hot blocks is retained and other one is the newly added hot block. - validateBlocks(bucketCache.getBackingMap().keySet(), 2, 2, 0); + // Verify that the bucket still contains the only cold block and one newly added hot block. + // The older hot blocks are evicted and data-tiering mechanism does not kick in to evict + // the cold block. + validateBlocks(bucketCache.getBackingMap().keySet(), 2, 1, 1); } finally { - defaultConf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); + DataTieringManager.resetForTestingOnly(); + defaultConf.setBoolean(DataTieringManager.GLOBAL_DATA_TIERING_ENABLED_KEY, true); + assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions)); } } @@ -435,7 +475,7 @@ private void validateBlocks(Set keys, int expectedTotalKeys, int int expectedColdBlocks) { int numHotBlocks = 0, numColdBlocks = 0; - assertEquals(expectedTotalKeys, keys.size()); + Waiter.waitFor(defaultConf, 10000, 100, () -> (expectedTotalKeys == keys.size())); int iter = 0; for (BlockCacheKey key : keys) { try {