diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
index 236c7d31ea0d..c3e6d43aa67d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java
@@ -280,6 +280,9 @@ private int calculateNumberOfChunks(int numOfCells, int chunkSize) {
   // If the percentage of its remaining free space is above the INDEX_CHUNK_UNUSED_SPACE
   // threshold, then we will use index chunks (which are smaller) instead.
   private ChunkCreator.ChunkType useIndexChunks(int numOfCells) {
+    if (!ChunkCreator.getInstance().hasIndexChunkPool()) {
+      return ChunkCreator.ChunkType.DATA_CHUNK;
+    }
     int dataChunkSize = ChunkCreator.getInstance().getChunkSize();
     int numOfCellsInChunk = calcNumOfCellsInChunk(dataChunkSize);
     int cellsInLastChunk = numOfCells % numOfCellsInChunk;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 1e5e94f211a7..b02e335687c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
@@ -28,13 +29,14 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
+import java.util.function.Supplier;
 
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -79,35 +81,33 @@ public enum ChunkType {
   static ChunkCreator instance;
   @VisibleForTesting
   static boolean chunkPoolDisabled = false;
-  private MemStoreChunkPool dataChunksPool;
-  private int chunkSize;
-  private MemStoreChunkPool indexChunksPool;
+  private Optional<MemStoreChunkPool> indexChunksPool;
+  private Optional<MemStoreChunkPool> dataChunksPool;
+  private final int chunkSize;
+  private final int indexChunkSize;
 
   @VisibleForTesting
   ChunkCreator(int chunkSize, boolean offheap, long globalMemStoreSize, float poolSizePercentage,
-      float initialCountPercentage, HeapMemoryManager heapMemoryManager,
-      float indexChunkSizePercentage) {
+      float initialCountPercentage, float indexChunkPercentage, int indexChunkSize,
+      HeapMemoryManager heapMemoryManager) {
     this.offheap = offheap;
-    this.chunkSize = chunkSize; // in case pools are not allocated
-    initializePools(chunkSize, globalMemStoreSize, poolSizePercentage, indexChunkSizePercentage,
-      initialCountPercentage, heapMemoryManager);
+    this.chunkSize = chunkSize;
+    this.indexChunkSize = indexChunkSize;
+    initializePools(globalMemStoreSize, poolSizePercentage, indexChunkPercentage,
+      initialCountPercentage, heapMemoryManager);
   }
 
   @VisibleForTesting
-  private void initializePools(int chunkSize, long globalMemStoreSize,
-                               float poolSizePercentage, float indexChunkSizePercentage,
-                               float initialCountPercentage,
-                               HeapMemoryManager heapMemoryManager) {
+  private void initializePools(long globalMemStoreSize, float poolSizePercentage,
+      float indexChunkPercentage, float initialCountPercentage,
+      HeapMemoryManager heapMemoryManager) {
     this.dataChunksPool = initializePool("data", globalMemStoreSize,
-      (1 - indexChunkSizePercentage) * poolSizePercentage,
-      initialCountPercentage, chunkSize, heapMemoryManager);
+      (1 - indexChunkPercentage) * poolSizePercentage,
+      initialCountPercentage, chunkSize, heapMemoryManager);
     // The index chunks pool is needed only when the index type is CCM.
-    // Since the pools are not created at all when the index type isn't CCM,
-    // we don't need to check it here.
     this.indexChunksPool = initializePool("index", globalMemStoreSize,
-      indexChunkSizePercentage * poolSizePercentage,
-      initialCountPercentage, (int) (indexChunkSizePercentage * chunkSize),
-      heapMemoryManager);
+      indexChunkPercentage * poolSizePercentage, initialCountPercentage,
+      indexChunkSize, heapMemoryManager);
   }
 
   /**
@@ -117,24 +117,33 @@ private void initializePools(int chunkSize, long globalMemStoreSize,
    * @param globalMemStoreSize the global memstore size
    * @param poolSizePercentage pool size percentage
    * @param initialCountPercentage the initial count of the chunk pool if any
+   * @param indexChunkPercentage index size percentage of the pool
+   * @param indexChunksize index chunk size
    * @param heapMemoryManager the heapmemory manager
    * @return singleton MSLABChunkCreator
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "LI_LAZY_INIT_STATIC",
       justification = "Method is called by single thread at the starting of RS")
-  @VisibleForTesting
   public static ChunkCreator initialize(int chunkSize, boolean offheap, long globalMemStoreSize,
-      float poolSizePercentage, float initialCountPercentage,
-      HeapMemoryManager heapMemoryManager) {
+      float poolSizePercentage, float initialCountPercentage, float indexChunkPercentage,
+      int indexChunksize, HeapMemoryManager heapMemoryManager) {
     if (instance != null) {
       return instance;
     }
     instance = new ChunkCreator(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-        initialCountPercentage, heapMemoryManager,
-        MemStoreLABImpl.INDEX_CHUNK_PERCENTAGE_DEFAULT);
+        initialCountPercentage, indexChunkPercentage, indexChunksize, heapMemoryManager);
     return instance;
   }
 
+  @VisibleForTesting
+  public static ChunkCreator initialize(int chunkSize, boolean offheap, long globalMemStoreSize,
+      float poolSizePercentage, float initialCountPercentage,
+      HeapMemoryManager heapMemoryManager) {
+    return initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
+      initialCountPercentage, MemStoreLAB.INDEX_CHUNK_PERCENTAGE_DEFAULT,
+      MemStoreLAB.INDEX_CHUNK_SIZE_DEFAULT, heapMemoryManager);
+  }
+
   @VisibleForTesting
   public static ChunkCreator getInstance() {
     return instance;
@@ -171,18 +180,13 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
   Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType) {
     switch (chunkType) {
       case INDEX_CHUNK:
-        if (indexChunksPool != null) {
-          return getChunk(chunkIndexType, indexChunksPool.getChunkSize());
+        if (indexChunksPool.isPresent()) {
+          return getChunk(chunkIndexType, this.indexChunkSize);
         }
       case DATA_CHUNK:
-        if (dataChunksPool == null) {
-          return getChunk(chunkIndexType, chunkSize);
-        } else {
-          return getChunk(chunkIndexType, dataChunksPool.getChunkSize());
-        }
+        return getChunk(chunkIndexType, this.chunkSize);
       default:
-        throw new IllegalArgumentException(
-            "chunkType must either be INDEX_CHUNK or DATA_CHUNK");
+        throw new IllegalArgumentException("chunkType must either be INDEX_CHUNK or DATA_CHUNK");
     }
   }
 
@@ -193,34 +197,19 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, ChunkType chunkType)
    * @param size the size of the chunk to be allocated, in bytes
    */
   Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
-    Chunk chunk = null;
-    MemStoreChunkPool pool = null;
-
-    // if the size is suitable for one of the pools
-    if (dataChunksPool != null && size == dataChunksPool.getChunkSize()) {
-      pool = dataChunksPool;
-    } else if (indexChunksPool != null && size == indexChunksPool.getChunkSize()) {
-      pool = indexChunksPool;
-    }
-
-    // if we have a pool
-    if (pool != null) {
-      // the pool creates the chunk internally. The chunk#init() call happens here
-      chunk = pool.getChunk();
-      // the pool has run out of maxCount
-      if (chunk == null) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("The chunk pool is full. Reached maxCount= " + pool.getMaxCount()
-              + ". Creating chunk onheap.");
+    Optional<MemStoreChunkPool> pool =
+        size == this.indexChunkSize ? indexChunksPool : dataChunksPool;
+    Chunk chunk = pool.map(MemStoreChunkPool::getChunk).orElseGet(
+      new Supplier<Chunk>() {
+        @Override
+        public Chunk get() {
+          if (LOG.isTraceEnabled() && pool.isPresent()) {
+            LOG.trace("The chunk pool is full. Reached maxCount= " + pool.get().getMaxCount()
+                + ". Creating chunk onheap.");
+          }
+          return createChunk(false, chunkIndexType, size);
         }
-      }
-    }
-
-    if (chunk == null) {
-      // the second parameter explains whether CellChunkMap index is requested,
-      // in that case, put allocated on demand chunk mapping into chunkIdMap
-      chunk = createChunk(false, chunkIndexType, size);
-    }
+      });
 
     // now we need to actually do the expensive memory allocation step in case of a new chunk,
     // else only the offset is set to the beginning of the chunk to accept allocations
@@ -236,9 +225,9 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType, int size) {
    */
   Chunk getJumboChunk(int jumboSize) {
     int allocSize = jumboSize + SIZEOF_CHUNK_HEADER;
-    if (allocSize <= dataChunksPool.getChunkSize()) {
+    if (allocSize <= this.chunkSize) {
       LOG.warn("Jumbo chunk size " + jumboSize + " must be more than regular chunk size "
-          + dataChunksPool.getChunkSize() + ". Converting to regular chunk.");
+          + this.chunkSize + ". Converting to regular chunk.");
       return getChunk(CompactingMemStore.IndexType.CHUNK_MAP);
     }
     // the new chunk is going to hold the jumbo cell data and needs to be referenced by
@@ -273,13 +262,21 @@ private Chunk createChunk(boolean pool, CompactingMemStore.IndexType chunkIndexT
   // Chunks from pool are created covered with strong references anyway
   // TODO: change to CHUNK_MAP if it is generally defined
   private Chunk createChunkForPool(CompactingMemStore.IndexType chunkIndexType, int chunkSize) {
-    if (chunkSize != dataChunksPool.getChunkSize() &&
-        chunkSize != indexChunksPool.getChunkSize()) {
+    if ((!dataChunksPool.isPresent() || chunkSize != this.chunkSize) &&
+        (!indexChunksPool.isPresent() || chunkSize != this.indexChunkSize)) {
       return null;
     }
     return createChunk(true, chunkIndexType, chunkSize);
   }
 
+  boolean hasIndexChunkPool() {
+    return indexChunksPool.isPresent();
+  }
+
+  boolean hasDataChunkPool() {
+    return dataChunksPool.isPresent();
+  }
+
   @VisibleForTesting
   // Used to translate the ChunkID into a chunk ref
   Chunk getChunk(int id) {
@@ -389,10 +386,6 @@ Chunk getChunk(CompactingMemStore.IndexType chunkIndexType) {
       return chunk;
     }
 
-    int getChunkSize() {
-      return chunkSize;
-    }
-
    /**
     * Add the chunks to the pool, when the pool achieves the max size, it will skip the remaining
     * chunks
@@ -443,8 +436,7 @@ public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) {
        LOG.warn("{} not tuning the chunk pool as it is offheap", label);
        return;
      }
-     int newMaxCount =
-         (int) (newMemstoreSize * poolSizePercentage / getChunkSize());
+     int newMaxCount = (int) (newMemstoreSize * poolSizePercentage / chunkSize);
      if (newMaxCount != this.maxCount) {
        // We need an adjustment in the chunks numbers
        if (newMaxCount > this.maxCount) {
@@ -476,15 +468,15 @@ static void clearDisableFlag() {
     chunkPoolDisabled = false;
   }
 
-  private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
+  private Optional<MemStoreChunkPool> initializePool(String label, long globalMemStoreSize,
       float poolSizePercentage, float initialCountPercentage, int chunkSize,
       HeapMemoryManager heapMemoryManager) {
     if (poolSizePercentage <= 0) {
       LOG.info("{} poolSizePercentage is less than 0. So not using pool", label);
-      return null;
+      return Optional.empty();
     }
     if (chunkPoolDisabled) {
-      return null;
+      return Optional.empty();
     }
     if (poolSizePercentage > 1.0) {
       throw new IllegalArgumentException(
@@ -504,7 +496,7 @@ private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
       // Register with Heap Memory manager
       heapMemoryManager.registerTuneObserver(memStoreChunkPool);
     }
-    return memStoreChunkPool;
+    return Optional.ofNullable(memStoreChunkPool);
   }
 
   @VisibleForTesting
@@ -516,21 +508,12 @@ int getMaxCount() {
   int getMaxCount(ChunkType chunkType) {
     switch (chunkType) {
       case INDEX_CHUNK:
-        if (indexChunksPool != null) {
-          return indexChunksPool.getMaxCount();
-        }
-        break;
+        return indexChunksPool.map(MemStoreChunkPool::getMaxCount).orElse(0);
       case DATA_CHUNK:
-        if (dataChunksPool != null) {
-          return dataChunksPool.getMaxCount();
-        }
-        break;
+        return dataChunksPool.map(MemStoreChunkPool::getMaxCount).orElse(0);
       default:
-        throw new IllegalArgumentException(
-            "chunkType must either be INDEX_CHUNK or DATA_CHUNK");
+        throw new IllegalArgumentException("chunkType must either be INDEX_CHUNK or DATA_CHUNK");
     }
-
-    return 0;
   }
 
   @VisibleForTesting
@@ -542,20 +525,12 @@ int getPoolSize() {
   int getPoolSize(ChunkType chunkType) {
     switch (chunkType) {
       case INDEX_CHUNK:
-        if (indexChunksPool != null) {
-          return indexChunksPool.reclaimedChunks.size();
-        }
-        break;
+        return indexChunksPool.map(e -> e.reclaimedChunks.size()).orElse(0);
      case DATA_CHUNK:
-        if (dataChunksPool != null) {
-          return dataChunksPool.reclaimedChunks.size();
-        }
-        break;
+        return dataChunksPool.map(e -> e.reclaimedChunks.size()).orElse(0);
      default:
-        throw new IllegalArgumentException(
-            "chunkType must either be INDEX_CHUNK or DATA_CHUNK");
+        throw new IllegalArgumentException("chunkType must either be INDEX_CHUNK or DATA_CHUNK");
     }
-    return 0;
   }
 
   @VisibleForTesting
@@ -564,11 +539,10 @@ boolean isChunkInPool(int chunkId) {
     if (c==null) {
       return false;
     }
 
-    // chunks that are from pool will return true chunk reference not null
-    if (dataChunksPool != null && dataChunksPool.reclaimedChunks.contains(c)) {
+    if (dataChunksPool.map(e -> e.reclaimedChunks.contains(c)).orElse(false)) {
       return true;
-    } else if (indexChunksPool != null && indexChunksPool.reclaimedChunks.contains(c)) {
+    } else if (indexChunksPool.map(e -> e.reclaimedChunks.contains(c)).orElse(false)) {
       return true;
     }
     return false;
@@ -579,12 +553,8 @@ boolean isChunkInPool(int chunkId) {
    */
   @VisibleForTesting
   void clearChunksInPool() {
-    if (dataChunksPool != null) {
-      dataChunksPool.reclaimedChunks.clear();
-    }
-    if (indexChunksPool != null) {
-      indexChunksPool.reclaimedChunks.clear();
-    }
+    dataChunksPool.ifPresent(e -> e.reclaimedChunks.clear());
+    indexChunksPool.ifPresent(e -> e.reclaimedChunks.clear());
   }
 
   int getChunkSize() {
@@ -594,24 +564,19 @@ int getChunkSize() {
   int getChunkSize(ChunkType chunkType) {
     switch (chunkType) {
       case INDEX_CHUNK:
-        if (indexChunksPool != null) {
-          return indexChunksPool.getChunkSize();
+        if (indexChunksPool.isPresent()) {
+          return indexChunkSize;
         }
       case DATA_CHUNK:
-        if (dataChunksPool != null) {
-          return dataChunksPool.getChunkSize();
-        } else { // When pools are empty
-          return chunkSize;
-        }
+        return chunkSize;
       default:
-        throw new IllegalArgumentException(
-            "chunkType must either be INDEX_CHUNK or DATA_CHUNK");
+        throw new IllegalArgumentException("chunkType must either be INDEX_CHUNK or DATA_CHUNK");
     }
   }
 
   synchronized void putbackChunks(Set<Integer> chunks) {
     // if there is no pool just try to clear the chunkIdMap in case there is something
-    if (dataChunksPool == null && indexChunksPool == null) {
+    if (!dataChunksPool.isPresent() && !indexChunksPool.isPresent()) {
       this.removeChunks(chunks);
       return;
     }
@@ -623,9 +588,9 @@ synchronized void putbackChunks(Set<Integer> chunks) {
       Chunk chunk = ChunkCreator.this.getChunk(chunkID);
       if (chunk != null) {
         if (chunk.isFromPool() && chunk.isIndexChunk()) {
-          indexChunksPool.putbackChunks(chunk);
-        } else if (chunk.isFromPool() && chunk.size == dataChunksPool.getChunkSize()) {
-          dataChunksPool.putbackChunks(chunk);
+          indexChunksPool.ifPresent(e -> e.putbackChunks(chunk));
+        } else if (chunk.isFromPool() && chunk.size == this.chunkSize) {
+          dataChunksPool.ifPresent(e -> e.putbackChunks(chunk));
         } else {
           // chunks which are not from one of the pools
           // should be released without going to the pools.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 11fd8c7e54b7..41b09c8e9c4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1611,9 +1611,13 @@ protected void initializeMemStoreChunkCreator() {
       float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY,
         MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT);
       int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT);
+      float indexChunkPercentage = conf.getFloat(MemStoreLAB.INDEX_CHUNK_PERCENTAGE_KEY,
+        MemStoreLAB.INDEX_CHUNK_PERCENTAGE_DEFAULT);
+      int indexChunksize = conf.getInt(MemStoreLAB.INDEX_CHUNK_SIZE_KEY,
+        MemStoreLAB.INDEX_CHUNK_SIZE_DEFAULT);
       // init the chunkCreator
       ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage,
-        initialCountPercentage, this.hMemManager);
+        initialCountPercentage, indexChunkPercentage, indexChunksize, this.hMemManager);
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
index 90cf93207b91..91b94ea45522 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java
@@ -19,8 +19,8 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A memstore-local allocation buffer.
@@ -52,8 +52,8 @@ public interface MemStoreLAB {
   String CHUNK_SIZE_KEY = "hbase.hregion.memstore.mslab.chunksize";
   int CHUNK_SIZE_DEFAULT = 2048 * 1024;
-  String INDEX_CHUNK_PERCENTAGE_KEY = "hbase.hregion.memstore.mslab.indexchunksize";
-  float INDEX_CHUNK_PERCENTAGE_DEFAULT = 0.1f;
+  String INDEX_CHUNK_SIZE_KEY = "hbase.hregion.memstore.mslab.indexchunksize";
+  int INDEX_CHUNK_SIZE_DEFAULT = (int) (CHUNK_SIZE_DEFAULT * 0.1f);
   String MAX_ALLOC_KEY = "hbase.hregion.memstore.mslab.max.allocation";
   int MAX_ALLOC_DEFAULT = 256 * 1024; // allocs bigger than this don't go through
                                       // allocator
 
@@ -61,8 +61,10 @@ public interface MemStoreLAB {
   // MSLAB pool related configs
   String CHUNK_POOL_MAXSIZE_KEY = "hbase.hregion.memstore.chunkpool.maxsize";
   String CHUNK_POOL_INITIALSIZE_KEY = "hbase.hregion.memstore.chunkpool.initialsize";
+  String INDEX_CHUNK_PERCENTAGE_KEY = "hbase.hregion.memstore.chunkpool.indexchunkpercent";
   float POOL_MAX_SIZE_DEFAULT = 1.0f;
   float POOL_INITIAL_SIZE_DEFAULT = 0.0f;
+  float INDEX_CHUNK_PERCENTAGE_DEFAULT = 0.1f;
 
   /**
    * Allocates slice in this LAB and copy the passed Cell into this area. Returns new Cell instance
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIndexChunkPoolSizeConfigurable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIndexChunkPoolSizeConfigurable.java
new file mode 100644
index 000000000000..852210d740e7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestIndexChunkPoolSizeConfigurable.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test Index Chunk Pool's size configurable, See HBASE-23196
+ */
+@Category({RegionServerTests.class, SmallTests.class})
+public class TestIndexChunkPoolSizeConfigurable {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestIndexChunkPoolSizeConfigurable.class);
+
+  @Test
+  public void testIndexChunkPoolConfigurable() throws Exception {
+    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
+    conf.setFloat(MemStoreLAB.INDEX_CHUNK_PERCENTAGE_KEY, 0f);
+    TEST_UTIL.startMiniCluster(1);
+    assertFalse(ChunkCreator.getInstance().hasIndexChunkPool());
+    assertTrue(ChunkCreator.getInstance().hasDataChunkPool());
+    TEST_UTIL.shutdownMiniCluster();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
index b29a5cdf7c4f..c36af2bb25d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
@@ -232,7 +232,8 @@ public void testPutbackChunksMultiThreaded() throws Exception {
     final int chunkSize = 40;
     final int valSize = 7;
     ChunkCreator oldCreator = ChunkCreator.getInstance();
-    ChunkCreator newCreator = new ChunkCreator(chunkSize, false, 400, 1, 0.5f, null, 0);
+    ChunkCreator newCreator = new ChunkCreator(chunkSize, false, 400, 1, 0.5f, 0,
+        MemStoreLAB.INDEX_CHUNK_SIZE_DEFAULT, null);
     assertEquals(initialCount, newCreator.getPoolSize());
    assertEquals(maxCount, newCreator.getMaxCount());
     ChunkCreator.instance = newCreator;// Replace the global ref with the new one we created.
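
A minimal usage sketch, not part of the patch: it shows how the two keys introduced above could be set and read back on a plain Hadoop Configuration, the same way HRegionServer#initializeMemStoreChunkCreator reads them before calling ChunkCreator.initialize(). The key names and defaults come from MemStoreLAB in this patch; the concrete values and the class name IndexChunkConfigSketch are illustrative only, and the snippet assumes hbase-server is on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.MemStoreLAB;

public class IndexChunkConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Absolute index chunk size in bytes (hbase.hregion.memstore.mslab.indexchunksize);
    // the patch defaults this to 10% of the 2 MB data chunk. 200 KB here is an example value.
    conf.setInt(MemStoreLAB.INDEX_CHUNK_SIZE_KEY, 200 * 1024);
    // Share of the chunk pool reserved for index chunks
    // (hbase.hregion.memstore.chunkpool.indexchunkpercent); 0 disables the index chunk pool,
    // which is what the new TestIndexChunkPoolSizeConfigurable asserts.
    conf.setFloat(MemStoreLAB.INDEX_CHUNK_PERCENTAGE_KEY, 0.1f);

    // HRegionServer reads the values back with the same defaults before initializing ChunkCreator.
    int indexChunkSize = conf.getInt(MemStoreLAB.INDEX_CHUNK_SIZE_KEY,
        MemStoreLAB.INDEX_CHUNK_SIZE_DEFAULT);
    float indexChunkPercentage = conf.getFloat(MemStoreLAB.INDEX_CHUNK_PERCENTAGE_KEY,
        MemStoreLAB.INDEX_CHUNK_PERCENTAGE_DEFAULT);
    System.out.println("index chunk size=" + indexChunkSize
        + " bytes, index pool share=" + indexChunkPercentage);
  }
}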