diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 75b5246d2c88..ee6d5331f3f6 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -70,6 +70,7 @@
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -369,8 +370,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration
     encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding;
     encoding = encoding == null ? DataBlockEncoding.NONE : encoding;
     HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression)
-      .withDataBlockEncoding(encoding).withChecksumType(HStore.getChecksumType(conf))
-      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blockSize)
+      .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf))
+      .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize)
       .withColumnFamily(family).withTableName(tableName);

     if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index acc8f74a501b..2ae29385eb42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -56,9 +56,9 @@
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -540,8 +540,8 @@ public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
     Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
     boolean isCompaction) throws IOException {
     return createWriter(conf, fs, family, new Path(basePath, mobFileName.getFileName()),
-      maxKeyCount, compression, cacheConfig, cryptoContext, HStore.getChecksumType(conf),
-      HStore.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction);
+      maxKeyCount, compression, cacheConfig, cryptoContext, StoreUtils.getChecksumType(conf),
+      StoreUtils.getBytesPerChecksum(conf), family.getBlocksize(), BloomType.NONE, isCompaction);
   }

   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 5960b8030900..7ce7f0310c7d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -93,7 +93,6 @@ public class HMobStore extends HStore {
   private AtomicLong mobFlushedCellsSize = new AtomicLong();
   private AtomicLong mobScanCellsCount = new AtomicLong();
   private AtomicLong mobScanCellsSize = new AtomicLong();
-  private ColumnFamilyDescriptor family;
   private Map<TableName, List<Path>> map = new ConcurrentHashMap<>();
   private final IdLock keyLock = new IdLock();
   // When we add a MOB reference cell to the HFile, we will add 2 tags along with it
@@ -107,16 +106,15 @@ public class HMobStore extends HStore {
   public HMobStore(final HRegion region, final ColumnFamilyDescriptor family,
       final Configuration confParam, boolean warmup) throws IOException {
     super(region, family, confParam, warmup);
-    this.family = family;
     this.mobFileCache = region.getMobFileCache();
     this.homePath = MobUtils.getMobHome(conf);
     this.mobFamilyPath = MobUtils.getMobFamilyPath(conf, this.getTableName(),
-      family.getNameAsString());
+      getColumnFamilyName());
     List<Path> locations = new ArrayList<>(2);
     locations.add(mobFamilyPath);
     TableName tn = region.getTableDescriptor().getTableName();
     locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn)
-      .getEncodedName(), family.getNameAsString()));
+      .getEncodedName(), getColumnFamilyName()));
     map.put(tn, locations);
     List<Tag> tags = new ArrayList<>(2);
     tags.add(MobConstants.MOB_REF_TAG);
@@ -209,7 +207,7 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey
       Compression.Algorithm compression, byte[] startKey,
       boolean isCompaction) throws IOException {
     MobFileName mobFileName = MobFileName.create(startKey, date, UUID.randomUUID()
-      .toString().replaceAll("-", ""), region.getRegionInfo().getEncodedName());
+      .toString().replaceAll("-", ""), getHRegion().getRegionInfo().getEncodedName());
     return createWriterInTmp(mobFileName, basePath, maxKeyCount, compression, isCompaction);
   }

@@ -226,9 +224,11 @@ public StoreFileWriter createWriterInTmp(String date, Path basePath, long maxKey
   public StoreFileWriter createWriterInTmp(MobFileName mobFileName, Path basePath,
       long maxKeyCount, Compression.Algorithm compression,
       boolean isCompaction) throws IOException {
-    return MobUtils.createWriter(conf, region.getFilesystem(), family,
-      new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, cacheConf,
-      cryptoContext, checksumType, bytesPerChecksum, blocksize, BloomType.NONE, isCompaction);
+    return MobUtils.createWriter(conf, getFileSystem(), getColumnFamilyDescriptor(),
+      new Path(basePath, mobFileName.getFileName()), maxKeyCount, compression, getCacheConfig(),
+      getStoreContext().getEncryptionContext(), StoreUtils.getChecksumType(conf),
+      StoreUtils.getBytesPerChecksum(conf), getStoreContext().getBlockSize(), BloomType.NONE,
+      isCompaction);
   }

   /**
@@ -245,10 +245,10 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio
     validateMobFile(sourceFile);
     LOG.info(" FLUSH Renaming flushed file from {} to {}", sourceFile, dstPath);
     Path parent = dstPath.getParent();
-    if (!region.getFilesystem().exists(parent)) {
-      region.getFilesystem().mkdirs(parent);
+    if (!getFileSystem().exists(parent)) {
+      getFileSystem().mkdirs(parent);
     }
-    if (!region.getFilesystem().rename(sourceFile, dstPath)) {
+    if (!getFileSystem().rename(sourceFile, dstPath)) {
       throw new IOException("Failed rename of " + sourceFile + " to " + dstPath);
     }
   }
@@ -261,7 +261,7 @@ public void commitFile(final Path sourceFile, Path targetPath) throws IOExceptio
   private void validateMobFile(Path path) throws IOException {
     HStoreFile storeFile = null;
     try {
-      storeFile = new HStoreFile(region.getFilesystem(), path, conf, this.cacheConf,
+      storeFile = new HStoreFile(getFileSystem(), path, conf, getCacheConfig(),
         BloomType.NONE, isPrimaryReplicaStore());
       storeFile.initReader();
     } catch (IOException e) {
@@ -352,9 +352,11 @@ public List<Path> getLocations(TableName tableName) throws IOException {
       locations = map.get(tableName);
       if (locations == null) {
         locations = new ArrayList<>(2);
-        locations.add(MobUtils.getMobFamilyPath(conf, tableName, family.getNameAsString()));
+        locations.add(MobUtils.getMobFamilyPath(conf, tableName, getColumnFamilyDescriptor()
+          .getNameAsString()));
         locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tableName,
-          MobUtils.getMobRegionInfo(tableName).getEncodedName(), family.getNameAsString()));
+          MobUtils.getMobRegionInfo(tableName).getEncodedName(), getColumnFamilyDescriptor()
+          .getNameAsString()));
         map.put(tableName, locations);
       }
     } finally {
@@ -388,7 +390,7 @@ private MobCell readCell(List<Path> locations, String fileName, Cell search,
     MobFile file = null;
     Path path = new Path(location, fileName);
     try {
-      file = mobFileCache.openFile(fs, path, cacheConf);
+      file = mobFileCache.openFile(fs, path, getCacheConfig());
       return readPt != -1 ? file.readCell(search, cacheMobBlocks, readPt)
        : file.readCell(search, cacheMobBlocks);
     } catch (IOException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 3a71c230bebe..99880efece73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -97,7 +97,6 @@
 import org.apache.hadoop.hbase.security.EncryptionUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -157,11 +156,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   protected final MemStore memstore;
   // This stores directory in the filesystem.
-  protected final HRegion region;
-  private final ColumnFamilyDescriptor family;
-  private final HRegionFileSystem fs;
+  private final HRegion region;
   protected Configuration conf;
-  protected CacheConfig cacheConf;
   private long lastCompactSize = 0;
   volatile boolean forceMajor = false;
   private AtomicLong storeSize = new AtomicLong();
@@ -215,16 +211,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   private final Set<ChangedReadersObserver> changedReaderObservers =
     Collections.newSetFromMap(new ConcurrentHashMap<ChangedReadersObserver, Boolean>());

-  protected final int blocksize;
   private HFileDataBlockEncoder dataBlockEncoder;

-  /** Checksum configuration */
-  protected ChecksumType checksumType;
-  protected int bytesPerChecksum;
-
-  // Comparing KeyValues
-  protected final CellComparator comparator;
-
   final StoreEngine<?, ?, ?, ?> storeEngine;

   private static final AtomicBoolean offPeakCompactionTracker = new AtomicBoolean();
@@ -236,7 +224,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   private long blockingFileCount;
   private int compactionCheckMultiplier;

-  protected Encryption.Context cryptoContext = Encryption.Context.NONE;

   private AtomicLong flushedCellsCount = new AtomicLong();
   private AtomicLong compactedCellsCount = new AtomicLong();
@@ -246,6 +233,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   private AtomicLong compactedCellsSize = new AtomicLong();
   private AtomicLong majorCompactedCellsSize = new AtomicLong();

+  private final StoreContext storeContext;
+
   /**
    * Constructor
    * @param family HColumnDescriptor for this column
@@ -254,12 +243,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
       final Configuration confParam, boolean warmup) throws IOException {
-    this.fs = region.getRegionFileSystem();
-
-    // Assemble the store's home directory and Ensure it exists.
-    fs.createStoreDir(family.getNameAsString());
-    this.region = region;
-    this.family = family;
     // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
     // CompoundConfiguration will look for keys in reverse order of addition, so we'd
     // add global config first, then table and cf overrides, then cf metadata.
@@ -268,18 +251,22 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
         .addBytesMap(region.getTableDescriptor().getValues())
         .addStringMap(family.getConfiguration())
         .addBytesMap(family.getValues());
-    this.blocksize = family.getBlocksize();
+
+    this.region = region;
+    this.storeContext = initializeStoreContext(family);
+
+    // Assemble the store's home directory and Ensure it exists.
+    region.getRegionFileSystem().createStoreDir(family.getNameAsString());

     // set block storage policy for store directory
     String policyName = family.getStoragePolicy();
     if (null == policyName) {
       policyName = this.conf.get(BLOCK_STORAGE_POLICY_KEY, DEFAULT_BLOCK_STORAGE_POLICY);
     }
-    this.fs.setStoragePolicy(family.getNameAsString(), policyName.trim());
+    region.getRegionFileSystem().setStoragePolicy(family.getNameAsString(), policyName.trim());

     this.dataBlockEncoder = new HFileDataBlockEncoderImpl(family.getDataBlockEncoding());
-    this.comparator = region.getCellComparator();

     // used by ScanQueryMatcher
     long timeToPurgeDeletes = Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
@@ -288,14 +275,11 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
     long ttl = determineTTLFromFamily(family);
     // Why not just pass a HColumnDescriptor in here altogether? Even if have
     // to clone it?
-    scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator);
+    scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, region.getCellComparator());
     this.memstore = getMemstore();

     this.offPeakHours = OffPeakHours.getInstance(conf);

-    // Setting up cache configuration for this family
-    createCacheConf(family);
-
     this.verifyBulkLoads = conf.getBoolean("hbase.hstore.bulkload.verify", false);

     this.blockingFileCount =
@@ -308,7 +292,7 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
       this.compactionCheckMultiplier = DEFAULT_COMPACTCHECKER_INTERVAL_MULTIPLIER;
     }

-    this.storeEngine = createStoreEngine(this, this.conf, this.comparator);
+    this.storeEngine = createStoreEngine(this, this.conf, region.getCellComparator());
     List<HStoreFile> hStoreFiles = loadStoreFiles(warmup);
     // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read
     // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and
@@ -318,10 +302,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
     this.totalUncompressedBytes.addAndGet(getTotalUncompressedBytes(hStoreFiles));
     this.storeEngine.getStoreFileManager().loadFiles(hStoreFiles);

-    // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
-    this.checksumType = getChecksumType(conf);
-    // Initialize bytes per checksum
-    this.bytesPerChecksum = getBytesPerChecksum(conf);
     flushRetriesNumber = conf.getInt(
         "hbase.hstore.flush.retries.number", DEFAULT_FLUSH_RETRIES_NUMBER);
     pauseTime = conf.getInt(HConstants.HBASE_SERVER_PAUSE, HConstants.DEFAULT_HBASE_SERVER_PAUSE);
@@ -330,7 +310,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
       "hbase.hstore.flush.retries.number must be > 0, not " + flushRetriesNumber);
     }

-    cryptoContext = EncryptionUtil.createEncryptionContext(conf, family);

     int confPrintThreshold =
       this.conf.getInt("hbase.region.store.parallel.put.print.threshold", 50);
@@ -347,6 +326,32 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
     cacheOnWriteLogged = false;
   }

+  private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
+    return new StoreContext.Builder()
+      .withBlockSize(family.getBlocksize())
+      .withEncryptionContext(EncryptionUtil.createEncryptionContext(conf, family))
+      .withBloomType(family.getBloomFilterType())
+      .withCacheConfig(createCacheConf(family))
+      .withCellComparator(region.getCellComparator())
+      .withColumnFamilyDescriptor(family)
+      .withCompactedFilesSupplier(this::getCompactedFiles)
+      .withRegionFileSystem(region.getRegionFileSystem())
+      .withFavoredNodesSupplier(this::getFavoredNodes)
+      .withFamilyStoreDirectoryPath(region.getRegionFileSystem()
+        .getStoreDir(family.getNameAsString()))
+      .withRegionCoprocessorHost(region.getCoprocessorHost())
+      .build();
+  }
+
+  private InetSocketAddress[] getFavoredNodes() {
+    InetSocketAddress[] favoredNodes = null;
+    if (region.getRegionServerServices() != null) {
+      favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion(
+        region.getRegionInfo().getEncodedName());
+    }
+    return favoredNodes;
+  }
+
   /**
    * @return MemStore Instance to use in this store.
    */
@@ -358,7 +363,7 @@ private MemStore getMemstore() {
       inMemoryCompaction = MemoryCompactionPolicy.valueOf(
         conf.get("hbase.systemtables.compacting.memstore.type", "NONE"));
     } else {
-      inMemoryCompaction = family.getInMemoryCompaction();
+      inMemoryCompaction = getColumnFamilyDescriptor().getInMemoryCompaction();
     }
     if (inMemoryCompaction == null) {
       inMemoryCompaction =
@@ -368,13 +373,13 @@ private MemStore getMemstore() {
     switch (inMemoryCompaction) {
       case NONE:
         ms = ReflectionUtils.newInstance(DefaultMemStore.class,
-          new Object[] { conf, this.comparator,
+          new Object[] { conf, getComparator(),
             this.getHRegion().getRegionServicesForStores()});
         break;
       default:
         Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME,
           CompactingMemStore.class, CompactingMemStore.class);
-        ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this,
+        ms = ReflectionUtils.newInstance(clz, new Object[]{conf, getComparator(), this,
           this.getHRegion().getRegionServicesForStores(), inMemoryCompaction});
     }
     return ms;
@@ -384,10 +389,12 @@ private MemStore getMemstore() {
    * Creates the cache config.
    * @param family The current column family.
    */
-  protected void createCacheConf(final ColumnFamilyDescriptor family) {
-    this.cacheConf = new CacheConfig(conf, family, region.getBlockCache(),
+  protected CacheConfig createCacheConf(final ColumnFamilyDescriptor family) {
+    CacheConfig cacheConf = new CacheConfig(conf, family, region.getBlockCache(),
       region.getRegionServicesForStores().getByteBuffAllocator());
-    LOG.info("Created cacheConfig: " + this.getCacheConfig() + " for " + this);
+    LOG.info("Created cacheConfig: {}, for column family {} of region {} ", cacheConf,
+      family.getNameAsString(), region.getRegionInfo().getEncodedName());
+    return cacheConf;
   }

   /**
@@ -400,7 +407,7 @@ protected void createCacheConf(final ColumnFamilyDescriptor family) {
    */
   protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
       CellComparator kvComparator) throws IOException {
-    return StoreEngine.create(store, conf, comparator);
+    return StoreEngine.create(store, conf, kvComparator);
   }

   /**
@@ -421,9 +428,13 @@ public static long determineTTLFromFamily(final ColumnFamilyDescriptor family) {
     return ttl;
   }

+  StoreContext getStoreContext() {
+    return storeContext;
+  }
+
   @Override
   public String getColumnFamilyName() {
-    return this.family.getNameAsString();
+    return this.storeContext.getFamily().getNameAsString();
   }

   @Override
@@ -433,11 +444,11 @@ public TableName getTableName() {

   @Override
   public FileSystem getFileSystem() {
-    return this.fs.getFileSystem();
+    return storeContext.getRegionFileSystem().getFileSystem();
   }

   public HRegionFileSystem getRegionFileSystem() {
-    return this.fs;
+    return storeContext.getRegionFileSystem();
   }

   /* Implementation of StoreConfigInformation */
@@ -474,33 +485,10 @@ public long getBlockingFileCount() {
   }
   /* End implementation of StoreConfigInformation */

-  /**
-   * Returns the configured bytesPerChecksum value.
-   * @param conf The configuration
-   * @return The bytesPerChecksum that is set in the configuration
-   */
-  public static int getBytesPerChecksum(Configuration conf) {
-    return conf.getInt(HConstants.BYTES_PER_CHECKSUM,
-        HFile.DEFAULT_BYTES_PER_CHECKSUM);
-  }
-
-  /**
-   * Returns the configured checksum algorithm.
-   * @param conf The configuration
-   * @return The checksum algorithm that is set in the configuration
-   */
-  public static ChecksumType getChecksumType(Configuration conf) {
-    String checksumName = conf.get(HConstants.CHECKSUM_TYPE_NAME);
-    if (checksumName == null) {
-      return ChecksumType.getDefaultChecksumType();
-    } else {
-      return ChecksumType.nameToType(checksumName);
-    }
-  }

   @Override
   public ColumnFamilyDescriptor getColumnFamilyDescriptor() {
-    return this.family;
+    return this.storeContext.getFamily();
   }

   @Override
@@ -559,7 +547,7 @@ void setDataBlockEncoderInTest(HFileDataBlockEncoder blockEncoder) {
    * from the given directory.
    */
   private List<HStoreFile> loadStoreFiles(boolean warmup) throws IOException {
-    Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
+    Collection<StoreFileInfo> files = getRegionFileSystem().getStoreFiles(getColumnFamilyName());
     return openStoreFiles(files, warmup);
   }

@@ -610,7 +598,7 @@ private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean
     if (ioe != null) {
       // close StoreFile readers
       boolean evictOnClose =
-          cacheConf != null? cacheConf.shouldEvictOnClose(): true;
+          getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true;
       for (HStoreFile file : results) {
         try {
           if (file != null) {
@@ -638,7 +626,8 @@ private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean
     results.removeAll(filesToRemove);
     if (!filesToRemove.isEmpty() && this.isPrimaryReplicaStore()) {
       LOG.debug("Moving the files {} to archive", filesToRemove);
-      this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove);
+      getRegionFileSystem().removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(),
+        filesToRemove);
     }
   }

@@ -647,7 +636,7 @@ private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean

   @Override
   public void refreshStoreFiles() throws IOException {
-    Collection<StoreFileInfo> newFiles = fs.getStoreFiles(getColumnFamilyName());
+    Collection<StoreFileInfo> newFiles = getRegionFileSystem().getStoreFiles(getColumnFamilyName());
     refreshStoreFilesInternal(newFiles);
   }

@@ -658,7 +647,7 @@ public void refreshStoreFiles() throws IOException {
   public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
     List<StoreFileInfo> storeFiles = new ArrayList<>(newFiles.size());
     for (String file : newFiles) {
-      storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file));
+      storeFiles.add(getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file));
     }
     refreshStoreFilesInternal(storeFiles);
   }
@@ -735,7 +724,8 @@ protected HStoreFile createStoreFileAndReader(final Path p) throws IOException {

   private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
     info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
-    HStoreFile storeFile = new HStoreFile(info, this.family.getBloomFilterType(), this.cacheConf);
+    HStoreFile storeFile = new HStoreFile(info, getColumnFamilyDescriptor().getBloomFilterType(),
+      getCacheConfig());
     storeFile.initReader();
     return storeFile;
   }
@@ -818,7 +808,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
     LOG.info("Validating hfile at " + srcPath + " for inclusion in " + this);
     FileSystem srcFs = srcPath.getFileSystem(conf);
     srcFs.access(srcPath, FsAction.READ_WRITE);
-    reader = HFile.createReader(srcFs, srcPath, cacheConf, isPrimaryReplicaStore(), conf);
+    reader = HFile.createReader(srcFs, srcPath, getCacheConfig(), isPrimaryReplicaStore(), conf);

     Optional<byte[]> firstKey = reader.getFirstRowKey();
     Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
@@ -855,7 +845,7 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
       do {
         Cell cell = scanner.getCell();
         if (prevCell != null) {
-          if (comparator.compareRows(prevCell, cell) > 0) {
+          if (getComparator().compareRows(prevCell, cell) > 0) {
             throw new InvalidHFileException("Previous row is greater than"
                 + " current row: path=" + srcPath + " previous="
                 + CellUtil.getCellKeyAsString(prevCell) + " current="
@@ -892,13 +882,13 @@ public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
    */
   public Pair<Path, Path> preBulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
     Path srcPath = new Path(srcPathStr);
-    return fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);
+    return getRegionFileSystem().bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);
   }

   public Path bulkLoadHFile(byte[] family, String srcPathStr, Path dstPath) throws IOException {
     Path srcPath = new Path(srcPathStr);
     try {
-      fs.commitStoreFile(srcPath, dstPath);
+      getRegionFileSystem().commitStoreFile(srcPath, dstPath);
     } finally {
       if (this.getCoprocessorHost() != null) {
         this.getCoprocessorHost().postCommitStoreFile(family, srcPath, dstPath);
@@ -964,8 +954,8 @@ public ImmutableCollection<HStoreFile> close() throws IOException {
         storeEngine.getStoreFileManager().clearCompactedFiles();
       // clear the compacted files
       if (CollectionUtils.isNotEmpty(compactedfiles)) {
-        removeCompactedfiles(compactedfiles, cacheConf != null ?
-          cacheConf.shouldEvictOnClose() : true);
+        removeCompactedfiles(compactedfiles, getCacheConfig() != null ?
+          getCacheConfig().shouldEvictOnClose() : true);
       }
       if (!result.isEmpty()) {
         // initialize the thread pool for closing store files in parallel.
@@ -981,7 +971,7 @@ public ImmutableCollection<HStoreFile> close() throws IOException {
             @Override
             public Void call() throws IOException {
               boolean evictOnClose =
-                  cacheConf != null? cacheConf.shouldEvictOnClose(): true;
+                  getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true;
               f.closeStoreFile(evictOnClose);
               return null;
             }
@@ -1092,7 +1082,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
     FileSystem srcFs = path.getFileSystem(conf);
     srcFs.access(path, FsAction.READ_WRITE);
     try (HFile.Reader reader =
-        HFile.createReader(srcFs, path, cacheConf, isPrimaryReplicaStore(), conf)) {
+        HFile.createReader(srcFs, path, getCacheConfig(), isPrimaryReplicaStore(), conf)) {
       Optional<byte[]> firstKey = reader.getFirstRowKey();
       Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
       Optional<Cell> lk = reader.getLastKey();
@@ -1104,7 +1094,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
       }
     }

-    Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
+    Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path);
     HStoreFile sf = createStoreFileAndReader(dstPath);
     StoreFileReader r = sf.getReader();
     this.storeSize.addAndGet(r.length());
@@ -1129,7 +1119,7 @@ public HStoreFile tryCommitRecoveredHFile(Path path) throws IOException {
   private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
       throws IOException {
     // Write-out finished successfully, move into the right spot
-    Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);
+    Path dstPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), path);

     status.setStatus("Flushing " + this + ": reopening flushed file");
     HStoreFile sf = createStoreFileAndReader(dstPath);
@@ -1167,12 +1157,13 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm
       boolean shouldDropBehind, long totalCompactedFilesSize, String fileStoragePolicy)
       throws IOException {
     // creating new cache config for each new writer
+    final CacheConfig cacheConf = getCacheConfig();
     final CacheConfig writerCacheConf = new CacheConfig(cacheConf);
     if (isCompaction) {
       // Don't cache data on write on compactions, unless specifically configured to do so
       // Cache only when total file size remains lower than configured threshold
       final boolean cacheCompactedBlocksOnWrite =
-        cacheConf.shouldCacheCompactedBlocksOnWrite();
+        getCacheConfig().shouldCacheCompactedBlocksOnWrite();
       // if data blocks are to be cached on write
       // during compaction, we should forcefully
       // cache index and bloom blocks as well
@@ -1206,53 +1197,48 @@ public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm
         }
       }
     }
-    InetSocketAddress[] favoredNodes = null;
-    if (region.getRegionServerServices() != null) {
-      favoredNodes = region.getRegionServerServices().getFavoredNodesForRegion(
-          region.getRegionInfo().getEncodedName());
-    }
+    Encryption.Context encryptionContext = storeContext.getEncryptionContext();
     HFileContext hFileContext = createFileContext(compression, includeMVCCReadpoint, includesTag,
-      cryptoContext);
-    Path familyTempDir = new Path(fs.getTempDir(), family.getNameAsString());
-    StoreFileWriter.Builder builder = new StoreFileWriter.Builder(conf, writerCacheConf,
-        this.getFileSystem())
-            .withOutputDir(familyTempDir)
-            .withBloomType(family.getBloomFilterType())
-            .withMaxKeyCount(maxKeyCount)
-            .withFavoredNodes(favoredNodes)
-            .withFileContext(hFileContext)
-            .withShouldDropCacheBehind(shouldDropBehind)
-            .withCompactedFilesSupplier(this::getCompactedFiles)
-            .withFileStoragePolicy(fileStoragePolicy);
+      encryptionContext);
+    Path familyTempDir = new Path(getRegionFileSystem().getTempDir(), getColumnFamilyName());
+    StoreFileWriter.Builder builder =
+      new StoreFileWriter.Builder(conf, writerCacheConf, getFileSystem())
+        .withOutputDir(familyTempDir)
+        .withBloomType(storeContext.getBloomFilterType())
+        .withMaxKeyCount(maxKeyCount)
+        .withFavoredNodes(storeContext.getFavoredNodes())
+        .withFileContext(hFileContext)
+        .withShouldDropCacheBehind(shouldDropBehind)
+        .withCompactedFilesSupplier(storeContext.getCompactedFilesSupplier())
+        .withFileStoragePolicy(fileStoragePolicy);
     return builder.build();
   }

   private HFileContext createFileContext(Compression.Algorithm compression,
-      boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context cryptoContext) {
+      boolean includeMVCCReadpoint, boolean includesTag, Encryption.Context encryptionContext) {
     if (compression == null) {
       compression = HFile.DEFAULT_COMPRESSION_ALGORITHM;
     }
+    ColumnFamilyDescriptor family = getColumnFamilyDescriptor();
     HFileContext hFileContext = new HFileContextBuilder()
-        .withIncludesMvcc(includeMVCCReadpoint)
-        .withIncludesTags(includesTag)
-        .withCompression(compression)
-        .withCompressTags(family.isCompressTags())
-        .withChecksumType(checksumType)
-        .withBytesPerCheckSum(bytesPerChecksum)
-        .withBlockSize(blocksize)
-        .withHBaseCheckSum(true)
-        .withDataBlockEncoding(family.getDataBlockEncoding())
-        .withEncryptionContext(cryptoContext)
-        .withCreateTime(EnvironmentEdgeManager.currentTime())
-        .withColumnFamily(family.getName())
-        .withTableName(region.getTableDescriptor()
-            .getTableName().getName())
-        .withCellComparator(this.comparator)
-        .build();
+      .withIncludesMvcc(includeMVCCReadpoint)
+      .withIncludesTags(includesTag)
+      .withCompression(compression)
+      .withCompressTags(family.isCompressTags())
+      .withChecksumType(StoreUtils.getChecksumType(conf))
+      .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
+      .withBlockSize(family.getBlocksize())
+      .withHBaseCheckSum(true)
+      .withDataBlockEncoding(family.getDataBlockEncoding())
+      .withEncryptionContext(encryptionContext)
+      .withCreateTime(EnvironmentEdgeManager.currentTime())
+      .withColumnFamily(getColumnFamilyDescriptor().getName())
+      .withTableName(getTableName().getName())
+      .withCellComparator(getComparator())
+      .build();
     return hFileContext;
   }
-
   private long getTotalSize(Collection<HStoreFile> sfs) {
     return sfs.stream().mapToLong(sf -> sf.getReader().length()).sum();
   }
@@ -1529,7 +1515,7 @@ public List<HStoreFile> compact(CompactionContext compaction,
     // Ready to go. Have list of files to compact.
LOG.info("Starting compaction of " + filesToCompact + - " into tmpdir=" + fs.getTempDir() + ", totalSize=" + + " into tmpdir=" + getRegionFileSystem().getTempDir() + ", totalSize=" + TraditionalBinaryPrefix.long2String(cr.getSize(), "", 1)); return doCompaction(cr, filesToCompact, user, compactionStartTime, @@ -1579,7 +1565,7 @@ private void setStoragePolicyFromFileName(List newFiles) throws IOExceptio String prefix = HConstants.STORAGE_POLICY_PREFIX; for (Path newFile : newFiles) { if (newFile.getParent().getName().startsWith(prefix)) { - CommonFSUtils.setStoragePolicy(fs.getFileSystem(), newFile, + CommonFSUtils.setStoragePolicy(getRegionFileSystem().getFileSystem(), newFile, newFile.getParent().getName().substring(prefix.length())); } } @@ -1604,7 +1590,7 @@ private List moveCompactedFilesIntoPlace(CompactionRequestImpl cr, HStoreFile moveFileIntoPlace(Path newFile) throws IOException { validateStoreFile(newFile); // Move the file into the right spot - Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile); + Path destPath = getRegionFileSystem().commitStoreFile(getColumnFamilyName(), newFile); return createStoreFileAndReader(destPath); } @@ -1624,8 +1610,8 @@ private void writeCompactionWalRecord(Collection filesCompacted, newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList()); RegionInfo info = this.region.getRegionInfo(); CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info, - family.getName(), inputPaths, outputPaths, - fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString())); + getColumnFamilyDescriptor().getName(), inputPaths, outputPaths, + getRegionFileSystem().getStoreDir(getColumnFamilyDescriptor().getNameAsString())); // Fix reaching into Region to get the maxWaitForSeqId. // Does this method belong in Region altogether given it is making so many references up there? // Could be Region#writeCompactionMarker(compactionDescriptor); @@ -1752,7 +1738,7 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick String familyName = this.getColumnFamilyName(); Set inputFiles = new HashSet<>(); for (String compactionInput : compactionInputs) { - Path inputPath = fs.getStoreFilePath(familyName, compactionInput); + Path inputPath = getRegionFileSystem().getStoreFilePath(familyName, compactionInput); inputFiles.add(inputPath.getName()); } @@ -1772,7 +1758,8 @@ public void replayCompactionMarker(CompactionDescriptor compaction, boolean pick compactionOutputs.remove(sf.getPath().getName()); } for (String compactionOutput : compactionOutputs) { - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), compactionOutput); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); outputStoreFiles.add(storeFile); } @@ -2092,7 +2079,7 @@ int versionsToReturn(final int wantedVersions) { throw new IllegalArgumentException("Number of versions must be > 0"); } // Make sure we do not return more than maximum versions for this store. - int maxVersions = this.family.getMaxVersions(); + int maxVersions = getColumnFamilyDescriptor().getMaxVersions(); return wantedVersions > maxVersions ? 
maxVersions: wantedVersions; } @@ -2367,7 +2354,7 @@ public RegionCoprocessorHost getCoprocessorHost() { @Override public RegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return getRegionFileSystem().getRegionInfo(); } @Override @@ -2509,7 +2496,8 @@ public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) List storeFiles = new ArrayList<>(fileNames.size()); for (String file : fileNames) { // open the file as a store file (hfile link, etc) - StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFileInfo storeFileInfo = + getRegionFileSystem().getStoreFileInfo(getColumnFamilyName(), file); HStoreFile storeFile = createStoreFileAndReader(storeFileInfo); storeFiles.add(storeFile); HStore.this.storeSize.addAndGet(storeFile.getReader().length()); @@ -2559,7 +2547,7 @@ public boolean needsCompaction() { * @return cache configuration for this Store. */ public CacheConfig getCacheConfig() { - return this.cacheConf; + return storeContext.getCacheConf(); } public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); @@ -2573,12 +2561,12 @@ public CacheConfig getCacheConfig() { @Override public long heapSize() { MemStoreSize memstoreSize = this.memstore.size(); - return DEEP_OVERHEAD + memstoreSize.getHeapSize(); + return DEEP_OVERHEAD + memstoreSize.getHeapSize() + storeContext.heapSize(); } @Override public CellComparator getComparator() { - return comparator; + return storeContext.getComparator(); } public ScanInfo getScanInfo() { @@ -2652,7 +2640,7 @@ protected OffPeakHours getOffPeakHours() { public void onConfigurationChange(Configuration conf) { this.conf = new CompoundConfiguration() .add(conf) - .addBytesMap(family.getValues()); + .addBytesMap(getColumnFamilyDescriptor().getValues()); this.storeEngine.compactionPolicy.setConf(conf); this.offPeakHours = OffPeakHours.getInstance(conf); } @@ -2784,8 +2772,8 @@ private void removeCompactedfiles(Collection compactedfiles, boolean LOG.debug("Moving the files {} to archive", filesToRemove); // Only if this is successful it has to be removed try { - this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), - filesToRemove); + getRegionFileSystem() + .removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove); } catch (FailedArchiveException fae) { // Even if archiving some files failed, we still need to clear out any of the // files which were successfully archived. Otherwise we will receive a diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java new file mode 100644 index 000000000000..26233505db73 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreContext.java @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.function.Supplier; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.util.ClassSize; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * This carries the immutable information and references on some of the meta data about the HStore. + * This meta data can be used across the HFileWriter/Readers and other HStore consumers without the + * need of passing around the complete store. + */ +@InterfaceAudience.Private +public final class StoreContext implements HeapSize { + public static final long FIXED_OVERHEAD = ClassSize.estimateBase(HStore.class, false); + + private final int blockSize; + private final Encryption.Context encryptionContext; + private final CacheConfig cacheConf; + private final HRegionFileSystem regionFileSystem; + private final CellComparator comparator; + private final BloomType bloomFilterType; + private final Supplier> compactedFilesSupplier; + private final Supplier favoredNodesSupplier; + private final ColumnFamilyDescriptor family; + private final Path familyStoreDirectoryPath; + private final RegionCoprocessorHost coprocessorHost; + + private StoreContext(Builder builder) { + this.blockSize = builder.blockSize; + this.encryptionContext = builder.encryptionContext; + this.cacheConf = builder.cacheConf; + this.regionFileSystem = builder.regionFileSystem; + this.comparator = builder.comparator; + this.bloomFilterType = builder.bloomFilterType; + this.compactedFilesSupplier = builder.compactedFilesSupplier; + this.favoredNodesSupplier = builder.favoredNodesSupplier; + this.family = builder.family; + this.familyStoreDirectoryPath = builder.familyStoreDirectoryPath; + this.coprocessorHost = builder.coprocessorHost; + } + + public int getBlockSize() { + return blockSize; + } + + public Encryption.Context getEncryptionContext() { + return encryptionContext; + } + + public CacheConfig getCacheConf() { + return cacheConf; + } + + public HRegionFileSystem getRegionFileSystem() { + return regionFileSystem; + } + + public CellComparator getComparator() { + return comparator; + } + + public BloomType getBloomFilterType() { + return bloomFilterType; + } + + public Supplier> getCompactedFilesSupplier() { + return compactedFilesSupplier; + } + + public InetSocketAddress[] getFavoredNodes() { + return favoredNodesSupplier.get(); + } + + public ColumnFamilyDescriptor getFamily() { + return family; + } + + public Path getFamilyStoreDirectoryPath() { + return familyStoreDirectoryPath; + } + + public RegionCoprocessorHost getCoprocessorHost() { + return coprocessorHost; + } + + public static Builder getBuilder() { + return new Builder(); + } + + @Override + public long heapSize() { + return FIXED_OVERHEAD; + } + + public static class Builder { + private 
int blockSize; + private Encryption.Context encryptionContext; + private CacheConfig cacheConf; + private HRegionFileSystem regionFileSystem; + private CellComparator comparator; + private BloomType bloomFilterType; + private Supplier> compactedFilesSupplier; + private Supplier favoredNodesSupplier; + private ColumnFamilyDescriptor family; + private Path familyStoreDirectoryPath; + private RegionCoprocessorHost coprocessorHost; + + public Builder withBlockSize(int blockSize) { + this.blockSize = blockSize; + return this; + } + + public Builder withEncryptionContext(Encryption.Context encryptionContext) { + this.encryptionContext = encryptionContext; + return this; + } + + public Builder withCacheConfig(CacheConfig cacheConf) { + this.cacheConf = cacheConf; + return this; + } + + public Builder withRegionFileSystem(HRegionFileSystem regionFileSystem) { + this.regionFileSystem = regionFileSystem; + return this; + } + + public Builder withCellComparator(CellComparator comparator) { + this.comparator = comparator; + return this; + } + + public Builder withBloomType(BloomType bloomFilterType) { + this.bloomFilterType = bloomFilterType; + return this; + } + + public Builder withCompactedFilesSupplier(Supplier> + compactedFilesSupplier) { + this.compactedFilesSupplier = compactedFilesSupplier; + return this; + } + + public Builder withFavoredNodesSupplier(Supplier favoredNodesSupplier) { + this.favoredNodesSupplier = favoredNodesSupplier; + return this; + } + + public Builder withColumnFamilyDescriptor(ColumnFamilyDescriptor family) { + this.family = family; + return this; + } + + public Builder withFamilyStoreDirectoryPath(Path familyStoreDirectoryPath) { + this.familyStoreDirectoryPath = familyStoreDirectoryPath; + return this; + } + + public Builder withRegionCoprocessorHost(RegionCoprocessorHost coprocessorHost) { + this.coprocessorHost = coprocessorHost; + return this; + } + + public StoreContext build() { + return new StoreContext(this); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java index 0e4f6c2bb8a4..ac5955feca7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java @@ -24,9 +24,13 @@ import java.util.OptionalInt; import java.util.OptionalLong; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -136,4 +140,25 @@ static Optional getSplitPoint(Collection storefiles, return largestFile.isPresent() ? StoreUtils.getFileSplitPoint(largestFile.get(), comparator) : Optional.empty(); } + + /** + * Returns the configured checksum algorithm. + * @param conf The configuration + * @return The checksum algorithm that is set in the configuration + */ + public static ChecksumType getChecksumType(Configuration conf) { + return ChecksumType.nameToType( + conf.get(HConstants.CHECKSUM_TYPE_NAME, ChecksumType.getDefaultChecksumType().getName())); + } + + /** + * Returns the configured bytesPerChecksum value. 
+   * @param conf The configuration
+   * @return The bytesPerChecksum that is set in the configuration
+   */
+  public static int getBytesPerChecksum(Configuration conf) {
+    return conf.getInt(HConstants.BYTES_PER_CHECKSUM,
+      HFile.DEFAULT_BYTES_PER_CHECKSUM);
+  }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
index ec9a59c7bf5a..b0b086e145a9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/BulkLoadHFilesTool.java
@@ -83,9 +83,9 @@
 import org.apache.hadoop.hbase.io.hfile.ReaderContext;
 import org.apache.hadoop.hbase.io.hfile.ReaderContextBuilder;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -749,8 +749,8 @@ private static void copyHFileHalf(Configuration conf, Path inFile, Path outFile,
     Algorithm compression = familyDescriptor.getCompressionType();
     BloomType bloomFilterType = familyDescriptor.getBloomFilterType();
     HFileContext hFileContext = new HFileContextBuilder().withCompression(compression)
-      .withChecksumType(HStore.getChecksumType(conf))
-      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf)).withBlockSize(blocksize)
+      .withChecksumType(StoreUtils.getChecksumType(conf))
+      .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blocksize)
       .withDataBlockEncoding(familyDescriptor.getDataBlockEncoding()).withIncludesTags(true)
       .build();
     halfWriter = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(outFile)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
index 9623bd1c7220..50bc5fe62fb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/BoundedRecoveredHFilesOutputSink.java
@@ -39,8 +39,8 @@
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.CellSet;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.EntryBuffers.RegionEntryBuffer;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
@@ -199,8 +199,8 @@ private StoreFileWriter createRecoveredHFileWriter(TableName tableName, String r
       new StoreFileWriter.Builder(walSplitter.conf, CacheConfig.DISABLED, walSplitter.rootFS)
         .withOutputDir(outputDir);
     HFileContext hFileContext = new HFileContextBuilder().
-      withChecksumType(HStore.getChecksumType(walSplitter.conf)).
-      withBytesPerCheckSum(HStore.getBytesPerChecksum(walSplitter.conf)).
+      withChecksumType(StoreUtils.getChecksumType(walSplitter.conf)).
+      withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(walSplitter.conf)).
       withCellComparator(isMetaTable?
        MetaCellComparator.META_COMPARATOR: CellComparatorImpl.COMPARATOR).build();
     return writerBuilder.withFileContext(hFileContext).build();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
index 3d713052559e..3f326a30cfdf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hbase.regionserver.MemStoreCompactor;
 import org.apache.hadoop.hbase.regionserver.MutableSegment;
 import org.apache.hadoop.hbase.regionserver.Segment;
+import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.NonSyncTimeRangeTracker;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker.SyncTimeRangeTracker;
 import org.apache.hadoop.hbase.regionserver.throttle.StoreHotnessProtector;
@@ -606,7 +607,7 @@ public void testObjectSize() throws IOException {
   @Test
   public void testAutoCalcFixedOverHead() {
     Class<?>[] classList = new Class<?>[] { HFileContext.class, HRegion.class, BlockCacheKey.class,
-      HFileBlock.class, HStore.class, LruBlockCache.class };
+      HFileBlock.class, HStore.class, LruBlockCache.class, StoreContext.class };
     for (Class<?> cl : classList) {
       // do estimate in advance to ensure class is loaded
       ClassSize.estimateBase(cl, false);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index fcbc718296ae..bd1715b1ef52 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -7657,7 +7657,7 @@ protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
           LOG.warn("hbase.hstore.compaction.complete is set to false");
           List<HStoreFile> sfs = new ArrayList<>(newFiles.size());
           final boolean evictOnClose =
-            cacheConf != null? cacheConf.shouldEvictOnClose(): true;
+            getCacheConfig() != null? getCacheConfig().shouldEvictOnClose(): true;
           for (Path newFile : newFiles) {
             // Create storefile around what we wrote with a reader on it.
             HStoreFile sf = createStoreFileAndReader(newFile);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
index 12cf57671f9c..88f201efff6f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSecureBulkLoadManager.java
@@ -221,8 +221,8 @@ private void prepareHFile(Path dir, byte[] key, byte[] value) throws Exception {
       .withIncludesTags(true)
       .withCompression(compression)
       .withCompressTags(family.isCompressTags())
-      .withChecksumType(HStore.getChecksumType(conf))
-      .withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
+      .withChecksumType(StoreUtils.getChecksumType(conf))
+      .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
      .withBlockSize(family.getBlocksize())
      .withHBaseCheckSum(true)
      .withDataBlockEncoding(family.getDataBlockEncoding())
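
Illustrative usage (reviewer's sketch, not part of the patch): after this change, code outside HStore can derive checksum settings from configuration via StoreUtils and carry store-level metadata in a single StoreContext built through its builder. The snippet below assumes a Configuration (conf), a ColumnFamilyDescriptor (family), a CacheConfig (cacheConf), a CellComparator (comparator) and an HRegionFileSystem (regionFileSystem) are already in scope; the variable names are hypothetical.

  // Checksum settings now come from StoreUtils instead of static helpers on HStore.
  HFileContext hFileContext = new HFileContextBuilder()
    .withChecksumType(StoreUtils.getChecksumType(conf))
    .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf))
    .withBlockSize(family.getBlocksize())
    .build();

  // Immutable per-store metadata travels as one StoreContext instead of loose HStore fields.
  StoreContext storeContext = new StoreContext.Builder()
    .withBlockSize(family.getBlocksize())
    .withCellComparator(comparator)
    .withColumnFamilyDescriptor(family)
    .withBloomType(family.getBloomFilterType())
    .withCacheConfig(cacheConf)
    .withRegionFileSystem(regionFileSystem)
    .build();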