HBASE-26280 Use store file tracker when snapshoting (apache#3685)
Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
Reviewed-by: Josh Elser <elserj@apache.org>
Apache9 authored and apurtell committed Mar 26, 2022
1 parent 9d5dd03 commit 1e127f6
Showing 11 changed files with 109 additions and 111 deletions.
@@ -613,9 +613,8 @@ private List<Path> mergeStoreFiles(MasterProcedureEnv env, HRegionFileSystem reg
     List<Path> mergedFiles = new ArrayList<>();
     for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
       String family = hcd.getNameAsString();
-      Configuration trackerConfig =
-        StoreFileTrackerFactory.mergeConfigurations(env.getMasterConfiguration(), htd, hcd);
-      StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+      StoreFileTracker tracker =
+        StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, hcd, regionFs);
       final Collection<StoreFileInfo> storeFiles = tracker.load();
       if (storeFiles != null && storeFiles.size() > 0) {
         final Configuration storeConfiguration =
@@ -668,9 +668,8 @@ private Pair<List<Path>, List<Path>> splitStoreFiles(final MasterProcedureEnv en
       new HashMap<String, Collection<StoreFileInfo>>(htd.getColumnFamilyCount());
     for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
       String family = cfd.getNameAsString();
-      Configuration trackerConfig = StoreFileTrackerFactory.
-        mergeConfigurations(env.getMasterConfiguration(), htd, htd.getColumnFamily(cfd.getName()));
-      StoreFileTracker tracker = StoreFileTrackerFactory.create(trackerConfig, family, regionFs);
+      StoreFileTracker tracker =
+        StoreFileTrackerFactory.create(env.getMasterConfiguration(), htd, cfd, regionFs);
       Collection<StoreFileInfo> sfis = tracker.load();
       if (sfis == null) {
         continue;
@@ -598,7 +598,6 @@ void cleanupDaughterRegion(final RegionInfo regionInfo) throws IOException {
    * to the proper location in the filesystem.
    *
    * @param regionInfo daughter {@link org.apache.hadoop.hbase.client.RegionInfo}
-   * @throws IOException
    */
   public Path commitDaughterRegion(final RegionInfo regionInfo, List<Path> allRegionFiles,
       MasterProcedureEnv env) throws IOException {
@@ -625,12 +624,8 @@ private void insertRegionFilesIntoStoreTracker(List<Path> allFiles, MasterProced
     Map<String, List<StoreFileInfo>> fileInfoMap = new HashMap<>();
     for(Path file : allFiles) {
       String familyName = file.getParent().getName();
-      trackerMap.computeIfAbsent(familyName, t -> {
-        Configuration config = StoreFileTrackerFactory.mergeConfigurations(conf, tblDesc,
-          tblDesc.getColumnFamily(Bytes.toBytes(familyName)));
-        return StoreFileTrackerFactory.
-          create(config, familyName, regionFs);
-      });
+      trackerMap.computeIfAbsent(familyName, t -> StoreFileTrackerFactory.create(conf, tblDesc,
+        tblDesc.getColumnFamily(Bytes.toBytes(familyName)), regionFs));
       fileInfoMap.computeIfAbsent(familyName, l -> new ArrayList<>());
       List<StoreFileInfo> infos = fileInfoMap.get(familyName);
       infos.add(new StoreFileInfo(conf, fs, file, true));
@@ -676,7 +671,6 @@ public void createSplitsDir(RegionInfo daughterA, RegionInfo daughterB) throws I
    * this method is invoked on the Master side, then the RegionSplitPolicy will
    * NOT have a reference to a Region.
    * @return Path to created reference.
-   * @throws IOException
    */
   public Path splitStoreFile(RegionInfo hri, String familyName, HStoreFile f, byte[] splitRow,
       boolean top, RegionSplitPolicy splitPolicy) throws IOException {
@@ -22,13 +22,11 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
-import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreContext;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -113,16 +111,15 @@ public static StoreFileTracker create(Configuration conf, boolean isPrimaryRepli
    * Used at master side when splitting/merging regions, as we do not have a Store, thus no
    * StoreContext at master side.
    */
-  public static StoreFileTracker create(Configuration conf, String family,
-      HRegionFileSystem regionFs) {
-    ColumnFamilyDescriptorBuilder fDescBuilder =
-      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family));
-    StoreContext ctx = StoreContext.getBuilder().withColumnFamilyDescriptor(fDescBuilder.build())
-      .withRegionFileSystem(regionFs).build();
-    return StoreFileTrackerFactory.create(conf, true, ctx);
+  public static StoreFileTracker create(Configuration conf, TableDescriptor td,
+      ColumnFamilyDescriptor cfd, HRegionFileSystem regionFs) {
+    StoreContext ctx =
+      StoreContext.getBuilder().withColumnFamilyDescriptor(cfd).withRegionFileSystem(regionFs)
+        .withFamilyStoreDirectoryPath(regionFs.getStoreDir(cfd.getNameAsString())).build();
+    return StoreFileTrackerFactory.create(mergeConfigurations(conf, td, cfd), true, ctx);
   }
 
-  public static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
+  private static Configuration mergeConfigurations(Configuration global, TableDescriptor table,
       ColumnFamilyDescriptor family) {
     return StoreUtils.createStoreConfiguration(global, table, family);
   }
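
Taken together, the hunk above replaces the old two-step pattern — callers merged configurations themselves and passed a bare family name — with one overload that takes the TableDescriptor and ColumnFamilyDescriptor, merges the configuration internally, and records the family's store directory on the StoreContext. A minimal sketch of the new master-side calling pattern; the class, the helper name, and the rootFs/tableDir parameters are illustrative, not part of this commit:

import java.io.IOException;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public class MasterSideTrackerSketch {
  // Load the committed store files of one column family without a Store instance,
  // mirroring what the merge/split procedures do after this commit.
  static Collection<StoreFileInfo> loadStoreFiles(Configuration conf, FileSystem rootFs,
      Path tableDir, RegionInfo region, TableDescriptor td, ColumnFamilyDescriptor cfd)
      throws IOException {
    // Open the region layout read-only (last argument), as the snapshot code below does.
    HRegionFileSystem regionFs =
        HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, tableDir, region, true);
    // The factory builds the StoreContext and merges table/family configuration internally.
    StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, td, cfd, regionFs);
    return tracker.load();
  }
}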
@@ -47,7 +47,8 @@
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.Threads;
@@ -291,17 +292,17 @@ public void addRegion(final Path tableDir, final RegionInfo regionInfo) throws I
     addRegion(tableDir, regionInfo, visitor);
   }
 
-  protected void addRegion(final Path tableDir, final RegionInfo regionInfo, RegionVisitor visitor)
-      throws IOException {
+  protected void addRegion(Path tableDir, RegionInfo regionInfo, RegionVisitor visitor)
+    throws IOException {
     boolean isMobRegion = MobUtils.isMobRegionInfo(regionInfo);
     try {
       Path baseDir = tableDir;
       // Open the RegionFS
       if (isMobRegion) {
         baseDir = CommonFSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
       }
-      HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
-        baseDir, regionInfo, true);
+      HRegionFileSystem regionFs =
+        HRegionFileSystem.openRegionFromFileSystem(conf, rootFs, baseDir, regionInfo, true);
       monitor.rethrowException();
 
       // 1. dump region meta info into the snapshot directory
@@ -317,26 +318,19 @@ protected void addRegion(final Path tableDir, final RegionInfo regionInfo, Regio
       // in batches and may miss files being added/deleted. This could be more robust (iteratively
       // checking to see if we have all the files until we are sure), but the limit is currently
       // 1000 files/batch, far more than the number of store files under a single column family.
-      Collection<String> familyNames = regionFs.getFamilies();
-      if (familyNames != null) {
-        for (String familyName: familyNames) {
-          Object familyData = visitor.familyOpen(regionData, Bytes.toBytes(familyName));
-          monitor.rethrowException();
-
-          Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
-          if (storeFiles == null) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("No files under family: " + familyName);
-            }
-            continue;
-          }
-
-          // 2.1. build the snapshot reference for the store
-          // iterate through all the store's files and create "references".
-          addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
-
-          visitor.familyClose(regionData, familyData);
-        }
+      for (ColumnFamilyDescriptor cfd : htd.getColumnFamilies()) {
+        Object familyData = visitor.familyOpen(regionData, cfd.getName());
+        monitor.rethrowException();
+        StoreFileTracker tracker = StoreFileTrackerFactory.create(conf, htd, cfd, regionFs);
+        List<StoreFileInfo> storeFiles = tracker.load();
+        if (storeFiles.isEmpty()) {
+          LOG.debug("No files under family: {}", cfd.getNameAsString());
+          continue;
+        }
+        // 2.1. build the snapshot reference for the store
+        // iterate through all the store's files and create "references".
+        addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
+        visitor.familyClose(regionData, familyData);
       }
       visitor.regionClose(regionData);
     } catch (IOException e) {
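
This hunk is the heart of HBASE-26280: the snapshot manifest now derives families from the table descriptor and asks the store file tracker for committed files, rather than listing family directories (tracker.load() yields an empty list where regionFs.getStoreFiles(familyName) could return null, which is why the null checks disappear). End to end, a table whose descriptor pins a tracker implementation is now snapshotted through that tracker. A hedged sketch of that flow against the client API, assuming a running cluster; the table and snapshot names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;

public class SnapshotWithTrackerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Pin the FILE tracker on the table; snapshots of it will go through that
      // tracker's view of committed store files instead of a directory listing.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
          .setValue(StoreFileTrackerFactory.TRACKER_IMPL,
              StoreFileTrackerFactory.Trackers.FILE.name())
          .build();
      admin.createTable(td);
      admin.snapshot("demo-snap", td.getTableName());
    }
  }
}

The parameterized tests further down exercise exactly this matrix, running each snapshot test once with Trackers.DEFAULT and once with Trackers.FILE.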
@@ -30,6 +30,7 @@
 import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -92,7 +93,8 @@ public static void setUpBeforeClass() throws Exception {
   @Override
   protected void createTable() throws IOException, InterruptedException {
     MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName,
-      SnapshotTestingUtils.getSplitKeys(), getNumReplicas(), DelayFlushCoprocessor.class.getName(),
+      SnapshotTestingUtils.getSplitKeys(), getNumReplicas(),
+      StoreFileTrackerFactory.Trackers.DEFAULT.name(), DelayFlushCoprocessor.class.getName(),
       FAMILY);
   }
 
@@ -26,8 +26,6 @@
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Test create/using/deleting snapshots from the client
@@ -41,8 +39,6 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestMobSnapshotFromClient.class);
 
-  private static final Logger LOG = LoggerFactory.getLogger(TestMobSnapshotFromClient.class);
-
   /**
    * Setup the config for the cluster
    * @throws Exception on failure
@@ -60,6 +56,7 @@ protected static void setupConf(Configuration conf) {
 
   @Override
   protected void createTable() throws Exception {
-    MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), TEST_FAM);
+    MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), trackerImpl.name(),
+      TEST_FAM);
   }
 }
@@ -23,6 +23,7 @@
 import static org.junit.Assert.fail;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.regex.Pattern;
 import org.apache.hadoop.conf.Configuration;
@@ -33,9 +34,11 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
@@ -52,7 +55,10 @@
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -65,7 +71,8 @@
  * <p>
  * This is an end-to-end test for the snapshot utility
  */
-@Category({LargeTests.class, ClientTests.class})
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
 public class TestSnapshotFromClient {
 
   @ClassRule
@@ -83,7 +90,16 @@ public class TestSnapshotFromClient {
   private static final Pattern MATCH_ALL = Pattern.compile(".*");
 
   @Rule
-  public TestName name = new TestName();
+  public TableNameTestRule name = new TableNameTestRule();
+
+  @Parameter
+  public StoreFileTrackerFactory.Trackers trackerImpl;
+
+  @Parameters(name = "{index}: tracker={0}")
+  public static List<Object[]> params() {
+    return Arrays.asList(new Object[] { StoreFileTrackerFactory.Trackers.DEFAULT },
+      new Object[] { StoreFileTrackerFactory.Trackers.FILE });
+  }
 
   /**
    * Setup the config for the cluster
@@ -110,7 +126,6 @@ protected static void setupConf(Configuration conf) {
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-
   }
 
   @Before
@@ -119,9 +134,10 @@ public void setup() throws Exception {
   }
 
   protected void createTable() throws Exception {
-    HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
-    htd.setRegionReplication(getNumReplicas());
-    UTIL.createTable(htd, new byte[][]{TEST_FAM}, null);
+    TableDescriptor htd =
+      TableDescriptorBuilder.newBuilder(TABLE_NAME).setRegionReplication(getNumReplicas())
+        .setValue(StoreFileTrackerFactory.TRACKER_IMPL, trackerImpl.name()).build();
+    UTIL.createTable(htd, new byte[][] { TEST_FAM }, null);
   }
 
   protected int getNumReplicas() {
@@ -326,7 +342,7 @@ public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
   @Test
   public void testListTableSnapshots() throws Exception {
     Admin admin = null;
-    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final TableName tableName = name.getTableName();
     try {
       admin = UTIL.getAdmin();
 
@@ -411,7 +427,7 @@ public void testListTableSnapshotsWithRegex() throws Exception {
   @Test
   public void testDeleteTableSnapshots() throws Exception {
     Admin admin = null;
-    final TableName tableName = TableName.valueOf(name.getMethodName());
+    final TableName tableName = name.getTableName();
     try {
       admin = UTIL.getAdmin();
 
@@ -59,6 +59,7 @@
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1069,10 +1070,9 @@ private Path splitStoreFile(final HRegionFileSystem regionFs, final RegionInfo h
     when(mockEnv.getMasterConfiguration()).thenReturn(new Configuration());
     TableDescriptors mockTblDescs = mock(TableDescriptors.class);
     when(mockServices.getTableDescriptors()).thenReturn(mockTblDescs);
-    TableDescriptor mockTblDesc = mock(TableDescriptor.class);
+    TableDescriptor mockTblDesc = TableDescriptorBuilder.newBuilder(hri.getTable())
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
     when(mockTblDescs.get(any())).thenReturn(mockTblDesc);
-    ColumnFamilyDescriptor mockCfDesc = mock(ColumnFamilyDescriptor.class);
-    when(mockTblDesc.getColumnFamily(any())).thenReturn(mockCfDesc);
     Path regionDir = regionFs.commitDaughterRegion(hri, splitFiles, mockEnv);
     return new Path(new Path(regionDir, family), path.getName());
   }
