Skip to content

Commit

Permalink
HBASE-25445: Use WAL FS instead of master FS in SplitWALManager (#2844)
Browse files Browse the repository at this point in the history
Signed-off-by: Pankaj <pankajkumar@apache.org>
Signed-off-by: ramkrish86 <ramkrishna@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
  • Loading branch information
dasanjan1296 authored Jan 7, 2021
1 parent a414361 commit 4b62152
Show file tree
Hide file tree
Showing 2 changed files with 65 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -85,8 +85,7 @@ public SplitWALManager(MasterServices master) throws IOException {
this.splitWorkerAssigner = new SplitWorkerAssigner(this.master,
conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
this.rootDir = master.getMasterFileSystem().getWALRootDir();
// TODO: This should be the WAL FS, not the Master FS?
this.fs = master.getMasterFileSystem().getFileSystem();
this.fs = master.getMasterFileSystem().getWALFileSystem();
this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,14 @@
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface;
import org.apache.hadoop.hbase.procedure2.Procedure;
Expand All @@ -43,6 +51,7 @@
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.junit.After;
Expand All @@ -54,6 +63,8 @@
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Category({ MasterTests.class, LargeTests.class })

Expand All @@ -63,6 +74,7 @@ public class TestSplitWALManager {
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSplitWALManager.class);

private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class);
private static HBaseTestingUtility TEST_UTIL;
private HMaster master;
private SplitWALManager splitWALManager;
Expand All @@ -86,6 +98,58 @@ public void teardown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

@Test
public void testWALArchiveWithDifferentWalAndRootFS() throws Exception {
  // Regression test for HBASE-25445: WAL splitting/archival must work when the
  // WAL filesystem differs from the root (master) filesystem, i.e.
  // SplitWALManager must operate on the WAL FS, not the master FS.
  HBaseTestingUtility test_util_2 = new HBaseTestingUtility();
  Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir");
  // Point the second cluster's WAL root at a directory on the first cluster's
  // test FS so the WAL FS and root FS are genuinely different.
  test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString());
  CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir);
  test_util_2.startMiniCluster(3);
  try {
    HMaster master2 = test_util_2.getHBaseCluster().getMaster();
    // Parameterized logging avoids eager string concatenation.
    LOG.info("The Master FS is pointing to: {}",
        master2.getMasterFileSystem().getFileSystem().getUri());
    LOG.info("The WAL FS is pointing to: {}",
        master2.getMasterFileSystem().getWALFileSystem().getUri());
    Table table = test_util_2.createTable(TABLE_NAME, FAMILY);
    test_util_2.waitTableAvailable(TABLE_NAME);
    Admin admin = test_util_2.getAdmin();
    MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster()
        .getMasterProcedureExecutor().getEnvironment();
    final ProcedureExecutor<MasterProcedureEnv> executor = test_util_2.getMiniHBaseCluster()
        .getMaster().getMasterProcedureExecutor();
    List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
    SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure(
        env, regionInfos.get(0), Bytes.toBytes("row5"));
    // Populate some rows in the table before triggering the split.
    LOG.info("Beginning put data to the table: {}", TABLE_NAME);
    int rowCount = 5;
    for (int i = 0; i < rowCount; i++) {
      byte[] row = Bytes.toBytes("row" + i);
      Put put = new Put(row);
      put.addColumn(FAMILY, FAMILY, FAMILY);
      table.put(put);
    }
    executor.submitProcedure(splitProcedure);
    LOG.info("Submitted SplitProcedure.");
    // Wait until the split has produced a region-transition procedure for our table.
    test_util_2.waitFor(30000, () -> executor.getProcedures().stream()
        .filter(p -> p instanceof TransitRegionStateProcedure)
        .map(p -> (TransitRegionStateProcedure) p)
        .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
    // Kill a region server mid-split to force WAL splitting, then recover.
    test_util_2.getMiniHBaseCluster().killRegionServer(
        test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName());
    test_util_2.getMiniHBaseCluster().startRegionServer();
    test_util_2.waitUntilNoRegionsInTransition();
    // All rows must survive the WAL split/replay; close the scanner to avoid
    // leaking the underlying RPC resources.
    int scanRowCount = 0;
    try (ResultScanner results = table.getScanner(new Scan())) {
      while (results.next() != null) {
        scanRowCount++;
      }
    }
    Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount +
        " were expected.", rowCount, scanRowCount);
  } finally {
    // Always tear the second mini-cluster down, even if an assertion fails,
    // so it cannot leak into subsequent tests.
    test_util_2.shutdownMiniCluster();
  }
}

@Test
public void testAcquireAndRelease() throws Exception {
List<FakeServerProcedure> testProcedures = new ArrayList<>();
Expand Down

0 comments on commit 4b62152

Please sign in to comment.