diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
index aa91c84cb672..6db094c4e6df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitWALManager.java
@@ -85,8 +85,7 @@ public SplitWALManager(MasterServices master) throws IOException {
     this.splitWorkerAssigner = new SplitWorkerAssigner(this.master,
       conf.getInt(HBASE_SPLIT_WAL_MAX_SPLITTER, DEFAULT_HBASE_SPLIT_WAL_MAX_SPLITTER));
     this.rootDir = master.getMasterFileSystem().getWALRootDir();
-    // TODO: This should be the WAL FS, not the Master FS?
-    this.fs = master.getMasterFileSystem().getFileSystem();
+    this.fs = master.getMasterFileSystem().getWALFileSystem();
     this.walArchiveDir = new Path(this.rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java
index 7edb011f97f4..10eda749891d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestSplitWALManager.java
@@ -31,6 +31,14 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
+import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ServerProcedureInterface;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -43,6 +51,7 @@
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.After;
@@ -54,6 +63,8 @@
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 @Category({ MasterTests.class, LargeTests.class })
@@ -63,6 +74,7 @@ public class TestSplitWALManager {
   public static final HBaseClassTestRule CLASS_RULE =
       HBaseClassTestRule.forClass(TestSplitWALManager.class);
 
+  private static final Logger LOG = LoggerFactory.getLogger(TestSplitWALManager.class);
   private static HBaseTestingUtility TEST_UTIL;
   private HMaster master;
   private SplitWALManager splitWALManager;
@@ -86,6 +98,58 @@ public void teardown() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  @Test
+  public void testWALArchiveWithDifferentWalAndRootFS() throws Exception{
+    HBaseTestingUtility test_util_2 = new HBaseTestingUtility();
+    Path dir = TEST_UTIL.getDataTestDirOnTestFS("testWalDir");
+    test_util_2.getConfiguration().set(CommonFSUtils.HBASE_WAL_DIR, dir.toString());
+    CommonFSUtils.setWALRootDir(test_util_2.getConfiguration(), dir);
+    test_util_2.startMiniCluster(3);
+    HMaster master2 = test_util_2.getHBaseCluster().getMaster();
+    LOG.info("The Master FS is pointing to: " + master2.getMasterFileSystem()
+      .getFileSystem().getUri());
+    LOG.info("The WAL FS is pointing to: " + master2.getMasterFileSystem()
+      .getWALFileSystem().getUri());
+    Table table = test_util_2.createTable(TABLE_NAME, FAMILY);
+    test_util_2.waitTableAvailable(TABLE_NAME);
+    Admin admin = test_util_2.getAdmin();
+    MasterProcedureEnv env = test_util_2.getMiniHBaseCluster().getMaster()
+      .getMasterProcedureExecutor().getEnvironment();
+    final ProcedureExecutor<MasterProcedureEnv> executor = test_util_2.getMiniHBaseCluster()
+      .getMaster().getMasterProcedureExecutor();
+    List<RegionInfo> regionInfos = admin.getRegions(TABLE_NAME);
+    SplitTableRegionProcedure splitProcedure = new SplitTableRegionProcedure(
+      env, regionInfos.get(0), Bytes.toBytes("row5"));
+    // Populate some rows in the table
+    LOG.info("Beginning put data to the table: " + TABLE_NAME.toString());
+    int rowCount = 5;
+    for (int i = 0; i < rowCount; i++) {
+      byte[] row = Bytes.toBytes("row" + i);
+      Put put = new Put(row);
+      put.addColumn(FAMILY, FAMILY, FAMILY);
+      table.put(put);
+    }
+    executor.submitProcedure(splitProcedure);
+    LOG.info("Submitted SplitProcedure.");
+    test_util_2.waitFor(30000, () -> executor.getProcedures().stream()
+      .filter(p -> p instanceof TransitRegionStateProcedure)
+      .map(p -> (TransitRegionStateProcedure) p)
+      .anyMatch(p -> TABLE_NAME.equals(p.getTableName())));
+    test_util_2.getMiniHBaseCluster().killRegionServer(
+      test_util_2.getMiniHBaseCluster().getRegionServer(0).getServerName());
+    test_util_2.getMiniHBaseCluster().startRegionServer();
+    test_util_2.waitUntilNoRegionsInTransition();
+    Scan scan = new Scan();
+    ResultScanner results = table.getScanner(scan);
+    int scanRowCount = 0;
+    while (results.next() != null) {
+      scanRowCount++;
+    }
+    Assert.assertEquals("Got " + scanRowCount + " rows when " + rowCount +
+      " were expected.", rowCount, scanRowCount);
+    test_util_2.shutdownMiniCluster();
+  }
+
   @Test
   public void testAcquireAndRelease() throws Exception {
     List testProcedures = new ArrayList<>();