Skip to content

Commit

Permalink
HBASE-28539 Merge of incremental backups fails if backups are on a separate FileSystem (apache#5867)
Browse files Browse the repository at this point in the history

When the backups are stored on a location that is not the DistributedFileSystem underpinning HBase itself, merging of incremental backups fails. Detected with backups stored on S3A, but can be reproduced with any other FileSystem (like LocalFileSystem).

Reviewed-by: Ray Mattingly <rmdmattingly@gmail.com>
Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
Signed-off-by: Andor Molnár <andor@apache.org>
  • Loading branch information
bcolyn-ngdata authored and ndimiduk committed Jun 6, 2024
1 parent a35145b commit f5df8af
Show file tree
Hide file tree
Showing 2 changed files with 53 additions and 3 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ public void run(String[] backupIds) throws IOException {
boolean finishedTables = false;
Connection conn = ConnectionFactory.createConnection(getConf());
BackupSystemTable table = new BackupSystemTable(conn);
FileSystem fs = FileSystem.get(getConf());
FileSystem fs = null;

try {

Expand All @@ -112,6 +112,8 @@ public void run(String[] backupIds) throws IOException {

BackupInfo bInfo = table.readBackupInfo(backupIds[0]);
String backupRoot = bInfo.getBackupRootDir();
Path backupRootPath = new Path(backupRoot);
fs = backupRootPath.getFileSystem(conf);

for (int i = 0; i < tableNames.length; i++) {
LOG.info("Merge backup images for " + tableNames[i]);
Expand All @@ -120,7 +122,9 @@ public void run(String[] backupIds) throws IOException {
Path[] dirPaths = findInputDirectories(fs, backupRoot, tableNames[i], backupIds);
String dirs = StringUtils.join(dirPaths, ",");

Path bulkOutputPath = BackupUtils.getBulkOutputDir(
// bulkOutputPath should be on the same filesystem as backupRoot
Path tmpRestoreOutputDir = HBackupFileSystem.getBackupTmpDirPath(backupRoot);
Path bulkOutputPath = BackupUtils.getBulkOutputDir(tmpRestoreOutputDir,
BackupUtils.getFileNameCompatibleString(tableNames[i]), getConf(), false);
// Delete content if exists
if (fs.exists(bulkOutputPath)) {
Expand Down Expand Up @@ -186,7 +190,9 @@ public void run(String[] backupIds) throws IOException {
if (!finishedTables) {
// cleanup bulk directories and finish merge
// merge MUST be repeated (no need for repair)
cleanupBulkLoadDirs(fs, toPathList(processedTableList));
if (fs != null) {
cleanupBulkLoadDirs(fs, toPathList(processedTableList));
}
table.finishMergeOperation();
table.finishBackupExclusiveOperation();
throw new IOException("Backup merge operation failed, you should try it again", e);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,10 @@
*/
package org.apache.hadoop.hbase.backup;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
Expand Down Expand Up @@ -124,4 +126,46 @@ public void TestIncBackupMergeRestore() throws Exception {
admin.close();
conn.close();
}

@Test
public void testIncBackupMergeRestoreSeparateFs() throws Exception {
  String previousBackupRoot = BACKUP_ROOT_DIR;
  // Point BACKUP_ROOT_DIR at a directory backed by a filesystem different
  // from the one HBase itself runs on (LocalFileSystem via a file:// URI).
  Path backupTarget = new File(TEST_UTIL.getDataTestDir("backupTarget").toString()).toURI();
  BACKUP_ROOT_DIR = backupTarget.toString();

  try (Connection conn = ConnectionFactory.createConnection(conf1)) {
    List<TableName> tables = Lists.newArrayList(table1, table2);
    BackupAdminImpl client = new BackupAdminImpl(conn);

    // One full backup followed by two incrementals, all on the separate FS.
    String backupIdFull =
      client.backupTables(createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR));
    assertTrue(checkSucceeded(backupIdFull));

    String backupIdIncMultiple =
      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
    assertTrue(checkSucceeded(backupIdIncMultiple));

    String backupIdIncMultiple2 =
      client.backupTables(createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR));
    assertTrue(checkSucceeded(backupIdIncMultiple2));

    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
      // this throws java.lang.IllegalArgumentException: Wrong FS prior to HBASE-28539
      bAdmin.mergeBackups(new String[] { backupIdIncMultiple, backupIdIncMultiple2 });
    }

    // After the merge: the full backup survives, the first incremental is
    // folded into the second, which therefore remains on disk.
    assertTrue(
      new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdFull).toUri()).exists());
    assertFalse(
      new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple).toUri())
        .exists());
    assertTrue(
      new File(HBackupFileSystem.getBackupPath(BACKUP_ROOT_DIR, backupIdIncMultiple2).toUri())
        .exists());
  } finally {
    // Always restore the shared static so later tests see the original root.
    BACKUP_ROOT_DIR = previousBackupRoot;
  }
}
}

0 comments on commit f5df8af

Please sign in to comment.