-
Notifications
You must be signed in to change notification settings - Fork 3.4k
HBASE-29209: Implement Backup Cleanup Command to Remove Older WALs Not Required for PITR of Any Table #6847
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -153,6 +153,7 @@ enum BackupCommand { | |
| SET_DELETE, | ||
| SET_DESCRIBE, | ||
| SET_LIST, | ||
| REPAIR | ||
| REPAIR, | ||
| CLEANUP | ||
| } | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -17,6 +17,8 @@ | |
| */ | ||
| package org.apache.hadoop.hbase.backup.impl; | ||
|
|
||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONTINUOUS_BACKUP_REPLICATION_PEER; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BACKUP_LIST_DESC; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_BANDWIDTH_DESC; | ||
|
|
@@ -43,14 +45,22 @@ | |
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_WORKERS_DESC; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME; | ||
| import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_YARN_QUEUE_NAME_DESC; | ||
| import static org.apache.hadoop.hbase.backup.replication.ContinuousBackupReplicationEndpoint.DATE_FORMAT; | ||
| import static org.apache.hadoop.hbase.backup.replication.ContinuousBackupReplicationEndpoint.ONE_DAY_IN_MILLISECONDS; | ||
|
|
||
| import java.io.IOException; | ||
| import java.net.URI; | ||
| import java.text.ParseException; | ||
| import java.text.SimpleDateFormat; | ||
| import java.util.Collections; | ||
| import java.util.HashSet; | ||
| import java.util.List; | ||
| import java.util.Map; | ||
| import java.util.Set; | ||
| import org.apache.commons.lang3.StringUtils; | ||
| import org.apache.hadoop.conf.Configuration; | ||
| import org.apache.hadoop.conf.Configured; | ||
| import org.apache.hadoop.fs.FileStatus; | ||
| import org.apache.hadoop.fs.FileSystem; | ||
| import org.apache.hadoop.fs.Path; | ||
| import org.apache.hadoop.hbase.HBaseConfiguration; | ||
|
|
@@ -63,6 +73,7 @@ | |
| import org.apache.hadoop.hbase.backup.BackupRestoreConstants.BackupCommand; | ||
| import org.apache.hadoop.hbase.backup.BackupType; | ||
| import org.apache.hadoop.hbase.backup.HBackupFileSystem; | ||
| import org.apache.hadoop.hbase.backup.replication.BackupFileSystemManager; | ||
| import org.apache.hadoop.hbase.backup.util.BackupSet; | ||
| import org.apache.hadoop.hbase.backup.util.BackupUtils; | ||
| import org.apache.hadoop.hbase.client.Connection; | ||
|
|
@@ -117,6 +128,8 @@ public final class BackupCommands { | |
|
|
||
| public static final String REPAIR_CMD_USAGE = "Usage: hbase backup repair\n"; | ||
|
|
||
| public static final String CLEANUP_CMD_USAGE = "Usage: hbase backup cleanup\n"; | ||
|
|
||
| public static final String SET_CMD_USAGE = "Usage: hbase backup set COMMAND [name] [tables]\n" | ||
| + " name Backup set name\n" + " tables Comma separated list of tables.\n" | ||
| + "COMMAND is one of:\n" + " add add tables to a set, create a set if needed\n" | ||
|
|
@@ -245,6 +258,9 @@ public static Command createCommand(Configuration conf, BackupCommand type, Comm | |
| case MERGE: | ||
| cmd = new MergeCommand(conf, cmdline); | ||
| break; | ||
| case CLEANUP: | ||
| cmd = new CleanupCommand(conf, cmdline); | ||
| break; | ||
| case HELP: | ||
| default: | ||
| cmd = new HelpCommand(conf, cmdline); | ||
|
|
@@ -853,6 +869,188 @@ protected void printUsage() { | |
| } | ||
| } | ||
|
|
||
| /** | ||
| * The {@code CleanupCommand} class is responsible for removing Write-Ahead Log (WAL) and | ||
| * bulk-loaded files that are no longer needed for Point-in-Time Recovery (PITR). | ||
| * <p> | ||
| * The cleanup process follows these steps: | ||
| * <ol> | ||
| * <li>Identify the oldest full backup and its start timestamp.</li> | ||
| * <li>Delete WAL files older than this timestamp, as they are no longer usable for PITR with any | ||
| * backup.</li> | ||
| * </ol> | ||
|
Comment on lines
+873
to
+881
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The standard approach in HBase is to delete old files via extensions of the These cleaners should be run by the HMaster's
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks, @rmdmattingly, for the review comments! I’d like to clarify that we are specifically cleaning up WALs in the backup location (e.g., S3, where they are continuously replicated), not the cluster’s WALs. If we were dealing with cluster WALs, your point would certainly apply—does that sound correct? Regarding keeping this command manual:
That said, we could explore the possibility of running this command periodically and automatically in future iterations.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah okay, thanks for the clarifications here. Maybe we could bake this clarification into the JavaDocs. You make some good points here, but I don't think they fully take into account the variety of ways in which people deploy HBase.
This is true to a large extent, but the non-emergency commands have at least been exposed in the Admin interface to make programmatic backups easily achievable. Maybe wiring up through the Admin is a fair compromise?
If this operation can only follow a delete, and WALs are made useless by said delete, then should this operation just be a part of the backup deletion process?
I don't think it's true that backup deletions are necessarily manual from an operator's perspective. For example, a company backing up their data in S3 could be making use of bucket TTLs to clean up their old backups. In that case, it would be nice for unusable WALs to clean themselves up organically too.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
That's my point as well. We could do this cleanup as part of the backup delete command, in which case we don't need to deal with whether this should be automatic or manual.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks, @rmdmattingly and @anmolnar. Okay, we can incorporate this cleanup process into the delete command itself. Currently, the delete command is used to remove both full and incremental backups. We have now introduced a new validation for PITR-Critical Backup Deletion. Please check the PR here: #6848 and review it. I will also add this cleanup logic at the end of the delete process to remove any WALs that can be deleted (which were previously retained due to this backup). How does that sound? |
||
| */ | ||
| public static class CleanupCommand extends Command { | ||
| CleanupCommand(Configuration conf, CommandLine cmdline) { | ||
| super(conf); | ||
| this.cmdline = cmdline; | ||
| } | ||
|
|
||
| @Override | ||
| public void execute() throws IOException { | ||
| super.execute(); | ||
|
|
||
| // Validate input arguments | ||
| validateArguments(); | ||
|
|
||
| Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create(); | ||
| String backupWalDir = conf.get(CONF_CONTINUOUS_BACKUP_WAL_DIR); | ||
|
|
||
| if (backupWalDir == null || backupWalDir.isEmpty()) { | ||
| System.out | ||
| .println("WAL Directory is not specified for continuous backup. Nothing to clean!"); | ||
| return; | ||
| } | ||
|
|
||
| try (final Connection conn = ConnectionFactory.createConnection(conf); | ||
| final BackupSystemTable sysTable = new BackupSystemTable(conn)) { | ||
|
|
||
| // Retrieve tables that are part of continuous backup | ||
| Map<TableName, Long> continuousBackupTables = sysTable.getContinuousBackupTableSet(); | ||
| if (continuousBackupTables.isEmpty()) { | ||
| System.out.println("Continuous Backup is not enabled for any tables. Nothing to clean!"); | ||
| return; | ||
| } | ||
|
|
||
| // Determine the earliest timestamp before which WAL files can be deleted | ||
| long cleanupCutoffTimestamp = determineCleanupCutoffTime(sysTable, continuousBackupTables); | ||
| if (cleanupCutoffTimestamp == 0) { | ||
| System.err.println("ERROR: No valid full backup found. Cleanup aborted."); | ||
| return; | ||
| } | ||
|
|
||
| // Update the continuous backup table's start time to match the cutoff time *before* actual | ||
| // cleanup. | ||
| // This is safe because even if the WAL cleanup fails later, we won't be accessing data | ||
| // older than | ||
| // the cutoff timestamp, ensuring consistency in what the system considers valid for | ||
| // recovery. | ||
| // | ||
| // If we did this the other way around—cleaning up first and updating the table afterward— | ||
| // a failure between these two steps could leave us in an inconsistent state where some WALs | ||
| // are already deleted, but the backup metadata still references them. | ||
| updateContinuousBackupTablesStartTime(sysTable, cleanupCutoffTimestamp); | ||
|
|
||
| // Perform WAL file cleanup | ||
| cleanupOldWALFiles(conf, backupWalDir, cleanupCutoffTimestamp); | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Fetches the continuous backup tables from the system table and updates their start timestamps | ||
| * if the current start time is earlier than the given cutoff timestamp. | ||
| * @param sysTable The backup system table from which continuous backup tables are | ||
| * retrieved and updated. | ||
| * @param cleanupCutoffTimestamp The cutoff timestamp before which WAL files can be deleted. | ||
| * @throws IOException If an error occurs while accessing the system table. | ||
| */ | ||
| private void updateContinuousBackupTablesStartTime(BackupSystemTable sysTable, | ||
| long cleanupCutoffTimestamp) throws IOException { | ||
| Map<TableName, Long> continuousBackupTables = sysTable.getContinuousBackupTableSet(); | ||
|
|
||
| // Identify tables that need updating | ||
| Set<TableName> tablesToUpdate = new HashSet<>(); | ||
| for (Map.Entry<TableName, Long> entry : continuousBackupTables.entrySet()) { | ||
| TableName table = entry.getKey(); | ||
| long startTimestamp = entry.getValue(); | ||
|
|
||
| if (startTimestamp < cleanupCutoffTimestamp) { | ||
| tablesToUpdate.add(table); | ||
| } | ||
| } | ||
|
|
||
| // If no tables require updates, exit early | ||
| if (tablesToUpdate.isEmpty()) { | ||
| return; | ||
| } | ||
|
|
||
| // Perform the actual update in the system table | ||
| sysTable.updateContinuousBackupTableSet(tablesToUpdate, cleanupCutoffTimestamp); | ||
| } | ||
|
|
||
| private void validateArguments() throws IOException { | ||
| String[] args = cmdline == null ? null : cmdline.getArgs(); | ||
| if (args != null && args.length > 1) { | ||
| System.err.println("ERROR: wrong number of arguments: " + args.length); | ||
| printUsage(); | ||
| throw new IOException(INCORRECT_USAGE); | ||
| } | ||
| } | ||
|
|
||
| private long determineCleanupCutoffTime(BackupSystemTable sysTable, | ||
| Map<TableName, Long> backupTables) throws IOException { | ||
| List<BackupInfo> backupInfos = sysTable.getBackupInfos(BackupState.COMPLETE); | ||
| Collections.reverse(backupInfos); // Process from oldest to latest | ||
|
|
||
| for (BackupInfo backupInfo : backupInfos) { | ||
| if (BackupType.FULL.equals(backupInfo.getType())) { | ||
| return backupInfo.getStartTs(); | ||
| } | ||
| } | ||
| return 0; | ||
| } | ||
|
|
||
| /** | ||
| * Cleans up old WAL and bulk-loaded files based on the determined cutoff timestamp. | ||
| */ | ||
| private void cleanupOldWALFiles(Configuration conf, String backupWalDir, long cutoffTime) | ||
| throws IOException { | ||
| System.out.println("Starting WAL cleanup in backup directory: " + backupWalDir | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @vinayakphegde, are you using |
||
| + " with cutoff time: " + cutoffTime); | ||
|
|
||
| BackupFileSystemManager manager = | ||
| new BackupFileSystemManager(CONTINUOUS_BACKUP_REPLICATION_PEER, conf, backupWalDir); | ||
| FileSystem fs = manager.getBackupFs(); | ||
| Path walDir = manager.getWalsDir(); | ||
| Path bulkloadDir = manager.getBulkLoadFilesDir(); | ||
| SimpleDateFormat dateFormat = new SimpleDateFormat(DATE_FORMAT); | ||
|
|
||
| System.out.println("Listing directories under: " + walDir); | ||
|
|
||
| FileStatus[] directories = fs.listStatus(walDir); | ||
|
|
||
| for (FileStatus dirStatus : directories) { | ||
| if (!dirStatus.isDirectory()) { | ||
| continue; // Skip files, we only want directories | ||
| } | ||
|
|
||
| Path dirPath = dirStatus.getPath(); | ||
| String dirName = dirPath.getName(); | ||
|
|
||
| try { | ||
| long dayStart = parseDayDirectory(dirName, dateFormat); | ||
| System.out | ||
| .println("Checking WAL directory: " + dirName + " (Start Time: " + dayStart + ")"); | ||
|
|
||
| // If WAL files of that day are older than cutoff time, delete them | ||
| if (dayStart + ONE_DAY_IN_MILLISECONDS - 1 < cutoffTime) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. @vinayakphegde, just curious, what is the purpose of adding |
||
| System.out.println("Deleting outdated WAL directory: " + dirPath); | ||
| fs.delete(dirPath, true); | ||
| fs.delete(new Path(bulkloadDir, dirPath.getName()), true); | ||
| } | ||
| } catch (ParseException e) { | ||
| System.out.println("WARNING: Failed to parse directory name '" + dirName | ||
| + "'. Skipping. Error: " + e.getMessage()); | ||
| } catch (IOException e) { | ||
| System.out.println("WARNING: Failed to delete directory '" + dirPath | ||
| + "'. Skipping. Error: " + e.getMessage()); | ||
| } | ||
| } | ||
|
|
||
| System.out.println("Completed WAL cleanup for backup directory: " + backupWalDir); | ||
| } | ||
|
|
||
| private long parseDayDirectory(String dayDir, SimpleDateFormat dateFormat) | ||
| throws ParseException { | ||
| return dateFormat.parse(dayDir).getTime(); | ||
| } | ||
|
|
||
| @Override | ||
| protected void printUsage() { | ||
| System.out.println(CLEANUP_CMD_USAGE); | ||
| } | ||
| } | ||
|
|
||
| public static class MergeCommand extends Command { | ||
| MergeCommand(Configuration conf, CommandLine cmdline) { | ||
| super(conf); | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I would name this something more specific, unless this command intends to clean up entries that may be left behind for full and incremental backups as well