HBASE-28696 Partition BackupSystemTable queries (apache#6067)
Co-authored-by: Ray Mattingly <rmattingly@hubspot.com>
Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
2 people authored and ndimiduk committed Sep 9, 2024
1 parent 3f67699 · commit 78da9e9
Showing 1 changed file with 9 additions and 8 deletions.
```diff
@@ -54,6 +54,7 @@
 import org.apache.hadoop.hbase.backup.BackupType;
 import org.apache.hadoop.hbase.backup.util.BackupUtils;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Connection;
@@ -412,9 +413,9 @@ public void writePathsPostBulkLoad(TableName tabName, byte[] region,
       LOG.debug("write bulk load descriptor to backup " + tabName + " with " + finalPaths.size()
         + " entries");
     }
-    try (Table table = connection.getTable(bulkLoadTableName)) {
+    try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
       List<Put> puts = BackupSystemTable.createPutForCommittedBulkload(tabName, region, finalPaths);
-      table.put(puts);
+      bufferedMutator.mutate(puts);
       LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
     }
   }
@@ -446,14 +447,14 @@ public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, fin
    * @param rows the rows to be deleted
    */
   public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
-    try (Table table = connection.getTable(bulkLoadTableName)) {
+    try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
       List<Delete> lstDels = new ArrayList<>();
       for (byte[] row : rows) {
         Delete del = new Delete(row);
         lstDels.add(del);
         LOG.debug("orig deleting the row: " + Bytes.toString(row));
       }
-      table.delete(lstDels);
+      bufferedMutator.mutate(lstDels);
       LOG.debug("deleted " + rows.size() + " original bulkload rows");
     }
   }
@@ -535,7 +536,7 @@ public void deleteBulkLoadedRows(List<byte[]> rows) throws IOException {
    */
   public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Path>>[] maps,
     String backupId) throws IOException {
-    try (Table table = connection.getTable(bulkLoadTableName)) {
+    try (BufferedMutator bufferedMutator = connection.getBufferedMutator(bulkLoadTableName)) {
       long ts = EnvironmentEdgeManager.currentTime();
       int cnt = 0;
       List<Put> puts = new ArrayList<>();
@@ -558,7 +559,7 @@ public void writeBulkLoadedFiles(List<TableName> sTableList, Map<byte[], List<Pa
         }
       }
       if (!puts.isEmpty()) {
-        table.put(puts);
+        bufferedMutator.mutate(puts);
       }
     }
   }
@@ -917,8 +918,8 @@ public void writeRegionServerLogTimestamp(Set<TableName> tables, Map<String, Lon
       Put put = createPutForWriteRegionServerLogTimestamp(table, smapData, backupRoot);
       puts.add(put);
     }
-    try (Table table = connection.getTable(tableName)) {
-      table.put(puts);
+    try (BufferedMutator bufferedMutator = connection.getBufferedMutator(tableName)) {
+      bufferedMutator.mutate(puts);
     }
   }

```
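For context, the sketch below shows the general BufferedMutator write pattern this commit switches the backup system table writes to: mutations are buffered client-side and sent in batches, rather than issuing a synchronous Table.put or Table.delete for each list. It is a minimal standalone example, not code from the patch; the table name `demo:table`, column family `cf`, and row keys are illustrative placeholders.

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorExample {

  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // "demo:table" and "cf" are illustrative; they are not part of the patch.
    TableName tableName = TableName.valueOf("demo:table");

    try (Connection connection = ConnectionFactory.createConnection(conf);
      // Instead of connection.getTable(...), obtain a BufferedMutator that
      // buffers mutations client-side and sends them to the region servers in batches.
      BufferedMutator mutator = connection.getBufferedMutator(tableName)) {

      List<Mutation> mutations = new ArrayList<>();
      for (int i = 0; i < 100; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        mutations.add(put);
      }
      // A Delete is also a Mutation, so the same mutator handles both kinds.
      mutations.add(new Delete(Bytes.toBytes("row-obsolete")));

      // mutate(List) only queues the operations; they are flushed when the
      // write buffer fills up, on flush(), or when the mutator is closed.
      mutator.mutate(mutations);
      mutator.flush();
    }
  }
}
```

Closing the mutator (here via try-with-resources) flushes anything still buffered; the patched methods rely on the same behavior, queueing their Puts and Deletes inside a try-with-resources block.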
