BackupImageAdapter.java
@@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Adapter that wraps a {@link BackupImage} to expose it as {@link PitrBackupMetadata}.
 */
@InterfaceAudience.Private
public class BackupImageAdapter implements PitrBackupMetadata {
  private final BackupImage image;

  public BackupImageAdapter(BackupImage image) {
    this.image = image;
  }

  @Override
  public List<TableName> getTableNames() {
    return image.getTableNames();
  }

  @Override
  public long getStartTs() {
    return image.getStartTs();
  }

  @Override
  public long getCompleteTs() {
    return image.getCompleteTs();
  }

  @Override
  public String getBackupId() {
    return image.getBackupId();
  }

  @Override
  public String getRootDir() {
    return image.getRootDir();
  }
}
BackupInfoAdapter.java
@@ -0,0 +1,60 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Adapter that wraps a {@link BackupInfo} to expose it as {@link PitrBackupMetadata}.
 */
@InterfaceAudience.Private
public class BackupInfoAdapter implements PitrBackupMetadata {
  private final BackupInfo info;

  public BackupInfoAdapter(BackupInfo info) {
    this.info = info;
  }

  @Override
  public List<TableName> getTableNames() {
    return info.getTableNames();
  }

  @Override
  public long getStartTs() {
    return info.getStartTs();
  }

  @Override
  public long getCompleteTs() {
    return info.getCompleteTs();
  }

  @Override
  public String getBackupId() {
    return info.getBackupId();
  }

  @Override
  public String getRootDir() {
    return info.getBackupRootDir();
  }
}
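
Taken together, the two adapters give PITR code a single way to read backup metadata, whatever its source. A minimal sketch of the wrapping step (the variable names are illustrative, not from this diff):

// Illustrative: both sources end up behind the same interface.
PitrBackupMetadata fromSystemTable = new BackupInfoAdapter(backupInfo); // BackupInfo read from the system table
PitrBackupMetadata fromManifest = new BackupImageAdapter(backupImage);  // BackupImage read from a manifest on disk
// From here on, callers only see PitrBackupMetadata.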
CustomBackupLocationPitrRestoreHandler.java
@@ -0,0 +1,57 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.PointInTimeRestoreRequest;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * PITR restore handler that retrieves backup metadata from a custom backup root directory.
 * <p>
 * This implementation is used when the PITR request specifies a custom backup location via
 * {@code backupRootDir}.
 */
@InterfaceAudience.Private
public class CustomBackupLocationPitrRestoreHandler extends AbstractPitrRestoreHandler {

  public CustomBackupLocationPitrRestoreHandler(Connection conn,
    PointInTimeRestoreRequest request) {
    super(conn, request);
  }

  /**
   * Retrieves completed backup entries from the given custom backup root directory and converts
   * them into {@link PitrBackupMetadata} using {@link BackupImageAdapter}.
   * @param request the PITR request
   * @return list of completed backup metadata entries from the custom location
   * @throws IOException if reading from the custom backup directory fails
   */
  @Override
  protected List<PitrBackupMetadata> getBackupMetadata(PointInTimeRestoreRequest request)
    throws IOException {
    return HBackupFileSystem
      .getAllBackupImages(conn.getConfiguration(), new Path(request.getBackupRootDir())).stream()
      .map(BackupImageAdapter::new).collect(Collectors.toList());
  }
}
DefaultPitrRestoreHandler.java
@@ -0,0 +1,55 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.PointInTimeRestoreRequest;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Default PITR restore handler that retrieves backup metadata from the system table.
 * <p>
 * This implementation is used when no custom backup root directory is specified in the request.
 */
@InterfaceAudience.Private
public class DefaultPitrRestoreHandler extends AbstractPitrRestoreHandler {

  public DefaultPitrRestoreHandler(Connection conn, PointInTimeRestoreRequest request) {
    super(conn, request);
  }

  /**
   * Retrieves completed backup entries from the BackupSystemTable and converts them into
   * {@link PitrBackupMetadata} using {@link BackupInfoAdapter}.
   * @param request the PITR request
   * @return list of completed backup metadata entries
   * @throws IOException if reading from the backup system table fails
   */
  @Override
  protected List<PitrBackupMetadata> getBackupMetadata(PointInTimeRestoreRequest request)
    throws IOException {
    try (BackupSystemTable table = new BackupSystemTable(conn)) {
      return table.getBackupInfos(BackupInfo.BackupState.COMPLETE).stream()
        .map(BackupInfoAdapter::new).collect(Collectors.toList());
    }
  }
}
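
The two handlers differ only in where getBackupMetadata() looks. A plausible selection step, assuming the caller dispatches on whether the request carries a custom backup root; the entry-point method on AbstractPitrRestoreHandler is not part of this diff, so execute() below is a hypothetical name:

// Hypothetical dispatch; only the two concrete handlers appear in this diff.
AbstractPitrRestoreHandler handler = request.getBackupRootDir() != null
  ? new CustomBackupLocationPitrRestoreHandler(conn, request)
  : new DefaultPitrRestoreHandler(conn, request);
handler.execute(); // assumed entry point on the abstract base class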
PitrBackupMetadata.java
@@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup.impl;

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupInfo;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * A unified abstraction over backup metadata used during Point-In-Time Restore (PITR).
 * <p>
 * This interface allows the PITR algorithm to operate uniformly over different types of backup
 * metadata sources, such as {@link BackupInfo} (system table) and {@link BackupImage} (custom
 * backup location), without knowing their specific implementations.
 */
@InterfaceAudience.Private
public interface PitrBackupMetadata {

  /** Returns the list of table names included in the backup. */
  List<TableName> getTableNames();

  /** Returns the start timestamp of the backup. */
  long getStartTs();

  /** Returns the completion timestamp of the backup. */
  long getCompleteTs();

  /** Returns the unique identifier of the backup. */
  String getBackupId();

  /** Returns the root directory where the backup is stored. */
  String getRootDir();
}
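
Because every source is normalized to this interface, the selection logic can be written once. A sketch of one such use, assuming the algorithm wants the most recent backup that completed at or before the restore timestamp and covers a given table; the actual criteria live in AbstractPitrRestoreHandler, which this diff does not show:

// Illustrative selection over PitrBackupMetadata; needs java.util.Comparator
// and java.util.Optional in addition to the imports above.
static Optional<PitrBackupMetadata> latestCoveringBackup(List<PitrBackupMetadata> backups,
  TableName table, long restoreTs) {
  return backups.stream()
    .filter(b -> b.getCompleteTs() <= restoreTs)
    .filter(b -> b.getTableNames().contains(table))
    .max(Comparator.comparingLong(PitrBackupMetadata::getCompleteTs));
}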
PITRTestUtil.java
@@ -0,0 +1,107 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.backup;

import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_ENABLE_CONTINUOUS_BACKUP;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_PITR_BACKUP_PATH;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TABLE_MAPPING;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.OPTION_TO_DATETIME;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.Private
public final class PITRTestUtil {
  private static final Logger LOG = LoggerFactory.getLogger(PITRTestUtil.class);
  private static final int DEFAULT_WAIT_FOR_REPLICATION_MS = 30_000;

  private PITRTestUtil() {
    // Utility class
  }

  public static String[] buildPITRArgs(TableName[] sourceTables, TableName[] targetTables,
    long endTime, String backupRootDir) {
    String sourceTableNames =
      Arrays.stream(sourceTables).map(TableName::getNameAsString).collect(Collectors.joining(","));
    String targetTableNames =
      Arrays.stream(targetTables).map(TableName::getNameAsString).collect(Collectors.joining(","));

    List<String> args = new ArrayList<>();
    args.add("-" + OPTION_TABLE);
    args.add(sourceTableNames);
    args.add("-" + OPTION_TABLE_MAPPING);
    args.add(targetTableNames);
    args.add("-" + OPTION_TO_DATETIME);
    args.add(String.valueOf(endTime));

    if (backupRootDir != null) {
      args.add("-" + OPTION_PITR_BACKUP_PATH);
      args.add(backupRootDir);
    }

    return args.toArray(new String[0]);
  }

  public static String[] buildBackupArgs(String backupType, TableName[] tables,
    boolean continuousEnabled, String backupRootDir) {
    String tableNames =
      Arrays.stream(tables).map(TableName::getNameAsString).collect(Collectors.joining(","));

    List<String> args = new ArrayList<>(
      Arrays.asList("create", backupType, backupRootDir, "-" + OPTION_TABLE, tableNames));

    if (continuousEnabled) {
      args.add("-" + OPTION_ENABLE_CONTINUOUS_BACKUP);
    }

    return args.toArray(new String[0]);
  }

  public static void loadRandomData(HBaseTestingUtil testUtil, TableName tableName, byte[] family,
    int totalRows) throws IOException {
    try (Table table = testUtil.getConnection().getTable(tableName)) {
      testUtil.loadRandomRows(table, family, 32, totalRows);
    }
  }

  public static void waitForReplication() {
    try {
      LOG.info("Waiting {} ms for replication to complete", DEFAULT_WAIT_FOR_REPLICATION_MS);
      Thread.sleep(DEFAULT_WAIT_FOR_REPLICATION_MS);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new RuntimeException("Interrupted while waiting for replication", e);
    }
  }

  public static int getRowCount(HBaseTestingUtil testUtil, TableName tableName) throws IOException {
    try (Table table = testUtil.getConnection().getTable(tableName)) {
      return HBaseTestingUtil.countRows(table);
    }
  }
}
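
A sketch of how a test might chain these helpers end to end, assuming the ToolRunner/BackupDriver invocation pattern used by hbase-backup tests; the restore step itself is left abstract because the PITR driver is not part of this excerpt, and sourceTable, targetTable, FAMILY, backupRootDir, and TEST_UTIL are hypothetical test fixtures:

// Hypothetical test flow built on the helpers above.
String[] backupArgs =
  PITRTestUtil.buildBackupArgs("full", new TableName[] { sourceTable }, true, backupRootDir);
assertEquals(0, ToolRunner.run(TEST_UTIL.getConfiguration(), new BackupDriver(), backupArgs));

PITRTestUtil.loadRandomData(TEST_UTIL, sourceTable, FAMILY, 100);
PITRTestUtil.waitForReplication();

String[] pitrArgs = PITRTestUtil.buildPITRArgs(new TableName[] { sourceTable },
  new TableName[] { targetTable }, EnvironmentEdgeManager.currentTime(), backupRootDir);
// ... run the point-in-time restore with pitrArgs, then verify the copy:
assertEquals(PITRTestUtil.getRowCount(TEST_UTIL, sourceTable),
  PITRTestUtil.getRowCount(TEST_UTIL, targetTable));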