HDDS-11124. Removed DELETED_TABLE and DELETED_DIR_TABLE locks
hemantk-12 committed Jul 10, 2024
1 parent 0eab761 commit 5dcdf85
Showing 5 changed files with 17 additions and 77 deletions.
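
Summary of the change: the per-table ReentrantReadWriteLock guarding DELETED_TABLE and DELETED_DIR_TABLE was an acknowledged stopgap (see the removed comment pointing at HDDS-5905 / HDDS-6483). Snapshot checkpoint creation and the key/directory deleting background services had to bracket their table access with that lock; this commit removes both the lock plumbing and the brackets, relying instead on OM transactions being flushed sequentially. A minimal sketch of the removed pattern follows, built only from what the diff shows; the class name and the runTableWork helper are illustrative, not part of the codebase.

```java
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch of the stopgap being removed: one fair read-write lock per
// table name, handed out via getTableLock() and taken as a write lock around any
// read-modify-write of deletedTable / deletedDirTable.
class TableLockSketch {
  private final Map<String, ReentrantReadWriteLock> tableLockMap = new HashMap<>();

  TableLockSketch() {
    // Fair locks, as in the removed initializeOmTables() lines below.
    tableLockMap.put("deletedTable", new ReentrantReadWriteLock(true));
    tableLockMap.put("deletedDirTable", new ReentrantReadWriteLock(true));
  }

  ReentrantReadWriteLock getTableLock(String tableName) {
    return tableLockMap.get(tableName);
  }

  // Every removed call site followed this try/finally bracket.
  void runTableWork(String tableName, Runnable tableWork) {
    getTableLock(tableName).writeLock().lock();
    try {
      tableWork.run();
    } finally {
      getTableLock(tableName).writeLock().unlock();
    }
  }
}
```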
Changed file 1 of 5
@@ -22,7 +22,6 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.DBStoreHAManager;
@@ -322,13 +321,6 @@ ExpiredOpenKeys getExpiredOpenKeys(Duration expireThreshold, int count,
List<ExpiredMultipartUploadsBucket> getExpiredMultipartUploads(
Duration expireThreshold, int maxParts) throws IOException;

/**
* Retrieve RWLock for the table.
* @param tableName table name.
* @return ReentrantReadWriteLock
*/
ReentrantReadWriteLock getTableLock(String tableName);

/**
* Returns the user Table.
*
Changed file 2 of 5
@@ -37,7 +37,6 @@
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -297,11 +296,6 @@ public class OmMetadataManagerImpl implements OMMetadataManager,
private boolean ignorePipelineinKey;
private Table deletedDirTable;

// Table-level locks that protect table read/write access. Note:
// Don't use this lock for tables other than deletedTable and deletedDirTable.
// This is a stopgap solution. Will remove when HDDS-5905 (HDDS-6483) is done.
private Map<String, ReentrantReadWriteLock> tableLockMap = new HashMap<>();

private OzoneManager ozoneManager;

// Epoch is used to generate the objectIDs. The most significant 2 bits of
@@ -435,11 +429,6 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name)
perfMetrics = null;
}

@Override
public ReentrantReadWriteLock getTableLock(String tableName) {
return tableLockMap.get(tableName);
}

public OzoneManager getOzoneManager() {
return ozoneManager;
}
@@ -692,7 +681,6 @@ protected void initializeOmTables(CacheType cacheType,
deletedTable = this.store.getTable(DELETED_TABLE, String.class,
RepeatedOmKeyInfo.class);
checkTableStatus(deletedTable, DELETED_TABLE, addCacheMetrics);
tableLockMap.put(DELETED_TABLE, new ReentrantReadWriteLock(true));

openKeyTable =
this.store.getTable(OPEN_KEY_TABLE, String.class,
@@ -730,7 +718,6 @@ protected void initializeOmTables(CacheType cacheType,
deletedDirTable = this.store.getTable(DELETED_DIR_TABLE, String.class,
OmKeyInfo.class);
checkTableStatus(deletedDirTable, DELETED_DIR_TABLE, addCacheMetrics);
tableLockMap.put(DELETED_DIR_TABLE, new ReentrantReadWriteLock(true));

transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
String.class, TransactionInfo.class);
Changed file 3 of 5
@@ -453,44 +453,26 @@ public static DBCheckpoint createOmSnapshotCheckpoint(

final DBCheckpoint dbCheckpoint;

// Acquire active DB deletedDirectoryTable write lock to block
// DirDeletingTask
omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_DIR_TABLE)
.writeLock().lock();
// Acquire active DB deletedTable write lock to block KeyDeletingTask
omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_TABLE)
.writeLock().lock();

boolean snapshotDirExist = false;

try {
// Create DB checkpoint for snapshot
String checkpointPrefix = store.getDbLocation().getName();
Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(),
checkpointPrefix + snapshotInfo.getCheckpointDir());
if (Files.exists(snapshotDirPath)) {
snapshotDirExist = true;
dbCheckpoint = new RocksDBCheckpoint(snapshotDirPath);
} else {
dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName());
}

// Clean up active DB's deletedTable right after checkpoint is taken,
// with table write lock held
deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());
// Clean up deletedDirectoryTable as well
deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());
} finally {
// Release deletedTable write lock
omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_TABLE)
.writeLock().unlock();
// Release deletedDirectoryTable write lock
omMetadataManager.getTableLock(OmMetadataManagerImpl.DELETED_DIR_TABLE)
.writeLock().unlock();
// Create DB checkpoint for snapshot
String checkpointPrefix = store.getDbLocation().getName();
Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(),
checkpointPrefix + snapshotInfo.getCheckpointDir());
if (Files.exists(snapshotDirPath)) {
snapshotDirExist = true;
dbCheckpoint = new RocksDBCheckpoint(snapshotDirPath);
} else {
dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName());
}

// Clean up active DB's deletedTable right after checkpoint is taken,
// There is no need to take any lock as of now, because transactions are flushed sequentially.
deleteKeysFromDelKeyTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());
// Clean up deletedDirectoryTable as well
deleteKeysFromDelDirTableInSnapshotScope(omMetadataManager,
snapshotInfo.getVolumeName(), snapshotInfo.getBucketName());

if (dbCheckpoint != null && snapshotDirExist) {
LOG.info("Checkpoint : {} for snapshot {} already exists.",
dbCheckpoint.getCheckpointLocation(), snapshotInfo.getName());
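
A condensed, hedged paraphrase of the createOmSnapshotCheckpoint change above, reusing the TableLockSketch from the earlier sketch: previously the checkpoint plus snapshot-scope cleanup of deletedTable and deletedDirTable ran inside a double write-lock bracket to keep KeyDeletingTask and DirDeletingTask out; now the same sequence runs unguarded because transactions are flushed sequentially.

```java
// Not the real method body; a stand-in Runnable represents "take the checkpoint,
// then delete the snapshot-scoped entries from deletedTable and deletedDirTable".
final class SnapshotCheckpointFlowSketch {

  // Before this commit: both table write locks were held for the whole sequence.
  static void before(TableLockSketch locks, Runnable checkpointAndCleanup) {
    locks.getTableLock("deletedDirTable").writeLock().lock(); // blocks DirDeletingTask
    locks.getTableLock("deletedTable").writeLock().lock();    // blocks KeyDeletingTask
    try {
      checkpointAndCleanup.run();
    } finally {
      locks.getTableLock("deletedTable").writeLock().unlock();
      locks.getTableLock("deletedDirTable").writeLock().unlock();
    }
  }

  // After this commit: the same work, with no table lock held.
  static void after(Runnable checkpointAndCleanup) {
    checkpointAndCleanup.run();
  }
}
```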
Changed file 4 of 5
@@ -154,13 +154,6 @@ public BackgroundTaskResult call() {
List<Pair<String, OmKeyInfo>> allSubDirList
= new ArrayList<>((int) remainNum);

// Acquire active DB deletedDirectoryTable write lock because of the
// deletedDirTable read-write here to avoid interleaving with
// the table range delete operation in createOmSnapshotCheckpoint()
// that is called from OMSnapshotCreateResponse#addToDBBatch.
getOzoneManager().getMetadataManager().getTableLock(
OmMetadataManagerImpl.DELETED_DIR_TABLE).writeLock().lock();

Table.KeyValue<String, OmKeyInfo> pendingDeletedDirInfo;
try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
deleteTableIterator = getOzoneManager().getMetadataManager().
@@ -216,10 +209,6 @@ public BackgroundTaskResult call() {
} catch (IOException e) {
LOG.error("Error while running delete directories and files " +
"background task. Will retry at next run.", e);
} finally {
// Release deletedDirectoryTable write lock
getOzoneManager().getMetadataManager().getTableLock(
OmMetadataManagerImpl.DELETED_DIR_TABLE).writeLock().unlock();
}
}

Changed file 5 of 5
@@ -187,12 +187,6 @@ public BackgroundTaskResult call() {
final long run = getRunCount().incrementAndGet();
LOG.debug("Running KeyDeletingService {}", run);

// Acquire active DB deletedTable write lock because of the
// deletedTable read-write here to avoid interleaving with
// the table range delete operation in createOmSnapshotCheckpoint()
// that is called from OMSnapshotCreateResponse#addToDBBatch.
manager.getMetadataManager().getTableLock(
OmMetadataManagerImpl.DELETED_TABLE).writeLock().lock();
int delCount = 0;
try {
// TODO: [SNAPSHOT] HDDS-7968. Reclaim eligible key blocks in
@@ -214,10 +208,6 @@ public BackgroundTaskResult call() {
} catch (IOException e) {
LOG.error("Error while running delete keys background task. Will " +
"retry at next run.", e);
} finally {
// Release deletedTable write lock
manager.getMetadataManager().getTableLock(
OmMetadataManagerImpl.DELETED_TABLE).writeLock().unlock();
}

try {
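
For completeness, both deleting services' call() bodies now reduce to the shape left by the two hunks above: do the iteration and deletion inside a plain try/catch, log and retry on failure, with no finally block releasing a table lock. A rough, illustrative skeleton (the Step interface and callOnce name are made up for the sketch):

```java
import java.io.IOException;

// Rough skeleton of a deleting service task after the change: the lock acquire
// before the try and the unlock in finally are gone; errors are logged and the
// task simply retries on its next scheduled run.
final class DeletingTaskSketch {
  interface Step { void run() throws IOException; }

  static void callOnce(Step iterateAndDelete) {
    try {
      iterateAndDelete.run(); // e.g. iterate deletedTable / deletedDirTable and submit deletes
    } catch (IOException e) {
      System.err.println("Error while running delete background task; will retry at next run: " + e);
    }
  }
}
```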
