
Commit ca182b6

Revert " HDFS-15160. ReplicaMap, Disk Balancer, Directory Scanner and various FsDatasetImpl methods should use datanode readlock. (#3200)"
This reverts commit 318bc5f.
1 parent 318bc5f commit ca182b6
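
At a glance, this revert swaps every acquireDatasetReadLock() call in the files below back to the coarse acquireDatasetLock(), and drops the read/write-lock configuration keys and the interface method that HDFS-15160 had introduced. A minimal sketch of the before/after locking pattern, using only the method names visible in this diff; the DatasetLocking interface and the two helper methods are illustrative stand-ins, not Hadoop code:

import org.apache.hadoop.util.AutoCloseableLock;

// Hypothetical stand-in mirroring the two FsDatasetSpi acquire methods touched here.
interface DatasetLocking {
  AutoCloseableLock acquireDatasetLock();      // exclusive lock (kept by the revert)
  AutoCloseableLock acquireDatasetReadLock();  // shared read lock (removed by the revert)
}

class DatasetLockUsage {
  // Pattern on the reverted branch (HDFS-15160): readers share the lock.
  static void beforeRevert(DatasetLocking dataset) {
    try (AutoCloseableLock lock = dataset.acquireDatasetReadLock()) {
      // read-only access to replica/volume state
    }
  }

  // Pattern restored by this commit: every caller takes the exclusive dataset lock.
  static void afterRevert(DatasetLocking dataset) {
    try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
      // same access, now serialized with all other dataset operations
    }
  }
}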

File tree

20 files changed: +114 -345 lines


hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ public class InstrumentedReadWriteLock implements ReadWriteLock {
   private final Lock readLock;
   private final Lock writeLock;
 
-  public InstrumentedReadWriteLock(boolean fair, String name, Logger logger,
+  InstrumentedReadWriteLock(boolean fair, String name, Logger logger,
       long minLoggingGapMs, long lockWarningThresholdMs) {
     ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(fair);
     readLock = new InstrumentedReadLock(name, logger, readWriteLock,
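
The only change here narrows the constructor back to package-private visibility; the class still wraps a ReentrantReadWriteLock and hands out instrumented read and write locks. For readers unfamiliar with the semantics being instrumented, a minimal, self-contained sketch using the plain JDK ReentrantReadWriteLock (not the Hadoop wrapper):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ReadWriteLockSketch {
  public static void main(String[] args) {
    // Fair ordering, as the dfs.datanode.lock.fair default (removed below) did.
    ReadWriteLock rwLock = new ReentrantReadWriteLock(true);

    // Any number of threads may hold the read lock at once...
    rwLock.readLock().lock();
    try {
      // read-only work; other readers are not blocked
    } finally {
      rwLock.readLock().unlock();
    }

    // ...but the write lock is exclusive: it waits for all readers and writers.
    rwLock.writeLock().lock();
    try {
      // mutating work; no readers or writers can proceed concurrently
    } finally {
      rwLock.writeLock().unlock();
    }
  }
}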

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

Lines changed: 0 additions & 11 deletions
@@ -549,17 +549,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.lock.suppress.warning.interval";
   public static final long DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT =
       10000; //ms
-  public static final String DFS_DATANODE_LOCK_FAIR_KEY =
-      "dfs.datanode.lock.fair";
-  public static final boolean DFS_DATANODE_LOCK_FAIR_DEFAULT = true;
-  public static final String DFS_DATANODE_LOCK_READ_WRITE_ENABLED_KEY =
-      "dfs.datanode.lock.read.write.enabled";
-  public static final Boolean DFS_DATANODE_LOCK_READ_WRITE_ENABLED_DEFAULT =
-      true;
-  public static final String DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_KEY =
-      "dfs.datanode.lock-reporting-threshold-ms";
-  public static final long
-      DFS_DATANODE_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 300L;
 
   public static final String DFS_UPGRADE_DOMAIN_FACTOR = "dfs.namenode.upgrade.domain.factor";
   public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = DFS_REPLICATION_DEFAULT;
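
The three removed keys controlled lock fairness, whether the split read/write lock was enabled at all, and the lock-hold reporting threshold. For reference, a hedged sketch of how such keys are typically read through Hadoop's Configuration API; the actual DataNode call sites are not part of this diff, and the class below is illustrative only:

import org.apache.hadoop.conf.Configuration;

class DataNodeLockConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Defaults mirror the constants deleted from DFSConfigKeys by this revert.
    boolean fair = conf.getBoolean("dfs.datanode.lock.fair", true);
    boolean readWriteEnabled =
        conf.getBoolean("dfs.datanode.lock.read.write.enabled", true);
    long reportingThresholdMs =
        conf.getLong("dfs.datanode.lock-reporting-threshold-ms", 300L);

    System.out.println(fair + " " + readWriteEnabled + " " + reportingThresholdMs);
  }
}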

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

Lines changed: 1 addition & 1 deletion
@@ -254,7 +254,7 @@ class BlockSender implements java.io.Closeable {
     // the append write.
     ChunkChecksum chunkChecksum = null;
     final long replicaVisibleLength;
-    try(AutoCloseableLock lock = datanode.data.acquireDatasetReadLock()) {
+    try(AutoCloseableLock lock = datanode.data.acquireDatasetLock()) {
       replica = getReplica(block, datanode);
       replicaVisibleLength = replica.getVisibleLength();
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Lines changed: 1 addition & 1 deletion
@@ -3010,7 +3010,7 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
     final BlockConstructionStage stage;
 
     //get replica information
-    try(AutoCloseableLock lock = data.acquireDatasetReadLock()) {
+    try(AutoCloseableLock lock = data.acquireDatasetLock()) {
       Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
           b.getBlockId());
       if (null == storedBlock) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java

Lines changed: 1 addition & 1 deletion
@@ -504,7 +504,7 @@ private Map<String, String> getStorageIDToVolumeBasePathMap()
     Map<String, String> storageIDToVolBasePathMap = new HashMap<>();
     FsDatasetSpi.FsVolumeReferences references;
     try {
-      try(AutoCloseableLock lock = this.dataset.acquireDatasetReadLock()) {
+      try(AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
        references = this.dataset.getFsVolumeReferences();
        for (int ndx = 0; ndx < references.size(); ndx++) {
          FsVolumeSpi vol = references.get(ndx);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java

Lines changed: 1 addition & 11 deletions
@@ -657,19 +657,9 @@ ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
       FsVolumeSpi destination) throws IOException;
 
   /**
-   * Acquire the lock of the data set. This prevents other threads from
-   * modifying the volume map structure inside the datanode, but other changes
-   * are still possible. For example modifying the genStamp of a block instance.
+   * Acquire the lock of the data set.
    */
   AutoCloseableLock acquireDatasetLock();
 
-  /***
-   * Acquire the read lock of the data set. This prevents other threads from
-   * modifying the volume map structure inside the datanode, but other changes
-   * are still possible. For example modifying the genStamp of a block instance.
-   * @return The AutoClosable read lock instance.
-   */
-  AutoCloseableLock acquireDatasetReadLock();
-
   Set<? extends Replica> deepCopyReplica(String bpid) throws IOException;
 }
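
With acquireDatasetReadLock() gone, acquireDatasetLock() is again the only way to guard dataset-wide access. Its return type, org.apache.hadoop.util.AutoCloseableLock, is what makes the try-with-resources pattern in the other files work; a minimal sketch of that idiom, assuming only the class's no-argument constructor (the one BlockPoolSlice below falls back to) and its acquire()/close() pair:

import org.apache.hadoop.util.AutoCloseableLock;

class AutoCloseableLockSketch {
  // A single lock instance shared by all callers of this class.
  private final AutoCloseableLock lock = new AutoCloseableLock();

  void guardedWork() {
    // acquire() returns the lock itself, so close() (i.e. release) runs
    // automatically when the try block exits, even on an exception.
    try (AutoCloseableLock l = lock.acquire()) {
      // critical section
    }
  }
}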

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,6 @@
 import java.util.concurrent.ForkJoinTask;
 import java.util.concurrent.RecursiveAction;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.hdfs.server.datanode.FSCachingGetSpaceUsed;
 import org.slf4j.Logger;
@@ -67,6 +66,7 @@
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
@@ -874,7 +874,7 @@ void shutdown(BlockListAsLongs blocksListToPersist) {
 
   private boolean readReplicasFromCache(ReplicaMap volumeMap,
       final RamDiskReplicaTracker lazyWriteReplicaMap) {
-    ReplicaMap tmpReplicaMap = new ReplicaMap(new ReentrantReadWriteLock());
+    ReplicaMap tmpReplicaMap = new ReplicaMap(new AutoCloseableLock());
     File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
     // Check whether the file exists or not.
    if (!replicaFile.exists()) {
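
The import swap and the constructor argument change go together: with the read/write lock reverted, ReplicaMap is again built around an AutoCloseableLock rather than a ReentrantReadWriteLock. A small hypothetical sketch of a map guarded that way; this is not the real ReplicaMap, only the shape of the pattern its restored constructor implies:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.util.AutoCloseableLock;

// Hypothetical: every access goes through one AutoCloseableLock supplied at
// construction, mirroring new ReplicaMap(new AutoCloseableLock()) above.
class GuardedMapSketch<K, V> {
  private final AutoCloseableLock lock;
  private final Map<K, V> map = new HashMap<>();

  GuardedMapSketch(AutoCloseableLock lock) {
    this.lock = lock;
  }

  V get(K key) {
    try (AutoCloseableLock l = lock.acquire()) {
      return map.get(key);
    }
  }

  void put(K key, V value) {
    try (AutoCloseableLock l = lock.acquire()) {
      map.put(key, value);
    }
  }
}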
