
Commit dc2114b

HDFS-17414. [FGL] RPCs in DatanodeProtocol support fine-grained lock (apache#6649)
1 parent 8a3eeca commit dc2114b

8 files changed: +54 -32 lines changed

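The change applies one pattern throughout: coarse FSNamesystem lock calls (readLock(), writeLock(), hasWriteLock(), writeUnlock(String)) are replaced by variants that name a lock mode, FSNamesystemLockMode.BM for block-management state, FS for namespace state, and GLOBAL where both are touched. The sketch below only illustrates that pattern, assuming the mode-aware signatures visible in the diffs; the class and method names in the sketch are hypothetical and not part of this patch.

import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

/**
 * Illustrative sketch only, not part of this commit: how a DatanodeProtocol-style
 * RPC path takes the block-management (BM) lock mode instead of the coarse
 * namesystem write lock.
 */
class FglLockPatternSketch {
  // Hypothetical helper; the real handlers live in FSNamesystem / BlockManager.
  static void updateBlockManagementState(Namesystem namesystem) {
    // Datanode and block bookkeeping touch only block-management state,
    // so take the BM mode rather than the global write lock.
    namesystem.writeLock(FSNamesystemLockMode.BM);
    try {
      // ... mutate DatanodeManager / BlockManager state here ...
      assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
    } finally {
      // The mode-aware unlock keeps the operation-name argument used for
      // lock-held-time logging: writeUnlock(FSNamesystemLockMode, String).
      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateBlockManagementState");
    }
  }
}

With this split, the heartbeat, block-report lease, and bad-block paths in the hunks below contend only on the BM lock, while paths that also update the namespace (for example commitBlockSynchronization) take GLOBAL.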

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 10 additions & 10 deletions
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
@@ -1895,7 +1896,7 @@ private Block getBlockOnStorage(BlockInfo storedBlock,
    */
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     final Block reportedBlock = blk.getLocalBlock();
     final BlockInfo storedBlock = getStoredBlock(reportedBlock);
     if (storedBlock == null) {
@@ -2708,7 +2709,7 @@ void processPendingReconstructions() {
   }
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
-    assert namesystem.hasReadLock();
+    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
@@ -2730,7 +2731,6 @@ public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
 
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws IOException {
-    assert namesystem.hasWriteLock();
     datanodeManager.registerDatanode(nodeReg);
     bmSafeMode.checkSafeMode();
   }
@@ -2997,7 +2997,7 @@ void removeDNLeaseIfNeeded(DatanodeDescriptor node) {
 
   public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
       final BlockReportContext context) throws IOException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node;
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -3015,7 +3015,7 @@ public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
         }
       }
     } finally {
-      namesystem.writeUnlock("removeBRLeaseIfNeeded");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
     }
   }
 
@@ -3207,7 +3207,7 @@ public void markBlockReplicasAsCorrupt(Block oldBlock,
       BlockInfo block,
       long oldGenerationStamp, long oldNumBytes,
       DatanodeStorageInfo[] newStorages) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
@@ -4415,7 +4415,7 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
-    assert (namesystem.hasWriteLock());
+    assert (namesystem.hasWriteLock(FSNamesystemLockMode.BM));
     {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
@@ -4929,7 +4929,7 @@ public int getTotalBlocks() {
   }
 
   public void removeBlock(BlockInfo block) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     // No need to ACK blocks that are being removed entirely
     // from the namespace, since the removal of the associated
     // file already removes them from the block map below.
@@ -4972,7 +4972,7 @@ public void updateLastBlock(BlockInfo lastBlock, ExtendedBlock newBlock) {
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
@@ -4991,7 +4991,7 @@ private void updateNeededReconstructions(final BlockInfo block,
           repl.outOfServiceReplicas(), oldExpectedReplicas);
       }
     } finally {
-      namesystem.writeUnlock("updateNeededReconstructions");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateNeededReconstructions");
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

Lines changed: 4 additions & 3 deletions
@@ -22,6 +22,7 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -855,7 +856,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo) {
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo,
       boolean removeBlocksFromBlocksMap) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     heartbeatManager.removeDatanode(nodeInfo);
     if (removeBlocksFromBlocksMap) {
       blockManager.removeBlocksAssociatedTo(nodeInfo);
@@ -874,7 +875,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo,
    */
   public void removeDatanode(final DatanodeID node)
       throws UnregisteredNodeException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
       if (descriptor != null) {
@@ -884,7 +885,7 @@ public void removeDatanode(final DatanodeID node)
             + node + " does not exist");
       }
     } finally {
-      namesystem.writeUnlock("removeDatanode");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDatanode");
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java

Lines changed: 2 additions & 1 deletion
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -173,7 +174,7 @@ public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
 
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
     if (providedEnabled) {
-      assert lock.hasWriteLock() : "Not holding write lock";
+      assert lock.hasWriteLock(FSNamesystemLockMode.BM) : "Not holding write lock";
       providedDescriptor.remove(dnToRemove);
       // if all datanodes fail, set the block report count to 0
       if (providedDescriptor.activeProvidedDatanodes() == 0) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

Lines changed: 2 additions & 1 deletion
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -104,7 +105,7 @@ static boolean unprotectedRemoveBlock(
    */
   static void persistBlocks(
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-    assert fsd.getFSNamesystem().hasWriteLock();
+    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
     Preconditions.checkArgument(file.isUnderConstruction());
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     if(NameNode.stateChangeLog.isDebugEnabled()) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

Lines changed: 1 addition & 1 deletion
@@ -1103,7 +1103,7 @@ static void unprotectedUpdateCount(INodesInPath inodesInPath,
    */
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
       INodesInPath inodes) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
     INodesInPath iip = inodes != null ? inodes :
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
     INodeFile fileINode = iip.getLastINode().asFile();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 16 additions & 15 deletions
@@ -1890,11 +1890,11 @@ public void cpUnlock() {
 
 
   NamespaceInfo getNamespaceInfo() {
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       return unprotectedGetNamespaceInfo();
     } finally {
-      readUnlock("getNamespaceInfo");
+      readUnlock(FSNamesystemLockMode.FS, "getNamespaceInfo");
     }
   }
 
@@ -3901,7 +3901,7 @@ Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile
   void commitOrCompleteLastBlock(
       final INodeFile fileINode, final INodesInPath iip,
       final Block commitBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
     Preconditions.checkArgument(fileINode.isUnderConstruction());
     blockManager.commitOrCompleteLastBlock(fileINode, commitBlock, iip);
   }
@@ -3923,7 +3923,7 @@ void addCommittedBlocksToPending(final INodeFile pendingFile) {
 
   void finalizeINodeFileUnderConstruction(String src, INodeFile pendingFile,
       int latestSnapshot, boolean allowCommittedBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
 
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
     if (uc == null) {
@@ -3992,7 +3992,8 @@ INodeFile getBlockCollection(BlockInfo b) {
 
   @Override
   public INodeFile getBlockCollection(long id) {
-    assert hasReadLock() : "Accessing INode id = " + id + " without read lock";
+    assert hasReadLock(FSNamesystemLockMode.FS)
+        : "Accessing INode id = " + id + " without read lock";
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -4010,7 +4011,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
         + ")");
     checkOperation(OperationCategory.WRITE);
     final String src;
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     boolean copyTruncate = false;
     BlockInfo truncatedBlock = null;
     try {
@@ -4145,7 +4146,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
       }
       blockManager.successfulBlockRecovery(storedBlock);
     } finally {
-      writeUnlock("commitBlockSynchronization");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "commitBlockSynchronization");
     }
     getEditLog().logSync();
     if (closeFile) {
@@ -4406,11 +4407,11 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
    * @see org.apache.hadoop.hdfs.server.datanode.DataNode
    */
   void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
       blockManager.registerDatanode(nodeReg);
     } finally {
-      writeUnlock("registerDatanode");
+      writeUnlock(FSNamesystemLockMode.BM, "registerDatanode");
     }
   }
 
@@ -4443,7 +4444,7 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       @Nonnull SlowPeerReports slowPeers,
       @Nonnull SlowDiskReports slowDisks)
           throws IOException {
-    readLock();
+    readLock(FSNamesystemLockMode.BM);
     try {
       //get datanode commands
       DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
@@ -4466,7 +4467,7 @@ nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
       return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo,
           blockReportLeaseId, isSlownode);
     } finally {
-      readUnlock("handleHeartbeat");
+      readUnlock(FSNamesystemLockMode.BM, "handleHeartbeat");
     }
   }
 
@@ -4528,7 +4529,7 @@ void checkAvailableResources() {
    * @param file
    */
   private void closeFile(String path, INodeFile file) {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.FS);
     // file is closed
     getEditLog().logCloseFile(path, file);
     NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted to the file system",
@@ -5874,7 +5875,7 @@ private long nextBlockId(BlockType blockType) throws IOException {
   }
 
   boolean isFileDeleted(INodeFile file) {
-    assert hasReadLock();
+    assert hasReadLock(FSNamesystemLockMode.FS);
     // Not in the inodeMap or in the snapshot but marked deleted.
     if (dir.getInode(file.getId()) == null) {
       return true;
@@ -5952,7 +5953,7 @@ private INodeFile checkUCBlock(ExtendedBlock block,
    */
   void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     checkOperation(OperationCategory.WRITE);
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
       checkOperation(OperationCategory.WRITE);
       for (int i = 0; i < blocks.length; i++) {
@@ -5968,7 +5969,7 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
         }
       }
     } finally {
-      writeUnlock("reportBadBlocks");
+      writeUnlock(FSNamesystemLockMode.BM, "reportBadBlocks");
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

Lines changed: 5 additions & 0 deletions
@@ -21,6 +21,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
@@ -166,6 +167,10 @@ public void setupMockCluster() throws IOException {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
     Mockito.doReturn(true).when(fsn).isRunning();
     //Make shouldPopulaeReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
