
Commit f64cb04

ZanderXu (zengqiang.xu) authored and committed
HDFS-17414. [FGL] RPCs in DatanodeProtocol support fine-grained lock (#6649)
1 parent 5c7719b commit f64cb04

File tree

8 files changed: +54 -32 lines changed

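The change applies one pattern across all eight files: code paths serving DatanodeProtocol RPCs stop taking the monolithic FSNamesystem lock and instead acquire it in an explicit mode (FSNamesystemLockMode.BM for block-management state, FS for namespace state, GLOBAL when both are touched), and the matching unlock and assert calls name the same mode. A minimal sketch of the acquire/release shape follows; the class and method names are hypothetical, only the Namesystem lock calls come from the patch.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

class DatanodeRpcLockingSketch {
  private final Namesystem namesystem;

  DatanodeRpcLockingSketch(Namesystem namesystem) {
    this.namesystem = namesystem;
  }

  void handleDatanodeRpc() throws IOException {
    // Take only the block-management (BM) partition of the namesystem lock,
    // leaving namespace (FS) operations free to run concurrently.
    namesystem.writeLock(FSNamesystemLockMode.BM);
    try {
      // ... mutate block-management state (DatanodeManager, BlocksMap, leases)
    } finally {
      // The unlock names the same mode plus an operation tag for lock metrics.
      namesystem.writeUnlock(FSNamesystemLockMode.BM, "handleDatanodeRpc");
    }
  }
}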

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 10 additions & 10 deletions
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
@@ -1895,7 +1896,7 @@ private Block getBlockOnStorage(BlockInfo storedBlock,
    */
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     final Block reportedBlock = blk.getLocalBlock();
     final BlockInfo storedBlock = getStoredBlock(reportedBlock);
     if (storedBlock == null) {
@@ -2708,7 +2709,7 @@ void processPendingReconstructions() {
   }
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
-    assert namesystem.hasReadLock();
+    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
@@ -2730,7 +2731,6 @@ public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
 
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws IOException {
-    assert namesystem.hasWriteLock();
     datanodeManager.registerDatanode(nodeReg);
     bmSafeMode.checkSafeMode();
   }
@@ -2997,7 +2997,7 @@ void removeDNLeaseIfNeeded(DatanodeDescriptor node) {
 
   public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
       final BlockReportContext context) throws IOException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node;
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -3015,7 +3015,7 @@ public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
         }
       }
     } finally {
-      namesystem.writeUnlock("removeBRLeaseIfNeeded");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
     }
   }
 
@@ -3207,7 +3207,7 @@ public void markBlockReplicasAsCorrupt(Block oldBlock,
       BlockInfo block,
       long oldGenerationStamp, long oldNumBytes,
       DatanodeStorageInfo[] newStorages) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
@@ -4426,7 +4426,7 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
-    assert (namesystem.hasWriteLock());
+    assert (namesystem.hasWriteLock(FSNamesystemLockMode.BM));
     {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
@@ -4941,7 +4941,7 @@ public int getTotalBlocks() {
   }
 
   public void removeBlock(BlockInfo block) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     // No need to ACK blocks that are being removed entirely
     // from the namespace, since the removal of the associated
     // file already removes them from the block map below.
@@ -4984,7 +4984,7 @@ public void updateLastBlock(BlockInfo lastBlock, ExtendedBlock newBlock) {
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
@@ -5003,7 +5003,7 @@ private void updateNeededReconstructions(final BlockInfo block,
           repl.outOfServiceReplicas(), oldExpectedReplicas);
       }
     } finally {
-      namesystem.writeUnlock("updateNeededReconstructions");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateNeededReconstructions");
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

Lines changed: 4 additions & 3 deletions
@@ -22,6 +22,7 @@
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -855,7 +856,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo) {
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo,
       boolean removeBlocksFromBlocksMap) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     heartbeatManager.removeDatanode(nodeInfo);
     if (removeBlocksFromBlocksMap) {
       blockManager.removeBlocksAssociatedTo(nodeInfo);
@@ -874,7 +875,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo,
    */
   public void removeDatanode(final DatanodeID node)
       throws UnregisteredNodeException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
       if (descriptor != null) {
@@ -884,7 +885,7 @@ public void removeDatanode(final DatanodeID node)
           + node + " does not exist");
       }
     } finally {
-      namesystem.writeUnlock("removeDatanode");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDatanode");
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java

Lines changed: 2 additions & 1 deletion
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -173,7 +174,7 @@ public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
 
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
     if (providedEnabled) {
-      assert lock.hasWriteLock() : "Not holding write lock";
+      assert lock.hasWriteLock(FSNamesystemLockMode.BM) : "Not holding write lock";
       providedDescriptor.remove(dnToRemove);
       // if all datanodes fail, set the block report count to 0
       if (providedDescriptor.activeProvidedDatanodes() == 0) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

Lines changed: 2 additions & 1 deletion
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -104,7 +105,7 @@ static boolean unprotectedRemoveBlock(
    */
   static void persistBlocks(
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-    assert fsd.getFSNamesystem().hasWriteLock();
+    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
     Preconditions.checkArgument(file.isUnderConstruction());
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     if(NameNode.stateChangeLog.isDebugEnabled()) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

Lines changed: 1 addition & 1 deletion
@@ -1106,7 +1106,7 @@ static void unprotectedUpdateCount(INodesInPath inodesInPath,
    */
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
       INodesInPath inodes) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
     INodesInPath iip = inodes != null ? inodes :
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
     INodeFile fileINode = iip.getLastINode().asFile();

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 16 additions & 15 deletions
@@ -1894,11 +1894,11 @@ public void cpUnlock() {
 
 
   NamespaceInfo getNamespaceInfo() {
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       return unprotectedGetNamespaceInfo();
     } finally {
-      readUnlock("getNamespaceInfo");
+      readUnlock(FSNamesystemLockMode.FS, "getNamespaceInfo");
     }
   }
 
@@ -3905,7 +3905,7 @@ Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile
   void commitOrCompleteLastBlock(
       final INodeFile fileINode, final INodesInPath iip,
       final Block commitBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
     Preconditions.checkArgument(fileINode.isUnderConstruction());
     blockManager.commitOrCompleteLastBlock(fileINode, commitBlock, iip);
   }
@@ -3927,7 +3927,7 @@ void addCommittedBlocksToPending(final INodeFile pendingFile) {
 
   void finalizeINodeFileUnderConstruction(String src, INodeFile pendingFile,
       int latestSnapshot, boolean allowCommittedBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
 
     FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
     if (uc == null) {
@@ -3996,7 +3996,8 @@ INodeFile getBlockCollection(BlockInfo b) {
 
   @Override
   public INodeFile getBlockCollection(long id) {
-    assert hasReadLock() : "Accessing INode id = " + id + " without read lock";
+    assert hasReadLock(FSNamesystemLockMode.FS)
+        : "Accessing INode id = " + id + " without read lock";
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -4014,7 +4015,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
         + ")");
     checkOperation(OperationCategory.WRITE);
     final String src;
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     boolean copyTruncate = false;
     BlockInfo truncatedBlock = null;
     try {
@@ -4149,7 +4150,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
       }
       blockManager.successfulBlockRecovery(storedBlock);
     } finally {
-      writeUnlock("commitBlockSynchronization");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "commitBlockSynchronization");
     }
     getEditLog().logSync();
     if (closeFile) {
@@ -4410,11 +4411,11 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
    * @see org.apache.hadoop.hdfs.server.datanode.DataNode
    */
   void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
       blockManager.registerDatanode(nodeReg);
     } finally {
-      writeUnlock("registerDatanode");
+      writeUnlock(FSNamesystemLockMode.BM, "registerDatanode");
     }
   }
 
@@ -4447,7 +4448,7 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       @Nonnull SlowPeerReports slowPeers,
       @Nonnull SlowDiskReports slowDisks)
       throws IOException {
-    readLock();
+    readLock(FSNamesystemLockMode.BM);
     try {
       //get datanode commands
       DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
@@ -4470,7 +4471,7 @@ nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
       return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo,
           blockReportLeaseId, isSlownode);
     } finally {
-      readUnlock("handleHeartbeat");
+      readUnlock(FSNamesystemLockMode.BM, "handleHeartbeat");
     }
   }
 
@@ -4532,7 +4533,7 @@ void checkAvailableResources() {
    * @param file
    */
   private void closeFile(String path, INodeFile file) {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.FS);
     // file is closed
     getEditLog().logCloseFile(path, file);
     NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted to the file system",
@@ -5877,7 +5878,7 @@ private long nextBlockId(BlockType blockType) throws IOException {
   }
 
   boolean isFileDeleted(INodeFile file) {
-    assert hasReadLock();
+    assert hasReadLock(FSNamesystemLockMode.FS);
     // Not in the inodeMap or in the snapshot but marked deleted.
     if (dir.getInode(file.getId()) == null) {
       return true;
@@ -5955,7 +5956,7 @@ private INodeFile checkUCBlock(ExtendedBlock block,
    */
   void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     checkOperation(OperationCategory.WRITE);
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
      checkOperation(OperationCategory.WRITE);
      for (int i = 0; i < blocks.length; i++) {
@@ -5971,7 +5972,7 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
        }
      }
    } finally {
-     writeUnlock("reportBadBlocks");
+     writeUnlock(FSNamesystemLockMode.BM, "reportBadBlocks");
    }
  }
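The FSNamesystem hunks above also show how the mode is chosen per method: DataNode-facing paths that only touch block-management state (registerDatanode, handleHeartbeat, reportBadBlocks) use BM, namespace-only paths (getNamespaceInfo, closeFile, isFileDeleted) use FS, and paths that update blocks and INodes together (commitOrCompleteLastBlock, commitBlockSynchronization) use GLOBAL. A hedged sketch of that rule as lock assertions; the method names here are illustrative, only the hasWriteLock overload comes from the patch.

import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

abstract class LockModeChoiceSketch {
  // Stand-in for FSNamesystem.hasWriteLock(FSNamesystemLockMode).
  abstract boolean hasWriteLock(FSNamesystemLockMode mode);

  void blockManagementOnly() {
    // e.g. marking a replica corrupt: only block-management state changes.
    assert hasWriteLock(FSNamesystemLockMode.BM);
  }

  void namespaceOnly() {
    // e.g. logging a file close to the edit log: only namespace state changes.
    assert hasWriteLock(FSNamesystemLockMode.FS);
  }

  void touchesBoth() {
    // e.g. committing the last block of a file: blocks and INodes both change,
    // so the global mode covering both partitions is required.
    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
  }
}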

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

Lines changed: 5 additions & 0 deletions
@@ -21,6 +21,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
@@ -166,6 +167,10 @@ public void setupMockCluster() throws IOException {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
     Mockito.doReturn(true).when(fsn).isRunning();
     //Make shouldPopulaeReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
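Because the mocked FSNamesystem in TestBlockManager stubs each lock-check overload separately, any lock mode newly asserted by production code needs its own stub; an unstubbed overload falls back to Mockito's default of false and the assert trips. A small self-contained sketch of that setup, assuming the same Mockito mock used in the test above (the helper name is hypothetical):

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
import org.mockito.Mockito;

class FineGrainedLockMockSketch {
  // Returns an FSNamesystem mock that reports the relevant locks as held,
  // for both the legacy no-arg checks and the mode-scoped overloads.
  static FSNamesystem mockLockedNamesystem() {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    Mockito.doReturn(true).when(fsn).hasWriteLock();
    Mockito.doReturn(true).when(fsn).hasReadLock();
    for (FSNamesystemLockMode mode : new FSNamesystemLockMode[] {
        FSNamesystemLockMode.GLOBAL, FSNamesystemLockMode.BM}) {
      Mockito.doReturn(true).when(fsn).hasWriteLock(mode);
      Mockito.doReturn(true).when(fsn).hasReadLock(mode);
    }
    return fsn;
  }
}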
