
Commit 2ffd842

HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.
1 parent d3fed8e commit 2ffd842

File tree

5 files changed: +45, -34 lines

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

Lines changed: 3 additions & 0 deletions
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8623. Refactor NameNode handling of invalid, corrupt, and under-recovery
     blocks. (Zhe Zhang via jing9)
 
+    HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+    DatanodeStorageInfo. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java

Lines changed: 2 additions & 2 deletions
@@ -452,8 +452,8 @@ private void rescanFile(CacheDirective directive, INodeFile file) {
         file.getFullPathName(), cachedTotal, neededTotal);
   }
 
-  private String findReasonForNotCaching(CachedBlock cblock,
-      BlockInfo blockInfo) {
+  private String findReasonForNotCaching(CachedBlock cblock,
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

Lines changed: 17 additions & 16 deletions
@@ -64,7 +64,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public final DecommissioningStatus decommissioningStatus =
+      new DecommissioningStatus();
 
   private long curBlockReportId = 0;

@@ -115,7 +116,7 @@ synchronized List<E> poll(int numBlocks) {
       return null;
     }
 
-    List<E> results = new ArrayList<E>();
+    List<E> results = new ArrayList<>();
     for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
       results.add(blockq.poll());
     }

@@ -135,7 +136,7 @@ synchronized void clear() {
   }
 
   private final Map<String, DatanodeStorageInfo> storageMap =
-      new HashMap<String, DatanodeStorageInfo>();
+      new HashMap<>();
 
   /**
    * A list of CachedBlock objects on this datanode.

@@ -217,22 +218,24 @@ public CachedBlocksList getPendingUncached() {
   private long bandwidth;
 
   /** A queue of blocks to be replicated by this datanode */
-  private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+  private final BlockQueue<BlockTargetPair> replicateBlocks =
+      new BlockQueue<>();
   /** A queue of blocks to be recovered by this datanode */
   private final BlockQueue<BlockInfoUnderConstruction> recoverBlocks =
-      new BlockQueue<BlockInfoUnderConstruction>();
+      new BlockQueue<>();
   /** A set of blocks to be invalidated by this datanode */
-  private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+  private final LightWeightHashSet<Block> invalidateBlocks =
+      new LightWeightHashSet<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
    * in case of errors (e.g. datanode does not report if an error occurs
    * while writing the block).
    */
   private EnumCounters<StorageType> currApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private EnumCounters<StorageType> prevApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   private int volumeFailures = 0;

@@ -276,6 +279,7 @@ public DatanodeStorageInfo getStorageInfo(String storageID) {
       return storageMap.get(storageID);
     }
   }
+
   DatanodeStorageInfo[] getStorageInfos() {
     synchronized (storageMap) {
       final Collection<DatanodeStorageInfo> storages = storageMap.values();

@@ -321,7 +325,7 @@ List<DatanodeStorageInfo> removeZombieStorages() {
             Long.toHexString(curBlockReportId));
         iter.remove();
         if (zombies == null) {
-          zombies = new LinkedList<DatanodeStorageInfo>();
+          zombies = new LinkedList<>();
         }
         zombies.add(storageInfo);
       }

@@ -350,10 +354,7 @@ boolean removeBlock(BlockInfo b) {
    */
   boolean removeBlock(String storageID, BlockInfo b) {
     DatanodeStorageInfo s = getStorageInfo(storageID);
-    if (s != null) {
-      return s.removeBlock(b);
-    }
-    return false;
+    return s != null && s.removeBlock(b);
   }
 
   public void resetBlocks() {

@@ -449,7 +450,7 @@ public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
           + this.volumeFailures + " to " + volFailures);
       synchronized (storageMap) {
         failedStorageInfos =
-            new HashSet<DatanodeStorageInfo>(storageMap.values());
+            new HashSet<>(storageMap.values());
       }
     }

@@ -505,7 +506,7 @@ private void pruneStorageMap(final StorageReport[] reports) {
     HashMap<String, DatanodeStorageInfo> excessStorages;
 
     // Init excessStorages with all known storages.
-    excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+    excessStorages = new HashMap<>(storageMap);
 
     // Remove storages that the DN reported in the heartbeat.
     for (final StorageReport report : reports) {

@@ -542,7 +543,7 @@ private static class BlockIterator implements Iterator<BlockInfo> {
     private final List<Iterator<BlockInfo>> iterators;
 
     private BlockIterator(final DatanodeStorageInfo... storages) {
-      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+      List<Iterator<BlockInfo>> iterators = new ArrayList<>();
       for (DatanodeStorageInfo e : storages) {
         iterators.add(e.getBlockIterator());
       }
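
Most of the edits in this file swap explicit generic type arguments for the Java 7 diamond operator, and the removeBlock change collapses an if/return pair into one short-circuit expression. A minimal standalone sketch of both patterns (the class and helper names below are illustrative, not from the patch):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class CleanupPatternsDemo {
  // Pre-Java-7 style: the type arguments are spelled out twice.
  private final Map<String, List<Integer>> verbose =
      new HashMap<String, List<Integer>>();

  // Diamond operator: the compiler infers the constructor's type
  // arguments from the declared type, so long generic signatures
  // no longer force the line past the style guide's width limit.
  private final Map<String, List<Integer>> concise = new HashMap<>();

  // Same shape as the removeBlock cleanup: the && short-circuits on
  // null, so lookup(key) is never dereferenced when it returns null.
  boolean removeEntry(String key) {
    List<Integer> entries = lookup(key);
    return entries != null && entries.remove(Integer.valueOf(0));
  }

  private List<Integer> lookup(String key) { // hypothetical stand-in
    return concise.get(key);                 // for getStorageInfo()
  }
}

Both rewrites are behavior-preserving; the diamond form compiles to the same bytecode as the explicit one.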

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

Lines changed: 13 additions & 13 deletions
@@ -85,7 +85,7 @@ public class DatanodeManager {
    * Mapping: StorageID -> DatanodeDescriptor
    */
   private final Map<String, DatanodeDescriptor> datanodeMap
-      = new HashMap<String, DatanodeDescriptor>();
+      = new HashMap<>();
 
   /** Cluster network topology */
   private final NetworkTopology networktopology;

@@ -162,7 +162,7 @@ public class DatanodeManager {
    * Software version -> Number of datanodes with this version
    */
   private HashMap<String, Integer> datanodesSoftwareVersions =
-      new HashMap<String, Integer>(4, 0.75f);
+      new HashMap<>(4, 0.75f);
 
   /**
    * The minimum time between resending caching directives to Datanodes,

@@ -217,7 +217,7 @@ public class DatanodeManager {
     // locations of those hosts in the include list and store the mapping
     // in the cache; so future calls to resolve will be fast.
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
-      final ArrayList<String> locations = new ArrayList<String>();
+      final ArrayList<String> locations = new ArrayList<>();
       for (InetSocketAddress addr : hostFileManager.getIncludes()) {
         locations.add(addr.getAddress().getHostAddress());
       }

@@ -370,7 +370,7 @@ public void sortLocatedBlocks(final String targethost,
     // here we should get node but not datanode only .
     Node client = getDatanodeByHost(targethost);
     if (client == null) {
-      List<String> hosts = new ArrayList<String> (1);
+      List<String> hosts = new ArrayList<> (1);
       hosts.add(targethost);
       List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
       if (resolvedHosts != null && !resolvedHosts.isEmpty()) {

@@ -522,7 +522,7 @@ public DatanodeStorageInfo[] getDatanodeStorageInfos(
   void datanodeDump(final PrintWriter out) {
     synchronized (datanodeMap) {
       Map<String,DatanodeDescriptor> sortedDatanodeMap =
-          new TreeMap<String,DatanodeDescriptor>(datanodeMap);
+          new TreeMap<>(datanodeMap);
       out.println("Metasave: Number of datanodes: " + datanodeMap.size());
       for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
         out.println(node.dumpDatanode());

@@ -660,7 +660,7 @@ private boolean shouldCountVersion(DatanodeDescriptor node) {
 
   private void countSoftwareVersions() {
     synchronized(datanodeMap) {
-      HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+      HashMap<String, Integer> versionCount = new HashMap<>();
       for(DatanodeDescriptor dn: datanodeMap.values()) {
         // Check isAlive too because right after removeDatanode(),
         // isDatanodeDead() is still true

@@ -677,7 +677,7 @@ private void countSoftwareVersions() {
 
   public HashMap<String, Integer> getDatanodesSoftwareVersions() {
     synchronized(datanodeMap) {
-      return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+      return new HashMap<> (this.datanodesSoftwareVersions);
     }
   }

@@ -710,7 +710,7 @@ private String resolveNetworkLocationWithFallBackToDefaultLocation (
    */
   private String resolveNetworkLocation (DatanodeID node)
       throws UnresolvedTopologyException {
-    List<String> names = new ArrayList<String>(1);
+    List<String> names = new ArrayList<>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       names.add(node.getIpAddr());
     } else {

@@ -1000,7 +1000,7 @@ nodes with its data cleared (or user can just remove the StorageID
         // If the network location is invalid, clear the cached mappings
         // so that we have a chance to re-add this DataNode with the
         // correct network location later.
-        List<String> invalidNodeNames = new ArrayList<String>(3);
+        List<String> invalidNodeNames = new ArrayList<>(3);
         // clear cache for nodes in IP or Hostname
         invalidNodeNames.add(nodeReg.getIpAddr());
         invalidNodeNames.add(nodeReg.getHostName());

@@ -1275,7 +1275,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
     final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
 
     synchronized(datanodeMap) {
-      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+      nodes = new ArrayList<>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();

@@ -1351,7 +1351,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       VolumeFailureSummary volumeFailureSummary) throws IOException {
     synchronized (heartbeatManager) {
       synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
+        DatanodeDescriptor nodeinfo;
         try {
           nodeinfo = getDatanode(nodeReg);
         } catch(UnregisteredNodeException e) {

@@ -1389,7 +1389,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
       final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
       // Skip stale nodes during recovery - not heart beated for some time (30s by default).
       final List<DatanodeStorageInfo> recoveryLocations =
-          new ArrayList<DatanodeStorageInfo>(storages.length);
+          new ArrayList<>(storages.length);
       for (int i = 0; i < storages.length; i++) {
         if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
           recoveryLocations.add(storages[i]);

@@ -1431,7 +1431,7 @@ public DatanodeCommand[] handleHeartbeat(DatanodeRegistration nodeReg,
         return new DatanodeCommand[] { brCommand };
       }
 
-      final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+      final List<DatanodeCommand> cmds = new ArrayList<>();
       //check pending replication
       List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
           maxTransfers);
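
Besides the diamond conversions, this file drops a redundant "= null" initializer on the nodeinfo local. Leaving the variable unassigned lets the compiler's definite-assignment analysis prove that every path through the try/catch assigns it before use, rather than masking a missed branch with a dummy null. A small sketch of the idea (the fetch helper is hypothetical, standing in for getDatanode(nodeReg)):

class DefiniteAssignmentDemo {
  String describe(int id) {
    String info; // no "= null": the compiler now tracks assignment
    try {
      info = fetch(id);
    } catch (IllegalStateException e) {
      // every branch must assign info, or this method will not compile
      info = "unknown";
    }
    return info;
  }

  private String fetch(int id) {
    if (id < 0) {
      throw new IllegalStateException("bad id " + id);
    }
    return "node-" + id;
  }
}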

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java

Lines changed: 10 additions & 3 deletions
@@ -37,8 +37,9 @@
 public class DatanodeStorageInfo {
   public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
 
-  public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
-    return toDatanodeInfos(Arrays.asList(storages));
+  public static DatanodeInfo[] toDatanodeInfos(
+      DatanodeStorageInfo[] storages) {
+    return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
   }
   static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
     final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];

@@ -58,6 +59,9 @@ static DatanodeDescriptor[] toDatanodeDescriptors(
   }
 
   public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     String[] storageIDs = new String[storages.length];
     for(int i = 0; i < storageIDs.length; i++) {
       storageIDs[i] = storages[i].getStorageID();

@@ -66,6 +70,9 @@ public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
 
   public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     StorageType[] storageTypes = new StorageType[storages.length];
     for(int i = 0; i < storageTypes.length; i++) {
       storageTypes[i] = storages[i].getStorageType();

@@ -380,6 +387,6 @@ static DatanodeStorageInfo getDatanodeStorageInfo(
   }
 
   static enum AddBlockResult {
-    ADDED, REPLACED, ALREADY_EXIST;
+    ADDED, REPLACED, ALREADY_EXIST
   }
 }
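
This file is the one place the cleanup changes behavior: toDatanodeInfos, toStorageIDs, and toStorageTypes now map a null storages array to a null result instead of failing with a NullPointerException at storages.length (or inside Arrays.asList). A sketch of the null-propagating converter pattern, with the element type simplified to String:

import java.util.Arrays;

class NullPropagatingConverterDemo {
  // Mirrors the guard added to toStorageIDs/toStorageTypes: null in,
  // null out, so callers that legitimately hold no storages need no
  // special casing before the conversion.
  static String[] toUpper(String[] values) {
    if (values == null) {
      return null;
    }
    String[] result = new String[values.length];
    for (int i = 0; i < result.length; i++) {
      result[i] = values[i].toUpperCase();
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(Arrays.toString(toUpper(new String[] {"a", "b"}))); // [A, B]
    System.out.println(Arrays.toString(toUpper(null)));                    // null
  }
}

The AddBlockResult edit is purely cosmetic: a trailing semicolon after the last enum constant is legal Java but redundant when the enum declares nothing after its constants.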
