Commit c17439c
HDFS-8499. Refactor BlockInfo class hierarchy with static helper class. Contributed by Zhe Zhang.
1 parent 46b0b41 commit c17439c

30 files changed: 418 additions, 232 deletions
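Only four of the 30 changed files appear in this excerpt. Pieced together from the hunks below, the refactored hierarchy looks roughly like the following outline (class bodies elided; this is a sketch inferred from the diff, not the full source):

// Outline of the hierarchy after this commit, inferred from the hunks below.
public abstract class BlockInfo extends Block {
  Object[] triplets;  // loosened from protected to package-private so a
                      // static helper in the same package can reach it

  // New template-method hook: each subclass builds its own
  // under-construction counterpart.
  abstract BlockInfoUnderConstruction convertCompleteBlockToUC(
      HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets);
}

public class BlockInfoContiguous extends BlockInfo {
  // Triplet bookkeeping (addStorage, removeStorage, numNodes, replaceBlock)
  // now delegates to the static helper ContiguousBlockStorageOp.
}

// BlockInfoContiguousUnderConstruction is split into an abstract
// BlockInfoUnderConstruction (used in the public signatures below) and the
// concrete BlockInfoUnderConstructionContiguous; neither file is shown in
// this excerpt.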

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
Lines changed: 3 additions & 0 deletions

@@ -626,6 +626,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7923. The DataNodes should rate-limit their full block reports by
     asking the NN on heartbeat messages (cmccabe)
 
+    HDFS-8499. Refactor BlockInfo class hierarchy with static helper class.
+    (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
Lines changed: 1 addition & 1 deletion

@@ -79,7 +79,7 @@ public interface BlockCollection {
    * Convert the last block of the collection to an under-construction block
    * and set the locations.
    */
-  public BlockInfoContiguousUnderConstruction setLastBlock(BlockInfo lastBlock,
+  public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
       DatanodeStorageInfo[] targets) throws IOException;
 
   /**

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
Lines changed: 13 additions & 11 deletions

@@ -51,7 +51,7 @@ public abstract class BlockInfo extends Block
    * per replica is 42 bytes (LinkedList#Entry object per replica) versus 16
    * bytes using the triplets.
    */
-  protected Object[] triplets;
+  Object[] triplets;
 
   /**
    * Construct an entry for blocksmap
@@ -295,7 +295,7 @@ public BlockInfo moveBlockToHead(BlockInfo head,
   /**
    * BlockInfo represents a block that is not being constructed.
    * In order to start modifying the block, the BlockInfo should be converted
-   * to {@link BlockInfoContiguousUnderConstruction}.
+   * to {@link BlockInfoUnderConstruction}.
    * @return {@link BlockUCState#COMPLETE}
    */
   public BlockUCState getBlockUCState() {
@@ -312,27 +312,29 @@ public boolean isComplete() {
   }
 
   /**
-   * Convert a complete block to an under construction block.
+   * Convert a block to an under construction block.
    * @return BlockInfoUnderConstruction - an under construction block.
    */
-  public BlockInfoContiguousUnderConstruction convertToBlockUnderConstruction(
+  public BlockInfoUnderConstruction convertToBlockUnderConstruction(
       BlockUCState s, DatanodeStorageInfo[] targets) {
     if(isComplete()) {
-      BlockInfoContiguousUnderConstruction ucBlock =
-          new BlockInfoContiguousUnderConstruction(this,
-          getBlockCollection().getPreferredBlockReplication(), s, targets);
-      ucBlock.setBlockCollection(getBlockCollection());
-      return ucBlock;
+      return convertCompleteBlockToUC(s, targets);
     }
     // the block is already under construction
-    BlockInfoContiguousUnderConstruction ucBlock =
-        (BlockInfoContiguousUnderConstruction)this;
+    BlockInfoUnderConstruction ucBlock =
+        (BlockInfoUnderConstruction)this;
     ucBlock.setBlockUCState(s);
     ucBlock.setExpectedLocations(targets);
     ucBlock.setBlockCollection(getBlockCollection());
     return ucBlock;
   }
 
+  /**
+   * Convert a complete block to an under construction block.
+   */
+  abstract BlockInfoUnderConstruction convertCompleteBlockToUC(
+      BlockUCState s, DatanodeStorageInfo[] targets);
+
   @Override
   public int hashCode() {
     // Super implementation is sufficient
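With this change, convertToBlockUnderConstruction becomes a template method: the transition logic shared by every block type (UC state, expected locations, block collection) stays in the abstract base, and only the construction of the concrete under-construction object moves into the convertCompleteBlockToUC hook. A minimal sketch of what a future subclass would implement; BlockInfoOther and BlockInfoUnderConstructionOther are hypothetical names invented for this sketch, not part of the commit:

// Hypothetical subclass illustrating the new extension point.
public class BlockInfoOther extends BlockInfo {
  // addStorage/removeStorage/numNodes/replaceBlock overrides elided.

  @Override
  BlockInfoUnderConstruction convertCompleteBlockToUC(
      HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) {
    // Only the concrete type differs per subclass; the shared transition
    // logic in convertToBlockUnderConstruction is inherited unchanged.
    BlockInfoUnderConstructionOther ucBlock =
        new BlockInfoUnderConstructionOther(this, s, targets);
    ucBlock.setBlockCollection(getBlockCollection());
    return ucBlock;
  }
}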

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java
Lines changed: 15 additions & 62 deletions

@@ -19,13 +19,13 @@
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 
 /**
  * Subclass of {@link BlockInfo}, used for a block with replication scheme.
  */
 @InterfaceAudience.Private
 public class BlockInfoContiguous extends BlockInfo {
-  public static final BlockInfoContiguous[] EMPTY_ARRAY = {};
 
   public BlockInfoContiguous(short size) {
     super(size);
@@ -40,84 +40,37 @@ public BlockInfoContiguous(Block blk, short size) {
    * This is used to convert BlockReplicationInfoUnderConstruction
    * @param from BlockReplicationInfo to copy from.
    */
-  protected BlockInfoContiguous(BlockInfoContiguous from) {
+  protected BlockInfoContiguous(BlockInfo from) {
     super(from);
   }
 
-  /**
-   * Ensure that there is enough space to include num more triplets.
-   * @return first free triplet index.
-   */
-  private int ensureCapacity(int num) {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    int last = numNodes();
-    if (triplets.length >= (last+num)*3) {
-      return last;
-    }
-    /* Not enough space left. Create a new array. Should normally
-     * happen only when replication is manually increased by the user. */
-    Object[] old = triplets;
-    triplets = new Object[(last+num)*3];
-    System.arraycopy(old, 0, triplets, 0, last * 3);
-    return last;
-  }
-
   @Override
   boolean addStorage(DatanodeStorageInfo storage) {
-    // find the last null node
-    int lastNode = ensureCapacity(1);
-    setStorageInfo(lastNode, storage);
-    setNext(lastNode, null);
-    setPrevious(lastNode, null);
-    return true;
+    return ContiguousBlockStorageOp.addStorage(this, storage);
   }
 
   @Override
   boolean removeStorage(DatanodeStorageInfo storage) {
-    int dnIndex = findStorageInfo(storage);
-    if (dnIndex < 0) { // the node is not found
-      return false;
-    }
-    assert getPrevious(dnIndex) == null && getNext(dnIndex) == null :
-        "Block is still in the list and must be removed first.";
-    // find the last not null node
-    int lastNode = numNodes()-1;
-    // replace current node triplet by the lastNode one
-    setStorageInfo(dnIndex, getStorageInfo(lastNode));
-    setNext(dnIndex, getNext(lastNode));
-    setPrevious(dnIndex, getPrevious(lastNode));
-    // set the last triplet to null
-    setStorageInfo(lastNode, null);
-    setNext(lastNode, null);
-    setPrevious(lastNode, null);
-    return true;
+    return ContiguousBlockStorageOp.removeStorage(this, storage);
   }
 
   @Override
   public int numNodes() {
-    assert this.triplets != null : "BlockInfo is not initialized";
-    assert triplets.length % 3 == 0 : "Malformed BlockInfo";
-
-    for (int idx = getCapacity()-1; idx >= 0; idx--) {
-      if (getDatanode(idx) != null) {
-        return idx + 1;
-      }
-    }
-    return 0;
+    return ContiguousBlockStorageOp.numNodes(this);
   }
 
   @Override
   void replaceBlock(BlockInfo newBlock) {
-    assert newBlock instanceof BlockInfoContiguous;
-    for (int i = this.numNodes() - 1; i >= 0; i--) {
-      final DatanodeStorageInfo storage = this.getStorageInfo(i);
-      final boolean removed = storage.removeBlock(this);
-      assert removed : "currentBlock not found.";
-
-      final DatanodeStorageInfo.AddBlockResult result = storage.addBlock(
-          newBlock);
-      assert result == DatanodeStorageInfo.AddBlockResult.ADDED :
-          "newBlock already exists.";
-    }
+    ContiguousBlockStorageOp.replaceBlock(this, newBlock);
+  }
+
+  @Override
+  BlockInfoUnderConstruction convertCompleteBlockToUC(
+      HdfsServerConstants.BlockUCState s, DatanodeStorageInfo[] targets) {
+    BlockInfoUnderConstructionContiguous ucBlock =
+        new BlockInfoUnderConstructionContiguous(this,
            getBlockCollection().getPreferredBlockReplication(), s, targets);
+    ucBlock.setBlockCollection(getBlockCollection());
+    return ucBlock;
   }
 }
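Each removed method body above reappears as a static operation on a shared helper class. ContiguousBlockStorageOp is among the 30 changed files but is outside this excerpt, so the sketch below reconstructs it from the removed code and the call sites above; it assumes the package-private BlockInfo accessors the removed bodies used (setStorageInfo, setNext, setPrevious, getCapacity, getDatanode, findStorageInfo). The protected-to-package-private change to triplets in BlockInfo.java above is what lets a same-package helper like this touch the array directly.

package org.apache.hadoop.hdfs.server.blockmanagement;

/**
 * Sketch of the static helper that BlockInfoContiguous now delegates to,
 * reconstructed from the logic removed above.
 */
class ContiguousBlockStorageOp {
  /**
   * Ensure that there is enough space to include num more triplets.
   * @return first free triplet index.
   */
  private static int ensureCapacity(BlockInfo b, int num) {
    assert b.triplets != null : "BlockInfo is not initialized";
    int last = b.numNodes();
    if (b.triplets.length >= (last + num) * 3) {
      return last;
    }
    // Not enough space left; grow the array. Should normally happen only
    // when replication is manually increased by the user.
    Object[] old = b.triplets;
    b.triplets = new Object[(last + num) * 3];
    System.arraycopy(old, 0, b.triplets, 0, last * 3);
    return last;
  }

  static boolean addStorage(BlockInfo b, DatanodeStorageInfo storage) {
    // Append the new storage in the first free triplet slot.
    int lastNode = ensureCapacity(b, 1);
    b.setStorageInfo(lastNode, storage);
    b.setNext(lastNode, null);
    b.setPrevious(lastNode, null);
    return true;
  }

  static boolean removeStorage(BlockInfo b, DatanodeStorageInfo storage) {
    int dnIndex = b.findStorageInfo(storage);
    if (dnIndex < 0) { // the node is not found
      return false;
    }
    assert b.getPrevious(dnIndex) == null && b.getNext(dnIndex) == null :
        "Block is still in the list and must be removed first.";
    // Move the last triplet into the vacated slot, then null out the tail.
    int lastNode = b.numNodes() - 1;
    b.setStorageInfo(dnIndex, b.getStorageInfo(lastNode));
    b.setNext(dnIndex, b.getNext(lastNode));
    b.setPrevious(dnIndex, b.getPrevious(lastNode));
    b.setStorageInfo(lastNode, null);
    b.setNext(lastNode, null);
    b.setPrevious(lastNode, null);
    return true;
  }

  static int numNodes(BlockInfo b) {
    // Scan backwards for the last non-null datanode slot.
    for (int idx = b.getCapacity() - 1; idx >= 0; idx--) {
      if (b.getDatanode(idx) != null) {
        return idx + 1;
      }
    }
    return 0;
  }

  static void replaceBlock(BlockInfo b, BlockInfo newBlock) {
    // Swap newBlock for b on every storage that currently holds b.
    for (int i = b.numNodes() - 1; i >= 0; i--) {
      final DatanodeStorageInfo storage = b.getStorageInfo(i);
      final boolean removed = storage.removeBlock(b);
      assert removed : "currentBlock not found.";
      final DatanodeStorageInfo.AddBlockResult result =
          storage.addBlock(newBlock);
      assert result == DatanodeStorageInfo.AddBlockResult.ADDED :
          "newBlock already exists.";
    }
  }
}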
