
Commit 16ddcfe

Author: Ritesh Garg (committed)
Message: Fixing comments
Parent: 296d37a

File tree: 1 file changed (+9, -10 lines)


hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

Lines changed: 9 additions & 10 deletions
@@ -1721,12 +1721,12 @@ public void testSingleRackFailureDuringPipelineSetupMinReplicationPossible() thr
         false);
     conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
         MIN_REPLICATION, 2);
-    // 3 racks & 6 nodes. 1 per rack for 2 racks and 3 nodes in the 3rd rack
+    // 3 racks & 6 nodes. 1 per rack for 2 racks and 4 nodes in the 3rd rack
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
         .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"}).build()) {
       cluster.waitClusterUp();
       DistributedFileSystem fs = cluster.getFileSystem();
-      // kill all the DNs in the 3rd rack.
+      // kill all the DNs in the 3rd rack, so only 2 racks stays with 1 active DN each
       cluster.stopDataNode(5);
       cluster.stopDataNode(4);
       cluster.stopDataNode(3);
@@ -1745,12 +1745,12 @@ public void testSingleRackFailureDuringPipelineSetupMinReplicationImpossible()
         BlockPlacementPolicyRackFaultTolerant.class, BlockPlacementPolicy.class);
     conf.setBoolean(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false);
     conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.MIN_REPLICATION, 3);
-    // 3 racks & 6 nodes. 1 per rack for 2 racks and 3 nodes in the 3rd rack
+    // 3 racks & 6 nodes. 1 per rack for 2 racks and 4 nodes in the 3rd rack
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
         .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"}).build()) {
       cluster.waitClusterUp();
       DistributedFileSystem fs = cluster.getFileSystem();
-      // kill one DN, so only 2 racks stays with active DN
+      // kill all the DNs in the 3rd rack, so only 2 racks stays with 1 active DN each
       cluster.stopDataNode(5);
       cluster.stopDataNode(4);
       cluster.stopDataNode(3);
@@ -1777,12 +1777,12 @@ public void testMultipleRackFailureDuringPipelineSetupMinReplicationPossible() t
         false);
     conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
         MIN_REPLICATION, 1);
-    // 3 racks & 3 nodes. 1 per rack
+    // 3 racks & 6 nodes. 1 per rack for 2 racks and 4 nodes in the 3rd rack
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
         .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"}).build()) {
       cluster.waitClusterUp();
       DistributedFileSystem fs = cluster.getFileSystem();
-      // kill one DN, so only 2 racks stays with active DN
+      // kill all DNs except 1, so only rack1 stays with 1 active DN
       cluster.stopDataNode(5);
       cluster.stopDataNode(4);
       cluster.stopDataNode(3);
@@ -1806,12 +1806,12 @@ public void testMultipleRackFailureDuringPipelineSetupMinReplicationImpossible()
         false);
     conf.setInt(HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.
         MIN_REPLICATION, 2);
-    // 3 racks & 3 nodes. 1 per rack
+    // 3 racks & 6 nodes. 1 per rack for 2 racks and 4 nodes in the 3rd rack
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
         .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"}).build()) {
       cluster.waitClusterUp();
       DistributedFileSystem fs = cluster.getFileSystem();
-      // kill one DN, so only 2 racks stays with active DN
+      // kill all DNs except 1, so only rack1 stays with 1 active DN
       cluster.stopDataNode(5);
       cluster.stopDataNode(4);
       cluster.stopDataNode(3);
@@ -1838,7 +1838,7 @@ public void testAllRackFailureDuringPipelineSetup() throws Exception {
     conf.setBoolean(
         HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY,
         false);
-    // 3 racks & 3 nodes. 1 per rack
+    // 3 racks & 6 nodes. 1 per rack for 2 racks and 4 nodes in the 3rd rack
     try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
        .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"}).build()) {
       cluster.waitClusterUp();
@@ -1857,5 +1857,4 @@ public void testAllRackFailureDuringPipelineSetup() throws Exception {
       assertTrue(threw);
     }
   }
-
 }
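
For context, below is a minimal, self-contained sketch of the cluster topology the corrected comments describe: 3 racks and 6 DataNodes, with rack1 and rack2 holding one DN each and rack3 holding four. It is assembled only from calls visible in this diff; the class name, test method name, the fourth stopDataNode(2) call, the written path, and the trailing write are hypothetical illustrations and not part of this commit, and JUnit 4 is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;

// Hypothetical sketch class; the real tests live in TestDistributedFileSystem.
public class RackTopologySketchTest {

  @Test
  public void testSingleRackFailureSketch() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Do not replace failed datanodes on pipeline failure; rely on MIN_REPLICATION.
    conf.setBoolean(
        HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.ENABLE_KEY, false);
    conf.setInt(
        HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure.MIN_REPLICATION, 2);
    // 3 racks & 6 nodes: 1 per rack for 2 racks and 4 nodes in the 3rd rack.
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6)
        .racks(new String[] {"/rack1", "/rack2", "/rack3", "/rack3", "/rack3", "/rack3"})
        .build()) {
      cluster.waitClusterUp();
      DistributedFileSystem fs = cluster.getFileSystem();
      // Kill all the DNs in the 3rd rack (indices 2-5), so only rack1 and rack2
      // stay with 1 active DN each.
      cluster.stopDataNode(5);
      cluster.stopDataNode(4);
      cluster.stopDataNode(3);
      cluster.stopDataNode(2);
      // With two DNs still alive, a write can be expected to succeed when
      // MIN_REPLICATION = 2, mirroring the "MinReplicationPossible" variant above.
      try (FSDataOutputStream out = fs.create(new Path("/rack-sketch-file"))) {
        out.write(new byte[8 * 1024]);
      }
    }
  }
}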
