Skip to content

Commit 58135bc

Browse files
committed
Increase unit test timeouts to reduce flaky failures
1 parent 273adf7 commit 58135bc

File tree

2 files changed

+13
-13
lines changed

2 files changed

+13
-13
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -435,7 +435,7 @@ private void testDecommission(int numNamenodes, int numDatanodes)
435435
/**
436436
* Test that over-replicated blocks are deleted on recommission.
437437
*/
438-
@Test(timeout=120000)
438+
@Test(timeout=360000)
439439
public void testRecommission() throws Exception {
440440
final int numDatanodes = 6;
441441
try {
@@ -624,7 +624,7 @@ public void testHostsFile(int numNameNodes) throws IOException,
624624
}
625625
}
626626

627-
@Test(timeout=120000)
627+
@Test(timeout=360000)
628628
public void testDecommissionWithOpenfile()
629629
throws IOException, InterruptedException {
630630
LOG.info("Starting test testDecommissionWithOpenfile");
@@ -676,7 +676,7 @@ public void testDecommissionWithOpenfile()
676676
fdos.close();
677677
}
678678

679-
@Test(timeout = 20000)
679+
@Test(timeout = 360000)
680680
public void testDecommissionWithUnknownBlock() throws IOException {
681681
startCluster(1, 3);
682682

@@ -795,7 +795,7 @@ public Boolean get() {
795795
}
796796
}
797797

798-
@Test(timeout=180000)
798+
@Test(timeout=360000)
799799
public void testDecommissionWithOpenfileReporting()
800800
throws Exception {
801801
LOG.info("Starting test testDecommissionWithOpenfileReporting");
@@ -1005,7 +1005,7 @@ public void testDecommissionWithOpenFileAndBlockRecovery()
10051005
assertEquals(dfs.getFileStatus(file).getLen(), writtenBytes);
10061006
}
10071007

1008-
@Test(timeout=120000)
1008+
@Test(timeout=360000)
10091009
public void testCloseWhileDecommission() throws IOException,
10101010
ExecutionException, InterruptedException {
10111011
LOG.info("Starting test testCloseWhileDecommission");
@@ -1064,7 +1064,7 @@ public void testCloseWhileDecommission() throws IOException,
10641064
* to the IBR, all three nodes dn1/dn2/dn3 enter Decommissioning and then the
10651065
* DN reports the IBR.
10661066
*/
1067-
@Test(timeout=120000)
1067+
@Test(timeout=360000)
10681068
public void testAllocAndIBRWhileDecommission() throws IOException {
10691069
LOG.info("Starting test testAllocAndIBRWhileDecommission");
10701070
getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
@@ -1314,7 +1314,7 @@ public Boolean get() {
13141314
}, 500, 5000);
13151315
}
13161316

1317-
@Test(timeout=120000)
1317+
@Test(timeout=360000)
13181318
public void testBlocksPerInterval() throws Exception {
13191319
GenericTestUtils.setLogLevel(
13201320
LoggerFactory.getLogger(DatanodeAdminManager.class), Level.TRACE);
@@ -1369,7 +1369,7 @@ private void doDecomCheck(DatanodeManager datanodeManager,
13691369
/**
13701370
* Test DatanodeAdminManager#monitor can swallow any exceptions by default.
13711371
*/
1372-
@Test(timeout=120000)
1372+
@Test(timeout=360000)
13731373
public void testPendingNodeButDecommissioned() throws Exception {
13741374
// Only allow one node to be decom'd at a time
13751375
getConf().setInt(
@@ -1416,7 +1416,7 @@ public void testPendingNodeButDecommissioned() throws Exception {
14161416
}
14171417
}
14181418

1419-
@Test(timeout=120000)
1419+
@Test(timeout=360000)
14201420
public void testPendingNodes() throws Exception {
14211421
GenericTestUtils.setLogLevel(
14221422
LoggerFactory.getLogger(DatanodeAdminManager.class), Level.TRACE);
@@ -1685,7 +1685,7 @@ public Boolean get() {
16851685
* Force the tracked nodes set to be filled with nodes lost while decommissioning,
16861686
* then decommission healthy nodes & validate they are decommissioned eventually.
16871687
*/
1688-
@Test(timeout = 120000)
1688+
@Test(timeout = 360000)
16891689
public void testRequeueUnhealthyDecommissioningNodes() throws Exception {
16901690
// Create a MiniDFSCluster with 3 live datanode in AdminState=NORMAL and
16911691
// 2 dead datanodes in AdminState=DECOMMISSION_INPROGRESS and a file
@@ -1911,7 +1911,7 @@ private void createClusterWithDeadNodesDecommissionInProgress(final int numLiveN
19111911
under-replicated block can be replicated to sufficient datanodes & the decommissioning
19121912
node can be decommissioned.
19131913
*/
1914-
@Test(timeout = 120000)
1914+
@Test(timeout = 300000)
19151915
public void testDeleteCorruptReplicaForUnderReplicatedBlock() throws Exception {
19161916
// Constants
19171917
final Path file = new Path("/test-file");

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRollingUpgrade.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ private static Configuration setConf(Configuration conf, File dir,
169169
return conf;
170170
}
171171

172-
@Test (timeout = 30000)
172+
@Test (timeout = 300000)
173173
public void testRollingUpgradeWithQJM() throws Exception {
174174
String nnDirPrefix = MiniDFSCluster.getBaseDirectory() + "/nn/";
175175
final File nn1Dir = new File(nnDirPrefix + "image1");
@@ -601,7 +601,7 @@ public void testCheckpointWithMultipleNN() throws IOException, InterruptedExcept
601601
testCheckpoint(3);
602602
}
603603

604-
@Test(timeout = 60000)
604+
@Test(timeout = 300000)
605605
public void testRollBackImage() throws Exception {
606606
final Configuration conf = new HdfsConfiguration();
607607
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);

0 commit comments

Comments (0)