Skip to content

Commit f29c96c

Browse files
tomscut authored and HarshitGupta11 committed
HDFS-16281. Fix flaky unit tests failed due to timeout (apache#3574)
1 parent bdbfc2f commit f29c96c

File tree

7 files changed

+11
-11
lines changed

7 files changed

+11
-11
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ protected String getDefaultWorkingDirectory() {
103103
}
104104

105105
@Override
106-
@Test
106+
@Test(timeout = 60000)
107107
public void testAppend() throws IOException {
108108
AppendTestUtil.testAppend(fs, new Path("/append/f"));
109109
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,7 @@ private boolean isFallBackExist(Configuration config) {
496496
* When InnerCache disabled, all matching ViewFileSystemOverloadScheme
497497
* initialized scheme file systems would not use FileSystem cache.
498498
*/
499-
@Test(timeout = 3000)
499+
@Test(timeout = 30000)
500500
public void testViewFsOverloadSchemeWithNoInnerCacheAndHdfsTargets()
501501
throws Exception {
502502
final Path hdfsTargetPath = new Path(defaultFSURI + HDFS_USER_FOLDER);
@@ -523,7 +523,7 @@ public void testViewFsOverloadSchemeWithNoInnerCacheAndHdfsTargets()
523523
* initialized scheme file systems should continue to take advantage of
524524
* FileSystem cache.
525525
*/
526-
@Test(timeout = 3000)
526+
@Test(timeout = 30000)
527527
public void testViewFsOverloadSchemeWithNoInnerCacheAndLocalSchemeTargets()
528528
throws Exception {
529529
final Path localTragetPath = new Path(localTargetDir.toURI());
@@ -545,7 +545,7 @@ public void testViewFsOverloadSchemeWithNoInnerCacheAndLocalSchemeTargets()
545545
/**
546546
* Tests the rename with nfly mount link.
547547
*/
548-
@Test(timeout = 3000)
548+
@Test(timeout = 30000)
549549
public void testNflyRename() throws Exception {
550550
final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER);
551551
final Path hdfsTargetPath2 = new Path(defaultFSURI + HDFS_USER_FOLDER + 1);
@@ -577,7 +577,7 @@ public void testNflyRename() throws Exception {
577577
/**
578578
* Tests the write and read contents with nfly mount link.
579579
*/
580-
@Test(timeout = 3000)
580+
@Test(timeout = 30000)
581581
public void testNflyWriteRead() throws Exception {
582582
final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER);
583583
final Path hdfsTargetPath2 = new Path(defaultFSURI + HDFS_USER_FOLDER + 1);
@@ -604,7 +604,7 @@ public void testNflyWriteRead() throws Exception {
604604
* target file. 3. Tests the read works with repairOnRead flag. 4. Tests that
605605
* previously deleted file fully recovered and exists.
606606
*/
607-
@Test(timeout = 3000)
607+
@Test(timeout = 30000)
608608
public void testNflyRepair() throws Exception {
609609
final NflyFSystem.NflyKey repairKey = NflyFSystem.NflyKey.repairOnRead;
610610
final Path hdfsTargetPath1 = new Path(defaultFSURI + HDFS_USER_FOLDER);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ protected String getDefaultWorkingDirectory() {
6363
return defaultWorkingDirectory;
6464
}
6565

66-
@Test
66+
@Test(timeout = 60000)
6767
public void testAppend() throws IOException {
6868
AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
6969
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ public void testSnapshotCommandsWithURI()throws Exception {
224224
fs.delete(new Path("/Fully/QPath"), true);
225225
}
226226

227-
@Test (timeout=60000)
227+
@Test (timeout=120000)
228228
public void testSnapshotDiff()throws Exception {
229229
Configuration config = new HdfsConfiguration();
230230
Path snapDirPath = new Path(fs.getUri().toString() + "/snap_dir");

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,7 @@ public void testSubmitPlanInNonRegularStatus() throws Exception {
158158
* Tests running multiple commands under on setup. This mainly covers
159159
* {@link org.apache.hadoop.hdfs.server.diskbalancer.command.Command#close}
160160
*/
161-
@Test(timeout = 60000)
161+
@Test(timeout = 120000)
162162
public void testRunMultipleCommandsUnderOneSetup() throws Exception {
163163

164164
final int numDatanodes = 1;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeStorageDirectives.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ private void testStorageTypes(StorageType[][] storageTypes,
192192
* Types.
193193
* @throws IOException
194194
*/
195-
@Test(timeout=60000)
195+
@Test(timeout=120000)
196196
public void testTargetStorageTypes() throws ReconfigurationException,
197197
InterruptedException, TimeoutException, IOException {
198198
// DISK and not anything else.

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ public void testOffsetPlusLengthParamsLongerThanFile() throws IOException {
396396
}
397397
}
398398

399-
@Test
399+
@Test(timeout = 60000)
400400
public void testResponseCode() throws IOException {
401401
final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
402402
final Path root = new Path("/");

0 commit comments

Comments (0)