Commit 68292c7

fix unit test
1 parent 38d52f6 commit 68292c7

File tree: 2 files changed, +41 -35 lines changed


hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java

Lines changed: 1 addition & 0 deletions

@@ -305,6 +305,7 @@ public boolean createFile(
     final long begin = profilingEventHook.beforeMetadataOp(volume, OPEN);
     try {
       faultInjectorEventHook.beforeMetadataOp(volume, OPEN);
+      System.out.println(f.getAbsolutePath());
       boolean created = f.createNewFile();
       profilingEventHook.afterMetadataOp(volume, OPEN, begin);
       return created;
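
The added line traces the created file's path to stdout. For comparison, here is a minimal sketch of the same trace routed through SLF4J, the logging facade Hadoop uses; the class and method below are hypothetical stand-ins for the FileIoProvider call site, not code from this commit:

import java.io.File;
import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical stand-in mirroring the shape of FileIoProvider#createFile
// around the newly added line.
public class CreateFileTraceSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(CreateFileTraceSketch.class);

  public boolean createFile(File f) throws IOException {
    // Parameterized logging defers the string formatting until the
    // debug level is actually enabled.
    LOG.debug("createFile: {}", f.getAbsolutePath());
    return f.createNewFile();
  }
}

A guarded LOG.debug keeps the trace available without writing to the DataNode's stdout on every file creation.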

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java

Lines changed: 40 additions & 35 deletions

@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -64,7 +65,6 @@
 import org.junit.BeforeClass;
 import org.junit.Test;

-import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -186,40 +186,6 @@ public void testPreventOverflow() throws IOException, NoSuchFieldException,
     runAdjustChunkBoundary(configuredWritePacketSize, finalWritePacketSize);
   }

-  @Test(timeout=60000)
-  public void testFirstPacketSizeInNewBlocks() throws IOException {
-    final long blockSize = 1L * 1024 * 1024;
-    final int numDataNodes = 3;
-    final Configuration dfsConf = new Configuration();
-    dfsConf.setLong(DFS_BLOCK_SIZE_KEY, blockSize);
-    MiniDFSCluster dfsCluster = null;
-    dfsCluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(numDataNodes).build();
-    dfsCluster.waitActive();
-
-    DistributedFileSystem fs = dfsCluster.getFileSystem();
-    Path fileName = new Path("/testfile.dat");
-    FSDataOutputStream fos = fs.create(fileName);
-    DataChecksum crc32c = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
-
-    long loop = 0;
-    Random r = new Random();
-    byte[] buf = new byte[1 * 1024 * 1024];
-    r.nextBytes(buf);
-    fos.write(buf);
-    fos.hflush();
-
-    while (loop < 20) {
-      r.nextBytes(buf);
-      fos.write(buf);
-      fos.hflush();
-      loop++;
-      Assert.assertNotEquals(crc32c.getBytesPerChecksum() + crc32c.getChecksumSize(),
-          ((DFSOutputStream)fos.getWrappedStream()).packetSize);
-    }
-
-    fos.close();
-  }
-
   /**
    * @configuredWritePacketSize the configured WritePacketSize.
    * @finalWritePacketSize the final WritePacketSize picked by
@@ -544,6 +510,45 @@ public void testExceptionInCloseWithoutRecoverLease() throws Exception {
     }
   }

+  @Test(timeout=60000)
+  public void testFirstPacketSizeInNewBlocks() throws IOException {
+    final long blockSize = (long) 1024 * 1024;
+    MiniDFSCluster dfsCluster = cluster;
+    DistributedFileSystem fs = dfsCluster.getFileSystem();
+    Configuration dfsConf = fs.getConf();
+
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
+    try(FSDataOutputStream fos = fs.create(new Path("/testfile.dat"),
+        FsPermission.getDefault(),
+        flags, 512, (short)3, blockSize, null)) {
+
+      DataChecksum crc32c = DataChecksum.newDataChecksum(
+          DataChecksum.Type.CRC32C, 512);
+
+      long loop = 0;
+      Random r = new Random();
+      byte[] buf = new byte[(int) blockSize];
+      r.nextBytes(buf);
+      fos.write(buf);
+      fos.hflush();
+
+      int chunkSize = crc32c.getBytesPerChecksum() + crc32c.getChecksumSize();
+      int packetContentSize = (dfsConf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+          DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT) -
+          PacketHeader.PKT_MAX_HEADER_LEN) / chunkSize * chunkSize;
+
+      while (loop < 20) {
+        r.nextBytes(buf);
+        fos.write(buf);
+        fos.hflush();
+        loop++;
+        Assert.assertEquals(((DFSOutputStream) fos.getWrappedStream()).packetSize,
+            packetContentSize);
+      }
+    }
+    fs.delete(new Path("/testfile.dat"), true);
+  }
+
   @AfterClass
   public static void tearDown() {
     if (cluster != null) {