
Commit a8a33e0

Fix checkstyle issues

Author: Danny Becker
1 parent 44d0969 commit a8a33e0

5 files changed, +28 -17 lines changed

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClientFaultInjector.java

Lines changed: 2 additions & 1 deletion
@@ -69,7 +69,8 @@ public void sleepBeforeHedgedGet() {}
 
   public void delayWhenRenewLeaseTimeout() {}
 
-  public void onCreateBlockReader(LocatedBlock block, int chunkIndex, long offset, long length) throws IOException {}
+  public void onCreateBlockReader(LocatedBlock block, int chunkIndex,
+      long offset, long length) throws IOException {}
 
   public void failCreateBlockReader() throws InvalidBlockTokenException {}
 }
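
The wrapped hook keeps its behavior; a test can still subclass the injector to simulate slow block-reader creation. A minimal sketch, assuming the injector is installed through a static set(...) hook as with other Hadoop fault injectors; the sleep body is illustrative, not part of this commit:

  // Hypothetical test setup: delay block reader creation to simulate a slow datanode.
  DFSClientFaultInjector.set(new DFSClientFaultInjector() {
    @Override
    public void onCreateBlockReader(LocatedBlock block, int chunkIndex,
        long offset, long length) throws IOException {
      try {
        Thread.sleep(10_000);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  });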

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java

Lines changed: 1 addition & 3 deletions
@@ -52,7 +52,6 @@
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.Collection;
 import java.util.concurrent.ConcurrentHashMap;
@@ -278,9 +277,8 @@ private long getSleepingTimestamp(int attempts) {
    * @return True if the {@param block} is on a sleeping node, otherwise false.
    */
   protected boolean isBlockOnSleepingNode(LocatedBlock block) {
-    Collection<DatanodeInfo> sleepingNodes = checkSleepingNodes();
     DNAddrPair dnInfo = getBestNodeDNAddrPair(block, null);
-    return dnInfo != null && sleepingNodes.contains(dnInfo.info);
+    return dnInfo != null && checkSleepingNodes().contains(dnInfo.info);
   }
 
   /**

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java

Lines changed: 3 additions & 3 deletions
@@ -316,7 +316,7 @@ private synchronized void prepareDecodeInputsInternal() {
   }
 
   /**
-   * Decide which chunks to transition from READY to REQUESTED
+   * Decide which chunks to transition from READY to REQUESTED.
    */
   private void requestChunks() throws IOException {
     if (alignedStripe.getReadyChunksNum() > 0 &&
@@ -408,8 +408,8 @@ void readStripe() throws IOException {
           alignedStripe.setFetched(r.index);
           updateState4SuccessRead(r);
         } else {
-          if (r.exception instanceof ExecutionException &&
-              !(r.exception.getCause() instanceof ChecksumException)) {
+          if (r.getException() instanceof ExecutionException &&
+              !(r.getException().getCause() instanceof ChecksumException)) {
             if (dfsStripedInputStream.updateSleepingNodes(
                 readerInfos[r.index].datanode)) {
               alignedStripe.setSleeping(r.index);

hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java

Lines changed: 17 additions & 5 deletions
@@ -718,7 +718,7 @@ public static class AlignedStripe {
     private Map<Integer, Integer> parityStateCounts = new HashMap<>();
 
     /**
-     * Get the state of the chunk, or null if the chunk is null
+     * Get the state of the chunk, or null if the chunk is null.
      * @param chunkIndex The chunk to get the state of
     * @return The state of the chunk at {@param chunkIndex}, or null if the
      *         chunk at {@param chunkIndex} is null
@@ -1008,9 +1008,9 @@ public static class StripingChunk {
      * all-zero bytes in codec calculations.
      */
     public static final int ALLZERO = 0X0f;
-    /** Chunk fetch was attempted and it is now sleeping until retry **/
+    /** Chunk fetch was attempted and it is now sleeping until retry. **/
     public static final int SLEEPING = 0xf1;
-    /** Chunk is ready to start reading **/
+    /** Chunk is ready to start reading. **/
    public static final int READY = 0xf2;
 
     /**
@@ -1024,7 +1024,15 @@ public static class StripingChunk {
      * null (AlignedStripe created) -&gt; READY (upon failure) -&gt; REQUESTED
      * -&gt; {SLEEPING | PENDING} ...
      */
-    public int state = READY;
+    private int state = READY;
+
+    public int getState() {
+      return state;
+    }
+
+    public void setState(int state) {
+      this.state = state;
+    }
 
     private final ChunkByteBuffer chunkBuffer;
     private final ByteBuffer byteBuffer;
@@ -1126,9 +1134,13 @@ public static class StripingChunkReadResult {
 
     public final int index;
     public final int state;
-    public final Exception exception;
+    private final Exception exception;
     private final BlockReadStats readStats;
 
+    public Exception getException() {
+      return exception;
+    }
+
     public StripingChunkReadResult(int state) {
       Preconditions.checkArgument(state == TIMEOUT,
           "Only timeout result should return negative index.");

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java

Lines changed: 5 additions & 5 deletions
@@ -377,12 +377,12 @@ public void onCreateBlockReader(LocatedBlock block, int chunkIndex,
     try (FSDataInputStream in = fs.open(srcPath)) {
       long start = Time.monotonicNow();
       Assert.assertThrows(IOException.class, () -> {
-        StripedFileTestUtil
-          .verifyPread(in, fileLength, expected, largeBuf, ecPolicy);
+        StripedFileTestUtil
+            .verifyPread(in, fileLength, expected, largeBuf, ecPolicy);
       });
       long timeMs = Time.monotonicNow() - start;
       Assert.assertTrue("Read should have been slower than 10 seconds but was "
-          + timeMs + " ms",timeMs > 10000);
+          + timeMs + " ms", timeMs > 10000);
     }
     try (FSDataInputStream in = fs.open(srcPath)) {
       ExecutorService service = Executors.newSingleThreadExecutor();
@@ -401,7 +401,7 @@ public void onCreateBlockReader(LocatedBlock block, int chunkIndex,
           .verifyPread(in, fileLength, expected, largeBuf, ecPolicy);
       long timeMs = Time.monotonicNow() - start;
       Assert.assertTrue("Read should have been slower than 10 seconds but was "
-          + timeMs + " ms",timeMs > 10000);
+          + timeMs + " ms", timeMs > 10000);
       long decodingTimeNanos =
           ((HdfsDataInputStream) in).getReadStatistics().getTotalEcDecodingTimeNanos();
       // Should read without any decoding
@@ -427,7 +427,7 @@ public void onCreateBlockReader(LocatedBlock block, int chunkIndex,
       // Should read with decoding
       long timeMs = Time.monotonicNow() - start;
       Assert.assertTrue("Read should have been slower than 10 seconds but was "
-          + timeMs + " ms",timeMs > 10000);
+          + timeMs + " ms", timeMs > 10000);
       long decodingTimeNanos =
           ((HdfsDataInputStream) in).getReadStatistics().getTotalEcDecodingTimeNanos();
       // Should read without any decoding
