
Commit 749aa4f

checkstyle fixes

1 parent be8b9cb commit 749aa4f

File tree (1 file changed, +70 -63 lines)
  • hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer


hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer2.java

Lines changed: 70 additions & 63 deletions
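
For context, the hunks below apply a handful of common Checkstyle rules: spaces around binary operators, a space after the for/try keywords and after casts, no padding inside array-initializer braces, UPPER_SNAKE_CASE for static final constants, lowerCamelCase for locals, Javadoc sentences ending with a period, and long lines wrapped. A minimal illustrative sketch of the post-fix conventions (this class is hypothetical, not part of the commit):

// Hypothetical example, not from the commit: it only demonstrates the
// conventions the checkstyle fixes below converge on.
public final class CheckstyleConventions {

  /** Constants are UPPER_SNAKE_CASE; Javadoc sentences end with a period. */
  private static final int DEFAULT_BLOCK_SIZE = 5 * 1024 * 1024;

  private CheckstyleConventions() {
  }

  static long totalCapacity() {
    // Spaces around binary operators; no padding inside initializer braces.
    long[] capacities = new long[]{DEFAULT_BLOCK_SIZE, 2L * DEFAULT_BLOCK_SIZE};
    long total = 0;
    for (int i = 0; i < capacities.length; i++) { // space after "for"
      total += (int) capacities[i];               // space after the cast
    }
    return total;
  }
}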
@@ -78,7 +78,7 @@
 import static org.junit.Assert.assertTrue;
 
 /**
- * Some long running Balancer tasks
+ * Some long running Balancer tasks.
  */
 public class TestBalancer2 {
 
@@ -94,8 +94,8 @@ public class TestBalancer2 {
   private final static String RACK0 = "/rack0";
   private final static String RACK1 = "/rack1";
   private final static String RACK2 = "/rack2";
-  private final static String fileName = "/tmp.txt";
-  private final static Path filePath = new Path(fileName);
+  private final static String FILE_NAME = "/tmp.txt";
+  private final static Path FILE_PATH = new Path(FILE_NAME);
   private MiniDFSCluster cluster;
 
   @After
@@ -106,7 +106,7 @@ public void shutdown() throws Exception {
     }
   }
 
-  ClientProtocol client;
+  private ClientProtocol client;
 
   static final int DEFAULT_BLOCK_SIZE = 100;
   static final int DEFAULT_RAM_DISK_BLOCK_SIZE = 5 * 1024 * 1024;
@@ -131,7 +131,7 @@ static void initConf(Configuration conf) {
 
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY, 2000L);
     conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
-    conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5*1000);
+    conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5 * 1000);
   }
 
   static void initConfWithRamDisk(Configuration conf,
@@ -142,32 +142,33 @@ static void initConfWithRamDisk(Configuration conf,
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
-    conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5*1000);
+    conf.setInt(DFSConfigKeys.DFS_BALANCER_MAX_NO_MOVE_INTERVAL_KEY, 5 * 1000);
     LazyPersistTestCase.initCacheManipulator();
 
     conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
   }
 
   /**
-   * Test special case. Two replicas belong to same block should not in same node.
+   * Test special case. Two replicas belong to same block should not in same
+   * node.
    * We have 2 nodes.
    * We have a block in (DN0,SSD) and (DN1,DISK).
    * Replica in (DN0,SSD) should not be moved to (DN1,SSD).
    * Otherwise DN1 has 2 replicas.
    */
-  @Test(timeout=100000)
+  @Test(timeout = 100000)
   public void testTwoReplicaShouldNotInSameDN() throws Exception {
     final Configuration conf = new HdfsConfiguration();
 
-    int blockSize = 5 * 1024 * 1024 ;
+    int blockSize = 5 * 1024 * 1024;
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         1L);
 
     conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
 
-    int numOfDatanodes =2;
+    int numOfDatanodes = 2;
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(2)
         .racks(new String[]{"/default/rack0", "/default/rack0"})
@@ -184,12 +185,12 @@ public void testTwoReplicaShouldNotInSameDN() throws Exception {
     //set "/bar" directory with ONE_SSD storage policy.
     DistributedFileSystem fs = cluster.getFileSystem();
     Path barDir = new Path("/bar");
-    fs.mkdir(barDir,new FsPermission((short)777));
+    fs.mkdir(barDir, new FsPermission((short) 777));
     fs.setStoragePolicy(barDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
 
     // Insert 30 blocks. So (DN0,SSD) and (DN1,DISK) are about half full,
     // and (DN0,SSD) and (DN1,DISK) are about 15% full.
-    long fileLen = 30 * blockSize;
+    long fileLen = 30 * blockSize;
     // fooFile has ONE_SSD policy. So
     // (DN0,SSD) and (DN1,DISK) have 2 replicas belong to same block.
     // (DN0,DISK) and (DN1,SSD) have 2 replicas belong to same block.
@@ -214,10 +215,10 @@ public void testTwoReplicaShouldNotInSameDN() throws Exception {
    * One DN has two files on RAM_DISK, other DN has no files on RAM_DISK.
    * Then verify that the balancer does not migrate files on RAM_DISK across DN.
    */
-  @Test(timeout=300000)
+  @Test(timeout = 300000)
   public void testBalancerWithRamDisk() throws Exception {
-    final int SEED = 0xFADED;
-    final short REPL_FACT = 1;
+    final int seed = 0xFADED;
+    final short replicationFactor = 1;
     Configuration conf = new Configuration();
 
     final int defaultRamDiskCapacity = 10;
@@ -233,33 +234,34 @@ public void testBalancerWithRamDisk() throws Exception {
     cluster = new MiniDFSCluster
         .Builder(conf)
         .numDataNodes(1)
-        .storageCapacities(new long[] { ramDiskStorageLimit, diskStorageLimit })
-        .storageTypes(new StorageType[] { RAM_DISK, DEFAULT })
+        .storageCapacities(new long[]{ramDiskStorageLimit, diskStorageLimit})
+        .storageTypes(new StorageType[]{RAM_DISK, DEFAULT})
         .build();
 
     cluster.waitActive();
     // Create few files on RAM_DISK
-    final String METHOD_NAME = GenericTestUtils.getMethodName();
-    final Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
-    final Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
+    final String methodName = GenericTestUtils.getMethodName();
+    final Path path1 = new Path("/" + methodName + ".01.dat");
+    final Path path2 = new Path("/" + methodName + ".02.dat");
 
     DistributedFileSystem fs = cluster.getFileSystem();
-    DFSClient client = fs.getClient();
+    DFSClient dfsClient = fs.getClient();
     DFSTestUtil.createFile(fs, path1, true,
         DEFAULT_RAM_DISK_BLOCK_SIZE, 4 * DEFAULT_RAM_DISK_BLOCK_SIZE,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
+        DEFAULT_RAM_DISK_BLOCK_SIZE, replicationFactor, seed, true);
     DFSTestUtil.createFile(fs, path2, true,
         DEFAULT_RAM_DISK_BLOCK_SIZE, 1 * DEFAULT_RAM_DISK_BLOCK_SIZE,
-        DEFAULT_RAM_DISK_BLOCK_SIZE, REPL_FACT, SEED, true);
+        DEFAULT_RAM_DISK_BLOCK_SIZE, replicationFactor, seed, true);
 
     // Sleep for a short time to allow the lazy writer thread to do its job
     Thread.sleep(6 * 1000);
 
-    // Add another fresh DN with the same type/capacity without files on RAM_DISK
-    StorageType[][] storageTypes = new StorageType[][] {{RAM_DISK, DEFAULT}};
+    // Add another fresh DN with the same type/capacity without files on
+    // RAM_DISK
+    StorageType[][] storageTypes = new StorageType[][]{{RAM_DISK, DEFAULT}};
     long[][] storageCapacities = new long[][]{{ramDiskStorageLimit,
-        diskStorageLimit}};
-    cluster.startDataNodes(conf, REPL_FACT, storageTypes, true, null,
+        diskStorageLimit}};
+    cluster.startDataNodes(conf, replicationFactor, storageTypes, true, null,
         null, null, storageCapacities, null, false, false, false, null);
 
     cluster.triggerHeartbeats();
@@ -273,12 +275,14 @@ public void testBalancerWithRamDisk() throws Exception {
     assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
 
     // Verify files are still on RAM_DISK
-    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path1, RAM_DISK);
-    DFSTestUtil.verifyFileReplicasOnStorageType(fs, client, path2, RAM_DISK);
+    DFSTestUtil.verifyFileReplicasOnStorageType(fs, dfsClient, path1, RAM_DISK);
+    DFSTestUtil.verifyFileReplicasOnStorageType(fs, dfsClient, path2, RAM_DISK);
   }
 
-  /** Balancer should not move blocks with size < minBlockSize. */
-  @Test(timeout=60000)
+  /**
+   * Balancer should not move blocks with size < minBlockSize.
+   */
+  @Test(timeout = 60000)
   public void testMinBlockSizeAndSourceNodes() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
@@ -299,11 +303,11 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
         ClientProtocol.class).getProxy();
 
     // fill up the cluster to be 80% full
-    for(int i = 0; i < lengths.length; i++) {
+    for (int i = 0; i < lengths.length; i++) {
       final long size = lengths[i];
       final Path p = new Path("/file" + i + "_size" + size);
-      try(OutputStream out = dfs.create(p)) {
-        for(int j = 0; j < size; j++) {
+      try (OutputStream out = dfs.create(p)) {
+        for (int j = 0; j < size; j++) {
           out.write(j);
         }
       }
@@ -320,7 +324,7 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
 
     { // run Balancer with min-block-size=50
-      final BalancerParameters p = Balancer.Cli.parse(new String[] {
+      final BalancerParameters p = Balancer.Cli.parse(new String[]{
           "-policy", BalancingPolicy.Node.INSTANCE.getName(),
           "-threshold", "1"
       });
@@ -337,10 +341,10 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
     { // run Balancer with empty nodes as source nodes
       final Set<String> sourceNodes = new HashSet<>();
       final List<DataNode> datanodes = cluster.getDataNodes();
-      for(int i = capacities.length; i < datanodes.size(); i++) {
+      for (int i = capacities.length; i < datanodes.size(); i++) {
        sourceNodes.add(datanodes.get(i).getDisplayName());
      }
-      final BalancerParameters p = Balancer.Cli.parse(new String[] {
+      final BalancerParameters p = Balancer.Cli.parse(new String[]{
          "-policy", BalancingPolicy.Node.INSTANCE.getName(),
          "-threshold", "1",
          "-source", StringUtils.join(sourceNodes, ',')
@@ -358,7 +362,7 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
       final Set<String> sourceNodes = new HashSet<>();
       final List<DataNode> datanodes = cluster.getDataNodes();
       sourceNodes.add(datanodes.get(0).getDisplayName());
-      final BalancerParameters p = Balancer.Cli.parse(new String[] {
+      final BalancerParameters p = Balancer.Cli.parse(new String[]{
          "-policy", BalancingPolicy.Node.INSTANCE.getName(),
          "-threshold", "1",
          "-source", StringUtils.join(sourceNodes, ',')
@@ -375,10 +379,10 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
     { // run Balancer with all filled node as source nodes
       final Set<String> sourceNodes = new HashSet<>();
       final List<DataNode> datanodes = cluster.getDataNodes();
-      for(int i = 0; i < capacities.length; i++) {
+      for (int i = 0; i < capacities.length; i++) {
        sourceNodes.add(datanodes.get(i).getDisplayName());
      }
-      final BalancerParameters p = Balancer.Cli.parse(new String[] {
+      final BalancerParameters p = Balancer.Cli.parse(new String[]{
          "-policy", BalancingPolicy.Node.INSTANCE.getName(),
          "-threshold", "1",
          "-source", StringUtils.join(sourceNodes, ',')
@@ -395,34 +399,36 @@ public void testMinBlockSizeAndSourceNodes() throws Exception {
 
   /**
    * Verify balancer won't violate upgrade domain block placement policy.
+   *
    * @throws Exception
    */
-  @Test(timeout=100000)
+  @Test(timeout = 100000)
   public void testUpgradeDomainPolicyAfterBalance() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
     conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
         BlockPlacementPolicyWithUpgradeDomain.class,
         BlockPlacementPolicy.class);
-    long[] capacities = new long[] { CAPACITY, CAPACITY, CAPACITY };
+    long[] capacities = new long[]{CAPACITY, CAPACITY, CAPACITY};
     String[] hosts = {"host0", "host1", "host2"};
-    String[] racks = { RACK0, RACK1, RACK1 };
-    String[] UDs = { "ud0", "ud1", "ud2" };
+    String[] racks = {RACK0, RACK1, RACK1};
+    String[] uds = {"ud0", "ud1", "ud2"};
     runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks,
-        UDs, CAPACITY, "host3", RACK2, "ud2");
+        uds, CAPACITY, "host3", RACK2, "ud2");
   }
 
   /**
    * Verify balancer won't violate the default block placement policy.
+   *
    * @throws Exception
    */
-  @Test(timeout=100000)
+  @Test(timeout = 100000)
   public void testRackPolicyAfterBalance() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
-    long[] capacities = new long[] { CAPACITY, CAPACITY };
+    long[] capacities = new long[]{CAPACITY, CAPACITY};
     String[] hosts = {"host0", "host1"};
-    String[] racks = { RACK0, RACK1 };
+    String[] racks = {RACK0, RACK1};
     runBalancerAndVerifyBlockPlacmentPolicy(conf, capacities, hosts, racks,
         null, CAPACITY, "host2", RACK1, null);
   }
@@ -438,7 +444,7 @@ private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
     DatanodeManager dm = cluster.getNamesystem().getBlockManager().
         getDatanodeManager();
     if (UDs != null) {
-      for(int i = 0; i < UDs.length; i++) {
+      for (int i = 0; i < UDs.length; i++) {
        DatanodeID datanodeId = cluster.getDataNodes().get(i).getDatanodeId();
        dm.getDatanode(datanodeId).setUpgradeDomain(UDs[i]);
      }
@@ -454,13 +460,13 @@ private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
     long totalUsedSpace = totalCapacity * 8 / 10;
 
     final long fileSize = totalUsedSpace / numOfDatanodes;
-    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
+    DFSTestUtil.createFile(cluster.getFileSystem(0), FILE_PATH, false, 1024,
         fileSize, DEFAULT_BLOCK_SIZE, (short) numOfDatanodes, 0, false);
 
     // start up an empty node with the same capacity on the same rack as the
     // pinned host.
-    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
-        new String[] { newHost }, new long[] { newCapacity });
+    cluster.startDataNodes(conf, 1, true, null, new String[]{newRack},
+        new String[]{newHost}, new long[]{newCapacity});
     if (newUD != null) {
       DatanodeID newId = cluster.getDataNodes().get(
           numOfDatanodes).getDatanodeId();
@@ -478,7 +484,7 @@ private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
     BlockPlacementPolicy placementPolicy =
         cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy();
     List<LocatedBlock> locatedBlocks = client.
-        getBlockLocations(fileName, 0, fileSize).getLocatedBlocks();
+        getBlockLocations(FILE_NAME, 0, fileSize).getLocatedBlocks();
     for (LocatedBlock locatedBlock : locatedBlocks) {
       BlockPlacementStatus status = placementPolicy.verifyBlockPlacement(
           locatedBlock.getLocations(), numOfDatanodes);
@@ -493,9 +499,10 @@ private void runBalancerAndVerifyBlockPlacmentPolicy(Configuration conf,
    * Make sure that balancer can't move pinned blocks.
    * If specified favoredNodes when create file, blocks will be pinned use
    * sticky bit.
+   *
    * @throws Exception
    */
-  @Test(timeout=100000)
+  @Test(timeout = 100000)
   public void testBalancerWithPinnedBlocks() throws Exception {
     // This test assumes stick-bit based block pin mechanism available only
     // in Linux/Unix. It can be unblocked on Windows when HDFS-7759 is ready to
@@ -506,9 +513,9 @@ public void testBalancerWithPinnedBlocks() throws Exception {
     initConf(conf);
     conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
 
-    long[] capacities = new long[] { CAPACITY, CAPACITY };
+    long[] capacities = new long[]{CAPACITY, CAPACITY};
     String[] hosts = {"host0", "host1"};
-    String[] racks = { RACK0, RACK1 };
+    String[] racks = {RACK0, RACK1};
     int numOfDatanodes = capacities.length;
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(capacities.length)
@@ -529,13 +536,13 @@ public void testBalancerWithPinnedBlocks() throws Exception {
       favoredNodes[i] = new InetSocketAddress(hosts[i], port);
     }
 
-    DFSTestUtil.createFile(cluster.getFileSystem(0), filePath, false, 1024,
+    DFSTestUtil.createFile(cluster.getFileSystem(0), FILE_PATH, false, 1024,
         totalUsedSpace / numOfDatanodes, DEFAULT_BLOCK_SIZE,
         (short) numOfDatanodes, 0, false, favoredNodes);
 
     // start up an empty node with the same capacity
-    cluster.startDataNodes(conf, 1, true, null, new String[] { RACK2 },
-        new long[] { CAPACITY });
+    cluster.startDataNodes(conf, 1, true, null, new String[]{RACK2},
+        new long[]{CAPACITY});
 
     totalCapacity += CAPACITY;
 
@@ -583,7 +590,7 @@ public void testBalancerWithSortTopNodes() throws Exception {
       DataNodeTestUtils.triggerHeartbeat(dataNodes.get(i));
       DataNodeTestUtils.triggerBlockReport(dataNodes.get(i));
       // Create nodes with: 80%, 85%, 90%, 95%, 100%.
-      int capacityForThisDatanode = (int)capacity
+      int capacityForThisDatanode = (int) capacity
          - diffBetweenNodes * (numOfOverUtilizedDn - i - 1);
      TestBalancer.createFile(cluster, new Path("test_big" + i),
          capacityForThisDatanode, (short) 1, 0);
@@ -609,7 +616,7 @@ public void testBalancerWithSortTopNodes() throws Exception {
     cluster.triggerBlockReports();
     cluster.waitFirstBRCompleted(0, 6000);
 
-    final BalancerParameters p = Balancer.Cli.parse(new String[] {
+    final BalancerParameters p = Balancer.Cli.parse(new String[]{
        "-policy", BalancingPolicy.Node.INSTANCE.getName(),
        "-threshold", "1",
        "-sortTopNodes"
@@ -669,7 +676,7 @@ public void testBalancerWithSortTopNodes() throws Exception {
   public void testMaxIterationTime() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     initConf(conf);
-    int blockSize = 10*1024*1024; // 10MB block size
+    int blockSize = 10 * 1024 * 1024; // 10MB block size
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize);
     // limit the worker thread count of Balancer to have only 1 queue per DN
@@ -684,7 +691,7 @@ public void testMaxIterationTime() throws Exception {
     conf.setLong(DFSConfigKeys.DFS_BALANCER_MAX_ITERATION_TIME_KEY, 500L);
     // setup the cluster
     final long capacity = 10L * blockSize;
-    final long[] dnCapacities = new long[] {capacity, capacity};
+    final long[] dnCapacities = new long[]{capacity, capacity};
     final short rep = 1;
     final long seed = 0xFAFAFA;
     cluster = new MiniDFSCluster.Builder(conf)
