
Commit 7c20602

HDFS-16522. Set Http and Ipc ports for Datanodes in MiniDFSCluster (apache#4108)
Signed-off-by: Akira Ajisaka <aajisaka@apache.org>
1 parent 61bbdfd commit 7c20602
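What the change does: until now MiniDFSCluster always bound every DataNode's HTTP and IPC addresses to 127.0.0.1:0, i.e. an OS-assigned ephemeral port, so tests could not pin DataNode ports up front. This commit adds two Builder setters and threads the requested ports through startDataNodes() down to setupDatanodeAddress(). A minimal usage sketch against the new API (port values are illustrative only; a real test should probe free ports first, as the new testStartStopWithPorts below does):

    // Sketch only. Assumes the usual MiniDFSCluster test imports
    // (org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.*).
    Configuration conf = new HdfsConfiguration();
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .setDnHttpPorts(9864, 9964, 10064)  // one HTTP port per DataNode
        .setDnIpcPorts(9867, 9967, 10067)   // one IPC port per DataNode
        .build()) {
      cluster.waitActive();
      // The i-th DataNode now serves HTTP and IPC on the i-th entry of each array.
    }

If an array length does not match numDataNodes, the build fails fast with an IllegalArgumentException (see the validation added below).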

8 files changed, +167 −30 lines changed


hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

Lines changed: 63 additions & 20 deletions
@@ -203,6 +203,8 @@ public static class Builder {
     private int nameNodeHttpPort = 0;
     private final Configuration conf;
     private int numDataNodes = 1;
+    private int[] dnHttpPorts = null;
+    private int[] dnIpcPorts = null;
     private StorageType[][] storageTypes = null;
     private StorageType[] storageTypes1D = null;
     private int storagesPerDatanode = DEFAULT_STORAGES_PER_DATANODE;
@@ -277,6 +279,16 @@ public Builder numDataNodes(int val) {
       return this;
     }

+    public Builder setDnHttpPorts(int... ports) {
+      this.dnHttpPorts = ports;
+      return this;
+    }
+
+    public Builder setDnIpcPorts(int... ports) {
+      this.dnIpcPorts = ports;
+      return this;
+    }
+
     /**
      * Default: DEFAULT_STORAGES_PER_DATANODE
      */
@@ -599,7 +611,9 @@ protected MiniDFSCluster(Builder builder) throws IOException {
         builder.checkDataNodeHostConfig,
         builder.dnConfOverlays,
         builder.skipFsyncForTesting,
-        builder.useConfiguredTopologyMappingClass);
+        builder.useConfiguredTopologyMappingClass,
+        builder.dnHttpPorts,
+        builder.dnIpcPorts);
   }

   public static class DataNodeProperties {
@@ -873,7 +887,7 @@ public MiniDFSCluster(int nameNodePort,
         operation, null, racks, hosts,
         null, simulatedCapacities, null, true, false,
         MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
-        true, false, false, null, true, false);
+        true, false, false, null, true, false, null, null);
   }

   private void initMiniDFSCluster(
@@ -891,7 +905,9 @@ private void initMiniDFSCluster(
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays,
       boolean skipFsyncForTesting,
-      boolean useConfiguredTopologyMappingClass)
+      boolean useConfiguredTopologyMappingClass,
+      int[] dnHttpPorts,
+      int[] dnIpcPorts)
       throws IOException {
     boolean success = false;
     try {
@@ -974,9 +990,9 @@ private void initMiniDFSCluster(

       // Start the DataNodes
       startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
-          dnStartOpt != null ? dnStartOpt : startOpt,
-          racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
-          checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
+          dnStartOpt != null ? dnStartOpt : startOpt, racks, hosts, storageCapacities,
+          simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, checkDataNodeHostConfig,
+          dnConfOverlays, dnHttpPorts, dnIpcPorts);
       waitClusterUp();
       //make sure ProxyUsers uses the latest conf
       ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -1598,8 +1614,8 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       String[] racks, String[] hosts,
       long[] simulatedCapacities,
       boolean setupHostsFile) throws IOException {
-    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        null, simulatedCapacities, setupHostsFile, false, false, null);
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts, null,
+        simulatedCapacities, setupHostsFile, false, false, null, null, null);
   }

   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
@@ -1608,14 +1624,14 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       long[] simulatedCapacities,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig) throws IOException {
-    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts,
-        null, simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null);
+    startDataNodes(conf, numDataNodes, null, manageDfsDirs, operation, racks, hosts, null,
+        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false, null, null, null);
   }

   /**
    * Modify the config and start up additional DataNodes. The info port for
    * DataNodes is guaranteed to use a free port.
-   * 
+   *
    * Data nodes can run with the name node in the mini cluster or
    * a real name node. For example, running with a real name node is useful
    * when running simulated data nodes with a real name node.
@@ -1625,20 +1641,24 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
   * @param conf the base configuration to use in starting the DataNodes. This
   *          will be modified as necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
+  * @param storageTypes Storage Types for DataNodes.
   * @param manageDfsDirs if true, the data directories for DataNodes will be
   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
   *          set in the conf
   * @param operation the operation with which to start the DataNodes. If null
   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
   * @param racks array of strings indicating the rack that each DataNode is on
   * @param hosts array of strings indicating the hostnames for each DataNode
+  * @param storageCapacities array of Storage Capacities to be used while testing.
   * @param simulatedCapacities array of capacities of the simulated data nodes
   * @param setupHostsFile add new nodes to dfs hosts files
   * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
   * @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
   * @param dnConfOverlays An array of {@link Configuration} objects that will overlay the
   *          global MiniDFSCluster Configuration for the corresponding DataNode.
-  * @throws IllegalStateException if NameNode has been shutdown
+  * @param dnHttpPorts An array of Http ports if present, to be used for DataNodes.
+  * @param dnIpcPorts An array of Ipc ports if present, to be used for DataNodes.
+  * @throws IOException If the DFS daemons experience some issues.
   */
  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
      StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation,
@@ -1648,14 +1668,29 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
-      Configuration[] dnConfOverlays) throws IOException {
+      Configuration[] dnConfOverlays,
+      int[] dnHttpPorts,
+      int[] dnIpcPorts) throws IOException {
     assert storageCapacities == null || simulatedCapacities == null;
     assert storageTypes == null || storageTypes.length == numDataNodes;
     assert storageCapacities == null || storageCapacities.length == numDataNodes;

     if (operation == StartupOption.RECOVER) {
       return;
     }
+
+    if (dnHttpPorts != null && dnHttpPorts.length != numDataNodes) {
+      throw new IllegalArgumentException(
+          "Num of http ports (" + dnHttpPorts.length + ") should match num of DataNodes ("
+              + numDataNodes + ")");
+    }
+
+    if (dnIpcPorts != null && dnIpcPorts.length != numDataNodes) {
+      throw new IllegalArgumentException(
+          "Num of ipc ports (" + dnIpcPorts.length + ") should match num of DataNodes ("
+              + numDataNodes + ")");
+    }
+
     if (checkDataNodeHostConfig) {
       conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
     } else {
@@ -1711,7 +1746,15 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
         dnConf.addResource(dnConfOverlays[i]);
       }
       // Set up datanode address
-      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
+      int httpPort = 0;
+      int ipcPort = 0;
+      if (dnHttpPorts != null) {
+        httpPort = dnHttpPorts[i - curDatanodesNum];
+      }
+      if (dnIpcPorts != null) {
+        ipcPort = dnIpcPorts[i - curDatanodesNum];
+      }
+      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig, httpPort, ipcPort);
       if (manageDfsDirs) {
         String dirs = makeDataNodeDirs(i, storageTypes == null ?
             null : storageTypes[i - curDatanodesNum]);
@@ -3363,9 +3406,9 @@ public void setBlockRecoveryTimeout(long timeout) {
           timeout);
     }
   }
- 
+
   protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
-      boolean checkDataNodeAddrConfig) throws IOException {
+      boolean checkDataNodeAddrConfig, int httpPort, int ipcPort) throws IOException {
     if (setupHostsFile) {
       String hostsFile = conf.get(DFS_HOSTS, "").trim();
       if (hostsFile.length() == 0) {
@@ -3388,11 +3431,11 @@ protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
       }
     }
     if (checkDataNodeAddrConfig) {
-      conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+      conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + httpPort);
+      conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ipcPort);
     } else {
-      conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-      conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
+      conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + httpPort);
+      conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ipcPort);
     }
   }
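Two details worth noting in the MiniDFSCluster.java changes above. First, newly started DataNodes are matched to array entries by i - curDatanodesNum, so the k-th DataNode started by a given startDataNodes() call gets the k-th port of each array. Second, a port of 0 preserves the old behavior: callers that pass no arrays fall through to httpPort = 0 and ipcPort = 0, and "127.0.0.1:0" means the OS picks a free port at bind time. A small sketch of what setupDatanodeAddress() now writes into a per-DataNode conf (the key constants are the real DFSConfigKeys entries; port values are illustrative):

    // "dfs.datanode.http.address" / "dfs.datanode.ipc.address" after the patch:
    Configuration dnConf = new HdfsConfiguration();
    int httpPort = 9864;  // illustrative; 0 = "let the OS choose a free port"
    int ipcPort = 9867;
    dnConf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + httpPort);
    dnConf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:" + ipcPort);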

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSClusterWithNodeGroup.java

Lines changed: 4 additions & 2 deletions
@@ -118,7 +118,7 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
     for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
       Configuration dnConf = new HdfsConfiguration(conf);
       // Set up datanode address
-      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
+      setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig, 0, 0);
       if (manageDfsDirs) {
         String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i]);
         dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
@@ -235,7 +235,9 @@ public synchronized void startDataNodes(Configuration conf, int numDataNodes,
       boolean setupHostsFile,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
-      Configuration[] dnConfOverlays) throws IOException {
+      Configuration[] dnConfOverlays,
+      int[] dnHttpPorts,
+      int[] dnIpcPorts) throws IOException {
     startDataNodes(conf, numDataNodes, storageTypes, manageDfsDirs, operation, racks,
         NODE_GROUPS, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
         checkDataNodeAddrConfig, checkDataNodeHostConfig);
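Note that the node-group variant does not yet honor the new arrays: the widened override above accepts dnHttpPorts and dnIpcPorts but drops them when delegating to the node-group-aware overload, and its per-node loop passes hard-coded zeros to setupDatanodeAddress, so DataNodes started through MiniDFSClusterWithNodeGroup still bind ephemeral ports.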

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java

Lines changed: 92 additions & 0 deletions
@@ -19,13 +19,15 @@
 package org.apache.hadoop.hdfs;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assume.assumeTrue;

 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Random;
+import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.TimeoutException;
@@ -38,9 +40,13 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import org.apache.hadoop.util.Preconditions;

@@ -52,6 +58,8 @@
  */
 public class TestMiniDFSCluster {

+  private static final Logger LOG = LoggerFactory.getLogger(TestMiniDFSCluster.class);
+
   private static final String CLUSTER_1 = "cluster1";
   private static final String CLUSTER_2 = "cluster2";
   private static final String CLUSTER_3 = "cluster3";
@@ -319,4 +327,88 @@ public void testSetUpFederatedCluster() throws Exception {
       cluster.restartNameNode(1);
     }
   }
+
+  // There is a possibility that this test might fail if any other concurrently running
+  // test could bind same port as one of the ports returned by NetUtils.getFreeSocketPorts(6)
+  // before datanodes are started.
+  @Test
+  public void testStartStopWithPorts() throws Exception {
+    Configuration conf = new Configuration();
+
+    LambdaTestUtils.intercept(
+        IllegalArgumentException.class,
+        "Num of http ports (1) should match num of DataNodes (3)",
+        "MiniJournalCluster port validation failed",
+        () -> {
+          new MiniDFSCluster.Builder(conf).numDataNodes(3).setDnHttpPorts(8481).build();
+        });
+
+    LambdaTestUtils.intercept(
+        IllegalArgumentException.class,
+        "Num of ipc ports (2) should match num of DataNodes (1)",
+        "MiniJournalCluster port validation failed",
+        () -> {
+          new MiniDFSCluster.Builder(conf).setDnIpcPorts(8481, 8482).build();
+        });
+
+    LambdaTestUtils.intercept(
+        IllegalArgumentException.class,
+        "Num of ipc ports (1) should match num of DataNodes (3)",
+        "MiniJournalCluster port validation failed",
+        () -> {
+          new MiniDFSCluster.Builder(conf).numDataNodes(3).setDnHttpPorts(800, 9000, 10000)
+              .setDnIpcPorts(8481).build();
+        });
+
+    LambdaTestUtils.intercept(
+        IllegalArgumentException.class,
+        "Num of http ports (4) should match num of DataNodes (3)",
+        "MiniJournalCluster port validation failed",
+        () -> {
+          new MiniDFSCluster.Builder(conf).setDnHttpPorts(800, 9000, 1000, 2000)
+              .setDnIpcPorts(8481, 8482, 8483).numDataNodes(3).build();
+        });
+
+    final Set<Integer> httpAndIpcPorts = NetUtils.getFreeSocketPorts(6);
+    LOG.info("Free socket ports: {}", httpAndIpcPorts);
+
+    assertThat(httpAndIpcPorts).doesNotContain(0);
+
+    final int[] httpPorts = new int[3];
+    final int[] ipcPorts = new int[3];
+    int httpPortIdx = 0;
+    int ipcPortIdx = 0;
+    for (Integer httpAndIpcPort : httpAndIpcPorts) {
+      if (httpPortIdx < 3) {
+        httpPorts[httpPortIdx++] = httpAndIpcPort;
+      } else {
+        ipcPorts[ipcPortIdx++] = httpAndIpcPort;
+      }
+    }
+
+    LOG.info("Http ports selected: {}", httpPorts);
+    LOG.info("Ipc ports selected: {}", ipcPorts);
+
+    try (MiniDFSCluster miniDfsCluster = new MiniDFSCluster.Builder(conf)
+        .setDnHttpPorts(httpPorts)
+        .setDnIpcPorts(ipcPorts)
+        .numDataNodes(3).build()) {
+      miniDfsCluster.waitActive();
+
+      assertEquals(httpPorts[0],
+          miniDfsCluster.getDataNode(ipcPorts[0]).getInfoPort());
+      assertEquals(httpPorts[1],
+          miniDfsCluster.getDataNode(ipcPorts[1]).getInfoPort());
+      assertEquals(httpPorts[2],
+          miniDfsCluster.getDataNode(ipcPorts[2]).getInfoPort());
+
+      assertEquals(ipcPorts[0],
+          miniDfsCluster.getDataNode(ipcPorts[0]).getIpcPort());
+      assertEquals(ipcPorts[1],
+          miniDfsCluster.getDataNode(ipcPorts[1]).getIpcPort());
+      assertEquals(ipcPorts[2],
+          miniDfsCluster.getDataNode(ipcPorts[2]).getIpcPort());
+    }
+  }
+
 }
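The comment at the top of testStartStopWithPorts is worth taking seriously: NetUtils.getFreeSocketPorts(6) probes and immediately releases six ports, so a concurrently running test can grab one before the DataNodes bind. A hedged sketch, not part of this commit, of how a downstream test could guard against that race, assuming the collision surfaces as a java.net.BindException (an IOException subclass) from build():

    // Sketch only: re-probe and retry if a probed port is stolen before the bind.
    private static MiniDFSCluster startWithPinnedIpcPorts(Configuration conf, int numDataNodes)
        throws IOException {
      for (int attempt = 1; ; attempt++) {
        int[] ipcPorts = NetUtils.getFreeSocketPorts(numDataNodes).stream()
            .mapToInt(Integer::intValue).toArray();
        try {
          return new MiniDFSCluster.Builder(conf)
              .numDataNodes(numDataNodes)
              .setDnIpcPorts(ipcPorts)
              .build();
        } catch (java.net.BindException e) {
          if (attempt == 3) {
            throw e;  // still colliding after three attempts; give up
          }
          // Another process bound a probed port first; loop and re-probe.
        }
      }
    }

One small oddity: the intercept() calls pass the failure message "MiniJournalCluster port validation failed" in a MiniDFSCluster test; that string only appears if no exception is thrown, but it looks carried over from the analogous MiniJournalCluster change.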

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerLongRunningTasks.java

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ public void testBalancerWithRamDisk() throws Exception {
     long[][] storageCapacities = new long[][]{{ramDiskStorageLimit,
         diskStorageLimit}};
     cluster.startDataNodes(conf, replicationFactor, storageTypes, true, null,
-        null, null, storageCapacities, null, false, false, false, null);
+        null, null, storageCapacities, null, false, false, false, null, null, null);

     cluster.triggerHeartbeats();
     Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java

Lines changed: 3 additions & 3 deletions
@@ -949,7 +949,7 @@ public void testMoverWithStripedFile() throws Exception {
         {StorageType.ARCHIVE, StorageType.ARCHIVE},
         {StorageType.ARCHIVE, StorageType.ARCHIVE},
         {StorageType.ARCHIVE, StorageType.ARCHIVE}},
-        true, null, null, null,capacities, null, false, false, false, null);
+        true, null, null, null, capacities, null, false, false, false, null, null, null);
     cluster.triggerHeartbeats();

     // move file to ARCHIVE
@@ -982,7 +982,7 @@ public void testMoverWithStripedFile() throws Exception {
         { StorageType.SSD, StorageType.DISK },
         { StorageType.SSD, StorageType.DISK },
         { StorageType.SSD, StorageType.DISK } },
-        true, null, null, null, capacities, null, false, false, false, null);
+        true, null, null, null, capacities, null, false, false, false, null, null, null);
     cluster.triggerHeartbeats();

     // move file blocks to ONE_SSD policy
@@ -1372,7 +1372,7 @@ private void startAdditionalDNs(final Configuration conf,
       final MiniDFSCluster cluster) throws IOException {

     cluster.startDataNodes(conf, newNodesRequired, newTypes, true, null, null,
-        null, null, null, false, false, false, null);
+        null, null, null, false, false, false, null, null, null);
     cluster.triggerHeartbeats();
   }
 }
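The call-site updates in TestMover and TestBalancerLongRunningTasks above are mechanical: the widened startDataNodes() overload takes the two port arrays as trailing parameters, and existing callers pass null for both, which keeps the pre-change behavior of binding every DataNode to 127.0.0.1:0. The first TestMover hunk also fixes a pre-existing missing space after "null," while it touches the line.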
