From b5b80fcc8d7b5cb76029bc32ebdf7b4a05275ac7 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 31 Jul 2019 19:34:05 +0800 Subject: [PATCH 1/6] HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. --- .../apache/hadoop/hdds/scm/net/InnerNode.java | 5 +- .../hadoop/hdds/scm/net/InnerNodeImpl.java | 54 +++++--- .../apache/hadoop/hdds/scm/net/NetUtils.java | 34 +++-- .../hadoop/hdds/scm/net/NetworkTopology.java | 16 +-- .../hdds/scm/net/NetworkTopologyImpl.java | 122 ++++++++++-------- .../hdds/scm/net/TestNetworkTopologyImpl.java | 74 +++++++---- .../SCMContainerPlacementRackAware.java | 17 ++- .../TestSCMContainerPlacementRackAware.java | 67 ++++++++-- 8 files changed, 254 insertions(+), 135 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java index a185b01f57403..6cf73bf54800d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.net; import java.util.Collection; +import java.util.List; /** * The interface defines an inner node in a network topology. @@ -72,13 +73,13 @@ N newInnerNode(String name, String location, InnerNode parent, int level, * * @param leafIndex ode's index, start from 0, skip the nodes in * excludedScope and excludedNodes with ancestorGen - * @param excludedScope the excluded scope + * @param excludedScopes the excluded scopes * @param excludedNodes nodes to be excluded. If ancestorGen is not 0, * the chosen node will not share same ancestor with * those in excluded nodes at the specified generation * @param ancestorGen ignored with value is 0 * @return the leaf node corresponding to the given index */ - Node getLeaf(int leafIndex, String excludedScope, + Node getLeaf(int leafIndex, List excludedScopes, Collection excludedNodes, int ancestorGen); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 3f1351d63e389..7602927063816 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import com.google.common.base.Preconditions; @@ -276,7 +277,7 @@ public Node getLeaf(int leafIndex) { * * @param leafIndex node's index, start from 0, skip the nodes in * excludedScope and excludedNodes with ancestorGen - * @param excludedScope the exclude scope + * @param excludedScopes the exclude scopes * @param excludedNodes nodes to be excluded from. If ancestorGen is not 0, * the chosen node will not share same ancestor with * those in excluded nodes at the specified generation @@ -313,12 +314,12 @@ public Node getLeaf(int leafIndex) { * means picking the 3th available node, which is n5. 
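 *
 * A second walkthrough for the new list parameter (illustrative only; it
 * assumes the same example topology and the LinkedHashMap insertion order
 * used by childrenMap):
 *
 * Input:
 *     leafIndex = 0
 *     excludedScopes = {/dc2, /dc1/rack1}
 *     excludedNodes = {}
 *     ancestorGen = 0
 *
 * Output:
 *     the first leaf under /dc1/rack2
 *
 * Every leaf under /dc2 and /dc1/rack1 is subtracted from the covering
 * child's leaf count, so rack1's adjusted count drops to 0, the index
 * falls through it, and selection lands on rack2's first non-excluded
 * child.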
* */ - public Node getLeaf(int leafIndex, String excludedScope, + public Node getLeaf(int leafIndex, List excludedScopes, Collection excludedNodes, int ancestorGen) { Preconditions.checkArgument(leafIndex >= 0 && ancestorGen >= 0); // come to leaf parent layer if (isLeafParent()) { - return getLeafOnLeafParent(leafIndex, excludedScope, excludedNodes); + return getLeafOnLeafParent(leafIndex, excludedScopes, excludedNodes); } int maxLevel = NodeSchemaManager.getInstance().getMaxLevel(); @@ -328,14 +329,16 @@ public Node getLeaf(int leafIndex, String excludedScope, Map countMap = getAncestorCountMap(excludedNodes, ancestorGen, currentGen); // nodes covered by excluded scope - int excludedNodeCount = getExcludedScopeNodeCount(excludedScope); + Map excludedNodeCount = + getExcludedScopeNodeCount(excludedScopes); - for(Node child : childrenMap.values()) { + for (Node child : childrenMap.values()) { int leafCount = child.getNumOfLeaves(); - // skip nodes covered by excluded scope - if (excludedScope != null && - excludedScope.startsWith(child.getNetworkFullPath())) { - leafCount -= excludedNodeCount; + // skip nodes covered by excluded scopes + for (String excludedScope: excludedNodeCount.keySet()) { + if (excludedScope.startsWith(child.getNetworkFullPath())) { + leafCount -= excludedNodeCount.get(excludedScope); + } } // skip nodes covered by excluded nodes and ancestorGen Integer count = countMap.get(child); @@ -343,7 +346,7 @@ public Node getLeaf(int leafIndex, String excludedScope, leafCount -= count; } if (leafIndex < leafCount) { - return ((InnerNode)child).getLeaf(leafIndex, excludedScope, + return ((InnerNode)child).getLeaf(leafIndex, excludedScopes, excludedNodes, ancestorGen); } else { leafIndex -= leafCount; @@ -424,18 +427,22 @@ private Map getAncestorCountMap(Collection nodes, * Get the node with leafIndex, considering skip nodes in excludedScope * and in excludeNodes list. */ - private Node getLeafOnLeafParent(int leafIndex, String excludedScope, + private Node getLeafOnLeafParent(int leafIndex, List excludedScopes, Collection excludedNodes) { Preconditions.checkArgument(isLeafParent() && leafIndex >= 0); if (leafIndex >= getNumOfChildren()) { return null; } for(Node node : childrenMap.values()) { - if ((excludedNodes != null && (excludedNodes.contains(node))) || - (excludedScope != null && - (node.getNetworkFullPath().startsWith(excludedScope)))) { + if (excludedNodes != null && excludedNodes.contains(node)) { continue; } + if (excludedScopes != null && excludedScopes.size() > 0) { + if (excludedScopes.stream().anyMatch(scope -> + node.getNetworkFullPath().startsWith(scope))) { + continue; + } + } if (leafIndex == 0) { return node; } @@ -484,12 +491,19 @@ private Node getChildNode(int index) { return node; } - /** Get how many leaf nodes are covered by the excludedScope. */ - private int getExcludedScopeNodeCount(String excludedScope) { - if (excludedScope == null) { - return 0; + /** Get how many leaf nodes are covered by the excludedScopes(no overlap). */ + private Map getExcludedScopeNodeCount( + List excludedScopes) { + HashMap nodeCounts = new HashMap<>(); + if (excludedScopes == null || excludedScopes.isEmpty()) { + return nodeCounts; + } + + for (String scope: excludedScopes) { + Node excludedScopeNode = getNode(scope); + nodeCounts.put(scope, excludedScopeNode == null ? 0 : + excludedScopeNode.getNumOfLeaves()); } - Node excludedScopeNode = getNode(excludedScope); - return excludedScopeNode == null ? 
0 : excludedScopeNode.getNumOfLeaves(); + return nodeCounts; } } \ No newline at end of file diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java index 501a9ea3e52af..99713af6f5c98 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java @@ -24,6 +24,7 @@ import java.util.Collection; import java.util.Iterator; import java.util.List; +import java.util.stream.Collectors; /** * Utility class to facilitate network topology functions. @@ -73,16 +74,17 @@ public static int locationToDepth(String location) { * function call. * @return the new excludedScope */ - public static String removeDuplicate(NetworkTopology topology, - Collection mutableExcludedNodes, String excludedScope, + public static void removeDuplicate(NetworkTopology topology, + Collection mutableExcludedNodes, List mutableExcludedScopes, int ancestorGen) { if (mutableExcludedNodes == null || mutableExcludedNodes.size() == 0 || - excludedScope == null || topology == null) { - return excludedScope; + mutableExcludedScopes == null || mutableExcludedScopes.size() == 0 || + topology == null) { + return; } Iterator iterator = mutableExcludedNodes.iterator(); - while (iterator.hasNext()) { + while (iterator.hasNext() && (!mutableExcludedScopes.isEmpty())) { Node node = iterator.next(); Node ancestor = topology.getAncestor(node, ancestorGen); if (ancestor == null) { @@ -90,16 +92,20 @@ public static String removeDuplicate(NetworkTopology topology, " of node :" + node); continue; } - if (excludedScope.startsWith(ancestor.getNetworkFullPath())) { - // reset excludedScope if it's covered by exclude node's ancestor - return null; - } - if (ancestor.getNetworkFullPath().startsWith(excludedScope)) { - // remove exclude node if it's covered by excludedScope - iterator.remove(); - } + // excludedScope is child of ancestor + List duplicateList = mutableExcludedScopes.stream() + .filter(scope -> scope.startsWith(ancestor.getNetworkFullPath())) + .collect(Collectors.toList()); + mutableExcludedScopes.removeAll(duplicateList); + + // ancestor is covered by excludedScope + mutableExcludedScopes.stream().forEach(scope -> { + if (ancestor.getNetworkFullPath().startsWith(scope)) { + // remove exclude node if it's covered by excludedScope + iterator.remove(); + } + }); } - return excludedScope; } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java index 8d8571ddb0aef..b663e7a5c1e32 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java @@ -119,11 +119,11 @@ public InvalidTopologyException(String msg) { * Randomly choose a node in the scope, ano not in the exclude scope. * @param scope range of nodes from which a node will be chosen. cannot start * with ~ - * @param excludedScope the chosen node cannot be in this range. cannot + * @param excludedScopes the chosen nodes cannot be in these ranges. cannot * starts with ~ * @return the chosen node */ - Node chooseRandom(String scope, String excludedScope); + Node chooseRandom(String scope, List excludedScopes); /** * Randomly choose a leaf node from scope. 
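To make the two pruning rules in removeDuplicate concrete, a small
walkthrough (paths hypothetical):

    // Given ancestorGen == 1 and excludedNodes == {/dc1/rack1/n1}, the
    // relevant ancestor is /dc1/rack1.
    //
    // Rule 1: an excluded scope /dc1/rack1/n2 starts with the ancestor's
    // path, so it is dropped from mutableExcludedScopes; the ancestorGen
    // rule already excludes every leaf under /dc1/rack1.
    //
    // Rule 2: given an excluded scope /dc1, the ancestor's path starts
    // with that scope, so n1 is dropped from mutableExcludedNodes
    // instead; the scope already covers it.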
@@ -166,7 +166,7 @@ Node chooseRandom(String scope, Collection excludedNodes, * * @param scope range from which a node will be chosen, cannot start with ~ * @param excludedNodes nodes to be excluded - * @param excludedScope excluded node range. Cannot start with ~ + * @param excludedScopes excluded node ranges. Cannot start with ~ * @param ancestorGen matters when excludeNodes is not null. It means the * ancestor generation that's not allowed to share between chosen node and the * excludedNodes. For example, if ancestorGen is 1, means chosen node @@ -176,7 +176,7 @@ Node chooseRandom(String scope, Collection excludedNodes, * * @return the chosen node */ - Node chooseRandom(String scope, String excludedScope, + Node chooseRandom(String scope, List excludedScopes, Collection excludedNodes, int ancestorGen); @@ -187,7 +187,7 @@ Node chooseRandom(String scope, String excludedScope, * * @param scope range of nodes from which a node will be chosen, cannot start * with ~ - * @param excludedScope range of nodes to be excluded, cannot start with ~ + * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ * @param excludedNodes nodes to be excluded * @param affinityNode when not null, the chosen node should share the same * ancestor with this node at generation ancestorGen. @@ -198,7 +198,7 @@ Node chooseRandom(String scope, String excludedScope, * excludedNodes if affinityNode is null * @return the chosen node */ - Node chooseRandom(String scope, String excludedScope, + Node chooseRandom(String scope, List excludedScopes, Collection excludedNodes, Node affinityNode, int ancestorGen); /** @@ -210,7 +210,7 @@ Node chooseRandom(String scope, String excludedScope, * excludedNodes * @param scope range of nodes from which a node will be chosen, cannot start * with ~ - * @param excludedScope range of nodes to be excluded, cannot start with ~ + * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ * @param excludedNodes nodes to be excluded * @param affinityNode when not null, the chosen node should share the same * ancestor with this node at generation ancestorGen. @@ -221,7 +221,7 @@ Node chooseRandom(String scope, String excludedScope, * excludedNodes if affinityNode is null * @return the chosen node */ - Node getNode(int leafIndex, String scope, String excludedScope, + Node getNode(int leafIndex, String scope, List excludedScopes, Collection excludedNodes, Node affinityNode, int ancestorGen); /** Return the distance cost between two nodes diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 07d86c1a19865..0d7abf7a2bb9d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -283,7 +283,9 @@ public Node chooseRandom(String scope) { scope = ROOT; } if (scope.startsWith(SCOPE_REVERSE_STR)) { - return chooseRandom(ROOT, scope.substring(1), null, null, + ArrayList excludedScopes = new ArrayList(); + excludedScopes.add(scope.substring(1)); + return chooseRandom(ROOT, excludedScopes, null, null, ANCESTOR_GENERATION_DEFAULT); } else { return chooseRandom(scope, null, null, null, ANCESTOR_GENERATION_DEFAULT); @@ -294,12 +296,12 @@ public Node chooseRandom(String scope) { * Randomly choose a node in the scope, ano not in the exclude scope. 
* @param scope range of nodes from which a node will be chosen. cannot start * with ~ - * @param excludedScope the chosen node cannot be in this range. cannot + * @param excludedScopes the chosen node cannot be in these ranges. cannot * starts with ~ * @return the chosen node */ - public Node chooseRandom(String scope, String excludedScope) { - return chooseRandom(scope, excludedScope, null, null, + public Node chooseRandom(String scope, List excludedScopes) { + return chooseRandom(scope, excludedScopes, null, null, ANCESTOR_GENERATION_DEFAULT); } @@ -320,7 +322,9 @@ public Node chooseRandom(String scope, Collection excludedNodes) { scope = ROOT; } if (scope.startsWith(SCOPE_REVERSE_STR)) { - return chooseRandom(ROOT, scope.substring(1), excludedNodes, null, + ArrayList excludedScopes = new ArrayList(); + excludedScopes.add(scope.substring(1)); + return chooseRandom(ROOT, excludedScopes, excludedNodes, null, ANCESTOR_GENERATION_DEFAULT); } else { return chooseRandom(scope, null, excludedNodes, null, @@ -352,7 +356,9 @@ public Node chooseRandom(String scope, Collection excludedNodes, scope = ROOT; } if (scope.startsWith(SCOPE_REVERSE_STR)) { - return chooseRandom(ROOT, scope.substring(1), excludedNodes, null, + ArrayList excludedScopes = new ArrayList(); + excludedScopes.add(scope.substring(1)); + return chooseRandom(ROOT, excludedScopes, excludedNodes, null, ancestorGen); } else { return chooseRandom(scope, null, excludedNodes, null, ancestorGen); @@ -364,7 +370,7 @@ public Node chooseRandom(String scope, Collection excludedNodes, * * @param scope range from which a node will be chosen, cannot start with ~ * @param excludedNodes nodes to be excluded - * @param excludedScope excluded node range. Cannot start with ~ + * @param excludedScopes excluded node ranges. Cannot start with ~ * @param ancestorGen matters when excludeNodes is not null. It means the * ancestor generation that's not allowed to share between chosen node and the * excludedNodes. For example, if ancestorGen is 1, means chosen node @@ -374,9 +380,10 @@ public Node chooseRandom(String scope, Collection excludedNodes, * * @return the chosen node */ - public Node chooseRandom(String scope, String excludedScope, + public Node chooseRandom(String scope, List excludedScopes, Collection excludedNodes, int ancestorGen) { - return chooseRandom(scope, excludedScope, excludedNodes, null, ancestorGen); + return chooseRandom(scope, excludedScopes, excludedNodes, null, + ancestorGen); } /** @@ -386,7 +393,7 @@ public Node chooseRandom(String scope, String excludedScope, * * @param scope range of nodes from which a node will be chosen, cannot start * with ~ - * @param excludedScope range of nodes to be excluded, cannot start with ~ + * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ * @param excludedNodes nodes to be excluded * @param affinityNode when not null, the chosen node should share the same * ancestor with this node at generation ancestorGen. 
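As a usage sketch of the new list-based signature (cluster, n5 and n6 are
hypothetical, standing in for a populated NetworkTopologyImpl and two of
its datanodes):

    // Choose another leaf in n5's rack (generation-1 ancestor), skipping
    // everything under /dc2 plus the two replicas already placed.
    List<String> excludedScopes = new ArrayList<>();
    excludedScopes.add("/dc2");
    List<Node> excludedNodes = new ArrayList<>();
    excludedNodes.add(n5);
    excludedNodes.add(n6);
    Node pick = cluster.chooseRandom("/dc1", excludedScopes, excludedNodes,
        n5, 1);

Accepting the scopes as a list is what lets a caller rule out several full
subtrees in a single call instead of one at a time.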
@@ -397,20 +404,20 @@ public Node chooseRandom(String scope, String excludedScope, * excludedNodes if affinityNode is null * @return the chosen node */ - public Node chooseRandom(String scope, String excludedScope, + public Node chooseRandom(String scope, List excludedScopes, Collection excludedNodes, Node affinityNode, int ancestorGen) { if (scope == null) { scope = ROOT; } checkScope(scope); - checkExcludedScope(excludedScope); + checkExcludedScopes(excludedScopes); checkAffinityNode(affinityNode); checkAncestorGen(ancestorGen); netlock.readLock().lock(); try { - return chooseNodeInternal(scope, -1, excludedScope, + return chooseNodeInternal(scope, -1, excludedScopes, excludedNodes, affinityNode, ancestorGen); } finally { netlock.readLock().unlock(); @@ -426,7 +433,7 @@ public Node chooseRandom(String scope, String excludedScope, * excludedNodes * @param scope range of nodes from which a node will be chosen, cannot start * with ~ - * @param excludedScope range of nodes to be excluded, cannot start with ~ + * @param excludedScopes ranges of nodes to be excluded, cannot start with ~ * @param excludedNodes nodes to be excluded * @param affinityNode when not null, the chosen node should share the same * ancestor with this node at generation ancestorGen. @@ -466,20 +473,20 @@ public Node chooseRandom(String scope, String excludedScope, * from subtree /dc1. LeafIndex 1, so we pick the 2nd available node n4. * */ - public Node getNode(int leafIndex, String scope, String excludedScope, + public Node getNode(int leafIndex, String scope, List excludedScopes, Collection excludedNodes, Node affinityNode, int ancestorGen) { Preconditions.checkArgument(leafIndex >= 0); if (scope == null) { scope = ROOT; } checkScope(scope); - checkExcludedScope(excludedScope); + checkExcludedScopes(excludedScopes); checkAffinityNode(affinityNode); checkAncestorGen(ancestorGen); netlock.readLock().lock(); try { - return chooseNodeInternal(scope, leafIndex, excludedScope, + return chooseNodeInternal(scope, leafIndex, excludedScopes, excludedNodes, affinityNode, ancestorGen); } finally { netlock.readLock().unlock(); @@ -487,8 +494,8 @@ public Node getNode(int leafIndex, String scope, String excludedScope, } private Node chooseNodeInternal(String scope, int leafIndex, - String excludedScope, Collection excludedNodes, Node affinityNode, - int ancestorGen) { + List excludedScopes, Collection excludedNodes, + Node affinityNode, int ancestorGen) { Preconditions.checkArgument(scope != null); String finalScope = scope; @@ -509,40 +516,48 @@ private Node chooseNodeInternal(String scope, int leafIndex, ancestorGen = 0; } - // check overlap of excludedScope and finalScope - if (excludedScope != null) { - // excludeScope covers finalScope - if (finalScope.startsWith(excludedScope)) { - return null; - } - // excludeScope and finalScope share nothing - if (!excludedScope.startsWith(finalScope)) { - excludedScope = null; + // check overlap of excludedScopes and finalScope + List mutableExcludedScopes = null; + if (excludedScopes != null && !excludedScopes.isEmpty()) { + mutableExcludedScopes = new ArrayList<>(); + for (String s: excludedScopes) { + // excludeScope covers finalScope + if (finalScope.startsWith(s)) { + return null; + } + // excludeScope and finalScope share nothing case + if (s.startsWith(finalScope)) { + if (!mutableExcludedScopes.stream().anyMatch( + e -> s.startsWith(e))) { + mutableExcludedScopes.add(s); + } + } } } // clone excludedNodes before remove duplicate in it Collection mutableExNodes = null; + + // Remove 
duplicate in excludedNodes if (excludedNodes != null) { - // Remove duplicate in excludedNodes mutableExNodes = excludedNodes.stream().distinct().collect(Collectors.toList()); } - // remove duplicate in mutableExNodes and excludedScope, given ancestorGen - excludedScope = NetUtils.removeDuplicate(this, mutableExNodes, - excludedScope, ancestorGen); + // remove duplicate in mutableExNodes and mutableExcludedScopes + NetUtils.removeDuplicate(this, mutableExNodes, mutableExcludedScopes, + ancestorGen); // calculate available node count Node scopeNode = getNode(finalScope); int availableNodes = getAvailableNodesCount( - scopeNode.getNetworkFullPath(), excludedScope, mutableExNodes, + scopeNode.getNetworkFullPath(), mutableExcludedScopes, mutableExNodes, ancestorGen); if (availableNodes <= 0) { LOG.warn("No available node in (scope=\"{}\" excludedScope=\"{}\" " + "excludedNodes=\"{}\" ancestorGen=\"{}\").", - scopeNode.getNetworkFullPath(), excludedScope, excludedNodes, + scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes, ancestorGen); return null; } @@ -556,17 +571,17 @@ private Node chooseNodeInternal(String scope, int leafIndex, int nodeIndex; if (leafIndex >= 0) { nodeIndex = leafIndex % availableNodes; - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, excludedScope, + ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, mutableExNodes, ancestorGen); } else { nodeIndex = ThreadLocalRandom.current().nextInt(availableNodes); - ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, excludedScope, + ret = ((InnerNode)scopeNode).getLeaf(nodeIndex, mutableExcludedScopes, mutableExNodes, ancestorGen); } LOG.debug("Choosing node[index={},random={}] from \"{}\" available nodes" + " scope=\"{}\", excludedScope=\"{}\", excludeNodes=\"{}\".", nodeIndex, (leafIndex == -1 ? "true" : "false"), availableNodes, - scopeNode.getNetworkFullPath(), excludedScope, excludedNodes); + scopeNode.getNetworkFullPath(), excludedScopes, excludedNodes); LOG.debug("Chosen node = {}", (ret == null ? "not found" : ret.toString())); return ret; } @@ -678,13 +693,13 @@ public List sortByDistanceCost(Node reader, * Return the number of leaves in scope but not in * excludedNodes and excludeScope. 
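 * For example, scope /dc1 with excludedScopes {/dc1/rack1} counts the
 * leaves of /dc1 minus those of /dc1/rack1, and an excluded scope that
 * covers the whole scope yields 0.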
* @param scope the scope - * @param excludedScope excluded scope + * @param excludedScopes excluded scopes * @param mutableExcludedNodes a list of excluded nodes, content might be * changed after the call * @param ancestorGen same generation ancestor prohibit on excludedNodes * @return number of available nodes */ - private int getAvailableNodesCount(String scope, String excludedScope, + private int getAvailableNodesCount(String scope, List excludedScopes, Collection mutableExcludedNodes, int ancestorGen) { Preconditions.checkArgument(scope != null); @@ -702,13 +717,15 @@ private int getAvailableNodesCount(String scope, String excludedScope, } // number of nodes to exclude int excludedCount = 0; - if (excludedScope != null) { - Node excludedScopeNode = getNode(excludedScope); - if (excludedScopeNode != null) { - if (excludedScope.startsWith(scope)) { - excludedCount += excludedScopeNode.getNumOfLeaves(); - } else if (scope.startsWith(excludedScope)) { - return 0; + if (excludedScopes != null) { + for (String excludedScope: excludedScopes) { + Node excludedScopeNode = getNode(excludedScope); + if (excludedScopeNode != null) { + if (excludedScope.startsWith(scope)) { + excludedCount += excludedScopeNode.getNumOfLeaves(); + } else if (scope.startsWith(excludedScope)) { + return 0; + } } } } @@ -766,11 +783,14 @@ private void checkScope(String scope) { } } - private void checkExcludedScope(String excludedScope) { - if (excludedScope != null && - (excludedScope.startsWith(SCOPE_REVERSE_STR))) { - throw new IllegalArgumentException("excludedScope " + excludedScope + - " cannot start with " + SCOPE_REVERSE_STR); + private void checkExcludedScopes(List excludedScopes) { + if (excludedScopes != null && excludedScopes.size() > 0) { + excludedScopes.stream().forEach(scope -> { + if (scope.startsWith(SCOPE_REVERSE_STR)) { + throw new IllegalArgumentException("excludedScope " + scope + + " cannot start with " + SCOPE_REVERSE_STR); + } + }); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index e0041a4ca596a..b31e4a8e9965c 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -357,9 +357,11 @@ public void testChooseRandomSimple() { // test chooseRandom(String scope, String excludedScope) path = dataNodes[random.nextInt(dataNodes.length)].getNetworkFullPath(); - assertNull(cluster.chooseRandom(path, path)); - assertNotNull(cluster.chooseRandom(null, path)); - assertNotNull(cluster.chooseRandom("", path)); + List pathList = new ArrayList<>(); + pathList.add(path); + assertNull(cluster.chooseRandom(path, pathList)); + assertNotNull(cluster.chooseRandom(null, pathList)); + assertNotNull(cluster.chooseRandom("", pathList)); // test chooseRandom(String scope, Collection excludedNodes) assertNull(cluster.chooseRandom("", Arrays.asList(dataNodes))); @@ -399,7 +401,9 @@ public void testChooseRandomExcludedScope() { } // "" excludedScope, no node will ever be chosen - frequency = pickNodes(100, "", null, null, 0); + List pathList = new ArrayList(); + pathList.add(""); + frequency = pickNodes(100, pathList, null, null, 0); for (Node key : dataNodes) { assertTrue(frequency.get(key) == 0); } @@ -411,8 +415,10 @@ public void testChooseRandomExcludedScope() { assertTrue(frequency.get(key) == 0); } // out network 
topology excluded scope, every node should be chosen - scope = "/city1"; - frequency = pickNodes(cluster.getNumOfLeafNode(null), scope, null, null, 0); + pathList.clear(); + pathList.add("/city1"); + frequency = pickNodes( + cluster.getNumOfLeafNode(null), pathList, null, null, 0); for (Node key : dataNodes) { assertTrue(frequency.get(key) != 0); } @@ -582,19 +588,32 @@ public void testChooseRandomWithAffinityNode() { }}; int[] affinityNodeIndexs = {0, dataNodes.length - 1, random.nextInt(dataNodes.length), random.nextInt(dataNodes.length)}; + Node[][] excludedScopeIndexs = {{dataNodes[0]}, + {dataNodes[dataNodes.length - 1]}, + {dataNodes[random.nextInt(dataNodes.length)]}, + {dataNodes[random.nextInt(dataNodes.length)], + dataNodes[random.nextInt(dataNodes.length)] + }, + {dataNodes[random.nextInt(dataNodes.length)], + dataNodes[random.nextInt(dataNodes.length)], + dataNodes[random.nextInt(dataNodes.length)], + }}; int leafNum = cluster.getNumOfLeafNode(null); Map frequency; - String scope; + List pathList = new ArrayList<>(); for (int k : affinityNodeIndexs) { - for (int i : excludedNodeIndexs) { - String path = dataNodes[i].getNetworkFullPath(); - while (!path.equals(ROOT)) { + for (Node[] excludedScopes : excludedScopeIndexs) { + pathList.clear(); + pathList.addAll(Arrays.stream(excludedScopes) + .map(node -> node.getNetworkFullPath()) + .collect(Collectors.toList())); + while (!pathList.get(0).equals(ROOT)) { int ancestorGen = cluster.getMaxLevel() - 1; while (ancestorGen > 0) { for (Node[] list : excludedNodeLists) { List excludedList = Arrays.asList(list); - frequency = pickNodes(leafNum, path, excludedList, dataNodes[k], - ancestorGen); + frequency = pickNodes(leafNum, pathList, excludedList, + dataNodes[k], ancestorGen); Node affinityAncestor = dataNodes[k].getAncestor(ancestorGen); for (Node key : dataNodes) { if (affinityAncestor != null) { @@ -605,28 +624,33 @@ public void testChooseRandomWithAffinityNode() { } else if (excludedList != null && excludedList.contains(key)) { continue; - } else if (path != null && - key.getNetworkFullPath().startsWith(path)) { + } else if (pathList != null && + pathList.stream().anyMatch(path -> + key.getNetworkFullPath().startsWith(path))) { continue; } else { fail("Node is not picked when sequentially going " + "through ancestor node's leaf nodes. node:" + key.getNetworkFullPath() + ", ancestor node:" + affinityAncestor.getNetworkFullPath() + - ", excludedScope: " + path + ", " + "excludedList:" + - (excludedList == null ? "" : excludedList.toString())); + ", excludedScope: " + pathList.toString() + ", " + + "excludedList:" + (excludedList == null ? 
"" : + excludedList.toString())); } } } } ancestorGen--; } - path = path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR)); + pathList = pathList.stream().map(path -> + path.substring(0, path.lastIndexOf(PATH_SEPARATOR_STR))) + .collect(Collectors.toList()); } } } // all nodes excluded, no node will be picked + String scope; List excludedList = Arrays.asList(dataNodes); for (int k : affinityNodeIndexs) { for (int i : excludedNodeIndexs) { @@ -880,9 +904,12 @@ private Map pickNodesAtRandom(int numNodes, frequency.put(dnd, 0); } + List pathList = new ArrayList<>(); + pathList.add(excludedScope.substring(1)); for (int j = 0; j < numNodes; j++) { - Node node = cluster.chooseRandom("", excludedScope.substring(1), - excludedNodes, affinityNode, ancestorGen); + + Node node = cluster.chooseRandom("", pathList, excludedNodes, + affinityNode, ancestorGen); if (node != null) { frequency.put(node, frequency.get(node) + 1); } @@ -895,7 +922,7 @@ private Map pickNodesAtRandom(int numNodes, * This picks a large amount of nodes sequentially. * * @param numNodes the number of nodes - * @param excludedScope the excluded scope, should not start with "~" + * @param excludedScopes the excluded scopes, should not start with "~" * @param excludedNodes the excluded node list * @param affinityNode the chosen node should share the same ancestor at * generation "ancestorGen" with this node @@ -903,8 +930,9 @@ private Map pickNodesAtRandom(int numNodes, * this generation with excludedNodes * @return the frequency that nodes were chosen */ - private Map pickNodes(int numNodes, String excludedScope, - Collection excludedNodes, Node affinityNode, int ancestorGen) { + private Map pickNodes(int numNodes, + List excludedScopes, Collection excludedNodes, + Node affinityNode, int ancestorGen) { Map frequency = new HashMap<>(); for (Node dnd : dataNodes) { frequency.put(dnd, 0); @@ -912,7 +940,7 @@ private Map pickNodes(int numNodes, String excludedScope, excludedNodes = excludedNodes == null ? 
null : excludedNodes.stream().distinct().collect(Collectors.toList()); for (int j = 0; j < numNodes; j++) { - Node node = cluster.getNode(j, null, excludedScope, excludedNodes, + Node node = cluster.getNode(j, null, excludedScopes, excludedNodes, affinityNode, ancestorGen); if (node != null) { frequency.put(node, frequency.get(node) + 1); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 52db8a48f87ab..4f7f3ef542ec7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -242,12 +242,19 @@ private Node chooseNode(List excludedNodes, Node affinityNode, long sizeRequired) throws SCMException { int ancestorGen = RACK_LEVEL; int maxRetry = MAX_RETRY; +<<<<<<< HEAD List excludedNodesForCapacity = null; boolean isFallbacked = false; while(true) { Node node = networkTopology.chooseRandom(NetConstants.ROOT, null, excludedNodes, affinityNode, ancestorGen); metrics.incrDatanodeChooseAttemptCount(); +======= + List excludedNodesForCapacity = null; + while(true) { + Node node = networkTopology.chooseRandom(NetConstants.ROOT, + excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen); +>>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. if (node == null) { // cannot find the node which meets all constrains LOG.warn("Failed to find the datanode. excludedNodes:" + @@ -274,6 +281,7 @@ private Node chooseNode(List excludedNodes, Node affinityNode, if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { LOG.warn("Datanode {} is chosen. Required size is {}", node.toString(), sizeRequired); +<<<<<<< HEAD if (excludedNodes != null && excludedNodesForCapacity != null) { excludedNodes.removeAll(excludedNodesForCapacity); } @@ -281,6 +289,8 @@ private Node chooseNode(List excludedNodes, Node affinityNode, if (isFallbacked) { metrics.incrDatanodeChooseFallbackCount(); } +======= +>>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. 
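        // As resolved in PATCH 5/6 below, the two conflicted stanzas above
        // reduce to counting every attempt and passing the capacity-excluded
        // paths as scopes:
        //
        //   metrics.incrDatanodeChooseAttemptCount();
        //   Node node = networkTopology.chooseRandom(NetConstants.ROOT,
        //       excludedNodesForCapacity, excludedNodes, affinityNode,
        //       ancestorGen);
        //
        // while keeping the success/fallback metric increments before the
        // return below.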
return node; } else { maxRetry--; @@ -294,12 +304,7 @@ private Node chooseNode(List excludedNodes, Node affinityNode, if (excludedNodesForCapacity == null) { excludedNodesForCapacity = new ArrayList<>(); } - excludedNodesForCapacity.add(node); - if (excludedNodes == null) { - excludedNodes = excludedNodesForCapacity; - } else { - excludedNodes.add(node); - } + excludedNodesForCapacity.add(node.getNetworkFullPath()); } } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index b31152eaed14a..cb079bbaa2f61 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -27,32 +27,47 @@ import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.net.NodeSchema; import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; import java.util.List; +import java.util.Set; +import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; +<<<<<<< HEAD import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +======= +import static org.junit.Assume.assumeTrue; +>>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.when; /** * Test for the scm container rack aware placement. 
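 * Each run is parameterized with a cluster size between 3 and 15 datanodes
 * (see setupDatanodes()), so assertions that need more racks guard
 * themselves with assumeTrue().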
*/ +@RunWith(Parameterized.class) public class TestSCMContainerPlacementRackAware { private NetworkTopology cluster; private Configuration conf; private NodeManager nodeManager; + private Integer datanodeCount; private List datanodes = new ArrayList<>(); // policy with fallback capability private SCMContainerPlacementRackAware policy; @@ -62,6 +77,16 @@ public class TestSCMContainerPlacementRackAware { private static final long STORAGE_CAPACITY = 100L; private SCMContainerPlacementMetrics metrics; + public TestSCMContainerPlacementRackAware(Integer count) { + this.datanodeCount = count; + } + + @Parameterized.Parameters + public static Collection setupDatanodes() { + return Arrays.asList(new Object[][]{{3}, {4}, {5}, {6}, {7}, {8}, {9}, + {10}, {11}, {12}, {13}, {14}, {15}}); + } + @Before public void setup() { //initialize network topology instance @@ -74,7 +99,7 @@ public void setup() { // build datanodes, and network topology String rack = "/rack"; String hostname = "node"; - for (int i = 0; i < 15; i++) { + for (int i = 0; i < datanodeCount; i++) { // Totally 3 racks, each has 5 datanodes DatanodeDetails node = TestUtils.createDatanodeDetails( hostname + i, rack + (i / 5)); @@ -88,12 +113,22 @@ public void setup() { .thenReturn(new ArrayList<>(datanodes)); when(nodeManager.getNodeStat(anyObject())) .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 0L, 100L)); - when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); - when(nodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); - when(nodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L)); + if (datanodeCount > 4) { + when(nodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); + when(nodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); + when(nodeManager.getNodeStat(datanodes.get(4))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L)); + } else if (datanodeCount > 3) { + when(nodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); + when(nodeManager.getNodeStat(datanodes.get(3))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); + } else if (datanodeCount > 2) { + when(nodeManager.getNodeStat(datanodes.get(2))) + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 84L, 16L)); + } // create placement policy instances metrics = SCMContainerPlacementMetrics.create(); @@ -160,10 +195,10 @@ public void chooseNodeWithExcludedNodes() throws SCMException { Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); - // 3 replicas, two existing datanodes on different rack + // 3 replicas, one existing datanode + nodeNum = 2; excludedNodes.clear(); excludedNodes.add(datanodes.get(0)); - excludedNodes.add(datanodes.get(7)); datanodeDetails = policy.chooseDatanodes( excludedNodes, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); @@ -171,23 +206,32 @@ public void chooseNodeWithExcludedNodes() throws SCMException { datanodeDetails.get(0), excludedNodes.get(0)) || cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); - // 3 replicas, one existing datanode - nodeNum = 2; + // 3 replicas, two existing datanodes on different rack + assumeTrue(datanodeCount > 7); + nodeNum = 1; excludedNodes.clear(); excludedNodes.add(datanodes.get(0)); + 
excludedNodes.add(datanodes.get(7)); datanodeDetails = policy.chooseDatanodes( excludedNodes, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); Assert.assertTrue(cluster.isSameParent( datanodeDetails.get(0), excludedNodes.get(0)) || cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); + } @Test public void testFallback() throws SCMException { +<<<<<<< HEAD // 5 replicas. there are only 3 racks. policy with fallback should +======= + + // 5 replicas. policy with fallback should +>>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. // allocate the 5th datanode though it will break the rack rule(first // 2 replicas on same rack, others on different racks). + assumeTrue(datanodeCount >= 5); int nodeNum = 5; List datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); @@ -218,6 +262,7 @@ public void testFallback() throws SCMException { @Test public void testNoFallback() throws SCMException { + assumeTrue(datanodeCount > 10 && datanodeCount <= 15); // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. int nodeNum = 5; try { From d31d42bb17455154c567cecacfdcb29c031ea0fd Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 31 Jul 2019 19:35:55 +0800 Subject: [PATCH 2/6] missed changes --- .../algorithms/TestSCMContainerPlacementRackAware.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index cb079bbaa2f61..534baeaa4b9f4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.net.NodeSchema; import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -41,13 +40,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.HashSet; import java.util.List; -import java.util.Set; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; <<<<<<< HEAD From 863ac697f22556fd427121093879c11c5d7a55fb Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Thu, 1 Aug 2019 19:01:45 +0800 Subject: [PATCH 3/6] fix findbugs, javadoc, and failed TestSCMContainerPlacementRackAware --- .../java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java | 6 +++--- .../main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java | 1 - .../algorithms/SCMContainerPlacementRackAware.java | 2 +- .../algorithms/TestSCMContainerPlacementRackAware.java | 7 +++++-- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 7602927063816..1858abff5e8da 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -335,9 +335,9 @@ public Node getLeaf(int leafIndex, List excludedScopes, for (Node child : childrenMap.values()) { int leafCount = child.getNumOfLeaves(); // skip nodes covered by excluded scopes - for (String excludedScope: excludedNodeCount.keySet()) { - if (excludedScope.startsWith(child.getNetworkFullPath())) { - leafCount -= excludedNodeCount.get(excludedScope); + for (Map.Entry entry: excludedNodeCount.entrySet()) { + if (entry.getKey().startsWith(child.getNetworkFullPath())) { + leafCount -= entry.getValue(); } } // skip nodes covered by excluded nodes and ancestorGen diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java index 99713af6f5c98..f99f3a01b32b8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java @@ -72,7 +72,6 @@ public static int locationToDepth(String location) { * Remove node from mutableExcludedNodes if it's covered by excludedScope. * Please noted that mutableExcludedNodes content might be changed after the * function call. - * @return the new excludedScope */ public static void removeDuplicate(NetworkTopology topology, Collection mutableExcludedNodes, List mutableExcludedScopes, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 4f7f3ef542ec7..9a32114e47e6e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -275,7 +275,7 @@ private Node chooseNode(List excludedNodes, Node affinityNode, } } // there is no constrains to reduce or fallback is true - throw new SCMException("No satisfied datanode to meet the " + + throw new SCMException("No satisfied datanode to meet the" + " excludedNodes and affinityNode constrains.", null); } if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 534baeaa4b9f4..b2424a044b89a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -151,11 +151,14 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException { datanodeDetails.get(1))); // 3 replicas + nodeNum = 3; datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); 
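      // (setup() assigns racks as i / 5, so the "datanodeCount > 5" guard
      // added below is exactly the condition for a second rack to exist.)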
Assert.assertEquals(nodeNum, datanodeDetails.size()); Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); + // requires at least 2 racks for following statement + assumeTrue(datanodeCount > 5); Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2))); Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), @@ -177,6 +180,7 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException { public void chooseNodeWithExcludedNodes() throws SCMException { // test choose new datanodes for under replicated pipeline // 3 replicas, two existing datanodes on same rack + assumeTrue(datanodeCount > 5); int nodeNum = 1; List excludedNodes = new ArrayList<>(); @@ -213,7 +217,6 @@ public void chooseNodeWithExcludedNodes() throws SCMException { Assert.assertTrue(cluster.isSameParent( datanodeDetails.get(0), excludedNodes.get(0)) || cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); - } @Test @@ -226,7 +229,7 @@ public void testFallback() throws SCMException { >>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. // allocate the 5th datanode though it will break the rack rule(first // 2 replicas on same rack, others on different racks). - assumeTrue(datanodeCount >= 5); + assumeTrue(datanodeCount > 5); int nodeNum = 5; List datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); From d57a66da93ee246b1e5cc391ba41401b933c68c5 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Fri, 2 Aug 2019 11:07:01 +0800 Subject: [PATCH 4/6] Fix randomly failed test cases in TestSCMContainerPlacementRackAware --- .../TestSCMContainerPlacementRackAware.java | 31 ++++++++----------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index b2424a044b89a..0c59f967d3bc1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -45,12 +45,9 @@ import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -<<<<<<< HEAD import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -======= import static org.junit.Assume.assumeTrue; ->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. 
import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.when; @@ -71,6 +68,7 @@ public class TestSCMContainerPlacementRackAware { // node storage capacity private static final long STORAGE_CAPACITY = 100L; private SCMContainerPlacementMetrics metrics; + private static final int nodePerRack = 5; public TestSCMContainerPlacementRackAware(Integer count) { this.datanodeCount = count; @@ -97,7 +95,7 @@ public void setup() { for (int i = 0; i < datanodeCount; i++) { // Totally 3 racks, each has 5 datanodes DatanodeDetails node = TestUtils.createDatanodeDetails( - hostname + i, rack + (i / 5)); + hostname + i, rack + (i / nodePerRack)); datanodes.add(node); cluster.add(node); } @@ -148,17 +146,16 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException { datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), - datanodeDetails.get(1))); + datanodeDetails.get(1)) || (datanodeCount % nodePerRack == 1)); // 3 replicas - nodeNum = 3; datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); + // requires at least 2 racks for following statement + assumeTrue(datanodeCount > nodePerRack && datanodeCount % nodePerRack > 1); Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); - // requires at least 2 racks for following statement - assumeTrue(datanodeCount > 5); Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2))); Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), @@ -168,6 +165,8 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException { nodeNum = 4; datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); + // requires at least 2 racks and enough datanodes for following statement + assumeTrue(datanodeCount > nodePerRack + 1); Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1))); Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), @@ -180,7 +179,7 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException { public void chooseNodeWithExcludedNodes() throws SCMException { // test choose new datanodes for under replicated pipeline // 3 replicas, two existing datanodes on same rack - assumeTrue(datanodeCount > 5); + assumeTrue(datanodeCount > nodePerRack); int nodeNum = 1; List excludedNodes = new ArrayList<>(); @@ -206,11 +205,10 @@ public void chooseNodeWithExcludedNodes() throws SCMException { cluster.isSameParent(datanodeDetails.get(0), excludedNodes.get(1))); // 3 replicas, two existing datanodes on different rack - assumeTrue(datanodeCount > 7); nodeNum = 1; excludedNodes.clear(); excludedNodes.add(datanodes.get(0)); - excludedNodes.add(datanodes.get(7)); + excludedNodes.add(datanodes.get(5)); datanodeDetails = policy.chooseDatanodes( excludedNodes, null, nodeNum, 15); Assert.assertEquals(nodeNum, datanodeDetails.size()); @@ -221,15 +219,11 @@ public void chooseNodeWithExcludedNodes() throws SCMException { @Test public void testFallback() throws SCMException { -<<<<<<< HEAD // 5 replicas. there are only 3 racks. policy with fallback should -======= - - // 5 replicas. policy with fallback should ->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology. 
// allocate the 5th datanode though it will break the rack rule(first // 2 replicas on same rack, others on different racks). - assumeTrue(datanodeCount > 5); + assumeTrue(datanodeCount > nodePerRack * 2 && + (datanodeCount % nodePerRack > 1)); int nodeNum = 5; List datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15); @@ -260,7 +254,8 @@ public void testFallback() throws SCMException { @Test public void testNoFallback() throws SCMException { - assumeTrue(datanodeCount > 10 && datanodeCount <= 15); + assumeTrue(datanodeCount > (nodePerRack * 2) && + (datanodeCount <= nodePerRack * 3)); // 5 replicas. there are only 3 racks. policy prohibit fallback should fail. int nodeNum = 5; try { From 3debbb8f0acc48cb44f92d9381e2eea85312b7ab Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Tue, 10 Sep 2019 19:30:12 +0800 Subject: [PATCH 5/6] update per comments --- .../hadoop/hdds/scm/net/InnerNodeImpl.java | 2 +- .../apache/hadoop/hdds/scm/net/NetUtils.java | 10 ++++---- .../hadoop/hdds/scm/net/NetworkTopology.java | 23 ------------------ .../hdds/scm/net/NetworkTopologyImpl.java | 24 ++----------------- .../SCMContainerPlacementMetrics.java | 8 ++----- .../SCMContainerPlacementRackAware.java | 19 +++------------ 6 files changed, 13 insertions(+), 73 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 1858abff5e8da..f2183fc9823fe 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -301,7 +301,7 @@ public Node getLeaf(int leafIndex) { * * Input: * leafIndex = 2 - * excludedScope = /dc2 + * excludedScope = /dc2/rack2 * excludedNodes = {/dc1/rack1/n1} * ancestorGen = 1 * diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java index f99f3a01b32b8..4019b1305f6a8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.net; +import org.apache.commons.collections.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,9 +77,8 @@ public static int locationToDepth(String location) { public static void removeDuplicate(NetworkTopology topology, Collection mutableExcludedNodes, List mutableExcludedScopes, int ancestorGen) { - if (mutableExcludedNodes == null || mutableExcludedNodes.size() == 0 || - mutableExcludedScopes == null || mutableExcludedScopes.size() == 0 || - topology == null) { + if (CollectionUtils.isEmpty(mutableExcludedNodes) || + CollectionUtils.isEmpty(mutableExcludedScopes) || topology == null) { return; } @@ -114,7 +114,7 @@ public static void removeDuplicate(NetworkTopology topology, */ public static void removeOutscope(Collection mutableExcludedNodes, String scope) { - if (mutableExcludedNodes == null || scope == null) { + if (CollectionUtils.isEmpty(mutableExcludedNodes) || scope == null) { return; } synchronized (mutableExcludedNodes) { @@ -139,7 +139,7 @@ public static void removeOutscope(Collection mutableExcludedNodes, public static List getAncestorList(NetworkTopology topology, Collection nodes, int generation) { List ancestorList = new ArrayList<>(); - if (topology == null ||nodes == null 
|| nodes.size() == 0 || + if (topology == null || CollectionUtils.isEmpty(nodes) || generation == 0) { return ancestorList; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java index b663e7a5c1e32..3a2c7c0f1a5ce 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java @@ -39,7 +39,6 @@ public InvalidTopologyException(String msg) { */ void add(Node node); - /** * Remove a node from the network topology. This will be called when a * existing datanode is removed from the system. @@ -47,7 +46,6 @@ public InvalidTopologyException(String msg) { */ void remove(Node node); - /** * Check if the tree already contains node node. * @param node a node @@ -68,7 +66,6 @@ public InvalidTopologyException(String msg) { */ boolean isSameAncestor(Node node1, Node node2, int ancestorGen); - /** * Get the ancestor for node on generation ancestorGen. * @@ -160,26 +157,6 @@ public InvalidTopologyException(String msg) { Node chooseRandom(String scope, Collection excludedNodes, int ancestorGen); - - /** - * Randomly choose a leaf node. - * - * @param scope range from which a node will be chosen, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param excludedScopes excluded node ranges. Cannot start with ~ - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. - * - * @return the chosen node - */ - Node chooseRandom(String scope, List excludedScopes, - Collection excludedNodes, int ancestorGen); - - /** * Randomly choose one node from scope, share the same generation * ancestor with affinityNode, and exclude nodes in diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 0d7abf7a2bb9d..0e9afd80292c0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; +import org.apache.commons.collections.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -365,27 +366,6 @@ public Node chooseRandom(String scope, Collection excludedNodes, } } - /** - * Randomly choose a leaf node. - * - * @param scope range from which a node will be chosen, cannot start with ~ - * @param excludedNodes nodes to be excluded - * @param excludedScopes excluded node ranges. Cannot start with ~ - * @param ancestorGen matters when excludeNodes is not null. It means the - * ancestor generation that's not allowed to share between chosen node and the - * excludedNodes. For example, if ancestorGen is 1, means chosen node - * cannot share the same parent with excludeNodes. If value is 2, cannot - * share the same grand parent, and so on. If ancestorGen is 0, then no - * effect. 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 0d7abf7a2bb9d..0e9afd80292c0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -20,6 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -365,27 +366,6 @@ public Node chooseRandom(String scope, Collection<Node> excludedNodes,
     }
   }
 
-  /**
-   * Randomly choose a leaf node.
-   *
-   * @param scope range from which a node will be chosen, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param excludedScopes excluded node ranges. Cannot start with ~
-   * @param ancestorGen matters when excludeNodes is not null. It means the
-   * ancestor generation that's not allowed to share between chosen node and the
-   * excludedNodes. For example, if ancestorGen is 1, means chosen node
-   * cannot share the same parent with excludeNodes. If value is 2, cannot
-   * share the same grand parent, and so on. If ancestorGen is 0, then no
-   * effect.
-   *
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, List<String> excludedScopes,
-      Collection<Node> excludedNodes, int ancestorGen) {
-    return chooseRandom(scope, excludedScopes, excludedNodes, null,
-        ancestorGen);
-  }
-
   /**
    * Randomly choose one leaf node from scope, share the same generation
    * ancestor with affinityNode, and exclude nodes in
@@ -784,7 +764,7 @@ private void checkScope(String scope) {
   }
 
   private void checkExcludedScopes(List<String> excludedScopes) {
-    if (excludedScopes != null && excludedScopes.size() > 0) {
+    if (!CollectionUtils.isEmpty(excludedScopes)) {
       excludedScopes.stream().forEach(scope -> {
         if (scope.startsWith(SCOPE_REVERSE_STR)) {
           throw new IllegalArgumentException("excludedScope " + scope +
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index 869a65b7d5efc..fb709b146be22 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -37,7 +37,7 @@ public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
   private static final MetricsInfo RECORD_INFO = Interns.info(SOURCE_NAME,
-      "SCM Placement Metrics");
+      "SCM Container Placement Metrics");
   private static MetricsRegistry registry;
 
   // total datanode allocation request count
@@ -55,27 +55,23 @@ public SCMContainerPlacementMetrics() {
   public static SCMContainerPlacementMetrics create() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     registry = new MetricsRegistry(RECORD_INFO);
-    return ms.register(SOURCE_NAME, "SCM Placement Metrics",
+    return ms.register(SOURCE_NAME, "SCM Container Placement Metrics",
         new SCMContainerPlacementMetrics());
   }
 
   public void incrDatanodeRequestCount(long count) {
-    System.out.println("request + 1");
     this.datanodeRequestCount.incr(count);
   }
 
   public void incrDatanodeChooseSuccessCount() {
-    System.out.println("success + 1");
     this.datanodeChooseSuccessCount.incr(1);
   }
 
   public void incrDatanodeChooseFallbackCount() {
-    System.out.println("fallback + 1");
     this.datanodeChooseFallbackCount.incr(1);
   }
 
   public void incrDatanodeChooseAttemptCount() {
-    System.out.println("attempt + 1");
     this.datanodeChooseAttemptCount.incr(1);
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index 9a32114e47e6e..8eccf451c9891 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -242,22 +242,15 @@ private Node chooseNode(List<Node> excludedNodes, Node affinityNode,
       long sizeRequired) throws SCMException {
     int ancestorGen = RACK_LEVEL;
     int maxRetry = MAX_RETRY;
-<<<<<<< HEAD
-    List<Node> excludedNodesForCapacity = null;
+    List<String> excludedNodesForCapacity = null;
     boolean isFallbacked = false;
     while(true) {
-      Node node = networkTopology.chooseRandom(NetConstants.ROOT, null,
-          excludedNodes, affinityNode, ancestorGen);
       metrics.incrDatanodeChooseAttemptCount();
-=======
-    List<String> excludedNodesForCapacity = null;
-    while(true) {
       Node node = networkTopology.chooseRandom(NetConstants.ROOT,
           excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen);
->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology.
       if (node == null) {
         // cannot find the node which meets all constraints
-        LOG.warn("Failed to find the datanode. excludedNodes:" +
+        LOG.warn("Failed to find the datanode for container. excludedNodes:" +
             (excludedNodes == null ? "" : excludedNodes.toString()) +
             ", affinityNode:" + (affinityNode == null ? "" :
             affinityNode.getNetworkFullPath()));
@@ -279,18 +272,12 @@ private Node chooseNode(List<Node> excludedNodes, Node affinityNode,
             " excludedNodes and affinityNode constraints.", null);
       }
       if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) {
-        LOG.warn("Datanode {} is chosen. Required size is {}",
+        LOG.debug("Datanode {} is chosen for container. Required size is {}",
             node.toString(), sizeRequired);
-<<<<<<< HEAD
-        if (excludedNodes != null && excludedNodesForCapacity != null) {
-          excludedNodes.removeAll(excludedNodesForCapacity);
-        }
         metrics.incrDatanodeChooseSuccessCount();
         if (isFallbacked) {
           metrics.incrDatanodeChooseFallbackCount();
         }
-=======
->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology.
         return node;
       } else {
         maxRetry--;
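With the conflict markers above resolved, the retry loop in chooseNode() reduces to the shape sketched below. This is a condensed, illustrative rendering only: class scaffolding, the fallback path, and the capacity bookkeeping between retries are elided, and the failure branch is simplified to a single throw.

    // Condensed sketch of the resolved chooseNode(); identifiers follow the
    // diff above, surrounding class members are elided.
    private Node chooseNodeSketch(List<Node> excludedNodes, Node affinityNode,
        long sizeRequired) throws SCMException {
      int ancestorGen = RACK_LEVEL;
      int maxRetry = MAX_RETRY;
      // Network paths of datanodes already rejected for capacity; passed to
      // chooseRandom as excluded scopes so they are skipped on later rounds.
      List<String> excludedNodesForCapacity = null;
      boolean isFallbacked = false;
      while (true) {
        metrics.incrDatanodeChooseAttemptCount();
        Node node = networkTopology.chooseRandom(NetConstants.ROOT,
            excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen);
        if (node == null) {
          // fallback or failure handling as in the hunk above (elided)
          throw new SCMException("No satisfied datanode to meet the"
              + " excludedNodes and affinityNode constraints.", null);
        }
        if (hasEnoughSpace((DatanodeDetails) node, sizeRequired)) {
          metrics.incrDatanodeChooseSuccessCount();
          if (isFallbacked) {
            metrics.incrDatanodeChooseFallbackCount();
          }
          return node;
        }
        maxRetry--;
        // record node.getNetworkFullPath() in excludedNodesForCapacity (elided)
      }
    }

The design point is that capacity-rejected datanodes are now excluded by scope (their network path) rather than by mutating the caller's excludedNodes list, which is what made the removed excludedNodes.removeAll(excludedNodesForCapacity) cleanup unnecessary.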
From 53360c6edc05097150dd29cf4eb7eb2d80769c76 Mon Sep 17 00:00:00 2001
From: Sammi Chen
Date: Thu, 12 Sep 2019 10:56:33 +0800
Subject: [PATCH 6/6] Fix checkstyle violations.

---
 .../TestSCMContainerPlacementRackAware.java   | 21 ++++++++++---------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 0c59f967d3bc1..2d8b81633e753 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -68,7 +68,7 @@ public class TestSCMContainerPlacementRackAware {
   // node storage capacity
   private static final long STORAGE_CAPACITY = 100L;
   private SCMContainerPlacementMetrics metrics;
-  private static final int nodePerRack = 5;
+  private static final int NODE_PER_RACK = 5;
 
   public TestSCMContainerPlacementRackAware(Integer count) {
     this.datanodeCount = count;
@@ -95,7 +95,7 @@ public void setup() {
     for (int i = 0; i < datanodeCount; i++) {
       // Totally 3 racks, each has 5 datanodes
       DatanodeDetails node = TestUtils.createDatanodeDetails(
-          hostname + i, rack + (i / nodePerRack));
+          hostname + i, rack + (i / NODE_PER_RACK));
       datanodes.add(node);
       cluster.add(node);
     }
@@ -146,14 +146,15 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException {
     datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15);
     Assert.assertEquals(nodeNum, datanodeDetails.size());
     Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
-        datanodeDetails.get(1)) || (datanodeCount % nodePerRack == 1));
+        datanodeDetails.get(1)) || (datanodeCount % NODE_PER_RACK == 1));
 
     // 3 replicas
     nodeNum = 3;
     datanodeDetails =
         policy.chooseDatanodes(null, null, nodeNum, 15);
     Assert.assertEquals(nodeNum, datanodeDetails.size());
     // requires at least 2 racks for the following statement
-    assumeTrue(datanodeCount > nodePerRack && datanodeCount % nodePerRack > 1);
+    assumeTrue(datanodeCount > NODE_PER_RACK &&
+        datanodeCount % NODE_PER_RACK > 1);
     Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
         datanodeDetails.get(1)));
     Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
@@ -166,7 +167,7 @@ public void chooseNodeWithNoExcludedNodes() throws SCMException {
     datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15);
     Assert.assertEquals(nodeNum, datanodeDetails.size());
     // requires at least 2 racks and enough datanodes for the following statement
-    assumeTrue(datanodeCount > nodePerRack + 1);
+    assumeTrue(datanodeCount > NODE_PER_RACK + 1);
     Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0),
         datanodeDetails.get(1)));
     Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0),
@@ -179,7 +180,7 @@ public void chooseNodeWithExcludedNodes() throws SCMException {
     // test choose new datanodes for under-replicated pipeline
     // 3 replicas, two existing datanodes on same rack
-    assumeTrue(datanodeCount > nodePerRack);
+    assumeTrue(datanodeCount > NODE_PER_RACK);
     int nodeNum = 1;
     List<DatanodeDetails> excludedNodes = new ArrayList<>();
 
@@ -222,8 +223,8 @@ public void testFallback() throws SCMException {
     // 5 replicas. There are only 3 racks. The policy with fallback should
     // allocate the 5th datanode though it will break the rack rule (first
     // 2 replicas on same rack, others on different racks).
-    assumeTrue(datanodeCount > nodePerRack * 2 &&
-        (datanodeCount % nodePerRack > 1));
+    assumeTrue(datanodeCount > NODE_PER_RACK * 2 &&
+        (datanodeCount % NODE_PER_RACK > 1));
     int nodeNum = 5;
     List<DatanodeDetails> datanodeDetails =
         policy.chooseDatanodes(null, null, nodeNum, 15);
@@ -254,8 +255,8 @@
 
   @Test
   public void testNoFallback() throws SCMException {
-    assumeTrue(datanodeCount > (nodePerRack * 2) &&
-        (datanodeCount <= nodePerRack * 3));
+    assumeTrue(datanodeCount > (NODE_PER_RACK * 2) &&
+        (datanodeCount <= NODE_PER_RACK * 3));
     // 5 replicas. There are only 3 racks. A policy prohibiting fallback should fail.
     int nodeNum = 5;
     try {
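Taken together, the series leaves NetworkTopology with a single multi-scope chooser: chooseRandom(scope, excludedScopes, excludedNodes, affinityNode, ancestorGen). A hypothetical caller follows; the rack paths, the busyNode parameter, and the wrapper class are illustrative, and only the chooseRandom signature itself comes from the patches:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    import org.apache.hadoop.hdds.scm.net.NetConstants;
    import org.apache.hadoop.hdds.scm.net.NetworkTopology;
    import org.apache.hadoop.hdds.scm.net.Node;

    public final class ChooseRandomSketch {
      private ChooseRandomSketch() { }

      /** Pick a leaf outside /rack0 and /rack1 that is not a rack-mate of busyNode. */
      public static Node pick(NetworkTopology topology, Node busyNode) {
        // Multiple excluded scopes in one call -- the point of HDDS-1879.
        List<String> excludedScopes = Arrays.asList("/rack0", "/rack1");
        // ancestorGen = 1: the chosen node must not share a parent (its rack,
        // in the default schema) with any node in excludedNodes.
        return topology.chooseRandom(NetConstants.ROOT, excludedScopes,
            Collections.singletonList(busyNode), null /* no affinity */, 1);
      }
    }

Passing null for the excluded scopes or nodes simply disables that filter, which is how SCMContainerPlacementRackAware starts its retry loop before any capacity rejections have accumulated.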