From 3debbb8f0acc48cb44f92d9381e2eea85312b7ab Mon Sep 17 00:00:00 2001
From: Sammi Chen
Date: Tue, 10 Sep 2019 19:30:12 +0800
Subject: [PATCH] update per comments

---
 .../hadoop/hdds/scm/net/InnerNodeImpl.java    |  2 +-
 .../apache/hadoop/hdds/scm/net/NetUtils.java  | 10 ++++----
 .../hadoop/hdds/scm/net/NetworkTopology.java  | 23 ------------------
 .../hdds/scm/net/NetworkTopologyImpl.java     | 24 ++-----------------
 .../SCMContainerPlacementMetrics.java         |  8 ++-----
 .../SCMContainerPlacementRackAware.java       | 19 +++-------------
 6 files changed, 13 insertions(+), 73 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
index 1858abff5e8da..f2183fc9823fe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java
@@ -301,7 +301,7 @@ public Node getLeaf(int leafIndex) {
    *
    * Input:
    *    leafIndex = 2
-   *    excludedScope = /dc2
+   *    excludedScope = /dc2/rack2
    *    excludedNodes = {/dc1/rack1/n1}
    *    ancestorGen = 1
    *
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
index f99f3a01b32b8..4019b1305f6a8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetUtils.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.net;

+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -76,9 +77,8 @@ public static int locationToDepth(String location) {
   public static void removeDuplicate(NetworkTopology topology,
       Collection mutableExcludedNodes, List mutableExcludedScopes,
       int ancestorGen) {
-    if (mutableExcludedNodes == null || mutableExcludedNodes.size() == 0 ||
-        mutableExcludedScopes == null || mutableExcludedScopes.size() == 0 ||
-        topology == null) {
+    if (CollectionUtils.isEmpty(mutableExcludedNodes) ||
+        CollectionUtils.isEmpty(mutableExcludedScopes) || topology == null) {
       return;
     }

@@ -114,7 +114,7 @@ public static void removeDuplicate(NetworkTopology topology,
    */
   public static void removeOutscope(Collection mutableExcludedNodes,
       String scope) {
-    if (mutableExcludedNodes == null || scope == null) {
+    if (CollectionUtils.isEmpty(mutableExcludedNodes) || scope == null) {
       return;
     }
     synchronized (mutableExcludedNodes) {
@@ -139,7 +139,7 @@ public static void removeOutscope(Collection mutableExcludedNodes,
   public static List getAncestorList(NetworkTopology topology,
       Collection nodes, int generation) {
     List ancestorList = new ArrayList<>();
-    if (topology == null ||nodes == null || nodes.size() == 0 ||
+    if (topology == null || CollectionUtils.isEmpty(nodes) ||
         generation == 0) {
       return ancestorList;
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java
index b663e7a5c1e32..3a2c7c0f1a5ce 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopology.java
@@ -39,7 +39,6 @@ public InvalidTopologyException(String msg) {
    */
   void add(Node node);

-
   /**
    * Remove a node from the network topology. This will be called when a
    * existing datanode is removed from the system.
@@ -47,7 +46,6 @@ public InvalidTopologyException(String msg) {
    */
   void remove(Node node);

-
   /**
    * Check if the tree already contains node node.
    * @param node a node
@@ -68,7 +66,6 @@ public InvalidTopologyException(String msg) {
    */
   boolean isSameAncestor(Node node1, Node node2, int ancestorGen);

-
   /**
    * Get the ancestor for node on generation ancestorGen.
    *
@@ -160,26 +157,6 @@ public InvalidTopologyException(String msg) {
   Node chooseRandom(String scope, Collection excludedNodes,
       int ancestorGen);

-
-  /**
-   * Randomly choose a leaf node.
-   *
-   * @param scope range from which a node will be chosen, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param excludedScopes excluded node ranges. Cannot start with ~
-   * @param ancestorGen matters when excludeNodes is not null. It means the
-   * ancestor generation that's not allowed to share between chosen node and the
-   * excludedNodes. For example, if ancestorGen is 1, means chosen node
-   * cannot share the same parent with excludeNodes. If value is 2, cannot
-   * share the same grand parent, and so on. If ancestorGen is 0, then no
-   * effect.
-   *
-   * @return the chosen node
-   */
-  Node chooseRandom(String scope, List excludedScopes,
-      Collection excludedNodes, int ancestorGen);
-
-
   /**
    * Randomly choose one node from scope, share the same generation
    * ancestor with affinityNode, and exclude nodes in
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 0d7abf7a2bb9d..0e9afd80292c0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -20,6 +20,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import org.apache.commons.collections.CollectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -365,27 +366,6 @@ public Node chooseRandom(String scope, Collection excludedNodes,
     }
   }

-  /**
-   * Randomly choose a leaf node.
-   *
-   * @param scope range from which a node will be chosen, cannot start with ~
-   * @param excludedNodes nodes to be excluded
-   * @param excludedScopes excluded node ranges. Cannot start with ~
-   * @param ancestorGen matters when excludeNodes is not null. It means the
-   * ancestor generation that's not allowed to share between chosen node and the
-   * excludedNodes. For example, if ancestorGen is 1, means chosen node
-   * cannot share the same parent with excludeNodes. If value is 2, cannot
-   * share the same grand parent, and so on. If ancestorGen is 0, then no
-   * effect.
-   *
-   * @return the chosen node
-   */
-  public Node chooseRandom(String scope, List excludedScopes,
-      Collection excludedNodes, int ancestorGen) {
-    return chooseRandom(scope, excludedScopes, excludedNodes, null,
-        ancestorGen);
-  }
-
   /**
    * Randomly choose one leaf node from scope, share the same generation
    * ancestor with affinityNode, and exclude nodes in
@@ -784,7 +764,7 @@ private void checkScope(String scope) {
   }

   private void checkExcludedScopes(List excludedScopes) {
-    if (excludedScopes != null && excludedScopes.size() > 0) {
+    if (!CollectionUtils.isEmpty(excludedScopes)) {
       excludedScopes.stream().forEach(scope -> {
         if (scope.startsWith(SCOPE_REVERSE_STR)) {
           throw new IllegalArgumentException("excludedScope " + scope +
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
index 869a65b7d5efc..fb709b146be22 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java
@@ -37,7 +37,7 @@ public class SCMContainerPlacementMetrics implements MetricsSource {
   public static final String SOURCE_NAME =
       SCMContainerPlacementMetrics.class.getSimpleName();
   private static final MetricsInfo RECORD_INFO = Interns.info(SOURCE_NAME,
-      "SCM Placement Metrics");
+      "SCM Container Placement Metrics");
   private static MetricsRegistry registry;

   // total datanode allocation request count
@@ -55,27 +55,23 @@ public SCMContainerPlacementMetrics() {
   public static SCMContainerPlacementMetrics create() {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     registry = new MetricsRegistry(RECORD_INFO);
-    return ms.register(SOURCE_NAME, "SCM Placement Metrics",
+    return ms.register(SOURCE_NAME, "SCM Container Placement Metrics",
         new SCMContainerPlacementMetrics());
   }

   public void incrDatanodeRequestCount(long count) {
-    System.out.println("request + 1");
     this.datanodeRequestCount.incr(count);
   }

   public void incrDatanodeChooseSuccessCount() {
-    System.out.println("success + 1");
     this.datanodeChooseSuccessCount.incr(1);
   }

   public void incrDatanodeChooseFallbackCount() {
-    System.out.println("fallback + 1");
     this.datanodeChooseFallbackCount.incr(1);
   }

   public void incrDatanodeChooseAttemptCount() {
-    System.out.println("attempt + 1");
     this.datanodeChooseAttemptCount.incr(1);
   }

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index 9a32114e47e6e..8eccf451c9891 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -242,22 +242,15 @@ private Node chooseNode(List excludedNodes, Node affinityNode,
       long sizeRequired) throws SCMException {
     int ancestorGen = RACK_LEVEL;
     int maxRetry = MAX_RETRY;
-<<<<<<< HEAD
-    List excludedNodesForCapacity = null;
+    List excludedNodesForCapacity = null;
     boolean isFallbacked = false;
     while(true) {
-      Node node = networkTopology.chooseRandom(NetConstants.ROOT, null,
-          excludedNodes, affinityNode, ancestorGen);
       metrics.incrDatanodeChooseAttemptCount();
-=======
-      List excludedNodesForCapacity = null;
-      while(true) {
       Node node = networkTopology.chooseRandom(NetConstants.ROOT,
           excludedNodesForCapacity, excludedNodes, affinityNode, ancestorGen);
->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology.
       if (node == null) {
         // cannot find the node which meets all constrains
-        LOG.warn("Failed to find the datanode. excludedNodes:" +
+        LOG.warn("Failed to find the datanode for container. excludedNodes:" +
             (excludedNodes == null ? "" : excludedNodes.toString()) +
             ", affinityNode:" +
             (affinityNode == null ? "" : affinityNode.getNetworkFullPath()));
@@ -279,18 +272,12 @@ private Node chooseNode(List excludedNodes, Node affinityNode,
             " excludedNodes and affinityNode constrains.", null);
       }
       if (hasEnoughSpace((DatanodeDetails)node, sizeRequired)) {
-        LOG.warn("Datanode {} is chosen. Required size is {}",
+        LOG.debug("Datanode {} is chosen for container. Required size is {}",
             node.toString(), sizeRequired);
-<<<<<<< HEAD
-        if (excludedNodes != null && excludedNodesForCapacity != null) {
-          excludedNodes.removeAll(excludedNodesForCapacity);
-        }
         metrics.incrDatanodeChooseSuccessCount();
         if (isFallbacked) {
           metrics.incrDatanodeChooseFallbackCount();
         }
-=======
->>>>>>> HDDS-1879. Support multiple excluded scopes when choosing datanodes in NetworkTopology.
         return node;
       } else {
         maxRetry--;
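
Editor's note (not part of the patch): below is a minimal, self-contained sketch of the null-safe guard
pattern that the NetUtils hunks above switch to. It assumes commons-collections 3.x
(org.apache.commons.collections.CollectionUtils) is on the classpath; the class and method names in the
sketch are hypothetical and exist only for illustration.

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.commons.collections.CollectionUtils;

// Hypothetical demo class; not part of the Hadoop source tree.
public final class IsEmptyGuardDemo {

  // Mirrors the guard style used in NetUtils.removeDuplicate after this patch:
  // CollectionUtils.isEmpty(c) is null-safe, so the verbose
  // "c == null || c.size() == 0" pairs collapse into single calls.
  static boolean shouldSkip(Collection<?> excludedNodes,
      List<String> excludedScopes, Object topology) {
    return CollectionUtils.isEmpty(excludedNodes)
        || CollectionUtils.isEmpty(excludedScopes)
        || topology == null;
  }

  public static void main(String[] args) {
    Object topology = new Object();
    // null collection -> skip (prints true)
    System.out.println(shouldSkip(null, Arrays.asList("/rack1"), topology));
    // empty collection -> skip (prints true)
    System.out.println(shouldSkip(Collections.emptyList(),
        Arrays.asList("/rack1"), topology));
    // non-empty inputs and non-null topology -> do the real work (prints false)
    System.out.println(shouldSkip(Arrays.asList("n1"),
        Arrays.asList("/rack1"), topology));
  }
}

CollectionUtils.isEmpty returns true for both null and empty collections, which is why the explicit
null/size checks in removeDuplicate, removeOutscope, getAncestorList, and checkExcludedScopes can be
replaced by single calls in the hunks above.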