From 024bd70b97d549a4336b479b47c23c7f5ba77857 Mon Sep 17 00:00:00 2001 From: Alyssa Wilk Date: Wed, 20 Dec 2017 10:46:25 -0500 Subject: [PATCH 1/3] Restructuring the base LB class for impending weighted failover. Signed-off-by: Alyssa Wilk --- source/common/upstream/load_balancer_impl.cc | 69 +++++++++++-------- source/common/upstream/load_balancer_impl.h | 14 ++-- source/common/upstream/ring_hash_lb.cc | 10 +-- .../upstream/load_balancer_impl_test.cc | 56 +++++++++++++++ 4 files changed, 111 insertions(+), 38 deletions(-) diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index e4708facc761..8786f7592242 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -40,10 +40,17 @@ LoadBalancerBase::LoadBalancerBase(const PrioritySet& priority_set, ClusterStats Runtime::Loader& runtime, Runtime::RandomGenerator& random) : stats_(stats), runtime_(runtime), random_(random), priority_set_(priority_set), best_available_host_set_(bestAvailable(&priority_set)) { + per_priority_load_.resize(priority_set.hostSetsPerPriority().size()); + per_priority_load_[best_available_host_set_->priority()] = 100; priority_set_.addMemberUpdateCb([this](uint32_t, const std::vector&, const std::vector&) -> void { + per_priority_load_.resize(priority_set_.hostSetsPerPriority().size()); + per_priority_load_[best_available_host_set_->priority()] = 0; // Update the host set to use for picking, based on the new state. 
best_available_host_set_ = bestAvailable(&priority_set_); + // With current picking logic, the best available host set gets 100% of + // traffic and all others get 0% + per_priority_load_[best_available_host_set_->priority()] = 100; }); } // namespace Upstream @@ -58,8 +65,6 @@ ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(const PrioritySet& priority resizePerPriorityState(); priority_set_.addMemberUpdateCb([this](uint32_t priority, const std::vector&, const std::vector&) -> void { - // Update the host set to use for picking, based on the new state. - best_available_host_set_ = bestAvailable(&priority_set_); // Make sure per_priority_state_ is as large as priority_set_.hostSetsPerPriority() resizePerPriorityState(); // If there's a local priority set, regenerate all routing based on a potential size change to @@ -79,9 +84,11 @@ ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(const PrioritySet& priority const std::vector&) -> void { ASSERT(priority == 0); UNREFERENCED_PARAMETER(priority); - // If the set of local Envoys changes, regenerate routing based on potential changes to - // the set of servers routing to priority_set_. - regenerateLocalityRoutingStructures(bestAvailablePriority()); + // If the set of local Envoys changes, regenerate routing for all priority levels based on + // potential changes to the set of servers routing to priority_set_. + for (size_t i = 0; i < priority_set_.hostSetsPerPriority().size(); ++i) { + regenerateLocalityRoutingStructures(i); + } }); } } @@ -95,8 +102,7 @@ ZoneAwareLoadBalancerBase::~ZoneAwareLoadBalancerBase() { void ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures(uint32_t priority) { ASSERT(local_priority_set_); stats_.lb_recalculate_zone_structures_.inc(); - // We are updating based on a change for a priority level in priority_set_, or the latched - // bestAvailablePriority() which is a latched priority for priority_set_. 
+ // We are updating based on a change for a priority level in priority_set_. ASSERT(priority < priority_set_.hostSetsPerPriority().size()); // resizePerPriorityState should ensure these stay in sync. ASSERT(per_priority_state_.size() == priority_set_.hostSetsPerPriority().size()); @@ -112,9 +118,17 @@ void ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures(uint32_t pri size_t num_localities = host_set.healthyHostsPerLocality().size(); ASSERT(num_localities > 0); + // It is worth noting that all of the percentages calculated are orthogonal to + // how much load this priority level receives, percentageLoad(priority). + // + // If the host sets are such that 20% of load is handled locally and 80% is residual, and then + // half the hosts in all host sets go unhealthy, this priority set will + // still send half of the incoming load to the local locality and 80% to residual. + // + // Basically, fairness across localities within a priority is guaranteed. Fairness across + // localities across priorities is not. uint64_t local_percentage[num_localities]; calculateLocalityPercentage(localHostSet().healthyHostsPerLocality(), local_percentage); - uint64_t upstream_percentage[num_localities]; calculateLocalityPercentage(host_set.healthyHostsPerLocality(), upstream_percentage); @@ -231,19 +245,20 @@ void ZoneAwareLoadBalancerBase::calculateLocalityPercentage( } } -const std::vector& ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts() { - PerPriorityState& state = *per_priority_state_[bestAvailablePriority()]; +const std::vector& +ZoneAwareLoadBalancerBase::tryChooseLocalLocalityHosts(const HostSet& host_set) { + PerPriorityState& state = *per_priority_state_[host_set.priority()]; ASSERT(state.locality_routing_state_ != LocalityRoutingState::NoLocalityRouting); // At this point it's guaranteed to be at least 2 localities. 
- size_t number_of_localities = best_available_host_set_->healthyHostsPerLocality().size(); + size_t number_of_localities = host_set.healthyHostsPerLocality().size(); ASSERT(number_of_localities >= 2U); // Try to push all of the requests to the same locality first. if (state.locality_routing_state_ == LocalityRoutingState::LocalityDirect) { stats_.lb_zone_routing_all_directly_.inc(); - return best_available_host_set_->healthyHostsPerLocality()[0]; + return host_set.healthyHostsPerLocality()[0]; } ASSERT(state.locality_routing_state_ == LocalityRoutingState::LocalityResidual); @@ -252,7 +267,7 @@ const std::vector& ZoneAwareLoadBalancerBase::tryChooseLocalLocal // push to the local locality, check if we can push to local locality on current iteration. if (random_.random() % 10000 < state.local_percent_to_route_) { stats_.lb_zone_routing_sampled_.inc(); - return best_available_host_set_->healthyHostsPerLocality()[0]; + return host_set.healthyHostsPerLocality()[0]; } // At this point we must route cross locality as we cannot route to the local locality. @@ -262,8 +277,7 @@ const std::vector& ZoneAwareLoadBalancerBase::tryChooseLocalLocal // locality percentages. In this case just select random locality. 
if (state.residual_capacity_[number_of_localities - 1] == 0) { stats_.lb_zone_no_capacity_left_.inc(); - return best_available_host_set_ - ->healthyHostsPerLocality()[random_.random() % number_of_localities]; + return host_set.healthyHostsPerLocality()[random_.random() % number_of_localities]; } // Random sampling to select specific locality for cross locality traffic based on the additional @@ -277,39 +291,38 @@ const std::vector& ZoneAwareLoadBalancerBase::tryChooseLocalLocal i++; } - return best_available_host_set_->healthyHostsPerLocality()[i]; + return host_set.healthyHostsPerLocality()[i]; } const std::vector& ZoneAwareLoadBalancerBase::hostsToUse() { - ASSERT(best_available_host_set_->healthyHosts().size() <= - best_available_host_set_->hosts().size()); + const HostSet& host_set = chooseHostSet(); - // If the best available priority has insufficient healthy hosts, return all hosts. - if (isGlobalPanic(*best_available_host_set_, runtime_)) { + // If the selected host set has insufficient healthy hosts, return all hosts. + if (isGlobalPanic(host_set, runtime_)) { stats_.lb_healthy_panic_.inc(); - return best_available_host_set_->hosts(); + return host_set.hosts(); } - // If we've latched that we can't do priority-based routing, return healthy - // hosts for the best available priority. - if (per_priority_state_[bestAvailablePriority()]->locality_routing_state_ == + // If we've latched that we can't do priority-based routing, return healthy hosts for the selected + // host set. + if (per_priority_state_[host_set.priority()]->locality_routing_state_ == LocalityRoutingState::NoLocalityRouting) { - return best_available_host_set_->healthyHosts(); + return host_set.healthyHosts(); } // Determine if the load balancer should do zone based routing for this pick. 
if (!runtime_.snapshot().featureEnabled(RuntimeZoneEnabled, 100)) { - return best_available_host_set_->healthyHosts(); + return host_set.healthyHosts(); } if (isGlobalPanic(localHostSet(), runtime_)) { stats_.lb_local_cluster_not_ok_.inc(); // If the local Envoy instances are in global panic, do not do locality // based routing. - return best_available_host_set_->healthyHosts(); + return host_set.healthyHosts(); } - return tryChooseLocalLocalityHosts(); + return tryChooseLocalLocalityHosts(host_set); } HostConstSharedPtr RoundRobinLoadBalancer::chooseHost(LoadBalancerContext*) { diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index 3b135fc18f83..fc9806e42086 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -28,18 +28,24 @@ class LoadBalancerBase { LoadBalancerBase(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, Runtime::RandomGenerator& random); - uint32_t bestAvailablePriority() const { return best_available_host_set_->priority(); } + const HostSet& chooseHostSet() { return *best_available_host_set_; } + + uint32_t percentageLoad(uint32_t priority) const { return per_priority_load_[priority]; } ClusterStats& stats_; Runtime::Loader& runtime_; Runtime::RandomGenerator& random_; // The priority-ordered set of hosts to use for load balancing. const PrioritySet& priority_set_; + +private: // The lowest priority host set from priority_set_ with healthy hosts, or the // zero-priority host set if all host sets are fully unhealthy. // This is updated as the hosts and healthy hosts in priority_set_ are updated // but will never be null. const HostSet* best_available_host_set_; + // The percentage load (0-100) for each priority level + std::vector per_priority_load_; }; /** @@ -82,8 +88,9 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { /** * Try to select upstream hosts from the same locality. 
+ * @param host_set the last host set returned by chooseHostSet() */ - const std::vector& tryChooseLocalLocalityHosts(); + const std::vector& tryChooseLocalLocalityHosts(const HostSet& host_set); /** * @return (number of hosts in a given locality)/(total number of hosts) in ret param. @@ -116,9 +123,6 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { }; typedef std::unique_ptr PerPriorityStatePtr; // Routing state broken out for each priority level in priority_set_. - // With the current implementation we could save some CPU and memory by only - // tracking this for best_available_host_set_ but as we support gentle - // failover it's useful to precompute it for all priority levels. std::vector per_priority_state_; Common::CallbackHandle* local_priority_set_member_update_cb_handle_{}; }; diff --git a/source/common/upstream/ring_hash_lb.cc b/source/common/upstream/ring_hash_lb.cc index 3daa30807f31..b1930e3ff1f1 100644 --- a/source/common/upstream/ring_hash_lb.cc +++ b/source/common/upstream/ring_hash_lb.cc @@ -25,13 +25,13 @@ RingHashLoadBalancer::RingHashLoadBalancer( } HostConstSharedPtr RingHashLoadBalancer::chooseHost(LoadBalancerContext* context) { - if (isGlobalPanic(*best_available_host_set_, runtime_)) { + const HostSet& host_set = chooseHostSet(); + if (isGlobalPanic(host_set, runtime_)) { stats_.lb_healthy_panic_.inc(); - return per_priority_state_[bestAvailablePriority()]->all_hosts_ring_.chooseHost(context, - random_); + return per_priority_state_[host_set.priority()]->all_hosts_ring_.chooseHost(context, random_); } else { - return per_priority_state_[bestAvailablePriority()]->healthy_hosts_ring_.chooseHost(context, - random_); + return per_priority_state_[host_set.priority()]->healthy_hosts_ring_.chooseHost(context, + random_); } } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index 381608527e9e..dd49fd11628e 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ 
b/test/common/upstream/load_balancer_impl_test.cc @@ -39,6 +39,62 @@ class LoadBalancerTestBase : public ::testing::TestWithParam { std::shared_ptr info_{new NiceMock()}; }; +class TestLb : public LoadBalancerBase { +public: + TestLb(const PrioritySet& priority_set, ClusterStats& stats, Runtime::Loader& runtime, + Runtime::RandomGenerator& random) + : LoadBalancerBase(priority_set, stats, runtime, random) {} + using LoadBalancerBase::chooseHostSet; + using LoadBalancerBase::percentageLoad; +}; + +class LoadBalancerBaseTest : public LoadBalancerTestBase { +public: + TestLb lb_{priority_set_, stats_, runtime_, random_}; +}; + +INSTANTIATE_TEST_CASE_P(PrimaryOrFailover, LoadBalancerBaseTest, ::testing::Values(true)); + +// Basic test of host set selection. +TEST_P(LoadBalancerBaseTest, PrioritySelecton) { + host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:80")}; + failover_host_set_.hosts_ = {makeTestHost(info_, "tcp://127.0.0.1:81")}; + host_set_.runCallbacks({}, {}); + + // With both the primary and failover hosts unhealthy, we should select an + // unhealthy primary host. + EXPECT_EQ(100, lb_.percentageLoad(0)); + EXPECT_EQ(0, lb_.percentageLoad(1)); + EXPECT_EQ(&host_set_, &lb_.chooseHostSet()); + + // Update the priority set with a new priority level P=2 and ensure the host + // is chosen + MockHostSet& tertiary_host_set_ = *priority_set_.getMockHostSet(2); + HostVectorSharedPtr hosts( + new std::vector({makeTestHost(info_, "tcp://127.0.0.1:82")})); + tertiary_host_set_.hosts_ = *hosts; + tertiary_host_set_.healthy_hosts_ = tertiary_host_set_.hosts_; + tertiary_host_set_.runCallbacks({}, {}); + EXPECT_EQ(0, lb_.percentageLoad(0)); + EXPECT_EQ(0, lb_.percentageLoad(1)); + EXPECT_EQ(100, lb_.percentageLoad(2)); + EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet()); + + // Now add a healthy host in P=0 and make sure it is immediately selected. 
+ host_set_.healthy_hosts_ = host_set_.hosts_; + tertiary_host_set_.runCallbacks({}, {}); + EXPECT_EQ(100, lb_.percentageLoad(0)); + EXPECT_EQ(0, lb_.percentageLoad(2)); + EXPECT_EQ(&host_set_, &lb_.chooseHostSet()); + + // Remove the healthy host and ensure we fail back over to tertiary_host_set_ + host_set_.healthy_hosts_ = {}; + host_set_.runCallbacks({}, {}); + EXPECT_EQ(0, lb_.percentageLoad(0)); + EXPECT_EQ(100, lb_.percentageLoad(2)); + EXPECT_EQ(&tertiary_host_set_, &lb_.chooseHostSet()); +} + class RoundRobinLoadBalancerTest : public LoadBalancerTestBase { public: void init(bool need_local_cluster) { From af8b8bc21b7c297d925188e92a8b54dc35d66a29 Mon Sep 17 00:00:00 2001 From: Alyssa Wilk Date: Thu, 21 Dec 2017 20:36:35 -0500 Subject: [PATCH 2/3] Disabling zone aware routing for P>0 until someone wants it Signed-off-by: Alyssa Wilk --- source/common/upstream/load_balancer_impl.cc | 3 +- .../upstream/load_balancer_impl_test.cc | 28 +++++++++++++++++-- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index 8786f7592242..224a527ebd50 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -187,7 +187,8 @@ void ZoneAwareLoadBalancerBase::resizePerPriorityState() { } bool ZoneAwareLoadBalancerBase::earlyExitNonLocalityRouting(uint32_t priority) { - if (priority_set_.hostSetsPerPriority().size() < priority + 1) { + // Locality routing not supported for multiple priorities. 
+ if (priority > 0) { return true; } diff --git a/test/common/upstream/load_balancer_impl_test.cc b/test/common/upstream/load_balancer_impl_test.cc index dd49fd11628e..e41a97c5c134 100644 --- a/test/common/upstream/load_balancer_impl_test.cc +++ b/test/common/upstream/load_balancer_impl_test.cc @@ -287,9 +287,13 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) { EXPECT_EQ(hostSet().healthy_hosts_[1], lb_->chooseHost(nullptr)); EXPECT_EQ(hostSet().healthy_hosts_[2], lb_->chooseHost(nullptr)); - // Cluster size is computed once at zone aware struct regeneration point. - EXPECT_EQ(1U, stats_.lb_zone_cluster_too_small_.value()); - + if (&hostSet() == &host_set_) { + // Cluster size is computed once at zone aware struct regeneration point. + EXPECT_EQ(1U, stats_.lb_zone_cluster_too_small_.value()); + } else { + EXPECT_EQ(0U, stats_.lb_zone_cluster_too_small_.value()); + return; + } EXPECT_CALL(runtime_.snapshot_, getInteger("upstream.zone_routing.min_cluster_size", 6)) .WillRepeatedly(Return(1)); // Trigger reload. @@ -299,6 +303,9 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareSmallCluster) { } TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareDifferentZoneSize) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. + return; + } HostVectorSharedPtr hosts(new std::vector( {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), makeTestHost(info_, "tcp://127.0.0.1:82")})); @@ -326,6 +333,9 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareDifferentZoneSize) { } TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingLargeZoneSwitchOnOff) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. 
+ return; + } HostVectorSharedPtr hosts(new std::vector( {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), makeTestHost(info_, "tcp://127.0.0.1:82")})); @@ -361,6 +371,9 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingLargeZoneSwitchOnOff) { } TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingSmallZone) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. + return; + } HostVectorSharedPtr upstream_hosts(new std::vector( {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81"), makeTestHost(info_, "tcp://127.0.0.1:82"), makeTestHost(info_, "tcp://127.0.0.1:83"), @@ -405,6 +418,9 @@ TEST_P(RoundRobinLoadBalancerTest, ZoneAwareRoutingSmallZone) { } TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. + return; + } // upstream_hosts and local_hosts do not matter, zone aware routing is based on per zone hosts. HostVectorSharedPtr upstream_hosts( new std::vector({makeTestHost(info_, "tcp://127.0.0.1:80")})); @@ -467,6 +483,9 @@ TEST_P(RoundRobinLoadBalancerTest, LowPrecisionForDistribution) { } TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingOneZone) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. + return; + } HostVectorSharedPtr hosts( new std::vector({makeTestHost(info_, "tcp://127.0.0.1:80")})); HostListsSharedPtr hosts_per_locality( @@ -501,6 +520,9 @@ TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingNotHealthy) { } TEST_P(RoundRobinLoadBalancerTest, NoZoneAwareRoutingLocalEmpty) { + if (&hostSet() == &failover_host_set_) { // P = 1 does not support zone-aware routing. 
+ return; + } HostVectorSharedPtr upstream_hosts(new std::vector( {makeTestHost(info_, "tcp://127.0.0.1:80"), makeTestHost(info_, "tcp://127.0.0.1:81")})); HostVectorSharedPtr local_hosts(new std::vector({}, {})); From 8d4bd3eba50ba8ba9bf935712485df7c5d3b0c42 Mon Sep 17 00:00:00 2001 From: Alyssa Wilk Date: Fri, 22 Dec 2017 14:17:04 -0500 Subject: [PATCH 3/3] Removing more priorities from zone aware routing Signed-off-by: Alyssa Wilk --- source/common/upstream/load_balancer_impl.cc | 37 +++++++++----------- source/common/upstream/load_balancer_impl.h | 4 +-- 2 files changed, 18 insertions(+), 23 deletions(-) diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index cb6620484b6f..c14695231142 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -67,10 +67,10 @@ ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(const PrioritySet& priority const std::vector&) -> void { // Make sure per_priority_state_ is as large as priority_set_.hostSetsPerPriority() resizePerPriorityState(); - // If there's a local priority set, regenerate all routing based on a potential size change to - // the hosts routed to. - if (local_priority_set_) { - regenerateLocalityRoutingStructures(priority); + // If P=0 changes, regenerate locality routing structures. Locality based routing is disabled + // at all other levels. + if (local_priority_set_ && priority == 0) { + regenerateLocalityRoutingStructures(); } }); if (local_priority_set_) { @@ -84,11 +84,9 @@ ZoneAwareLoadBalancerBase::ZoneAwareLoadBalancerBase(const PrioritySet& priority const std::vector&) -> void { ASSERT(priority == 0); UNREFERENCED_PARAMETER(priority); - // If the set of local Envoys changes, regenerate routing for all priority levels based on - // potential changes to the set of servers routing to priority_set_. 
- for (size_t i = 0; i < priority_set_.hostSetsPerPriority().size(); ++i) { - regenerateLocalityRoutingStructures(i); - } + // If the set of local Envoys changes, regenerate routing for P=0 as it does priority + // based routing. + regenerateLocalityRoutingStructures(); }); } } @@ -99,18 +97,18 @@ ZoneAwareLoadBalancerBase::~ZoneAwareLoadBalancerBase() { } } -void ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures(uint32_t priority) { +void ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures() { ASSERT(local_priority_set_); stats_.lb_recalculate_zone_structures_.inc(); - // We are updating based on a change for a priority level in priority_set_. - ASSERT(priority < priority_set_.hostSetsPerPriority().size()); // resizePerPriorityState should ensure these stay in sync. ASSERT(per_priority_state_.size() == priority_set_.hostSetsPerPriority().size()); + // We only do locality routing for P=0 + uint32_t priority = 0; + PerPriorityState& state = *per_priority_state_[priority]; // Do not perform any calculations if we cannot perform locality routing based on non runtime // params. - PerPriorityState& state = *per_priority_state_[priority]; - if (earlyExitNonLocalityRouting(priority)) { + if (earlyExitNonLocalityRouting()) { state.locality_routing_state_ = LocalityRoutingState::NoLocalityRouting; return; } @@ -182,17 +180,14 @@ void ZoneAwareLoadBalancerBase::regenerateLocalityRoutingStructures(uint32_t pri void ZoneAwareLoadBalancerBase::resizePerPriorityState() { const uint32_t size = priority_set_.hostSetsPerPriority().size(); while (per_priority_state_.size() < size) { + // Note for P!=0, PerPriorityState is created with NoLocalityRouting and never changed. per_priority_state_.push_back(PerPriorityStatePtr{new PerPriorityState}); } } -bool ZoneAwareLoadBalancerBase::earlyExitNonLocalityRouting(uint32_t priority) { - // Locality routing not supported for multiple priorities. 
- if (priority > 0) { - return true; - } - - HostSet& host_set = *priority_set_.hostSetsPerPriority()[priority]; +bool ZoneAwareLoadBalancerBase::earlyExitNonLocalityRouting() { + // We only do locality routing for P=0. + HostSet& host_set = *priority_set_.hostSetsPerPriority()[0]; if (host_set.healthyHostsPerLocality().size() < 2) { return true; } diff --git a/source/common/upstream/load_balancer_impl.h b/source/common/upstream/load_balancer_impl.h index fc9806e42086..985ae40c72cb 100644 --- a/source/common/upstream/load_balancer_impl.h +++ b/source/common/upstream/load_balancer_impl.h @@ -84,7 +84,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { * @return decision on quick exit from locality aware routing based on cluster configuration. * This gets recalculated on update callback. */ - bool earlyExitNonLocalityRouting(uint32_t priority); + bool earlyExitNonLocalityRouting(); /** * Try to select upstream hosts from the same locality. @@ -104,7 +104,7 @@ class ZoneAwareLoadBalancerBase : public LoadBalancerBase { /** * Regenerate locality aware routing structures for fast decisions on upstream locality selection. */ - void regenerateLocalityRoutingStructures(uint32_t priority); + void regenerateLocalityRoutingStructures(); HostSet& localHostSet() const { return *local_priority_set_->hostSetsPerPriority()[0]; }