From 024656bc74c22afe9fe7ab8fed8bee849e47f013 Mon Sep 17 00:00:00 2001
From: lhy1024
Date: Fri, 26 Jul 2024 14:42:06 +0800
Subject: [PATCH] scheduler: add more hot scheduler comments and replace negative rank (#8345)

ref tikv/pd#5691

Signed-off-by: lhy1024
Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com>
---
 pkg/schedule/schedulers/hot_region.go         | 11 +--
 pkg/schedule/schedulers/hot_region_config.go  |  1 -
 pkg/schedule/schedulers/hot_region_rank_v1.go | 68 ++++++++++--------
 pkg/schedule/schedulers/hot_region_rank_v2.go | 72 +++++++++----------
 .../schedulers/hot_region_rank_v2_test.go     | 10 +--
 pkg/statistics/store_hot_peers_infos.go       | 52 ++++++--------
 6 files changed, 109 insertions(+), 105 deletions(-)

diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go
index 6f671eac72e..f79d8fac760 100644
--- a/pkg/schedule/schedulers/hot_region.go
+++ b/pkg/schedule/schedulers/hot_region.go
@@ -50,7 +50,7 @@ const (
 HotRegionType = "hot-region"
 splitHotReadBuckets = "split-hot-read-region"
 splitHotWriteBuckets = "split-hot-write-region"
- splitProgressiveRank = int64(-5)
+ splitProgressiveRank = int64(5)
 minHotScheduleInterval = time.Second
 maxHotScheduleInterval = 20 * time.Second
 defaultPendingAmpFactor = 2.0
@@ -127,6 +127,7 @@ func (h *baseHotScheduler) prepareForBalance(typ resourceType, cluster sche.Sche
 switch typ {
 case readLeader, readPeer:
 // update read statistics
+ // avoid updating read statistics too frequently
 if time.Since(h.updateReadTime) >= statisticsInterval {
 regionRead := cluster.RegionReadStats()
 prepare(regionRead, utils.Read, constant.LeaderKind)
@@ -135,6 +136,7 @@
 case writeLeader, writePeer:
 // update write statistics
+ // avoid updating write statistics too frequently
 if time.Since(h.updateWriteTime) >= statisticsInterval {
 regionWrite := cluster.RegionWriteStats()
 prepare(regionWrite, utils.Write, constant.LeaderKind)
@@ -408,10 +410,10 @@ type solution struct {
 cachedPeersRate []float64

 // progressiveRank measures the contribution for balance.
- // The smaller the rank, the better this solution is.
- // If progressiveRank <= 0, this solution makes thing better.
+ // The bigger the rank, the better this solution is.
+ // If progressiveRank >= 0, this solution makes things better.
 // 0 indicates that this is a solution that cannot be used directly, but can be optimized.
- // 1 indicates that this is a non-optimizable solution.
+ // -1 indicates that this is a non-optimizable solution.
 // See `calcProgressiveRank` for more about progressive rank.
 progressiveRank int64
 // only for rank v2
@@ -483,6 +485,7 @@ type balanceSolver struct {
 best *solution
 ops []*operator.Operator

+ // maxSrc and minDst are used to calculate the rank.
 maxSrc *statistics.StoreLoad
 minDst *statistics.StoreLoad
 rankStep *statistics.StoreLoad

diff --git a/pkg/schedule/schedulers/hot_region_config.go b/pkg/schedule/schedulers/hot_region_config.go
index 517edb1d637..5f08d755f76 100644
--- a/pkg/schedule/schedulers/hot_region_config.go
+++ b/pkg/schedule/schedulers/hot_region_config.go
@@ -38,7 +38,6 @@ import (
 )

 const (
- // Scheduling has a bigger impact on TiFlash, so it needs to be corrected in configuration items
 // In the default config, the TiKV difference is 1.05*1.05-1 = 0.1025, and the TiFlash difference is 1.15*1.15-1 = 0.3225
 tiflashToleranceRatioCorrection = 0.1

diff --git a/pkg/schedule/schedulers/hot_region_rank_v1.go b/pkg/schedule/schedulers/hot_region_rank_v1.go
index 35c9bd00427..ebf6e9bf744 100644
--- a/pkg/schedule/schedulers/hot_region_rank_v1.go
+++ b/pkg/schedule/schedulers/hot_region_rank_v1.go
@@ -29,9 +29,10 @@ func initRankV1(r *balanceSolver) *rankV1 {
 }

 // isAvailable returns the solution is available.
-// The solution should have no revertRegion and progressiveRank < 0.
+// The solution's progressiveRank should be greater than 0.
+// v1 does not support revert regions, so there is no need to check revertRegion.
 func (*rankV1) isAvailable(s *solution) bool {
- return s.progressiveRank < 0
+ return s.progressiveRank > 0
 }

 func (r *rankV1) checkByPriorityAndTolerance(loads []float64, f func(int) bool) bool {
@@ -66,12 +67,12 @@ func (r *rankV1) filterUniformStore() (string, bool) {
 // If both dims are enough uniform, any schedule is unnecessary.
 return "all-dim", true
 }
- if isUniformFirstPriority && (r.cur.progressiveRank == -1 || r.cur.progressiveRank == -3) {
- // If first priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for second priority dim
+ if isUniformFirstPriority && (r.cur.progressiveRank == 1 || r.cur.progressiveRank == 3) {
+ // If the first priority dim is uniform enough, rank 1 is unnecessary and may lead to worse balance for the second priority dim
 return utils.DimToString(r.firstPriority), true
 }
- if isUniformSecondPriority && r.cur.progressiveRank == -2 {
- // If second priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for first priority dim
+ if isUniformSecondPriority && r.cur.progressiveRank == 2 {
+ // If the second priority dim is uniform enough, rank 2 is unnecessary and may lead to worse balance for the first priority dim
 return utils.DimToString(r.secondPriority), true
 }
 return "", false
@@ -79,12 +80,12 @@ func (r *rankV1) filterUniformStore() (string, bool) {

 // calcProgressiveRank calculates `r.cur.progressiveRank`.
 // See the comments of `solution.progressiveRank` for more about progressive rank.
-// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | Worsened |
-// | isBetter | -4 | -3 | -1 / 0 |
-// | isNotWorsened | -2 | 1 | 1 |
-// | Worsened | 0 | 1 | 1 |
+// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | Worsened |
+// |   isBetter                         | 4        | 3             | 1        |
+// |   isNotWorsened                    | 2        | -1            | -1       |
+// |   Worsened                         | 0        | -1            | -1       |
 func (r *rankV1) calcProgressiveRank() {
- r.cur.progressiveRank = 1
+ r.cur.progressiveRank = -1
 r.cur.calcPeersRate(r.firstPriority, r.secondPriority)
 if r.cur.getPeersRateFromCache(r.firstPriority) < r.getMinRate(r.firstPriority) &&
 r.cur.getPeersRateFromCache(r.secondPriority) < r.getMinRate(r.secondPriority) {
@@ -93,10 +94,10 @@
 if r.resourceTy == writeLeader {
 // For write leader, only compare the first priority.
- // If the first priority is better, the progressiveRank is -3.
+ // If the first priority is better, the progressiveRank is 3.
 // Because it is not a solution that needs to be optimized.
 if r.isBetterForWriteLeader() {
- r.cur.progressiveRank = -3
+ r.cur.progressiveRank = 3
 }
 return
 }
@@ -107,16 +108,16 @@
 switch {
 case isFirstBetter && isSecondBetter:
 // If belonging to the case, all two dim will be more balanced, the best choice.
- r.cur.progressiveRank = -4
+ r.cur.progressiveRank = 4
 case isFirstBetter && isSecondNotWorsened:
 // If belonging to the case, the first priority dim will be more balanced, the second priority dim will be not worsened.
- r.cur.progressiveRank = -3
+ r.cur.progressiveRank = 3
 case isFirstNotWorsened && isSecondBetter:
 // If belonging to the case, the first priority dim will be not worsened, the second priority dim will be more balanced.
- r.cur.progressiveRank = -2
+ r.cur.progressiveRank = 2
 case isFirstBetter:
 // If belonging to the case, the first priority dim will be more balanced, ignore the second priority dim.
- r.cur.progressiveRank = -1
+ r.cur.progressiveRank = 1
 case isSecondBetter:
 // If belonging to the case, the second priority dim will be more balanced, ignore the first priority dim.
 // It's a solution that cannot be used directly, but can be optimized.
@@ -126,12 +127,12 @@

 // betterThan checks if `r.cur` is a better solution than `old`.
 func (r *rankV1) betterThan(old *solution) bool {
- if old == nil || r.cur.progressiveRank <= splitProgressiveRank {
+ if old == nil || r.cur.progressiveRank >= splitProgressiveRank {
 return true
 }
 if r.cur.progressiveRank != old.progressiveRank {
- // Smaller rank is better.
- return r.cur.progressiveRank < old.progressiveRank
+ // Bigger rank is better.
+ return r.cur.progressiveRank > old.progressiveRank
 }
 if (r.cur.revertRegion == nil) != (old.revertRegion == nil) {
 // Fewer revertRegions are better.
@@ -159,24 +160,28 @@
 // We will firstly consider ensuring converge faster, secondly reduce oscillation
 firstCmp, secondCmp := r.getRkCmpPriorities(old)
 switch r.cur.progressiveRank {
- case -4: // isBetter(firstPriority) && isBetter(secondPriority)
+ case 4: // isBetter(firstPriority) && isBetter(secondPriority)
+ // Both are better, prefer the one with higher first priority rate.
+ // If the first priority rates are similar, prefer the one with higher second priority rate.
 if firstCmp != 0 {
 return firstCmp > 0
 }
 return secondCmp > 0
- case -3: // isBetter(firstPriority) && isNotWorsened(secondPriority)
+ case 3: // isBetter(firstPriority) && isNotWorsened(secondPriority)
+ // The first priority is better, prefer the one with higher first priority rate.
 if firstCmp != 0 {
 return firstCmp > 0
 }
 // prefer smaller second priority rate, to reduce oscillation
 return secondCmp < 0
- case -2: // isNotWorsened(firstPriority) && isBetter(secondPriority)
+ case 2: // isNotWorsened(firstPriority) && isBetter(secondPriority)
+ // The second priority is better, prefer the one with higher second priority rate.
 if secondCmp != 0 {
 return secondCmp > 0
 }
 // prefer smaller first priority rate, to reduce oscillation
 return firstCmp < 0
- case -1: // isBetter(firstPriority)
+ case 1: // isBetter(firstPriority)
 return firstCmp > 0
 // TODO: The smaller the difference between the value and the expectation, the better.
 }

@@ -193,21 +198,24 @@ func (r *rankV1) getRkCmpPriorities(old *solution) (firstCmp int, secondCmp int)

 func (r *rankV1) rankToDimString() string {
 switch r.cur.progressiveRank {
- case -4:
+ case 4:
 return "all"
- case -3:
+ case 3:
 return utils.DimToString(r.firstPriority)
- case -2:
+ case 2:
 return utils.DimToString(r.secondPriority)
- case -1:
+ case 1:
 return utils.DimToString(r.firstPriority) + "-only"
 default:
 return "none"
 }
 }

-func (*rankV1) needSearchRevertRegions() bool { return false }
-func (*rankV1) setSearchRevertRegions() {}
+func (*rankV1) needSearchRevertRegions() bool {
+ return false
+}
+
+func (*rankV1) setSearchRevertRegions() {}

 func (r *rankV1) isBetterForWriteLeader() bool {
 srcRate, dstRate := r.cur.getExtremeLoad(r.firstPriority)
diff --git a/pkg/schedule/schedulers/hot_region_rank_v2.go b/pkg/schedule/schedulers/hot_region_rank_v2.go
index d90da3ca8a6..fd00c3b8345 100644
--- a/pkg/schedule/schedulers/hot_region_rank_v2.go
+++ b/pkg/schedule/schedulers/hot_region_rank_v2.go
@@ -113,11 +113,11 @@ func initRankV2(bs *balanceSolver) *rankV2 {
 }

 // isAvailable returns the solution is available.
-// If the solution has no revertRegion, progressiveRank should < 0.
-// If the solution has some revertRegion, progressiveRank should equal to -4 or -3.
+// If the solution has no revertRegion, progressiveRank should be greater than 0.
+// If the solution has some revertRegion, progressiveRank should be 4 or 3.
 func (*rankV2) isAvailable(s *solution) bool {
- // TODO: Test if revert region can be enabled for -1.
- return s.progressiveRank <= -3 || (s.progressiveRank < 0 && s.revertRegion == nil)
+ // TODO: Test if revert region can be enabled for rank 1.
+ return s.progressiveRank >= 3 || (s.progressiveRank > 0 && s.revertRegion == nil)
 }

 func (r *rankV2) checkByPriorityAndTolerance(loads []float64, f func(int) bool) bool {
@@ -151,12 +151,12 @@ func (r *rankV2) filterUniformStore() (string, bool) {
 // If both dims are enough uniform, any schedule is unnecessary.
 return "all-dim", true
 }
- if isUniformFirstPriority && (r.cur.progressiveRank == -2 || r.cur.progressiveRank == -3) {
- // If first priority dim is enough uniform, -2 is unnecessary and maybe lead to worse balance for second priority dim
+ if isUniformFirstPriority && (r.cur.progressiveRank == 2 || r.cur.progressiveRank == 3) {
+ // If the first priority dim is uniform enough, rank 2 is unnecessary and may lead to worse balance for the second priority dim
 return utils.DimToString(r.firstPriority), true
 }
- if isUniformSecondPriority && r.cur.progressiveRank == -1 {
- // If second priority dim is enough uniform, -1 is unnecessary and maybe lead to worse balance for first priority dim
+ if isUniformSecondPriority && r.cur.progressiveRank == 1 {
+ // If the second priority dim is uniform enough, rank 1 is unnecessary and may lead to worse balance for the first priority dim
 return utils.DimToString(r.secondPriority), true
 }
 return "", false
@@ -164,24 +164,24 @@

 // The search-revert-regions is performed only when the following conditions are met to improve performance.
 // * `searchRevertRegions` is true. It depends on the result of the last `solve`.
-// * The current solution is not good enough. progressiveRank == -2/0
+// * The current solution is not good enough. progressiveRank == 2/0
 // * The current best solution is not good enough.
-//   - The current best solution has progressiveRank < -2 , but contain revert regions.
-//   - The current best solution has progressiveRank >= -2.
+//   - The current best solution has progressiveRank > 2, but contains revert regions.
+//   - The current best solution has progressiveRank <= 2.
 func (r *rankV2) needSearchRevertRegions() bool {
 if !r.sche.searchRevertRegions[r.resourceTy] {
 return false
 }
- return (r.cur.progressiveRank == -2 || r.cur.progressiveRank == 0) &&
- (r.best == nil || r.best.progressiveRank >= -2 || r.best.revertRegion != nil)
+ return (r.cur.progressiveRank == 2 || r.cur.progressiveRank == 0) &&
+ (r.best == nil || r.best.progressiveRank <= 2 || r.best.revertRegion != nil)
 }

 func (r *rankV2) setSearchRevertRegions() {
 // The next solve is allowed to search-revert-regions only when the following conditions are met.
 // * No best solution was found this time.
- // * The progressiveRank of the best solution == -2.
+ // * The progressiveRank of the best solution == 2. (first is better, second is worsened)
 // * The best solution contain revert regions.
- searchRevertRegions := r.best == nil || r.best.progressiveRank == -2 || r.best.revertRegion != nil
+ searchRevertRegions := r.best == nil || r.best.progressiveRank == 2 || r.best.revertRegion != nil
 r.sche.searchRevertRegions[r.resourceTy] = searchRevertRegions
 if searchRevertRegions {
 event := fmt.Sprintf("%s-%s-allow-search-revert-regions", r.rwTy.String(), r.opTy.String())
@@ -191,15 +191,15 @@

 // calcProgressiveRank calculates `r.cur.progressiveRank`.
 // See the comments of `solution.progressiveRank` for more about progressive rank.
 // isBetter: score > 0
 // isNotWorsened: score == 0
 // isWorsened: score < 0
-// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | isWorsened |
-// | isBetter | -4 | -3 | -2 |
-// | isNotWorsened | -1 | 1 | 1 |
-// | isWorsened | 0 | 1 | 1 |
+// | ↓ firstPriority \ secondPriority → | isBetter | isNotWorsened | isWorsened |
+// |   isBetter                         | 4        | 3             | 2          |
+// |   isNotWorsened                    | 1        | -1            | -1         |
+// |   isWorsened                       | 0        | -1            | -1         |
 func (r *rankV2) calcProgressiveRank() {
- r.cur.progressiveRank = 1
+ r.cur.progressiveRank = -1
 r.cur.calcPeersRate(r.firstPriority, r.secondPriority)
 if r.cur.getPeersRateFromCache(r.firstPriority) < r.getMinRate(r.firstPriority) &&
 r.cur.getPeersRateFromCache(r.secondPriority) < r.getMinRate(r.secondPriority) {
@@ -208,10 +208,10 @@
 if r.resourceTy == writeLeader {
 // For write leader, only compare the first priority.
- // If the first priority is better, the progressiveRank is -3.
+ // If the first priority is better, the progressiveRank is 3.
 // Because it is not a solution that needs to be optimized.
 if r.getScoreByPriorities(r.firstPriority, r.firstPriorityRatios) > 0 {
- r.cur.progressiveRank = -3
+ r.cur.progressiveRank = 3
 }
 return
 }
@@ -222,16 +222,16 @@
 switch {
 case firstScore > 0 && secondScore > 0:
 // If belonging to the case, all two dim will be more balanced, the best choice.
- r.cur.progressiveRank = -4
+ r.cur.progressiveRank = 4
 case firstScore > 0 && secondScore == 0:
 // If belonging to the case, the first priority dim will be more balanced, the second priority dim will be not worsened.
- r.cur.progressiveRank = -3
+ r.cur.progressiveRank = 3
 case firstScore > 0:
 // If belonging to the case, the first priority dim will be more balanced, ignore the second priority dim.
- r.cur.progressiveRank = -2 + r.cur.progressiveRank = 2 case firstScore == 0 && secondScore > 0: // If belonging to the case, the first priority dim will be not worsened, the second priority dim will be more balanced. - r.cur.progressiveRank = -1 + r.cur.progressiveRank = 1 case secondScore > 0: // If belonging to the case, the second priority dim will be more balanced, ignore the first priority dim. // It's a solution that cannot be used directly, but can be optimized. @@ -437,12 +437,12 @@ func (r *rankV2) getScoreByPriorities(dim int, rs *rankRatios) int { // betterThan checks if `r.cur` is a better solution than `old`. func (r *rankV2) betterThan(old *solution) bool { - if old == nil || r.cur.progressiveRank <= splitProgressiveRank { + if old == nil || r.cur.progressiveRank >= splitProgressiveRank { return true } if r.cur.progressiveRank != old.progressiveRank { - // Smaller rank is better. - return r.cur.progressiveRank < old.progressiveRank + // Bigger rank is better. + return r.cur.progressiveRank > old.progressiveRank } if (r.cur.revertRegion == nil) != (old.revertRegion == nil) { // Fewer revertRegions are better. @@ -473,12 +473,12 @@ func (r *rankV2) betterThan(old *solution) bool { secondCmp := getRkCmpByPriority(r.secondPriority, r.cur.secondScore, old.secondScore, r.cur.getPeersRateFromCache(r.secondPriority), old.getPeersRateFromCache(r.secondPriority)) switch r.cur.progressiveRank { - case -4, -3, -2: // firstPriority + case 4, 3, 2: // firstPriority if firstCmp != 0 { return firstCmp > 0 } return secondCmp > 0 - case -1: // secondPriority + case 1: // secondPriority if secondCmp != 0 { return secondCmp > 0 } @@ -509,13 +509,13 @@ func getRkCmpByPriority(dim int, curScore, oldScore int, curPeersRate, oldPeersR func (r *rankV2) rankToDimString() string { switch r.cur.progressiveRank { - case -4: + case 4: return "all" - case -3: + case 3: return utils.DimToString(r.firstPriority) - case -2: + case 2: return utils.DimToString(r.firstPriority) + "-only" - case -1: + case 1: return utils.DimToString(r.secondPriority) default: return "none" diff --git a/pkg/schedule/schedulers/hot_region_rank_v2_test.go b/pkg/schedule/schedulers/hot_region_rank_v2_test.go index dd1d99fc01d..0237c2156ec 100644 --- a/pkg/schedule/schedulers/hot_region_rank_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_rank_v2_test.go @@ -28,7 +28,7 @@ import ( ) func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { - // This is a test that searchRevertRegions finds a solution of rank -1. + // This is a test that searchRevertRegions finds a solution of rank 1. re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -69,7 +69,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re.True(hb.searchRevertRegions[writePeer]) // Two operators can be generated when RankFormulaVersion == "v2". ops, _ = hb.Schedule(tc, false) - /* The revert region is currently disabled for the -1 case. + /* The revert region is currently disabled for the rank 1 case. re.Len(ops, 2) operatorutil.CheckTransferPeer(re, ops[0], operator.OpHotRegion, 2, 5) operatorutil.CheckTransferPeer(re, ops[1], operator.OpHotRegion, 5, 2) @@ -89,7 +89,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { } func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { - // This is a test that searchRevertRegions finds a solution of rank -3. + // This is a test that searchRevertRegions finds a solution of rank 3. 
 re := require.New(t)
 cancel, _, tc, oc := prepareSchedulersTest()
 defer cancel()
@@ -141,7 +141,7 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) {
 }

 func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) {
- // This is a test that searchRevertRegions finds a solution of rank -2.
+ // This is a test that searchRevertRegions finds a solution of rank 2.
 re := require.New(t)
 cancel, _, tc, oc := prepareSchedulersTest()
 defer cancel()
@@ -242,7 +242,7 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) {
 re.True(hb.searchRevertRegions[readLeader])
 // Two operators can be generated when RankFormulaVersion == "v2".
 ops, _ = hb.Schedule(tc, false)
- /* The revert region is currently disabled for the -1 case.
+ /* The revert region is currently disabled for the rank 1 case.
 re.Len(ops, 2)
 operatorutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 2, 5)
 operatorutil.CheckTransferLeader(re, ops[1], operator.OpHotRegion, 5, 2)
diff --git a/pkg/statistics/store_hot_peers_infos.go b/pkg/statistics/store_hot_peers_infos.go
index a65366c41bf..f7873bdd744 100644
--- a/pkg/statistics/store_hot_peers_infos.go
+++ b/pkg/statistics/store_hot_peers_infos.go
@@ -15,7 +15,6 @@
 package statistics

 import (
- "fmt"
 "math"

 "github.com/tikv/pd/pkg/core"
@@ -151,7 +150,7 @@ func summaryStoresLoadByEngine(
 ) []*StoreLoadDetail {
 loadDetail := make([]*StoreLoadDetail, 0, len(storeInfos))
 allStoreLoadSum := make([]float64, utils.DimLen)
- allStoreHistoryLoadSum := make([][]float64, utils.DimLen)
+ allStoreHistoryLoadSum := make([][]float64, utils.DimLen) // row: dim, column: time
 allStoreCount := 0
 allHotPeersCount := 0

@@ -166,49 +165,37 @@ func summaryStoresLoadByEngine(
 // Find all hot peers first
 var hotPeers []*HotPeerStat
 peerLoadSum := make([]float64, utils.DimLen)
- // TODO: To remove `filterHotPeers`, we need to:
- // HotLeaders consider `Write{Bytes,Keys}`, so when we schedule `writeLeader`, all peers are leader.
+ // For hot leaders, we need to calculate the sum of the leader's write and read flow rather than that of all peers.
 for _, peer := range filterHotPeers(kind, storeHotPeers[id]) {
 for i := range peerLoadSum {
 peerLoadSum[i] += peer.GetLoad(i)
 }
 hotPeers = append(hotPeers, peer.Clone())
 }
- {
- // Metric for debug.
- // TODO: pre-allocate gauge metrics
- ty := "byte-rate-" + rwTy.String() + "-" + kind.String()
- hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.ByteDim])
- ty = "key-rate-" + rwTy.String() + "-" + kind.String()
- hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.KeyDim])
- ty = "query-rate-" + rwTy.String() + "-" + kind.String()
- hotPeerSummary.WithLabelValues(ty, fmt.Sprintf("%v", id)).Set(peerLoadSum[utils.QueryDim])
- }
- loads := collector.GetLoads(storeLoads, peerLoadSum, rwTy, kind)
+ currentLoads := collector.GetLoads(storeLoads, peerLoadSum, rwTy, kind)

 var historyLoads [][]float64
 if storesHistoryLoads != nil {
 historyLoads = storesHistoryLoads.Get(id, rwTy, kind)
- for i, loads := range historyLoads {
- if allStoreHistoryLoadSum[i] == nil || len(allStoreHistoryLoadSum[i]) < len(loads) {
- allStoreHistoryLoadSum[i] = make([]float64, len(loads))
+ for i, dimHistoryLoads := range historyLoads {
+ if allStoreHistoryLoadSum[i] == nil || len(allStoreHistoryLoadSum[i]) < len(dimHistoryLoads) {
+ allStoreHistoryLoadSum[i] = make([]float64, len(dimHistoryLoads))
 }
- for j, load := range loads {
- allStoreHistoryLoadSum[i][j] += load
+ for j, historyLoad := range dimHistoryLoads {
+ allStoreHistoryLoadSum[i][j] += historyLoad
 }
 }
- storesHistoryLoads.Add(id, rwTy, kind, loads)
+ storesHistoryLoads.Add(id, rwTy, kind, currentLoads)
 }

 for i := range allStoreLoadSum {
- allStoreLoadSum[i] += loads[i]
+ allStoreLoadSum[i] += currentLoads[i]
 }
 allStoreCount += 1
 allHotPeersCount += len(hotPeers)

 // Build store load prediction from current load and pending influence.
 stLoadPred := (&StoreLoad{
- Loads: loads,
+ Loads: currentLoads,
 Count: float64(len(hotPeers)),
 HistoryLoads: historyLoads,
 }).ToLoadPred(rwTy, info.PendingSum)
@@ -231,8 +218,8 @@
 expectLoads[i] = allStoreLoadSum[i] / float64(allStoreCount)
 }

- // todo: remove some the max value or min value to avoid the effect of extreme value.
- expectHistoryLoads := make([][]float64, utils.DimLen)
+ // TODO: remove the max and min values to avoid the effect of extreme values.
+ expectHistoryLoads := make([][]float64, utils.DimLen) // row: dim, column: time
 for i := range allStoreHistoryLoadSum {
 expectHistoryLoads[i] = make([]float64, len(allStoreHistoryLoadSum[i]))
 for j := range allStoreHistoryLoadSum[i] {
@@ -285,13 +272,20 @@
 return loadDetail
 }

+// filterHotPeers filters hot peers according to kind.
+// If kind is RegionKind, all hot peers will be returned.
+// If kind is LeaderKind, only leader hot peers will be returned.
 func filterHotPeers(kind constant.ResourceKind, peers []*HotPeerStat) []*HotPeerStat {
 ret := make([]*HotPeerStat, 0, len(peers))
 for _, peer := range peers {
- if kind == constant.LeaderKind && !peer.IsLeader() {
- continue
+ switch kind {
+ case constant.RegionKind:
+ ret = append(ret, peer)
+ case constant.LeaderKind:
+ if peer.IsLeader() {
+ ret = append(ret, peer)
+ }
 }
- ret = append(ret, peer)
 }
 return ret
 }
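
For readers of this patch, the decision table is easy to misread inside a diff, so here is a minimal, standalone Go sketch of the positive progressive-rank mapping. It is illustrative only: the progressiveRank function and the firstScore/secondScore parameters below are assumptions for this sketch, not identifiers from the PD code base; in PD the scores come from rankV2.getScoreByPriorities, where a positive score means the dimension becomes more balanced (isBetter), zero means it is not worsened, and a negative score means it worsens.

    package main

    import "fmt"

    // progressiveRank restates the rank-v2 decision table after this patch.
    func progressiveRank(firstScore, secondScore int) int64 {
        switch {
        case firstScore > 0 && secondScore > 0:
            return 4 // both dims improve: the best choice
        case firstScore > 0 && secondScore == 0:
            return 3 // first dim improves, second is not worsened
        case firstScore > 0:
            return 2 // first dim improves, second worsens
        case firstScore == 0 && secondScore > 0:
            return 1 // first dim not worsened, second improves
        case secondScore > 0:
            return 0 // not directly usable, but optimizable (e.g. via revert regions)
        default:
            return -1 // non-optimizable solution
        }
    }

    func main() {
        fmt.Println(progressiveRank(1, 1))   // 4
        fmt.Println(progressiveRank(1, -1))  // 2
        fmt.Println(progressiveRank(-1, 1))  // 0
        fmt.Println(progressiveRank(-1, -1)) // -1
    }

With every usable rank made positive, "bigger is better" holds uniformly: betterThan simply prefers the larger rank, and a split solution is assigned splitProgressiveRank (5), one above the best normal rank, so it always wins the comparison.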