From eb3e66f47daf5748aa4142186966405dcfd8c8aa Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 18 Feb 2021 20:20:06 +0800 Subject: [PATCH 01/13] fix scatter region in multi groups Signed-off-by: Song Gao --- pkg/cache/ttl.go | 13 +++++ server/schedule/region_scatterer.go | 62 ++++++++++++++++++++---- server/schedule/region_scatterer_test.go | 35 +++++++++++++ 3 files changed, 100 insertions(+), 10 deletions(-) diff --git a/pkg/cache/ttl.go b/pkg/cache/ttl.go index 1a22f745b6e..c4ece50075b 100644 --- a/pkg/cache/ttl.go +++ b/pkg/cache/ttl.go @@ -256,3 +256,16 @@ func (c *TTLString) Pop() (string, interface{}, bool) { func (c *TTLString) Get(id string) (interface{}, bool) { return c.ttlCache.get(id) } + +// GetAllID returns all key ids +func (c *TTLString) GetAllID() []string { + keys := c.ttlCache.getKeys() + var ids []string + for _, key := range keys { + id, ok := key.(string) + if ok { + ids = append(ids, id) + } + } + return ids +} diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 406cc903f8b..06508f32ea1 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -141,6 +141,23 @@ func (s *selectedStores) newFilters(scope, group string) []filter.Filter { return []filter.Filter{filter.NewExcludedFilter(scope, nil, cloned)} } +func (s *selectedStores) storeTotalCount(storeID uint64) uint64 { + groups := s.groupDistribution.GetAllID() + totalCount := uint64(0) + for _, group := range groups { + storeDistribution, ok := s.getGroupDistribution(group) + if !ok { + continue + } + count, ok := storeDistribution[storeID] + if !ok { + continue + } + totalCount += count + } + return totalCount +} + // RegionScatterer scatters regions. 
type RegionScatterer struct { ctx context.Context @@ -307,15 +324,29 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string) * scatterWithSameEngine := func(peers []*metapb.Peer, context engineContext) { stores := r.collectAvailableStores(group, region, context) + maxStoreTotalCount := uint64(0) + minStoreTotalCount := uint64(math.MaxUint64) + for _, store := range r.cluster.GetStores() { + count := context.selectedPeer.storeTotalCount(store.GetID()) + if count > maxStoreTotalCount { + maxStoreTotalCount = count + } + if count < minStoreTotalCount { + minStoreTotalCount = count + } + } for _, peer := range peers { if len(stores) == 0 { context.selectedPeer.reset() stores = r.collectAvailableStores(group, region, context) } - if context.selectedPeer.put(peer.GetStoreId(), group) { - delete(stores, peer.GetStoreId()) - targetPeers[peer.GetStoreId()] = peer - continue + storeCount := context.selectedPeer.storeTotalCount(peer.GetStoreId()) + if storeCount < maxStoreTotalCount || storeCount == minStoreTotalCount { + if context.selectedPeer.put(peer.GetStoreId(), group) { + delete(stores, peer.GetStoreId()) + targetPeers[peer.GetStoreId()] = peer + continue + } } newPeer := r.selectPeerToReplace(group, stores, region, peer, context) if newPeer == nil { @@ -419,17 +450,28 @@ func (r *RegionScatterer) collectAvailableStores(group string, region *core.Regi // selectAvailableLeaderStores select the target leader store from the candidates. The candidates would be collected by // the existed peers store depended on the leader counts in the group level. 
func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[uint64]*metapb.Peer, context engineContext) uint64 { + maxStoreTotalCount := uint64(0) + for _, store := range r.cluster.GetStores() { + count := r.ordinaryEngine.selectedLeader.storeTotalCount(store.GetID()) + if count > maxStoreTotalCount { + maxStoreTotalCount = count + } + } minStoreGroupLeader := uint64(math.MaxUint64) - id := uint64(0) + targetLeader := uint64(0) for storeID := range peers { + if targetLeader == 0 { + targetLeader = storeID + } + if context.selectedLeader.storeTotalCount(storeID) >= maxStoreTotalCount { + continue + } storeGroupLeaderCount := context.selectedLeader.get(storeID, group) if minStoreGroupLeader > storeGroupLeaderCount { minStoreGroupLeader = storeGroupLeaderCount - id = storeID + targetLeader = storeID } } - if id != 0 { - context.selectedLeader.put(id, group) - } - return id + context.selectedLeader.put(targetLeader, group) + return targetLeader } diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index b5a3342f019..a0069874ad0 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math" + "math/rand" "time" . "github.com/pingcap/check" @@ -432,3 +433,37 @@ func (s *testScatterRegionSuite) TestSelectedStoreGC(c *C) { _, ok = stores.getGroupDistribution("testgroup") c.Assert(ok, Equals, false) } + +func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { + opt := config.NewTestOptions() + tc := mockcluster.NewCluster(opt) + // Add 6 stores. 
+ storeCount := 6 + for i := uint64(1); i <= uint64(6); i++ { + tc.AddRegionStore(i, 0) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + scatterer := NewRegionScatterer(ctx, tc) + regionCount := 50 + for i := 1; i <= regionCount; i++ { + p := rand.Perm(storeCount) + scatterer.scatterRegion(tc.AddLeaderRegion(uint64(i), uint64(p[0])+1, uint64(p[1])+1, uint64(p[2])+1), fmt.Sprintf("t%d", i)) + } + check := func(ss *selectedStores) { + max := uint64(0) + min := uint64(math.MaxUint64) + for i := uint64(1); i <= uint64(storeCount); i++ { + count := ss.storeTotalCount(i) + if count > max { + max = count + } + if count < min { + min = count + } + } + c.Assert(max-min, Less, uint64(regionCount/10)) + } + check(scatterer.ordinaryEngine.selectedLeader) + check(scatterer.ordinaryEngine.selectedPeer) +} From 37ceec20ebbd610335f85276942ba4f5edda6574 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 18 Feb 2021 20:31:14 +0800 Subject: [PATCH 02/13] fix test Signed-off-by: Song Gao --- server/schedule/region_scatterer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index a0069874ad0..6bc75d39c01 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -348,7 +348,7 @@ func (s *testScatterRegionSuite) TestScatterGroup(c *C) { // 100 regions divided 5 stores, each store expected to have about 20 regions. 
c.Assert(min, LessEqual, uint64(20)) c.Assert(max, GreaterEqual, uint64(20)) - c.Assert(max-min, LessEqual, uint64(3)) + c.Assert(max-min, LessEqual, uint64(10)) } cancel() } From 5e2e2ae4b4318b9772929cfb4a104538a0b01906 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 18 Feb 2021 20:37:31 +0800 Subject: [PATCH 03/13] fix test Signed-off-by: Song Gao --- server/schedule/region_scatterer_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 6bc75d39c01..04bd9728dc1 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -198,16 +198,16 @@ func (s *testScatterRegionSuite) scatterSpecial(c *C, numOrdinaryStores, numSpec // Each store should have the same number of peers. for _, count := range countOrdinaryPeers { - c.Assert(float64(count), LessEqual, 1.1*float64(numRegions*3)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numOrdinaryStores)) } for _, count := range countSpecialPeers { - c.Assert(float64(count), LessEqual, 1.1*float64(numRegions*3)/float64(numSpecialStores)) - c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numSpecialStores)) } for _, count := range countOrdinaryLeaders { - c.Assert(float64(count), LessEqual, 1.1*float64(numRegions)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 
1.3*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions)/float64(numOrdinaryStores)) } } From 0b9e93dd6f2678fdf918c45ea936064e82a53d7c Mon Sep 17 00:00:00 2001 From: Song Gao Date: Wed, 24 Feb 2021 18:24:55 +0800 Subject: [PATCH 04/13] add comment Signed-off-by: Song Gao --- server/schedule/region_scatterer_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 04bd9728dc1..9d65f357969 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -434,6 +434,8 @@ func (s *testScatterRegionSuite) TestSelectedStoreGC(c *C) { c.Assert(ok, Equals, false) } +// TestRegionFromDifferentGroups tests scattering multiple regions, each of which belongs to its own group. +// After scattering, the distribution across the whole cluster should be even. func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { opt := config.NewTestOptions() tc := mockcluster.NewCluster(opt) From a11927fe3fa2938896cafb0ecf0a1a128463662b Mon Sep 17 00:00:00 2001 From: Song Gao Date: Thu, 25 Feb 2021 13:07:08 +0800 Subject: [PATCH 05/13] improve test Signed-off-by: Song Gao --- server/schedule/region_scatterer_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 9d65f357969..345f66b62bc 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -198,16 +198,16 @@ func (s *testScatterRegionSuite) scatterSpecial(c *C, numOrdinaryStores, numSpec // Each store should have the same number of peers.
for _, count := range countOrdinaryPeers { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.25*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions*3)/float64(numOrdinaryStores)) } for _, count := range countSpecialPeers { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numSpecialStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), LessEqual, 1.25*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions*3)/float64(numSpecialStores)) } for _, count := range countOrdinaryLeaders { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.25*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions)/float64(numOrdinaryStores)) } } From c6d532daf08cb10f5ea8d72a54030423491236c1 Mon Sep 17 00:00:00 2001 From: Song Gao Date: Tue, 2 Mar 2021 14:40:37 +0800 Subject: [PATCH 06/13] fix test Signed-off-by: Song Gao --- server/schedule/region_scatterer_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 345f66b62bc..9d65f357969 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -198,16 +198,16 @@ func (s *testScatterRegionSuite) scatterSpecial(c *C, numOrdinaryStores, numSpec // Each store should have the same number of peers. 
for _, count := range countOrdinaryPeers { - c.Assert(float64(count), LessEqual, 1.25*float64(numRegions*3)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numOrdinaryStores)) } for _, count := range countSpecialPeers { - c.Assert(float64(count), LessEqual, 1.25*float64(numRegions*3)/float64(numSpecialStores)) - c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numSpecialStores)) } for _, count := range countOrdinaryLeaders { - c.Assert(float64(count), LessEqual, 1.25*float64(numRegions)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.75*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.3*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions)/float64(numOrdinaryStores)) } } From aaaca9a34e58ce80517bd3d0f4e6b70c26fb2729 Mon Sep 17 00:00:00 2001 From: yisaer Date: Thu, 1 Apr 2021 13:21:52 +0800 Subject: [PATCH 07/13] fix test Signed-off-by: yisaer --- server/api/region_test.go | 5 +- server/schedule/operator/create_operator.go | 1 + server/schedule/region_scatterer.go | 17 ++++-- server/schedule/region_scatterer_test.go | 58 ++++++++++----------- 4 files changed, 46 insertions(+), 35 deletions(-) diff --git a/server/api/region_test.go b/server/api/region_test.go index bf0d073d48e..9d17d8d4657 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -294,9 +294,10 @@ func (s *testRegionSuite) TestScatterRegions(c *C) { err := postJSON(testDialClient, fmt.Sprintf("%s/regions/scatter", 
s.urlPrefix), []byte(body)) c.Assert(err, IsNil) op1 := s.svr.GetRaftCluster().GetOperatorController().GetOperator(601) - c.Assert(op1 != nil, Equals, true) op2 := s.svr.GetRaftCluster().GetOperatorController().GetOperator(602) - c.Assert(op2 != nil, Equals, true) + op3 := s.svr.GetRaftCluster().GetOperatorController().GetOperator(603) + // At least one operator used to scatter region + c.Assert(op1 != nil || op2 != nil || op3 != nil, Equals, true) } func (s *testRegionSuite) TestSplitRegions(c *C) { diff --git a/server/schedule/operator/create_operator.go b/server/schedule/operator/create_operator.go index 4b9573a20a7..cad033b471d 100644 --- a/server/schedule/operator/create_operator.go +++ b/server/schedule/operator/create_operator.go @@ -195,6 +195,7 @@ func CreateScatterRegionOperator(desc string, cluster opt.Cluster, origin *core. SetPeers(targetPeers). SetLeader(leader). EnableLightWeight(). + EnableForceTargetLeader(). Build(0) } diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index a97d2808806..89ef5603f98 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -17,6 +17,7 @@ import ( "context" "fmt" "math" + "math/rand" "sync" "time" @@ -382,18 +383,26 @@ func (r *RegionScatterer) selectStore(group string, peer *metapb.Peer, sourceSto // selectAvailableLeaderStores select the target leader store from the candidates. The candidates would be collected by // the existed peers store depended on the leader counts in the group level. 
func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[uint64]*metapb.Peer, context engineContext) uint64 { - minStoreGroupLeader := uint64(math.MaxUint64) - id := uint64(0) + leaderCandidateStores := make([]uint64, 0) for storeID := range peers { - if id == 0 { - id = storeID + store := r.cluster.GetStore(storeID) + engine := store.GetLabelValue(filter.EngineKey) + if len(engine) < 1 { + leaderCandidateStores = append(leaderCandidateStores, storeID) } + } + minStoreGroupLeader := uint64(math.MaxUint64) + id := uint64(0) + for _, storeID := range leaderCandidateStores { storeGroupLeaderCount := context.selectedLeader.Get(storeID, group) if minStoreGroupLeader > storeGroupLeaderCount { minStoreGroupLeader = storeGroupLeaderCount id = storeID } } + if id < 1 { + return leaderCandidateStores[rand.Intn(len(leaderCandidateStores))] + } return id } diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index a28454334ff..08c229986c6 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -204,16 +204,16 @@ func (s *testScatterRegionSuite) scatterSpecial(c *C, numOrdinaryStores, numSpec // Each store should have the same number of peers. 
for _, count := range countOrdinaryPeers { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.1*float64(numRegions*3)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions*3)/float64(numOrdinaryStores)) } for _, count := range countSpecialPeers { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions*3)/float64(numSpecialStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), LessEqual, 1.1*float64(numRegions*3)/float64(numSpecialStores)) + c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions*3)/float64(numSpecialStores)) } for _, count := range countOrdinaryLeaders { - c.Assert(float64(count), LessEqual, 1.3*float64(numRegions)/float64(numOrdinaryStores)) - c.Assert(float64(count), GreaterEqual, 0.7*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), LessEqual, 1.1*float64(numRegions)/float64(numOrdinaryStores)) + c.Assert(float64(count), GreaterEqual, 0.9*float64(numRegions)/float64(numOrdinaryStores)) } } @@ -293,7 +293,7 @@ func (s *testScatterRegionSuite) TestScatterCheck(c *C) { } } -func (s *testScatterRegionSuite) TestScatterGroup(c *C) { +func (s *testScatterRegionSuite) TestScatterGroupInConcurrency(c *C) { opt := config.NewTestOptions() tc := mockcluster.NewCluster(opt) // Add 5 stores. @@ -319,6 +319,7 @@ func (s *testScatterRegionSuite) TestScatterGroupInConcurrency(c *C) { }, } + // We send interleaved scatter requests for each group to simulate scattering multiple region groups concurrently.
for _, testcase := range testcases { c.Logf(testcase.name) ctx, cancel := context.WithCancel(context.Background()) @@ -326,36 +327,35 @@ func (s *testScatterRegionSuite) TestScatterGroup(c *C) { regionID := 1 for i := 0; i < 100; i++ { for j := 0; j < testcase.groupCount; j++ { - _, err := scatterer.Scatter(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), + scatterer.scatterRegion(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), fmt.Sprintf("group-%v", j)) - c.Assert(err, IsNil) regionID++ } - // insert region with no group - _, err := scatterer.Scatter(tc.AddLeaderRegion(uint64(regionID), 1, 2, 3), "") - c.Assert(err, IsNil) - regionID++ } - for i := 0; i < testcase.groupCount; i++ { - // comparing the leader distribution - group := fmt.Sprintf("group-%v", i) - max := uint64(0) - min := uint64(math.MaxUint64) - groupDistribution, _ := scatterer.ordinaryEngine.selectedLeader.groupDistribution.Get(group) - for _, count := range groupDistribution.(map[uint64]uint64) { - if count > max { - max = count - } - if count < min { - min = count + checker := func(ss *selectedStores, expected uint64, delta float64) { + for i := 0; i < testcase.groupCount; i++ { + // comparing the leader distribution + group := fmt.Sprintf("group-%v", i) + max := uint64(0) + min := uint64(math.MaxUint64) + groupDistribution, _ := ss.groupDistribution.Get(group) + for _, count := range groupDistribution.(map[uint64]uint64) { + if count > max { + max = count + } + if count < min { + min = count + } } + c.Assert(math.Abs(float64(max)-float64(expected)), LessEqual, delta) + c.Assert(math.Abs(float64(min)-float64(expected)), LessEqual, delta) } - // 100 regions divided 5 stores, each store expected to have about 20 regions. 
- c.Assert(min, LessEqual, uint64(20)) - c.Assert(max, GreaterEqual, uint64(20)) - c.Assert(max-min, LessEqual, uint64(5)) } + // For leader, we expect each store have about 20 leader for each group + checker(scatterer.ordinaryEngine.selectedLeader, 20, 5) + // For peer, we expect each store have about 50 peers for each group + checker(scatterer.ordinaryEngine.selectedPeer, 50, 15) cancel() } } From 3dc64fef9ccc341c08963f239a8a4e8259900839 Mon Sep 17 00:00:00 2001 From: yisaer Date: Mon, 12 Apr 2021 16:56:23 +0800 Subject: [PATCH 08/13] address the comment Signed-off-by: yisaer --- server/schedule/region_scatterer.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 89ef5603f98..45e8a4098a0 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -343,7 +343,10 @@ func (r *RegionScatterer) selectCandidates(region *core.RegionInfo, sourceStoreI } for _, store := range stores { storeCount := context.selectedPeer.storeTotalCount(store.GetID()) - if storeCount < maxStoreTotalCount || storeCount == minStoreTotalCount { + // If storeCount is equal to the maxStoreTotalCount, we should skip this store as candidate. + // If the storeCount are all the same for the whole cluster(maxStoreTotalCount == minStoreTotalCount), any store + // could be selected as candidate. 
+ if storeCount < maxStoreTotalCount || maxStoreTotalCount == minStoreTotalCount { if filter.Target(r.cluster.GetOpts(), store, filters) && !store.IsBusy() { candidates = append(candidates, store.GetID()) } From 5f1e4c12508fc61b68183c563234c3af739117ad Mon Sep 17 00:00:00 2001 From: yisaer Date: Tue, 13 Apr 2021 19:17:32 +0800 Subject: [PATCH 09/13] address the comment Signed-off-by: yisaer --- server/api/region_test.go | 2 +- server/schedule/region_scatterer.go | 6 +++--- server/schedule/region_scatterer_test.go | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/server/api/region_test.go b/server/api/region_test.go index 9d17d8d4657..ecec8dcf489 100644 --- a/server/api/region_test.go +++ b/server/api/region_test.go @@ -297,7 +297,7 @@ func (s *testRegionSuite) TestScatterRegions(c *C) { op2 := s.svr.GetRaftCluster().GetOperatorController().GetOperator(602) op3 := s.svr.GetRaftCluster().GetOperatorController().GetOperator(603) // At least one operator used to scatter region - c.Assert(op1 != nil || op2 != nil || op3 != nil, Equals, true) + c.Assert(op1 != nil || op2 != nil || op3 != nil, IsTrue) } func (s *testRegionSuite) TestSplitRegions(c *C) { diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 45e8a4098a0..cc93edb91cd 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -94,7 +94,7 @@ func (s *selectedStores) getDistributionByGroupLocked(group string) (map[uint64] return nil, false } -func (s *selectedStores) storeTotalCount(storeID uint64) uint64 { +func (s *selectedStores) totalCountByStore(storeID uint64) uint64 { groups := s.groupDistribution.GetAllID() totalCount := uint64(0) for _, group := range groups { @@ -333,7 +333,7 @@ func (r *RegionScatterer) selectCandidates(region *core.RegionInfo, sourceStoreI maxStoreTotalCount := uint64(0) minStoreTotalCount := uint64(math.MaxUint64) for _, store := range r.cluster.GetStores() { - count := 
context.selectedPeer.storeTotalCount(store.GetID()) + count := context.selectedPeer.totalCountByStore(store.GetID()) if count > maxStoreTotalCount { maxStoreTotalCount = count } @@ -342,7 +342,7 @@ func (r *RegionScatterer) selectCandidates(region *core.RegionInfo, sourceStoreI } } for _, store := range stores { - storeCount := context.selectedPeer.storeTotalCount(store.GetID()) + storeCount := context.selectedPeer.totalCountByStore(store.GetID()) // If storeCount is equal to the maxStoreTotalCount, we should skip this store as candidate. // If the storeCount are all the same for the whole cluster(maxStoreTotalCount == minStoreTotalCount), any store // could be selected as candidate. diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 08c229986c6..8ef74e78b7b 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -449,7 +449,7 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { tc := mockcluster.NewCluster(opt) // Add 6 stores. 
storeCount := 6 - for i := uint64(1); i <= uint64(6); i++ { + for i := uint64(1); i <= uint64(storeCount); i++ { tc.AddRegionStore(i, 0) } ctx, cancel := context.WithCancel(context.Background()) @@ -464,7 +464,7 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { max := uint64(0) min := uint64(math.MaxUint64) for i := uint64(1); i <= uint64(storeCount); i++ { - count := ss.storeTotalCount(i) + count := ss.totalCountByStore(i) if count > max { max = count } @@ -472,7 +472,7 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { min = count } } - c.Assert(max-min, LessEqual, uint64(regionCount/6)) + c.Assert(max-min, LessEqual, uint64(regionCount/10)) } check(scatterer.ordinaryEngine.selectedPeer) } From 47b1f4354b55a0ef5340cef8716cfb51e4572e9f Mon Sep 17 00:00:00 2001 From: yisaer Date: Tue, 13 Apr 2021 19:19:46 +0800 Subject: [PATCH 10/13] address the comment Signed-off-by: yisaer --- server/schedule/region_scatterer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index cc93edb91cd..291e0a642a7 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -403,6 +403,7 @@ func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[ui id = storeID } } + // unreachable if id < 1 { return leaderCandidateStores[rand.Intn(len(leaderCandidateStores))] } From b13b1e6dc9312dd47ddce03f82f431de0c2c470e Mon Sep 17 00:00:00 2001 From: yisaer Date: Thu, 15 Apr 2021 17:36:18 +0800 Subject: [PATCH 11/13] address the comment Signed-off-by: yisaer --- server/schedule/region_scatterer.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 286d44e790b..f88c8f8e279 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -17,7 +17,6 @@ import ( "context" "fmt" "math" - "math/rand" "sync" "time" @@ 
-418,10 +417,6 @@ func (r *RegionScatterer) selectAvailableLeaderStores(group string, peers map[ui id = storeID } } - // unreachable - if id < 1 { - return leaderCandidateStores[rand.Intn(len(leaderCandidateStores))] - } return id } From e91faed35b9fc87b4d08af45ccfd35d3c2154d2a Mon Sep 17 00:00:00 2001 From: yisaer Date: Fri, 16 Apr 2021 17:46:50 +0800 Subject: [PATCH 12/13] address the comment Signed-off-by: yisaer --- server/schedule/operator/create_operator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/server/schedule/operator/create_operator.go b/server/schedule/operator/create_operator.go index cad033b471d..dfc4b6d137f 100644 --- a/server/schedule/operator/create_operator.go +++ b/server/schedule/operator/create_operator.go @@ -195,6 +195,7 @@ func CreateScatterRegionOperator(desc string, cluster opt.Cluster, origin *core. SetPeers(targetPeers). SetLeader(leader). EnableLightWeight(). + // EnableForceTargetLeader in order to ignore the leader schedule limit EnableForceTargetLeader(). Build(0) } From 3eb838bb43e1b1767b8658e5579563a515513a29 Mon Sep 17 00:00:00 2001 From: yisaer Date: Wed, 21 Apr 2021 12:21:08 +0800 Subject: [PATCH 13/13] address the comment Signed-off-by: yisaer --- server/schedule/operator/builder.go | 2 +- server/schedule/region_scatterer_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/schedule/operator/builder.go b/server/schedule/operator/builder.go index c36718c6dd5..5af39eb91fd 100644 --- a/server/schedule/operator/builder.go +++ b/server/schedule/operator/builder.go @@ -304,7 +304,7 @@ func (b *Builder) EnableLightWeight() *Builder { return b } -// EnableForceTargetLeader marks the step of transferring leader to target is forcible. It is used for grant leader. +// EnableForceTargetLeader marks the step of transferring leader to target is forcible. 
func (b *Builder) EnableForceTargetLeader() *Builder { b.forceTargetLeader = true return b diff --git a/server/schedule/region_scatterer_test.go b/server/schedule/region_scatterer_test.go index 8ef74e78b7b..49e1d31a7e7 100644 --- a/server/schedule/region_scatterer_test.go +++ b/server/schedule/region_scatterer_test.go @@ -472,7 +472,7 @@ func (s *testScatterRegionSuite) TestRegionFromDifferentGroups(c *C) { min = count } } - c.Assert(max-min, LessEqual, uint64(regionCount/10)) + c.Assert(max-min, LessEqual, uint64(2)) } check(scatterer.ordinaryEngine.selectedPeer) }