diff --git a/server/schedule/merge_checker_test.go b/server/schedule/merge_checker_test.go
new file mode 100644
index 00000000000..6d66db84aa9
--- /dev/null
+++ b/server/schedule/merge_checker_test.go
@@ -0,0 +1,254 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schedule
+
+import (
+    "time"
+
+    . "github.com/pingcap/check"
+    "github.com/pingcap/kvproto/pkg/metapb"
+    "github.com/pingcap/pd/server/core"
+    "github.com/pingcap/pd/server/namespace"
+)
+
+var _ = Suite(&testMergeCheckerSuite{})
+
+type testMergeCheckerSuite struct {
+    cluster *MockCluster
+    mc      *MergeChecker
+    regions []*core.RegionInfo
+}
+
+func (s *testMergeCheckerSuite) SetUpTest(c *C) {
+    cfg := NewMockSchedulerOptions()
+    cfg.MaxMergeRegionSize = 2
+    cfg.MaxMergeRegionKeys = 2
+    s.cluster = NewMockCluster(cfg)
+    s.regions = []*core.RegionInfo{
+        core.NewRegionInfo(
+            &metapb.Region{
+                Id:       1,
+                StartKey: []byte(""),
+                EndKey:   []byte("a"),
+                Peers: []*metapb.Peer{
+                    {Id: 101, StoreId: 1},
+                    {Id: 102, StoreId: 2},
+                },
+            },
+            &metapb.Peer{Id: 101, StoreId: 1},
+            core.SetApproximateSize(1),
+            core.SetApproximateKeys(1),
+        ),
+        core.NewRegionInfo(
+            &metapb.Region{
+                Id:       2,
+                StartKey: []byte("a"),
+                EndKey:   []byte("t"),
+                Peers: []*metapb.Peer{
+                    {Id: 103, StoreId: 1},
+                    {Id: 104, StoreId: 4},
+                    {Id: 105, StoreId: 5},
+                },
+            },
+            &metapb.Peer{Id: 104, StoreId: 4},
+            core.SetApproximateSize(200),
+            core.SetApproximateKeys(200),
+        ),
+        core.NewRegionInfo(
+            &metapb.Region{
+                Id:       3,
+                StartKey: []byte("t"),
+                EndKey:   []byte("x"),
+                Peers: []*metapb.Peer{
+                    {Id: 106, StoreId: 2},
+                    {Id: 107, StoreId: 5},
+                    {Id: 108, StoreId: 6},
+                },
+            },
+            &metapb.Peer{Id: 108, StoreId: 6},
+            core.SetApproximateSize(1),
+            core.SetApproximateKeys(1),
+        ),
+        core.NewRegionInfo(
+            &metapb.Region{
+                Id:       4,
+                StartKey: []byte("x"),
+                EndKey:   []byte(""),
+                Peers: []*metapb.Peer{
+                    {Id: 109, StoreId: 4},
+                },
+            },
+            &metapb.Peer{Id: 109, StoreId: 4},
+            core.SetApproximateSize(10),
+            core.SetApproximateKeys(10),
+        ),
+    }
+
+    for _, region := range s.regions {
+        c.Assert(s.cluster.PutRegion(region), IsNil)
+    }
+
+    s.mc = NewMergeChecker(s.cluster, namespace.DefaultClassifier)
+}
+
+func (s *testMergeCheckerSuite) TestBasic(c *C) {
+    s.cluster.MockSchedulerOptions.SplitMergeInterval = time.Hour
+
+    // regions whose peer count differs from the replica config should be skipped
+    op1, op2 := s.mc.Check(s.regions[0])
+    c.Assert(op1, IsNil)
+    c.Assert(op2, IsNil)
+    // regions larger than max-merge-region size/keys should be skipped
+    op1, op2 = s.mc.Check(s.regions[1])
+    c.Assert(op1, IsNil)
+    c.Assert(op2, IsNil)
+    op1, op2 = s.mc.Check(s.regions[2])
+    c.Assert(op1, NotNil)
+    c.Assert(op2, NotNil)
+    for _, op := range []*Operator{op1, op2} {
+        op.createTime = op.createTime.Add(-LeaderOperatorWaitTime - time.Second)
+        c.Assert(op.IsTimeout(), IsFalse)
+        op.createTime = op.createTime.Add(-RegionOperatorWaitTime - time.Second)
+        c.Assert(op.IsTimeout(), IsTrue)
+    }
+    // Skip recently split regions.
+    s.mc.RecordRegionSplit(s.regions[2].GetID())
+    op1, op2 = s.mc.Check(s.regions[2])
+    c.Assert(op1, IsNil)
+    c.Assert(op2, IsNil)
+    op1, op2 = s.mc.Check(s.regions[3])
+    c.Assert(op1, IsNil)
+    c.Assert(op2, IsNil)
+}
+
+func (s *testMergeCheckerSuite) checkSteps(c *C, op *Operator, steps []OperatorStep) {
+    c.Assert(op.Kind()&OpMerge, Not(Equals), 0)
+    c.Assert(steps, NotNil)
+    c.Assert(op.Len(), Equals, len(steps))
+    for i := range steps {
+        c.Assert(op.Step(i), DeepEquals, steps[i])
+    }
+}
+
+func (s *testMergeCheckerSuite) TestMatchPeers(c *C) {
+    // partial store overlap, not including the leader
+    op1, op2 := s.mc.Check(s.regions[2])
+    s.checkSteps(c, op1, []OperatorStep{
+        TransferLeader{FromStore: 6, ToStore: 5},
+        AddLearner{ToStore: 1, PeerID: 1},
+        PromoteLearner{ToStore: 1, PeerID: 1},
+        RemovePeer{FromStore: 2},
+        AddLearner{ToStore: 4, PeerID: 2},
+        PromoteLearner{ToStore: 4, PeerID: 2},
+        RemovePeer{FromStore: 6},
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  false,
+        },
+    })
+    s.checkSteps(c, op2, []OperatorStep{
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  true,
+        },
+    })
+
+    // partial store overlap, including the leader
+    newRegion := s.regions[2].Clone(
+        core.SetPeers([]*metapb.Peer{
+            {Id: 106, StoreId: 1},
+            {Id: 107, StoreId: 5},
+            {Id: 108, StoreId: 6},
+        }),
+        core.WithLeader(&metapb.Peer{Id: 106, StoreId: 1}),
+    )
+    s.regions[2] = newRegion
+    s.cluster.PutRegion(s.regions[2])
+    op1, op2 = s.mc.Check(s.regions[2])
+    s.checkSteps(c, op1, []OperatorStep{
+        AddLearner{ToStore: 4, PeerID: 3},
+        PromoteLearner{ToStore: 4, PeerID: 3},
+        RemovePeer{FromStore: 6},
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  false,
+        },
+    })
+    s.checkSteps(c, op2, []OperatorStep{
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  true,
+        },
+    })
+
+    // all stores overlap
+    s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
+        {Id: 106, StoreId: 1},
+        {Id: 107, StoreId: 5},
+        {Id: 108, StoreId: 4},
+    }))
+    s.cluster.PutRegion(s.regions[2])
+    op1, op2 = s.mc.Check(s.regions[2])
+    s.checkSteps(c, op1, []OperatorStep{
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  false,
+        },
+    })
+    s.checkSteps(c, op2, []OperatorStep{
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  true,
+        },
+    })
+
+    // no stores overlap
+    s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
+        {Id: 109, StoreId: 2},
+        {Id: 110, StoreId: 3},
+        {Id: 111, StoreId: 6},
+    }), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2}))
+    s.cluster.PutRegion(s.regions[2])
+    op1, op2 = s.mc.Check(s.regions[2])
+    s.checkSteps(c, op1, []OperatorStep{
+        AddLearner{ToStore: 1, PeerID: 4},
+        PromoteLearner{ToStore: 1, PeerID: 4},
+        RemovePeer{FromStore: 3},
+        AddLearner{ToStore: 4, PeerID: 5},
+        PromoteLearner{ToStore: 4, PeerID: 5},
+        RemovePeer{FromStore: 6},
+        AddLearner{ToStore: 5, PeerID: 6},
+        PromoteLearner{ToStore: 5, PeerID: 6},
+        TransferLeader{FromStore: 2, ToStore: 1},
+        RemovePeer{FromStore: 2},
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  false,
+        },
+    })
+    s.checkSteps(c, op2, []OperatorStep{
+        MergeRegion{
+            FromRegion: s.regions[2].GetMeta(),
+            ToRegion:   s.regions[1].GetMeta(),
+            IsPassive:  true,
+        },
+    })
+}
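The suite above is a move rather than new coverage: an identical suite is deleted from server/schedulers/balance_test.go further down. Living in package schedule lets the test drop the schedule. qualifiers and, more importantly, reach the operator's unexported createTime field for the new timeout assertions in TestBasic. Those assertions rely on MergeChecker.Check returning a pair of operators: an active one (op1) that relocates the source region's peers and performs the merge, and a passive one (op2) that only tracks the target region. A minimal sketch of how a caller might consume the pair; the dispatch helper is hypothetical, real wiring lives in PD's coordinator:

    // Sketch only: mc is a *MergeChecker, region a *core.RegionInfo.
    // dispatch is a hypothetical stand-in for PD's coordinator logic.
    op1, op2 := mc.Check(region)
    if op1 != nil && op2 != nil {
        // The two operators must be handled together: op1 drives the
        // source region; op2 (IsPassive: true) waits on the target.
        dispatch(op1)
        dispatch(op2)
    }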
diff --git a/server/schedule/operator.go b/server/schedule/operator.go
index 80da4954b16..92b76ea43ec 100644
--- a/server/schedule/operator.go
+++ b/server/schedule/operator.go
@@ -500,7 +500,7 @@ func CreateMergeRegionOperator(desc string, cluster Cluster, source *core.Region
     })
 
     op1 := NewOperator(desc, source.GetID(), source.GetRegionEpoch(), kinds|kind|OpMerge, steps...)
-    op2 := NewOperator(desc, target.GetID(), target.GetRegionEpoch(), kind|OpMerge, MergeRegion{
+    op2 := NewOperator(desc, target.GetID(), target.GetRegionEpoch(), kinds|kind|OpMerge, MergeRegion{
         FromRegion: source.GetMeta(),
         ToRegion:   target.GetMeta(),
         IsPassive:  true,
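This one-line change is what the new timeout assertions pin down: the passive target-side operator op2 previously dropped the kinds flags derived from the peer-matching steps, so it could lack OpRegion and be judged against the much shorter leader-operator timeout while op1 got the long region-operator timeout, letting the passive half of a merge expire early. A simplified sketch of the timeout selection involved, not the verbatim Operator.IsTimeout implementation (comments on the constants are illustrative):

    // Sketch of how an operator's allowed wait time plausibly depends on
    // its kind flags; the real method lives on *Operator in this package.
    func isTimeout(o *Operator) bool {
        wait := LeaderOperatorWaitTime // short, for pure leader transfers
        if o.Kind()&OpRegion != 0 {
            wait = RegionOperatorWaitTime // long, for peer moves and merges
        }
        return time.Since(o.createTime) > wait
    }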
diff --git a/server/schedulers/balance_test.go b/server/schedulers/balance_test.go
index 2302cf40469..72e65d9e454 100644
--- a/server/schedulers/balance_test.go
+++ b/server/schedulers/balance_test.go
@@ -17,7 +17,6 @@ import (
     "fmt"
     "math"
     "math/rand"
-    "time"
 
     . "github.com/pingcap/check"
     "github.com/pingcap/kvproto/pkg/metapb"
@@ -830,231 +829,6 @@ func (s *testRandomMergeSchedulerSuite) TestMerge(c *C) {
     c.Assert(mb.IsScheduleAllowed(tc), IsFalse)
 }
 
-var _ = Suite(&testMergeCheckerSuite{})
-
-type testMergeCheckerSuite struct {
-    cluster *schedule.MockCluster
-    mc      *schedule.MergeChecker
-    regions []*core.RegionInfo
-}
-
-func (s *testMergeCheckerSuite) SetUpTest(c *C) {
-    cfg := schedule.NewMockSchedulerOptions()
-    cfg.MaxMergeRegionSize = 2
-    cfg.MaxMergeRegionKeys = 2
-    s.cluster = schedule.NewMockCluster(cfg)
-    s.regions = []*core.RegionInfo{
-        core.NewRegionInfo(
-            &metapb.Region{
-                Id:       1,
-                StartKey: []byte(""),
-                EndKey:   []byte("a"),
-                Peers: []*metapb.Peer{
-                    {Id: 101, StoreId: 1},
-                    {Id: 102, StoreId: 2},
-                },
-            },
-            &metapb.Peer{Id: 101, StoreId: 1},
-            core.SetApproximateSize(1),
-            core.SetApproximateKeys(1),
-        ),
-        core.NewRegionInfo(
-            &metapb.Region{
-                Id:       2,
-                StartKey: []byte("a"),
-                EndKey:   []byte("t"),
-                Peers: []*metapb.Peer{
-                    {Id: 103, StoreId: 1},
-                    {Id: 104, StoreId: 4},
-                    {Id: 105, StoreId: 5},
-                },
-            },
-            &metapb.Peer{Id: 104, StoreId: 4},
-            core.SetApproximateSize(200),
-            core.SetApproximateKeys(200),
-        ),
-        core.NewRegionInfo(
-            &metapb.Region{
-                Id:       3,
-                StartKey: []byte("t"),
-                EndKey:   []byte("x"),
-                Peers: []*metapb.Peer{
-                    {Id: 106, StoreId: 2},
-                    {Id: 107, StoreId: 5},
-                    {Id: 108, StoreId: 6},
-                },
-            },
-            &metapb.Peer{Id: 108, StoreId: 6},
-            core.SetApproximateSize(1),
-            core.SetApproximateKeys(1),
-        ),
-        core.NewRegionInfo(
-            &metapb.Region{
-                Id:       4,
-                StartKey: []byte("x"),
-                EndKey:   []byte(""),
-                Peers: []*metapb.Peer{
-                    {Id: 109, StoreId: 4},
-                },
-            },
-            &metapb.Peer{Id: 109, StoreId: 4},
-            core.SetApproximateSize(10),
-            core.SetApproximateKeys(10),
-        ),
-    }
-
-    for _, region := range s.regions {
-        c.Assert(s.cluster.PutRegion(region), IsNil)
-    }
-
-    s.mc = schedule.NewMergeChecker(s.cluster, namespace.DefaultClassifier)
-}
-
-func (s *testMergeCheckerSuite) TestBasic(c *C) {
-    s.cluster.MockSchedulerOptions.SplitMergeInterval = time.Hour
-
-    // should with same peer count
-    op1, op2 := s.mc.Check(s.regions[0])
-    c.Assert(op1, IsNil)
-    c.Assert(op2, IsNil)
-    // size should be small enough
-    op1, op2 = s.mc.Check(s.regions[1])
-    c.Assert(op1, IsNil)
-    c.Assert(op2, IsNil)
-    op1, op2 = s.mc.Check(s.regions[2])
-    c.Assert(op1, NotNil)
-    c.Assert(op2, NotNil)
-    // Skip recently split regions.
-    s.mc.RecordRegionSplit(s.regions[2].GetID())
-    op1, op2 = s.mc.Check(s.regions[2])
-    c.Assert(op1, IsNil)
-    c.Assert(op2, IsNil)
-    op1, op2 = s.mc.Check(s.regions[3])
-    c.Assert(op1, IsNil)
-    c.Assert(op2, IsNil)
-}
-
-func (s *testMergeCheckerSuite) checkSteps(c *C, op *schedule.Operator, steps []schedule.OperatorStep) {
-    c.Assert(op.Kind()&schedule.OpMerge, Not(Equals), 0)
-    c.Assert(steps, NotNil)
-    c.Assert(op.Len(), Equals, len(steps))
-    for i := range steps {
-        c.Assert(op.Step(i), DeepEquals, steps[i])
-    }
-}
-
-func (s *testMergeCheckerSuite) TestMatchPeers(c *C) {
-    // partial store overlap not including leader
-    op1, op2 := s.mc.Check(s.regions[2])
-    s.checkSteps(c, op1, []schedule.OperatorStep{
-        schedule.TransferLeader{FromStore: 6, ToStore: 5},
-        schedule.AddLearner{ToStore: 1, PeerID: 1},
-        schedule.PromoteLearner{ToStore: 1, PeerID: 1},
-        schedule.RemovePeer{FromStore: 2},
-        schedule.AddLearner{ToStore: 4, PeerID: 2},
-        schedule.PromoteLearner{ToStore: 4, PeerID: 2},
-        schedule.RemovePeer{FromStore: 6},
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  false,
-        },
-    })
-    s.checkSteps(c, op2, []schedule.OperatorStep{
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  true,
-        },
-    })
-
-    // partial store overlap including leader
-    newRegion := s.regions[2].Clone(
-        core.SetPeers([]*metapb.Peer{
-            {Id: 106, StoreId: 1},
-            {Id: 107, StoreId: 5},
-            {Id: 108, StoreId: 6},
-        }),
-        core.WithLeader(&metapb.Peer{Id: 106, StoreId: 1}),
-    )
-    s.regions[2] = newRegion
-    s.cluster.PutRegion(s.regions[2])
-    op1, op2 = s.mc.Check(s.regions[2])
-    s.checkSteps(c, op1, []schedule.OperatorStep{
-        schedule.AddLearner{ToStore: 4, PeerID: 3},
-        schedule.PromoteLearner{ToStore: 4, PeerID: 3},
-        schedule.RemovePeer{FromStore: 6},
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  false,
-        },
-    })
-    s.checkSteps(c, op2, []schedule.OperatorStep{
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  true,
-        },
-    })
-
-    // all stores overlap
-    s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
-        {Id: 106, StoreId: 1},
-        {Id: 107, StoreId: 5},
-        {Id: 108, StoreId: 4},
-    }))
-    s.cluster.PutRegion(s.regions[2])
-    op1, op2 = s.mc.Check(s.regions[2])
-    s.checkSteps(c, op1, []schedule.OperatorStep{
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  false,
-        },
-    })
-    s.checkSteps(c, op2, []schedule.OperatorStep{
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  true,
-        },
-    })
-
-    // all stores not overlap
-    s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
-        {Id: 109, StoreId: 2},
-        {Id: 110, StoreId: 3},
-        {Id: 111, StoreId: 6},
-    }), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2}))
-    s.cluster.PutRegion(s.regions[2])
-    op1, op2 = s.mc.Check(s.regions[2])
-    s.checkSteps(c, op1, []schedule.OperatorStep{
-        schedule.AddLearner{ToStore: 1, PeerID: 4},
-        schedule.PromoteLearner{ToStore: 1, PeerID: 4},
-        schedule.RemovePeer{FromStore: 3},
-        schedule.AddLearner{ToStore: 4, PeerID: 5},
-        schedule.PromoteLearner{ToStore: 4, PeerID: 5},
-        schedule.RemovePeer{FromStore: 6},
-        schedule.AddLearner{ToStore: 5, PeerID: 6},
-        schedule.PromoteLearner{ToStore: 5, PeerID: 6},
-        schedule.TransferLeader{FromStore: 2, ToStore: 1},
-        schedule.RemovePeer{FromStore: 2},
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  false,
-        },
-    })
-    s.checkSteps(c, op2, []schedule.OperatorStep{
-        schedule.MergeRegion{
-            FromRegion: s.regions[2].GetMeta(),
-            ToRegion:   s.regions[1].GetMeta(),
-            IsPassive:  true,
-        },
-    })
-}
-
 var _ = Suite(&testBalanceHotWriteRegionSchedulerSuite{})
 
 type testBalanceHotWriteRegionSchedulerSuite struct{}
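With the suite relocated, it now runs with the schedule package's tests rather than the schedulers package's. Assuming the standard pingcap/check (gocheck) flags, the usual filter flag should select just this suite:

    go test ./server/schedule -check.f testMergeCheckerSuite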