schedule: make merge operators less prone to timing out (tikv#1495)
* Fix merge operators timing out too easily

Signed-off-by: Connor1996 <zbk602423539@gmail.com>
Connor1996 committed Apr 10, 2019
1 parent 66aeaa7 commit c601d7e
Showing 3 changed files with 255 additions and 227 deletions.
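For context on the fix: once scheduled, an operator is judged against a timeout budget chosen from its kind. Operators whose kind includes region-level work are allowed the longer RegionOperatorWaitTime, while everything else falls back to the much shorter LeaderOperatorWaitTime, so a merge operator created without the region kind bit could time out before the merge completed. Below is a minimal, self-contained sketch of that selection logic; the OperatorKind flags, the Operator struct, and the two wait-time values are simplified stand-ins for the real definitions in server/schedule/operator.go, not PD's actual code.

package main

import (
	"fmt"
	"time"
)

// OperatorKind is a bit set describing what an operator does.
type OperatorKind uint32

const (
	OpLeader OperatorKind = 1 << iota
	OpRegion
	OpMerge
)

// Placeholder timeouts; the real LeaderOperatorWaitTime and
// RegionOperatorWaitTime constants live in server/schedule/operator.go.
const (
	LeaderOperatorWaitTime = 10 * time.Second
	RegionOperatorWaitTime = 10 * time.Minute
)

type Operator struct {
	kind       OperatorKind
	createTime time.Time
}

// IsTimeout picks the budget by kind: region-level operators get the
// longer region timeout, everything else the short leader timeout.
func (o *Operator) IsTimeout() bool {
	timeout := LeaderOperatorWaitTime
	if o.kind&OpRegion != 0 {
		timeout = RegionOperatorWaitTime
	}
	return time.Since(o.createTime) > timeout
}

func main() {
	created := time.Now().Add(-time.Minute)
	// An operator whose kind lacks OpRegion is judged against the short
	// leader budget and has already "timed out" here.
	withoutRegionKind := &Operator{kind: OpMerge, createTime: created}
	// One that also carries OpRegion (as the target-side merge operator does
	// after this commit, since it inherits the source's step kinds) still has time.
	withRegionKind := &Operator{kind: OpMerge | OpRegion, createTime: created}
	fmt.Println(withoutRegionKind.IsTimeout(), withRegionKind.IsTimeout()) // true false
}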
254 changes: 254 additions & 0 deletions server/schedule/merge_checker_test.go
@@ -0,0 +1,254 @@
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package schedule

import (
	"time"

	. "github.com/pingcap/check"
	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/pingcap/pd/server/core"
	"github.com/pingcap/pd/server/namespace"
)

var _ = Suite(&testMergeCheckerSuite{})

type testMergeCheckerSuite struct {
	cluster *MockCluster
	mc      *MergeChecker
	regions []*core.RegionInfo
}

func (s *testMergeCheckerSuite) SetUpTest(c *C) {
	cfg := NewMockSchedulerOptions()
	cfg.MaxMergeRegionSize = 2
	cfg.MaxMergeRegionKeys = 2
	s.cluster = NewMockCluster(cfg)
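	// Four adjacent regions covering the whole key space; sizes, key counts
	// and peer placements are chosen to exercise the different merge-check paths.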
	s.regions = []*core.RegionInfo{
		core.NewRegionInfo(
			&metapb.Region{
				Id:       1,
				StartKey: []byte(""),
				EndKey:   []byte("a"),
				Peers: []*metapb.Peer{
					{Id: 101, StoreId: 1},
					{Id: 102, StoreId: 2},
				},
			},
			&metapb.Peer{Id: 101, StoreId: 1},
			core.SetApproximateSize(1),
			core.SetApproximateKeys(1),
		),
		core.NewRegionInfo(
			&metapb.Region{
				Id:       2,
				StartKey: []byte("a"),
				EndKey:   []byte("t"),
				Peers: []*metapb.Peer{
					{Id: 103, StoreId: 1},
					{Id: 104, StoreId: 4},
					{Id: 105, StoreId: 5},
				},
			},
			&metapb.Peer{Id: 104, StoreId: 4},
			core.SetApproximateSize(200),
			core.SetApproximateKeys(200),
		),
		core.NewRegionInfo(
			&metapb.Region{
				Id:       3,
				StartKey: []byte("t"),
				EndKey:   []byte("x"),
				Peers: []*metapb.Peer{
					{Id: 106, StoreId: 2},
					{Id: 107, StoreId: 5},
					{Id: 108, StoreId: 6},
				},
			},
			&metapb.Peer{Id: 108, StoreId: 6},
			core.SetApproximateSize(1),
			core.SetApproximateKeys(1),
		),
		core.NewRegionInfo(
			&metapb.Region{
				Id:       4,
				StartKey: []byte("x"),
				EndKey:   []byte(""),
				Peers: []*metapb.Peer{
					{Id: 109, StoreId: 4},
				},
			},
			&metapb.Peer{Id: 109, StoreId: 4},
			core.SetApproximateSize(10),
			core.SetApproximateKeys(10),
		),
	}

	for _, region := range s.regions {
		c.Assert(s.cluster.PutRegion(region), IsNil)
	}

	s.mc = NewMergeChecker(s.cluster, namespace.DefaultClassifier)
}

func (s *testMergeCheckerSuite) TestBasic(c *C) {
	s.cluster.MockSchedulerOptions.SplitMergeInterval = time.Hour

	// The region's peer count should match the configured replica count.
	op1, op2 := s.mc.Check(s.regions[0])
	c.Assert(op1, IsNil)
	c.Assert(op2, IsNil)
	// The region's size should be small enough to be merged.
	op1, op2 = s.mc.Check(s.regions[1])
	c.Assert(op1, IsNil)
	c.Assert(op2, IsNil)
	op1, op2 = s.mc.Check(s.regions[2])
	c.Assert(op1, NotNil)
	c.Assert(op2, NotNil)
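	// Both merge operators should outlive the leader timeout and only expire
	// once the longer region timeout has also passed.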
	for _, op := range []*Operator{op1, op2} {
		op.createTime = op.createTime.Add(-LeaderOperatorWaitTime - time.Second)
		c.Assert(op.IsTimeout(), IsFalse)
		op.createTime = op.createTime.Add(-RegionOperatorWaitTime - time.Second)
		c.Assert(op.IsTimeout(), IsTrue)
	}
	// Skip recently split regions.
	s.mc.RecordRegionSplit(s.regions[2].GetID())
	op1, op2 = s.mc.Check(s.regions[2])
	c.Assert(op1, IsNil)
	c.Assert(op2, IsNil)
	op1, op2 = s.mc.Check(s.regions[3])
	c.Assert(op1, IsNil)
	c.Assert(op2, IsNil)
}

func (s *testMergeCheckerSuite) checkSteps(c *C, op *Operator, steps []OperatorStep) {
	c.Assert(op.Kind()&OpMerge, Not(Equals), 0)
	c.Assert(steps, NotNil)
	c.Assert(op.Len(), Equals, len(steps))
	for i := range steps {
		c.Assert(op.Step(i), DeepEquals, steps[i])
	}
}

func (s *testMergeCheckerSuite) TestMatchPeers(c *C) {
	// partial store overlap not including leader
	op1, op2 := s.mc.Check(s.regions[2])
	s.checkSteps(c, op1, []OperatorStep{
		TransferLeader{FromStore: 6, ToStore: 5},
		AddLearner{ToStore: 1, PeerID: 1},
		PromoteLearner{ToStore: 1, PeerID: 1},
		RemovePeer{FromStore: 2},
		AddLearner{ToStore: 4, PeerID: 2},
		PromoteLearner{ToStore: 4, PeerID: 2},
		RemovePeer{FromStore: 6},
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  false,
		},
	})
	s.checkSteps(c, op2, []OperatorStep{
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  true,
		},
	})

	// partial store overlap including leader
	newRegion := s.regions[2].Clone(
		core.SetPeers([]*metapb.Peer{
			{Id: 106, StoreId: 1},
			{Id: 107, StoreId: 5},
			{Id: 108, StoreId: 6},
		}),
		core.WithLeader(&metapb.Peer{Id: 106, StoreId: 1}),
	)
	s.regions[2] = newRegion
	s.cluster.PutRegion(s.regions[2])
	op1, op2 = s.mc.Check(s.regions[2])
	s.checkSteps(c, op1, []OperatorStep{
		AddLearner{ToStore: 4, PeerID: 3},
		PromoteLearner{ToStore: 4, PeerID: 3},
		RemovePeer{FromStore: 6},
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  false,
		},
	})
	s.checkSteps(c, op2, []OperatorStep{
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  true,
		},
	})

	// all stores overlap
	s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
		{Id: 106, StoreId: 1},
		{Id: 107, StoreId: 5},
		{Id: 108, StoreId: 4},
	}))
	s.cluster.PutRegion(s.regions[2])
	op1, op2 = s.mc.Check(s.regions[2])
	s.checkSteps(c, op1, []OperatorStep{
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  false,
		},
	})
	s.checkSteps(c, op2, []OperatorStep{
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  true,
		},
	})

	// all stores not overlap
	s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{
		{Id: 109, StoreId: 2},
		{Id: 110, StoreId: 3},
		{Id: 111, StoreId: 6},
	}), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2}))
	s.cluster.PutRegion(s.regions[2])
	op1, op2 = s.mc.Check(s.regions[2])
	s.checkSteps(c, op1, []OperatorStep{
		AddLearner{ToStore: 1, PeerID: 4},
		PromoteLearner{ToStore: 1, PeerID: 4},
		RemovePeer{FromStore: 3},
		AddLearner{ToStore: 4, PeerID: 5},
		PromoteLearner{ToStore: 4, PeerID: 5},
		RemovePeer{FromStore: 6},
		AddLearner{ToStore: 5, PeerID: 6},
		PromoteLearner{ToStore: 5, PeerID: 6},
		TransferLeader{FromStore: 2, ToStore: 1},
		RemovePeer{FromStore: 2},
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  false,
		},
	})
	s.checkSteps(c, op2, []OperatorStep{
		MergeRegion{
			FromRegion: s.regions[2].GetMeta(),
			ToRegion:   s.regions[1].GetMeta(),
			IsPassive:  true,
		},
	})
}
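Like PD's other test files, this suite is written for the gocheck framework (github.com/pingcap/check) rather than plain testing functions. It presumes the schedule package already wires gocheck into go test somewhere with the usual one-line hook, roughly as sketched below; the hook name here is an assumption for illustration and is not part of this diff.

package schedule

import (
	"testing"

	. "github.com/pingcap/check"
)

// TestSchedule hands the gocheck suites registered in this package
// (including testMergeCheckerSuite) to the standard "go test" runner.
func TestSchedule(t *testing.T) {
	TestingT(t)
}

With such a hook in place, the new checks can be run with something like go test ./server/schedule -check.f TestMergeCheckerSuite, where -check.f is gocheck's filter flag.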
2 changes: 1 addition & 1 deletion server/schedule/operator.go
@@ -500,7 +500,7 @@ func CreateMergeRegionOperator(desc string, cluster Cluster, source *core.Region
	})

	op1 := NewOperator(desc, source.GetID(), source.GetRegionEpoch(), kinds|kind|OpMerge, steps...)
-	op2 := NewOperator(desc, target.GetID(), target.GetRegionEpoch(), kind|OpMerge, MergeRegion{
+	op2 := NewOperator(desc, target.GetID(), target.GetRegionEpoch(), kinds|kind|OpMerge, MergeRegion{
		FromRegion: source.GetMeta(),
		ToRegion:   target.GetMeta(),
		IsPassive:  true,
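The functional fix is the single kinds| added to op2: the target-side, passive merge operator now carries the same step-derived kinds as the source-side op1, so when the merge involves region-level steps (peer additions, promotions, removals) both halves of the merge are judged against the longer region timeout rather than the short leader one, which is presumably why merges stop timing out so easily.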
