From eee23be0ea0a8b6af500f36482abef1e264c3ab6 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Fri, 17 Jun 2022 14:46:04 +0800 Subject: [PATCH] migrate test framework to testify Signed-off-by: Ryan Leung --- pkg/testutil/operator_check.go | 60 ++ .../checker/joint_state_checker_test.go | 61 +- .../schedule/checker/learner_checker_test.go | 44 +- server/schedule/checker/merge_checker_test.go | 445 +++++------ .../checker/priority_inspector_test.go | 49 +- .../schedule/checker/replica_checker_test.go | 251 +++--- server/schedule/checker/rule_checker_test.go | 743 +++++++++--------- server/schedule/checker/split_checker_test.go | 66 +- 8 files changed, 846 insertions(+), 873 deletions(-) diff --git a/pkg/testutil/operator_check.go b/pkg/testutil/operator_check.go index 90779b7059aa..3c025c0f48bc 100644 --- a/pkg/testutil/operator_check.go +++ b/pkg/testutil/operator_check.go @@ -16,6 +16,7 @@ package testutil import ( "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/schedule/operator" ) @@ -141,3 +142,62 @@ func CheckTransferPeerWithLeaderTransferFrom(c *check.C, op *operator.Operator, kind |= operator.OpRegion | operator.OpLeader c.Assert(op.Kind()&kind, check.Equals, kind) } + +// CheckAddPeerWithTestify checks if the operator is to add peer on specified store. +func CheckAddPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, storeID uint64) { + re.NotNil(op) + re.Equal(2, op.Len()) + re.Equal(storeID, op.Step(0).(operator.AddLearner).ToStore) + re.IsType(operator.PromoteLearner{}, op.Step(1)) + kind |= operator.OpRegion + re.Equal(kind, op.Kind()&kind) +} + +// CheckRemovePeerWithTestify checks if the operator is to remove peer on specified store. +func CheckRemovePeerWithTestify(re *require.Assertions, op *operator.Operator, storeID uint64) { + re.NotNil(op) + if op.Len() == 1 { + re.Equal(storeID, op.Step(0).(operator.RemovePeer).FromStore) + } else { + re.Equal(2, op.Len()) + re.Equal(storeID, op.Step(0).(operator.TransferLeader).FromStore) + re.Equal(storeID, op.Step(1).(operator.RemovePeer).FromStore) + } +} + +// CheckTransferPeerWithTestify checks if the operator is to transfer peer between the specified source and target stores. +func CheckTransferPeerWithTestify(re *require.Assertions, op *operator.Operator, kind operator.OpKind, sourceID, targetID uint64) { + re.NotNil(op) + + steps, _ := trimTransferLeaders(op) + re.Len(steps, 3) + re.Equal(targetID, steps[0].(operator.AddLearner).ToStore) + re.IsType(operator.PromoteLearner{}, steps[1]) + re.Equal(sourceID, steps[2].(operator.RemovePeer).FromStore) + kind |= operator.OpRegion + re.Equal(kind, op.Kind()&kind) +} + +// CheckSteps checks if the operator matches the given steps. 
+func CheckSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) { + re.NotZero(op.Kind()&operator.OpMerge) + re.NotNil(steps) + re.Equal(len(steps), op.Len()) + for i := range steps { + switch op.Step(i).(type) { + case operator.AddLearner: + re.Equal(steps[i].(operator.AddLearner).ToStore, op.Step(i).(operator.AddLearner).ToStore) + case operator.PromoteLearner: + re.Equal(steps[i].(operator.PromoteLearner).ToStore, op.Step(i).(operator.PromoteLearner).ToStore) + case operator.TransferLeader: + re.Equal(steps[i].(operator.TransferLeader).FromStore, op.Step(i).(operator.TransferLeader).FromStore) + re.Equal(steps[i].(operator.TransferLeader).ToStore, op.Step(i).(operator.TransferLeader).ToStore) + case operator.RemovePeer: + re.Equal(steps[i].(operator.RemovePeer).FromStore, op.Step(i).(operator.RemovePeer).FromStore) + case operator.MergeRegion: + re.Equal(steps[i].(operator.MergeRegion).IsPassive, op.Step(i).(operator.MergeRegion).IsPassive) + default: + re.FailNow("unknown operator step type") + } + } +} diff --git a/server/schedule/checker/joint_state_checker_test.go b/server/schedule/checker/joint_state_checker_test.go index 5d759c51e67d..927d6181f9e9 100644 --- a/server/schedule/checker/joint_state_checker_test.go +++ b/server/schedule/checker/joint_state_checker_test.go @@ -16,42 +16,25 @@ package checker import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/operator" ) -var _ = Suite(&testJointStateCheckerSuite{}) - -type testJointStateCheckerSuite struct { - cluster *mockcluster.Cluster - jsc *JointStateChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testJointStateCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testJointStateCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testJointStateCheckerSuite) SetUpTest(c *C) { - s.cluster = mockcluster.NewCluster(s.ctx, config.NewTestOptions()) - s.jsc = NewJointStateChecker(s.cluster) +func TestLeaveJointState(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + jsc := NewJointStateChecker(cluster) for id := uint64(1); id <= 10; id++ { - s.cluster.PutStoreWithLabels(id) + cluster.PutStoreWithLabels(id) } -} - -func (s *testJointStateCheckerSuite) TestLeaveJointState(c *C) { - jsc := s.jsc type testCase struct { Peers []*metapb.Peer // first is leader OpSteps []operator.OpStep } @@ -131,38 +114,38 @@ func (s *testJointStateCheckerSuite) TestLeaveJointState(c *C) { for _, tc := range cases { region := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: tc.Peers}, tc.Peers[0]) op := jsc.Check(region) - s.checkSteps(c, op, tc.OpSteps) + checkSteps(re, op, tc.OpSteps) } } -func (s *testJointStateCheckerSuite) checkSteps(c *C, op *operator.Operator, steps []operator.OpStep) { +func checkSteps(re *require.Assertions, op *operator.Operator, steps []operator.OpStep) { if len(steps) == 0 { - c.Assert(op, IsNil) + re.Nil(op) return } - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "leave-joint-state") + re.NotNil(op) + re.Equal("leave-joint-state", op.Desc()) - c.Assert(op.Len(), Equals, len(steps)) + re.Equal(len(steps), op.Len()) for i := range steps { switch obtain 
:= op.Step(i).(type) { case operator.ChangePeerV2Leave: expect := steps[i].(operator.ChangePeerV2Leave) - c.Assert(len(obtain.PromoteLearners), Equals, len(expect.PromoteLearners)) - c.Assert(len(obtain.DemoteVoters), Equals, len(expect.DemoteVoters)) + re.Equal(len(expect.PromoteLearners), len(obtain.PromoteLearners)) + re.Equal(len(expect.DemoteVoters), len(obtain.DemoteVoters)) for j, p := range expect.PromoteLearners { - c.Assert(expect.PromoteLearners[j].ToStore, Equals, p.ToStore) + re.Equal(p.ToStore, obtain.PromoteLearners[j].ToStore) } for j, d := range expect.DemoteVoters { - c.Assert(obtain.DemoteVoters[j].ToStore, Equals, d.ToStore) + re.Equal(d.ToStore, obtain.DemoteVoters[j].ToStore) } case operator.TransferLeader: expect := steps[i].(operator.TransferLeader) - c.Assert(obtain.FromStore, Equals, expect.FromStore) - c.Assert(obtain.ToStore, Equals, expect.ToStore) + re.Equal(expect.FromStore, obtain.FromStore) + re.Equal(expect.ToStore, obtain.ToStore) default: - c.Fatal("unknown operator step type") + re.FailNow("unknown operator step type") } } } diff --git a/server/schedule/checker/learner_checker_test.go b/server/schedule/checker/learner_checker_test.go index 1a403e790438..afe4b920313a 100644 --- a/server/schedule/checker/learner_checker_test.go +++ b/server/schedule/checker/learner_checker_test.go @@ -16,9 +16,10 @@ package checker import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" @@ -26,31 +27,16 @@ import ( "github.com/tikv/pd/server/versioninfo" ) -var _ = Suite(&testLearnerCheckerSuite{}) - -type testLearnerCheckerSuite struct { - cluster *mockcluster.Cluster - lc *LearnerChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testLearnerCheckerSuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.cluster = mockcluster.NewCluster(s.ctx, config.NewTestOptions()) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.lc = NewLearnerChecker(s.cluster) +func TestPromoteLearner(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, config.NewTestOptions()) + cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + lc := NewLearnerChecker(cluster) for id := uint64(1); id <= 10; id++ { - s.cluster.PutStoreWithLabels(id) + cluster.PutStoreWithLabels(id) } -} - -func (s *testLearnerCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testLearnerCheckerSuite) TestPromoteLearner(c *C) { - lc := s.lc region := core.NewRegionInfo( &metapb.Region{ @@ -62,12 +48,12 @@ func (s *testLearnerCheckerSuite) TestPromoteLearner(c *C) { }, }, &metapb.Peer{Id: 101, StoreId: 1}) op := lc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "promote-learner") - c.Assert(op.Step(0), FitsTypeOf, operator.PromoteLearner{}) - c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(3)) + re.NotNil(op) + re.Equal("promote-learner", op.Desc()) + re.IsType(operator.PromoteLearner{}, op.Step(0)) + re.Equal(uint64(3), op.Step(0).(operator.PromoteLearner).ToStore) region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetPeer(103)})) op = lc.Check(region) - c.Assert(op, IsNil) + re.Nil(op) } diff --git 
a/server/schedule/checker/merge_checker_test.go b/server/schedule/checker/merge_checker_test.go index 21c6eeec410a..86127ca73b3f 100644 --- a/server/schedule/checker/merge_checker_test.go +++ b/server/schedule/checker/merge_checker_test.go @@ -20,8 +20,8 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" @@ -36,17 +36,12 @@ import ( "go.uber.org/goleak" ) -func TestMergeChecker(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = Suite(&testMergeCheckerSuite{}) - -type testMergeCheckerSuite struct { +type mergeCheckerTestSuite struct { + suite.Suite ctx context.Context cancel context.CancelFunc cluster *mockcluster.Cluster @@ -54,145 +49,146 @@ type testMergeCheckerSuite struct { regions []*core.RegionInfo } -func (s *testMergeCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) +func TestMergeCheckerSuite(t *testing.T) { + suite.Run(t, new(mergeCheckerTestSuite)) } -func (s *testMergeCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testMergeCheckerSuite) SetUpTest(c *C) { +func (suite *mergeCheckerTestSuite) SetupTest() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetMaxMergeRegionSize(2) - s.cluster.SetMaxMergeRegionKeys(2) - s.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetMaxMergeRegionSize(2) + suite.cluster.SetMaxMergeRegionKeys(2) + suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ config.RejectLeader: {{Key: "reject", Value: "leader"}}, }) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, 7: {"reject", "leader"}, 8: {"reject", "leader"}, } for storeID, labels := range stores { - s.cluster.PutStoreWithLabels(storeID, labels...) + suite.cluster.PutStoreWithLabels(storeID, labels...) } - s.regions = []*core.RegionInfo{ + suite.regions = []*core.RegionInfo{ newRegionInfo(1, "", "a", 1, 1, []uint64{101, 1}, []uint64{101, 1}, []uint64{102, 2}), newRegionInfo(2, "a", "t", 200, 200, []uint64{104, 4}, []uint64{103, 1}, []uint64{104, 4}, []uint64{105, 5}), newRegionInfo(3, "t", "x", 1, 1, []uint64{108, 6}, []uint64{106, 2}, []uint64{107, 5}, []uint64{108, 6}), newRegionInfo(4, "x", "", 1, 1, []uint64{109, 4}, []uint64{109, 4}), } - for _, region := range s.regions { - s.cluster.PutRegion(region) + for _, region := range suite.regions { + suite.cluster.PutRegion(region) } - s.mc = NewMergeChecker(s.ctx, s.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster) } -func (s *testMergeCheckerSuite) TestBasic(c *C) { - s.cluster.SetSplitMergeInterval(0) +func (suite *mergeCheckerTestSuite) TearDownTest() { + suite.cancel() +} + +func (suite *mergeCheckerTestSuite) TestBasic() { + suite.cluster.SetSplitMergeInterval(0) // should with same peer count - ops := s.mc.Check(s.regions[0]) - c.Assert(ops, IsNil) + ops := suite.mc.Check(suite.regions[0]) + suite.Nil(ops) // The size should be small enough. 
- ops = s.mc.Check(s.regions[1]) - c.Assert(ops, IsNil) + ops = suite.mc.Check(suite.regions[1]) + suite.Nil(ops) // target region size is too large - s.cluster.PutRegion(s.regions[1].Clone(core.SetApproximateSize(600))) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.PutRegion(suite.regions[1].Clone(core.SetApproximateSize(600))) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // it can merge if the max region size of the store is greater than the target region size. - config := s.cluster.GetStoreConfig() + config := suite.cluster.GetStoreConfig() config.RegionMaxSize = "10Gib" - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) config.RegionMaxSize = "144Mib" - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // change the size back - s.cluster.PutRegion(s.regions[1].Clone(core.SetApproximateSize(200))) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + suite.cluster.PutRegion(suite.regions[1].Clone(core.SetApproximateSize(200))) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) // Check merge with previous region. - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the peer store check. - store := s.cluster.GetStore(1) - c.Assert(store, NotNil) + store := suite.cluster.GetStore(1) + suite.NotNil(store) // Test the peer store is deleted. - s.cluster.DeleteStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.DeleteStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // Test the store is normal. - s.cluster.PutStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.PutStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is offline. - s.cluster.SetStoreOffline(store.GetID()) - ops = s.mc.Check(s.regions[2]) + suite.cluster.SetStoreOffline(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) // Only target region have a peer on the offline store, // so it's not ok to merge. - c.Assert(ops, IsNil) + suite.Nil(ops) // Test the store is up. - s.cluster.SetStoreUp(store.GetID()) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) - store = s.cluster.GetStore(5) - c.Assert(store, NotNil) + suite.cluster.SetStoreUp(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) + store = suite.cluster.GetStore(5) + suite.NotNil(store) // Test the peer store is deleted. - s.cluster.DeleteStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) + suite.cluster.DeleteStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) // Test the store is normal. 
- s.cluster.PutStore(store) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.PutStore(store) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is offline. - s.cluster.SetStoreOffline(store.GetID()) - ops = s.mc.Check(s.regions[2]) + suite.cluster.SetStoreOffline(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) // Both regions have peers on the offline store, // so it's ok to merge. - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Test the store is up. - s.cluster.SetStoreUp(store.GetID()) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) + suite.cluster.SetStoreUp(store.GetID()) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) // Enable one way merge - s.cluster.SetEnableOneWayMerge(true) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - s.cluster.SetEnableOneWayMerge(false) + suite.cluster.SetEnableOneWayMerge(true) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + suite.cluster.SetEnableOneWayMerge(false) // Make up peers for next region. - s.regions[3] = s.regions[3].Clone(core.WithAddPeer(&metapb.Peer{Id: 110, StoreId: 1}), core.WithAddPeer(&metapb.Peer{Id: 111, StoreId: 2})) - s.cluster.PutRegion(s.regions[3]) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) + suite.regions[3] = suite.regions[3].Clone(core.WithAddPeer(&metapb.Peer{Id: 110, StoreId: 1}), core.WithAddPeer(&metapb.Peer{Id: 111, StoreId: 2})) + suite.cluster.PutRegion(suite.regions[3]) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) // Now it merges to next region. - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[3].GetID()) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[3].GetID(), ops[1].RegionID()) // merge cannot across rule key. - s.cluster.SetEnablePlacementRules(true) - s.cluster.RuleManager.SetRule(&placement.Rule{ + suite.cluster.SetEnablePlacementRules(true) + suite.cluster.RuleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 1, @@ -203,83 +199,60 @@ func (s *testMergeCheckerSuite) TestBasic(c *C) { Count: 3, }) // region 2 can only merge with previous region now. 
- ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - c.Assert(ops[0].RegionID(), Equals, s.regions[2].GetID()) - c.Assert(ops[1].RegionID(), Equals, s.regions[1].GetID()) - s.cluster.RuleManager.DeleteRule("pd", "test") + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + suite.Equal(suite.regions[2].GetID(), ops[0].RegionID()) + suite.Equal(suite.regions[1].GetID(), ops[1].RegionID()) + suite.cluster.RuleManager.DeleteRule("pd", "test") // check 'merge_option' label - s.cluster.GetRegionLabeler().SetLabelRule(&labeler.LabelRule{ + suite.cluster.GetRegionLabeler().SetLabelRule(&labeler.LabelRule{ ID: "test", Labels: []labeler.RegionLabel{{Key: mergeOptionLabel, Value: mergeOptionValueDeny}}, RuleType: labeler.KeyRange, Data: makeKeyRanges("", "74"), }) - ops = s.mc.Check(s.regions[0]) - c.Assert(ops, HasLen, 0) - ops = s.mc.Check(s.regions[1]) - c.Assert(ops, HasLen, 0) + ops = suite.mc.Check(suite.regions[0]) + suite.Len(ops, 0) + ops = suite.mc.Check(suite.regions[1]) + suite.Len(ops, 0) // Skip recently split regions. - s.cluster.SetSplitMergeInterval(time.Hour) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - - s.mc.startTime = time.Now().Add(-2 * time.Hour) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, NotNil) - - s.mc.RecordRegionSplit([]uint64{s.regions[2].GetID()}) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, IsNil) - - s.cluster.SetSplitMergeInterval(500 * time.Millisecond) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, IsNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, IsNil) + suite.cluster.SetSplitMergeInterval(time.Hour) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + + suite.mc.startTime = time.Now().Add(-2 * time.Hour) + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.NotNil(ops) + + suite.mc.RecordRegionSplit([]uint64{suite.regions[2].GetID()}) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.Nil(ops) + + suite.cluster.SetSplitMergeInterval(500 * time.Millisecond) + ops = suite.mc.Check(suite.regions[2]) + suite.Nil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.Nil(ops) time.Sleep(500 * time.Millisecond) - ops = s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - ops = s.mc.Check(s.regions[3]) - c.Assert(ops, NotNil) -} - -func (s *testMergeCheckerSuite) checkSteps(c *C, op *operator.Operator, steps []operator.OpStep) { - c.Assert(op.Kind()&operator.OpMerge, Not(Equals), 0) - c.Assert(steps, NotNil) - c.Assert(op.Len(), Equals, len(steps)) - for i := range steps { - switch op.Step(i).(type) { - case operator.AddLearner: - c.Assert(op.Step(i).(operator.AddLearner).ToStore, Equals, steps[i].(operator.AddLearner).ToStore) - case operator.PromoteLearner: - c.Assert(op.Step(i).(operator.PromoteLearner).ToStore, Equals, steps[i].(operator.PromoteLearner).ToStore) - case operator.TransferLeader: - c.Assert(op.Step(i).(operator.TransferLeader).FromStore, Equals, steps[i].(operator.TransferLeader).FromStore) - c.Assert(op.Step(i).(operator.TransferLeader).ToStore, Equals, steps[i].(operator.TransferLeader).ToStore) - case operator.RemovePeer: - c.Assert(op.Step(i).(operator.RemovePeer).FromStore, Equals, steps[i].(operator.RemovePeer).FromStore) - case operator.MergeRegion: - c.Assert(op.Step(i).(operator.MergeRegion).IsPassive, Equals, steps[i].(operator.MergeRegion).IsPassive) - default: - 
c.Fatal("unknown operator step type") - } - } + ops = suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + ops = suite.mc.Check(suite.regions[3]) + suite.NotNil(ops) } -func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { - s.cluster.SetSplitMergeInterval(0) +func (suite *mergeCheckerTestSuite) TestMatchPeers() { + suite.cluster.SetSplitMergeInterval(0) // partial store overlap not including leader - ops := s.mc.Check(s.regions[2]) - c.Assert(ops, NotNil) - s.checkSteps(c, ops[0], []operator.OpStep{ + ops := suite.mc.Check(suite.regions[2]) + suite.NotNil(ops) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 2}, @@ -288,21 +261,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 6, ToStore: 5}, operator.RemovePeer{FromStore: 6}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // partial store overlap including leader - newRegion := s.regions[2].Clone( + newRegion := suite.regions[2].Clone( core.SetPeers([]*metapb.Peer{ {Id: 106, StoreId: 1}, {Id: 107, StoreId: 5}, @@ -310,59 +283,59 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 106, StoreId: 1}), ) - s.regions[2] = newRegion - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.regions[2] = newRegion + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 4}, operator.PromoteLearner{ToStore: 4}, operator.RemovePeer{FromStore: 6}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // all stores overlap - s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{ + suite.regions[2] = suite.regions[2].Clone(core.SetPeers([]*metapb.Peer{ {Id: 106, StoreId: 1}, {Id: 107, StoreId: 5}, {Id: 108, StoreId: 4}, })) - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: 
s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // all stores not overlap - s.regions[2] = s.regions[2].Clone(core.SetPeers([]*metapb.Peer{ + suite.regions[2] = suite.regions[2].Clone(core.SetPeers([]*metapb.Peer{ {Id: 109, StoreId: 2}, {Id: 110, StoreId: 3}, {Id: 111, StoreId: 6}, }), core.WithLeader(&metapb.Peer{Id: 109, StoreId: 2})) - s.cluster.PutRegion(s.regions[2]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[2]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -374,21 +347,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 2, ToStore: 1}, operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // no overlap with reject leader label - s.regions[1] = s.regions[1].Clone( + suite.regions[1] = suite.regions[1].Clone( core.SetPeers([]*metapb.Peer{ {Id: 112, StoreId: 7}, {Id: 113, StoreId: 8}, @@ -396,9 +369,9 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 114, StoreId: 1}), ) - s.cluster.PutRegion(s.regions[1]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], []operator.OpStep{ + suite.cluster.PutRegion(suite.regions[1]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -413,21 +386,21 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) // overlap with reject leader label - s.regions[1] = s.regions[1].Clone( + suite.regions[1] = suite.regions[1].Clone( core.SetPeers([]*metapb.Peer{ {Id: 115, StoreId: 7}, {Id: 116, StoreId: 8}, @@ -435,7 +408,7 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 117, StoreId: 1}), ) - s.regions[2] = s.regions[2].Clone( + suite.regions[2] = suite.regions[2].Clone( core.SetPeers([]*metapb.Peer{ {Id: 118, StoreId: 7}, {Id: 119, StoreId: 3}, @@ -443,9 +416,9 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { }), core.WithLeader(&metapb.Peer{Id: 120, StoreId: 2}), ) - s.cluster.PutRegion(s.regions[1]) - ops = s.mc.Check(s.regions[2]) - s.checkSteps(c, ops[0], 
[]operator.OpStep{ + suite.cluster.PutRegion(suite.regions[1]) + ops = suite.mc.Check(suite.regions[2]) + testutil.CheckSteps(suite.Require(), ops[0], []operator.OpStep{ operator.AddLearner{ToStore: 1}, operator.PromoteLearner{ToStore: 1}, operator.RemovePeer{FromStore: 3}, @@ -454,23 +427,23 @@ func (s *testMergeCheckerSuite) TestMatchPeers(c *C) { operator.TransferLeader{FromStore: 2, ToStore: 1}, operator.RemovePeer{FromStore: 2}, operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: false, }, }) - s.checkSteps(c, ops[1], []operator.OpStep{ + testutil.CheckSteps(suite.Require(), ops[1], []operator.OpStep{ operator.MergeRegion{ - FromRegion: s.regions[2].GetMeta(), - ToRegion: s.regions[1].GetMeta(), + FromRegion: suite.regions[2].GetMeta(), + ToRegion: suite.regions[1].GetMeta(), IsPassive: true, }, }) } -func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { +func (suite *mergeCheckerTestSuite) TestStoreLimitWithMerge() { cfg := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, cfg) + tc := mockcluster.NewCluster(suite.ctx, cfg) tc.SetMaxMergeRegionSize(2) tc.SetMaxMergeRegionKeys(2) tc.SetSplitMergeInterval(0) @@ -489,9 +462,9 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { tc.PutRegion(region) } - mc := NewMergeChecker(s.ctx, tc) - stream := hbstream.NewTestHeartbeatStreams(s.ctx, tc.ID, tc, false /* no need to run */) - oc := schedule.NewOperatorController(s.ctx, tc, stream) + mc := NewMergeChecker(suite.ctx, tc) + stream := hbstream.NewTestHeartbeatStreams(suite.ctx, tc.ID, tc, false /* no need to run */) + oc := schedule.NewOperatorController(suite.ctx, tc, stream) regions[2] = regions[2].Clone( core.SetPeers([]*metapb.Peer{ @@ -509,8 +482,8 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { // The size of Region is less or equal than 1MB. for i := 0; i < 50; i++ { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsTrue) + suite.NotNil(ops) + suite.True(oc.AddOperator(ops...)) for _, op := range ops { oc.RemoveOperator(op) } @@ -523,49 +496,49 @@ func (s *testMergeCheckerSuite) TestStoreLimitWithMerge(c *C) { // The size of Region is more than 1MB but no more than 20MB. 
for i := 0; i < 5; i++ { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsTrue) + suite.NotNil(ops) + suite.True(oc.AddOperator(ops...)) for _, op := range ops { oc.RemoveOperator(op) } } { ops := mc.Check(regions[2]) - c.Assert(ops, NotNil) - c.Assert(oc.AddOperator(ops...), IsFalse) + suite.NotNil(ops) + suite.False(oc.AddOperator(ops...)) } } -func (s *testMergeCheckerSuite) TestCache(c *C) { +func (suite *mergeCheckerTestSuite) TestCache() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetMaxMergeRegionSize(2) - s.cluster.SetMaxMergeRegionKeys(2) - s.cluster.SetSplitMergeInterval(time.Hour) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetMaxMergeRegionSize(2) + suite.cluster.SetMaxMergeRegionKeys(2) + suite.cluster.SetSplitMergeInterval(time.Hour) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) stores := map[uint64][]string{ 1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}, } for storeID, labels := range stores { - s.cluster.PutStoreWithLabels(storeID, labels...) + suite.cluster.PutStoreWithLabels(storeID, labels...) } - s.regions = []*core.RegionInfo{ + suite.regions = []*core.RegionInfo{ newRegionInfo(2, "a", "t", 200, 200, []uint64{104, 4}, []uint64{103, 1}, []uint64{104, 4}, []uint64{105, 5}), newRegionInfo(3, "t", "x", 1, 1, []uint64{108, 6}, []uint64{106, 2}, []uint64{107, 5}, []uint64{108, 6}), } - for _, region := range s.regions { - s.cluster.PutRegion(region) + for _, region := range suite.regions { + suite.cluster.PutRegion(region) } - s.mc = NewMergeChecker(s.ctx, s.cluster) + suite.mc = NewMergeChecker(suite.ctx, suite.cluster) - ops := s.mc.Check(s.regions[1]) - c.Assert(ops, IsNil) - s.cluster.SetSplitMergeInterval(0) + ops := suite.mc.Check(suite.regions[1]) + suite.Nil(ops) + suite.cluster.SetSplitMergeInterval(0) time.Sleep(time.Second) - ops = s.mc.Check(s.regions[1]) - c.Assert(ops, NotNil) + ops = suite.mc.Check(suite.regions[1]) + suite.NotNil(ops) } func makeKeyRanges(keys ...string) []interface{} { diff --git a/server/schedule/checker/priority_inspector_test.go b/server/schedule/checker/priority_inspector_test.go index 319c330d3591..467c80fdd9b3 100644 --- a/server/schedule/checker/priority_inspector_test.go +++ b/server/schedule/checker/priority_inspector_test.go @@ -16,31 +16,20 @@ package checker import ( "context" + "testing" "time" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" ) -var _ = Suite(&testPriorityInspectorSuite{}) - -type testPriorityInspectorSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testPriorityInspectorSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testPriorityInspectorSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testPriorityInspectorSuite) TestCheckPriorityRegions(c *C) { +func TestCheckPriorityRegions(t *testing.T) { + re := require.New(t) opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc := mockcluster.NewCluster(ctx, opt) tc.AddRegionStore(1, 0) tc.AddRegionStore(2, 0) tc.AddRegionStore(3, 0) @@ -49,42 +38,42 @@ func (s *testPriorityInspectorSuite) TestCheckPriorityRegions(c *C) { tc.AddLeaderRegion(3, 2) pc := NewPriorityInspector(tc) - checkPriorityRegionTest(pc, tc, c) + checkPriorityRegionTest(re, pc, tc) opt.SetPlacementRuleEnabled(true) - c.Assert(opt.IsPlacementRulesEnabled(), IsTrue) - checkPriorityRegionTest(pc, tc, c) + re.True(opt.IsPlacementRulesEnabled()) + checkPriorityRegionTest(re, pc, tc) } -func checkPriorityRegionTest(pc *PriorityInspector, tc *mockcluster.Cluster, c *C) { +func checkPriorityRegionTest(re *require.Assertions, pc *PriorityInspector, tc *mockcluster.Cluster) { // case1: inspect region 1, it doesn't lack replica region := tc.GetRegion(1) opt := tc.GetOpts() pc.Inspect(region) - c.Assert(0, Equals, pc.queue.Len()) + re.Equal(pc.queue.Len(), 0) // case2: inspect region 2, it lacks one replica region = tc.GetRegion(2) pc.Inspect(region) - c.Assert(1, Equals, pc.queue.Len()) + re.Equal(pc.queue.Len(), 1) // the region will not rerun after it checks - c.Assert(0, Equals, len(pc.GetPriorityRegions())) + re.Equal(len(pc.GetPriorityRegions()), 0) // case3: inspect region 3, it will has high priority region = tc.GetRegion(3) pc.Inspect(region) - c.Assert(2, Equals, pc.queue.Len()) + re.Equal(pc.queue.Len(), 2) time.Sleep(opt.GetPatrolRegionInterval() * 10) // region 3 has higher priority ids := pc.GetPriorityRegions() - c.Assert(2, Equals, len(ids)) - c.Assert(uint64(3), Equals, ids[0]) - c.Assert(uint64(2), Equals, ids[1]) + re.Equal(len(ids), 2) + re.Equal(ids[0], uint64(3)) + re.Equal(ids[1], uint64(2)) // case4: inspect region 2 again after it fixup replicas tc.AddLeaderRegion(2, 2, 3, 1) region = tc.GetRegion(2) pc.Inspect(region) - c.Assert(1, Equals, pc.queue.Len()) + re.Equal(pc.queue.Len(), 1) // recover tc.AddLeaderRegion(2, 2, 3) diff --git a/server/schedule/checker/replica_checker_test.go b/server/schedule/checker/replica_checker_test.go index 87c813d01119..9575a0446835 100644 --- a/server/schedule/checker/replica_checker_test.go +++ b/server/schedule/checker/replica_checker_test.go @@ -16,11 +16,12 @@ package checker import ( "context" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" @@ -35,28 +36,24 @@ const ( MB = 1024 * KB ) -var _ = Suite(&testReplicaCheckerSuite{}) - -type testReplicaCheckerSuite struct { +type replicaCheckerTestSuite struct { + suite.Suite cluster *mockcluster.Cluster rc *ReplicaChecker ctx context.Context cancel context.CancelFunc } -func (s *testReplicaCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testReplicaCheckerSuite) TearDownTest(c *C) { - s.cancel() +func TestReplicaCheckerSuite(t *testing.T) { + suite.Run(t, new(replicaCheckerTestSuite)) } -func (s *testReplicaCheckerSuite) SetUpTest(c *C) { +func (suite *replicaCheckerTestSuite) SetupTest() { cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.rc = NewReplicaChecker(s.cluster, cache.NewDefaultCache(10)) + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.rc = NewReplicaChecker(suite.cluster, cache.NewDefaultCache(10)) stats := &pdpb.StoreStats{ Capacity: 100, Available: 100, @@ -88,12 +85,16 @@ func (s *testReplicaCheckerSuite) SetUpTest(c *C) { ), } for _, store := range stores { - s.cluster.PutStore(store) + suite.cluster.PutStore(store) } - s.cluster.AddLabelsStore(2, 1, map[string]string{"noleader": "true"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"noleader": "true"}) +} + +func (suite *replicaCheckerTestSuite) TearDownTest() { + suite.cancel() } -func (s *testReplicaCheckerSuite) TestReplacePendingPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestReplacePendingPeer() { peers := []*metapb.Peer{ { Id: 2, @@ -109,16 +110,16 @@ func (s *testReplicaCheckerSuite) TestReplacePendingPeer(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: peers}, peers[1], core.WithPendingPeers(peers[0:1])) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(1).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(2).(operator.RemovePeer).FromStore, Equals, uint64(1)) + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(1), op.Step(2).(operator.RemovePeer).FromStore) } -func (s *testReplicaCheckerSuite) TestReplaceOfflinePeer(c *C) { - s.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ +func (suite *replicaCheckerTestSuite) TestReplaceOfflinePeer() { + suite.cluster.SetLabelPropertyConfig(config.LabelPropertyConfig{ config.RejectLeader: {{Key: "noleader", Value: "true"}}, }) peers := []*metapb.Peer{ @@ -136,17 +137,17 @@ func (s *testReplicaCheckerSuite) TestReplaceOfflinePeer(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(3)) - c.Assert(op.Step(1).(operator.AddLearner).ToStore, Equals, 
uint64(4)) - c.Assert(op.Step(2).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(3).(operator.RemovePeer).FromStore, Equals, uint64(1)) + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal(uint64(3), op.Step(0).(operator.TransferLeader).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(2).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(1), op.Step(3).(operator.RemovePeer).FromStore) } -func (s *testReplicaCheckerSuite) TestOfflineWithOneReplica(c *C) { - s.cluster.SetMaxReplicas(1) +func (suite *replicaCheckerTestSuite) TestOfflineWithOneReplica() { + suite.cluster.SetMaxReplicas(1) peers := []*metapb.Peer{ { Id: 4, @@ -154,27 +155,27 @@ func (s *testReplicaCheckerSuite) TestOfflineWithOneReplica(c *C) { }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-offline-replica") + suite.cluster.PutRegion(r) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-offline-replica", op.Desc()) } -func (s *testReplicaCheckerSuite) TestDownPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestDownPeer() { // down a peer, the number of normal peers(except learner) is enough. - op := s.downPeerAndCheck(c, metapb.PeerRole_Voter) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-extra-down-replica") + op := suite.downPeerAndCheck(metapb.PeerRole_Voter) + suite.NotNil(op) + suite.Equal("remove-extra-down-replica", op.Desc()) // down a peer,the number of peers(except learner) is not enough. - op = s.downPeerAndCheck(c, metapb.PeerRole_Learner) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-down-replica") + op = suite.downPeerAndCheck(metapb.PeerRole_Learner) + suite.NotNil(op) + suite.Equal("replace-down-replica", op.Desc()) } -func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRole) *operator.Operator { - s.cluster.SetMaxReplicas(2) - s.cluster.SetStoreUp(1) +func (suite *replicaCheckerTestSuite) downPeerAndCheck(aliveRole metapb.PeerRole) *operator.Operator { + suite.cluster.SetMaxReplicas(2) + suite.cluster.SetStoreUp(1) downStoreID := uint64(3) peers := []*metapb.Peer{ { @@ -192,8 +193,8 @@ func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRo }, } r := core.NewRegionInfo(&metapb.Region{Id: 2, Peers: peers}, peers[0]) - s.cluster.PutRegion(r) - s.cluster.SetStoreDown(downStoreID) + suite.cluster.PutRegion(r) + suite.cluster.SetStoreDown(downStoreID) downPeer := &pdpb.PeerStats{ Peer: &metapb.Peer{ Id: 14, @@ -202,13 +203,13 @@ func (s *testReplicaCheckerSuite) downPeerAndCheck(c *C, aliveRole metapb.PeerRo DownSeconds: 24 * 60 * 60, } r = r.Clone(core.WithDownPeers(append(r.GetDownPeers(), downPeer))) - c.Assert(r.GetDownPeers(), HasLen, 1) - return s.rc.Check(r) + suite.Len(r.GetDownPeers(), 1) + return suite.rc.Check(r) } -func (s *testReplicaCheckerSuite) TestBasic(c *C) { +func (suite *replicaCheckerTestSuite) TestBasic() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetMaxSnapshotCount(2) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -223,41 +224,41 @@ func (s *testReplicaCheckerSuite) TestBasic(c *C) { // Region has 2 peers, we need to add a new peer. 
region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Disable make up replica feature. tc.SetEnableMakeUpReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableMakeUpReplica(true) // Test healthFilter. // If store 4 is down, we add to store 3. tc.SetStoreDown(4) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) tc.SetStoreUp(4) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Test snapshotCountFilter. // If snapshotCount > MaxSnapshotCount, we add to store 3. tc.UpdateSnapshotCount(4, 3) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) // If snapshotCount < MaxSnapshotCount, we can add peer again. tc.UpdateSnapshotCount(4, 1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Add peer in store 4, and we have enough replicas. peer4, _ := tc.AllocPeer(4) region = region.Clone(core.WithAddPeer(peer4)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Add peer in store 3, and we have redundant replicas. peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) - testutil.CheckRemovePeer(c, rc.Check(region), 1) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 1) // Disable remove extra replica feature. tc.SetEnableRemoveExtraReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableRemoveExtraReplica(true) region = region.Clone(core.WithRemoveStorePeer(1), core.WithLeader(region.GetStorePeer(3))) @@ -270,18 +271,18 @@ func (s *testReplicaCheckerSuite) TestBasic(c *C) { } region = region.Clone(core.WithDownPeers(append(region.GetDownPeers(), downPeer))) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 1) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 1) region = region.Clone(core.WithDownPeers(nil)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Peer in store 3 is offline, transfer peer to store 1. 
tc.SetStoreOffline(3) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 1) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 1) } -func (s *testReplicaCheckerSuite) TestLostStore(c *C) { +func (suite *replicaCheckerTestSuite) TestLostStore() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 1) @@ -295,12 +296,12 @@ func (s *testReplicaCheckerSuite) TestLostStore(c *C) { tc.AddLeaderRegion(1, 1, 2, 3) region := tc.GetRegion(1) op := rc.Check(region) - c.Assert(op, IsNil) + suite.Nil(op) } -func (s *testReplicaCheckerSuite) TestOffline(c *C) { +func (suite *replicaCheckerTestSuite) TestOffline() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) @@ -316,43 +317,43 @@ func (s *testReplicaCheckerSuite) TestOffline(c *C) { region := tc.GetRegion(1) // Store 2 has different zone and smallest region score. - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 3 has different zone and smallest region score. - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) peer3, _ := tc.AllocPeer(3) region = region.Clone(core.WithAddPeer(peer3)) // Store 4 has the same zone with store 3 and larger region score. peer4, _ := tc.AllocPeer(4) region = region.Clone(core.WithAddPeer(peer4)) - testutil.CheckRemovePeer(c, rc.Check(region), 4) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 4) // Test offline // the number of region peers more than the maxReplicas // remove the peer tc.SetStoreOffline(3) - testutil.CheckRemovePeer(c, rc.Check(region), 3) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 3) region = region.Clone(core.WithRemoveStorePeer(4)) // the number of region peers equals the maxReplicas // Transfer peer to store 4. - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) // Store 5 has a same label score with store 4, but the region score smaller than store 4, we will choose store 5. 
tc.AddLabelsStore(5, 3, map[string]string{"zone": "z4", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 5) // Store 5 has too many snapshots, choose store 4 tc.UpdateSnapshotCount(5, 100) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 3, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3, 4) tc.UpdatePendingPeerCount(4, 100) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestDistinctScore(c *C) { +func (suite *replicaCheckerTestSuite) TestDistinctScore() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(3) tc.SetLocationLabels([]string{"zone", "rack", "host"}) @@ -365,73 +366,73 @@ func (s *testReplicaCheckerSuite) TestDistinctScore(c *C) { // We need 3 replicas. tc.AddLeaderRegion(1, 1) region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) peer2, _ := tc.AllocPeer(2) region = region.Clone(core.WithAddPeer(peer2)) // Store 1,2,3 have the same zone, rack, and host. tc.AddLabelsStore(3, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 3) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 3) // Store 4 has smaller region score. tc.AddLabelsStore(4, 4, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // Store 5 has a different host. tc.AddLabelsStore(5, 5, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 5) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 5) // Store 6 has a different rack. tc.AddLabelsStore(6, 6, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) // Store 7 has a different zone. tc.AddLabelsStore(7, 7, map[string]string{"zone": "z2", "rack": "r1", "host": "h1"}) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 7) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Test stateFilter. tc.SetStoreOffline(7) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) tc.SetStoreUp(7) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 7) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 7) // Add peer to store 7. peer7, _ := tc.AllocPeer(7) region = region.Clone(core.WithAddPeer(peer7)) // Replace peer in store 1 with store 6 because it has a different rack. 
- testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 6) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 6) // Disable locationReplacement feature. tc.SetEnableLocationReplacement(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetEnableLocationReplacement(true) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckRemovePeer(c, rc.Check(region), 1) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 1) region = region.Clone(core.WithRemoveStorePeer(1), core.WithLeader(region.GetStorePeer(2))) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Store 8 has the same zone and different rack with store 7. // Store 1 has the same zone and different rack with store 6. // So store 8 and store 1 are equivalent. tc.AddLabelsStore(8, 1, map[string]string{"zone": "z2", "rack": "r2", "host": "h1"}) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) // Store 10 has a different zone. // Store 2 and 6 have the same distinct score, but store 2 has larger region score. // So replace peer in store 2 with store 10. tc.AddLabelsStore(10, 1, map[string]string{"zone": "z3", "rack": "r1", "host": "h1"}) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 10) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 10) peer10, _ := tc.AllocPeer(10) region = region.Clone(core.WithAddPeer(peer10)) - testutil.CheckRemovePeer(c, rc.Check(region), 2) + testutil.CheckRemovePeerWithTestify(suite.Require(), rc.Check(region), 2) region = region.Clone(core.WithRemoveStorePeer(2)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestDistinctScore2(c *C) { +func (suite *replicaCheckerTestSuite) TestDistinctScore2() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetMaxReplicas(5) tc.SetLocationLabels([]string{"zone", "host"}) @@ -448,20 +449,20 @@ func (s *testReplicaCheckerSuite) TestDistinctScore2(c *C) { tc.AddLeaderRegion(1, 1, 2, 4) region := tc.GetRegion(1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 6) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 6) peer6, _ := tc.AllocPeer(6) region = region.Clone(core.WithAddPeer(peer6)) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 5) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 5) peer5, _ := tc.AllocPeer(5) region = region.Clone(core.WithAddPeer(peer5)) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } -func (s *testReplicaCheckerSuite) TestStorageThreshold(c *C) { +func (suite *replicaCheckerTestSuite) TestStorageThreshold() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetLocationLabels([]string{"zone"}) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -480,24 +481,24 @@ func (s *testReplicaCheckerSuite) TestStorageThreshold(c *C) { // Move peer to better location. 
tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.9, 0.1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.AddLeaderRegion(2, 1, 3) region = tc.GetRegion(2) // Add peer on store4. tc.UpdateStorageRatio(4, 0, 1) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 4) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 4) // If store4 is almost full, do not add peer on it. tc.UpdateStorageRatio(4, 0.8, 0) - testutil.CheckAddPeer(c, rc.Check(region), operator.OpReplica, 2) + testutil.CheckAddPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2) } -func (s *testReplicaCheckerSuite) TestOpts(c *C) { +func (suite *replicaCheckerTestSuite) TestOpts() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -518,17 +519,17 @@ func (s *testReplicaCheckerSuite) TestOpts(c *C) { })) tc.SetStoreOffline(2) // RemoveDownReplica has higher priority than replaceOfflineReplica. - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 1, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 1, 4) tc.SetEnableRemoveDownReplica(false) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpReplica, 2, 4) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpReplica, 2, 4) tc.SetEnableReplaceOfflineReplica(false) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testReplicaCheckerSuite) TestFixDownPeer(c *C) { +func (suite *replicaCheckerTestSuite) TestFixDownPeer() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.SetLocationLabels([]string{"zone"}) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -541,25 +542,25 @@ func (s *testReplicaCheckerSuite) TestFixDownPeer(c *C) { tc.AddLeaderRegion(1, 1, 3, 4) region := tc.GetRegion(1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetStoreDown(4) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreDown(5) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testReplicaCheckerSuite) TestFixOfflinePeer(c *C) { +func (suite *replicaCheckerTestSuite) TestFixOfflinePeer() { opt := config.NewTestOptions() - tc := mockcluster.NewCluster(s.ctx, opt) + tc := mockcluster.NewCluster(suite.ctx, opt) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) 
tc.SetLocationLabels([]string{"zone"}) rc := NewReplicaChecker(tc, cache.NewDefaultCache(10)) @@ -572,14 +573,14 @@ func (s *testReplicaCheckerSuite) TestFixOfflinePeer(c *C) { tc.AddLeaderRegion(1, 1, 3, 4) region := tc.GetRegion(1) - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) tc.SetStoreOffline(4) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 5) tc.SetStoreOffline(5) - testutil.CheckTransferPeer(c, rc.Check(region), operator.OpRegion, 4, 2) + testutil.CheckTransferPeerWithTestify(suite.Require(), rc.Check(region), operator.OpRegion, 4, 2) tc.SetIsolationLevel("zone") - c.Assert(rc.Check(region), IsNil) + suite.Nil(rc.Check(region)) } diff --git a/server/schedule/checker/rule_checker_test.go b/server/schedule/checker/rule_checker_test.go index f3a908939bf3..0fc70b410db9 100644 --- a/server/schedule/checker/rule_checker_test.go +++ b/server/schedule/checker/rule_checker_test.go @@ -16,11 +16,12 @@ package checker import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/suite" "github.com/tikv/pd/pkg/cache" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/testutil" @@ -31,10 +32,8 @@ import ( "github.com/tikv/pd/server/versioninfo" ) -var _ = Suite(&testRuleCheckerSuite{}) -var _ = SerialSuites(&testRuleCheckerSerialSuite{}) - -type testRuleCheckerSerialSuite struct { +type ruleCheckerTestSuite struct { + suite.Suite cluster *mockcluster.Cluster ruleManager *placement.RuleManager rc *RuleChecker @@ -42,25 +41,28 @@ type testRuleCheckerSerialSuite struct { cancel context.CancelFunc } -func (s *testRuleCheckerSerialSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testRuleCheckerSerialSuite) TearDownTest(c *C) { - s.cancel() +func TestRuleCheckerSuite(t *testing.T) { + suite.Run(t, new(ruleCheckerTestSuite)) + suite.Run(t, new(ruleCheckerCacheTestSuite)) } -func (s *testRuleCheckerSerialSuite) SetUpTest(c *C) { +func (suite *ruleCheckerTestSuite) SetUpTest() { cfg := config.NewTestOptions() cfg.SetPlacementRulesCacheEnabled(true) - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.cluster.SetEnablePlacementRules(true) - s.ruleManager = s.cluster.RuleManager - s.rc = NewRuleChecker(s.cluster, s.ruleManager, cache.NewDefaultCache(10)) + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster.SetEnablePlacementRules(true) + suite.ruleManager = suite.cluster.RuleManager + suite.rc = NewRuleChecker(suite.cluster, suite.ruleManager, cache.NewDefaultCache(10)) +} + +func (suite *ruleCheckerTestSuite) TearDownTest() { + suite.cancel() } -type testRuleCheckerSuite struct { +type ruleCheckerCacheTestSuite struct { + suite.Suite cluster *mockcluster.Cluster ruleManager *placement.RuleManager rc *RuleChecker @@ -68,42 +70,39 @@ type testRuleCheckerSuite struct { cancel context.CancelFunc } -func (s *testRuleCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testRuleCheckerSuite) TearDownTest(c *C) { - s.cancel() +func (suite 
*ruleCheckerCacheTestSuite) SetupTest() { + cfg := config.NewTestOptions() + suite.ctx, suite.cancel = context.WithCancel(context.Background()) + suite.cluster = mockcluster.NewCluster(suite.ctx, cfg) + suite.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) + suite.cluster.SetEnablePlacementRules(true) + suite.ruleManager = suite.cluster.RuleManager + suite.rc = NewRuleChecker(suite.cluster, suite.ruleManager, cache.NewDefaultCache(10)) } -func (s *testRuleCheckerSuite) SetUpTest(c *C) { - cfg := config.NewTestOptions() - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.cluster.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - s.cluster.SetEnablePlacementRules(true) - s.ruleManager = s.cluster.RuleManager - s.rc = NewRuleChecker(s.cluster, s.ruleManager, cache.NewDefaultCache(10)) +func (suite *ruleCheckerCacheTestSuite) TearDownTest() { + suite.cancel() } -func (s *testRuleCheckerSuite) TestAddRulePeer(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(3)) +func (suite *ruleCheckerCacheTestSuite) TestAddRulePeer() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.Equal(uint64(3), op.Step(0).(operator.AddLearner).ToStore) } -func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestAddRulePeerWithIsolationLevel() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "rack": "r1", "host": "h2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "rack": "r2", "host": "h1"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z1", "rack": "r3", "host": "h1"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -113,10 +112,10 @@ func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { LocationLabels: []string{"zone", "rack", "host"}, IsolationLevel: "zone", }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) - s.ruleManager.SetRule(&placement.Rule{ + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, 
@@ -126,75 +125,75 @@ func (s *testRuleCheckerSuite) TestAddRulePeerWithIsolationLevel(c *C) { LocationLabels: []string{"zone", "rack", "host"}, IsolationLevel: "rack", }) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) } -func (s *testRuleCheckerSuite) TestFixPeer(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.SetStoreDown(2) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerCacheTestSuite) TestFixPeer() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.SetStoreDown(2) + r := suite.cluster.GetRegion(1) r = r.Clone(core.WithDownPeers([]*pdpb.PeerStats{{Peer: r.GetStorePeer(2), DownSeconds: 60000}})) - op = s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-down-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) + op = suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-rule-down-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) var add operator.AddLearner - c.Assert(op.Step(0), FitsTypeOf, add) - s.cluster.SetStoreUp(2) - s.cluster.SetStoreOffline(2) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0), FitsTypeOf, add) - - s.cluster.SetStoreUp(2) + suite.IsType(add, op.Step(0)) + suite.cluster.SetStoreUp(2) + suite.cluster.SetStoreOffline(2) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.IsType(add, op.Step(0)) + + suite.cluster.SetStoreUp(2) // leader store offline - s.cluster.SetStoreOffline(1) - r1 := s.cluster.GetRegion(1) + suite.cluster.SetStoreOffline(1) + r1 := suite.cluster.GetRegion(1) nr1 := r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(3)})) - s.cluster.PutRegion(nr1) + suite.cluster.PutRegion(nr1) hasTransferLeader := false for i := 0; i < 100; i++ { - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) if step, ok := op.Step(0).(operator.TransferLeader); ok { - c.Assert(step.FromStore, Equals, uint64(1)) - c.Assert(step.ToStore, Not(Equals), uint64(3)) + suite.Equal(uint64(1), step.FromStore) + suite.NotEqual(uint64(3), step.ToStore) hasTransferLeader = true } } - c.Assert(hasTransferLeader, IsTrue) + suite.True(hasTransferLeader) } -func (s *testRuleCheckerSuite) TestFixOrphanPeers(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, 
NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(4)) +func (suite *ruleCheckerCacheTestSuite) TestFixOrphanPeers() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.RemovePeer).FromStore) } -func (s *testRuleCheckerSuite) TestFixOrphanPeers2(c *C) { +func (suite *ruleCheckerCacheTestSuite) TestFixOrphanPeers2() { // check orphan peers can only be handled when all rules are satisfied. - s.cluster.AddLabelsStore(1, 1, map[string]string{"foo": "bar"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"foo": "bar"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"foo": "baz"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) - s.ruleManager.SetRule(&placement.Rule{ + suite.cluster.AddLabelsStore(1, 1, map[string]string{"foo": "bar"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"foo": "bar"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"foo": "baz"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -205,32 +204,32 @@ func (s *testRuleCheckerSuite) TestFixOrphanPeers2(c *C) { {Key: "foo", Op: "in", Values: []string{"baz"}}, }, }) - s.cluster.SetStoreDown(2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + suite.cluster.SetStoreDown(2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestFixRole(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 2, 1, 3) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerCacheTestSuite) TestFixRole() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 2, 1, 3) + r := suite.cluster.GetRegion(1) p := r.GetStorePeer(1) p.Role = metapb.PeerRole_Learner r = r.Clone(core.WithLearners([]*metapb.Peer{p})) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-peer-role") - c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(1)) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("fix-peer-role", op.Desc()) + suite.Equal(uint64(1), op.Step(0).(operator.PromoteLearner).ToStore) } -func (s *testRuleCheckerSuite) TestFixRoleLeader(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"role": "voter"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestFixRoleLeader() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"role": "voter"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -241,7 +240,7 @@ func (s *testRuleCheckerSuite) 
TestFixRoleLeader(c *C) { {Key: "role", Op: "in", Values: []string{"voter"}}, }, }) - s.ruleManager.SetRule(&placement.Rule{ + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r2", Index: 101, @@ -251,17 +250,17 @@ func (s *testRuleCheckerSuite) TestFixRoleLeader(c *C) { {Key: "role", Op: "in", Values: []string{"follower"}}, }, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-follower-role") - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(3)) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("fix-follower-role", op.Desc()) + suite.Equal(uint64(3), op.Step(0).(operator.TransferLeader).ToStore) } -func (s *testRuleCheckerSuite) TestFixRoleLeaderIssue3130(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"role": "leader"}) - s.cluster.AddLeaderRegion(1, 1, 2) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestFixRoleLeaderIssue3130() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"role": "follower"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"role": "leader"}) + suite.cluster.AddLeaderRegion(1, 1, 2) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "r1", Index: 100, @@ -272,30 +271,30 @@ func (s *testRuleCheckerSuite) TestFixRoleLeaderIssue3130(c *C) { {Key: "role", Op: "in", Values: []string{"leader"}}, }, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-leader-role") - c.Assert(op.Step(0).(operator.TransferLeader).ToStore, Equals, uint64(2)) - - s.cluster.SetStoreBusy(2, true) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - s.cluster.SetStoreBusy(2, false) - - s.cluster.AddLeaderRegion(1, 2, 1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(1)) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("fix-leader-role", op.Desc()) + suite.Equal(uint64(2), op.Step(0).(operator.TransferLeader).ToStore) + + suite.cluster.SetStoreBusy(2, true) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + suite.cluster.SetStoreBusy(2, false) + + suite.cluster.AddLeaderRegion(1, 2, 1) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(1), op.Step(0).(operator.RemovePeer).FromStore) } -func (s *testRuleCheckerSuite) TestBetterReplacement(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host3"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestBetterReplacement() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host3"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", 
Index: 100, @@ -304,22 +303,22 @@ func (s *testRuleCheckerSuite) TestBetterReplacement(c *C) { Count: 3, LocationLabels: []string{"host"}, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "move-to-better-location") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("move-to-better-location", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestBetterReplacement2(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "host2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "host": "host3"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2", "host": "host1"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestBetterReplacement2() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1", "host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1", "host": "host2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1", "host": "host3"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z2", "host": "host1"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -328,21 +327,21 @@ func (s *testRuleCheckerSuite) TestBetterReplacement2(c *C) { Count: 3, LocationLabels: []string{"zone", "host"}, }) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "move-to-better-location") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("move-to-better-location", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 3, 4) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestNoBetterReplacement(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestNoBetterReplacement() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + suite.ruleManager.SetRule(&placement.Rule{ GroupID: "pd", ID: "test", Index: 100, @@ -351,72 +350,72 @@ func (s *testRuleCheckerSuite) TestNoBetterReplacement(c *C) { Count: 3, LocationLabels: []string{"host"}, }) - op := 
s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) } -func (s *testRuleCheckerSuite) TestIssue2419(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - s.cluster.AddLeaderStore(4, 1) - s.cluster.SetStoreOffline(3) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - r := s.cluster.GetRegion(1) +func (suite *ruleCheckerCacheTestSuite) TestIssue2419() { + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + suite.cluster.AddLeaderStore(4, 1) + suite.cluster.SetStoreOffline(3) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + r := suite.cluster.GetRegion(1) r = r.Clone(core.WithAddPeer(&metapb.Peer{Id: 5, StoreId: 4, Role: metapb.PeerRole_Learner})) - op := s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") - c.Assert(op.Step(0).(operator.RemovePeer).FromStore, Equals, uint64(4)) + op := suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("remove-orphan-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.RemovePeer).FromStore) r = r.Clone(core.WithRemoveStorePeer(4)) - op = s.rc.Check(r) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(1).(operator.PromoteLearner).ToStore, Equals, uint64(4)) - c.Assert(op.Step(2).(operator.RemovePeer).FromStore, Equals, uint64(3)) + op = suite.rc.Check(r) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) + suite.Equal(uint64(4), op.Step(0).(operator.AddLearner).ToStore) + suite.Equal(uint64(4), op.Step(1).(operator.PromoteLearner).ToStore) + suite.Equal(uint64(3), op.Step(2).(operator.RemovePeer).FromStore) } // Ref https://github.com/tikv/pd/issues/3521 // The problem is when offline a store, we may add learner multiple times if // the operator is timeout. 
-func (s *testRuleCheckerSuite) TestPriorityFixOrphanPeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) +func (suite *ruleCheckerCacheTestSuite) TestPriorityFixOrphanPeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) var add operator.AddLearner var remove operator.RemovePeer - s.cluster.SetStoreOffline(2) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Step(0), FitsTypeOf, add) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") - r := s.cluster.GetRegion(1).Clone(core.WithAddPeer( + suite.cluster.SetStoreOffline(2) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.IsType(add, op.Step(0)) + suite.Equal("replace-rule-offline-peer", op.Desc()) + r := suite.cluster.GetRegion(1).Clone(core.WithAddPeer( &metapb.Peer{ Id: 5, StoreId: 4, Role: metapb.PeerRole_Learner, })) - s.cluster.PutRegion(r) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + suite.cluster.PutRegion(r) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestIssue3293(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - err := s.ruleManager.SetRule(&placement.Rule{ +func (suite *ruleCheckerCacheTestSuite) TestIssue3293() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"host": "host5"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + err := suite.ruleManager.SetRule(&placement.Rule{ GroupID: "TiDB_DDL_51", ID: "0", Role: placement.Follower, @@ -431,26 +430,26 @@ func (s *testRuleCheckerSuite) TestIssue3293(c *C) { }, }, }) - c.Assert(err, IsNil) - s.cluster.DeleteStore(s.cluster.GetStore(5)) - err = s.ruleManager.SetRule(&placement.Rule{ + suite.NoError(err) + suite.cluster.DeleteStore(suite.cluster.GetStore(5)) + err = suite.ruleManager.SetRule(&placement.Rule{ GroupID: "TiDB_DDL_51", ID: "default", Role: placement.Voter, Count: 3, }) - 
c.Assert(err, IsNil) - err = s.ruleManager.DeleteRule("pd", "default") - c.Assert(err, IsNil) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") + suite.NoError(err) + err = suite.ruleManager.DeleteRule("pd", "default") + suite.NoError(err) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestIssue3299(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"dc": "sh"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) +func (suite *ruleCheckerCacheTestSuite) TestIssue3299() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"dc": "sh"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) testCases := []struct { constraints []placement.LabelConstraint @@ -524,7 +523,7 @@ func (s *testRuleCheckerSuite) TestIssue3299(c *C) { } for _, t := range testCases { - err := s.ruleManager.SetRule(&placement.Rule{ + err := suite.ruleManager.SetRule(&placement.Rule{ GroupID: "p", ID: "0", Role: placement.Follower, @@ -532,21 +531,21 @@ func (s *testRuleCheckerSuite) TestIssue3299(c *C) { LabelConstraints: t.constraints, }) if t.err != "" { - c.Assert(err, ErrorMatches, t.err) + suite.Regexp(t.err, err.Error()) } else { - c.Assert(err, IsNil) + suite.NoError(err) } } } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testRuleCheckerSuite) TestFixDownPeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerCacheTestSuite) TestFixDownPeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -556,33 +555,33 @@ func (s *testRuleCheckerSuite) TestFixDownPeer(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) - c.Assert(s.rc.Check(region), IsNil) + region := suite.cluster.GetRegion(1) + suite.Nil(suite.rc.Check(region)) - s.cluster.SetStoreDown(4) + suite.cluster.SetStoreDown(4) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ {Peer: region.GetStorePeer(4), DownSeconds: 6000}, })) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) - s.cluster.SetStoreDown(5) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 2) + suite.cluster.SetStoreDown(5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" - s.ruleManager.SetRule(rule) - c.Assert(s.rc.Check(region), IsNil) + suite.ruleManager.SetRule(rule) + 
suite.Nil(suite.rc.Check(region)) } // See issue: https://github.com/tikv/pd/issues/3705 -func (s *testRuleCheckerSuite) TestFixOfflinePeer(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerCacheTestSuite) TestFixOfflinePeer() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -592,30 +591,30 @@ func (s *testRuleCheckerSuite) TestFixOfflinePeer(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) - c.Assert(s.rc.Check(region), IsNil) + region := suite.cluster.GetRegion(1) + suite.Nil(suite.rc.Check(region)) - s.cluster.SetStoreOffline(4) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 5) + suite.cluster.SetStoreOffline(4) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 5) - s.cluster.SetStoreOffline(5) - testutil.CheckTransferPeer(c, s.rc.Check(region), operator.OpRegion, 4, 2) + suite.cluster.SetStoreOffline(5) + testutil.CheckTransferPeerWithTestify(suite.Require(), suite.rc.Check(region), operator.OpRegion, 4, 2) rule.IsolationLevel = "zone" - s.ruleManager.SetRule(rule) - c.Assert(s.rc.Check(region), IsNil) + suite.ruleManager.SetRule(rule) + suite.Nil(suite.rc.Check(region)) } -func (s *testRuleCheckerSerialSuite) TestRuleCache(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) - s.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) - s.cluster.AddRegionStore(999, 1) - s.cluster.AddLeaderRegion(1, 1, 3, 4) +func (suite *ruleCheckerTestSuite) TestRuleCache() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddLabelsStore(5, 1, map[string]string{"zone": "z3"}) + suite.cluster.AddRegionStore(999, 1) + suite.cluster.AddLeaderRegion(1, 1, 3, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -625,10 +624,10 @@ func (s *testRuleCheckerSerialSuite) TestRuleCache(c *C) { Count: 3, LocationLabels: []string{"zone"}, } - s.ruleManager.SetRule(rule) - region := s.cluster.GetRegion(1) + suite.ruleManager.SetRule(rule) + region := suite.cluster.GetRegion(1) region = region.Clone(core.WithIncConfVer(), core.WithIncVersion()) - c.Assert(s.rc.Check(region), IsNil) + suite.Nil(suite.rc.Check(region)) testcases := []struct { name string @@ -669,35 +668,35 @@ func (s *testRuleCheckerSerialSuite) TestRuleCache(c 
*C) { }, } for _, testcase := range testcases { - c.Log(testcase.name) + suite.T().Log(testcase.name) if testcase.stillCached { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldCache", "return(true)"), IsNil) - s.rc.Check(testcase.region) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldCache"), IsNil) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldCache", "return(true)")) + suite.rc.Check(testcase.region) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldCache")) } else { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache", "return(true)"), IsNil) - s.rc.Check(testcase.region) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache"), IsNil) + suite.NoError(failpoint.Enable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache", "return(true)")) + suite.rc.Check(testcase.region) + suite.NoError(failpoint.Disable("github.com/tikv/pd/server/schedule/checker/assertShouldNotCache")) } } } // Ref https://github.com/tikv/pd/issues/4045 -func (s *testRuleCheckerSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDown(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) +func (suite *ruleCheckerCacheTestSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDown() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) // set peer3 and peer4 to pending - r1 := s.cluster.GetRegion(1) + r1 := suite.cluster.GetRegion(1) r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(3), r1.GetStorePeer(4)})) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should not remove extra peer - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) // set peer3 to down-peer r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetStorePeer(4)})) @@ -707,39 +706,39 @@ func (s *testRuleCheckerSuite) TestSkipFixOrphanPeerIfSelectedPeerisPendingOrDow DownSeconds: 42, }, })) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should not remove extra peer - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) // set peer3 to normal r1 = r1.Clone(core.WithDownPeers(nil)) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) // should remove extra peer now var remove operator.RemovePeer - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestPriorityFitHealthPeers(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) - s.cluster.AddLabelsStore(2, 1, 
map[string]string{"host": "host2"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) - r1 := s.cluster.GetRegion(1) +func (suite *ruleCheckerCacheTestSuite) TestPriorityFitHealthPeers() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"host": "host1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"host": "host2"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"host": "host3"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"host": "host4"}) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2, 3, 4) + r1 := suite.cluster.GetRegion(1) // set peer3 to pending r1 = r1.Clone(core.WithPendingPeers([]*metapb.Peer{r1.GetPeer(3)})) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) var remove operator.RemovePeer - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) // set peer3 to down r1 = r1.Clone(core.WithDownPeers([]*pdpb.PeerStats{ @@ -749,18 +748,18 @@ func (s *testRuleCheckerSuite) TestPriorityFitHealthPeers(c *C) { }, })) r1 = r1.Clone(core.WithPendingPeers(nil)) - s.cluster.PutRegion(r1) + suite.cluster.PutRegion(r1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op.Step(0), FitsTypeOf, remove) - c.Assert(op.Desc(), Equals, "remove-orphan-peer") + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.IsType(remove, op.Step(0)) + suite.Equal("remove-orphan-peer", op.Desc()) } // Ref https://github.com/tikv/pd/issues/4140 -func (s *testRuleCheckerSuite) TestDemoteVoter(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) - region := s.cluster.AddLeaderRegion(1, 1, 4) +func (suite *ruleCheckerCacheTestSuite) TestDemoteVoter() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) + region := suite.cluster.AddLeaderRegion(1, 1, 4) rule := &placement.Rule{ GroupID: "pd", ID: "test", @@ -787,57 +786,57 @@ func (s *testRuleCheckerSuite) TestDemoteVoter(c *C) { }, }, } - s.ruleManager.SetRule(rule) - s.ruleManager.SetRule(rule2) - s.ruleManager.DeleteRule("pd", "default") - op := s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "fix-demote-voter") + suite.ruleManager.SetRule(rule) + suite.ruleManager.SetRule(rule2) + suite.ruleManager.DeleteRule("pd", "default") + op := suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("fix-demote-voter", op.Desc()) } -func (s *testRuleCheckerSuite) TestOfflineAndDownStore(c *C) { - s.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z4"}) - s.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1"}) - s.cluster.AddLabelsStore(4, 1, map[string]string{"zone": "z4"}) - region := s.cluster.AddLeaderRegion(1, 1, 2, 3) - op := s.rc.Check(region) - c.Assert(op, IsNil) +func (suite *ruleCheckerCacheTestSuite) TestOfflineAndDownStore() { + suite.cluster.AddLabelsStore(1, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(2, 1, map[string]string{"zone": "z4"}) + suite.cluster.AddLabelsStore(3, 1, map[string]string{"zone": "z1"}) + suite.cluster.AddLabelsStore(4, 1, 
map[string]string{"zone": "z4"}) + region := suite.cluster.AddLeaderRegion(1, 1, 2, 3) + op := suite.rc.Check(region) + suite.Nil(op) // assert rule checker should generate replace offline peer operator after cached - s.cluster.SetStoreOffline(1) - op = s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-offline-peer") + suite.cluster.SetStoreOffline(1) + op = suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("replace-rule-offline-peer", op.Desc()) // re-cache the regionFit - s.cluster.SetStoreUp(1) - op = s.rc.Check(region) - c.Assert(op, IsNil) + suite.cluster.SetStoreUp(1) + op = suite.rc.Check(region) + suite.Nil(op) // assert rule checker should generate replace down peer operator after cached - s.cluster.SetStoreDown(2) + suite.cluster.SetStoreDown(2) region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{{Peer: region.GetStorePeer(2), DownSeconds: 60000}})) - op = s.rc.Check(region) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "replace-rule-down-peer") + op = suite.rc.Check(region) + suite.NotNil(op) + suite.Equal("replace-rule-down-peer", op.Desc()) } -func (s *testRuleCheckerSuite) TestPendingList(c *C) { +func (suite *ruleCheckerCacheTestSuite) TestPendingList() { // no enough store - s.cluster.AddLeaderStore(1, 1) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) - op := s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, IsNil) - _, exist := s.rc.pendingList.Get(1) - c.Assert(exist, IsTrue) + suite.cluster.AddLeaderStore(1, 1) + suite.cluster.AddLeaderRegionWithRange(1, "", "", 1, 2) + op := suite.rc.Check(suite.cluster.GetRegion(1)) + suite.Nil(op) + _, exist := suite.rc.pendingList.Get(1) + suite.True(exist) // add more stores - s.cluster.AddLeaderStore(2, 1) - s.cluster.AddLeaderStore(3, 1) - op = s.rc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Desc(), Equals, "add-rule-peer") - c.Assert(op.GetPriorityLevel(), Equals, core.HighPriority) - c.Assert(op.Step(0).(operator.AddLearner).ToStore, Equals, uint64(3)) - _, exist = s.rc.pendingList.Get(1) - c.Assert(exist, IsFalse) + suite.cluster.AddLeaderStore(2, 1) + suite.cluster.AddLeaderStore(3, 1) + op = suite.rc.Check(suite.cluster.GetRegion(1)) + suite.NotNil(op) + suite.Equal("add-rule-peer", op.Desc()) + suite.Equal(core.HighPriority, op.GetPriorityLevel()) + suite.Equal(uint64(3), op.Step(0).(operator.AddLearner).ToStore) + _, exist = suite.rc.pendingList.Get(1) + suite.False(exist) } diff --git a/server/schedule/checker/split_checker_test.go b/server/schedule/checker/split_checker_test.go index 606c5953762f..957ca87bc07d 100644 --- a/server/schedule/checker/split_checker_test.go +++ b/server/schedule/checker/split_checker_test.go @@ -17,8 +17,9 @@ package checker import ( "context" "encoding/hex" + "testing" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/schedule/labeler" @@ -26,37 +27,18 @@ import ( "github.com/tikv/pd/server/schedule/placement" ) -var _ = Suite(&testSplitCheckerSuite{}) - -type testSplitCheckerSuite struct { - cluster *mockcluster.Cluster - ruleManager *placement.RuleManager - labeler *labeler.RegionLabeler - sc *SplitChecker - ctx context.Context - cancel context.CancelFunc -} - -func (s *testSplitCheckerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testSplitCheckerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testSplitCheckerSuite) SetUpTest(c *C) { +func TestSplit(t *testing.T) { + re := require.New(t) cfg := config.NewTestOptions() cfg.GetReplicationConfig().EnablePlacementRules = true - s.cluster = mockcluster.NewCluster(s.ctx, cfg) - s.ruleManager = s.cluster.RuleManager - s.labeler = s.cluster.RegionLabeler - s.sc = NewSplitChecker(s.cluster, s.ruleManager, s.labeler) -} - -func (s *testSplitCheckerSuite) TestSplit(c *C) { - s.cluster.AddLeaderStore(1, 1) - s.ruleManager.SetRule(&placement.Rule{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster := mockcluster.NewCluster(ctx, cfg) + ruleManager := cluster.RuleManager + regionLabeler := cluster.RegionLabeler + sc := NewSplitChecker(cluster, ruleManager, regionLabeler) + cluster.AddLeaderStore(1, 1) + ruleManager.SetRule(&placement.Rule{ GroupID: "test", ID: "test", StartKeyHex: "aa", @@ -64,25 +46,25 @@ func (s *testSplitCheckerSuite) TestSplit(c *C) { Role: placement.Voter, Count: 1, }) - s.cluster.AddLeaderRegionWithRange(1, "", "", 1) - op := s.sc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Len(), Equals, 1) + cluster.AddLeaderRegionWithRange(1, "", "", 1) + op := sc.Check(cluster.GetRegion(1)) + re.NotNil(op) + re.Equal(1, op.Len()) splitKeys := op.Step(0).(operator.SplitRegion).SplitKeys - c.Assert(hex.EncodeToString(splitKeys[0]), Equals, "aa") - c.Assert(hex.EncodeToString(splitKeys[1]), Equals, "cc") + re.Equal("aa", hex.EncodeToString(splitKeys[0])) + re.Equal("cc", hex.EncodeToString(splitKeys[1])) // region label has higher priority. - s.labeler.SetLabelRule(&labeler.LabelRule{ + regionLabeler.SetLabelRule(&labeler.LabelRule{ ID: "test", Labels: []labeler.RegionLabel{{Key: "test", Value: "test"}}, RuleType: labeler.KeyRange, Data: makeKeyRanges("bb", "dd"), }) - op = s.sc.Check(s.cluster.GetRegion(1)) - c.Assert(op, NotNil) - c.Assert(op.Len(), Equals, 1) + op = sc.Check(cluster.GetRegion(1)) + re.NotNil(op) + re.Equal(1, op.Len()) splitKeys = op.Step(0).(operator.SplitRegion).SplitKeys - c.Assert(hex.EncodeToString(splitKeys[0]), Equals, "bb") - c.Assert(hex.EncodeToString(splitKeys[1]), Equals, "dd") + re.Equal("bb", hex.EncodeToString(splitKeys[0])) + re.Equal("dd", hex.EncodeToString(splitKeys[1])) }