From 958d687c32389ce5a06eb1dc3303d5d440328d6e Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 13 Jun 2022 15:24:34 +0800 Subject: [PATCH] statistics: migrate test framework to testify (#5140) ref tikv/pd#4813 Signed-off-by: Ryan Leung Co-authored-by: Ti Chi Robot --- server/statistics/hot_peer_cache_test.go | 310 ++++++++++---------- server/statistics/kind_test.go | 43 ++- server/statistics/region_collection_test.go | 153 +++++----- server/statistics/store_collection_test.go | 46 ++- server/statistics/store_test.go | 18 +- server/statistics/topn_test.go | 72 ++--- 6 files changed, 314 insertions(+), 328 deletions(-) diff --git a/server/statistics/hot_peer_cache_test.go b/server/statistics/hot_peer_cache_test.go index 347e2a423d8..c021f05df3f 100644 --- a/server/statistics/hot_peer_cache_test.go +++ b/server/statistics/hot_peer_cache_test.go @@ -20,27 +20,24 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/movingaverage" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testHotPeerCache{}) - -type testHotPeerCache struct{} - -func (t *testHotPeerCache) TestStoreTimeUnsync(c *C) { +func TestStoreTimeUnsync(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Write) intervals := []uint64{120, 60} for _, interval := range intervals { region := buildRegion(Write, 3, interval) - checkAndUpdate(c, cache, region, 3) + checkAndUpdate(re, cache, region, 3) { stats := cache.RegionStats(0) - c.Assert(stats, HasLen, 3) + re.Len(stats, 3) for _, s := range stats { - c.Assert(s, HasLen, 1) + re.Len(s, 1) } } } @@ -62,7 +59,8 @@ type testCacheCase struct { actionType ActionType } -func (t *testHotPeerCache) TestCache(c *C) { +func TestCache(t *testing.T) { + re := require.New(t) tests := []*testCacheCase{ {Read, transferLeader, 3, Update}, {Read, movePeer, 4, Remove}, @@ -71,26 +69,22 @@ func (t *testHotPeerCache) TestCache(c *C) { {Write, movePeer, 4, Remove}, {Write, addReplica, 4, Remove}, } - for _, t := range tests { - testCache(c, t) - } -} - -func testCache(c *C, t *testCacheCase) { - defaultSize := map[RWType]int{ - Read: 3, // all peers - Write: 3, // all peers - } - cache := NewHotPeerCache(t.kind) - region := buildRegion(t.kind, 3, 60) - checkAndUpdate(c, cache, region, defaultSize[t.kind]) - checkHit(c, cache, region, t.kind, Add) // all peers are new - - srcStore, region := schedule(c, t.operator, region, 10) - res := checkAndUpdate(c, cache, region, t.expect) - checkHit(c, cache, region, t.kind, Update) // hit cache - if t.expect != defaultSize[t.kind] { - checkOp(c, res, srcStore, t.actionType) + for _, test := range tests { + defaultSize := map[RWType]int{ + Read: 3, // all peers + Write: 3, // all peers + } + cache := NewHotPeerCache(test.kind) + region := buildRegion(test.kind, 3, 60) + checkAndUpdate(re, cache, region, defaultSize[test.kind]) + checkHit(re, cache, region, test.kind, Add) // all peers are new + + srcStore, region := schedule(re, test.operator, region, 10) + res := checkAndUpdate(re, cache, region, test.expect) + checkHit(re, cache, region, test.kind, Update) // hit cache + if test.expect != defaultSize[test.kind] { + checkOp(re, res, srcStore, test.actionType) + } } } @@ -127,35 +121,35 @@ func updateFlow(cache *hotPeerCache, res []*HotPeerStat) []*HotPeerStat { return res } -type check func(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) +type check func(re *require.Assertions, cache *hotPeerCache, 
region *core.RegionInfo, expect ...int) (res []*HotPeerStat) -func checkAndUpdate(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdate(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, region.GetPeers()) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } // Check and update peers in the specified order that old item that he items that have not expired come first, and the items that have expired come second. // This order is also similar to the previous version. By the way the order in now version is random. -func checkAndUpdateWithOrdering(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdateWithOrdering(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, orderingPeers(cache, region)) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } -func checkAndUpdateSkipOne(c *C, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { +func checkAndUpdateSkipOne(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect ...int) (res []*HotPeerStat) { res = checkFlow(cache, region, region.GetPeers()[1:]) if len(expect) != 0 { - c.Assert(res, HasLen, expect[0]) + re.Len(res, expect[0]) } return updateFlow(cache, res) } -func checkHit(c *C, cache *hotPeerCache, region *core.RegionInfo, kind RWType, actionType ActionType) { +func checkHit(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, kind RWType, actionType ActionType) { var peers []*metapb.Peer if kind == Read { peers = []*metapb.Peer{region.GetLeader()} @@ -164,15 +158,15 @@ func checkHit(c *C, cache *hotPeerCache, region *core.RegionInfo, kind RWType, a } for _, peer := range peers { item := cache.getOldHotPeerStat(region.GetID(), peer.StoreId) - c.Assert(item, NotNil) - c.Assert(item.actionType, Equals, actionType) + re.NotNil(item) + re.Equal(actionType, item.actionType) } } -func checkOp(c *C, ret []*HotPeerStat, storeID uint64, actionType ActionType) { +func checkOp(re *require.Assertions, ret []*HotPeerStat, storeID uint64, actionType ActionType) { for _, item := range ret { if item.StoreID == storeID { - c.Assert(item.actionType, Equals, actionType) + re.Equal(actionType, item.actionType) return } } @@ -192,7 +186,7 @@ func checkIntervalSum(cache *hotPeerCache, region *core.RegionInfo) bool { } // checkIntervalSumContinuous checks whether the interval sum of the peer is continuous. 
-func checkIntervalSumContinuous(c *C, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) { +func checkIntervalSumContinuous(re *require.Assertions, intervalSums map[uint64]int, rets []*HotPeerStat, interval uint64) { for _, ret := range rets { if ret.actionType == Remove { delete(intervalSums, ret.StoreID) @@ -201,27 +195,27 @@ func checkIntervalSumContinuous(c *C, intervalSums map[uint64]int, rets []*HotPe new := int(ret.getIntervalSum() / 1000000000) if ret.source == direct { if old, ok := intervalSums[ret.StoreID]; ok { - c.Assert(new, Equals, (old+int(interval))%RegionHeartBeatReportInterval) + re.Equal((old+int(interval))%RegionHeartBeatReportInterval, new) } } intervalSums[ret.StoreID] = new } } -func schedule(c *C, operator operator, region *core.RegionInfo, targets ...uint64) (srcStore uint64, _ *core.RegionInfo) { +func schedule(re *require.Assertions, operator operator, region *core.RegionInfo, targets ...uint64) (srcStore uint64, _ *core.RegionInfo) { switch operator { case transferLeader: _, newLeader := pickFollower(region) return region.GetLeader().StoreId, region.Clone(core.WithLeader(newLeader)) case movePeer: - c.Assert(targets, HasLen, 1) + re.Len(targets, 1) index, _ := pickFollower(region) srcStore := region.GetPeers()[index].StoreId region := region.Clone(core.WithAddPeer(&metapb.Peer{Id: targets[0]*10 + 1, StoreId: targets[0]})) region = region.Clone(core.WithRemoveStorePeer(srcStore)) return srcStore, region case addReplica: - c.Assert(targets, HasLen, 1) + re.Len(targets, 1) region := region.Clone(core.WithAddPeer(&metapb.Peer{Id: targets[0]*10 + 1, StoreId: targets[0]})) return 0, region case removeReplica: @@ -307,7 +301,8 @@ func newPeers(n int, pid genID, sid genID) []*metapb.Peer { return peers } -func (t *testHotPeerCache) TestUpdateHotPeerStat(c *C) { +func TestUpdateHotPeerStat(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) // we statistic read peer info from store heartbeat rather than region heartbeat m := RegionHeartBeatReportInterval / StoreHeartBeatReportInterval @@ -315,69 +310,70 @@ func (t *testHotPeerCache) TestUpdateHotPeerStat(c *C) { // skip interval=0 newItem := &HotPeerStat{actionType: Update, thresholds: []float64{0.0, 0.0, 0.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{0.0, 0.0, 0.0}, 0) - c.Check(newItem, IsNil) + re.Nil(newItem) // new peer, interval is larger than report interval, but no hot newItem = &HotPeerStat{actionType: Update, thresholds: []float64{1.0, 1.0, 1.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{0.0, 0.0, 0.0}, 10*time.Second) - c.Check(newItem, IsNil) + re.Nil(newItem) // new peer, interval is less than report interval newItem = &HotPeerStat{actionType: Update, thresholds: []float64{0.0, 0.0, 0.0}, Kind: Read} newItem = cache.updateHotPeerStat(nil, newItem, nil, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem, NotNil) - c.Check(newItem.HotDegree, Equals, 0) - c.Check(newItem.AntiCount, Equals, 0) + re.NotNil(newItem) + re.Equal(0, newItem.HotDegree) + re.Equal(0, newItem.AntiCount) // sum of interval is less than report interval oldItem := newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 0) - c.Check(newItem.AntiCount, Equals, 0) + re.Equal(0, newItem.HotDegree) + re.Equal(0, newItem.AntiCount) // sum of interval is larger than report interval, and hot oldItem = newItem newItem = cache.updateHotPeerStat(nil, 
newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is less than report interval oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 4*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is larger than report interval, and hot oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) - c.Check(newItem.HotDegree, Equals, 2) - c.Check(newItem.AntiCount, Equals, 2*m) + re.Equal(2, newItem.HotDegree) + re.Equal(2*m, newItem.AntiCount) // sum of interval is larger than report interval, and cold oldItem = newItem newItem.thresholds = []float64{10.0, 10.0, 10.0} newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) - c.Check(newItem.HotDegree, Equals, 1) - c.Check(newItem.AntiCount, Equals, 2*m-1) + re.Equal(1, newItem.HotDegree) + re.Equal(2*m-1, newItem.AntiCount) // sum of interval is larger than report interval, and cold for i := 0; i < 2*m-1; i++ { oldItem = newItem newItem = cache.updateHotPeerStat(nil, newItem, oldItem, []float64{60.0, 60.0, 60.0}, 10*time.Second) } - c.Check(newItem.HotDegree, Less, 0) - c.Check(newItem.AntiCount, Equals, 0) - c.Check(newItem.actionType, Equals, Remove) + re.Less(newItem.HotDegree, 0) + re.Equal(0, newItem.AntiCount) + re.Equal(Remove, newItem.actionType) } -func (t *testHotPeerCache) TestThresholdWithUpdateHotPeerStat(c *C) { +func TestThresholdWithUpdateHotPeerStat(t *testing.T) { + re := require.New(t) byteRate := minHotThresholds[RegionReadBytes] * 2 expectThreshold := byteRate * HotThresholdRatio - t.testMetrics(c, 120., byteRate, expectThreshold) - t.testMetrics(c, 60., byteRate, expectThreshold) - t.testMetrics(c, 30., byteRate, expectThreshold) - t.testMetrics(c, 17., byteRate, expectThreshold) - t.testMetrics(c, 1., byteRate, expectThreshold) + testMetrics(re, 120., byteRate, expectThreshold) + testMetrics(re, 60., byteRate, expectThreshold) + testMetrics(re, 30., byteRate, expectThreshold) + testMetrics(re, 17., byteRate, expectThreshold) + testMetrics(re, 1., byteRate, expectThreshold) } -func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold float64) { +func testMetrics(re *require.Assertions, interval, byteRate, expectThreshold float64) { cache := NewHotPeerCache(Read) storeID := uint64(1) - c.Assert(byteRate, GreaterEqual, minHotThresholds[RegionReadBytes]) + re.GreaterOrEqual(byteRate, minHotThresholds[RegionReadBytes]) for i := uint64(1); i < TopNN+10; i++ { var oldItem *HotPeerStat for { @@ -401,14 +397,15 @@ func (t *testHotPeerCache) testMetrics(c *C, interval, byteRate, expectThreshold } thresholds := cache.calcHotThresholds(storeID) if i < TopNN { - c.Assert(thresholds[RegionReadBytes], Equals, minHotThresholds[RegionReadBytes]) + re.Equal(minHotThresholds[RegionReadBytes], thresholds[RegionReadBytes]) } else { - c.Assert(thresholds[RegionReadBytes], Equals, expectThreshold) + re.Equal(expectThreshold, thresholds[RegionReadBytes]) } } } -func (t *testHotPeerCache) TestRemoveFromCache(c *C) { +func TestRemoveFromCache(t *testing.T) { + re := require.New(t) peerCount := 3 interval := uint64(5) checkers := []check{checkAndUpdate, 
checkAndUpdateWithOrdering} @@ -418,29 +415,30 @@ func (t *testHotPeerCache) TestRemoveFromCache(c *C) { // prepare intervalSums := make(map[uint64]int) for i := 1; i <= 200; i++ { - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) } // make the interval sum of peers are different - checkAndUpdateSkipOne(c, cache, region) + checkAndUpdateSkipOne(re, cache, region) checkIntervalSum(cache, region) // check whether cold cache is cleared var isClear bool intervalSums = make(map[uint64]int) region = region.Clone(core.SetWrittenBytes(0), core.SetWrittenKeys(0), core.SetWrittenQuery(0)) for i := 1; i <= 200; i++ { - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) if len(cache.storesOfRegion[region.GetID()]) == 0 { isClear = true break } } - c.Assert(isClear, IsTrue) + re.True(isClear) } } -func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { +func TestRemoveFromCacheRandom(t *testing.T) { + re := require.New(t) peerCounts := []int{3, 5} intervals := []uint64{120, 60, 10, 5} checkers := []check{checkAndUpdate, checkAndUpdateWithOrdering} @@ -455,12 +453,12 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { step := func(i int) { tmp := uint64(0) if i%5 == 0 { - tmp, region = schedule(c, removeReplica, region) + tmp, region = schedule(re, removeReplica, region) } - rets := checker(c, cache, region) - checkIntervalSumContinuous(c, intervalSums, rets, interval) + rets := checker(re, cache, region) + checkIntervalSumContinuous(re, intervalSums, rets, interval) if i%5 == 0 { - _, region = schedule(c, addReplica, region, target) + _, region = schedule(re, addReplica, region, target) target = tmp } } @@ -473,9 +471,9 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { } } if interval < RegionHeartBeatReportInterval { - c.Assert(checkIntervalSum(cache, region), IsTrue) + re.True(checkIntervalSum(cache, region)) } - c.Assert(cache.storesOfRegion[region.GetID()], HasLen, peerCount) + re.Len(cache.storesOfRegion[region.GetID()], peerCount) // check whether cold cache is cleared var isClear bool @@ -488,119 +486,98 @@ func (t *testHotPeerCache) TestRemoveFromCacheRandom(c *C) { break } } - c.Assert(isClear, IsTrue) + re.True(isClear) } } } } -func checkCoolDown(c *C, cache *hotPeerCache, region *core.RegionInfo, expect bool) { +func checkCoolDown(re *require.Assertions, cache *hotPeerCache, region *core.RegionInfo, expect bool) { item := cache.getOldHotPeerStat(region.GetID(), region.GetLeader().GetStoreId()) - c.Assert(item.IsNeedCoolDownTransferLeader(3), Equals, expect) + re.Equal(expect, item.IsNeedCoolDownTransferLeader(3)) } -func (t *testHotPeerCache) TestCoolDownTransferLeader(c *C) { +func TestCoolDownTransferLeader(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) region := buildRegion(Read, 3, 60) moveLeader := func() { - _, region = schedule(c, movePeer, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) - _, region = schedule(c, transferLeader, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, true) + _, region = schedule(re, movePeer, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) + _, region = schedule(re, transferLeader, region, 10) + 
checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, true) } transferLeader := func() { - _, region = schedule(c, transferLeader, region) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, true) + _, region = schedule(re, transferLeader, region) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, true) } movePeer := func() { - _, region = schedule(c, movePeer, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, movePeer, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } addReplica := func() { - _, region = schedule(c, addReplica, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, addReplica, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } removeReplica := func() { - _, region = schedule(c, removeReplica, region, 10) - checkAndUpdate(c, cache, region) - checkCoolDown(c, cache, region, false) + _, region = schedule(re, removeReplica, region, 10) + checkAndUpdate(re, cache, region) + checkCoolDown(re, cache, region, false) } cases := []func(){moveLeader, transferLeader, movePeer, addReplica, removeReplica} for _, runCase := range cases { cache = NewHotPeerCache(Read) region = buildRegion(Read, 3, 60) for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } - checkCoolDown(c, cache, region, false) + checkCoolDown(re, cache, region, false) runCase() } } // See issue #4510 -func (t *testHotPeerCache) TestCacheInherit(c *C) { +func TestCacheInherit(t *testing.T) { + re := require.New(t) cache := NewHotPeerCache(Read) region := buildRegion(Read, 3, 10) // prepare for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } // move peer newStoreID := uint64(10) - _, region = schedule(c, addReplica, region, newStoreID) - checkAndUpdate(c, cache, region) - newStoreID, region = schedule(c, removeReplica, region) - rets := checkAndUpdate(c, cache, region) + _, region = schedule(re, addReplica, region, newStoreID) + checkAndUpdate(re, cache, region) + newStoreID, region = schedule(re, removeReplica, region) + rets := checkAndUpdate(re, cache, region) for _, ret := range rets { if ret.actionType != Remove { flow := ret.GetLoads()[RegionReadBytes] - c.Assert(flow, Equals, float64(region.GetBytesRead()/ReadReportInterval)) + re.Equal(float64(region.GetBytesRead()/ReadReportInterval), flow) } } // new flow newFlow := region.GetBytesRead() * 10 region = region.Clone(core.SetReadBytes(newFlow)) for i := 1; i <= 200; i++ { - checkAndUpdate(c, cache, region) + checkAndUpdate(re, cache, region) } // move peer - _, region = schedule(c, addReplica, region, newStoreID) - checkAndUpdate(c, cache, region) - _, region = schedule(c, removeReplica, region) - rets = checkAndUpdate(c, cache, region) + _, region = schedule(re, addReplica, region, newStoreID) + checkAndUpdate(re, cache, region) + _, region = schedule(re, removeReplica, region) + rets = checkAndUpdate(re, cache, region) for _, ret := range rets { if ret.actionType != Remove { flow := ret.GetLoads()[RegionReadBytes] - c.Assert(flow, Equals, float64(newFlow/ReadReportInterval)) - } - } -} - -func BenchmarkCheckRegionFlow(b *testing.B) { - cache := NewHotPeerCache(Read) - region := buildRegion(Read, 3, 10) - peerInfos := make([]*core.PeerInfo, 0) - for _, peer := range region.GetPeers() { - peerInfo := 
core.NewPeerInfo(peer, region.GetLoads(), 10) - peerInfos = append(peerInfos, peerInfo) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - items := make([]*HotPeerStat, 0) - for _, peerInfo := range peerInfos { - item := cache.checkPeerFlow(peerInfo, region) - if item != nil { - items = append(items, item) - } - } - for _, ret := range items { - cache.updateStat(ret) + re.Equal(float64(newFlow/ReadReportInterval), flow) } } } @@ -610,7 +587,7 @@ type testMovingAverageCase struct { expect []float64 } -func checkMovingAverage(c *C, testCase *testMovingAverageCase) { +func checkMovingAverage(re *require.Assertions, testCase *testMovingAverageCase) { interval := 1 * time.Second tm := movingaverage.NewTimeMedian(DefaultAotSize, DefaultWriteMfSize, interval) var results []float64 @@ -618,11 +595,11 @@ func checkMovingAverage(c *C, testCase *testMovingAverageCase) { tm.Add(data, interval) results = append(results, tm.Get()) } - c.Assert(results, DeepEquals, testCase.expect) + re.Equal(testCase.expect, results) } -// -func (t *testHotPeerCache) TestUnstableData(c *C) { +func TestUnstableData(t *testing.T) { + re := require.New(t) cases := []*testMovingAverageCase{ { report: []float64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, @@ -650,6 +627,29 @@ func (t *testHotPeerCache) TestUnstableData(c *C) { }, } for i := range cases { - checkMovingAverage(c, cases[i]) + checkMovingAverage(re, cases[i]) + } +} + +func BenchmarkCheckRegionFlow(b *testing.B) { + cache := NewHotPeerCache(Read) + region := buildRegion(Read, 3, 10) + peerInfos := make([]*core.PeerInfo, 0) + for _, peer := range region.GetPeers() { + peerInfo := core.NewPeerInfo(peer, region.GetLoads(), 10) + peerInfos = append(peerInfos, peerInfo) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + items := make([]*HotPeerStat, 0) + for _, peerInfo := range peerInfos { + item := cache.checkPeerFlow(peerInfo, region) + if item != nil { + items = append(items, item) + } + } + for _, ret := range items { + cache.updateStat(ret) + } } } diff --git a/server/statistics/kind_test.go b/server/statistics/kind_test.go index 86e9a77e10b..ccde182eefe 100644 --- a/server/statistics/kind_test.go +++ b/server/statistics/kind_test.go @@ -15,17 +15,16 @@ package statistics import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testRegionInfoSuite{}) - -type testRegionInfoSuite struct{} - -func (s *testRegionInfoSuite) TestGetLoads(c *C) { +func TestGetLoads(t *testing.T) { + re := require.New(t) queryStats := &pdpb.QueryStats{ Get: 5, Coprocessor: 6, @@ -45,24 +44,24 @@ func (s *testRegionInfoSuite) TestGetLoads(c *C) { core.SetWrittenKeys(4), core.SetQueryStats(queryStats)) loads := regionA.GetLoads() - c.Assert(loads, HasLen, int(RegionStatCount)) - c.Assert(float64(regionA.GetBytesRead()), Equals, loads[RegionReadBytes]) - c.Assert(float64(regionA.GetKeysRead()), Equals, loads[RegionReadKeys]) - c.Assert(float64(regionA.GetReadQueryNum()), Equals, loads[RegionReadQuery]) + re.Len(loads, int(RegionStatCount)) + re.Equal(float64(regionA.GetBytesRead()), loads[RegionReadBytes]) + re.Equal(float64(regionA.GetKeysRead()), loads[RegionReadKeys]) + re.Equal(float64(regionA.GetReadQueryNum()), loads[RegionReadQuery]) readQuery := float64(queryStats.Coprocessor + queryStats.Get + queryStats.Scan) - c.Assert(float64(regionA.GetReadQueryNum()), Equals, readQuery) - c.Assert(float64(regionA.GetBytesWritten()), Equals, loads[RegionWriteBytes]) - c.Assert(float64(regionA.GetKeysWritten()), Equals, loads[RegionWriteKeys]) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, loads[RegionWriteQuery]) + re.Equal(float64(regionA.GetReadQueryNum()), readQuery) + re.Equal(float64(regionA.GetBytesWritten()), loads[RegionWriteBytes]) + re.Equal(float64(regionA.GetKeysWritten()), loads[RegionWriteKeys]) + re.Equal(float64(regionA.GetWriteQueryNum()), loads[RegionWriteQuery]) writeQuery := float64(queryStats.Put + queryStats.Delete + queryStats.DeleteRange + queryStats.AcquirePessimisticLock + queryStats.Rollback + queryStats.Prewrite + queryStats.Commit) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, writeQuery) + re.Equal(float64(regionA.GetWriteQueryNum()), writeQuery) loads = regionA.GetWriteLoads() - c.Assert(loads, HasLen, int(RegionStatCount)) - c.Assert(0.0, Equals, loads[RegionReadBytes]) - c.Assert(0.0, Equals, loads[RegionReadKeys]) - c.Assert(0.0, Equals, loads[RegionReadQuery]) - c.Assert(float64(regionA.GetBytesWritten()), Equals, loads[RegionWriteBytes]) - c.Assert(float64(regionA.GetKeysWritten()), Equals, loads[RegionWriteKeys]) - c.Assert(float64(regionA.GetWriteQueryNum()), Equals, loads[RegionWriteQuery]) + re.Len(loads, int(RegionStatCount)) + re.Equal(0.0, loads[RegionReadBytes]) + re.Equal(0.0, loads[RegionReadKeys]) + re.Equal(0.0, loads[RegionReadQuery]) + re.Equal(float64(regionA.GetBytesWritten()), loads[RegionWriteBytes]) + re.Equal(float64(regionA.GetKeysWritten()), loads[RegionWriteKeys]) + re.Equal(float64(regionA.GetWriteQueryNum()), loads[RegionWriteQuery]) } diff --git a/server/statistics/region_collection_test.go b/server/statistics/region_collection_test.go index eb100e958fd..932c35f139e 100644 --- a/server/statistics/region_collection_test.go +++ b/server/statistics/region_collection_test.go @@ -17,36 +17,21 @@ package statistics import ( "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" "github.com/tikv/pd/server/schedule/placement" "github.com/tikv/pd/server/storage" - "github.com/tikv/pd/server/storage/endpoint" ) -func TestStatistics(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testRegionStatisticsSuite{}) - -type testRegionStatisticsSuite struct { - store endpoint.RuleStorage - manager *placement.RuleManager -} - -func (t *testRegionStatisticsSuite) SetUpTest(c *C) { - t.store = storage.NewStorageWithMemoryBackend() - var err error - t.manager = placement.NewRuleManager(t.store, nil, nil) - err = t.manager.Initialize(3, []string{"zone", "rack", "host"}) - c.Assert(err, IsNil) -} - -func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { +func TestRegionStatistics(t *testing.T) { + re := require.New(t) + store := storage.NewStorageWithMemoryBackend() + manager := placement.NewRuleManager(store, nil, nil) + err := manager.Initialize(3, []string{"zone", "rack", "host"}) + re.NoError(err) opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(false) peers := []*metapb.Peer{ @@ -80,14 +65,14 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { r2 := &metapb.Region{Id: 2, Peers: peers[0:2], StartKey: []byte("cc"), EndKey: []byte("dd")} region1 := core.NewRegionInfo(r1, peers[0]) region2 := core.NewRegionInfo(r2, peers[0]) - regionStats := NewRegionStatistics(opt, t.manager, nil) + regionStats := NewRegionStatistics(opt, manager, nil) regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[EmptyRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 1) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[EmptyRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 1) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) region1 = region1.Clone( core.WithDownPeers(downPeers), @@ -95,58 +80,63 @@ func (t *testRegionStatisticsSuite) TestRegionStatistics(c *C) { core.SetApproximateSize(144), ) regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[MissPeer], HasLen, 0) - c.Assert(regionStats.stats[DownPeer], HasLen, 1) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[EmptyRegion], HasLen, 0) - c.Assert(regionStats.stats[OversizedRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 0) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[MissPeer], 0) + re.Len(regionStats.stats[DownPeer], 1) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[EmptyRegion], 0) + 
re.Len(regionStats.stats[OversizedRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 0) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 1) + re.Len(regionStats.offlineStats[PendingPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) + re.Len(regionStats.offlineStats[OfflinePeer], 1) region2 = region2.Clone(core.WithDownPeers(downPeers[0:1])) regionStats.Observe(region2, stores[0:2]) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[DownPeer], HasLen, 2) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.stats[OversizedRegion], HasLen, 1) - c.Assert(regionStats.stats[UndersizedRegion], HasLen, 1) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 1) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[DownPeer], 2) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 1) + re.Len(regionStats.stats[OversizedRegion], 1) + re.Len(regionStats.stats[UndersizedRegion], 1) + re.Len(regionStats.offlineStats[ExtraPeer], 1) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 1) + re.Len(regionStats.offlineStats[PendingPeer], 1) + re.Len(regionStats.offlineStats[LearnerPeer], 1) + re.Len(regionStats.offlineStats[OfflinePeer], 1) region1 = region1.Clone(core.WithRemoveStorePeer(7)) regionStats.Observe(region1, stores[0:3]) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 0) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[DownPeer], HasLen, 2) - c.Assert(regionStats.stats[PendingPeer], HasLen, 1) - c.Assert(regionStats.stats[LearnerPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[ExtraPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[MissPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[DownPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[PendingPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[LearnerPeer], HasLen, 0) - c.Assert(regionStats.offlineStats[OfflinePeer], HasLen, 0) + re.Len(regionStats.stats[ExtraPeer], 0) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[DownPeer], 2) + re.Len(regionStats.stats[PendingPeer], 1) + re.Len(regionStats.stats[LearnerPeer], 0) + re.Len(regionStats.offlineStats[ExtraPeer], 0) + re.Len(regionStats.offlineStats[MissPeer], 0) + re.Len(regionStats.offlineStats[DownPeer], 0) + re.Len(regionStats.offlineStats[PendingPeer], 0) + re.Len(regionStats.offlineStats[LearnerPeer], 0) + re.Len(regionStats.offlineStats[OfflinePeer], 0) store3 = stores[3].Clone(core.UpStore()) stores[3] = store3 regionStats.Observe(region1, stores) - c.Assert(regionStats.stats[OfflinePeer], HasLen, 0) + re.Len(regionStats.stats[OfflinePeer], 0) } -func (t *testRegionStatisticsSuite) TestRegionStatisticsWithPlacementRule(c *C) { +func TestRegionStatisticsWithPlacementRule(t *testing.T) { + re := require.New(t) + store := storage.NewStorageWithMemoryBackend() + manager := placement.NewRuleManager(store, nil, nil) + err := 
manager.Initialize(3, []string{"zone", "rack", "host"}) + re.NoError(err) opt := config.NewTestOptions() opt.SetPlacementRuleEnabled(true) peers := []*metapb.Peer{ @@ -173,20 +163,21 @@ func (t *testRegionStatisticsSuite) TestRegionStatisticsWithPlacementRule(c *C) region2 := core.NewRegionInfo(r2, peers[0]) region3 := core.NewRegionInfo(r3, peers[0]) region4 := core.NewRegionInfo(r4, peers[0]) - regionStats := NewRegionStatistics(opt, t.manager, nil) + regionStats := NewRegionStatistics(opt, manager, nil) // r2 didn't match the rules regionStats.Observe(region2, stores) - c.Assert(regionStats.stats[MissPeer], HasLen, 1) + re.Len(regionStats.stats[MissPeer], 1) regionStats.Observe(region3, stores) // r3 didn't match the rules - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) + re.Len(regionStats.stats[ExtraPeer], 1) regionStats.Observe(region4, stores) // r4 match the rules - c.Assert(regionStats.stats[MissPeer], HasLen, 1) - c.Assert(regionStats.stats[ExtraPeer], HasLen, 1) + re.Len(regionStats.stats[MissPeer], 1) + re.Len(regionStats.stats[ExtraPeer], 1) } -func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { +func TestRegionLabelIsolationLevel(t *testing.T) { + re := require.New(t) locationLabels := []string{"zone", "rack", "host"} labelLevelStats := NewLabelStatistics() labelsSet := [][]map[string]string{ @@ -256,7 +247,7 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { region := core.NewRegionInfo(&metapb.Region{Id: uint64(regionID)}, nil) label := GetRegionLabelIsolation(stores, locationLabels) labelLevelStats.Observe(region, stores, locationLabels) - c.Assert(label, Equals, res) + re.Equal(res, label) regionID++ } @@ -264,16 +255,16 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { f(labels, res[i], locationLabels) } for i, res := range counter { - c.Assert(labelLevelStats.labelCounter[i], Equals, res) + re.Equal(res, labelLevelStats.labelCounter[i]) } label := GetRegionLabelIsolation(nil, locationLabels) - c.Assert(label, Equals, nonIsolation) + re.Equal(nonIsolation, label) label = GetRegionLabelIsolation(nil, nil) - c.Assert(label, Equals, nonIsolation) + re.Equal(nonIsolation, label) store := core.NewStoreInfo(&metapb.Store{Id: 1, Address: "mock://tikv-1"}, core.SetStoreLabels([]*metapb.StoreLabel{{Key: "foo", Value: "bar"}})) label = GetRegionLabelIsolation([]*core.StoreInfo{store}, locationLabels) - c.Assert(label, Equals, "zone") + re.Equal("zone", label) regionID = 1 res = []string{"rack", "none", "zone", "rack", "none", "rack", "none"} @@ -284,6 +275,6 @@ func (t *testRegionStatisticsSuite) TestRegionLabelIsolationLevel(c *C) { f(labels, res[i], locationLabels) } for i, res := range counter { - c.Assert(labelLevelStats.labelCounter[i], Equals, res) + re.Equal(res, labelLevelStats.labelCounter[i]) } } diff --git a/server/statistics/store_collection_test.go b/server/statistics/store_collection_test.go index f93a4b54bb5..388fc13b27e 100644 --- a/server/statistics/store_collection_test.go +++ b/server/statistics/store_collection_test.go @@ -15,19 +15,17 @@ package statistics import ( + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testStoreStatisticsSuite{}) - -type testStoreStatisticsSuite struct{} - -func (t *testStoreStatisticsSuite) TestStoreStatistics(c *C) { +func TestStoreStatistics(t *testing.T) { + re := require.New(t) opt := config.NewTestOptions() rep := opt.GetReplicationConfig().Clone() rep.LocationLabels = []string{"zone", "host"} @@ -62,22 +60,22 @@ func (t *testStoreStatisticsSuite) TestStoreStatistics(c *C) { } stats := storeStats.stats - c.Assert(stats.Up, Equals, 6) - c.Assert(stats.Preparing, Equals, 7) - c.Assert(stats.Serving, Equals, 0) - c.Assert(stats.Removing, Equals, 1) - c.Assert(stats.Removed, Equals, 1) - c.Assert(stats.Down, Equals, 1) - c.Assert(stats.Offline, Equals, 1) - c.Assert(stats.RegionCount, Equals, 0) - c.Assert(stats.Unhealthy, Equals, 0) - c.Assert(stats.Disconnect, Equals, 0) - c.Assert(stats.Tombstone, Equals, 1) - c.Assert(stats.LowSpace, Equals, 8) - c.Assert(stats.LabelCounter["zone:z1"], Equals, 2) - c.Assert(stats.LabelCounter["zone:z2"], Equals, 2) - c.Assert(stats.LabelCounter["zone:z3"], Equals, 2) - c.Assert(stats.LabelCounter["host:h1"], Equals, 4) - c.Assert(stats.LabelCounter["host:h2"], Equals, 4) - c.Assert(stats.LabelCounter["zone:unknown"], Equals, 2) + re.Equal(6, stats.Up) + re.Equal(7, stats.Preparing) + re.Equal(0, stats.Serving) + re.Equal(1, stats.Removing) + re.Equal(1, stats.Removed) + re.Equal(1, stats.Down) + re.Equal(1, stats.Offline) + re.Equal(0, stats.RegionCount) + re.Equal(0, stats.Unhealthy) + re.Equal(0, stats.Disconnect) + re.Equal(1, stats.Tombstone) + re.Equal(8, stats.LowSpace) + re.Equal(2, stats.LabelCounter["zone:z1"]) + re.Equal(2, stats.LabelCounter["zone:z2"]) + re.Equal(2, stats.LabelCounter["zone:z3"]) + re.Equal(4, stats.LabelCounter["host:h1"]) + re.Equal(4, stats.LabelCounter["host:h2"]) + re.Equal(2, stats.LabelCounter["zone:unknown"]) } diff --git a/server/statistics/store_test.go b/server/statistics/store_test.go index e3247ea1c46..89508be41b7 100644 --- a/server/statistics/store_test.go +++ b/server/statistics/store_test.go @@ -15,26 +15,24 @@ package statistics import ( + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/core" ) -var _ = Suite(&testStoreSuite{}) - -type testStoreSuite struct{} - -func (s *testStoreSuite) TestFilterUnhealtyStore(c *C) { +func TestFilterUnhealtyStore(t *testing.T) { + re := require.New(t) stats := NewStoresStats() cluster := core.NewBasicCluster() for i := uint64(1); i <= 5; i++ { cluster.PutStore(core.NewStoreInfo(&metapb.Store{Id: i}, core.SetLastHeartbeatTS(time.Now()))) stats.Observe(i, &pdpb.StoreStats{}) } - c.Assert(stats.GetStoresLoads(), HasLen, 5) + re.Len(stats.GetStoresLoads(), 5) cluster.PutStore(cluster.GetStore(1).Clone(core.SetLastHeartbeatTS(time.Now().Add(-24 * time.Hour)))) cluster.PutStore(cluster.GetStore(2).Clone(core.TombstoneStore())) @@ -42,7 +40,7 @@ func (s *testStoreSuite) TestFilterUnhealtyStore(c *C) { stats.FilterUnhealthyStore(cluster) loads := stats.GetStoresLoads() - c.Assert(loads, HasLen, 2) - c.Assert(loads[4], NotNil) - c.Assert(loads[5], NotNil) + re.Len(loads, 2) + re.NotNil(loads[4]) + re.NotNil(loads[5]) } diff --git a/server/statistics/topn_test.go b/server/statistics/topn_test.go index 0bf1c4e4d21..fa9e4ebd5f1 100644 --- a/server/statistics/topn_test.go +++ b/server/statistics/topn_test.go @@ -17,15 +17,12 @@ package statistics import ( "math/rand" "sort" + "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testTopNSuite{}) - -type testTopNSuite struct{} - type item struct { id uint64 values []float64 @@ -39,21 +36,22 @@ func (it *item) Less(k int, than TopNItem) bool { return it.values[k] < than.(*item).values[k] } -func (s *testTopNSuite) TestPut(c *C) { +func TestPut(t *testing.T) { + re := require.New(t) const Total, N = 10000, 50 tn := NewTopN(DimLen, N, 1*time.Hour) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) + 1 }, false /*insert*/) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, true /*update*/) // check GetTopNMin for k := 0; k < DimLen; k++ { - c.Assert(tn.GetTopNMin(k).(*item).values[k], Equals, float64(1-N)) + re.Equal(float64(1-N), tn.GetTopNMin(k).(*item).values[k]) } { @@ -65,7 +63,7 @@ func (s *testTopNSuite) TestPut(c *C) { } // check update worked for i, v := range topns { - c.Assert(v, Equals, float64(-i)) + re.Equal(float64(-i), v) } } @@ -78,7 +76,7 @@ func (s *testTopNSuite) TestPut(c *C) { } // check update worked for i, v := range all { - c.Assert(v, Equals, float64(-i)) + re.Equal(float64(-i), v) } } @@ -96,19 +94,19 @@ func (s *testTopNSuite) TestPut(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } // check Get for i := uint64(0); i < Total; i++ { it := tn.Get(i).(*item) - c.Assert(it.id, Equals, i) - c.Assert(it.values[0], Equals, -float64(i)) + re.Equal(i, it.id) + re.Equal(-float64(i), it.values[0]) } } -func putPerm(c *C, tn *TopN, total int, f func(x int) float64, isUpdate bool) { +func putPerm(re *require.Assertions, tn *TopN, total int, f func(x int) float64, isUpdate bool) { { // insert dims := make([][]int, DimLen) for k := 0; k < DimLen; k++ { @@ -122,16 +120,17 @@ func putPerm(c *C, tn *TopN, total int, f func(x int) float64, isUpdate bool) { for k := 0; k < DimLen; k++ { item.values[k] = f(dims[k][i]) } - c.Assert(tn.Put(item), Equals, isUpdate) + 
re.Equal(isUpdate, tn.Put(item)) } } } -func (s *testTopNSuite) TestRemove(c *C) { +func TestRemove(t *testing.T) { + re := require.New(t) const Total, N = 10000, 50 tn := NewTopN(DimLen, N, 1*time.Hour) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, false /*insert*/) @@ -139,28 +138,28 @@ func (s *testTopNSuite) TestRemove(c *C) { for i := 0; i < Total; i++ { if i%3 != 0 { it := tn.Remove(uint64(i)).(*item) - c.Assert(it.id, Equals, uint64(i)) + re.Equal(uint64(i), it.id) } } // check Remove worked for i := 0; i < Total; i++ { if i%3 != 0 { - c.Assert(tn.Remove(uint64(i)), IsNil) + re.Nil(tn.Remove(uint64(i))) } } - c.Assert(tn.GetTopNMin(0).(*item).id, Equals, uint64(3*(N-1))) + re.Equal(uint64(3*(N-1)), tn.GetTopNMin(0).(*item).id) { topns := make([]float64, N) for _, it := range tn.GetAllTopN(0) { it := it.(*item) topns[it.id/3] = it.values[0] - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) } for i, v := range topns { - c.Assert(v, Equals, float64(-i*3)) + re.Equal(float64(-i*3), v) } } @@ -169,10 +168,10 @@ func (s *testTopNSuite) TestRemove(c *C) { for _, it := range tn.GetAll() { it := it.(*item) all[it.id/3] = it.values[0] - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) } for i, v := range all { - c.Assert(v, Equals, float64(-i*3)) + re.Equal(float64(-i*3), v) } } @@ -190,22 +189,23 @@ func (s *testTopNSuite) TestRemove(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } for i := uint64(0); i < Total; i += 3 { it := tn.Get(i).(*item) - c.Assert(it.id, Equals, i) - c.Assert(it.values[0], Equals, -float64(i)) + re.Equal(i, it.id) + re.Equal(-float64(i), it.values[0]) } } -func (s *testTopNSuite) TestTTL(c *C) { +func TestTTL(t *testing.T) { + re := require.New(t) const Total, N = 1000, 50 tn := NewTopN(DimLen, 50, 900*time.Millisecond) - putPerm(c, tn, Total, func(x int) float64 { + putPerm(re, tn, Total, func(x int) float64 { return float64(-x) }, false /*insert*/) @@ -215,27 +215,27 @@ func (s *testTopNSuite) TestTTL(c *C) { for k := 1; k < DimLen; k++ { item.values = append(item.values, rand.NormFloat64()) } - c.Assert(tn.Put(item), IsTrue) + re.True(tn.Put(item)) } for i := 3; i < Total; i += 3 { item := &item{id: uint64(i), values: []float64{float64(-i) + 100}} for k := 1; k < DimLen; k++ { item.values = append(item.values, rand.NormFloat64()) } - c.Assert(tn.Put(item), IsFalse) + re.False(tn.Put(item)) } tn.RemoveExpired() - c.Assert(tn.Len(), Equals, Total/3+1) + re.Equal(Total/3+1, tn.Len()) items := tn.GetAllTopN(0) v := make([]float64, N) for _, it := range items { it := it.(*item) - c.Assert(it.id%3, Equals, uint64(0)) + re.Equal(uint64(0), it.id%3) v[it.id/3] = it.values[0] } for i, x := range v { - c.Assert(x, Equals, float64(-i*3)+100) + re.Equal(float64(-i*3)+100, x) } { // check all dimensions @@ -252,7 +252,7 @@ func (s *testTopNSuite) TestTTL(c *C) { } sort.Sort(sort.Reverse(sort.Float64Slice(all))) - c.Assert(topn, DeepEquals, all[:N]) + re.Equal(all[:N], topn) } } }
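
The hunks above all follow the same mechanical pattern: each pingcap/check suite method becomes a plain Go test that obtains assertions via require.New(t), and each c.Assert(actual, Matcher, expected) call is rewritten as the matching testify call with the expected value listed first. The sketch below illustrates that mapping; the test name and the nums/err/found variables are illustrative only and do not appear in the patch.

package statistics

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestTestifyMigrationPattern is an illustrative test (not part of the patch)
// showing how the pingcap/check assertions above map onto testify's require API.
func TestTestifyMigrationPattern(t *testing.T) {
	re := require.New(t)

	nums := []int{1, 2, 3}

	// c.Assert(nums, HasLen, 3)            -> re.Len(nums, 3)
	re.Len(nums, 3)

	// c.Assert(nums[0]+nums[1], Equals, 3) -> re.Equal(3, nums[0]+nums[1])
	// Note: testify takes the expected value first and the actual value second.
	re.Equal(3, nums[0]+nums[1])

	// c.Assert(err, IsNil)                 -> re.NoError(err) (or re.Nil for non-errors)
	var err error
	re.NoError(err)

	// c.Assert(found, IsTrue)              -> re.True(found)
	found := len(nums) > 0
	re.True(found)
}

Using require rather than assert preserves the fail-fast behaviour of pingcap/check: a failed require assertion aborts the test immediately, just as c.Assert did.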