statistics, schedulers: split hot cache to each store #1641

Merged (24 commits), Aug 6, 2019
Changes from 23 commits
20 changes: 10 additions & 10 deletions pkg/mock/mockcluster/mockcluster.go
@@ -72,17 +72,17 @@ func (mc *Cluster) GetStoreRegionCount(storeID uint64) int {
}

// IsRegionHot checks if the region is hot.
func (mc *Cluster) IsRegionHot(id uint64) bool {
return mc.HotSpotCache.IsRegionHot(id, mc.GetHotRegionCacheHitsThreshold())
func (mc *Cluster) IsRegionHot(region *core.RegionInfo) bool {
return mc.HotSpotCache.IsRegionHot(region, mc.GetHotRegionCacheHitsThreshold())
}

// RegionReadStats returns hot region's read stats.
func (mc *Cluster) RegionReadStats() []*statistics.RegionStat {
func (mc *Cluster) RegionReadStats() map[uint64][]*statistics.HotSpotPeerStat {
return mc.HotSpotCache.RegionStats(statistics.ReadFlow)
}

// RegionWriteStats returns hot region's write stats.
func (mc *Cluster) RegionWriteStats() []*statistics.RegionStat {
func (mc *Cluster) RegionWriteStats() map[uint64][]*statistics.HotSpotPeerStat {
return mc.HotSpotCache.RegionStats(statistics.WriteFlow)
}
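
With this split, hot statistics are grouped by store ID instead of being returned as one flat list, so consumers iterate a map keyed by store. A minimal sketch of totalling hot write flow per store under the new signature; the totals variable is illustrative only, and FlowBytes is the field asserted in balance_test.go further down:

	// Illustrative only: walk the per-store map returned by RegionWriteStats.
	stats := mc.RegionWriteStats() // map[storeID][]*statistics.HotSpotPeerStat
	totals := make(map[uint64]uint64)
	for storeID, peerStats := range stats {
		for _, s := range peerStats {
			totals[storeID] += s.FlowBytes
		}
	}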

@@ -231,9 +231,9 @@ func (mc *Cluster) AddLeaderRegionWithRange(regionID uint64, startKey string, en
func (mc *Cluster) AddLeaderRegionWithReadInfo(regionID uint64, leaderID uint64, readBytes uint64, followerIds ...uint64) {
r := mc.newMockRegionInfo(regionID, leaderID, followerIds...)
r = r.Clone(core.SetReadBytes(readBytes))
isUpdate, item := mc.HotSpotCache.CheckRead(r, mc.StoresStats)
if isUpdate {
mc.HotSpotCache.Update(regionID, item, statistics.ReadFlow)
items := mc.HotSpotCache.CheckRead(r, mc.StoresStats)
for _, item := range items {
mc.HotSpotCache.Update(item)
}
mc.PutRegion(r)
}
@@ -242,9 +242,9 @@ func (mc *Cluster) AddLeaderRegionWithReadInfo(regionID uint64, leaderID uint64,
func (mc *Cluster) AddLeaderRegionWithWriteInfo(regionID uint64, leaderID uint64, writtenBytes uint64, followerIds ...uint64) {
r := mc.newMockRegionInfo(regionID, leaderID, followerIds...)
r = r.Clone(core.SetWrittenBytes(writtenBytes))
isUpdate, item := mc.HotSpotCache.CheckWrite(r, mc.StoresStats)
if isUpdate {
mc.HotSpotCache.Update(regionID, item, statistics.WriteFlow)
items := mc.HotSpotCache.CheckWrite(r, mc.StoresStats)
for _, item := range items {
mc.HotSpotCache.Update(item)
}
mc.PutRegion(r)
}
2 changes: 2 additions & 0 deletions server/api/region.go
@@ -39,6 +39,8 @@ type RegionInfo struct {
PendingPeers []*metapb.Peer `json:"pending_peers,omitempty"`
WrittenBytes uint64 `json:"written_bytes,omitempty"`
ReadBytes uint64 `json:"read_bytes,omitempty"`
WrittenKeys uint64 `json:"written_keys,omitempty"`
ReadKeys uint64 `json:"read_keys,omitempty"`
ApproximateSize int64 `json:"approximate_size,omitempty"`
ApproximateKeys int64 `json:"approximate_keys,omitempty"`
}
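
The two new JSON fields surface the key counters that this PR adds to core.RegionInfo (see server/core/region.go below). A hedged sketch of how a handler could fill them in; newAPIRegionInfo is a hypothetical helper that is not part of this diff, and GetBytesRead is assumed to exist as the counterpart of GetBytesWritten:

	// Hypothetical conversion helper, shown only to illustrate the new fields.
	func newAPIRegionInfo(r *core.RegionInfo) *RegionInfo {
		return &RegionInfo{
			WrittenBytes: r.GetBytesWritten(),
			ReadBytes:    r.GetBytesRead(),   // assumed existing getter
			WrittenKeys:  r.GetKeysWritten(), // getter added in this PR
			ReadKeys:     r.GetKeysRead(),    // getter added in this PR
		}
	}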
4 changes: 2 additions & 2 deletions server/checker/merge_checker.go
@@ -95,7 +95,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator {
}

// skip hot region
if m.cluster.IsRegionHot(region.GetID()) {
if m.cluster.IsRegionHot(region) {
checkerCounter.WithLabelValues("merge_checker", "hot_region").Inc()
return nil
}
@@ -130,7 +130,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator {

func (m *MergeChecker) checkTarget(region, adjacent, target *core.RegionInfo) *core.RegionInfo {
// if the adjacent region is not hot and is under the same namespace
if adjacent != nil && !m.cluster.IsRegionHot(adjacent.GetID()) &&
if adjacent != nil && !m.cluster.IsRegionHot(adjacent) &&
m.classifier.AllowMerge(region, adjacent) &&
len(adjacent.GetDownPeers()) == 0 && len(adjacent.GetPendingPeers()) == 0 && len(adjacent.GetLearners()) == 0 {
// if both regions are not hot, prefer the one with smaller size
27 changes: 13 additions & 14 deletions server/cluster_info.go
@@ -313,10 +313,10 @@ func (c *clusterInfo) GetRegion(regionID uint64) *core.RegionInfo {
}

// IsRegionHot checks if a region is in hot state.
func (c *clusterInfo) IsRegionHot(id uint64) bool {
func (c *clusterInfo) IsRegionHot(region *core.RegionInfo) bool {
c.RLock()
defer c.RUnlock()
return c.hotSpotCache.IsRegionHot(id, c.GetHotRegionCacheHitsThreshold())
return c.hotSpotCache.IsRegionHot(region, c.GetHotRegionCacheHitsThreshold())
}

// RandHotRegionFromStore randomly picks a hot region in specified store.
@@ -522,8 +522,8 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error {
}
}
}
isWriteUpdate, writeItem := c.CheckWriteStatus(region)
isReadUpdate, readItem := c.CheckReadStatus(region)
writeItems := c.CheckWriteStatus(region)
readItems := c.CheckReadStatus(region)
c.RUnlock()

// Save to storage if meta is updated.
@@ -604,7 +604,7 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error {
default:
}
}
if !isWriteUpdate && !isReadUpdate && !saveCache && !isNew {
if len(writeItems) == 0 && len(readItems) == 0 && !saveCache && !isNew {
return nil
}

@@ -648,12 +648,11 @@ func (c *clusterInfo) handleRegionHeartbeat(region *core.RegionInfo) error {
c.regionStats.Observe(region, c.takeRegionStoresLocked(region))
}

key := region.GetID()
if isWriteUpdate {
c.hotSpotCache.Update(key, writeItem, statistics.WriteFlow)
for _, writeItem := range writeItems {
c.hotSpotCache.Update(writeItem)
}
if isReadUpdate {
c.hotSpotCache.Update(key, readItem, statistics.ReadFlow)
for _, readItem := range readItems {
c.hotSpotCache.Update(readItem)
}
return nil
}
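
A heartbeat can now produce several hot-peer items for one region, roughly one per store that holds a peer, and every item is fed back into the cache. A compact sketch of the check-then-update pattern above, written as a standalone helper; the helper name is an assumption, while the methods and the hotSpotCache field are the ones used in this file:

	// Sketch: gather per-store hot items for a region and push them into the cache.
	func (c *clusterInfo) updateHotCache(region *core.RegionInfo) {
		for _, item := range c.CheckWriteStatus(region) {
			c.hotSpotCache.Update(item)
		}
		for _, item := range c.CheckReadStatus(region) {
			c.hotSpotCache.Update(item)
		}
	}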
@@ -815,24 +814,24 @@ func (c *clusterInfo) CheckLabelProperty(typ string, labels []*metapb.StoreLabel
}

// RegionReadStats returns hot region's read stats.
func (c *clusterInfo) RegionReadStats() []*statistics.RegionStat {
func (c *clusterInfo) RegionReadStats() map[uint64][]*statistics.HotSpotPeerStat {
// RegionStats is a thread-safe method
return c.hotSpotCache.RegionStats(statistics.ReadFlow)
}

// RegionWriteStats returns hot region's write stats.
func (c *clusterInfo) RegionWriteStats() []*statistics.RegionStat {
func (c *clusterInfo) RegionWriteStats() map[uint64][]*statistics.HotSpotPeerStat {
// RegionStats is a thread-safe method
return c.hotSpotCache.RegionStats(statistics.WriteFlow)
}

// CheckWriteStatus checks the write status and returns the update items for the region's peers.
func (c *clusterInfo) CheckWriteStatus(region *core.RegionInfo) (bool, *statistics.RegionStat) {
func (c *clusterInfo) CheckWriteStatus(region *core.RegionInfo) []*statistics.HotSpotPeerStat {
return c.hotSpotCache.CheckWrite(region, c.storesStats)
}

// CheckReadStatus checks the read status and returns the update items for the region's peers.
func (c *clusterInfo) CheckReadStatus(region *core.RegionInfo) (bool, *statistics.RegionStat) {
func (c *clusterInfo) CheckReadStatus(region *core.RegionInfo) []*statistics.HotSpotPeerStat {
return c.hotSpotCache.CheckRead(region, c.storesStats)
}

16 changes: 16 additions & 0 deletions server/core/region.go
@@ -36,7 +36,9 @@ type RegionInfo struct {
downPeers []*pdpb.PeerStats
pendingPeers []*metapb.Peer
writtenBytes uint64
writtenKeys uint64
readBytes uint64
readKeys uint64
approximateSize int64
approximateKeys int64
}
@@ -89,7 +91,9 @@ func RegionFromHeartbeat(heartbeat *pdpb.RegionHeartbeatRequest) *RegionInfo {
downPeers: heartbeat.GetDownPeers(),
pendingPeers: heartbeat.GetPendingPeers(),
writtenBytes: heartbeat.GetBytesWritten(),
writtenKeys: heartbeat.GetKeysWritten(),
readBytes: heartbeat.GetBytesRead(),
readKeys: heartbeat.GetKeysRead(),
approximateSize: int64(regionSize),
approximateKeys: int64(heartbeat.GetApproximateKeys()),
}
@@ -115,7 +119,9 @@ func (r *RegionInfo) Clone(opts ...RegionCreateOption) *RegionInfo {
downPeers: downPeers,
pendingPeers: pendingPeers,
writtenBytes: r.writtenBytes,
writtenKeys: r.writtenKeys,
readBytes: r.readBytes,
readKeys: r.readKeys,
approximateSize: r.approximateSize,
approximateKeys: r.approximateKeys,
}
@@ -328,6 +334,16 @@ func (r *RegionInfo) GetBytesWritten() uint64 {
return r.writtenBytes
}

// GetKeysWritten returns the written keys of the region.
func (r *RegionInfo) GetKeysWritten() uint64 {
return r.writtenKeys
}

// GetKeysRead returns the read keys of the region.
func (r *RegionInfo) GetKeysRead() uint64 {
return r.readKeys
}
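
The written/read key counters cover one heartbeat report window, which is why the tests below multiply a per-second rate by statistics.RegionHeartBeatReportInterval. A small sketch of deriving approximate keys-per-second rates in a caller, assuming that constant is the window length in seconds:

	// Illustrative only: convert per-report-window counters into rates.
	interval := float64(statistics.RegionHeartBeatReportInterval)
	writeKeysPerSec := float64(region.GetKeysWritten()) / interval
	readKeysPerSec := float64(region.GetKeysRead()) / interval
	_, _ = writeKeysPerSec, readKeysPerSec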

// GetLeader returns the leader of the region.
func (r *RegionInfo) GetLeader() *metapb.Peer {
return r.leader
11 changes: 2 additions & 9 deletions server/namespace_cluster.go
@@ -126,15 +126,8 @@ func (c *namespaceCluster) GetRegion(id uint64) *core.RegionInfo {
}

// RegionWriteStats returns hot region's write stats.
func (c *namespaceCluster) RegionWriteStats() []*statistics.RegionStat {
allStats := c.Cluster.RegionWriteStats()
stats := make([]*statistics.RegionStat, 0, len(allStats))
for _, s := range allStats {
if c.GetRegion(s.RegionID) != nil {
stats = append(stats, s)
}
}
return stats
func (c *namespaceCluster) RegionWriteStats() map[uint64][]*statistics.HotSpotPeerStat {
return c.Cluster.RegionWriteStats()
}

func scheduleByNamespace(cluster schedule.Cluster, classifier namespace.Classifier, scheduler schedule.Scheduler) []*operator.Operator {
2 changes: 1 addition & 1 deletion server/schedule/region_scatterer.go
@@ -85,7 +85,7 @@ func NewRegionScatterer(cluster Cluster, classifier namespace.Classifier) *Regio

// Scatter relocates the region.
func (r *RegionScatterer) Scatter(region *core.RegionInfo) (*operator.Operator, error) {
if r.cluster.IsRegionHot(region.GetID()) {
if r.cluster.IsRegionHot(region) {
return nil, errors.Errorf("region %d is a hot region", region.GetID())
}

2 changes: 1 addition & 1 deletion server/schedulers/adjacent_region.go
@@ -247,7 +247,7 @@ func (l *balanceAdjacentRegionScheduler) unsafeToBalance(cluster schedule.Cluste
return true
}
// Skip hot regions.
if cluster.IsRegionHot(region.GetID()) {
if cluster.IsRegionHot(region) {
schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc()
return true
}
7 changes: 3 additions & 4 deletions server/schedulers/balance_leader.go
@@ -197,9 +197,8 @@ func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluste
// no new operator needs to be created, otherwise create an operator that transfers
// the leader from the source store to the target store for the region.
func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence operator.OpInfluence) []*operator.Operator {
regionID := region.GetID()
if cluster.IsRegionHot(regionID) {
log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", regionID))
if cluster.IsRegionHot(region) {
log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()))
schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc()
return nil
}
@@ -208,7 +207,7 @@ func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source,
targetID := target.GetID()
if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) {
log.Debug("skip balance leader",
zap.String("scheduler", l.GetName()), zap.Uint64("region-id", regionID), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", sourceID), zap.Uint64("target-store", targetID),
zap.Int64("source-size", source.GetLeaderSize()), zap.Float64("source-score", source.LeaderScore(0)),
zap.Int64("source-influence", opInfluence.GetStoreInfluence(sourceID).ResourceSize(core.LeaderKind)),
zap.Int64("target-size", target.GetLeaderSize()), zap.Float64("target-score", target.LeaderScore(0)),
2 changes: 1 addition & 1 deletion server/schedulers/balance_region.go
@@ -161,7 +161,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
}

// Skip hot regions.
if cluster.IsRegionHot(region.GetID()) {
if cluster.IsRegionHot(region) {
log.Debug("region is hot", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
schedulerCounter.WithLabelValues(s.GetName(), "region_hot").Inc()
s.hitsCounter.put(source, nil)
81 changes: 76 additions & 5 deletions server/schedulers/balance_test.go
@@ -935,6 +935,7 @@ var _ = Suite(&testBalanceHotWriteRegionSchedulerSuite{})
type testBalanceHotWriteRegionSchedulerSuite struct{}

func (s *testBalanceHotWriteRegionSchedulerSuite) TestBalance(c *C) {
statistics.Denoising = false
opt := mockoption.NewScheduleOptions()
newTestReplication(opt, 3, "zone", "host")
tc := mockcluster.NewCluster(opt)
@@ -1038,6 +1039,8 @@ func (s *testBalanceHotWriteRegionSchedulerSuite) TestBalance(c *C) {
hb.Schedule(tc)
}

var _ = Suite(&testBalanceHotReadRegionSchedulerSuite{})

type testBalanceHotReadRegionSchedulerSuite struct{}

func (s *testBalanceHotReadRegionSchedulerSuite) TestBalance(c *C) {
@@ -1072,17 +1075,19 @@ func (s *testBalanceHotReadRegionSchedulerSuite) TestBalance(c *C) {
// lower than hot read flow rate, but higher than write flow rate
tc.AddLeaderRegionWithReadInfo(11, 1, 24*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
opt.HotRegionCacheHitsThreshold = 0
c.Assert(tc.IsRegionHot(1), IsTrue)
c.Assert(tc.IsRegionHot(11), IsFalse)
c.Assert(tc.IsRegionHot(tc.GetRegion(1)), IsTrue)
c.Assert(tc.IsRegionHot(tc.GetRegion(11)), IsFalse)
// check randomly pick hot region
r := tc.RandHotRegionFromStore(2, statistics.ReadFlow)
c.Assert(r, NotNil)
c.Assert(r.GetID(), Equals, uint64(2))
// check hot items
stats := tc.HotSpotCache.RegionStats(statistics.ReadFlow)
c.Assert(len(stats), Equals, 3)
for _, s := range stats {
c.Assert(s.FlowBytes, Equals, uint64(512*1024))
c.Assert(len(stats), Equals, 2)
for _, ss := range stats {
for _, s := range ss {
c.Assert(s.FlowBytes, Equals, uint64(512*1024))
}
}
// Will transfer a hot region leader from store 1 to store 3, because the total count of hot
// peers on store 1 is larger than on the other stores.
@@ -1111,6 +1116,72 @@ func (s *testBalanceHotReadRegionSchedulerSuite) TestBalance(c *C) {
hb.Schedule(tc)
}

var _ = Suite(&testBalanceHotCacheSuite{})

type testBalanceHotCacheSuite struct{}

func (s *testBalanceHotCacheSuite) TestUpdateCache(c *C) {
opt := mockoption.NewScheduleOptions()
tc := mockcluster.NewCluster(opt)

// Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0.
tc.AddRegionStore(1, 3)
tc.AddRegionStore(2, 2)
tc.AddRegionStore(3, 2)
tc.AddRegionStore(4, 2)
tc.AddRegionStore(5, 0)

// Report store read bytes.
tc.UpdateStorageReadBytes(1, 75*1024*1024)
tc.UpdateStorageReadBytes(2, 45*1024*1024)
tc.UpdateStorageReadBytes(3, 45*1024*1024)
tc.UpdateStorageReadBytes(4, 60*1024*1024)
tc.UpdateStorageReadBytes(5, 0)

// For read flow
tc.AddLeaderRegionWithReadInfo(1, 1, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
tc.AddLeaderRegionWithReadInfo(2, 2, 512*1024*statistics.RegionHeartBeatReportInterval, 1, 3)
tc.AddLeaderRegionWithReadInfo(3, 1, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
// lower than hot read flow rate, but higher than write flow rate
tc.AddLeaderRegionWithReadInfo(11, 1, 24*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
opt.HotRegionCacheHitsThreshold = 0
stats := tc.RegionStats(statistics.ReadFlow)
c.Assert(len(stats[1]), Equals, 2)
c.Assert(len(stats[2]), Equals, 1)
c.Assert(len(stats[3]), Equals, 0)

tc.AddLeaderRegionWithReadInfo(3, 2, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
tc.AddLeaderRegionWithReadInfo(11, 1, 24*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
stats = tc.RegionStats(statistics.ReadFlow)

c.Assert(len(stats[1]), Equals, 1)
c.Assert(len(stats[2]), Equals, 2)
c.Assert(len(stats[3]), Equals, 0)

// For write flow
tc.UpdateStorageWrittenBytes(1, 60*1024*1024)
tc.UpdateStorageWrittenBytes(2, 30*1024*1024)
tc.UpdateStorageWrittenBytes(3, 60*1024*1024)
tc.UpdateStorageWrittenBytes(4, 30*1024*1024)
tc.UpdateStorageWrittenBytes(5, 0*1024*1024)
tc.AddLeaderRegionWithWriteInfo(4, 1, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
tc.AddLeaderRegionWithWriteInfo(5, 1, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 3)
tc.AddLeaderRegionWithWriteInfo(6, 1, 12*1024*statistics.RegionHeartBeatReportInterval, 2, 3)

stats = tc.RegionStats(statistics.WriteFlow)
c.Assert(len(stats[1]), Equals, 2)
c.Assert(len(stats[2]), Equals, 2)
c.Assert(len(stats[3]), Equals, 2)

tc.AddLeaderRegionWithWriteInfo(5, 1, 512*1024*statistics.RegionHeartBeatReportInterval, 2, 5)
stats = tc.RegionStats(statistics.WriteFlow)

c.Assert(len(stats[1]), Equals, 2)
c.Assert(len(stats[2]), Equals, 2)
c.Assert(len(stats[3]), Equals, 1)
c.Assert(len(stats[5]), Equals, 1)
}

var _ = Suite(&testScatterRangeLeaderSuite{})

type testScatterRangeLeaderSuite struct{}