From 47de2cdb8080351afab2c8f02ac84442257342d3 Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Fri, 15 Feb 2019 19:11:23 +0800
Subject: [PATCH] *: log format for schedule, scheduler, syncer, api (#1423)

* log format for schedule, scheduler, syncer, api

Signed-off-by: rleungx
---
 server/api/member.go                 |  7 ++++---
 server/api/redirector.go             | 12 +++++++-----
 server/api/util.go                   |  4 ++--
 server/schedule/filters.go           |  3 ---
 server/schedule/merge_checker.go     |  5 +++--
 server/schedule/mockcluster.go       |  5 +++--
 server/schedule/namespace_checker.go |  7 ++++---
 server/schedule/operator.go          | 14 +++++++-------
 server/schedule/replica_checker.go   | 19 +++++++++---------
 server/schedule/scheduler.go         |  5 +++--
 server/schedulers/adjacent_region.go |  5 +++--
 server/schedulers/balance_leader.go  | 29 +++++++++++++++-------------
 server/schedulers/balance_region.go  | 29 ++++++++++++++--------------
 server/schedulers/base_scheduler.go  |  4 ++--
 server/schedulers/hot_region.go      |  5 +++--
 server/schedulers/label.go           |  9 +++++----
 server/schedulers/shuffle_region.go  |  5 +++--
 17 files changed, 90 insertions(+), 77 deletions(-)

diff --git a/server/api/member.go b/server/api/member.go
index 139caa75870..75eea86edb8 100644
--- a/server/api/member.go
+++ b/server/api/member.go
@@ -21,11 +21,12 @@ import (
 
 	"github.com/gorilla/mux"
 	"github.com/pingcap/kvproto/pkg/pdpb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/pkg/etcdutil"
 	"github.com/pingcap/pd/server"
 	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
 	"github.com/unrolled/render"
+	"go.uber.org/zap"
 )
 
 type memberHandler struct {
@@ -58,12 +59,12 @@ func (h *memberHandler) getMembers() (*pdpb.GetMembersResponse, error) {
 	// Fill leader priorities.
 	for _, m := range members.GetMembers() {
 		if h.svr.GetEtcdLeader() == 0 {
-			log.Warnf("no etcd leader, skip get leader priority, member: %v", m.GetMemberId())
+			log.Warn("no etcd leader, skip get leader priority", zap.Uint64("member", m.GetMemberId()))
 			continue
 		}
 		leaderPriority, e := h.svr.GetMemberLeaderPriority(m.GetMemberId())
 		if e != nil {
-			log.Errorf("failed to load leader priority, member: %v, err: %v", m.GetMemberId(), e)
+			log.Error("failed to load leader priority", zap.Uint64("member", m.GetMemberId()), zap.Error(e))
 			continue
 		}
 		m.LeaderPriority = int32(leaderPriority)
diff --git a/server/api/redirector.go b/server/api/redirector.go
index d3fff1cd153..a5f52be6703 100644
--- a/server/api/redirector.go
+++ b/server/api/redirector.go
@@ -14,13 +14,15 @@ package api
 
 import (
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
 
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 const (
@@ -48,7 +50,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http
 	// Prevent more than one redirection.
if name := r.Header.Get(redirectorHeader); len(name) != 0 { - log.Errorf("redirect from %v, but %v is not leader", name, h.s.Name()) + log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name())) http.Error(w, errRedirectToNotLeader, http.StatusInternalServerError) return } @@ -93,21 +95,21 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request) resp, err := p.client.Do(r) if err != nil { - log.Error(err) + log.Error(fmt.Sprintf("%+v", err)) continue } b, err := ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { - log.Error(err) + log.Error(fmt.Sprintf("%+v", err)) continue } copyHeader(w.Header(), resp.Header) w.WriteHeader(resp.StatusCode) if _, err := w.Write(b); err != nil { - log.Error(err) + log.Error(fmt.Sprintf("%+v", err)) continue } diff --git a/server/api/util.go b/server/api/util.go index 386e07aaa38..ebf7bc62dab 100644 --- a/server/api/util.go +++ b/server/api/util.go @@ -20,10 +20,10 @@ import ( "io/ioutil" "net/http" + log "github.com/pingcap/log" "github.com/pingcap/pd/pkg/apiutil" errcode "github.com/pingcap/pd/pkg/error_code" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" "github.com/unrolled/render" ) @@ -42,7 +42,7 @@ var dialClient = &http.Client{ // If the error is nil, this also responds with a 500 and logs at the error level. func errorResp(rd *render.Render, w http.ResponseWriter, err error) { if err == nil { - log.Errorf("nil given to errorResp") + log.Error("nil is given to errorResp") rd.JSON(w, http.StatusInternalServerError, "nil error") return } diff --git a/server/schedule/filters.go b/server/schedule/filters.go index 3cad8bb2460..d8d75f2887e 100644 --- a/server/schedule/filters.go +++ b/server/schedule/filters.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" ) //revive:disable:unused-parameter @@ -38,7 +37,6 @@ func FilterSource(opt Options, store *core.StoreInfo, filters []Filter) bool { storeID := fmt.Sprintf("store%d", store.GetId()) for _, filter := range filters { if filter.FilterSource(opt, store) { - log.Debugf("[filter %T] filters store %v from source", filter, store) filterCounter.WithLabelValues("filter-source", storeID, filter.Type()).Inc() return true } @@ -51,7 +49,6 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool { storeID := fmt.Sprintf("store%d", store.GetId()) for _, filter := range filters { if filter.FilterTarget(opt, store) { - log.Debugf("[filter %T] filters store %v from target", filter, store) filterCounter.WithLabelValues("filter-target", storeID, filter.Type()).Inc() return true } diff --git a/server/schedule/merge_checker.go b/server/schedule/merge_checker.go index 46e5a621a98..f25abddc2b6 100644 --- a/server/schedule/merge_checker.go +++ b/server/schedule/merge_checker.go @@ -16,10 +16,11 @@ package schedule import ( "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // As region split history is not persisted. 
We put a special marker into @@ -109,7 +110,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) (*Operator, *Operator) { } checkerCounter.WithLabelValues("merge_checker", "new_operator").Inc() - log.Debugf("try to merge region {%v} into region {%v}", region, target) + log.Debug("try to merge region", zap.Reflect("from", region.GetMeta()), zap.Reflect("to", target.GetMeta())) op1, op2, err := CreateMergeRegionOperator("merge-region", m.cluster, region, target, OpMerge) if err != nil { return nil, nil diff --git a/server/schedule/mockcluster.go b/server/schedule/mockcluster.go index 7f1fb08e66c..95ea98d7470 100644 --- a/server/schedule/mockcluster.go +++ b/server/schedule/mockcluster.go @@ -19,9 +19,10 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // MockCluster is used to mock clusterInfo for test use @@ -74,7 +75,7 @@ func (mc *MockCluster) RandHotRegionFromStore(store uint64, kind FlowKind) *core func (mc *MockCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { peerID, err := mc.allocID() if err != nil { - log.Errorf("failed to alloc peer: %v", err) + log.Error("failed to alloc peer", zap.Error(err)) return nil, err } peer := &metapb.Peer{ diff --git a/server/schedule/namespace_checker.go b/server/schedule/namespace_checker.go index 9a87ef0fb65..f2af08378d7 100644 --- a/server/schedule/namespace_checker.go +++ b/server/schedule/namespace_checker.go @@ -15,9 +15,10 @@ package schedule import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // NamespaceChecker ensures region to go to the right place. 
@@ -63,7 +64,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
 		if n.isExists(targetStores, peer.StoreId) {
 			continue
 		}
-		log.Debugf("[region %d] peer %v is not located in namespace target stores", region.GetID(), peer)
+		log.Debug("peer is not located in namespace target stores", zap.Uint64("region-id", region.GetID()), zap.Reflect("peer", peer))
 		newPeer := n.SelectBestPeerToRelocate(region, targetStores)
 		if newPeer == nil {
 			checkerCounter.WithLabelValues("namespace_checker", "no_target_peer").Inc()
@@ -86,7 +87,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
 func (n *NamespaceChecker) SelectBestPeerToRelocate(region *core.RegionInfo, targets []*core.StoreInfo) *metapb.Peer {
 	storeID := n.SelectBestStoreToRelocate(region, targets)
 	if storeID == 0 {
-		log.Debugf("[region %d] has no best store to relocate", region.GetID())
+		log.Debug("region has no best store to relocate", zap.Uint64("region-id", region.GetID()))
 		return nil
 	}
 	newPeer, err := n.cluster.AllocPeer(storeID)
diff --git a/server/schedule/operator.go b/server/schedule/operator.go
index 92b76ea43ec..861e2118d1e 100644
--- a/server/schedule/operator.go
+++ b/server/schedule/operator.go
@@ -23,9 +23,9 @@ import (
 
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
-	log "github.com/sirupsen/logrus"
-
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
+	"go.uber.org/zap"
 )
 
 const (
@@ -82,7 +82,7 @@ func (ap AddPeer) String() string {
 func (ap AddPeer) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreVoter(ap.ToStore); p != nil {
 		if p.GetId() != ap.PeerID {
-			log.Warnf("expect %v, but obtain voter %v", ap.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", ap.String()), zap.Uint64("obtain-voter", p.GetId()))
 			return false
 		}
 		return region.GetPendingVoter(p.GetId()) == nil
@@ -111,7 +111,7 @@ func (al AddLearner) String() string {
 func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreLearner(al.ToStore); p != nil {
 		if p.GetId() != al.PeerID {
-			log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", al.String()), zap.Uint64("obtain-learner", p.GetId()))
 			return false
 		}
 		return region.GetPendingLearner(p.GetId()) == nil
@@ -140,7 +140,7 @@ func (pl PromoteLearner) String() string {
 func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreVoter(pl.ToStore); p != nil {
 		if p.GetId() != pl.PeerID {
-			log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", pl.String()), zap.Uint64("obtain-voter", p.GetId()))
 		}
 		return p.GetId() == pl.PeerID
 	}
@@ -477,7 +477,7 @@ func removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64, f
 	}
 	if len(steps) == 0 {
 		err = errors.New("no suitable follower to become region leader")
-		log.Debugf("fail to create remove peer operator, region: %v, err: %v", region.GetID(), err)
+		log.Debug("fail to create remove peer operator", zap.Uint64("region-id", region.GetID()), zap.Error(err))
 		return
 	}
 }
@@ -540,7 +540,7 @@ func matchPeerSteps(cluster Cluster, source *core.RegionInfo, target *core.Regio
 
 		peer, err := cluster.AllocPeer(storeID)
 		if err != nil {
-			log.Debugf("peer alloc failed: %v", err)
+			log.Debug("peer alloc failed", zap.Error(err))
 			return nil, kind, err
 		}
 		if cluster.IsRaftLearnerEnabled() {
diff --git a/server/schedule/replica_checker.go b/server/schedule/replica_checker.go
index ddb198d05c9..98ce82a7eed 100644
--- a/server/schedule/replica_checker.go
+++ b/server/schedule/replica_checker.go
@@ -17,9 +17,10 @@ import (
 	"fmt"
 
 	"github.com/pingcap/kvproto/pkg/metapb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
 	"github.com/pingcap/pd/server/namespace"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 // ReplicaChecker ensures region has the best replicas.
@@ -58,7 +59,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {
 	}
 
 	if len(region.GetPeers()) < r.cluster.GetMaxReplicas() && r.cluster.IsMakeUpReplicaEnabled() {
-		log.Debugf("[region %d] has %d peers fewer than max replicas", region.GetID(), len(region.GetPeers()))
+		log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers())))
 		newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter())
 		if newPeer == nil {
 			checkerCounter.WithLabelValues("replica_checker", "no_target_store").Inc()
@@ -82,7 +83,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {
 	// when adding a learner peer, the number of peers will exceed max replicas for a while,
 	// just comparing the number of voters to avoid too many cancel add operator logs.
 	if len(region.GetVoters()) > r.cluster.GetMaxReplicas() && r.cluster.IsRemoveExtraReplicaEnabled() {
-		log.Debugf("[region %d] has %d peers more than max replicas", region.GetID(), len(region.GetPeers()))
+		log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers())))
 		oldPeer, _ := r.selectWorstPeer(region)
 		if oldPeer == nil {
 			checkerCounter.WithLabelValues("replica_checker", "no_worst_peer").Inc()
@@ -111,7 +112,7 @@ func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, old
 func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, filters ...Filter) (*metapb.Peer, float64) {
 	storeID, score := r.selectBestStoreToAddReplica(region, filters...)
 	if storeID == 0 {
-		log.Debugf("[region %d] no best store to add replica", region.GetID())
+		log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID()))
 		return nil, 0
 	}
 	newPeer, err := r.cluster.AllocPeer(storeID)
@@ -149,7 +150,7 @@ func (r *ReplicaChecker) selectWorstPeer(region *core.RegionInfo) (*metapb.Peer,
 	selector := NewReplicaSelector(regionStores, r.cluster.GetLocationLabels(), r.filters...)
worstStore := selector.SelectSource(r.cluster, regionStores) if worstStore == nil { - log.Debugf("[region %d] no worst store", region.GetID()) + log.Debug("no worst store", zap.Uint64("region-id", region.GetID())) return nil, 0 } return region.GetStorePeer(worstStore.GetId()), DistinctScore(r.cluster.GetLocationLabels(), regionStores, worstStore) @@ -167,7 +168,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *Operator { } store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.DownTime() < r.cluster.GetMaxStoreDownTime() { @@ -195,7 +196,7 @@ func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *Operator { for _, peer := range region.GetPeers() { store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.IsUp() { @@ -225,7 +226,7 @@ func (r *ReplicaChecker) checkBestReplacement(region *core.RegionInfo) *Operator } // Make sure the new peer is better than the old peer. if newScore <= oldScore { - log.Debugf("[region %d] newScore %f is not better than oldScore %f", region.GetID(), newScore, oldScore) + log.Debug("no better peer", zap.Uint64("region-id", region.GetID()), zap.Float64("new-score", newScore), zap.Float64("old-score", oldScore)) checkerCounter.WithLabelValues("replica_checker", "not_better").Inc() return nil } @@ -270,7 +271,7 @@ func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, sta storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter()) if storeID == 0 { - log.Debugf("[region %d] no best store to add replica", region.GetID()) + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) return nil } newPeer, err := r.cluster.AllocPeer(storeID) diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go index 24bc34ff47a..50800afa700 100644 --- a/server/schedule/scheduler.go +++ b/server/schedule/scheduler.go @@ -18,9 +18,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // Cluster provides an overview of a cluster's regions distribution. @@ -77,7 +78,7 @@ var schedulerMap = make(map[string]CreateSchedulerFunc) // func of a package. 
func RegisterScheduler(name string, createFn CreateSchedulerFunc) { if _, ok := schedulerMap[name]; ok { - log.Fatalf("duplicated scheduler name: %v", name) + log.Fatal("duplicated scheduler", zap.String("name", name)) } schedulerMap[name] = createFn } diff --git a/server/schedulers/adjacent_region.go b/server/schedulers/adjacent_region.go index 660de6ae4cf..0636873eb21 100644 --- a/server/schedulers/adjacent_region.go +++ b/server/schedulers/adjacent_region.go @@ -18,10 +18,11 @@ import ( "strconv" "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -194,7 +195,7 @@ func (l *balanceAdjacentRegionScheduler) process(cluster schedule.Cluster) []*sc defer func() { if l.cacheRegions.len() < 0 { - log.Fatalf("[%s]the cache overflow should never happen", l.GetName()) + log.Fatal("cache overflow", zap.String("scheduler", l.GetName())) } l.cacheRegions.head = head + 1 l.lastKey = r2.GetStartKey() diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go index 566f8719279..09f02b9e662 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedulers/balance_leader.go @@ -17,10 +17,11 @@ import ( "fmt" "strconv" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -87,7 +88,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence return nil } - log.Debugf("[%s] store%d has the max leader score, store%d has the min leader score", l.GetName(), source.GetId(), target.GetId()) + log.Debug("store leader score", zap.String("scheduler", l.GetName()), zap.Uint64("max-store", source.GetId()), zap.Uint64("min-store", target.GetId())) sourceStoreLabel := strconv.FormatUint(source.GetId(), 10) targetStoreLabel := strconv.FormatUint(target.GetId(), 10) balanceLeaderCounter.WithLabelValues("high_score", sourceStoreLabel).Inc() @@ -105,7 +106,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence } // If no operator can be created for the selected stores, ignore them for a while. 
- log.Debugf("[%s] no operator created for selected store%d and store%d", l.GetName(), source.GetId(), target.GetId()) + log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64("source", source.GetId()), zap.Uint64("target", target.GetId())) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(source.GetId(), 10)).Inc() l.taintStores.Put(source.GetId()) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(target.GetId(), 10)).Inc() @@ -116,13 +117,13 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster, opInfluence func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandLeaderRegion(source.GetId(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no leader", l.GetName(), source.GetId()) + log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", source.GetId())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader_region").Inc() return nil } target := l.selector.SelectTarget(cluster, cluster.GetFollowerStores(region)) if target == nil { - log.Debugf("[%s] region %d has no target store", l.GetName(), region.GetID()) + log.Debug("region has no target store", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_target_store").Inc() return nil } @@ -132,13 +133,13 @@ func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, clust func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandFollowerRegion(target.GetId(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no follower", l.GetName(), target.GetId()) + log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", target.GetId())) schedulerCounter.WithLabelValues(l.GetName(), "no_follower_region").Inc() return nil } source := cluster.GetStore(region.GetLeader().GetStoreId()) if source == nil { - log.Debugf("[%s] region %d has no leader", l.GetName(), region.GetID()) + log.Debug("region has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader").Inc() return nil } @@ -147,17 +148,19 @@ func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluste func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { if cluster.IsRegionHot(region.GetID()) { - log.Debugf("[%s] region %d is hot region, ignore it", l.GetName(), region.GetID()) + log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc() return nil } if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) { - log.Debugf("[%s] skip balance region %d, source %d to target %d, source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v", - l.GetName(), region.GetID(), source.GetId(), target.GetId(), - source.LeaderSize, source.LeaderScore(0), 
opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.LeaderKind), - target.LeaderSize, target.LeaderScore(0), opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.LeaderKind), - cluster.GetAverageRegionSize()) + log.Debug("skip balance region", + zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId()), + zap.Int64("source-size", source.LeaderSize), zap.Float64("source-score", source.LeaderScore(0)), + zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.LeaderKind)), + zap.Int64("target-size", target.LeaderSize), zap.Float64("target-score", target.LeaderScore(0)), + zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.LeaderKind)), + zap.Int64("average-region-size", cluster.GetAverageRegionSize())) schedulerCounter.WithLabelValues(l.GetName(), "skip").Inc() return nil } diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go index 22aa1a6e49d..99bebe78b53 100644 --- a/server/schedulers/balance_region.go +++ b/server/schedulers/balance_region.go @@ -18,10 +18,11 @@ import ( "strconv" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -82,8 +83,8 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence return nil } - log.Debugf("[%s] store%d has the max region score", s.GetName(), source.GetId()) sourceLabel := strconv.FormatUint(source.GetId(), 10) + log.Debug("store has the max region score", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetId())) balanceRegionCounter.WithLabelValues("source_store", sourceLabel).Inc() var hasPotentialTarget bool @@ -96,18 +97,18 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence schedulerCounter.WithLabelValues(s.GetName(), "no_region").Inc() continue } - log.Debugf("[%s] select region%d", s.GetName(), region.GetID()) + log.Debug("select region", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) // We don't schedule region with abnormal number of replicas. if len(region.GetPeers()) != cluster.GetMaxReplicas() { - log.Debugf("[%s] region%d has abnormal replica count", s.GetName(), region.GetID()) + log.Debug("region has abnormal replica count", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "abnormal_replica").Inc() continue } // Skip hot regions. if cluster.IsRegionHot(region.GetID()) { - log.Debugf("[%s] region%d is hot", s.GetName(), region.GetID()) + log.Debug("region is hot", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "region_hot").Inc() continue } @@ -126,7 +127,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster, opInfluence if !hasPotentialTarget { // If no potential target store can be found for the selected store, ignore it for a while. 
- log.Debugf("[%s] no operator created for selected store%d", s.GetName(), source.GetId()) + log.Debug("no operator created for selected store", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetId())) balanceRegionCounter.WithLabelValues("add_taint", sourceLabel).Inc() s.taintStores.Put(source.GetId()) } @@ -148,16 +149,16 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region * } target := cluster.GetStore(storeID) - log.Debugf("[region %d] source store id is %v, target store id is %v", region.GetID(), source.GetId(), target.GetId()) + log.Debug("", zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId())) if !shouldBalance(cluster, source, target, region, core.RegionKind, opInfluence) { - log.Debugf("[%s] skip balance region %d, source %d to target %d ,source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v", - s.GetName(), region.GetID(), source.GetId(), target.GetId(), - source.RegionSize, source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0), - opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.RegionKind), - target.RegionSize, target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0), - opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.RegionKind), - cluster.GetAverageRegionSize()) + log.Debug("skip balance region", + zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetId()), zap.Uint64("target-store", target.GetId()), + zap.Int64("source-size", source.RegionSize), zap.Float64("source-score", source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)), + zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetId()).ResourceSize(core.RegionKind)), + zap.Int64("target-size", target.RegionSize), zap.Float64("target-score", target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)), + zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetId()).ResourceSize(core.RegionKind)), + zap.Int64("average-region-size", cluster.GetAverageRegionSize())) schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc() return nil } diff --git a/server/schedulers/base_scheduler.go b/server/schedulers/base_scheduler.go index 2352c7b4488..5ab355ca8b0 100644 --- a/server/schedulers/base_scheduler.go +++ b/server/schedulers/base_scheduler.go @@ -16,8 +16,8 @@ package schedulers import ( "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" ) // options for interval of schedulers @@ -46,7 +46,7 @@ func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowth case zeroGrowth: return x default: - log.Fatal("unKnow interval growth type") + log.Fatal("unknown interval growth type") } return 0 } diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go index 5e2abb73e8f..5df826601ee 100644 --- a/server/schedulers/hot_region.go +++ b/server/schedulers/hot_region.go @@ -20,9 +20,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -324,7 +325,7 @@ func (h *balanceHotRegionsScheduler) balanceByPeer(cluster schedule.Cluster, sto // 
because it doesn't exist in the system right now. destPeer, err := cluster.AllocPeer(destStoreID) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil, nil, nil } diff --git a/server/schedulers/label.go b/server/schedulers/label.go index d0d8c913327..e06d53f437a 100644 --- a/server/schedulers/label.go +++ b/server/schedulers/label.go @@ -14,9 +14,10 @@ package schedulers import ( + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -63,10 +64,10 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster, opInfluence schedule schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc() return nil } - log.Debugf("label scheduler reject leader store list: %v", rejectLeaderStores) + log.Debug("label scheduler reject leader store list", zap.Reflect("stores", rejectLeaderStores)) for id := range rejectLeaderStores { if region := cluster.RandLeaderRegion(id); region != nil { - log.Debugf("label scheduler selects region %d to transfer leader", region.GetID()) + log.Debug("label scheduler selects region to transfer leader", zap.Uint64("region-id", region.GetID())) excludeStores := make(map[uint64]struct{}) for _, p := range region.GetDownPeers() { excludeStores[p.GetPeer().GetStoreId()] = struct{}{} @@ -77,7 +78,7 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster, opInfluence schedule filter := schedule.NewExcludedFilter(nil, excludeStores) target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), filter) if target == nil { - log.Debugf("label scheduler no target found for region %d", region.GetID()) + log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "no_target").Inc() continue } diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go index 07030081c17..e24271f935b 100644 --- a/server/schedulers/shuffle_region.go +++ b/server/schedulers/shuffle_region.go @@ -15,9 +15,10 @@ package schedulers import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -110,7 +111,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, filte newPeer, err := cluster.AllocPeer(target.GetId()) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil }
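
Every hunk above applies the same mechanical conversion: a logrus printf-style call becomes a structured call through pingcap/log (a thin wrapper over zap), where the interpolated format string turns into a constant message and each formatted value becomes a typed zap field. A minimal, self-contained sketch of the pattern follows; the region/store IDs and the error are placeholder values, not PD types:

```go
package main

import (
	"fmt"

	log "github.com/pingcap/log"
	"github.com/pkg/errors"
	"go.uber.org/zap"
)

func main() {
	regionID, storeID := uint64(1), uint64(2)

	// Before: log.Debugf("[region %d] no best store to add replica", regionID)
	// After: a constant message plus typed, machine-indexable fields.
	log.Info("no best store to add replica",
		zap.Uint64("region-id", regionID),
		zap.Uint64("store-id", storeID))

	// Errors are attached with zap.Error rather than formatted into the message.
	err := errors.New("leader priority not found")
	log.Error("failed to load leader priority", zap.Error(err))

	// Where the %+v stack trace from pkg/errors is still wanted
	// (as in redirector.go above), it is formatted into the message itself.
	log.Error(fmt.Sprintf("%+v", err))
}
```

One consequence of the structured form is visible in the balance schedulers above: field constructors must match the Go types (LeaderSize and RegionSize are int64, LeaderScore and RegionScore return float64), a distinction that Debugf's %v verb used to hide.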