Skip to content

Commit

Permalink
*: log format for schedule, scheduler, syncer, api (tikv#1423)
Browse files Browse the repository at this point in the history
* log format for schedule, scheduler, syncer, api

Signed-off-by: rleungx <rleungx@gmail.com>
  • Loading branch information
rleungx authored and nolouch committed Jul 10, 2019
1 parent e6e164a commit 47de2cd
Show file tree
Hide file tree
Showing 17 changed files with 90 additions and 77 deletions.
7 changes: 4 additions & 3 deletions server/api/member.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,12 @@ import (

"github.com/gorilla/mux"
"github.com/pingcap/kvproto/pkg/pdpb"
log "github.com/pingcap/log"
"github.com/pingcap/pd/pkg/etcdutil"
"github.com/pingcap/pd/server"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/unrolled/render"
"go.uber.org/zap"
)

type memberHandler struct {
Expand Down Expand Up @@ -58,12 +59,12 @@ func (h *memberHandler) getMembers() (*pdpb.GetMembersResponse, error) {
// Fill leader priorities.
for _, m := range members.GetMembers() {
if h.svr.GetEtcdLeader() == 0 {
log.Warnf("no etcd leader, skip get leader priority, member: %v", m.GetMemberId())
log.Warn("no etcd leader, skip get leader priority", zap.Uint64("member", m.GetMemberId()))
continue
}
leaderPriority, e := h.svr.GetMemberLeaderPriority(m.GetMemberId())
if e != nil {
log.Errorf("failed to load leader priority, member: %v, err: %v", m.GetMemberId(), e)
log.Error("failed to load leader priority", zap.Uint64("member", m.GetMemberId()), zap.Error(e))
continue
}
m.LeaderPriority = int32(leaderPriority)
Expand Down
12 changes: 7 additions & 5 deletions server/api/redirector.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,15 @@
package api

import (
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"

log "github.com/pingcap/log"
"github.com/pingcap/pd/server"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

const (
Expand Down Expand Up @@ -48,7 +50,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http

// Prevent more than one redirection.
if name := r.Header.Get(redirectorHeader); len(name) != 0 {
log.Errorf("redirect from %v, but %v is not leader", name, h.s.Name())
log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name()))
http.Error(w, errRedirectToNotLeader, http.StatusInternalServerError)
return
}
Expand Down Expand Up @@ -93,21 +95,21 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request)

resp, err := p.client.Do(r)
if err != nil {
log.Error(err)
log.Error(fmt.Sprintf("%+v", err))
continue
}

b, err := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if err != nil {
log.Error(err)
log.Error(fmt.Sprintf("%+v", err))
continue
}

copyHeader(w.Header(), resp.Header)
w.WriteHeader(resp.StatusCode)
if _, err := w.Write(b); err != nil {
log.Error(err)
log.Error(fmt.Sprintf("%+v", err))
continue
}

Expand Down
4 changes: 2 additions & 2 deletions server/api/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ import (
"io/ioutil"
"net/http"

log "github.com/pingcap/log"
"github.com/pingcap/pd/pkg/apiutil"
errcode "github.com/pingcap/pd/pkg/error_code"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/unrolled/render"
)

Expand All @@ -42,7 +42,7 @@ var dialClient = &http.Client{
// If the error is nil, this also responds with a 500 and logs at the error level.
func errorResp(rd *render.Render, w http.ResponseWriter, err error) {
if err == nil {
log.Errorf("nil given to errorResp")
log.Error("nil is given to errorResp")
rd.JSON(w, http.StatusInternalServerError, "nil error")
return
}
Expand Down
3 changes: 0 additions & 3 deletions server/schedule/filters.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ import (
"github.com/pingcap/pd/server/cache"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/namespace"
log "github.com/sirupsen/logrus"
)

//revive:disable:unused-parameter
Expand All @@ -38,7 +37,6 @@ func FilterSource(opt Options, store *core.StoreInfo, filters []Filter) bool {
storeID := fmt.Sprintf("store%d", store.GetId())
for _, filter := range filters {
if filter.FilterSource(opt, store) {
log.Debugf("[filter %T] filters store %v from source", filter, store)
filterCounter.WithLabelValues("filter-source", storeID, filter.Type()).Inc()
return true
}
Expand All @@ -51,7 +49,6 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool {
storeID := fmt.Sprintf("store%d", store.GetId())
for _, filter := range filters {
if filter.FilterTarget(opt, store) {
log.Debugf("[filter %T] filters store %v from target", filter, store)
filterCounter.WithLabelValues("filter-target", storeID, filter.Type()).Inc()
return true
}
Expand Down
5 changes: 3 additions & 2 deletions server/schedule/merge_checker.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,11 @@ package schedule
import (
"time"

log "github.com/pingcap/log"
"github.com/pingcap/pd/server/cache"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/namespace"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// As region split history is not persisted. We put a special marker into
Expand Down Expand Up @@ -109,7 +110,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) (*Operator, *Operator) {
}

checkerCounter.WithLabelValues("merge_checker", "new_operator").Inc()
log.Debugf("try to merge region {%v} into region {%v}", region, target)
log.Debug("try to merge region", zap.Reflect("from", region.GetMeta()), zap.Reflect("to", target.GetMeta()))
op1, op2, err := CreateMergeRegionOperator("merge-region", m.cluster, region, target, OpMerge)
if err != nil {
return nil, nil
Expand Down
5 changes: 3 additions & 2 deletions server/schedule/mockcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,9 +19,10 @@ import (

"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/namespace"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// MockCluster is used to mock clusterInfo for test use
Expand Down Expand Up @@ -74,7 +75,7 @@ func (mc *MockCluster) RandHotRegionFromStore(store uint64, kind FlowKind) *core
func (mc *MockCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) {
peerID, err := mc.allocID()
if err != nil {
log.Errorf("failed to alloc peer: %v", err)
log.Error("failed to alloc peer", zap.Error(err))
return nil, err
}
peer := &metapb.Peer{
Expand Down
7 changes: 4 additions & 3 deletions server/schedule/namespace_checker.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,10 @@ package schedule

import (
"github.com/pingcap/kvproto/pkg/metapb"
log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/namespace"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// NamespaceChecker ensures region to go to the right place.
Expand Down Expand Up @@ -63,7 +64,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
if n.isExists(targetStores, peer.StoreId) {
continue
}
log.Debugf("[region %d] peer %v is not located in namespace target stores", region.GetID(), peer)
log.Debug("peer is not located in namespace target stores", zap.Uint64("region-id", region.GetID()), zap.Reflect("peer", peer))
newPeer := n.SelectBestPeerToRelocate(region, targetStores)
if newPeer == nil {
checkerCounter.WithLabelValues("namespace_checker", "no_target_peer").Inc()
Expand All @@ -86,7 +87,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
func (n *NamespaceChecker) SelectBestPeerToRelocate(region *core.RegionInfo, targets []*core.StoreInfo) *metapb.Peer {
storeID := n.SelectBestStoreToRelocate(region, targets)
if storeID == 0 {
log.Debugf("[region %d] has no best store to relocate", region.GetID())
log.Debug("has no best store to relocate", zap.Uint64("region-id", region.GetID()))
return nil
}
newPeer, err := n.cluster.AllocPeer(storeID)
Expand Down
14 changes: 7 additions & 7 deletions server/schedule/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,9 @@ import (

"github.com/pingcap/kvproto/pkg/metapb"
"github.com/pingcap/kvproto/pkg/pdpb"
log "github.com/sirupsen/logrus"

log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"go.uber.org/zap"
)

const (
Expand Down Expand Up @@ -82,7 +82,7 @@ func (ap AddPeer) String() string {
func (ap AddPeer) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreVoter(ap.ToStore); p != nil {
if p.GetId() != ap.PeerID {
log.Warnf("expect %v, but obtain voter %v", ap.String(), p.GetId())
log.Warn("obtain unexpected peer", zap.String("expect", ap.String()), zap.Uint64("obtain-voter", p.GetId()))
return false
}
return region.GetPendingVoter(p.GetId()) == nil
Expand Down Expand Up @@ -111,7 +111,7 @@ func (al AddLearner) String() string {
func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreLearner(al.ToStore); p != nil {
if p.GetId() != al.PeerID {
log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
log.Warn("obtain unexpected peer", zap.String("expect", al.String()), zap.Uint64("obtain-learner", p.GetId()))
return false
}
return region.GetPendingLearner(p.GetId()) == nil
Expand Down Expand Up @@ -140,7 +140,7 @@ func (pl PromoteLearner) String() string {
func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
if p := region.GetStoreVoter(pl.ToStore); p != nil {
if p.GetId() != pl.PeerID {
log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
log.Warn("obtain unexpected peer", zap.String("expect", pl.String()), zap.Uint64("obtain-voter", p.GetId()))
}
return p.GetId() == pl.PeerID
}
Expand Down Expand Up @@ -477,7 +477,7 @@ func removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64, f
}
if len(steps) == 0 {
err = errors.New("no suitable follower to become region leader")
log.Debugf("fail to create remove peer operator, region: %v, err: %v", region.GetID(), err)
log.Debug("fail to create remove peer operator", zap.Uint64("region-id", region.GetID()), zap.Error(err))
return
}
}
Expand Down Expand Up @@ -540,7 +540,7 @@ func matchPeerSteps(cluster Cluster, source *core.RegionInfo, target *core.Regio

peer, err := cluster.AllocPeer(storeID)
if err != nil {
log.Debugf("peer alloc failed: %v", err)
log.Debug("peer alloc failed", zap.Error(err))
return nil, kind, err
}
if cluster.IsRaftLearnerEnabled() {
Expand Down
19 changes: 10 additions & 9 deletions server/schedule/replica_checker.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,10 @@ import (
"fmt"

"github.com/pingcap/kvproto/pkg/metapb"
log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/namespace"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// ReplicaChecker ensures region has the best replicas.
Expand Down Expand Up @@ -58,7 +59,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {
}

if len(region.GetPeers()) < r.cluster.GetMaxReplicas() && r.cluster.IsMakeUpReplicaEnabled() {
log.Debugf("[region %d] has %d peers fewer than max replicas", region.GetID(), len(region.GetPeers()))
log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers())))
newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter())
if newPeer == nil {
checkerCounter.WithLabelValues("replica_checker", "no_target_store").Inc()
Expand All @@ -82,7 +83,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator {
// when add learner peer, the number of peer will exceed max replicas for a while,
// just comparing the the number of voters to avoid too many cancel add operator log.
if len(region.GetVoters()) > r.cluster.GetMaxReplicas() && r.cluster.IsRemoveExtraReplicaEnabled() {
log.Debugf("[region %d] has %d peers more than max replicas", region.GetID(), len(region.GetPeers()))
log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers())))
oldPeer, _ := r.selectWorstPeer(region)
if oldPeer == nil {
checkerCounter.WithLabelValues("replica_checker", "no_worst_peer").Inc()
Expand Down Expand Up @@ -111,7 +112,7 @@ func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, old
func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, filters ...Filter) (*metapb.Peer, float64) {
storeID, score := r.selectBestStoreToAddReplica(region, filters...)
if storeID == 0 {
log.Debugf("[region %d] no best store to add replica", region.GetID())
log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID()))
return nil, 0
}
newPeer, err := r.cluster.AllocPeer(storeID)
Expand Down Expand Up @@ -149,7 +150,7 @@ func (r *ReplicaChecker) selectWorstPeer(region *core.RegionInfo) (*metapb.Peer,
selector := NewReplicaSelector(regionStores, r.cluster.GetLocationLabels(), r.filters...)
worstStore := selector.SelectSource(r.cluster, regionStores)
if worstStore == nil {
log.Debugf("[region %d] no worst store", region.GetID())
log.Debug("no worst store", zap.Uint64("region-id", region.GetID()))
return nil, 0
}
return region.GetStorePeer(worstStore.GetId()), DistinctScore(r.cluster.GetLocationLabels(), regionStores, worstStore)
Expand All @@ -167,7 +168,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *Operator {
}
store := r.cluster.GetStore(peer.GetStoreId())
if store == nil {
log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId())
log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId()))
return nil
}
if store.DownTime() < r.cluster.GetMaxStoreDownTime() {
Expand Down Expand Up @@ -195,7 +196,7 @@ func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *Operator {
for _, peer := range region.GetPeers() {
store := r.cluster.GetStore(peer.GetStoreId())
if store == nil {
log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId())
log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId()))
return nil
}
if store.IsUp() {
Expand Down Expand Up @@ -225,7 +226,7 @@ func (r *ReplicaChecker) checkBestReplacement(region *core.RegionInfo) *Operator
}
// Make sure the new peer is better than the old peer.
if newScore <= oldScore {
log.Debugf("[region %d] newScore %f is not better than oldScore %f", region.GetID(), newScore, oldScore)
log.Debug("no better peer", zap.Uint64("region-id", region.GetID()), zap.Float64("new-score", newScore), zap.Float64("old-score", oldScore))
checkerCounter.WithLabelValues("replica_checker", "not_better").Inc()
return nil
}
Expand Down Expand Up @@ -270,7 +271,7 @@ func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, sta

storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter())
if storeID == 0 {
log.Debugf("[region %d] no best store to add replica", region.GetID())
log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID()))
return nil
}
newPeer, err := r.cluster.AllocPeer(storeID)
Expand Down
5 changes: 3 additions & 2 deletions server/schedule/scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,10 @@ import (
"time"

"github.com/pingcap/kvproto/pkg/metapb"
log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

// Cluster provides an overview of a cluster's regions distribution.
Expand Down Expand Up @@ -77,7 +78,7 @@ var schedulerMap = make(map[string]CreateSchedulerFunc)
// func of a package.
func RegisterScheduler(name string, createFn CreateSchedulerFunc) {
if _, ok := schedulerMap[name]; ok {
log.Fatalf("duplicated scheduler name: %v", name)
log.Fatal("duplicated scheduler", zap.String("name", name))
}
schedulerMap[name] = createFn
}
Expand Down
5 changes: 3 additions & 2 deletions server/schedulers/adjacent_region.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,11 @@ import (
"strconv"
"time"

log "github.com/pingcap/log"
"github.com/pingcap/pd/server/core"
"github.com/pingcap/pd/server/schedule"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
)

const (
Expand Down Expand Up @@ -194,7 +195,7 @@ func (l *balanceAdjacentRegionScheduler) process(cluster schedule.Cluster) []*sc

defer func() {
if l.cacheRegions.len() < 0 {
log.Fatalf("[%s]the cache overflow should never happen", l.GetName())
log.Fatal("cache overflow", zap.String("scheduler", l.GetName()))
}
l.cacheRegions.head = head + 1
l.lastKey = r2.GetStartKey()
Expand Down
Loading

0 comments on commit 47de2cd

Please sign in to comment.