From 41f5261b99137a0905afc751f90b093b9fe684ae Mon Sep 17 00:00:00 2001
From: Ryan Leung
Date: Fri, 15 Feb 2019 19:11:23 +0800
Subject: [PATCH] *: log format for schedule, scheduler, syncer, api (#1423)

* log format for schedule, scheduler, syncer, api

Signed-off-by: rleungx
---
 server/api/member.go                    |  7 +++---
 server/api/redirector.go                | 12 +++++-----
 server/api/util.go                      |  4 ++--
 server/region_syncer/client.go          | 18 +++++++++------
 server/region_syncer/history_buffer.go  | 11 +++++-----
 server/region_syncer/server.go          | 28 +++++++++++++++---------
 server/schedule/filters.go              |  3 ---
 server/schedule/merge_checker.go        |  5 +++--
 server/schedule/mockcluster.go          |  5 +++--
 server/schedule/namespace_checker.go    |  7 +++---
 server/schedule/operator.go             | 12 +++++-----
 server/schedule/operator_controller.go  | 21 +++++++++---------
 server/schedule/replica_checker.go      | 19 ++++++++--------
 server/schedule/scheduler.go            |  5 +++--
 server/schedulers/adjacent_region.go    |  5 +++--
 server/schedulers/balance_leader.go     | 29 ++++++++++++++-----------
 server/schedulers/balance_region.go     | 29 +++++++++++++------------
 server/schedulers/base_scheduler.go     |  4 ++--
 server/schedulers/hot_region.go         |  5 +++--
 server/schedulers/label.go              |  9 ++++----
 server/schedulers/scheduler_test.go     |  2 --
 server/schedulers/shuffle_hot_region.go |  5 +++--
 server/schedulers/shuffle_region.go     |  5 +++--
 23 files changed, 138 insertions(+), 112 deletions(-)

diff --git a/server/api/member.go b/server/api/member.go
index 139caa75870..75eea86edb8 100644
--- a/server/api/member.go
+++ b/server/api/member.go
@@ -21,11 +21,12 @@ import (
 	"github.com/gorilla/mux"
 	"github.com/pingcap/kvproto/pkg/pdpb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/pkg/etcdutil"
 	"github.com/pingcap/pd/server"
 	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
 	"github.com/unrolled/render"
+	"go.uber.org/zap"
 )
 
 type memberHandler struct {
@@ -58,12 +59,12 @@ func (h *memberHandler) getMembers() (*pdpb.GetMembersResponse, error) {
 	// Fill leader priorities.
 	for _, m := range members.GetMembers() {
 		if h.svr.GetEtcdLeader() == 0 {
-			log.Warnf("no etcd leader, skip get leader priority, member: %v", m.GetMemberId())
+			log.Warn("no etcd leader, skip get leader priority", zap.Uint64("member", m.GetMemberId()))
 			continue
 		}
 		leaderPriority, e := h.svr.GetMemberLeaderPriority(m.GetMemberId())
 		if e != nil {
-			log.Errorf("failed to load leader priority, member: %v, err: %v", m.GetMemberId(), e)
+			log.Error("failed to load leader priority", zap.Uint64("member", m.GetMemberId()), zap.Error(e))
 			continue
 		}
 		m.LeaderPriority = int32(leaderPriority)
diff --git a/server/api/redirector.go b/server/api/redirector.go
index 989fcb7ab73..58ddbc90ea3 100644
--- a/server/api/redirector.go
+++ b/server/api/redirector.go
@@ -14,13 +14,15 @@
 package api
 
 import (
+	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
 
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 const (
@@ -48,7 +50,7 @@ func (h *redirector) ServeHTTP(w http.ResponseWriter, r *http.Request, next http
 	// Prevent more than one redirection.
 	if name := r.Header.Get(redirectorHeader); len(name) != 0 {
-		log.Errorf("redirect from %v, but %v is not leader", name, h.s.Name())
+		log.Error("redirect but server is not leader", zap.String("from", name), zap.String("server", h.s.Name()))
 		http.Error(w, errRedirectToNotLeader, http.StatusInternalServerError)
 		return
 	}
@@ -93,21 +95,21 @@ func (p *customReverseProxies) ServeHTTP(w http.ResponseWriter, r *http.Request)
 
 		resp, err := p.client.Do(r)
 		if err != nil {
-			log.Error(err)
+			log.Error(fmt.Sprintf("%+v", err))
 			continue
 		}
 
 		b, err := ioutil.ReadAll(resp.Body)
 		resp.Body.Close()
 		if err != nil {
-			log.Error(err)
+			log.Error(fmt.Sprintf("%+v", err))
 			continue
 		}
 
 		copyHeader(w.Header(), resp.Header)
 		w.WriteHeader(resp.StatusCode)
 		if _, err := w.Write(b); err != nil {
-			log.Error(err)
+			log.Error(fmt.Sprintf("%+v", err))
 			continue
 		}
 
diff --git a/server/api/util.go b/server/api/util.go
index d8b5ae5fb6a..565288ea565 100644
--- a/server/api/util.go
+++ b/server/api/util.go
@@ -21,10 +21,10 @@ import (
 	"net/http"
 
 	"github.com/pingcap/errcode"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/pkg/apiutil"
 	"github.com/pingcap/pd/server"
 	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
 	"github.com/unrolled/render"
 )
 
@@ -36,7 +36,7 @@ import (
 // If the error is nil, this also responds with a 500 and logs at the error level.
 func errorResp(rd *render.Render, w http.ResponseWriter, err error) {
 	if err == nil {
-		log.Errorf("nil given to errorResp")
+		log.Error("nil is given to errorResp")
 		rd.JSON(w, http.StatusInternalServerError, "nil error")
 		return
 	}
diff --git a/server/region_syncer/client.go b/server/region_syncer/client.go
index 2de1b6c3496..3a794c16a95 100644
--- a/server/region_syncer/client.go
+++ b/server/region_syncer/client.go
@@ -19,8 +19,9 @@ import (
 	"time"
 
 	"github.com/pingcap/kvproto/pkg/pdpb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
@@ -103,24 +104,27 @@ func (s *RegionSyncer) StartSyncWithLeader(addr string) {
 					return
 				}
 			}
-			log.Errorf("%s failed to establish sync stream with leader %s: %s", s.server.Name(), s.server.GetLeader().GetName(), err)
+			log.Error("server failed to establish sync stream with leader", zap.String("server", s.server.Name()), zap.String("leader", s.server.GetLeader().GetName()), zap.Error(err))
 			time.Sleep(time.Second)
 			continue
 		}
-		log.Infof("%s start sync with leader %s, the request index is %d", s.server.Name(), s.server.GetLeader().GetName(), s.history.GetNextIndex())
+		log.Info("server starts to synchronize with leader", zap.String("server", s.server.Name()), zap.String("leader", s.server.GetLeader().GetName()), zap.Uint64("request-index", s.history.GetNextIndex()))
 		for {
 			resp, err := client.Recv()
 			if err != nil {
-				log.Error("region sync with leader meet error:", err)
+				log.Error("region sync with leader met error", zap.Error(err))
 				if err = client.CloseSend(); err != nil {
-					log.Errorf("Failed to terminate client stream: %v", err)
+					log.Error("failed to terminate client stream", zap.Error(err))
 				}
 				time.Sleep(time.Second)
 				break
 			}
 			if s.history.GetNextIndex() != resp.GetStartIndex() {
-				log.Warnf("%s sync index not match the leader, own: %d, leader: %d, records length: %d",
-					s.server.Name(), s.history.GetNextIndex(), resp.GetStartIndex(), len(resp.GetRegions()))
+				log.Warn("server sync index does not match the leader",
+					zap.String("server", s.server.Name()),
+					zap.Uint64("own", s.history.GetNextIndex()),
+					zap.Uint64("leader", resp.GetStartIndex()),
+					zap.Int("records-length", len(resp.GetRegions())))
 				// reset index
 				s.history.ResetWithIndex(resp.GetStartIndex())
 			}
diff --git a/server/region_syncer/history_buffer.go b/server/region_syncer/history_buffer.go
index c2c83903f10..adbb075ed08 100644
--- a/server/region_syncer/history_buffer.go
+++ b/server/region_syncer/history_buffer.go
@@ -17,8 +17,9 @@ import (
 	"strconv"
 	"sync"
 
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 const (
@@ -133,15 +134,15 @@ func (h *historyBuffer) get(index uint64) *core.RegionInfo {
 func (h *historyBuffer) reload() {
 	v, err := h.kv.Load(historyKey)
 	if err != nil {
-		log.Warnf("load history index failed: %s", err)
+		log.Warn("load history index failed", zap.Error(err))
 	}
 	if v != "" {
 		h.index, err = strconv.ParseUint(v, 10, 64)
 		if err != nil {
-			log.Fatalf("load history index failed: %s", err)
+			log.Fatal("load history index failed", zap.Error(err))
 		}
 	}
-	log.Info("history index start at: ", h.firstIndex())
+	log.Info("start from history index", zap.Uint64("start-index", h.firstIndex()))
 }
 
 func (h *historyBuffer) persist() {
@@ -149,6 +150,6 @@ func (h *historyBuffer) persist() {
 	regionSyncerStatus.WithLabelValues("last_index").Set(float64(h.nextIndex()))
 	err := h.kv.Save(historyKey, strconv.FormatUint(h.nextIndex(), 10))
 	if err != nil {
-		log.Warnf("persist history index (%d) failed: %v", h.nextIndex(), err)
+		log.Warn("persist history index failed", zap.Uint64("persist-index", h.nextIndex()), zap.Error(err))
 	}
 }
diff --git a/server/region_syncer/server.go b/server/region_syncer/server.go
index f071c21104f..0e9562024fe 100644
--- a/server/region_syncer/server.go
+++ b/server/region_syncer/server.go
@@ -22,9 +22,10 @@ import (
 	"github.com/juju/ratelimit"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
 	"github.com/pkg/errors"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -140,7 +141,9 @@ func (s *RegionSyncer) Sync(stream pdpb.PD_SyncRegionsServer) error {
 	if clusterID != s.server.ClusterID() {
 		return status.Errorf(codes.FailedPrecondition, "mismatch cluster id, need %d but got %d", s.server.ClusterID(), clusterID)
 	}
-	log.Infof("establish sync region stream with %s [%s]", request.GetMember().GetName(), request.GetMember().GetClientUrls()[0])
+	log.Info("establish sync region stream",
+		zap.String("requested-server", request.GetMember().GetName()),
+		zap.String("url", request.GetMember().GetClientUrls()[0]))
 
 	err = s.syncHistoryRegion(request, stream)
 	if err != nil {
@@ -156,7 +159,8 @@ func (s *RegionSyncer) syncHistoryRegion(request *pdpb.SyncRegionRequest, stream
 	records := s.history.RecordsFrom(startIndex)
 	if len(records) == 0 {
 		if s.history.GetNextIndex() == startIndex {
-			log.Infof("%s already in sync with %s, the last index is %d", name, s.server.Name(), startIndex)
+			log.Info("requested server is already in sync with server",
+				zap.String("requested-server", name), zap.String("server", s.server.Name()), zap.Uint64("last-index", startIndex))
 			return nil
 		}
 		// do full synchronization
@@ -178,18 +182,22 @@ func (s *RegionSyncer) syncHistoryRegion(request *pdpb.SyncRegionRequest, stream
 			s.limit.Wait(int64(resp.Size()))
 			lastIndex += len(res)
 			if err := stream.Send(resp); err != nil {
-				log.Errorf("failed to send sync region response, error: %v", err)
+				log.Error("failed to send sync region response", zap.Error(err))
 			}
 			res = res[:0]
 		}
-		log.Infof("%s has completed full synchronization with %s, spend %v", name, s.server.Name(), time.Since(start))
+		log.Info("requested server has completed full synchronization with server",
+			zap.String("requested-server", name), zap.String("server", s.server.Name()), zap.Duration("cost", time.Since(start)))
 		return nil
 	}
-	log.Warnf("no history regions from index %d, the leader maybe restarted", startIndex)
+	log.Warn("no history regions from index, the leader may have been restarted", zap.Uint64("index", startIndex))
 	return nil
 	}
-	log.Infof("sync the history regions with %s from index: %d, own last index: %d, got records length: %d",
-		name, startIndex, s.history.GetNextIndex(), len(records))
+	log.Info("sync the history regions with server",
+		zap.String("requested-server", name),
+		zap.Uint64("from-index", startIndex),
+		zap.Uint64("last-index", s.history.GetNextIndex()),
+		zap.Int("records-length", len(records)))
 	regions := make([]*metapb.Region, len(records))
 	for i, r := range records {
 		regions[i] = r.GetMeta()
@@ -215,7 +223,7 @@ func (s *RegionSyncer) broadcast(regions *pdpb.SyncRegionResponse) {
 	for name, sender := range s.streams {
 		err := sender.Send(regions)
 		if err != nil {
-			log.Error("region syncer send data meet error:", err)
+			log.Error("region syncer failed to send data", zap.Error(err))
 			failed = append(failed, name)
 		}
 	}
@@ -224,7 +232,7 @@
 		s.Lock()
 		for _, name := range failed {
 			delete(s.streams, name)
-			log.Infof("region syncer delete the stream of %s", name)
+			log.Info("region syncer delete the stream", zap.String("stream", name))
 		}
 		s.Unlock()
 	}
diff --git a/server/schedule/filters.go b/server/schedule/filters.go
index 5777a17950f..eb25db0dc23 100644
--- a/server/schedule/filters.go
+++ b/server/schedule/filters.go
@@ -19,7 +19,6 @@ import (
 	"github.com/pingcap/pd/server/cache"
 	"github.com/pingcap/pd/server/core"
 	"github.com/pingcap/pd/server/namespace"
-	log "github.com/sirupsen/logrus"
 )
 
 //revive:disable:unused-parameter
@@ -38,7 +37,6 @@ func FilterSource(opt Options, store *core.StoreInfo, filters []Filter) bool {
 	storeID := fmt.Sprintf("store%d", store.GetID())
 	for _, filter := range filters {
 		if filter.FilterSource(opt, store) {
-			log.Debugf("[filter %T] filters store %v from source", filter, store)
 			filterCounter.WithLabelValues("filter-source", storeID, filter.Type()).Inc()
 			return true
 		}
@@ -51,7 +49,6 @@ func FilterTarget(opt Options, store *core.StoreInfo, filters []Filter) bool {
 	storeID := fmt.Sprintf("store%d", store.GetID())
 	for _, filter := range filters {
 		if filter.FilterTarget(opt, store) {
-			log.Debugf("[filter %T] filters store %v from target", filter, store)
 			filterCounter.WithLabelValues("filter-target", storeID, filter.Type()).Inc()
 			return true
 		}
diff --git a/server/schedule/merge_checker.go b/server/schedule/merge_checker.go
index 1ee325a9bde..ffa6abeaf4f 100644
--- a/server/schedule/merge_checker.go
+++ b/server/schedule/merge_checker.go
@@ -16,10 +16,11 @@ package schedule
 import (
 	"time"
 
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/cache"
 	"github.com/pingcap/pd/server/core"
 	"github.com/pingcap/pd/server/namespace"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 // As region split history is not persisted.
We put a special marker into @@ -109,7 +110,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*Operator { } checkerCounter.WithLabelValues("merge_checker", "new_operator").Inc() - log.Debugf("try to merge region %v into region %v", core.HexRegionMeta(region.GetMeta()), core.HexRegionMeta(target.GetMeta())) + log.Debug("try to merge region", zap.Reflect("from", core.HexRegionMeta(region.GetMeta())), zap.Reflect("to", core.HexRegionMeta(target.GetMeta()))) ops, err := CreateMergeRegionOperator("merge-region", m.cluster, region, target, OpMerge) if err != nil { return nil diff --git a/server/schedule/mockcluster.go b/server/schedule/mockcluster.go index 7b4d256253f..dc0c5e50d28 100644 --- a/server/schedule/mockcluster.go +++ b/server/schedule/mockcluster.go @@ -21,9 +21,10 @@ import ( "github.com/gogo/protobuf/proto" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // MockCluster is used to mock clusterInfo for test use @@ -77,7 +78,7 @@ func (mc *MockCluster) RandHotRegionFromStore(store uint64, kind FlowKind) *core func (mc *MockCluster) AllocPeer(storeID uint64) (*metapb.Peer, error) { peerID, err := mc.allocID() if err != nil { - log.Errorf("failed to alloc peer: %v", err) + log.Error("failed to alloc peer", zap.Error(err)) return nil, err } peer := &metapb.Peer{ diff --git a/server/schedule/namespace_checker.go b/server/schedule/namespace_checker.go index 3b56da05f1d..269324c7f85 100644 --- a/server/schedule/namespace_checker.go +++ b/server/schedule/namespace_checker.go @@ -15,9 +15,10 @@ package schedule import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // NamespaceChecker ensures region to go to the right place. 
@@ -63,7 +64,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
 		if n.isExists(targetStores, peer.StoreId) {
 			continue
 		}
-		log.Debugf("[region %d] peer %v is not located in namespace target stores", region.GetID(), peer)
+		log.Debug("peer is not located in namespace target stores", zap.Uint64("region-id", region.GetID()), zap.Reflect("peer", peer))
 		newPeer := n.SelectBestPeerToRelocate(region, targetStores)
 		if newPeer == nil {
 			checkerCounter.WithLabelValues("namespace_checker", "no_target_peer").Inc()
@@ -81,7 +82,7 @@ func (n *NamespaceChecker) Check(region *core.RegionInfo) *Operator {
 func (n *NamespaceChecker) SelectBestPeerToRelocate(region *core.RegionInfo, targets []*core.StoreInfo) *metapb.Peer {
 	storeID := n.SelectBestStoreToRelocate(region, targets)
 	if storeID == 0 {
-		log.Debugf("[region %d] has no best store to relocate", region.GetID())
+		log.Debug("region has no best store to relocate", zap.Uint64("region-id", region.GetID()))
 		return nil
 	}
 	newPeer, err := n.cluster.AllocPeer(storeID)
diff --git a/server/schedule/operator.go b/server/schedule/operator.go
index e627c0e0fe6..fdff046fb33 100644
--- a/server/schedule/operator.go
+++ b/server/schedule/operator.go
@@ -23,9 +23,9 @@ import (
 
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
-	log "github.com/sirupsen/logrus"
-
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
+	"go.uber.org/zap"
 )
 
 const (
@@ -82,7 +82,7 @@ func (ap AddPeer) String() string {
 func (ap AddPeer) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreVoter(ap.ToStore); p != nil {
 		if p.GetId() != ap.PeerID {
-			log.Warnf("expect %v, but obtain voter %v", ap.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", ap.String()), zap.Uint64("obtain-voter", p.GetId()))
 			return false
 		}
 		return region.GetPendingVoter(p.GetId()) == nil
@@ -111,7 +111,7 @@ func (al AddLearner) String() string {
 func (al AddLearner) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreLearner(al.ToStore); p != nil {
 		if p.GetId() != al.PeerID {
-			log.Warnf("expect %v, but obtain learner %v", al.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", al.String()), zap.Uint64("obtain-learner", p.GetId()))
 			return false
 		}
 		return region.GetPendingLearner(p.GetId()) == nil
@@ -140,7 +140,7 @@ func (pl PromoteLearner) String() string {
 func (pl PromoteLearner) IsFinish(region *core.RegionInfo) bool {
 	if p := region.GetStoreVoter(pl.ToStore); p != nil {
 		if p.GetId() != pl.PeerID {
-			log.Warnf("expect %v, but obtain voter %v", pl.String(), p.GetId())
+			log.Warn("obtain unexpected peer", zap.String("expect", pl.String()), zap.Uint64("obtain-voter", p.GetId()))
 		}
 		return p.GetId() == pl.PeerID
 	}
@@ -522,7 +522,7 @@ func matchPeerSteps(cluster Cluster, source *core.RegionInfo, target *core.Regio
 
 		peer, err := cluster.AllocPeer(storeID)
 		if err != nil {
-			log.Debugf("peer alloc failed: %v", err)
+			log.Debug("peer alloc failed", zap.Error(err))
 			return nil, kind, err
 		}
 		if cluster.IsRaftLearnerEnabled() {
diff --git a/server/schedule/operator_controller.go b/server/schedule/operator_controller.go
index 7f30f32d0e4..bd9988c3d06 100644
--- a/server/schedule/operator_controller.go
+++ b/server/schedule/operator_controller.go
@@ -21,8 +21,9 @@ import (
 	"github.com/pingcap/kvproto/pkg/eraftpb"
 	"github.com/pingcap/kvproto/pkg/metapb"
 	"github.com/pingcap/kvproto/pkg/pdpb"
+	log "github.com/pingcap/log"
 	"github.com/pingcap/pd/server/core"
-	log "github.com/sirupsen/logrus"
+	"go.uber.org/zap"
 )
 
 var historyKeepTime = 5 * time.Minute
@@ -64,13 +65,13 @@ func (oc *OperatorController) Dispatch(region *core.RegionInfo) {
 		return
 	}
 	if op.IsFinish() {
-		log.Infof("[region %v] operator finish: %s", region.GetID(), op)
+		log.Info("operator finish", zap.Uint64("region-id", region.GetID()), zap.Reflect("operator", op))
 		operatorCounter.WithLabelValues(op.Desc(), "finish").Inc()
 		operatorDuration.WithLabelValues(op.Desc()).Observe(op.ElapsedTime().Seconds())
 		oc.pushHistory(op)
 		oc.RemoveOperator(op)
 	} else if timeout {
-		log.Infof("[region %v] operator timeout: %s", region.GetID(), op)
+		log.Info("operator timeout", zap.Uint64("region-id", region.GetID()), zap.Reflect("operator", op))
 		oc.RemoveOperator(op)
 	}
 }
@@ -102,15 +103,15 @@ func (oc *OperatorController) AddOperator(ops ...*Operator) bool {
 func (oc *OperatorController) checkAddOperator(op *Operator) bool {
 	region := oc.cluster.GetRegion(op.RegionID())
 	if region == nil {
-		log.Debugf("[region %v] region not found, cancel add operator", op.RegionID())
+		log.Debug("region not found, cancel add operator", zap.Uint64("region-id", op.RegionID()))
 		return false
 	}
 	if region.GetRegionEpoch().GetVersion() != op.RegionEpoch().GetVersion() || region.GetRegionEpoch().GetConfVer() != op.RegionEpoch().GetConfVer() {
-		log.Debugf("[region %v] region epoch not match, %v vs %v, cancel add operator", op.RegionID(), region.GetRegionEpoch(), op.RegionEpoch())
+		log.Debug("region epoch does not match, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", region.GetRegionEpoch()), zap.Reflect("new", op.RegionEpoch()))
 		return false
 	}
 	if old := oc.operators[op.RegionID()]; old != nil && !isHigherPriorityOperator(op, old) {
-		log.Debugf("[region %v] already have operator %s, cancel add operator", op.RegionID(), old)
+		log.Debug("already have operator, cancel add operator", zap.Uint64("region-id", op.RegionID()), zap.Reflect("old", old))
 		return false
 	}
 	return true
@@ -123,12 +124,12 @@ func isHigherPriorityOperator(new, old *Operator) bool {
 
 func (oc *OperatorController) addOperatorLocked(op *Operator) bool {
 	regionID := op.RegionID()
-	log.Infof("[region %v] add operator: %s", regionID, op)
+	log.Info("add operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", op))
 
 	// If there is an old operator, replace it. The priority should be checked
 	// already.
 	if old, ok := oc.operators[regionID]; ok {
-		log.Infof("[region %v] replace old operator: %s", regionID, old)
+		log.Info("replace old operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", old))
 		operatorCounter.WithLabelValues(old.Desc(), "replaced").Inc()
 		oc.removeOperatorLocked(old)
 	}
@@ -182,7 +183,7 @@ func (oc *OperatorController) GetOperators() []*Operator {
 // SendScheduleCommand sends a command to the region.
func (oc *OperatorController) SendScheduleCommand(region *core.RegionInfo, step OperatorStep) { - log.Infof("[region %v] send schedule command: %s", region.GetID(), step) + log.Info("send schedule command", zap.Uint64("region-id", region.GetID()), zap.Reflect("step", step)) switch st := step.(type) { case TransferLeader: cmd := &pdpb.RegionHeartbeatResponse{ @@ -260,7 +261,7 @@ func (oc *OperatorController) SendScheduleCommand(region *core.RegionInfo, step } oc.hbStreams.SendMsg(region, cmd) default: - log.Errorf("unknown operatorStep: %v", step) + log.Error("unknown operator step", zap.Reflect("step", step)) } } diff --git a/server/schedule/replica_checker.go b/server/schedule/replica_checker.go index 13e70c0a9f0..73fd28e8952 100644 --- a/server/schedule/replica_checker.go +++ b/server/schedule/replica_checker.go @@ -17,9 +17,10 @@ import ( "fmt" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // ReplicaChecker ensures region has the best replicas. @@ -62,7 +63,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator { } if len(region.GetPeers()) < r.cluster.GetMaxReplicas() && r.cluster.IsMakeUpReplicaEnabled() { - log.Debugf("[region %d] has %d peers fewer than max replicas", region.GetID(), len(region.GetPeers())) + log.Debug("region has fewer than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) newPeer, _ := r.selectBestPeerToAddReplica(region, NewStorageThresholdFilter()) if newPeer == nil { checkerCounter.WithLabelValues("replica_checker", "no_target_store").Inc() @@ -86,7 +87,7 @@ func (r *ReplicaChecker) Check(region *core.RegionInfo) *Operator { // when add learner peer, the number of peer will exceed max replicas for a while, // just comparing the the number of voters to avoid too many cancel add operator log. if len(region.GetVoters()) > r.cluster.GetMaxReplicas() && r.cluster.IsRemoveExtraReplicaEnabled() { - log.Debugf("[region %d] has %d peers more than max replicas", region.GetID(), len(region.GetPeers())) + log.Debug("region has more than max replicas", zap.Uint64("region-id", region.GetID()), zap.Int("peers", len(region.GetPeers()))) oldPeer, _ := r.selectWorstPeer(region) if oldPeer == nil { checkerCounter.WithLabelValues("replica_checker", "no_worst_peer").Inc() @@ -110,7 +111,7 @@ func (r *ReplicaChecker) SelectBestReplacementStore(region *core.RegionInfo, old func (r *ReplicaChecker) selectBestPeerToAddReplica(region *core.RegionInfo, filters ...Filter) (*metapb.Peer, float64) { storeID, score := r.selectBestStoreToAddReplica(region, filters...) if storeID == 0 { - log.Debugf("[region %d] no best store to add replica", region.GetID()) + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) return nil, 0 } newPeer, err := r.cluster.AllocPeer(storeID) @@ -148,7 +149,7 @@ func (r *ReplicaChecker) selectWorstPeer(region *core.RegionInfo) (*metapb.Peer, selector := NewReplicaSelector(regionStores, r.cluster.GetLocationLabels(), r.filters...) 
worstStore := selector.SelectSource(r.cluster, regionStores) if worstStore == nil { - log.Debugf("[region %d] no worst store", region.GetID()) + log.Debug("no worst store", zap.Uint64("region-id", region.GetID())) return nil, 0 } return region.GetStorePeer(worstStore.GetID()), DistinctScore(r.cluster.GetLocationLabels(), regionStores, worstStore) @@ -166,7 +167,7 @@ func (r *ReplicaChecker) checkDownPeer(region *core.RegionInfo) *Operator { } store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.DownTime() < r.cluster.GetMaxStoreDownTime() { @@ -194,7 +195,7 @@ func (r *ReplicaChecker) checkOfflinePeer(region *core.RegionInfo) *Operator { for _, peer := range region.GetPeers() { store := r.cluster.GetStore(peer.GetStoreId()) if store == nil { - log.Infof("lost the store %d, maybe you are recovering the PD cluster.", peer.GetStoreId()) + log.Info("lost the store, maybe you are recovering the PD cluster", zap.Uint64("store-id", peer.GetStoreId())) return nil } if store.IsUp() { @@ -224,7 +225,7 @@ func (r *ReplicaChecker) checkBestReplacement(region *core.RegionInfo) *Operator } // Make sure the new peer is better than the old peer. if newScore <= oldScore { - log.Debugf("[region %d] newScore %f is not better than oldScore %f", region.GetID(), newScore, oldScore) + log.Debug("no better peer", zap.Uint64("region-id", region.GetID()), zap.Float64("new-score", newScore), zap.Float64("old-score", oldScore)) checkerCounter.WithLabelValues("replica_checker", "not_better").Inc() return nil } @@ -254,7 +255,7 @@ func (r *ReplicaChecker) fixPeer(region *core.RegionInfo, peer *metapb.Peer, sta storeID, _ := r.SelectBestReplacementStore(region, peer, NewStorageThresholdFilter()) if storeID == 0 { - log.Debugf("[region %d] no best store to add replica", region.GetID()) + log.Debug("no best store to add replica", zap.Uint64("region-id", region.GetID())) return nil } newPeer, err := r.cluster.AllocPeer(storeID) diff --git a/server/schedule/scheduler.go b/server/schedule/scheduler.go index 6ec7db2a80f..9c587fdf2c3 100644 --- a/server/schedule/scheduler.go +++ b/server/schedule/scheduler.go @@ -17,9 +17,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) // Cluster provides an overview of a cluster's regions distribution. @@ -76,7 +77,7 @@ var schedulerMap = make(map[string]CreateSchedulerFunc) // func of a package. 
func RegisterScheduler(name string, createFn CreateSchedulerFunc) { if _, ok := schedulerMap[name]; ok { - log.Fatalf("duplicated scheduler name: %v", name) + log.Fatal("duplicated scheduler", zap.String("name", name)) } schedulerMap[name] = createFn } diff --git a/server/schedulers/adjacent_region.go b/server/schedulers/adjacent_region.go index b62852a0e5a..d42837f1187 100644 --- a/server/schedulers/adjacent_region.go +++ b/server/schedulers/adjacent_region.go @@ -18,10 +18,11 @@ import ( "strconv" "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) const ( @@ -204,7 +205,7 @@ func (l *balanceAdjacentRegionScheduler) process(cluster schedule.Cluster) []*sc defer func() { if l.cacheRegions.len() < 0 { - log.Fatalf("[%s]the cache overflow should never happen", l.GetName()) + log.Fatal("cache overflow", zap.String("scheduler", l.GetName())) } l.cacheRegions.head = head + 1 l.lastKey = r2.GetStartKey() diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go index 1ae517b9150..ca059a96faa 100644 --- a/server/schedulers/balance_leader.go +++ b/server/schedulers/balance_leader.go @@ -17,10 +17,11 @@ import ( "fmt" "strconv" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -90,7 +91,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*schedule. return nil } - log.Debugf("[%s] store%d has the max leader score, store%d has the min leader score", l.GetName(), source.GetID(), target.GetID()) + log.Debug("store leader score", zap.String("scheduler", l.GetName()), zap.Uint64("max-store", source.GetID()), zap.Uint64("min-store", target.GetID())) sourceStoreLabel := strconv.FormatUint(source.GetID(), 10) targetStoreLabel := strconv.FormatUint(target.GetID(), 10) balanceLeaderCounter.WithLabelValues("high_score", sourceStoreLabel).Inc() @@ -109,7 +110,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*schedule. } // If no operator can be created for the selected stores, ignore them for a while. - log.Debugf("[%s] no operator created for selected store%d and store%d", l.GetName(), source.GetID(), target.GetID()) + log.Debug("no operator created for selected stores", zap.String("scheduler", l.GetName()), zap.Uint64("source", source.GetID()), zap.Uint64("target", target.GetID())) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(source.GetID(), 10)).Inc() l.taintStores.Put(source.GetID()) balanceLeaderCounter.WithLabelValues("add_taint", strconv.FormatUint(target.GetID(), 10)).Inc() @@ -123,13 +124,13 @@ func (l *balanceLeaderScheduler) Schedule(cluster schedule.Cluster) []*schedule. 
func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandLeaderRegion(source.GetID(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no leader", l.GetName(), source.GetID()) + log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", source.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader_region").Inc() return nil } target := l.selector.SelectTarget(cluster, cluster.GetFollowerStores(region)) if target == nil { - log.Debugf("[%s] region %d has no target store", l.GetName(), region.GetID()) + log.Debug("region has no target store", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_target_store").Inc() return nil } @@ -142,13 +143,13 @@ func (l *balanceLeaderScheduler) transferLeaderOut(source *core.StoreInfo, clust func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { region := cluster.RandFollowerRegion(target.GetID(), core.HealthRegion()) if region == nil { - log.Debugf("[%s] store%d has no follower", l.GetName(), target.GetID()) + log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", target.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_follower_region").Inc() return nil } source := cluster.GetStore(region.GetLeader().GetStoreId()) if source == nil { - log.Debugf("[%s] region %d has no leader", l.GetName(), region.GetID()) + log.Debug("region has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "no_leader").Inc() return nil } @@ -161,17 +162,19 @@ func (l *balanceLeaderScheduler) transferLeaderIn(target *core.StoreInfo, cluste // the leader from the source store to the target store for the region. 
func (l *balanceLeaderScheduler) createOperator(region *core.RegionInfo, source, target *core.StoreInfo, cluster schedule.Cluster, opInfluence schedule.OpInfluence) []*schedule.Operator { if cluster.IsRegionHot(region.GetID()) { - log.Debugf("[%s] region %d is hot region, ignore it", l.GetName(), region.GetID()) + log.Debug("region is hot region, ignore it", zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(l.GetName(), "region_hot").Inc() return nil } if !shouldBalance(cluster, source, target, region, core.LeaderKind, opInfluence) { - log.Debugf("[%s] skip balance region %d, source %d to target %d, source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v", - l.GetName(), region.GetID(), source.GetID(), target.GetID(), - source.GetLeaderSize(), source.LeaderScore(0), opInfluence.GetStoreInfluence(source.GetID()).ResourceSize(core.LeaderKind), - target.GetLeaderSize(), target.LeaderScore(0), opInfluence.GetStoreInfluence(target.GetID()).ResourceSize(core.LeaderKind), - cluster.GetAverageRegionSize()) + log.Debug("skip balance region", + zap.String("scheduler", l.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetID()), zap.Uint64("target-store", target.GetID()), + zap.Int64("source-size", source.GetLeaderSize()), zap.Float64("source-score", source.LeaderScore(0)), + zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetID()).ResourceSize(core.LeaderKind)), + zap.Int64("target-size", target.GetLeaderSize()), zap.Float64("target-score", target.LeaderScore(0)), + zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetID()).ResourceSize(core.LeaderKind)), + zap.Int64("average-region-size", cluster.GetAverageRegionSize())) schedulerCounter.WithLabelValues(l.GetName(), "skip").Inc() return nil } diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go index 951268d7c6d..99d002392b6 100644 --- a/server/schedulers/balance_region.go +++ b/server/schedulers/balance_region.go @@ -18,10 +18,11 @@ import ( "strconv" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/cache" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -85,7 +86,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*schedule. return nil } - log.Debugf("[%s] store%d has the max region score", s.GetName(), source.GetID()) + log.Debug("store has the max region score", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetID())) sourceLabel := strconv.FormatUint(source.GetID(), 10) balanceRegionCounter.WithLabelValues("source_store", sourceLabel).Inc() @@ -102,18 +103,18 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*schedule. schedulerCounter.WithLabelValues(s.GetName(), "no_region").Inc() continue } - log.Debugf("[%s] select region%d", s.GetName(), region.GetID()) + log.Debug("select region", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID())) // We don't schedule region with abnormal number of replicas. 
 		if len(region.GetPeers()) != cluster.GetMaxReplicas() {
-			log.Debugf("[%s] region%d has abnormal replica count", s.GetName(), region.GetID())
+			log.Debug("region has abnormal replica count", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
 			schedulerCounter.WithLabelValues(s.GetName(), "abnormal_replica").Inc()
 			continue
 		}
 
 		// Skip hot regions.
 		if cluster.IsRegionHot(region.GetID()) {
-			log.Debugf("[%s] region%d is hot", s.GetName(), region.GetID())
+			log.Debug("region is hot", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
 			schedulerCounter.WithLabelValues(s.GetName(), "region_hot").Inc()
 			continue
 		}
@@ -132,7 +133,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*schedule.
 
 	if !hasPotentialTarget {
 		// If no potential target store can be found for the selected store, ignore it for a while.
-		log.Debugf("[%s] no operator created for selected store%d", s.GetName(), source.GetID())
+		log.Debug("no operator created for selected store", zap.String("scheduler", s.GetName()), zap.Uint64("store-id", source.GetID()))
 		balanceRegionCounter.WithLabelValues("add_taint", sourceLabel).Inc()
 		s.taintStores.Put(source.GetID())
 	}
@@ -155,16 +156,16 @@ func (s *balanceRegionScheduler) transferPeer(cluster schedule.Cluster, region *
 	}
 	target := cluster.GetStore(storeID)
-	log.Debugf("[region %d] source store id is %v, target store id is %v", region.GetID(), source.GetID(), target.GetID())
+	log.Debug("select source and target stores", zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetID()), zap.Uint64("target-store", target.GetID()))
 
 	if !shouldBalance(cluster, source, target, region, core.RegionKind, opInfluence) {
-		log.Debugf("[%s] skip balance region %d, source %d to target %d ,source size: %v, source score: %v, source influence: %v, target size: %v, target score: %v, target influence: %v, average region size: %v",
-			s.GetName(), region.GetID(), source.GetID(), target.GetID(),
-			source.GetRegionSize(), source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0),
-			opInfluence.GetStoreInfluence(source.GetID()).ResourceSize(core.RegionKind),
-			target.GetRegionSize(), target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0),
-			opInfluence.GetStoreInfluence(target.GetID()).ResourceSize(core.RegionKind),
-			cluster.GetAverageRegionSize())
+		log.Debug("skip balance region",
+			zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()), zap.Uint64("source-store", source.GetID()), zap.Uint64("target-store", target.GetID()),
+			zap.Int64("source-size", source.GetRegionSize()), zap.Float64("source-score", source.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)),
+			zap.Int64("source-influence", opInfluence.GetStoreInfluence(source.GetID()).ResourceSize(core.RegionKind)),
+			zap.Int64("target-size", target.GetRegionSize()), zap.Float64("target-score", target.RegionScore(cluster.GetHighSpaceRatio(), cluster.GetLowSpaceRatio(), 0)),
+			zap.Int64("target-influence", opInfluence.GetStoreInfluence(target.GetID()).ResourceSize(core.RegionKind)),
+			zap.Int64("average-region-size", cluster.GetAverageRegionSize()))
 		schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc()
 		return nil
 	}
diff --git a/server/schedulers/base_scheduler.go b/server/schedulers/base_scheduler.go
index 096762bd697..7bb66554a65 100644
--- a/server/schedulers/base_scheduler.go
+++ b/server/schedulers/base_scheduler.go
@@ -16,8 +16,8 @@ package schedulers
 import (
 	"time"
 
+	log 
"github.com/pingcap/log" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" ) // options for interval of schedulers @@ -47,7 +47,7 @@ func intervalGrow(x time.Duration, maxInterval time.Duration, typ intervalGrowth case zeroGrowth: return x default: - log.Fatal("unKnow interval growth type") + log.Fatal("unknown interval growth type") } return 0 } diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go index a55c678953f..cc7add615e6 100644 --- a/server/schedulers/hot_region.go +++ b/server/schedulers/hot_region.go @@ -20,9 +20,10 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -307,7 +308,7 @@ func (h *balanceHotRegionsScheduler) balanceByPeer(cluster schedule.Cluster, sto // because it doesn't exist in the system right now. destPeer, err := cluster.AllocPeer(destStoreID) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil, nil, nil } diff --git a/server/schedulers/label.go b/server/schedulers/label.go index f4dd7f57bf2..e1e3d83392c 100644 --- a/server/schedulers/label.go +++ b/server/schedulers/label.go @@ -14,9 +14,10 @@ package schedulers import ( + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -66,10 +67,10 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster) []*schedule.Operator schedulerCounter.WithLabelValues(s.GetName(), "skip").Inc() return nil } - log.Debugf("label scheduler reject leader store list: %v", rejectLeaderStores) + log.Debug("label scheduler reject leader store list", zap.Reflect("stores", rejectLeaderStores)) for id := range rejectLeaderStores { if region := cluster.RandLeaderRegion(id); region != nil { - log.Debugf("label scheduler selects region %d to transfer leader", region.GetID()) + log.Debug("label scheduler selects region to transfer leader", zap.Uint64("region-id", region.GetID())) excludeStores := make(map[uint64]struct{}) for _, p := range region.GetDownPeers() { excludeStores[p.GetPeer().GetStoreId()] = struct{}{} @@ -80,7 +81,7 @@ func (s *labelScheduler) Schedule(cluster schedule.Cluster) []*schedule.Operator filter := schedule.NewExcludedFilter(nil, excludeStores) target := s.selector.SelectTarget(cluster, cluster.GetFollowerStores(region), filter) if target == nil { - log.Debugf("label scheduler no target found for region %d", region.GetID()) + log.Debug("label scheduler no target found for region", zap.Uint64("region-id", region.GetID())) schedulerCounter.WithLabelValues(s.GetName(), "no_target").Inc() continue } diff --git a/server/schedulers/scheduler_test.go b/server/schedulers/scheduler_test.go index d82e98757c8..3f90cec7243 100644 --- a/server/schedulers/scheduler_test.go +++ b/server/schedulers/scheduler_test.go @@ -20,7 +20,6 @@ import ( "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/namespace" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" ) var _ = Suite(&testShuffleLeaderSuite{}) @@ -197,7 +196,6 @@ func (s *testScatterRegionSuite) scatter(c *C, numStores, numRegions uint64) { for i := uint64(1); i <= numRegions; i++ { region := tc.GetRegion(i) if op := scatterer.Scatter(region); op != nil { - log.Info(op) tc.ApplyOperator(op) } } diff --git 
a/server/schedulers/shuffle_hot_region.go b/server/schedulers/shuffle_hot_region.go index fd86200965d..c9b3a5be29b 100644 --- a/server/schedulers/shuffle_hot_region.go +++ b/server/schedulers/shuffle_hot_region.go @@ -18,10 +18,11 @@ import ( "strconv" "time" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -132,7 +133,7 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, sto } destPeer, err := cluster.AllocPeer(destStoreID) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil } schedulerCounter.WithLabelValues(s.GetName(), "create_operator").Inc() diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go index 5c0522b6d4c..e2651983995 100644 --- a/server/schedulers/shuffle_region.go +++ b/server/schedulers/shuffle_region.go @@ -15,9 +15,10 @@ package schedulers import ( "github.com/pingcap/kvproto/pkg/metapb" + log "github.com/pingcap/log" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/schedule" - log "github.com/sirupsen/logrus" + "go.uber.org/zap" ) func init() { @@ -106,7 +107,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, filte newPeer, err := cluster.AllocPeer(target.GetID()) if err != nil { - log.Errorf("failed to allocate peer: %v", err) + log.Error("failed to allocate peer", zap.Error(err)) return nil }
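
For reviewers unfamiliar with the new logging stack, here is a minimal, self-contained sketch of the migration pattern this patch applies everywhere: replace logrus's format-string calls with a constant message plus typed zap fields via the pingcap/log wrapper. Only the two imports shown are taken from the patch; the field values and the error are made up for illustration.

package main

import (
	"errors"

	log "github.com/pingcap/log"
	"go.uber.org/zap"
)

func main() {
	regionID, storeID := uint64(42), uint64(7)
	err := errors.New("connection reset")

	// Old style (logrus): every value is interpolated into the message, e.g.
	//   log.Errorf("[region %d] failed on store %d: %v", regionID, storeID, err)
	// New style (pingcap/log over zap): a constant, greppable message plus
	// typed key/value fields that log collectors can parse.
	log.Error("request failed",
		zap.Uint64("region-id", regionID),
		zap.Uint64("store-id", storeID),
		zap.Error(err))
}

Values without a typed field constructor go through zap.Reflect, as the patch does for operators, peers, and region epochs.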