From 4fe72e539f5af711215ba50411a4044244b7c577 Mon Sep 17 00:00:00 2001 From: disksing Date: Thu, 4 Feb 2021 05:48:45 -0600 Subject: [PATCH] store/tikv: move metrics shortcuts to /metrics (#22693) Signed-off-by: disksing --- store/tikv/2pc.go | 31 ++---- store/tikv/backoff.go | 27 ++--- store/tikv/batch_coprocessor.go | 3 +- store/tikv/cleanup.go | 3 +- store/tikv/commit.go | 5 +- store/tikv/coprocessor.go | 8 +- store/tikv/lock_resolver.go | 49 +++------ store/tikv/metrics/metrics.go | 2 + store/tikv/metrics/shortcuts.go | 175 ++++++++++++++++++++++++++++++++ store/tikv/pessimistic.go | 7 +- store/tikv/prewrite.go | 6 +- store/tikv/rawkv.go | 33 ++---- store/tikv/region_cache.go | 49 +++------ store/tikv/snapshot.go | 10 +- store/tikv/txn.go | 14 +-- 15 files changed, 251 insertions(+), 171 deletions(-) create mode 100644 store/tikv/metrics/shortcuts.go diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index 0fc577a954b14..a3c63e73cb4f1 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -55,26 +55,11 @@ type twoPhaseCommitAction interface { String() string } -var ( - tikvSecondaryLockCleanupFailureCounterRollback = metrics.TiKVSecondaryLockCleanupFailureCounter.WithLabelValues("rollback") - tiKVTxnHeartBeatHistogramOK = metrics.TiKVTxnHeartBeatHistogram.WithLabelValues("ok") - tiKVTxnHeartBeatHistogramError = metrics.TiKVTxnHeartBeatHistogram.WithLabelValues("err") - tikvAsyncCommitTxnCounterOk = metrics.TiKVAsyncCommitTxnCounter.WithLabelValues("ok") - tikvAsyncCommitTxnCounterError = metrics.TiKVAsyncCommitTxnCounter.WithLabelValues("err") - tikvOnePCTxnCounterOk = metrics.TiKVOnePCTxnCounter.WithLabelValues("ok") - tikvOnePCTxnCounterError = metrics.TiKVOnePCTxnCounter.WithLabelValues("err") -) - // Global variable set by config file. var ( ManagedLockTTL uint64 = 20000 // 20s ) -// metricsTag returns detail tag for metrics. -func metricsTag(action string) string { - return "2pc_" + action -} - // twoPhaseCommitter executes a two-phase commit protocol. 
type twoPhaseCommitter struct { store *KVStore @@ -766,7 +751,7 @@ func (c *twoPhaseCommitter) doActionOnGroupMutations(bo *Backoffer, action twoPh zap.Uint64("session", c.sessionID), zap.Stringer("action type", action), zap.Error(e)) - tikvSecondaryLockCleanupFailureCounterCommit.Inc() + metrics.SecondaryLockCleanupFailureCounterCommit.Inc() } }() } else { @@ -907,13 +892,13 @@ func (tm *ttlManager) keepAlive(c *twoPhaseCommitter) { startTime := time.Now() _, err = sendTxnHeartBeat(bo, c.store, c.primary(), c.startTS, newTTL) if err != nil { - tiKVTxnHeartBeatHistogramError.Observe(time.Since(startTime).Seconds()) + metrics.TxnHeartBeatHistogramError.Observe(time.Since(startTime).Seconds()) logutil.Logger(bo.ctx).Warn("send TxnHeartBeat failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS)) return } - tiKVTxnHeartBeatHistogramOK.Observe(time.Since(startTime).Seconds()) + metrics.TxnHeartBeatHistogramOK.Observe(time.Since(startTime).Seconds()) } } } @@ -1046,7 +1031,7 @@ func (c *twoPhaseCommitter) cleanup(ctx context.Context) { cleanupKeysCtx := context.WithValue(context.Background(), TxnStartKey, ctx.Value(TxnStartKey)) err := c.cleanupMutations(NewBackofferWithVars(cleanupKeysCtx, cleanupMaxBackoff, c.txn.vars), c.mutations) if err != nil { - tikvSecondaryLockCleanupFailureCounterRollback.Inc() + metrics.SecondaryLockCleanupFailureCounterRollback.Inc() logutil.Logger(ctx).Info("2PC cleanup failed", zap.Error(err), zap.Uint64("txnStartTS", c.startTS)) @@ -1065,9 +1050,9 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { if c.isOnePC() { // The error means the 1PC transaction failed. if err != nil { - tikvOnePCTxnCounterError.Inc() + metrics.OnePCTxnCounterError.Inc() } else { - tikvOnePCTxnCounterOk.Inc() + metrics.OnePCTxnCounterOk.Inc() } } else if c.isAsyncCommit() { // The error means the async commit should not succeed. @@ -1075,9 +1060,9 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { if c.getUndeterminedErr() == nil { c.cleanup(ctx) } - tikvAsyncCommitTxnCounterError.Inc() + metrics.AsyncCommitTxnCounterError.Inc() } else { - tikvAsyncCommitTxnCounterOk.Inc() + metrics.AsyncCommitTxnCounterOk.Inc() } } else { // Always clean up all written keys if the txn does not commit. 
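
The diffs above all apply one pattern: resolve a metric's label values once and reuse the returned child metric. In prometheus's client_golang, CounterVec.WithLabelValues performs a label-hash lookup on every call, so hot paths cache its result up front. A minimal, self-contained sketch of the idea in Go (the demo_txn_counter metric and helper names are illustrative only, not part of this patch):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // A label-partitioned counter vector, analogous to the vectors
    // registered in store/tikv/metrics/metrics.go.
    var demoTxnCounter = prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "demo_txn_counter",
            Help: "Counter of demo transactions.",
        }, []string{"type"})

    // Cached children: the label lookup runs once here instead of on
    // every increment, which is what the new shortcuts package does.
    var (
        demoTxnCounterOk  = demoTxnCounter.WithLabelValues("ok")
        demoTxnCounterErr = demoTxnCounter.WithLabelValues("err")
    )

    func recordTxn(err error) {
        if err != nil {
            demoTxnCounterErr.Inc() // no per-call WithLabelValues lookup
            return
        }
        demoTxnCounterOk.Inc()
    }

    func main() {
        prometheus.MustRegister(demoTxnCounter)
        recordTxn(nil)
    }
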
diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go index 0dd4ecf34cdb0..b61a0f359b8ef 100644 --- a/store/tikv/backoff.go +++ b/store/tikv/backoff.go @@ -45,36 +45,25 @@ const ( DecorrJitter ) -var ( - tikvBackoffHistogramRPC = metrics.TiKVBackoffHistogram.WithLabelValues("tikvRPC") - tikvBackoffHistogramLock = metrics.TiKVBackoffHistogram.WithLabelValues("txnLock") - tikvBackoffHistogramLockFast = metrics.TiKVBackoffHistogram.WithLabelValues("tikvLockFast") - tikvBackoffHistogramPD = metrics.TiKVBackoffHistogram.WithLabelValues("pdRPC") - tikvBackoffHistogramRegionMiss = metrics.TiKVBackoffHistogram.WithLabelValues("regionMiss") - tikvBackoffHistogramServerBusy = metrics.TiKVBackoffHistogram.WithLabelValues("serverBusy") - tikvBackoffHistogramStaleCmd = metrics.TiKVBackoffHistogram.WithLabelValues("staleCommand") - tikvBackoffHistogramEmpty = metrics.TiKVBackoffHistogram.WithLabelValues("") -) - func (t BackoffType) metric() prometheus.Observer { switch t { // TODO: distinguish tikv and tiflash in metrics case BoTiKVRPC, BoTiFlashRPC: - return tikvBackoffHistogramRPC + return metrics.BackoffHistogramRPC case BoTxnLock: - return tikvBackoffHistogramLock + return metrics.BackoffHistogramLock case BoTxnLockFast: - return tikvBackoffHistogramLockFast + return metrics.BackoffHistogramLockFast case BoPDRPC: - return tikvBackoffHistogramPD + return metrics.BackoffHistogramPD case BoRegionMiss: - return tikvBackoffHistogramRegionMiss + return metrics.BackoffHistogramRegionMiss case boTiKVServerBusy, boTiFlashServerBusy: - return tikvBackoffHistogramServerBusy + return metrics.BackoffHistogramServerBusy case boStaleCmd: - return tikvBackoffHistogramStaleCmd + return metrics.BackoffHistogramStaleCmd } - return tikvBackoffHistogramEmpty + return metrics.BackoffHistogramEmpty } // NewBackoffFn creates a backoff func which implements exponential backoff with diff --git a/store/tikv/batch_coprocessor.go b/store/tikv/batch_coprocessor.go index ab830381fe55d..39d4e9f6ba8c7 100644 --- a/store/tikv/batch_coprocessor.go +++ b/store/tikv/batch_coprocessor.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/tikv/logutil" + "github.com/pingcap/tidb/store/tikv/metrics" "github.com/pingcap/tidb/store/tikv/tikvrpc" "github.com/pingcap/tidb/util/memory" "go.uber.org/zap" @@ -160,7 +161,7 @@ func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, st zap.Int("range len", rangesLen), zap.Int("task len", len(batchTasks))) } - tikvTxnRegionsNumHistogramWithBatchCoprocessor.Observe(float64(len(batchTasks))) + metrics.TxnRegionsNumHistogramWithBatchCoprocessor.Observe(float64(len(batchTasks))) return batchTasks, nil } } diff --git a/store/tikv/cleanup.go b/store/tikv/cleanup.go index 911b0890e045c..1dd56f54f63e3 100644 --- a/store/tikv/cleanup.go +++ b/store/tikv/cleanup.go @@ -26,14 +26,13 @@ import ( type actionCleanup struct{} var _ twoPhaseCommitAction = actionCleanup{} -var tiKVTxnRegionsNumHistogramCleanup = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues(metricsTag("cleanup")) func (actionCleanup) String() string { return "cleanup" } func (actionCleanup) tiKVTxnRegionsNumHistogram() prometheus.Observer { - return tiKVTxnRegionsNumHistogramCleanup + return metrics.TxnRegionsNumHistogramCleanup } func (actionCleanup) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error { diff --git a/store/tikv/commit.go b/store/tikv/commit.go index b521cb1493538..495bbdffb52d2 100644 --- 
a/store/tikv/commit.go
+++ b/store/tikv/commit.go
@@ -31,15 +31,12 @@ type actionCommit struct{ retry bool }
 
 var _ twoPhaseCommitAction = actionCommit{}
 
-var tikvSecondaryLockCleanupFailureCounterCommit = metrics.TiKVSecondaryLockCleanupFailureCounter.WithLabelValues("commit")
-var tiKVTxnRegionsNumHistogramCommit = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues(metricsTag("commit"))
-
 func (actionCommit) String() string {
 	return "commit"
 }
 
 func (actionCommit) tiKVTxnRegionsNumHistogram() prometheus.Observer {
-	return tiKVTxnRegionsNumHistogramCommit
+	return metrics.TxnRegionsNumHistogramCommit
 }
 
 func (actionCommit) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error {
diff --git a/store/tikv/coprocessor.go b/store/tikv/coprocessor.go
index 5d79ca984a8eb..e8d9fa71cc6d9 100644
--- a/store/tikv/coprocessor.go
+++ b/store/tikv/coprocessor.go
@@ -44,11 +44,7 @@ import (
 	"go.uber.org/zap"
 )
 
-var (
-	tikvTxnRegionsNumHistogramWithCoprocessor      = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("coprocessor")
-	tikvTxnRegionsNumHistogramWithBatchCoprocessor = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("batch_coprocessor")
-	coprCacheHistogramEvict                        = tidbmetrics.DistSQLCoprCacheHistogram.WithLabelValues("evict")
-)
+var coprCacheHistogramEvict = tidbmetrics.DistSQLCoprCacheHistogram.WithLabelValues("evict")
 
 // CopClient is the coprocessor client.
 type CopClient struct {
@@ -180,7 +176,7 @@ func buildCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, req *kv
 			zap.Int("range len", rangesLen),
 			zap.Int("task len", len(tasks)))
 	}
-	tikvTxnRegionsNumHistogramWithCoprocessor.Observe(float64(len(tasks)))
+	metrics.TxnRegionsNumHistogramWithCoprocessor.Observe(float64(len(tasks)))
 	return tasks, nil
 }
 
diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go
index 65b0720cd6eb1..97c114b95fb91 100644
--- a/store/tikv/lock_resolver.go
+++ b/store/tikv/lock_resolver.go
@@ -42,23 +42,6 @@ const ResolvedCacheSize = 2048
 
 // bigTxnThreshold : a transaction that involves more keys than this threshold can be treated as a `big transaction`.
const bigTxnThreshold = 16
 
-var (
-	tikvLockResolverCountWithBatchResolve             = metrics.TiKVLockResolverCounter.WithLabelValues("batch_resolve")
-	tikvLockResolverCountWithExpired                  = metrics.TiKVLockResolverCounter.WithLabelValues("expired")
-	tikvLockResolverCountWithNotExpired               = metrics.TiKVLockResolverCounter.WithLabelValues("not_expired")
-	tikvLockResolverCountWithWaitExpired              = metrics.TiKVLockResolverCounter.WithLabelValues("wait_expired")
-	tikvLockResolverCountWithResolve                  = metrics.TiKVLockResolverCounter.WithLabelValues("resolve")
-	tikvLockResolverCountWithResolveForWrite          = metrics.TiKVLockResolverCounter.WithLabelValues("resolve_for_write")
-	tikvLockResolverCountWithResolveAsync             = metrics.TiKVLockResolverCounter.WithLabelValues("resolve_async_commit")
-	tikvLockResolverCountWithWriteConflict            = metrics.TiKVLockResolverCounter.WithLabelValues("write_conflict")
-	tikvLockResolverCountWithQueryTxnStatus           = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status")
-	tikvLockResolverCountWithQueryTxnStatusCommitted  = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_committed")
-	tikvLockResolverCountWithQueryTxnStatusRolledBack = metrics.TiKVLockResolverCounter.WithLabelValues("query_txn_status_rolled_back")
-	tikvLockResolverCountWithQueryCheckSecondaryLocks = metrics.TiKVLockResolverCounter.WithLabelValues("query_check_secondary_locks")
-	tikvLockResolverCountWithResolveLocks             = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_locks")
-	tikvLockResolverCountWithResolveLockLite          = metrics.TiKVLockResolverCounter.WithLabelValues("query_resolve_lock_lite")
-)
-
 // LockResolver resolves locks and also caches resolved txn status.
 type LockResolver struct {
 	store Storage
@@ -237,7 +220,7 @@ func (lr *LockResolver) BatchResolveLocks(bo *Backoffer, locks []*Lock, loc Regi
 		return true, nil
 	}
 
-	tikvLockResolverCountWithBatchResolve.Inc()
+	metrics.LockResolverCountWithBatchResolve.Inc()
 
 	// The GCWorker kills all ongoing transactions, because it must make sure all
 	// locks have been cleaned before GC.
@@ -254,7 +237,7 @@
 		if _, ok := txnInfos[l.TxnID]; ok {
 			continue
 		}
		tikvLockResolverCountWithExpired.Inc()
-		tikvLockResolverCountWithExpired.Inc()
+		metrics.LockResolverCountWithExpired.Inc()
 
 		// Using currentTS = math.MaxUint64 means rolling back the txn, no matter whether the lock is expired or not!
 		status, err := lr.getTxnStatus(bo, l.TxnID, l.Primary, callerStartTS, math.MaxUint64, true, false, l)
@@ -361,9 +344,9 @@ func (lr *LockResolver) resolveLocks(bo *Backoffer, callerStartTS uint64, locks
 	}
 
 	if forWrite {
-		tikvLockResolverCountWithResolveForWrite.Inc()
+		metrics.LockResolverCountWithResolveForWrite.Inc()
 	} else {
-		tikvLockResolverCountWithResolve.Inc()
+		metrics.LockResolverCountWithResolve.Inc()
 	}
 
 	var pushFail bool
@@ -384,7 +367,7 @@
 		}
 
 		if status.ttl == 0 {
-			tikvLockResolverCountWithExpired.Inc()
+			metrics.LockResolverCountWithExpired.Inc()
 			// If the lock is committed or rolled back, resolve the lock.
 			cleanRegions, exists := cleanTxns[l.TxnID]
 			if !exists {
@@ -406,7 +389,7 @@
 					return err
 				}
 			} else {
-				tikvLockResolverCountWithNotExpired.Inc()
+				metrics.LockResolverCountWithNotExpired.Inc()
 				// If the lock is valid, the txn may be a pessimistic transaction.
 				// Update the txn expire time.
msBeforeLockExpired := lr.store.GetOracle().UntilExpired(l.TxnID, status.ttl, &oracle.Option{TxnScope: oracle.GlobalTxnScope})
@@ -417,7 +400,7 @@
 					// abort current transaction.
 					// This avoids the deadlock scenario of two large transactions.
 					if l.LockType != kvrpcpb.Op_PessimisticLock && l.TxnID > callerStartTS {
-						tikvLockResolverCountWithWriteConflict.Inc()
+						metrics.LockResolverCountWithWriteConflict.Inc()
 						return kv.ErrWriteConflict.GenWithStackByArgs(callerStartTS, l.TxnID, status.commitTS, l.Key)
 					}
 				} else {
@@ -446,7 +429,7 @@
 	if msBeforeTxnExpired.value() > 0 && len(pushed) == 0 {
 		// If len(pushed) > 0, the caller will not block on the locks, it pushes the minCommitTS instead.
-		tikvLockResolverCountWithWaitExpired.Inc()
+		metrics.LockResolverCountWithWaitExpired.Inc()
 	}
 	return msBeforeTxnExpired.value(), pushed, nil
 }
@@ -582,7 +565,7 @@ func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte
 		return s, nil
 	}
 
-	tikvLockResolverCountWithQueryTxnStatus.Inc()
+	metrics.LockResolverCountWithQueryTxnStatus.Inc()
 
 	// CheckTxnStatus may meet the following cases:
 	// 1. LOCK
@@ -649,9 +632,9 @@ func (lr *LockResolver) getTxnStatus(bo *Backoffer, txnID uint64, primary []byte
 			status.ttl = cmdResp.LockTtl
 		} else {
 			if cmdResp.CommitVersion == 0 {
-				tikvLockResolverCountWithQueryTxnStatusRolledBack.Inc()
+				metrics.LockResolverCountWithQueryTxnStatusRolledBack.Inc()
 			} else {
-				tikvLockResolverCountWithQueryTxnStatusCommitted.Inc()
+				metrics.LockResolverCountWithQueryTxnStatusCommitted.Inc()
 			}
 
 			status.commitTS = cmdResp.CommitVersion
@@ -744,7 +727,7 @@ func (lr *LockResolver) checkSecondaries(bo *Backoffer, txnID uint64, curKeys []
 		StartVersion: txnID,
 	}
 	req := tikvrpc.NewRequest(tikvrpc.CmdCheckSecondaryLocks, checkReq)
-	tikvLockResolverCountWithQueryCheckSecondaryLocks.Inc()
+	metrics.LockResolverCountWithQueryCheckSecondaryLocks.Inc()
 	resp, err := lr.store.SendReq(bo, req, curRegionID, readTimeoutShort)
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -785,7 +768,7 @@ func (lr *LockResolver) checkSecondaries(bo *Backoffer, txnID uint64, curKeys []
 
 // resolveLockAsync resolves l assuming it was locked using the async commit protocol.
 func (lr *LockResolver) resolveLockAsync(bo *Backoffer, l *Lock, status TxnStatus) error {
-	tikvLockResolverCountWithResolveAsync.Inc()
+	metrics.LockResolverCountWithResolveAsync.Inc()
 
 	resolveData, err := lr.checkAllSecondaries(bo, l, &status)
 	if err != nil {
@@ -918,7 +901,7 @@ func (lr *LockResolver) resolveRegionLocks(bo *Backoffer, l *Lock, region Region
 }
 
 func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, lite bool, cleanRegions map[RegionVerID]struct{}) error {
-	tikvLockResolverCountWithResolveLocks.Inc()
+	metrics.LockResolverCountWithResolveLocks.Inc()
 	resolveLite := lite || l.TxnSize < bigTxnThreshold
 	for {
 		loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key)
@@ -940,7 +923,7 @@ func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, li
 		if resolveLite {
 			// Only resolve specified keys when it is a small transaction,
 			// to prevent scanning the whole region in this case.
- tikvLockResolverCountWithResolveLockLite.Inc() + metrics.LockResolverCountWithResolveLockLite.Inc() lreq.Keys = [][]byte{l.Key} } req := tikvrpc.NewRequest(tikvrpc.CmdResolveLock, lreq) @@ -976,7 +959,7 @@ func (lr *LockResolver) resolveLock(bo *Backoffer, l *Lock, status TxnStatus, li } func (lr *LockResolver) resolvePessimisticLock(bo *Backoffer, l *Lock, cleanRegions map[RegionVerID]struct{}) error { - tikvLockResolverCountWithResolveLocks.Inc() + metrics.LockResolverCountWithResolveLocks.Inc() for { loc, err := lr.store.GetRegionCache().LocateKey(bo, l.Key) if err != nil { diff --git a/store/tikv/metrics/metrics.go b/store/tikv/metrics/metrics.go index 44e726bd0a4d6..d58ede0089eb2 100644 --- a/store/tikv/metrics/metrics.go +++ b/store/tikv/metrics/metrics.go @@ -344,6 +344,8 @@ func initMetrics(namespace, subsystem string) { Name: "one_pc_txn_counter", Help: "Counter of 1PC transactions.", }, []string{LblType}) + + initShortcuts() } func init() { diff --git a/store/tikv/metrics/shortcuts.go b/store/tikv/metrics/shortcuts.go new file mode 100644 index 0000000000000..5f82405e7041c --- /dev/null +++ b/store/tikv/metrics/shortcuts.go @@ -0,0 +1,175 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Shortcuts for performance improvement. 
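+//
+// Each child metric is bound once in initShortcuts, so hot paths avoid
+// repeating the label-hash lookup that WithLabelValues performs per call.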
+var ( + TxnCmdHistogramWithCommit prometheus.Observer + TxnCmdHistogramWithRollback prometheus.Observer + TxnCmdHistogramWithBatchGet prometheus.Observer + TxnCmdHistogramWithGet prometheus.Observer + TxnCmdHistogramWithLockKeys prometheus.Observer + + RawkvCmdHistogramWithGet prometheus.Observer + RawkvCmdHistogramWithBatchGet prometheus.Observer + RawkvCmdHistogramWithBatchPut prometheus.Observer + RawkvCmdHistogramWithDelete prometheus.Observer + RawkvCmdHistogramWithBatchDelete prometheus.Observer + RawkvCmdHistogramWithRawScan prometheus.Observer + RawkvCmdHistogramWithRawReversScan prometheus.Observer + RawkvSizeHistogramWithKey prometheus.Observer + RawkvSizeHistogramWithValue prometheus.Observer + + BackoffHistogramRPC prometheus.Observer + BackoffHistogramLock prometheus.Observer + BackoffHistogramLockFast prometheus.Observer + BackoffHistogramPD prometheus.Observer + BackoffHistogramRegionMiss prometheus.Observer + BackoffHistogramServerBusy prometheus.Observer + BackoffHistogramStaleCmd prometheus.Observer + BackoffHistogramEmpty prometheus.Observer + + TxnRegionsNumHistogramWithSnapshot prometheus.Observer + TxnRegionsNumHistogramPrewrite prometheus.Observer + TxnRegionsNumHistogramCommit prometheus.Observer + TxnRegionsNumHistogramCleanup prometheus.Observer + TxnRegionsNumHistogramPessimisticLock prometheus.Observer + TxnRegionsNumHistogramPessimisticRollback prometheus.Observer + TxnRegionsNumHistogramWithCoprocessor prometheus.Observer + TxnRegionsNumHistogramWithBatchCoprocessor prometheus.Observer + + LockResolverCountWithBatchResolve prometheus.Counter + LockResolverCountWithExpired prometheus.Counter + LockResolverCountWithNotExpired prometheus.Counter + LockResolverCountWithWaitExpired prometheus.Counter + LockResolverCountWithResolve prometheus.Counter + LockResolverCountWithResolveForWrite prometheus.Counter + LockResolverCountWithResolveAsync prometheus.Counter + LockResolverCountWithWriteConflict prometheus.Counter + LockResolverCountWithQueryTxnStatus prometheus.Counter + LockResolverCountWithQueryTxnStatusCommitted prometheus.Counter + LockResolverCountWithQueryTxnStatusRolledBack prometheus.Counter + LockResolverCountWithQueryCheckSecondaryLocks prometheus.Counter + LockResolverCountWithResolveLocks prometheus.Counter + LockResolverCountWithResolveLockLite prometheus.Counter + + RegionCacheCounterWithInvalidateRegionFromCacheOK prometheus.Counter + RegionCacheCounterWithSendFail prometheus.Counter + RegionCacheCounterWithGetRegionByIDOK prometheus.Counter + RegionCacheCounterWithGetRegionByIDError prometheus.Counter + RegionCacheCounterWithGetRegionOK prometheus.Counter + RegionCacheCounterWithGetRegionError prometheus.Counter + RegionCacheCounterWithScanRegionsOK prometheus.Counter + RegionCacheCounterWithScanRegionsError prometheus.Counter + RegionCacheCounterWithGetStoreOK prometheus.Counter + RegionCacheCounterWithGetStoreError prometheus.Counter + RegionCacheCounterWithInvalidateStoreRegionsOK prometheus.Counter + + TxnHeartBeatHistogramOK prometheus.Observer + TxnHeartBeatHistogramError prometheus.Observer + + StatusCountWithOK prometheus.Counter + StatusCountWithError prometheus.Counter + + SecondaryLockCleanupFailureCounterCommit prometheus.Counter + SecondaryLockCleanupFailureCounterRollback prometheus.Counter + + AsyncCommitTxnCounterOk prometheus.Counter + AsyncCommitTxnCounterError prometheus.Counter + + OnePCTxnCounterOk prometheus.Counter + OnePCTxnCounterError prometheus.Counter + OnePCTxnCounterFallback prometheus.Counter +) + +func 
initShortcuts() { + TxnCmdHistogramWithCommit = TiKVTxnCmdHistogram.WithLabelValues(LblCommit) + TxnCmdHistogramWithRollback = TiKVTxnCmdHistogram.WithLabelValues(LblRollback) + TxnCmdHistogramWithBatchGet = TiKVTxnCmdHistogram.WithLabelValues(LblBatchGet) + TxnCmdHistogramWithGet = TiKVTxnCmdHistogram.WithLabelValues(LblGet) + TxnCmdHistogramWithLockKeys = TiKVTxnCmdHistogram.WithLabelValues(LblLockKeys) + + RawkvCmdHistogramWithGet = TiKVRawkvCmdHistogram.WithLabelValues("get") + RawkvCmdHistogramWithBatchGet = TiKVRawkvCmdHistogram.WithLabelValues("batch_get") + RawkvCmdHistogramWithBatchPut = TiKVRawkvCmdHistogram.WithLabelValues("batch_put") + RawkvCmdHistogramWithDelete = TiKVRawkvCmdHistogram.WithLabelValues("delete") + RawkvCmdHistogramWithBatchDelete = TiKVRawkvCmdHistogram.WithLabelValues("batch_delete") + RawkvCmdHistogramWithRawScan = TiKVRawkvCmdHistogram.WithLabelValues("raw_scan") + RawkvCmdHistogramWithRawReversScan = TiKVRawkvCmdHistogram.WithLabelValues("raw_reverse_scan") + RawkvSizeHistogramWithKey = TiKVRawkvSizeHistogram.WithLabelValues("key") + RawkvSizeHistogramWithValue = TiKVRawkvSizeHistogram.WithLabelValues("value") + + BackoffHistogramRPC = TiKVBackoffHistogram.WithLabelValues("tikvRPC") + BackoffHistogramLock = TiKVBackoffHistogram.WithLabelValues("txnLock") + BackoffHistogramLockFast = TiKVBackoffHistogram.WithLabelValues("tikvLockFast") + BackoffHistogramPD = TiKVBackoffHistogram.WithLabelValues("pdRPC") + BackoffHistogramRegionMiss = TiKVBackoffHistogram.WithLabelValues("regionMiss") + BackoffHistogramServerBusy = TiKVBackoffHistogram.WithLabelValues("serverBusy") + BackoffHistogramStaleCmd = TiKVBackoffHistogram.WithLabelValues("staleCommand") + BackoffHistogramEmpty = TiKVBackoffHistogram.WithLabelValues("") + + TxnRegionsNumHistogramWithSnapshot = TiKVTxnRegionsNumHistogram.WithLabelValues("snapshot") + TxnRegionsNumHistogramPrewrite = TiKVTxnRegionsNumHistogram.WithLabelValues("2pc_prewrite") + TxnRegionsNumHistogramCommit = TiKVTxnRegionsNumHistogram.WithLabelValues("2pc_commit") + TxnRegionsNumHistogramCleanup = TiKVTxnRegionsNumHistogram.WithLabelValues("2pc_cleanup") + TxnRegionsNumHistogramPessimisticLock = TiKVTxnRegionsNumHistogram.WithLabelValues("2pc_pessimistic_lock") + TxnRegionsNumHistogramPessimisticRollback = TiKVTxnRegionsNumHistogram.WithLabelValues("2pc_pessimistic_rollback") + TxnRegionsNumHistogramWithCoprocessor = TiKVTxnRegionsNumHistogram.WithLabelValues("coprocessor") + TxnRegionsNumHistogramWithBatchCoprocessor = TiKVTxnRegionsNumHistogram.WithLabelValues("batch_coprocessor") + + LockResolverCountWithBatchResolve = TiKVLockResolverCounter.WithLabelValues("batch_resolve") + LockResolverCountWithExpired = TiKVLockResolverCounter.WithLabelValues("expired") + LockResolverCountWithNotExpired = TiKVLockResolverCounter.WithLabelValues("not_expired") + LockResolverCountWithWaitExpired = TiKVLockResolverCounter.WithLabelValues("wait_expired") + LockResolverCountWithResolve = TiKVLockResolverCounter.WithLabelValues("resolve") + LockResolverCountWithResolveForWrite = TiKVLockResolverCounter.WithLabelValues("resolve_for_write") + LockResolverCountWithResolveAsync = TiKVLockResolverCounter.WithLabelValues("resolve_async_commit") + LockResolverCountWithWriteConflict = TiKVLockResolverCounter.WithLabelValues("write_conflict") + LockResolverCountWithQueryTxnStatus = TiKVLockResolverCounter.WithLabelValues("query_txn_status") + LockResolverCountWithQueryTxnStatusCommitted = TiKVLockResolverCounter.WithLabelValues("query_txn_status_committed") + 
LockResolverCountWithQueryTxnStatusRolledBack = TiKVLockResolverCounter.WithLabelValues("query_txn_status_rolled_back") + LockResolverCountWithQueryCheckSecondaryLocks = TiKVLockResolverCounter.WithLabelValues("query_check_secondary_locks") + LockResolverCountWithResolveLocks = TiKVLockResolverCounter.WithLabelValues("query_resolve_locks") + LockResolverCountWithResolveLockLite = TiKVLockResolverCounter.WithLabelValues("query_resolve_lock_lite") + + RegionCacheCounterWithInvalidateRegionFromCacheOK = TiKVRegionCacheCounter.WithLabelValues("invalidate_region_from_cache", "ok") + RegionCacheCounterWithSendFail = TiKVRegionCacheCounter.WithLabelValues("send_fail", "ok") + RegionCacheCounterWithGetRegionByIDOK = TiKVRegionCacheCounter.WithLabelValues("get_region_by_id", "ok") + RegionCacheCounterWithGetRegionByIDError = TiKVRegionCacheCounter.WithLabelValues("get_region_by_id", "err") + RegionCacheCounterWithGetRegionOK = TiKVRegionCacheCounter.WithLabelValues("get_region", "ok") + RegionCacheCounterWithGetRegionError = TiKVRegionCacheCounter.WithLabelValues("get_region", "err") + RegionCacheCounterWithScanRegionsOK = TiKVRegionCacheCounter.WithLabelValues("scan_regions", "ok") + RegionCacheCounterWithScanRegionsError = TiKVRegionCacheCounter.WithLabelValues("scan_regions", "err") + RegionCacheCounterWithGetStoreOK = TiKVRegionCacheCounter.WithLabelValues("get_store", "ok") + RegionCacheCounterWithGetStoreError = TiKVRegionCacheCounter.WithLabelValues("get_store", "err") + RegionCacheCounterWithInvalidateStoreRegionsOK = TiKVRegionCacheCounter.WithLabelValues("invalidate_store_regions", "ok") + + TxnHeartBeatHistogramOK = TiKVTxnHeartBeatHistogram.WithLabelValues("ok") + TxnHeartBeatHistogramError = TiKVTxnHeartBeatHistogram.WithLabelValues("err") + + StatusCountWithOK = TiKVStatusCounter.WithLabelValues("ok") + StatusCountWithError = TiKVStatusCounter.WithLabelValues("err") + + SecondaryLockCleanupFailureCounterCommit = TiKVSecondaryLockCleanupFailureCounter.WithLabelValues("commit") + SecondaryLockCleanupFailureCounterRollback = TiKVSecondaryLockCleanupFailureCounter.WithLabelValues("rollback") + + AsyncCommitTxnCounterOk = TiKVAsyncCommitTxnCounter.WithLabelValues("ok") + AsyncCommitTxnCounterError = TiKVAsyncCommitTxnCounter.WithLabelValues("err") + + OnePCTxnCounterOk = TiKVOnePCTxnCounter.WithLabelValues("ok") + OnePCTxnCounterError = TiKVOnePCTxnCounter.WithLabelValues("err") + OnePCTxnCounterFallback = TiKVOnePCTxnCounter.WithLabelValues("fallback") +} diff --git a/store/tikv/pessimistic.go b/store/tikv/pessimistic.go index a8a64719fb225..ef4ac925babc2 100644 --- a/store/tikv/pessimistic.go +++ b/store/tikv/pessimistic.go @@ -39,9 +39,6 @@ type actionPessimisticRollback struct{} var ( _ twoPhaseCommitAction = actionPessimisticLock{} _ twoPhaseCommitAction = actionPessimisticRollback{} - - tiKVTxnRegionsNumHistogramPessimisticLock = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues(metricsTag("pessimistic_lock")) - tiKVTxnRegionsNumHistogramPessimisticRollback = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues(metricsTag("pessimistic_rollback")) ) func (actionPessimisticLock) String() string { @@ -49,7 +46,7 @@ func (actionPessimisticLock) String() string { } func (actionPessimisticLock) tiKVTxnRegionsNumHistogram() prometheus.Observer { - return tiKVTxnRegionsNumHistogramPessimisticLock + return metrics.TxnRegionsNumHistogramPessimisticLock } func (actionPessimisticRollback) String() string { @@ -57,7 +54,7 @@ func (actionPessimisticRollback) String() string { } func 
(actionPessimisticRollback) tiKVTxnRegionsNumHistogram() prometheus.Observer { - return tiKVTxnRegionsNumHistogramPessimisticRollback + return metrics.TxnRegionsNumHistogramPessimisticRollback } func (action actionPessimisticLock) handleSingleBatch(c *twoPhaseCommitter, bo *Backoffer, batch batchMutations) error { diff --git a/store/tikv/prewrite.go b/store/tikv/prewrite.go index 4ce995ccae223..dcb2fb522332f 100644 --- a/store/tikv/prewrite.go +++ b/store/tikv/prewrite.go @@ -34,15 +34,13 @@ import ( type actionPrewrite struct{} var _ twoPhaseCommitAction = actionPrewrite{} -var tiKVTxnRegionsNumHistogramPrewrite = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues(metricsTag("prewrite")) -var tikvOnePCTxnCounterFallback = metrics.TiKVOnePCTxnCounter.WithLabelValues("fallback") func (actionPrewrite) String() string { return "prewrite" } func (actionPrewrite) tiKVTxnRegionsNumHistogram() prometheus.Observer { - return tiKVTxnRegionsNumHistogramPrewrite + return metrics.TxnRegionsNumHistogramPrewrite } func (c *twoPhaseCommitter) buildPrewriteRequest(batch batchMutations, txnSize uint64) *tikvrpc.Request { @@ -208,7 +206,7 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff } logutil.Logger(bo.ctx).Warn("1pc failed and fallbacks to normal commit procedure", zap.Uint64("startTS", c.startTS)) - tikvOnePCTxnCounterFallback.Inc() + metrics.OnePCTxnCounterFallback.Inc() c.setOnePC(false) c.setAsyncCommit(false) } else { diff --git a/store/tikv/rawkv.go b/store/tikv/rawkv.go index 72fbd3a408b93..dd5b621bb1092 100644 --- a/store/tikv/rawkv.go +++ b/store/tikv/rawkv.go @@ -33,19 +33,6 @@ var ( ErrMaxScanLimitExceeded = errors.New("limit should be less than MaxRawKVScanLimit") ) -var ( - tikvRawkvCmdHistogramWithGet = metrics.TiKVRawkvCmdHistogram.WithLabelValues("get") - tikvRawkvCmdHistogramWithBatchGet = metrics.TiKVRawkvCmdHistogram.WithLabelValues("batch_get") - tikvRawkvCmdHistogramWithBatchPut = metrics.TiKVRawkvCmdHistogram.WithLabelValues("batch_put") - tikvRawkvCmdHistogramWithDelete = metrics.TiKVRawkvCmdHistogram.WithLabelValues("delete") - tikvRawkvCmdHistogramWithBatchDelete = metrics.TiKVRawkvCmdHistogram.WithLabelValues("batch_delete") - tikvRawkvCmdHistogramWithRawScan = metrics.TiKVRawkvCmdHistogram.WithLabelValues("raw_scan") - tikvRawkvCmdHistogramWithRawReversScan = metrics.TiKVRawkvCmdHistogram.WithLabelValues("raw_reverse_scan") - - tikvRawkvSizeHistogramWithKey = metrics.TiKVRawkvSizeHistogram.WithLabelValues("key") - tikvRawkvSizeHistogramWithValue = metrics.TiKVRawkvSizeHistogram.WithLabelValues("value") -) - const ( // rawBatchPutSize is the maximum size limit for rawkv each batch put request. rawBatchPutSize = 16 * 1024 @@ -102,7 +89,7 @@ func (c *RawKVClient) ClusterID() uint64 { // Get queries value with the key. When the key does not exist, it returns `nil, nil`. 
func (c *RawKVClient) Get(key []byte) ([]byte, error) { start := time.Now() - defer func() { tikvRawkvCmdHistogramWithGet.Observe(time.Since(start).Seconds()) }() + defer func() { metrics.RawkvCmdHistogramWithGet.Observe(time.Since(start).Seconds()) }() req := tikvrpc.NewRequest(tikvrpc.CmdRawGet, &kvrpcpb.RawGetRequest{Key: key}) resp, _, err := c.sendReq(key, req, false) @@ -126,7 +113,7 @@ func (c *RawKVClient) Get(key []byte) ([]byte, error) { func (c *RawKVClient) BatchGet(keys [][]byte) ([][]byte, error) { start := time.Now() defer func() { - tikvRawkvCmdHistogramWithBatchGet.Observe(time.Since(start).Seconds()) + metrics.RawkvCmdHistogramWithBatchGet.Observe(time.Since(start).Seconds()) }() bo := NewBackofferWithVars(context.Background(), rawkvMaxBackoff, nil) @@ -155,9 +142,9 @@ func (c *RawKVClient) BatchGet(keys [][]byte) ([][]byte, error) { // Put stores a key-value pair to TiKV. func (c *RawKVClient) Put(key, value []byte) error { start := time.Now() - defer func() { tikvRawkvCmdHistogramWithBatchPut.Observe(time.Since(start).Seconds()) }() - tikvRawkvSizeHistogramWithKey.Observe(float64(len(key))) - tikvRawkvSizeHistogramWithValue.Observe(float64(len(value))) + defer func() { metrics.RawkvCmdHistogramWithBatchPut.Observe(time.Since(start).Seconds()) }() + metrics.RawkvSizeHistogramWithKey.Observe(float64(len(key))) + metrics.RawkvSizeHistogramWithValue.Observe(float64(len(value))) if len(value) == 0 { return errors.New("empty value is not supported") @@ -185,7 +172,7 @@ func (c *RawKVClient) Put(key, value []byte) error { func (c *RawKVClient) BatchPut(keys, values [][]byte) error { start := time.Now() defer func() { - tikvRawkvCmdHistogramWithBatchPut.Observe(time.Since(start).Seconds()) + metrics.RawkvCmdHistogramWithBatchPut.Observe(time.Since(start).Seconds()) }() if len(keys) != len(values) { @@ -204,7 +191,7 @@ func (c *RawKVClient) BatchPut(keys, values [][]byte) error { // Delete deletes a key-value pair from TiKV. func (c *RawKVClient) Delete(key []byte) error { start := time.Now() - defer func() { tikvRawkvCmdHistogramWithDelete.Observe(time.Since(start).Seconds()) }() + defer func() { metrics.RawkvCmdHistogramWithDelete.Observe(time.Since(start).Seconds()) }() req := tikvrpc.NewRequest(tikvrpc.CmdRawDelete, &kvrpcpb.RawDeleteRequest{ Key: key, @@ -227,7 +214,7 @@ func (c *RawKVClient) Delete(key []byte) error { func (c *RawKVClient) BatchDelete(keys [][]byte) error { start := time.Now() defer func() { - tikvRawkvCmdHistogramWithBatchDelete.Observe(time.Since(start).Seconds()) + metrics.RawkvCmdHistogramWithBatchDelete.Observe(time.Since(start).Seconds()) }() bo := NewBackofferWithVars(context.Background(), rawkvMaxBackoff, nil) @@ -285,7 +272,7 @@ func (c *RawKVClient) DeleteRange(startKey []byte, endKey []byte) error { // `Scan(push(startKey, '\0'), push(endKey, '\0'), limit)`. 
func (c *RawKVClient) Scan(startKey, endKey []byte, limit int) (keys [][]byte, values [][]byte, err error) {
 	start := time.Now()
-	defer func() { tikvRawkvCmdHistogramWithRawScan.Observe(time.Since(start).Seconds()) }()
+	defer func() { metrics.RawkvCmdHistogramWithRawScan.Observe(time.Since(start).Seconds()) }()
 
 	if limit > MaxRawKVScanLimit {
 		return nil, nil, errors.Trace(ErrMaxScanLimitExceeded)
@@ -327,7 +314,7 @@ func (c *RawKVClient) Scan(startKey, endKey []byte, limit int) (keys [][]byte, v
 func (c *RawKVClient) ReverseScan(startKey, endKey []byte, limit int) (keys [][]byte, values [][]byte, err error) {
 	start := time.Now()
 	defer func() {
-		tikvRawkvCmdHistogramWithRawReversScan.Observe(time.Since(start).Seconds())
+		metrics.RawkvCmdHistogramWithRawReversScan.Observe(time.Since(start).Seconds())
 	}()
 
 	if limit > MaxRawKVScanLimit {
diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go
index 73d09623cb406..2967d2bf32850 100644
--- a/store/tikv/region_cache.go
+++ b/store/tikv/region_cache.go
@@ -50,23 +50,6 @@ const (
 // RegionCacheTTLSec is the max idle time for regions in the region cache.
 var RegionCacheTTLSec int64 = 600
 
-var (
-	tikvRegionCacheCounterWithInvalidateRegionFromCacheOK = metrics.TiKVRegionCacheCounter.WithLabelValues("invalidate_region_from_cache", "ok")
-	tikvRegionCacheCounterWithSendFail                    = metrics.TiKVRegionCacheCounter.WithLabelValues("send_fail", "ok")
-	tikvRegionCacheCounterWithGetRegionByIDOK             = metrics.TiKVRegionCacheCounter.WithLabelValues("get_region_by_id", "ok")
-	tikvRegionCacheCounterWithGetRegionByIDError          = metrics.TiKVRegionCacheCounter.WithLabelValues("get_region_by_id", "err")
-	tikvRegionCacheCounterWithGetRegionOK                 = metrics.TiKVRegionCacheCounter.WithLabelValues("get_region", "ok")
-	tikvRegionCacheCounterWithGetRegionError              = metrics.TiKVRegionCacheCounter.WithLabelValues("get_region", "err")
-	tikvRegionCacheCounterWithScanRegionsOK               = metrics.TiKVRegionCacheCounter.WithLabelValues("scan_regions", "ok")
-	tikvRegionCacheCounterWithScanRegionsError            = metrics.TiKVRegionCacheCounter.WithLabelValues("scan_regions", "err")
-	tikvRegionCacheCounterWithGetStoreOK                  = metrics.TiKVRegionCacheCounter.WithLabelValues("get_store", "ok")
-	tikvRegionCacheCounterWithGetStoreError               = metrics.TiKVRegionCacheCounter.WithLabelValues("get_store", "err")
-	tikvRegionCacheCounterWithInvalidateStoreRegionsOK    = metrics.TiKVRegionCacheCounter.WithLabelValues("invalidate_store_regions", "ok")
-
-	tikvStatusCountWithOK    = metrics.TiKVStatusCounter.WithLabelValues("ok")
-	tikvStatusCountWithError = metrics.TiKVStatusCounter.WithLabelValues("err")
-)
-
 const (
 	updated int32 = iota // region is updated and no need to reload.
 	needSync             // need sync new region info.
@@ -249,7 +232,7 @@ func (r *Region) checkRegionCacheTTL(ts int64) bool {
 
 // invalidate invalidates a region, next time it will get a null result.
 func (r *Region) invalidate() {
-	tikvRegionCacheCounterWithInvalidateRegionFromCacheOK.Inc()
+	metrics.RegionCacheCounterWithInvalidateRegionFromCacheOK.Inc()
 	atomic.StoreInt64(&r.lastAccess, invalidatedLastAccessTime)
 }
 
@@ -596,7 +579,7 @@ func (c *RegionCache) findRegionByKey(bo *Backoffer, key []byte, isEndKey bool)
 
 // OnSendFail handles send request fail logic.
func (c *RegionCache) OnSendFail(bo *Backoffer, ctx *RPCContext, scheduleReload bool, err error) { - tikvRegionCacheCounterWithSendFail.Inc() + metrics.RegionCacheCounterWithSendFail.Inc() r := c.getCachedRegionWithRLock(ctx.Region) if r != nil { peersNum := len(r.meta.Peers) @@ -624,7 +607,7 @@ func (c *RegionCache) OnSendFail(bo *Backoffer, ctx *RPCContext, scheduleReload epoch := rs.storeEpochs[storeIdx] if atomic.CompareAndSwapUint32(&s.epoch, epoch, epoch+1) { logutil.BgLogger().Info("mark store's regions need be refill", zap.String("store", s.addr)) - tikvRegionCacheCounterWithInvalidateStoreRegionsOK.Inc() + metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() } // schedule a store addr resolve. @@ -994,9 +977,9 @@ func (c *RegionCache) loadRegion(bo *Backoffer, key []byte, isEndKey bool) (*Reg reg, err = c.pdClient.GetRegion(ctx, key) } if err != nil { - tikvRegionCacheCounterWithGetRegionError.Inc() + metrics.RegionCacheCounterWithGetRegionError.Inc() } else { - tikvRegionCacheCounterWithGetRegionOK.Inc() + metrics.RegionCacheCounterWithGetRegionOK.Inc() } if err != nil { backoffErr = errors.Errorf("loadRegion from PD failed, key: %q, err: %v", key, err) @@ -1044,9 +1027,9 @@ func (c *RegionCache) loadRegionByID(bo *Backoffer, regionID uint64) (*Region, e } reg, err := c.pdClient.GetRegionByID(ctx, regionID) if err != nil { - tikvRegionCacheCounterWithGetRegionByIDError.Inc() + metrics.RegionCacheCounterWithGetRegionByIDError.Inc() } else { - tikvRegionCacheCounterWithGetRegionByIDOK.Inc() + metrics.RegionCacheCounterWithGetRegionByIDOK.Inc() } if err != nil { backoffErr = errors.Errorf("loadRegion from PD failed, regionID: %v, err: %v", regionID, err) @@ -1094,7 +1077,7 @@ func (c *RegionCache) scanRegions(bo *Backoffer, startKey, endKey []byte, limit } regionsInfo, err := c.pdClient.ScanRegions(ctx, startKey, endKey, limit) if err != nil { - tikvRegionCacheCounterWithScanRegionsError.Inc() + metrics.RegionCacheCounterWithScanRegionsError.Inc() backoffErr = errors.Errorf( "scanRegion from PD failed, startKey: %q, limit: %q, err: %v", startKey, @@ -1103,7 +1086,7 @@ func (c *RegionCache) scanRegions(bo *Backoffer, startKey, endKey []byte, limit continue } - tikvRegionCacheCounterWithScanRegionsOK.Inc() + metrics.RegionCacheCounterWithScanRegionsOK.Inc() if len(regionsInfo) == 0 { return nil, errors.New("PD returned no region") @@ -1494,9 +1477,9 @@ func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err err for { store, err = c.pdClient.GetStore(bo.ctx, s.storeID) if err != nil { - tikvRegionCacheCounterWithGetStoreError.Inc() + metrics.RegionCacheCounterWithGetStoreError.Inc() } else { - tikvRegionCacheCounterWithGetStoreOK.Inc() + metrics.RegionCacheCounterWithGetStoreOK.Inc() } if err != nil { // TODO: more refine PD error status handle. 
@@ -1549,9 +1532,9 @@ func (s *Store) reResolve(c *RegionCache) { var addr string store, err := c.pdClient.GetStore(context.Background(), s.storeID) if err != nil { - tikvRegionCacheCounterWithGetStoreError.Inc() + metrics.RegionCacheCounterWithGetStoreError.Inc() } else { - tikvRegionCacheCounterWithGetStoreOK.Inc() + metrics.RegionCacheCounterWithGetStoreOK.Inc() } if err != nil { logutil.BgLogger().Error("loadStore from PD failed", zap.Uint64("id", s.storeID), zap.Error(err)) @@ -1563,7 +1546,7 @@ func (s *Store) reResolve(c *RegionCache) { logutil.BgLogger().Info("invalidate regions in removed store", zap.Uint64("store", s.storeID), zap.String("add", s.addr)) atomic.AddUint32(&s.epoch, 1) - tikvRegionCacheCounterWithInvalidateStoreRegionsOK.Inc() + metrics.RegionCacheCounterWithInvalidateStoreRegionsOK.Inc() return } @@ -1702,9 +1685,9 @@ func invokeKVStatusAPI(saddr string, timeout time.Duration) (l livenessState) { start := time.Now() defer func() { if l == reachable { - tikvStatusCountWithOK.Inc() + metrics.StatusCountWithOK.Inc() } else { - tikvStatusCountWithError.Inc() + metrics.StatusCountWithError.Inc() } metrics.TiKVStatusDuration.WithLabelValues(saddr).Observe(time.Since(start).Seconds()) }() diff --git a/store/tikv/snapshot.go b/store/tikv/snapshot.go index 3603c84d2b05b..f8c2458967333 100644 --- a/store/tikv/snapshot.go +++ b/store/tikv/snapshot.go @@ -47,10 +47,6 @@ const ( batchGetSize = 5120 ) -var ( - tikvTxnRegionsNumHistogramWithSnapshot = metrics.TiKVTxnRegionsNumHistogram.WithLabelValues("snapshot") -) - // tikvSnapshot implements the kv.Snapshot interface. type tikvSnapshot struct { store *KVStore @@ -222,14 +218,14 @@ func appendBatchKeysBySize(b []batchKeys, region RegionVerID, keys [][]byte, siz func (s *tikvSnapshot) batchGetKeysByRegions(bo *Backoffer, keys [][]byte, collectF func(k, v []byte)) error { defer func(start time.Time) { - tikvTxnCmdHistogramWithBatchGet.Observe(time.Since(start).Seconds()) + metrics.TxnCmdHistogramWithBatchGet.Observe(time.Since(start).Seconds()) }(time.Now()) groups, _, err := s.store.regionCache.GroupKeysByRegion(bo, keys, nil) if err != nil { return errors.Trace(err) } - tikvTxnRegionsNumHistogramWithSnapshot.Observe(float64(len(groups))) + metrics.TxnRegionsNumHistogramWithSnapshot.Observe(float64(len(groups))) var batches []batchKeys for id, g := range groups { @@ -368,7 +364,7 @@ func (s *tikvSnapshot) batchGetSingleRegion(bo *Backoffer, batch batchKeys, coll func (s *tikvSnapshot) Get(ctx context.Context, k kv.Key) ([]byte, error) { defer func(start time.Time) { - tikvTxnCmdHistogramWithGet.Observe(time.Since(start).Seconds()) + metrics.TxnCmdHistogramWithGet.Observe(time.Since(start).Seconds()) }(time.Now()) ctx = context.WithValue(ctx, TxnStartKey, s.version.Ver) diff --git a/store/tikv/txn.go b/store/tikv/txn.go index e4e8bcf20d7b7..78eccbbb2529b 100644 --- a/store/tikv/txn.go +++ b/store/tikv/txn.go @@ -41,14 +41,6 @@ var ( _ kv.Transaction = (*tikvTxn)(nil) ) -var ( - tikvTxnCmdHistogramWithCommit = metrics.TiKVTxnCmdHistogram.WithLabelValues(metrics.LblCommit) - tikvTxnCmdHistogramWithRollback = metrics.TiKVTxnCmdHistogram.WithLabelValues(metrics.LblRollback) - tikvTxnCmdHistogramWithBatchGet = metrics.TiKVTxnCmdHistogram.WithLabelValues(metrics.LblBatchGet) - tikvTxnCmdHistogramWithGet = metrics.TiKVTxnCmdHistogram.WithLabelValues(metrics.LblGet) - tikvTxnCmdHistogramWithLockKeys = metrics.TiKVTxnCmdHistogram.WithLabelValues(metrics.LblLockKeys) -) - // SchemaAmender is used by pessimistic transactions to amend commit 
mutations for schema change during 2pc. type SchemaAmender interface { // AmendTxn is the amend entry, new mutations will be generated based on input mutations using schema change info. @@ -222,7 +214,7 @@ func (txn *tikvTxn) Commit(ctx context.Context) error { }) start := time.Now() - defer func() { tikvTxnCmdHistogramWithCommit.Observe(time.Since(start).Seconds()) }() + defer func() { metrics.TxnCmdHistogramWithCommit.Observe(time.Since(start).Seconds()) }() // sessionID is used for log. var sessionID uint64 @@ -323,7 +315,7 @@ func (txn *tikvTxn) Rollback() error { } txn.close() logutil.BgLogger().Debug("[kv] rollback txn", zap.Uint64("txnStartTS", txn.StartTS())) - tikvTxnCmdHistogramWithRollback.Observe(time.Since(start).Seconds()) + metrics.TxnCmdHistogramWithRollback.Observe(time.Since(start).Seconds()) return nil } @@ -368,7 +360,7 @@ func (txn *tikvTxn) LockKeys(ctx context.Context, lockCtx *kv.LockCtx, keysInput txn.mu.Lock() defer txn.mu.Unlock() defer func() { - tikvTxnCmdHistogramWithLockKeys.Observe(time.Since(startTime).Seconds()) + metrics.TxnCmdHistogramWithLockKeys.Observe(time.Since(startTime).Seconds()) if err == nil { if lockCtx.PessimisticLockWaited != nil { if atomic.LoadInt32(lockCtx.PessimisticLockWaited) > 0 {
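
After this patch, call sites record metrics through the shared, pre-resolved observers instead of package-local vars. A usage sketch of the post-patch call pattern, assuming the github.com/pingcap/tidb/store/tikv/metrics import path (timedCommit is an illustrative helper, not part of the patch):

    package tikv

    import (
        "time"

        "github.com/pingcap/tidb/store/tikv/metrics"
    )

    // timedCommit runs an operation and records its latency on the shared,
    // pre-resolved Observer, mirroring how Commit and Rollback are instrumented.
    func timedCommit(do func() error) error {
        start := time.Now()
        defer func() {
            metrics.TxnCmdHistogramWithCommit.Observe(time.Since(start).Seconds())
        }()
        return do()
    }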