From b47381b926e5061dc0876c0b96bfbd3b4c285d7c Mon Sep 17 00:00:00 2001
From: djshow832 <873581766@qq.com>
Date: Fri, 20 Dec 2019 22:43:11 +0800
Subject: [PATCH] infoschema: add 3 fields to statement summary table (#14096) (#14168)

---
 executor/adapter.go                        |  20 +++-
 infoschema/perfschema/const.go             |  79 +++-------------
 infoschema/perfschema/tables_test.go       |   8 +-
 session/tidb.go                            |   1 +
 sessionctx/variable/session.go             |  15 +++
 util/stmtsummary/statement_summary.go      |  68 +++++++++++---
 util/stmtsummary/statement_summary_test.go | 101 +++++++++++++++++++--
 7 files changed, 201 insertions(+), 91 deletions(-)

diff --git a/executor/adapter.go b/executor/adapter.go
index b4ff8d4cfe85c..5f15d68587608 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -135,12 +135,13 @@ func (a *recordSet) NewChunk() *chunk.Chunk {

 func (a *recordSet) Close() error {
 	err := a.executor.Close()
+	// `LogSlowQuery` and `SummaryStmt` must be called before recording `PrevStmt`.
 	a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil, false)
+	a.stmt.SummaryStmt()
 	sessVars := a.stmt.Ctx.GetSessionVars()
 	pps := types.CloneRow(sessVars.PreparedParams)
 	sessVars.PrevStmt = FormatSQL(a.stmt.OriginText(), pps)
 	a.stmt.logAudit()
-	a.stmt.SummaryStmt()
 	return err
 }

@@ -769,13 +770,26 @@ func getPlanTree(p plannercore.Plan) string {
 // SummaryStmt collects statements for performance_schema.events_statements_summary_by_digest
 func (a *ExecStmt) SummaryStmt() {
 	sessVars := a.Ctx.GetSessionVars()
-	if sessVars.InRestrictedSQL || !stmtsummary.StmtSummaryByDigestMap.Enabled() {
+	// Internal SQL statements must also be recorded to keep `PrevStmt` and `PrevStmtDigest` consistent.
+	if !stmtsummary.StmtSummaryByDigestMap.Enabled() {
+		sessVars.SetPrevStmtDigest("")
 		return
 	}
 	stmtCtx := sessVars.StmtCtx
 	normalizedSQL, digest := stmtCtx.SQLDigest()
 	costTime := time.Since(sessVars.StartTime)

+	var prevSQL, prevSQLDigest string
+	if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
+		// If prevSQLDigest is not recorded, this `commit` is the first SQL since the statement
+		// summary was enabled, so it's OK to simply ignore it.
+		if prevSQLDigest = sessVars.GetPrevStmtDigest(); len(prevSQLDigest) == 0 {
+			return
+		}
+		prevSQL = sessVars.PrevStmt.String()
+	}
+	sessVars.SetPrevStmtDigest(digest)
+
 	execDetail := stmtCtx.GetExecDetails()
 	copTaskInfo := stmtCtx.CopTasksDetails()
 	memMax := stmtCtx.MemTracker.MaxConsumed()
@@ -789,6 +803,8 @@ func (a *ExecStmt) SummaryStmt() {
 		OriginalSQL:   a.Text,
 		NormalizedSQL: normalizedSQL,
 		Digest:        digest,
+		PrevSQL:       prevSQL,
+		PrevSQLDigest: prevSQLDigest,
 		User:          userString,
 		TotalLatency:  costTime,
 		ParseLatency:  sessVars.DurationParse,
diff --git a/infoschema/perfschema/const.go b/infoschema/perfschema/const.go
index 5174fd15a2927..e369f2f5327fc 100644
--- a/infoschema/perfschema/const.go
+++ b/infoschema/perfschema/const.go
@@ -380,10 +380,10 @@ const tableStagesHistoryLong = "CREATE TABLE if not exists performance_schema.ev
 	"NESTING_EVENT_ID BIGINT(20) UNSIGNED," +
 	"NESTING_EVENT_TYPE ENUM('TRANSACTION','STATEMENT','STAGE'));"

-// tableEventsStatementsSummaryByDigest contains the column name definitions for table
-// events_statements_summary_by_digest, same as MySQL.
-const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists events_statements_summary_by_digest (" +
+// Fields in `events_statements_summary_by_digest` and `events_statements_summary_by_digest_history` are the same.
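+// They are defined once here and shared by both CREATE TABLE statements below, so the two schemas cannot drift apart.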
+const fieldsInEventsStatementsSummary = " (" + "SUMMARY_BEGIN_TIME TIMESTAMP(6) NOT NULL," + + "SUMMARY_END_TIME TIMESTAMP(6) NOT NULL," + "STMT_TYPE VARCHAR(64) NOT NULL," + "SCHEMA_NAME VARCHAR(64) DEFAULT NULL," + "DIGEST VARCHAR(64) NOT NULL," + @@ -437,78 +437,25 @@ const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists events_ "MAX_PREWRITE_REGIONS INT(11) UNSIGNED NOT NULL," + "AVG_TXN_RETRY DOUBLE NOT NULL," + "MAX_TXN_RETRY INT(11) UNSIGNED NOT NULL," + + "SUM_BACKOFF_TIMES BIGINT(20) UNSIGNED NOT NULL," + "BACKOFF_TYPES VARCHAR(1024) DEFAULT NULL," + "AVG_MEM BIGINT(20) UNSIGNED NOT NULL," + "MAX_MEM BIGINT(20) UNSIGNED NOT NULL," + "AVG_AFFECTED_ROWS DOUBLE UNSIGNED NOT NULL," + "FIRST_SEEN TIMESTAMP(6) NOT NULL," + "LAST_SEEN TIMESTAMP(6) NOT NULL," + - "QUERY_SAMPLE_TEXT LONGTEXT DEFAULT NULL);" + "QUERY_SAMPLE_TEXT LONGTEXT DEFAULT NULL," + + "PREV_SAMPLE_TEXT LONGTEXT DEFAULT NULL);" + +// tableEventsStatementsSummaryByDigest contains the column name definitions for table +// events_statements_summary_by_digest, same as MySQL. +const tableEventsStatementsSummaryByDigest = "CREATE TABLE if not exists " + tableNameEventsStatementsSummaryByDigest + + fieldsInEventsStatementsSummary // tableEventsStatementsSummaryByDigestHistory contains the column name definitions for table // events_statements_summary_by_digest_history. -const tableEventsStatementsSummaryByDigestHistory = "CREATE TABLE if not exists events_statements_summary_by_digest_history (" + - "SUMMARY_BEGIN_TIME TIMESTAMP(6) NOT NULL," + - "STMT_TYPE VARCHAR(64) NOT NULL," + - "SCHEMA_NAME VARCHAR(64) DEFAULT NULL," + - "DIGEST VARCHAR(64) NOT NULL," + - "DIGEST_TEXT LONGTEXT NOT NULL," + - "TABLE_NAMES TEXT DEFAULT NULL," + - "INDEX_NAMES TEXT DEFAULT NULL," + - "SAMPLE_USER VARCHAR(64) DEFAULT NULL," + - "EXEC_COUNT BIGINT(20) UNSIGNED NOT NULL," + - "SUM_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "MAX_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "MIN_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "AVG_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "AVG_PARSE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "MAX_PARSE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "AVG_COMPILE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COMPILE_LATENCY BIGINT(20) UNSIGNED NOT NULL," + - "COP_TASK_NUM BIGINT(20) UNSIGNED NOT NULL," + - "AVG_COP_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COP_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COP_PROCESS_ADDRESS VARCHAR(256) DEFAULT NULL," + - "AVG_COP_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COP_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COP_WAIT_ADDRESS VARCHAR(256) DEFAULT NULL," + - "AVG_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_PROCESS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_TOTAL_KEYS BIGINT(20) UNSIGNED NOT NULL," + - "MAX_TOTAL_KEYS BIGINT(20) UNSIGNED NOT NULL," + - "AVG_PROCESSED_KEYS BIGINT(20) UNSIGNED NOT NULL," + - "MAX_PROCESSED_KEYS BIGINT(20) UNSIGNED NOT NULL," + - "AVG_PREWRITE_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_PREWRITE_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_COMMIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COMMIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_GET_COMMIT_TS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_GET_COMMIT_TS_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_COMMIT_BACKOFF_TIME 
BIGINT(20) UNSIGNED NOT NULL," + - "MAX_COMMIT_BACKOFF_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_RESOLVE_LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_RESOLVE_LOCK_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_LOCAL_LATCH_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "MAX_LOCAL_LATCH_WAIT_TIME BIGINT(20) UNSIGNED NOT NULL," + - "AVG_WRITE_KEYS DOUBLE UNSIGNED NOT NULL," + - "MAX_WRITE_KEYS BIGINT(20) UNSIGNED NOT NULL," + - "AVG_WRITE_SIZE DOUBLE NOT NULL," + - "MAX_WRITE_SIZE BIGINT(20) UNSIGNED NOT NULL," + - "AVG_PREWRITE_REGIONS DOUBLE NOT NULL," + - "MAX_PREWRITE_REGIONS INT(11) UNSIGNED NOT NULL," + - "AVG_TXN_RETRY DOUBLE NOT NULL," + - "MAX_TXN_RETRY INT(11) UNSIGNED NOT NULL," + - "BACKOFF_TYPES VARCHAR(1024) DEFAULT NULL," + - "AVG_MEM BIGINT(20) UNSIGNED NOT NULL," + - "MAX_MEM BIGINT(20) UNSIGNED NOT NULL," + - "AVG_AFFECTED_ROWS DOUBLE UNSIGNED NOT NULL," + - "FIRST_SEEN TIMESTAMP(6) NOT NULL," + - "LAST_SEEN TIMESTAMP(6) NOT NULL," + - "QUERY_SAMPLE_TEXT LONGTEXT DEFAULT NULL);" +const tableEventsStatementsSummaryByDigestHistory = "CREATE TABLE if not exists " + tableNameEventsStatementsSummaryByDigestHistory + + fieldsInEventsStatementsSummary // tableTiDBProfileCPU contains the columns name definitions for table tidb_profile_cpu const tableTiDBProfileCPU = "CREATE TABLE IF NOT EXISTS " + tableNameTiDBProfileCPU + " (" + diff --git a/infoschema/perfschema/tables_test.go b/infoschema/perfschema/tables_test.go index 69aad67bdb7c1..d9ab077a08e5b 100644 --- a/infoschema/perfschema/tables_test.go +++ b/infoschema/perfschema/tables_test.go @@ -142,16 +142,16 @@ func (s *testTableSuite) TestStmtSummaryTable(c *C) { tk.MustExec("commit") tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, - max_prewrite_regions, avg_affected_rows, query_sample_text + max_prewrite_regions, avg_affected_rows, query_sample_text, prev_sample_text from performance_schema.events_statements_summary_by_digest where digest_text like 'insert into t%'`, - ).Check(testkit.Rows("insert test test.t 1 0 0 0 0 0 0 0 0 0 1 insert into t values(1, 'a')")) + ).Check(testkit.Rows("insert test test.t 1 0 0 0 0 0 0 0 0 0 1 insert into t values(1, 'a') ")) tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, max_total_keys, avg_processed_keys, max_processed_keys, avg_write_keys, max_write_keys, avg_prewrite_regions, - max_prewrite_regions, avg_affected_rows, query_sample_text + max_prewrite_regions, avg_affected_rows, query_sample_text, prev_sample_text from performance_schema.events_statements_summary_by_digest where digest_text='commit'`, - ).Check(testkit.Rows("commit test 1 0 0 0 0 0 2 2 1 1 0 commit")) + ).Check(testkit.Rows("commit test 1 0 0 0 0 0 2 2 1 1 0 commit insert into t values(1, 'a')")) tk.MustQuery("select * from t where a=2") tk.MustQuery(`select stmt_type, schema_name, table_names, index_names, exec_count, cop_task_num, avg_total_keys, diff --git a/session/tidb.go b/session/tidb.go index 4685b51a71bb9..c3f6bbb12f83f 100644 --- a/session/tidb.go +++ b/session/tidb.go @@ -236,6 +236,7 @@ func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement) // If it is not a select statement, we record its slow log here, // then it could include the transaction commit time. 
 	if rs == nil {
+		// `LogSlowQuery` and `SummaryStmt` must be called before recording `PrevStmt`.
 		s.(*executor.ExecStmt).LogSlowQuery(origTxnCtx.StartTS, err == nil, false)
 		s.(*executor.ExecStmt).SummaryStmt()
 		pps := types.CloneRow(sessVars.PreparedParams)
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go
index ce3f8fa8881be..ae74536f6e208 100644
--- a/sessionctx/variable/session.go
+++ b/sessionctx/variable/session.go
@@ -428,6 +428,9 @@ type SessionVars struct {
 	// PrevStmt is used to store the previous executed statement in the current session.
 	PrevStmt fmt.Stringer

+	// prevStmtDigest is used to store the digest of the previous statement in the current session.
+	prevStmtDigest string
+
 	// AllowRemoveAutoInc indicates whether a user can drop the auto_increment column attribute or not.
 	AllowRemoveAutoInc bool

@@ -890,6 +893,18 @@ func (s *SessionVars) setTxnMode(val string) error {
 	return nil
 }

+// SetPrevStmtDigest sets the digest of the previous statement.
+func (s *SessionVars) SetPrevStmtDigest(prevStmtDigest string) {
+	s.prevStmtDigest = prevStmtDigest
+}
+
+// GetPrevStmtDigest returns the digest of the previous statement.
+func (s *SessionVars) GetPrevStmtDigest() string {
+	// Because `prevStmt` may be truncated, it's senseless to normalize it.
+	// Even if `prevStmtDigest` is empty while `prevStmt` is not, just return it anyway.
+	return s.prevStmtDigest
+}
+
 // SetLocalSystemVar sets values of the local variables which in "server" scope.
 func SetLocalSystemVar(name string, val string) {
 	switch name {
diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go
index 96428278fa331..35813a1f1635b 100644
--- a/util/stmtsummary/statement_summary.go
+++ b/util/stmtsummary/statement_summary.go
@@ -41,16 +41,21 @@ type stmtSummaryByDigestKey struct {
 	// Same statements may appear in different schema, but they refer to different tables.
 	schemaName string
 	digest     string
+	// The digest of the previous statement.
+	prevDigest string
 	// `hash` is the hash value of this object.
 	hash []byte
 }

 // Hash implements SimpleLRUCache.Key.
+// Only when the current SQL is `commit` do we record `prevSQL`. Otherwise, `prevSQL` is empty.
+// `prevSQL` is included in the key to distinguish different transactions.
 func (key *stmtSummaryByDigestKey) Hash() []byte {
 	if len(key.hash) == 0 {
-		key.hash = make([]byte, 0, len(key.schemaName)+len(key.digest))
+		key.hash = make([]byte, 0, len(key.schemaName)+len(key.digest)+len(key.prevDigest))
 		key.hash = append(key.hash, hack.Slice(key.digest)...)
 		key.hash = append(key.hash, hack.Slice(key.schemaName)...)
+		key.hash = append(key.hash, hack.Slice(key.prevDigest)...)
 	}
 	return key.hash
 }
@@ -109,10 +114,12 @@ type stmtSummaryByDigest struct {
 // stmtSummaryByDigestElement is the summary for each type of statements in current interval.
 type stmtSummaryByDigestElement struct {
 	sync.Mutex
-	// Each summary is summarized between [beginTime, beginTime + refreshInterval].
+	// Each summary is summarized between [beginTime, endTime).
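+	// endTime is refreshed by add() and onExpire(), because refreshInterval may change at any time.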
 	beginTime int64
+	endTime   int64
 	// basic
 	sampleSQL  string
+	prevSQL    string
 	sampleUser string
 	indexNames []string
 	execCount  int64
@@ -165,6 +172,7 @@ type stmtSummaryByDigestElement struct {
 	maxPrewriteRegionNum int32
 	sumTxnRetry          int64
 	maxTxnRetry          int
+	sumBackoffTimes      int64
 	backoffTypes         map[fmt.Stringer]int
 	// other
 	sumMem       int64
@@ -182,6 +190,8 @@ type StmtExecInfo struct {
 	OriginalSQL    string
 	NormalizedSQL  string
 	Digest         string
+	PrevSQL        string
+	PrevSQLDigest  string
 	User           string
 	TotalLatency   time.Duration
 	ParseLatency   time.Duration
@@ -220,6 +230,7 @@ func (ssMap *stmtSummaryByDigestMap) AddStatement(sei *StmtExecInfo) {
 	key := &stmtSummaryByDigestKey{
 		schemaName: sei.SchemaName,
 		digest:     sei.Digest,
+		prevDigest: sei.PrevSQLDigest,
 	}

 	// Enclose the block in a function to ensure the lock will always be released.
@@ -241,7 +252,7 @@
 		beginTime := ssMap.beginTimeForCurInterval
 		value, ok := ssMap.summaryMap.Get(key)
 		if !ok {
-			newSummary := newStmtSummaryByDigest(sei, beginTime, historySize)
+			newSummary := newStmtSummaryByDigest(sei, beginTime, intervalSeconds, historySize)
 			ssMap.summaryMap.Put(key, newSummary)
 		}
 		return value, beginTime, ok
@@ -249,7 +260,7 @@

 	// Lock a single entry, not the whole cache.
 	if ok {
-		value.(*stmtSummaryByDigest).add(sei, beginTime, historySize)
+		value.(*stmtSummaryByDigest).add(sei, beginTime, intervalSeconds, historySize)
 	}
 }

@@ -464,7 +475,7 @@ func (ssMap *stmtSummaryByDigestMap) historySize() int {
 }

 // newStmtSummaryByDigest creates a stmtSummaryByDigest from StmtExecInfo.
-func newStmtSummaryByDigest(sei *StmtExecInfo, beginTime int64, historySize int) *stmtSummaryByDigest {
+func newStmtSummaryByDigest(sei *StmtExecInfo, beginTime int64, intervalSeconds int64, historySize int) *stmtSummaryByDigest {
 	// Trim SQL to size MaxSQLLength.
 	maxSQLLength := config.GetGlobalConfig().StmtSummary.MaxSQLLength
 	normalizedSQL := sei.NormalizedSQL
@@ -493,11 +504,11 @@ func newStmtSummaryByDigest(sei *StmtExecInfo, beginTime int64, historySize int)
 		tableNames: tableNames,
 		history:    list.New(),
 	}
-	ssbd.add(sei, beginTime, historySize)
+	ssbd.add(sei, beginTime, intervalSeconds, historySize)
 	return ssbd
 }

-func (ssbd *stmtSummaryByDigest) add(sei *StmtExecInfo, beginTime int64, historySize int) {
+func (ssbd *stmtSummaryByDigest) add(sei *StmtExecInfo, beginTime int64, intervalSeconds int64, historySize int) {
 	// Enclose this block in a function to ensure the lock will always be released.
 	ssElement, isElementNew := func() (*stmtSummaryByDigestElement, bool) {
 		ssbd.Lock()
@@ -510,11 +521,14 @@
 			if lastElement.beginTime >= beginTime {
 				ssElement = lastElement
 				isElementNew = false
+			} else {
+				// The last element expires into the history.
+				lastElement.onExpire(intervalSeconds)
 			}
 		}
 		if isElementNew {
 			// If the element is new created, `ssElement.add(sei)` should be done inside the lock of `ssbd`.
-			ssElement = newStmtSummaryByDigestElement(sei, beginTime)
+			ssElement = newStmtSummaryByDigestElement(sei, beginTime, intervalSeconds)
 			ssbd.history.PushBack(ssElement)
 		}

@@ -529,7 +543,7 @@ func (ssbd *stmtSummaryByDigest) add(sei *StmtExecInfo, beginTime int64, history
 	// Lock a single entry, not the whole `ssbd`.
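+	// `intervalSeconds` is passed down so the element can refresh its endTime.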
 	if !isElementNew {
-		ssElement.add(sei)
+		ssElement.add(sei, intervalSeconds)
 	}
 }

@@ -574,7 +588,7 @@ func (ssbd *stmtSummaryByDigest) collectHistorySummaries(historySize int) []*stm
 	return ssElements
 }

-func newStmtSummaryByDigestElement(sei *StmtExecInfo, beginTime int64) *stmtSummaryByDigestElement {
+func newStmtSummaryByDigestElement(sei *StmtExecInfo, beginTime int64, intervalSeconds int64) *stmtSummaryByDigestElement {
 	ssElement := &stmtSummaryByDigestElement{
 		beginTime:    beginTime,
 		minLatency:   sei.TotalLatency,
@@ -582,25 +596,50 @@ func newStmtSummaryByDigestElement(sei *StmtExecInfo, beginTime int64) *stmtSumm
 		lastSeen:     sei.StartTime,
 		backoffTypes: make(map[fmt.Stringer]int, 0),
 	}
-	ssElement.add(sei)
+	ssElement.add(sei, intervalSeconds)
 	return ssElement
 }

-func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo) {
+// onExpire is called when this element expires into the history.
+func (ssElement *stmtSummaryByDigestElement) onExpire(intervalSeconds int64) {
+	ssElement.Lock()
+	defer ssElement.Unlock()
+
+	// refreshInterval may change at any time, so we need to update endTime.
+	if ssElement.beginTime+intervalSeconds > ssElement.endTime {
+		// If the interval changes to a larger value, update endTime to beginTime + interval.
+		ssElement.endTime = ssElement.beginTime + intervalSeconds
+	} else if ssElement.beginTime+intervalSeconds < ssElement.endTime {
+		now := time.Now().Unix()
+		// If the interval changes to a smaller value and now > beginTime + interval, update endTime to the current time.
+		if now > ssElement.beginTime+intervalSeconds {
+			ssElement.endTime = now
+		}
+	}
+}
+
+func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo, intervalSeconds int64) {
 	maxSQLLength := config.GetGlobalConfig().StmtSummary.MaxSQLLength
 	sampleSQL := sei.OriginalSQL
 	if len(sampleSQL) > int(maxSQLLength) {
 		// Make sure the memory of original `sampleSQL` will be released.
 		sampleSQL = string([]byte(sampleSQL[:maxSQLLength]))
 	}
+	prevSQL := sei.PrevSQL
+	if len(prevSQL) > int(maxSQLLength) {
+		prevSQL = string([]byte(prevSQL[:maxSQLLength]))
+	}

 	ssElement.Lock()
 	defer ssElement.Unlock()
+	// refreshInterval may change at any time, so update endTime as soon as possible.
+	ssElement.endTime = ssElement.beginTime + intervalSeconds
 	if sei.User != "" {
 		ssElement.sampleUser = sei.User
 	}
 	ssElement.sampleSQL = sampleSQL
+	ssElement.prevSQL = prevSQL
 	ssElement.indexNames = sei.StmtCtx.IndexNames
 	ssElement.execCount++

@@ -704,6 +743,7 @@ func (ssElement *stmtSummaryByDigestElement) add(sei *StmtExecInfo) {
 			ssElement.maxTxnRetry = commitDetails.TxnRetry
 		}
 		commitDetails.Mu.Lock()
+		ssElement.sumBackoffTimes += int64(len(commitDetails.Mu.BackoffTypes))
 		for _, backoffType := range commitDetails.Mu.BackoffTypes {
 			ssElement.backoffTypes[backoffType] += 1
 		}
@@ -728,8 +768,10 @@ func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest)
 	ssElement.Lock()
 	defer ssElement.Unlock()

+	// There's a small chance that endTime is out of date, but it's hard to keep it up to date at all times.
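+	// The datum order below must match the column order of `fieldsInEventsStatementsSummary` in infoschema/perfschema/const.go.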
return types.MakeDatums( types.Time{Time: types.FromGoTime(time.Unix(ssElement.beginTime, 0)), Type: mysql.TypeTimestamp}, + types.Time{Time: types.FromGoTime(time.Unix(ssElement.endTime, 0)), Type: mysql.TypeTimestamp}, ssbd.stmtType, ssbd.schemaName, ssbd.digest, @@ -783,6 +825,7 @@ func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest) int(ssElement.maxPrewriteRegionNum), avgFloat(ssElement.sumTxnRetry, ssElement.commitCount), ssElement.maxTxnRetry, + ssElement.sumBackoffTimes, formatBackoffTypes(ssElement.backoffTypes), avgInt(ssElement.sumMem, ssElement.execCount), ssElement.maxMem, @@ -790,6 +833,7 @@ func (ssElement *stmtSummaryByDigestElement) toDatum(ssbd *stmtSummaryByDigest) types.Time{Time: types.FromGoTime(ssElement.firstSeen), Type: mysql.TypeTimestamp}, types.Time{Time: types.FromGoTime(ssElement.lastSeen), Type: mysql.TypeTimestamp}, ssElement.sampleSQL, + ssElement.prevSQL, ) } diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index 800a7413b04f9..bcc026731cfa3 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -39,8 +39,8 @@ type testStmtSummarySuite struct { func (s *testStmtSummarySuite) SetUpSuite(c *C) { s.ssMap = newStmtSummaryByDigestMap() s.ssMap.SetEnabled("1", false) - s.ssMap.SetRefreshInterval("999999999", false) - s.ssMap.SetHistorySize("1", false) + s.ssMap.SetRefreshInterval("1800", false) + s.ssMap.SetHistorySize("24", false) } func TestT(t *testing.T) { @@ -66,6 +66,7 @@ func (s *testStmtSummarySuite) TestAddStatement(c *C) { } expectedSummaryElement := stmtSummaryByDigestElement{ beginTime: now + 60, + endTime: now + 1860, sampleSQL: stmtExecInfo1.OriginalSQL, indexNames: stmtExecInfo1.StmtCtx.IndexNames, sampleUser: stmtExecInfo1.User, @@ -241,6 +242,7 @@ func (s *testStmtSummarySuite) TestAddStatement(c *C) { expectedSummaryElement.maxPrewriteRegionNum = stmtExecInfo2.ExecDetail.CommitDetail.PrewriteRegionNum expectedSummaryElement.sumTxnRetry += int64(stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry) expectedSummaryElement.maxTxnRetry = stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry + expectedSummaryElement.sumBackoffTimes += 1 expectedSummaryElement.backoffTypes[tikv.BoTxnLock] = 1 expectedSummaryElement.sumMem += stmtExecInfo2.MemMax expectedSummaryElement.maxMem = stmtExecInfo2.MemMax @@ -335,6 +337,7 @@ func (s *testStmtSummarySuite) TestAddStatement(c *C) { expectedSummaryElement.sumWriteSize += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteSize) expectedSummaryElement.sumPrewriteRegionNum += int64(stmtExecInfo3.ExecDetail.CommitDetail.PrewriteRegionNum) expectedSummaryElement.sumTxnRetry += int64(stmtExecInfo3.ExecDetail.CommitDetail.TxnRetry) + expectedSummaryElement.sumBackoffTimes += 1 expectedSummaryElement.backoffTypes[tikv.BoTxnLock] = 2 expectedSummaryElement.sumMem += stmtExecInfo3.MemMax expectedSummaryElement.sumAffectedRows += stmtExecInfo3.StmtCtx.AffectedRows() @@ -391,7 +394,9 @@ func matchStmtSummaryByDigest(first, second *stmtSummaryByDigest) bool { ssElement1 := ele1.Value.(*stmtSummaryByDigestElement) ssElement2 := ele2.Value.(*stmtSummaryByDigestElement) if ssElement1.beginTime != ssElement2.beginTime || + ssElement1.endTime != ssElement2.endTime || ssElement1.sampleSQL != ssElement2.sampleSQL || + ssElement1.prevSQL != ssElement2.prevSQL || ssElement1.sampleUser != ssElement2.sampleUser || ssElement1.execCount != ssElement2.execCount || ssElement1.sumLatency != ssElement2.sumLatency || @@ -438,6 
+443,7 @@ func matchStmtSummaryByDigest(first, second *stmtSummaryByDigest) bool { ssElement1.maxPrewriteRegionNum != ssElement2.maxPrewriteRegionNum || ssElement1.sumTxnRetry != ssElement2.sumTxnRetry || ssElement1.maxTxnRetry != ssElement2.maxTxnRetry || + ssElement1.sumBackoffTimes != ssElement2.sumBackoffTimes || ssElement1.sumMem != ssElement2.sumMem || ssElement1.maxMem != ssElement2.maxMem || ssElement1.sumAffectedRows != ssElement2.sumAffectedRows || @@ -551,8 +557,9 @@ func (s *testStmtSummarySuite) TestToDatum(c *C) { datums := s.ssMap.ToCurrentDatum() c.Assert(len(datums), Equals, 1) n := types.Time{Time: types.FromGoTime(time.Unix(s.ssMap.beginTimeForCurInterval, 0)), Type: mysql.TypeTimestamp} + e := types.Time{Time: types.FromGoTime(time.Unix(s.ssMap.beginTimeForCurInterval+1800, 0)), Type: mysql.TypeTimestamp} t := types.Time{Time: types.FromGoTime(stmtExecInfo1.StartTime), Type: mysql.TypeTimestamp} - expectedDatum := []interface{}{n, "select", stmtExecInfo1.SchemaName, stmtExecInfo1.Digest, stmtExecInfo1.NormalizedSQL, + expectedDatum := []interface{}{n, e, "select", stmtExecInfo1.SchemaName, stmtExecInfo1.Digest, stmtExecInfo1.NormalizedSQL, "db1.tb1,db2.tb2", "a", stmtExecInfo1.User, 1, int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.TotalLatency), int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.ParseLatency), int64(stmtExecInfo1.CompileLatency), @@ -572,9 +579,9 @@ func (s *testStmtSummarySuite) TestToDatum(c *C) { stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteKeys, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.WriteSize, stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, stmtExecInfo1.ExecDetail.CommitDetail.PrewriteRegionNum, - stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, + stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, stmtExecInfo1.ExecDetail.CommitDetail.TxnRetry, 1, "txnLock:1", stmtExecInfo1.MemMax, stmtExecInfo1.MemMax, stmtExecInfo1.StmtCtx.AffectedRows(), - t, t, stmtExecInfo1.OriginalSQL} + t, t, stmtExecInfo1.OriginalSQL, stmtExecInfo1.PrevSQL} match(c, datums[0], expectedDatum...) datums = s.ssMap.ToHistoryDatum() @@ -665,11 +672,14 @@ func (s *testStmtSummarySuite) TestMaxSQLLength(c *C) { stmtExecInfo1 := generateAnyExecInfo() stmtExecInfo1.OriginalSQL = str stmtExecInfo1.NormalizedSQL = str + stmtExecInfo1.PrevSQLDigest = "prevSQLDigest" + stmtExecInfo1.PrevSQL = str s.ssMap.AddStatement(stmtExecInfo1) key := &stmtSummaryByDigestKey{ schemaName: stmtExecInfo1.SchemaName, digest: stmtExecInfo1.Digest, + prevDigest: stmtExecInfo1.PrevSQLDigest, } value, ok := s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) @@ -678,6 +688,7 @@ func (s *testStmtSummarySuite) TestMaxSQLLength(c *C) { c.Assert(len(summary.normalizedSQL), Equals, int(maxSQLLength)) ssElement := summary.history.Back().Value.(*stmtSummaryByDigestElement) c.Assert(len(ssElement.sampleSQL), Equals, int(maxSQLLength)) + c.Assert(len(ssElement.prevSQL), Equals, int(maxSQLLength)) } // Test setting EnableStmtSummary to 0. 
@@ -808,8 +819,6 @@ func (s *testStmtSummarySuite) TestFormatBackoffTypes(c *C) { func (s *testStmtSummarySuite) TestRefreshCurrentSummary(c *C) { s.ssMap.Clear() now := time.Now().Unix() - s.ssMap.SetRefreshInterval("1800", false) - s.ssMap.SetHistorySize("10", false) s.ssMap.beginTimeForCurInterval = now + 10 stmtExecInfo1 := generateAnyExecInfo() @@ -849,6 +858,8 @@ func (s *testStmtSummarySuite) TestSummaryHistory(c *C) { now := time.Now().Unix() s.ssMap.SetRefreshInterval("10", false) s.ssMap.SetHistorySize("10", false) + defer s.ssMap.SetRefreshInterval("1800", false) + defer s.ssMap.SetHistorySize("24", false) stmtExecInfo1 := generateAnyExecInfo() key := &stmtSummaryByDigestKey{ @@ -882,3 +893,79 @@ func (s *testStmtSummarySuite) TestSummaryHistory(c *C) { datum = s.ssMap.ToHistoryDatum() c.Assert(len(datum), Equals, 5) } + +// Test summary when PrevSQL is not empty. +func (s *testStmtSummarySuite) TestPrevSQL(c *C) { + s.ssMap.Clear() + now := time.Now().Unix() + // to disable expiration + s.ssMap.beginTimeForCurInterval = now + 60 + + stmtExecInfo1 := generateAnyExecInfo() + stmtExecInfo1.PrevSQL = "prevSQL" + stmtExecInfo1.PrevSQLDigest = "prevSQLDigest" + s.ssMap.AddStatement(stmtExecInfo1) + key := &stmtSummaryByDigestKey{ + schemaName: stmtExecInfo1.SchemaName, + digest: stmtExecInfo1.Digest, + prevDigest: stmtExecInfo1.PrevSQLDigest, + } + c.Assert(s.ssMap.summaryMap.Size(), Equals, 1) + _, ok := s.ssMap.summaryMap.Get(key) + c.Assert(ok, IsTrue) + + // same prevSQL + s.ssMap.AddStatement(stmtExecInfo1) + c.Assert(s.ssMap.summaryMap.Size(), Equals, 1) + + // different prevSQL + stmtExecInfo2 := stmtExecInfo1 + stmtExecInfo2.PrevSQL = "prevSQL1" + stmtExecInfo2.PrevSQLDigest = "prevSQLDigest1" + key.prevDigest = stmtExecInfo2.PrevSQLDigest + s.ssMap.AddStatement(stmtExecInfo2) + c.Assert(s.ssMap.summaryMap.Size(), Equals, 2) + _, ok = s.ssMap.summaryMap.Get(key) + c.Assert(ok, IsTrue) +} + +func (s *testStmtSummarySuite) TestEndTime(c *C) { + s.ssMap.Clear() + now := time.Now().Unix() + s.ssMap.beginTimeForCurInterval = now - 100 + + stmtExecInfo1 := generateAnyExecInfo() + s.ssMap.AddStatement(stmtExecInfo1) + key := &stmtSummaryByDigestKey{ + schemaName: stmtExecInfo1.SchemaName, + digest: stmtExecInfo1.Digest, + } + c.Assert(s.ssMap.summaryMap.Size(), Equals, 1) + value, ok := s.ssMap.summaryMap.Get(key) + c.Assert(ok, IsTrue) + ssbd := value.(*stmtSummaryByDigest) + ssElement := ssbd.history.Back().Value.(*stmtSummaryByDigestElement) + c.Assert(ssElement.beginTime, Equals, now-100) + c.Assert(ssElement.endTime, Equals, now+1700) + + s.ssMap.SetRefreshInterval("3600", false) + defer s.ssMap.SetRefreshInterval("1800", false) + s.ssMap.AddStatement(stmtExecInfo1) + c.Assert(ssbd.history.Len(), Equals, 1) + ssElement = ssbd.history.Back().Value.(*stmtSummaryByDigestElement) + c.Assert(ssElement.beginTime, Equals, now-100) + c.Assert(ssElement.endTime, Equals, now+3500) + + s.ssMap.SetRefreshInterval("60", false) + s.ssMap.AddStatement(stmtExecInfo1) + c.Assert(ssbd.history.Len(), Equals, 2) + now2 := time.Now().Unix() + ssElement = ssbd.history.Front().Value.(*stmtSummaryByDigestElement) + c.Assert(ssElement.beginTime, Equals, now-100) + c.Assert(ssElement.endTime, GreaterEqual, now) + c.Assert(ssElement.endTime, LessEqual, now2) + ssElement = ssbd.history.Back().Value.(*stmtSummaryByDigestElement) + c.Assert(ssElement.beginTime, GreaterEqual, now-60) + c.Assert(ssElement.beginTime, LessEqual, now2) + c.Assert(ssElement.endTime-ssElement.beginTime, Equals, int64(60)) +}
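
Illustration (not part of the patch): a minimal, self-contained sketch of why `prevDigest` is folded into the summary key. Every `commit` normalizes to the same digest, so without the previous statement's digest all commits would collapse into a single summary row; with it, commits that end different kinds of transactions stay separate. `stmtKey` and `hash` below are hypothetical stand-ins for `stmtSummaryByDigestKey` and its `Hash` method, not the real package API.

package main

import "fmt"

// stmtKey is a simplified stand-in for stmtSummaryByDigestKey.
type stmtKey struct {
	schemaName string
	digest     string
	prevDigest string
}

// hash mimics Hash(): concatenate digest, schema name and prevDigest.
func (k stmtKey) hash() string {
	return k.digest + k.schemaName + k.prevDigest
}

func main() {
	// Both keys describe a `commit` statement in schema `test`, but the
	// transactions they end began with different statements.
	commitAfterInsert := stmtKey{schemaName: "test", digest: "digestOfCommit", prevDigest: "digestOfInsert"}
	commitAfterUpdate := stmtKey{schemaName: "test", digest: "digestOfCommit", prevDigest: "digestOfUpdate"}

	// Different prevDigest => different key => separate summary rows.
	fmt.Println(commitAfterInsert.hash() == commitAfterUpdate.hash()) // false
}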