Merge branch 'master' into fix-issue-27313
xuyifangreeneyes authored Nov 26, 2021
2 parents 88ec94e + a9f161d commit 401703c
Showing 45 changed files with 1,704 additions and 571 deletions.
12 changes: 12 additions & 0 deletions cmd/explaintest/r/new_character_set_builtin.result
@@ -81,6 +81,12 @@ hex(convert('1' using gbk)) convert('1' using gbk)
select hex(convert('ㅂ' using gbk)), convert('ㅂ' using gbk);
hex(convert('ㅂ' using gbk)) convert('ㅂ' using gbk)
3F ?
select hex(convert(0xe240 using gbk)), convert(0xe240 using gbk);
hex(convert(0xe240 using gbk)) convert(0xe240 using gbk)
E240 釦
select hex(convert(0x1e240 using gbk)), convert(0x1e240 using gbk);
hex(convert(0x1e240 using gbk)) convert(0x1e240 using gbk)
01E240 釦
select convert(a using binary), convert(convert(a using gbk) using binary) from t;
convert(a using binary) convert(convert(a using gbk) using binary)
中文 ����
@@ -117,6 +123,12 @@ hex(convert('1' using gbk)) convert('1' using gbk)
select hex(convert('ㅂ' using gbk)), convert('ㅂ' using gbk);
hex(convert('ㅂ' using gbk)) convert('ㅂ' using gbk)
3F ?
select hex(convert(0xe240 using gbk)), convert(0xe240 using gbk);
hex(convert(0xe240 using gbk)) convert(0xe240 using gbk)
E240 釦
select hex(convert(0x1e240 using gbk)), convert(0x1e240 using gbk);
hex(convert(0x1e240 using gbk)) convert(0x1e240 using gbk)
01E240 釦
select convert(a using binary) from t;
convert(a using binary)
中文
4 changes: 4 additions & 0 deletions cmd/explaintest/t/new_character_set_builtin.test
@@ -44,6 +44,8 @@ select hex(convert('啊' using gbk)), convert('啊' using gbk);
select hex(convert('a' using gbk)), convert('a' using gbk);
select hex(convert('1' using gbk)), convert('1' using gbk);
select hex(convert('ㅂ' using gbk)), convert('ㅂ' using gbk);
select hex(convert(0xe240 using gbk)), convert(0xe240 using gbk);
select hex(convert(0x1e240 using gbk)), convert(0x1e240 using gbk);
select convert(a using binary), convert(convert(a using gbk) using binary) from t;
select convert(convert('中文' using gbk) using binary), convert('中文' using binary);
select convert(convert('ㅂ' using gbk) using binary), convert('ㅂ' using binary);
@@ -54,6 +56,8 @@ select hex(convert('啊' using gbk)), convert('啊' using gbk);
select hex(convert('a' using gbk)), convert('a' using gbk);
select hex(convert('1' using gbk)), convert('1' using gbk);
select hex(convert('ㅂ' using gbk)), convert('ㅂ' using gbk);
select hex(convert(0xe240 using gbk)), convert(0xe240 using gbk);
select hex(convert(0x1e240 using gbk)), convert(0x1e240 using gbk);
select convert(a using binary) from t;
select convert(convert('中文' using gbk) using binary), convert('中文' using binary);
select convert(convert('ㅂ' using gbk) using binary), convert('ㅂ' using binary);
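The added cases convert hexadecimal literals, which are binary strings (0x1e240 is the three bytes 01 E2 40), and the expected output above shows the GBK byte pair E2 40 decoding to 釦. A minimal Go sketch of the same decoding, assuming the golang.org/x/text GBK tables rather than TiDB's own charset package (which is not part of this diff):

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/simplifiedchinese"
)

func main() {
	// 0xe240 reaches CONVERT(... USING gbk) as a two-byte binary string.
	raw := []byte{0xE2, 0x40}

	// Decode the bytes with the GBK code table; per the expected test
	// output above, E2 40 decodes to the single character 釦.
	decoded, err := simplifiedchinese.GBK.NewDecoder().Bytes(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("hex=%X decoded=%s\n", raw, decoded) // hex=E240 decoded=釦
}
```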
41 changes: 21 additions & 20 deletions config/config.go
@@ -434,11 +434,12 @@ type Performance struct {
DistinctAggPushDown bool `toml:"distinct-agg-push-down" json:"distinct-agg-push-down"`
CommitterConcurrency int `toml:"committer-concurrency" json:"committer-concurrency"`
MaxTxnTTL uint64 `toml:"max-txn-ttl" json:"max-txn-ttl"`
MemProfileInterval string `toml:"mem-profile-interval" json:"mem-profile-interval"`
IndexUsageSyncLease string `toml:"index-usage-sync-lease" json:"index-usage-sync-lease"`
PlanReplayerGCLease string `toml:"plan-replayer-gc-lease" json:"plan-replayer-gc-lease"`
GOGC int `toml:"gogc" json:"gogc"`
EnforceMPP bool `toml:"enforce-mpp" json:"enforce-mpp"`
// Deprecated
MemProfileInterval string `toml:"-" json:"-"`
IndexUsageSyncLease string `toml:"index-usage-sync-lease" json:"index-usage-sync-lease"`
PlanReplayerGCLease string `toml:"plan-replayer-gc-lease" json:"plan-replayer-gc-lease"`
GOGC int `toml:"gogc" json:"gogc"`
EnforceMPP bool `toml:"enforce-mpp" json:"enforce-mpp"`
}

// PlanCache is the PlanCache section of the config.
@@ -647,7 +648,6 @@ var defaultConf = Config{
DistinctAggPushDown: false,
CommitterConcurrency: defTiKVCfg.CommitterConcurrency,
MaxTxnTTL: defTiKVCfg.MaxTxnTTL, // 1hour
MemProfileInterval: "1m",
// TODO: set indexUsageSyncLease to 60s.
IndexUsageSyncLease: "0s",
GOGC: 100,
@@ -738,20 +738,21 @@ func StoreGlobalConfig(config *Config) {
}

var deprecatedConfig = map[string]struct{}{
"pessimistic-txn.ttl": {},
"pessimistic-txn.enable": {},
"log.file.log-rotate": {},
"log.log-slow-query": {},
"txn-local-latches": {},
"txn-local-latches.enabled": {},
"txn-local-latches.capacity": {},
"performance.max-memory": {},
"max-txn-time-use": {},
"experimental.allow-auto-random": {},
"enable-redact-log": {}, // use variable tidb_redact_log instead
"tikv-client.copr-cache.enable": {},
"alter-primary-key": {}, // use NONCLUSTERED keyword instead
"enable-streaming": {},
"pessimistic-txn.ttl": {},
"pessimistic-txn.enable": {},
"log.file.log-rotate": {},
"log.log-slow-query": {},
"txn-local-latches": {},
"txn-local-latches.enabled": {},
"txn-local-latches.capacity": {},
"performance.max-memory": {},
"max-txn-time-use": {},
"experimental.allow-auto-random": {},
"enable-redact-log": {}, // use variable tidb_redact_log instead
"tikv-client.copr-cache.enable": {},
"alter-primary-key": {}, // use NONCLUSTERED keyword instead
"enable-streaming": {},
"performance.mem-profile-interval": {},
}

func isAllDeprecatedConfigItems(items []string) bool {
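With `MemProfileInterval` retagged `toml:"-"` and `performance.mem-profile-interval` added to `deprecatedConfig`, a config file that still sets the item should be reported as deprecated rather than rejected as unknown (see the config_test.go change below). A rough sketch of that pattern, assuming github.com/BurntSushi/toml's undecoded-key metadata; the real `Load` and `isAllDeprecatedConfigItems` bodies are outside this hunk:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

var deprecated = map[string]struct{}{
	"performance.mem-profile-interval": {},
}

type config struct {
	Performance struct {
		MaxTxnTTL uint64 `toml:"max-txn-ttl"`
	} `toml:"performance"`
}

func main() {
	var c config
	meta, err := toml.Decode(`
[performance]
max-txn-ttl = 3600000
mem-profile-interval = "1m"
`, &c)
	if err != nil {
		panic(err)
	}
	// Keys present in the file but not mapped to any struct field.
	for _, k := range meta.Undecoded() {
		if _, ok := deprecated[k.String()]; ok {
			fmt.Println("ignoring deprecated config item:", k)
			continue
		}
		fmt.Println("unknown config item:", k)
	}
}
```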
3 changes: 0 additions & 3 deletions config/config.toml.example
@@ -305,9 +305,6 @@ committer-concurrency = 128
# max lifetime of transaction ttl manager.
max-txn-ttl = 3600000

# the interval duration between two memory profile into global tracker
mem-profile-interval = "1m"

# The Go GC trigger factor, you can get more information about it at https://golang.org/pkg/runtime.
# If you encounter OOM when executing large query, you can decrease this value to trigger GC earlier.
# If you find the CPU used by GC is too high or GC is too frequent and impact your business you can increase this value.
9 changes: 8 additions & 1 deletion config/config_test.go
@@ -295,9 +295,16 @@ receiver-address = "127.0.0.1:10100"
require.Equal(t, "127.0.0.1:10100", conf.TopSQL.ReceiverAddress)
require.True(t, conf.Experimental.AllowsExpressionIndex)

err = f.Truncate(0)
require.NoError(t, err)
_, err = f.Seek(0, 0)
require.NoError(t, err)
require.NoError(t, f.Sync())
_, err = f.WriteString(`
[log.file]
log-rotate = true`)
log-rotate = true
[performance]
mem-profile-interval="1m"`)
require.NoError(t, err)
err = conf.Load(configFile)
tmp := err.(*ErrConfigValidationFailed)
11 changes: 6 additions & 5 deletions executor/builder.go
@@ -975,12 +975,13 @@ func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor {
// at build().
func (b *executorBuilder) buildTrace(v *plannercore.Trace) Executor {
t := &TraceExec{
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
stmtNode: v.StmtNode,
builder: b,
format: v.Format,
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
stmtNode: v.StmtNode,
builder: b,
format: v.Format,
optimizerTrace: v.OptimizerTrace,
}
if t.format == plannercore.TraceFormatLog {
if t.format == plannercore.TraceFormatLog && !t.optimizerTrace {
return &SortExec{
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), t),
ByItems: []*plannerutil.ByItems{
1 change: 0 additions & 1 deletion executor/executor.go
@@ -1689,7 +1689,6 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) {
sc.CTEStorageMap = map[int]*CTEStorages{}
sc.IsStaleness = false
sc.LockTableIDs = make(map[int64]struct{})
sc.EnableOptimizeTrace = false
sc.LogicalOptimizeTrace = nil

sc.InitMemTracker(memory.LabelForSQLText, vars.MemQuotaQuery)
5 changes: 0 additions & 5 deletions executor/prepared_serial_test.go
@@ -706,11 +706,6 @@ func TestPlanCacheOperators(t *testing.T) {

// execute this statement and check whether it uses a cached plan
results := tk.MustQuery("execute stmt " + usingStmt).Sort().Rows()
useCache := "0"
if execCase.UseCache {
useCache = "1"
}
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows(useCache))

// check whether the result is correct
tmp := strings.Split(prepCase.PrepStmt, "?")
78 changes: 78 additions & 0 deletions executor/trace.go
@@ -15,10 +15,16 @@
package executor

import (
"archive/zip"
"context"
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"time"

"github.com/opentracing/basictracer-go"
@@ -51,6 +57,8 @@ type TraceExec struct {

builder *executorBuilder
format string
// optimizerTrace indicates 'trace plan statement'
optimizerTrace bool
}

// Next executes real query and collects span later.
@@ -71,6 +79,10 @@ func (e *TraceExec) Next(ctx context.Context, req *chunk.Chunk) error {
e.ctx.GetSessionVars().StmtCtx = stmtCtx
}()

if e.optimizerTrace {
return e.nextOptimizerPlanTrace(ctx, e.ctx, req)
}

switch e.format {
case core.TraceFormatLog:
return e.nextTraceLog(ctx, se, req)
@@ -79,6 +91,40 @@ func (e *TraceExec) Next(ctx context.Context, req *chunk.Chunk) error {
}
}

func (e *TraceExec) nextOptimizerPlanTrace(ctx context.Context, se sessionctx.Context, req *chunk.Chunk) error {
zf, fileName, err := generateOptimizerTraceFile()
if err != nil {
return err
}
zw := zip.NewWriter(zf)
defer func() {
err := zw.Close()
if err != nil {
logutil.BgLogger().Warn("Closing zip writer failed", zap.Error(err))
}
err = zf.Close()
if err != nil {
logutil.BgLogger().Warn("Closing zip file failed", zap.Error(err))
}
}()
traceZW, err := zw.Create("trace.json")
if err != nil {
return errors.AddStack(err)
}
e.executeChild(ctx, se.(sqlexec.SQLExecutor))
res, err := json.Marshal(se.GetSessionVars().StmtCtx.LogicalOptimizeTrace)
if err != nil {
return errors.AddStack(err)
}
_, err = traceZW.Write(res)
if err != nil {
return errors.AddStack(err)
}
req.AppendString(0, fileName)
e.exhausted = true
return nil
}

func (e *TraceExec) nextTraceLog(ctx context.Context, se sqlexec.SQLExecutor, req *chunk.Chunk) error {
recorder := basictracer.NewInMemoryRecorder()
tracer := basictracer.New(recorder)
@@ -142,8 +188,11 @@ func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) {
vars := e.ctx.GetSessionVars()
origin := vars.InRestrictedSQL
vars.InRestrictedSQL = true
originOptimizeTrace := vars.EnableStmtOptimizeTrace
vars.EnableStmtOptimizeTrace = e.optimizerTrace
defer func() {
vars.InRestrictedSQL = origin
vars.EnableStmtOptimizeTrace = originOptimizeTrace
}()
rs, err := se.ExecuteStmt(ctx, e.stmtNode)
if err != nil {
@@ -252,3 +301,32 @@ func generateLogResult(allSpans []basictracer.RawSpan, chk *chunk.Chunk) {
}
}
}

func generateOptimizerTraceFile() (*os.File, string, error) {
dirPath := getOptimizerTraceDirName()
// Create path
err := os.MkdirAll(dirPath, os.ModePerm)
if err != nil {
return nil, "", errors.AddStack(err)
}
// Generate key and create zip file
time := time.Now().UnixNano()
b := make([]byte, 16)
_, err = rand.Read(b)
if err != nil {
return nil, "", errors.AddStack(err)
}
key := base64.URLEncoding.EncodeToString(b)
fileName := fmt.Sprintf("optimizer_trace_%v_%v.zip", key, time)
zf, err := os.Create(filepath.Join(dirPath, fileName))
if err != nil {
return nil, "", errors.AddStack(err)
}
return zf, fileName, nil
}

// getOptimizerTraceDirName returns optimizer trace directory path.
// The path is related to the process id.
func getOptimizerTraceDirName() string {
return filepath.Join(os.TempDir(), "optimizer_trace", strconv.Itoa(os.Getpid()))
}
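`trace plan <stmt>` now marshals `StmtCtx.LogicalOptimizeTrace` to JSON and stores it as `trace.json` inside a randomly named zip under `<tmpdir>/optimizer_trace/<pid>`. A standard-library-only sketch of that file layout; the helper below is illustrative, not the executor's API:

```go
package main

import (
	"archive/zip"
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"time"
)

// writeTraceZip writes the given trace payload as trace.json inside a
// randomly named zip file under <tmp>/optimizer_trace/<pid>, mirroring the
// generateOptimizerTraceFile/nextOptimizerPlanTrace flow above.
func writeTraceZip(trace interface{}) (string, error) {
	dir := filepath.Join(os.TempDir(), "optimizer_trace", strconv.Itoa(os.Getpid()))
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return "", err
	}
	key := make([]byte, 16)
	if _, err := rand.Read(key); err != nil {
		return "", err
	}
	name := fmt.Sprintf("optimizer_trace_%v_%v.zip",
		base64.URLEncoding.EncodeToString(key), time.Now().UnixNano())
	f, err := os.Create(filepath.Join(dir, name))
	if err != nil {
		return "", err
	}
	defer f.Close()

	// The zip writer must be closed before the underlying file; deferred
	// calls run in reverse order, so this is closed first.
	zw := zip.NewWriter(f)
	defer zw.Close()
	w, err := zw.Create("trace.json")
	if err != nil {
		return "", err
	}
	payload, err := json.Marshal(trace)
	if err != nil {
		return "", err
	}
	_, err = w.Write(payload)
	return name, err
}

func main() {
	name, err := writeTraceZip(map[string]string{"step": "logical optimize"})
	if err != nil {
		panic(err)
	}
	fmt.Println("wrote", name)
}
```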
10 changes: 10 additions & 0 deletions executor/trace_test.go
@@ -66,3 +66,13 @@ func rowsOrdered(rows [][]interface{}) bool {
}
return true
}

func (s *testSuite1) TestTracePlanStmt(c *C) {
tk := testkit.NewTestKit(c, s.store)
tk.MustExec("use test")
tk.MustExec("create table tp123(id int);")
rows := tk.MustQuery("trace plan select * from tp123").Rows()
c.Assert(rows, HasLen, 1)
c.Assert(rows[0], HasLen, 1)
c.Assert(rows[0][0].(string), Matches, ".*zip")
}
5 changes: 4 additions & 1 deletion expression/builtin_compare.go
@@ -1461,11 +1461,14 @@ func (c *compareFunctionClass) refineArgs(ctx sessionctx.Context, args []Express
// To keep the result be compatible with MySQL, refine `int non-constant <cmp> str constant`
// here and skip this refine operation in all other cases for safety.
if (arg0IsInt && !arg0IsCon && arg1IsString && arg1IsCon) || (arg1IsInt && !arg1IsCon && arg0IsString && arg0IsCon) {
ctx.GetSessionVars().StmtCtx.MaybeOverOptimized4PlanCache = true
ctx.GetSessionVars().StmtCtx.SkipPlanCache = true
RemoveMutableConst(ctx, args)
} else {
return args
}
} else if ctx.GetSessionVars().StmtCtx.SkipPlanCache {
// We should remove the mutable constant for correctness, because its value may be changed.
RemoveMutableConst(ctx, args)
}
// int non-constant [cmp] non-int constant
if arg0IsInt && !arg0IsCon && !arg1IsInt && arg1IsCon {
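The `refineArgs` change marks the statement as unsafe for the plan cache once an `int column <cmp> string constant` comparison is refined with the parameter's current value, and strips mutable (parameter-backed) constants so the folded value cannot silently change on a later execution. A heavily simplified sketch of that guard with made-up types; none of these names are the expression package's real API:

```go
package main

import "fmt"

// Illustrative stand-ins for StmtCtx and Constant.
type stmtCtx struct {
	MaybeOverOptimized4PlanCache bool
	SkipPlanCache                bool
}

type constant struct {
	value     int64
	fromParam bool // true when the value was bound from a plan-cache parameter
}

// refineIntCmpStringConst mimics the guard above: once a refine depends on a
// parameter's current value, the plan is unsafe to cache, and parameter-backed
// constants are pinned to their current values (the RemoveMutableConst step).
func refineIntCmpStringConst(sc *stmtCtx, args []*constant) {
	sc.MaybeOverOptimized4PlanCache = true
	sc.SkipPlanCache = true
	for _, c := range args {
		if c.fromParam {
			c.fromParam = false // freeze: a different parameter must not reuse this plan
		}
	}
}

func main() {
	sc := &stmtCtx{}
	args := []*constant{{value: 42, fromParam: true}}
	refineIntCmpStringConst(sc, args)
	fmt.Println(sc.SkipPlanCache, args[0].fromParam) // true false
}
```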