diff --git a/docs/design/2021-04-18-common-table-expression.md b/docs/design/2021-04-18-common-table-expression.md index b61e4bb5a918f..02cc9ab30401e 100644 --- a/docs/design/2021-04-18-common-table-expression.md +++ b/docs/design/2021-04-18-common-table-expression.md @@ -128,7 +128,7 @@ type LogicalCTETable struct { ```Go type CTEExec struct { - baseExecutor + exec.BaseExecutor seedExec Executor recursiveExec Executor @@ -150,7 +150,7 @@ type CTEExec struct { ```Go type CTETableReaderExec struct { - baseExecutor + exec.BaseExecutor iterInTbl cteutil.Storage chkIdx int diff --git a/executor/BUILD.bazel b/executor/BUILD.bazel index 1b1844c944d53..398810aa45451 100644 --- a/executor/BUILD.bazel +++ b/executor/BUILD.bazel @@ -121,6 +121,7 @@ go_library( "//executor/asyncloaddata", "//executor/importer", "//executor/internal/builder", + "//executor/internal/exec", "//executor/internal/mpp", "//executor/internal/util", "//executor/metrics", @@ -381,6 +382,7 @@ go_test( "//executor/aggfuncs", "//executor/importer", "//executor/internal/builder", + "//executor/internal/exec", "//expression", "//expression/aggregation", "//infoschema", diff --git a/executor/adapter.go b/executor/adapter.go index a85d3c3e4fb73..12ca012561fb2 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" executor_metrics "github.com/pingcap/tidb/executor/metrics" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -86,7 +87,7 @@ type processinfoSetter interface { // recordSet wraps an executor, implements sqlexec.RecordSet interface type recordSet struct { fields []*ast.ResultField - executor Executor + executor exec.Executor stmt *ExecStmt lastErr error txnStartTS uint64 @@ -172,8 +173,8 @@ func (a *recordSet) NewChunk(alloc chunk.Allocator) *chunk.Chunk { return newFirstChunk(a.executor) } - base := a.executor.base() - return alloc.Alloc(base.retFieldTypes, base.initCap, base.maxChunkSize) + base := a.executor.Base() + return alloc.Alloc(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize()) } func (a *recordSet) Close() error { @@ -601,7 +602,7 @@ func (a *ExecStmt) getSQLForProcessInfo() string { return sql } -func (a *ExecStmt) handleStmtForeignKeyTrigger(ctx context.Context, e Executor) error { +func (a *ExecStmt) handleStmtForeignKeyTrigger(ctx context.Context, e exec.Executor) error { stmtCtx := a.Ctx.GetSessionVars().StmtCtx if stmtCtx.ForeignKeyTriggerCtx.HasFKCascades { // If the ExecStmt has foreign key cascade to be executed, we need call `StmtCommit` to commit the ExecStmt itself @@ -626,7 +627,7 @@ func (a *ExecStmt) handleStmtForeignKeyTrigger(ctx context.Context, e Executor) var maxForeignKeyCascadeDepth = 15 -func (a *ExecStmt) handleForeignKeyTrigger(ctx context.Context, e Executor, depth int) error { +func (a *ExecStmt) handleForeignKeyTrigger(ctx context.Context, e exec.Executor, depth int) error { exec, ok := e.(WithForeignKeyTrigger) if !ok { return nil @@ -709,7 +710,7 @@ func (a *ExecStmt) handleForeignKeyCascade(ctx context.Context, fkc *FKCascadeEx // prepareFKCascadeContext records a transaction savepoint for foreign key cascade when this ExecStmt has foreign key // cascade behaviour and this ExecStmt is in transaction. 
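A readability hazard introduced by the new import: both `handleForeignKeyTrigger` above and `prepareFKCascadeContext` below keep a local named `exec` (`exec, ok := e.(WithForeignKeyTrigger)`), which shadows the `executor/internal/exec` package for the rest of each function body; the same collision is why the benchmark code later renames its local `exec` to `exe`. A minimal, self-contained reproduction, with `os/exec` standing in for the internal package:

```go
package main

import (
	"fmt"
	"os/exec" // stands in for github.com/pingcap/tidb/executor/internal/exec
)

type withTrigger interface{ HasFKCascades() bool }

type stmt struct{}

func (stmt) HasFKCascades() bool { return true }

func handle(e any) {
	// Same shape as `exec, ok := e.(WithForeignKeyTrigger)` in the patch: from
	// here to the end of the function, exec names this variable, not the package.
	exec, ok := e.(withTrigger)
	if !ok {
		return
	}
	fmt.Println(exec.HasFKCascades())
	// exec.LookPath("go") // would not compile here: exec is a withTrigger now
}

func main() {
	fmt.Println(exec.Command("true").Path) // the package is still visible out here
	handle(stmt{})
}
```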
-func (a *ExecStmt) prepareFKCascadeContext(e Executor) { +func (a *ExecStmt) prepareFKCascadeContext(e exec.Executor) { exec, ok := e.(WithForeignKeyTrigger) if !ok || !exec.HasFKCascades() { return @@ -753,7 +754,7 @@ func (a *ExecStmt) handleFKTriggerError(sc *stmtctx.StatementContext) error { return nil } -func (a *ExecStmt) handleNoDelay(ctx context.Context, e Executor, isPessimistic bool) (handled bool, rs sqlexec.RecordSet, err error) { +func (a *ExecStmt) handleNoDelay(ctx context.Context, e exec.Executor, isPessimistic bool) (handled bool, rs sqlexec.RecordSet, err error) { sc := a.Ctx.GetSessionVars().StmtCtx defer func() { // If the stmt have no rs like `insert`, The session tracker detachment will be directly @@ -832,7 +833,7 @@ type chunkRowRecordSet struct { rows []chunk.Row idx int fields []*ast.ResultField - e Executor + e exec.Executor execStmt *ExecStmt } @@ -858,15 +859,15 @@ func (c *chunkRowRecordSet) NewChunk(alloc chunk.Allocator) *chunk.Chunk { return newFirstChunk(c.e) } - base := c.e.base() - return alloc.Alloc(base.retFieldTypes, base.initCap, base.maxChunkSize) + base := c.e.Base() + return alloc.Alloc(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize()) } func (c *chunkRowRecordSet) Close() error { return c.execStmt.CloseRecordSet(c.execStmt.Ctx.GetSessionVars().TxnCtx.StartTS, nil) } -func (a *ExecStmt) handlePessimisticSelectForUpdate(ctx context.Context, e Executor) (_ sqlexec.RecordSet, retErr error) { +func (a *ExecStmt) handlePessimisticSelectForUpdate(ctx context.Context, e exec.Executor) (_ sqlexec.RecordSet, retErr error) { if snapshotTS := a.Ctx.GetSessionVars().SnapshotTS; snapshotTS != 0 { terror.Log(e.Close()) return nil, errors.New("can not execute write statement when 'tidb_snapshot' is set") @@ -910,7 +911,7 @@ func (a *ExecStmt) handlePessimisticSelectForUpdate(ctx context.Context, e Execu } } -func (a *ExecStmt) runPessimisticSelectForUpdate(ctx context.Context, e Executor) (sqlexec.RecordSet, error) { +func (a *ExecStmt) runPessimisticSelectForUpdate(ctx context.Context, e exec.Executor) (sqlexec.RecordSet, error) { defer func() { terror.Log(e.Close()) }() @@ -935,7 +936,7 @@ func (a *ExecStmt) runPessimisticSelectForUpdate(ctx context.Context, e Executor return nil, err } -func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, e Executor) (sqlexec.RecordSet, error) { +func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, e exec.Executor) (sqlexec.RecordSet, error) { sctx := a.Ctx r, ctx := tracing.StartRegionEx(ctx, "executor.handleNoDelayExecutor") defer r.End() @@ -968,7 +969,7 @@ func (a *ExecStmt) handleNoDelayExecutor(ctx context.Context, e Executor) (sqlex return nil, err } -func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e Executor) (err error) { +func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e exec.Executor) (err error) { sctx := a.Ctx // Do not activate the transaction here. // When autocommit = 0 and transaction in pessimistic mode, @@ -1082,7 +1083,7 @@ func (a *ExecStmt) handlePessimisticDML(ctx context.Context, e Executor) (err er } // handlePessimisticLockError updates TS and rebuild executor if the err is write conflict. 
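Both `NewChunk` rewrites above follow the same recipe: the executor's private sizing fields are now read through `Base()` and exported getters. A minimal sketch of the shape this implies for the internal package (the types here are simplified stand-ins, not the real TiDB ones):

```go
package main

import "fmt"

// FieldType and Chunk stand in for types.FieldType and chunk.Chunk.
type FieldType struct{ name string }

type Chunk struct {
	fields       []*FieldType
	cap, maxRows int
}

// BaseExecutor keeps its fields private and exposes the getters the patch
// switches callers to: Base().RetFieldTypes(), Base().InitCap(), Base().MaxChunkSize().
type BaseExecutor struct {
	retFieldTypes []*FieldType
	initCap       int
	maxChunkSize  int
}

func (e *BaseExecutor) Base() *BaseExecutor         { return e }
func (e *BaseExecutor) RetFieldTypes() []*FieldType { return e.retFieldTypes }
func (e *BaseExecutor) InitCap() int                { return e.initCap }
func (e *BaseExecutor) MaxChunkSize() int           { return e.maxChunkSize }

// allocChunk mimics alloc.Alloc(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize()).
func allocChunk(fields []*FieldType, initCap, maxChunkSize int) *Chunk {
	return &Chunk{fields: fields, cap: initCap, maxRows: maxChunkSize}
}

func main() {
	base := (&BaseExecutor{
		retFieldTypes: []*FieldType{{name: "bigint"}},
		initCap:       32,
		maxChunkSize:  1024,
	}).Base()
	chk := allocChunk(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize())
	fmt.Println(len(chk.fields), chk.cap, chk.maxRows)
}
```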
-func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, lockErr error) (_ Executor, err error) { +func (a *ExecStmt) handlePessimisticLockError(ctx context.Context, lockErr error) (_ exec.Executor, err error) { if lockErr == nil { return nil, nil } @@ -1157,7 +1158,7 @@ type pessimisticTxn interface { } // buildExecutor build an executor from plan, prepared statement may need additional procedure. -func (a *ExecStmt) buildExecutor() (Executor, error) { +func (a *ExecStmt) buildExecutor() (exec.Executor, error) { defer func(start time.Time) { a.phaseBuildDurations[0] += time.Since(start) }(time.Now()) ctx := a.Ctx stmtCtx := ctx.GetSessionVars().StmtCtx @@ -1201,7 +1202,7 @@ func (a *ExecStmt) buildExecutor() (Executor, error) { return e, nil } -func (a *ExecStmt) openExecutor(ctx context.Context, e Executor) (err error) { +func (a *ExecStmt) openExecutor(ctx context.Context, e exec.Executor) (err error) { defer func() { if r := recover(); r != nil { err = errors.New(fmt.Sprint(r)) @@ -1213,7 +1214,7 @@ func (a *ExecStmt) openExecutor(ctx context.Context, e Executor) (err error) { return err } -func (a *ExecStmt) next(ctx context.Context, e Executor, req *chunk.Chunk) error { +func (a *ExecStmt) next(ctx context.Context, e exec.Executor, req *chunk.Chunk) error { start := time.Now() err := Next(ctx, e, req) a.phaseNextDurations[0] += time.Since(start) diff --git a/executor/admin.go b/executor/admin.go index 3b4561ae00b7d..9818d42d5777c 100644 --- a/executor/admin.go +++ b/executor/admin.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -44,14 +45,14 @@ import ( ) var ( - _ Executor = &CheckIndexRangeExec{} - _ Executor = &RecoverIndexExec{} - _ Executor = &CleanupIndexExec{} + _ exec.Executor = &CheckIndexRangeExec{} + _ exec.Executor = &RecoverIndexExec{} + _ exec.Executor = &CleanupIndexExec{} ) // CheckIndexRangeExec outputs the index values which has handle between begin and end. type CheckIndexRangeExec struct { - baseExecutor + exec.BaseExecutor table *model.TableInfo index *model.IndexInfo @@ -68,7 +69,7 @@ type CheckIndexRangeExec struct { // Next implements the Executor Next interface. func (e *CheckIndexRangeExec) Next(ctx context.Context, req *chunk.Chunk) error { req.Reset() - handleIdx := e.schema.Len() - 1 + handleIdx := e.GetSchema().Len() - 1 for { err := e.result.Next(ctx, e.srcChunk) if err != nil { @@ -105,7 +106,7 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error { e.cols = append(e.cols, col) } - colTypeForHandle := e.schema.Columns[len(e.cols)].RetType + colTypeForHandle := e.GetSchema().Columns[len(e.cols)].RetType e.cols = append(e.cols, &model.ColumnInfo{ ID: model.ExtraHandleID, Name: model.ExtraHandleName, @@ -117,8 +118,8 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error { if err != nil { return err } - sc := e.ctx.GetSessionVars().StmtCtx - txn, err := e.ctx.Txn(true) + sc := e.Ctx().GetSessionVars().StmtCtx + txn, err := e.Ctx().Txn(true) if err != nil { return nil } @@ -127,15 +128,15 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error { SetDAGRequest(dagPB). SetStartTS(txn.StartTS()). SetKeepOrder(true). - SetFromSessionVars(e.ctx.GetSessionVars()). - SetFromInfoSchema(e.ctx.GetInfoSchema()). - SetConnID(e.ctx.GetSessionVars().ConnectionID). 
+ SetFromSessionVars(e.Ctx().GetSessionVars()). + SetFromInfoSchema(e.Ctx().GetInfoSchema()). + SetConnID(e.Ctx().GetSessionVars().ConnectionID). Build() if err != nil { return err } - e.result, err = distsql.Select(ctx, e.ctx, kvReq, e.retFieldTypes, statistics.NewQueryFeedback(0, nil, 0, false)) + e.result, err = distsql.Select(ctx, e.Ctx(), kvReq, e.RetFieldTypes(), statistics.NewQueryFeedback(0, nil, 0, false)) if err != nil { return err } @@ -144,20 +145,20 @@ func (e *CheckIndexRangeExec) Open(ctx context.Context) error { func (e *CheckIndexRangeExec) buildDAGPB() (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) - sc := e.ctx.GetSessionVars().StmtCtx + dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.Ctx().GetSessionVars().Location()) + sc := e.Ctx().GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() - for i := range e.schema.Columns { + for i := range e.Schema().Columns { dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) } execPB := e.constructIndexScanPB() dagReq.Executors = append(dagReq.Executors, execPB) - err := tables.SetPBColumnsDefaultValue(e.ctx, dagReq.Executors[0].IdxScan.Columns, e.cols) + err := tables.SetPBColumnsDefaultValue(e.Ctx(), dagReq.Executors[0].IdxScan.Columns, e.cols) if err != nil { return nil, err } - distsql.SetEncodeType(e.ctx, dagReq) + distsql.SetEncodeType(e.Ctx(), dagReq) return dagReq, nil } @@ -179,7 +180,7 @@ func (e *CheckIndexRangeExec) Close() error { // It is built from "admin recover index" statement, is used to backfill // corrupted index. type RecoverIndexExec struct { - baseExecutor + exec.BaseExecutor done bool @@ -217,11 +218,11 @@ func (e *RecoverIndexExec) columnsTypes() []*types.FieldType { // Open implements the Executor Open interface. 
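The `Open` below shows the other mechanical rule of the rename: the embedded field is now the exported `exec.BaseExecutor`, so the delegating call becomes `e.BaseExecutor.Open(ctx)`, and buffers are sized via accessors. A compilable toy of that delegation (stand-in types, not the real executor):

```go
package main

import (
	"context"
	"fmt"
)

// BaseExecutor is a stand-in for exec.BaseExecutor; embedding it promotes
// Open, InitCap and MaxChunkSize onto the concrete executor.
type BaseExecutor struct{ initCap, maxChunkSize int }

func (e *BaseExecutor) Open(context.Context) error { return nil }
func (e *BaseExecutor) InitCap() int               { return e.initCap }
func (e *BaseExecutor) MaxChunkSize() int          { return e.maxChunkSize }

type chunk struct{ cap, maxRows int }

type recoverLikeExec struct {
	BaseExecutor
	srcChunk  *chunk
	batchSize int
}

func (e *recoverLikeExec) Open(ctx context.Context) error {
	// The explicit qualifier is required because this Open overrides the
	// promoted one; exactly the call rewritten in the hunk below.
	if err := e.BaseExecutor.Open(ctx); err != nil {
		return err
	}
	e.srcChunk = &chunk{cap: e.InitCap(), maxRows: e.MaxChunkSize()}
	e.batchSize = 2048
	return nil
}

func main() {
	e := &recoverLikeExec{BaseExecutor: BaseExecutor{initCap: 32, maxChunkSize: 1024}}
	fmt.Println(e.Open(context.Background()), e.srcChunk.cap, e.batchSize)
}
```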
func (e *RecoverIndexExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } - e.srcChunk = chunk.New(e.columnsTypes(), e.initCap, e.maxChunkSize) + e.srcChunk = chunk.New(e.columnsTypes(), e.InitCap(), e.MaxChunkSize()) e.batchSize = 2048 e.recoverRows = make([]recoverRows, 0, e.batchSize) e.idxValsBufs = make([][]types.Datum, e.batchSize) @@ -232,7 +233,7 @@ func (e *RecoverIndexExec) Open(ctx context.Context) error { func (e *RecoverIndexExec) constructTableScanPB(tblInfo *model.TableInfo, colInfos []*model.ColumnInfo) (*tipb.Executor, error) { tblScan := tables.BuildTableScanFromInfos(tblInfo, colInfos) tblScan.TableId = e.physicalID - err := tables.SetPBColumnsDefaultValue(e.ctx, tblScan.Columns, colInfos) + err := tables.SetPBColumnsDefaultValue(e.Ctx(), tblScan.Columns, colInfos) return &tipb.Executor{Tp: tipb.ExecType_TypeTableScan, TblScan: tblScan}, err } @@ -245,8 +246,8 @@ func (e *RecoverIndexExec) constructLimitPB(count uint64) *tipb.Executor { func (e *RecoverIndexExec) buildDAGPB(txn kv.Transaction, limitCnt uint64) (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) - sc := e.ctx.GetSessionVars().StmtCtx + dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.Ctx().GetSessionVars().Location()) + sc := e.Ctx().GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() for i := range e.columns { dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) @@ -260,7 +261,7 @@ func (e *RecoverIndexExec) buildDAGPB(txn kv.Transaction, limitCnt uint64) (*tip limitExec := e.constructLimitPB(limitCnt) dagReq.Executors = append(dagReq.Executors, limitExec) - distsql.SetEncodeType(e.ctx, dagReq) + distsql.SetEncodeType(e.Ctx(), dagReq) return dagReq, nil } @@ -270,7 +271,7 @@ func (e *RecoverIndexExec) buildTableScan(ctx context.Context, txn kv.Transactio return nil, err } var builder distsql.RequestBuilder - keyRanges, err := buildRecoverIndexKeyRanges(e.ctx.GetSessionVars().StmtCtx, e.physicalID, startHandle) + keyRanges, err := buildRecoverIndexKeyRanges(e.Ctx().GetSessionVars().StmtCtx, e.physicalID, startHandle) if err != nil { return nil, err } @@ -279,9 +280,9 @@ func (e *RecoverIndexExec) buildTableScan(ctx context.Context, txn kv.Transactio SetDAGRequest(dagPB). SetStartTS(txn.StartTS()). SetKeepOrder(true). - SetFromSessionVars(e.ctx.GetSessionVars()). - SetFromInfoSchema(e.ctx.GetInfoSchema()). - SetConnID(e.ctx.GetSessionVars().ConnectionID). + SetFromSessionVars(e.Ctx().GetSessionVars()). + SetFromInfoSchema(e.Ctx().GetInfoSchema()). + SetConnID(e.Ctx().GetSessionVars().ConnectionID). Build() if err != nil { return nil, err @@ -290,7 +291,7 @@ func (e *RecoverIndexExec) buildTableScan(ctx context.Context, txn kv.Transactio // Actually, with limitCnt, the match datas maybe only in one region, so let the concurrency to be 1, // avoid unnecessary region scan. 
kvReq.Concurrency = 1 - result, err := distsql.Select(ctx, e.ctx, kvReq, e.columnsTypes(), statistics.NewQueryFeedback(0, nil, 0, false)) + result, err := distsql.Select(ctx, e.Ctx(), kvReq, e.columnsTypes(), statistics.NewQueryFeedback(0, nil, 0, false)) if err != nil { return nil, err } @@ -325,8 +326,8 @@ func (e *RecoverIndexExec) backfillIndex(ctx context.Context) (int64, int64, err ) for { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) - errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { - setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) + errInTxn := kv.RunInNewTxn(ctx, e.Ctx().GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, txn) var err error result, err = e.backfillIndexInTxn(ctx, txn, currentHandle) return err @@ -406,7 +407,7 @@ func (e *RecoverIndexExec) buildIndexedValues(row chunk.Row, idxVals []types.Dat } if e.cols == nil { - columns, _, err := expression.ColumnInfos2ColumnsAndNames(e.ctx, model.NewCIStr("mock"), e.table.Meta().Name, e.table.Meta().Columns, e.table.Meta()) + columns, _, err := expression.ColumnInfos2ColumnsAndNames(e.Ctx(), model.NewCIStr("mock"), e.table.Meta().Name, e.table.Meta().Columns, e.table.Meta()) if err != nil { return nil, err } @@ -439,7 +440,7 @@ func (e *RecoverIndexExec) batchMarkDup(txn kv.Transaction, rows []recoverRows) return nil } e.batchKeys = e.batchKeys[:0] - sc := e.ctx.GetSessionVars().StmtCtx + sc := e.Ctx().GetSessionVars().StmtCtx distinctFlags := make([]bool, 0, len(rows)) rowIdx := make([]int, 0, len(rows)) cnt := 0 @@ -515,7 +516,7 @@ func (e *RecoverIndexExec) backfillIndexInTxn(ctx context.Context, txn kv.Transa } // Constrains is already checked. - e.ctx.GetSessionVars().StmtCtx.BatchCheck = true + e.Ctx().GetSessionVars().StmtCtx.BatchCheck = true for _, row := range rows { if row.skip { continue @@ -527,7 +528,7 @@ func (e *RecoverIndexExec) backfillIndexInTxn(ctx context.Context, txn kv.Transa return result, err } - _, err = e.index.Create(e.ctx, txn, row.idxVals, row.handle, row.rsData, table.WithIgnoreAssertion) + _, err = e.index.Create(e.Ctx(), txn, row.idxVals, row.handle, row.rsData, table.WithIgnoreAssertion) if err != nil { return result, err } @@ -582,7 +583,7 @@ func (e *RecoverIndexExec) Next(ctx context.Context, req *chunk.Chunk) error { // It is built from "admin cleanup index" statement, is used to delete // dangling index data. 
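`RecoverIndexExec.backfillIndex` above and `CleanupIndexExec.cleanTableIndex` below share the same skeleton: loop, then hand one batch of work to `kv.RunInNewTxn`, with the store now fetched via `e.Ctx().GetStore()`. A rough sketch of that run-a-closure-in-a-fresh-transaction shape, with toy store and transaction types (the real helper also retries and handles write conflicts):

```go
package main

import (
	"context"
	"fmt"
)

type Transaction struct{ ops []string }

type Storage struct{ committed [][]string }

// runInNewTxn is a toy stand-in for kv.RunInNewTxn: open a transaction, run
// the closure against it, commit on success, surface the error otherwise.
func runInNewTxn(ctx context.Context, store *Storage, fn func(ctx context.Context, txn *Transaction) error) error {
	txn := &Transaction{}
	if err := fn(ctx, txn); err != nil {
		return err // toy version: no retry on write conflict
	}
	store.committed = append(store.committed, txn.ops)
	return nil
}

func main() {
	store := &Storage{} // plays the role of e.Ctx().GetStore()
	err := runInNewTxn(context.Background(), store, func(_ context.Context, txn *Transaction) error {
		txn.ops = append(txn.ops, "backfill one batch of index keys")
		return nil
	})
	fmt.Println(err, len(store.committed))
}
```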
type CleanupIndexExec struct { - baseExecutor + exec.BaseExecutor done bool removeCnt uint64 @@ -639,7 +640,7 @@ func (e *CleanupIndexExec) deleteDanglingIdx(txn kv.Transaction, values map[stri return errors.Trace(errors.Errorf("batch keys are inconsistent with handles")) } for _, handleIdxVals := range handleIdxValsGroup.([][]types.Datum) { - if err := e.index.Delete(e.ctx.GetSessionVars().StmtCtx, txn, handleIdxVals, handle); err != nil { + if err := e.index.Delete(e.Ctx().GetSessionVars().StmtCtx, txn, handleIdxVals, handle); err != nil { return err } e.removeCnt++ @@ -675,7 +676,7 @@ func (e *CleanupIndexExec) fetchIndex(ctx context.Context, txn kv.Transaction) e } defer terror.Call(result.Close) - sc := e.ctx.GetSessionVars().StmtCtx + sc := e.Ctx().GetSessionVars().StmtCtx idxColLen := len(e.index.Meta().Columns) for { err := result.Next(ctx, e.idxChunk) @@ -756,9 +757,9 @@ func (e *CleanupIndexExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *CleanupIndexExec) cleanTableIndex(ctx context.Context) error { for { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnAdmin) - errInTxn := kv.RunInNewTxn(ctx, e.ctx.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + errInTxn := kv.RunInNewTxn(ctx, e.Ctx().GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { txn.SetDiskFullOpt(kvrpcpb.DiskFullOpt_AllowedOnAlmostFull) - setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) + setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, txn) err := e.fetchIndex(ctx, txn) if err != nil { return err @@ -794,7 +795,7 @@ func (e *CleanupIndexExec) buildIndexScan(ctx context.Context, txn kv.Transactio if err != nil { return nil, err } - sc := e.ctx.GetSessionVars().StmtCtx + sc := e.Ctx().GetSessionVars().StmtCtx var builder distsql.RequestBuilder ranges := ranger.FullRange() keyRanges, err := distsql.IndexRangesToKVRanges(sc, e.physicalID, e.index.Meta().ID, ranges, nil) @@ -810,16 +811,16 @@ func (e *CleanupIndexExec) buildIndexScan(ctx context.Context, txn kv.Transactio SetDAGRequest(dagPB). SetStartTS(txn.StartTS()). SetKeepOrder(true). - SetFromSessionVars(e.ctx.GetSessionVars()). - SetFromInfoSchema(e.ctx.GetInfoSchema()). - SetConnID(e.ctx.GetSessionVars().ConnectionID). + SetFromSessionVars(e.Ctx().GetSessionVars()). + SetFromInfoSchema(e.Ctx().GetInfoSchema()). + SetConnID(e.Ctx().GetSessionVars().ConnectionID). Build() if err != nil { return nil, err } kvReq.Concurrency = 1 - result, err := distsql.Select(ctx, e.ctx, kvReq, e.getIdxColTypes(), statistics.NewQueryFeedback(0, nil, 0, false)) + result, err := distsql.Select(ctx, e.Ctx(), kvReq, e.getIdxColTypes(), statistics.NewQueryFeedback(0, nil, 0, false)) if err != nil { return nil, err } @@ -828,18 +829,18 @@ func (e *CleanupIndexExec) buildIndexScan(ctx context.Context, txn kv.Transactio // Open implements the Executor Open interface. 
func (e *CleanupIndexExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } return e.init() } func (e *CleanupIndexExec) init() error { - e.idxChunk = chunk.New(e.getIdxColTypes(), e.initCap, e.maxChunkSize) + e.idxChunk = chunk.New(e.getIdxColTypes(), e.InitCap(), e.MaxChunkSize()) e.idxValues = kv.NewHandleMap() e.batchKeys = make([]kv.Key, 0, e.batchSize) e.idxValsBufs = make([][]types.Datum, e.batchSize) - sc := e.ctx.GetSessionVars().StmtCtx + sc := e.Ctx().GetSessionVars().StmtCtx idxKey, _, err := e.index.GenIndexKey(sc, []types.Datum{{}}, kv.IntHandle(math.MinInt64), nil) if err != nil { return err @@ -850,8 +851,8 @@ func (e *CleanupIndexExec) init() error { func (e *CleanupIndexExec) buildIdxDAGPB(txn kv.Transaction) (*tipb.DAGRequest, error) { dagReq := &tipb.DAGRequest{} - dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.ctx.GetSessionVars().Location()) - sc := e.ctx.GetSessionVars().StmtCtx + dagReq.TimeZoneName, dagReq.TimeZoneOffset = timeutil.Zone(e.Ctx().GetSessionVars().Location()) + sc := e.Ctx().GetSessionVars().StmtCtx dagReq.Flags = sc.PushDownFlags() for i := range e.columns { dagReq.OutputOffsets = append(dagReq.OutputOffsets, uint32(i)) @@ -859,14 +860,14 @@ func (e *CleanupIndexExec) buildIdxDAGPB(txn kv.Transaction) (*tipb.DAGRequest, execPB := e.constructIndexScanPB() dagReq.Executors = append(dagReq.Executors, execPB) - err := tables.SetPBColumnsDefaultValue(e.ctx, dagReq.Executors[0].IdxScan.Columns, e.columns) + err := tables.SetPBColumnsDefaultValue(e.Ctx(), dagReq.Executors[0].IdxScan.Columns, e.columns) if err != nil { return nil, err } limitExec := e.constructLimitPB() dagReq.Executors = append(dagReq.Executors, limitExec) - distsql.SetEncodeType(e.ctx, dagReq) + distsql.SetEncodeType(e.Ctx(), dagReq) return dagReq, nil } diff --git a/executor/admin_plugins.go b/executor/admin_plugins.go index c81de0ea24682..5c04577a680a5 100644 --- a/executor/admin_plugins.go +++ b/executor/admin_plugins.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/plugin" "github.com/pingcap/tidb/util/chunk" @@ -25,7 +26,7 @@ import ( // AdminPluginsExec indicates AdminPlugins executor. type AdminPluginsExec struct { - baseExecutor + exec.BaseExecutor Action core.AdminPluginsAction Plugins []string } @@ -42,7 +43,7 @@ func (e *AdminPluginsExec) Next(ctx context.Context, _ *chunk.Chunk) error { } func (e *AdminPluginsExec) changeDisableFlagAndFlush(disabled bool) error { - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) for _, pluginName := range e.Plugins { err := plugin.ChangeDisableFlagAndFlush(dom, pluginName, disabled) if err != nil { diff --git a/executor/admin_telemetry.go b/executor/admin_telemetry.go index c3469c1eb7491..f7ec7b0272da2 100644 --- a/executor/admin_telemetry.go +++ b/executor/admin_telemetry.go @@ -18,13 +18,14 @@ import ( "context" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/telemetry" "github.com/pingcap/tidb/util/chunk" ) // AdminShowTelemetryExec is an executor for ADMIN SHOW TELEMETRY. 
type AdminShowTelemetryExec struct { - baseExecutor + exec.BaseExecutor done bool } @@ -35,7 +36,7 @@ func (e *AdminShowTelemetryExec) Next(ctx context.Context, req *chunk.Chunk) err return nil } e.done = true - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) id, err := telemetry.GetTrackingID(dom.GetEtcdClient()) if err != nil { return err @@ -44,7 +45,7 @@ func (e *AdminShowTelemetryExec) Next(ctx context.Context, req *chunk.Chunk) err if err != nil { return err } - previewData, err := telemetry.PreviewUsageData(e.ctx, dom.GetEtcdClient()) + previewData, err := telemetry.PreviewUsageData(e.Ctx(), dom.GetEtcdClient()) if err != nil { return err } @@ -56,7 +57,7 @@ func (e *AdminShowTelemetryExec) Next(ctx context.Context, req *chunk.Chunk) err // AdminResetTelemetryIDExec is an executor for ADMIN RESET TELEMETRY_ID. type AdminResetTelemetryIDExec struct { - baseExecutor + exec.BaseExecutor done bool } @@ -66,7 +67,7 @@ func (e *AdminResetTelemetryIDExec) Next(ctx context.Context, _ *chunk.Chunk) er return nil } e.done = true - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) _, err := telemetry.ResetTrackingID(dom.GetEtcdClient()) return err } diff --git a/executor/aggregate.go b/executor/aggregate.go index 4de4a4e88168e..8c272af9062ac 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -157,7 +158,7 @@ type AfFinalResult struct { +--------------+ +-+ +-+ +-+ */ type HashAggExec struct { - baseExecutor + exec.BaseExecutor sc *stmtctx.StatementContext PartialAggFuncs []aggfuncs.AggFunc @@ -245,7 +246,7 @@ func (d *HashAggIntermData) getPartialResultBatch(_ *stmtctx.StatementContext, p // Close implements the Executor Close interface. func (e *HashAggExec) Close() error { if e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } if e.isUnparallelExec { var firstErr error @@ -259,7 +260,7 @@ func (e *HashAggExec) Close() error { firstErr = e.listInDisk.Close() } e.spillAction, e.tmpChkForSpill = nil, nil - if err := e.baseExecutor.Close(); firstErr == nil { + if err := e.BaseExecutor.Close(); firstErr == nil { firstErr = err } return firstErr @@ -289,7 +290,7 @@ func (e *HashAggExec) Close() error { e.memTracker.ReplaceBytesUsed(0) } } - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } // Open implements the Executor Open interface. 
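In the `Open` hunk below, `HashAggExec` rebuilds its memory tracker from the executor's ID and attaches it to the statement-level tracker, with both now reached through accessors (`e.ID()`, `e.Ctx()`). Schematically, with a simplified tracker in place of util/memory.Tracker:

```go
package main

import "fmt"

// Tracker is a stand-in for memory.Tracker.
type Tracker struct {
	label  int
	parent *Tracker
}

func NewTracker(label int, _ int64) *Tracker { return &Tracker{label: label} }

func (t *Tracker) AttachTo(parent *Tracker) { t.parent = parent }

type hashAggLike struct {
	id         int
	memTracker *Tracker
}

// ID stands in for exec.BaseExecutor.ID(), which replaces the private e.id.
func (e *hashAggLike) ID() int { return e.id }

func main() {
	stmtTracker := NewTracker(0, -1)
	e := &hashAggLike{id: 42}
	e.memTracker = NewTracker(e.ID(), -1) // was memory.NewTracker(e.id, -1)
	e.memTracker.AttachTo(stmtTracker)    // gated on TrackAggregateMemoryUsage in the real code
	fmt.Println(e.memTracker.label, e.memTracker.parent == stmtTracker)
}
```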
@@ -300,7 +301,7 @@ func (e *HashAggExec) Open(ctx context.Context) error { } }) - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } e.prepared = false @@ -308,17 +309,17 @@ if e.memTracker != nil { e.memTracker.Reset() } else { - e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker = memory.NewTracker(e.ID(), -1) } - if e.ctx.GetSessionVars().TrackAggregateMemoryUsage { - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + if e.Ctx().GetSessionVars().TrackAggregateMemoryUsage { + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) } if e.isUnparallelExec { e.initForUnparallelExec() return nil } - e.initForParallelExec(e.ctx) + e.initForParallelExec(e.Ctx()) return nil } @@ -330,23 +331,23 @@ func (e *HashAggExec) initForUnparallelExec() { failpoint.Inject("ConsumeRandomPanic", nil) e.memTracker.Consume(hack.DefBucketMemoryUsageForMapStrToSlice*(1<<e.bInMap) + setSize) diff --git a/executor/analyze.go b/executor/analyze.go // If partitionStatsConcurrency > 1, we will try to demand extra session from Domain to save Analyze results in concurrency. // If there is no extra session we can use, we will save analyze results in single-thread. if partitionStatsConcurrency > 1 { - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) subSctxs := dom.FetchAnalyzeExec(partitionStatsConcurrency) if len(subSctxs) > 0 { defer func() { @@ -296,7 +297,7 @@ func (e *AnalyzeExec) handleResultsError(ctx context.Context, concurrency int, n tableIDs := map[int64]struct{}{} // save analyze results in single-thread. - statsHandle := domain.GetDomain(e.ctx).StatsHandle() + statsHandle := domain.GetDomain(e.Ctx()).StatsHandle() panicCnt := 0 var err error for panicCnt < concurrency { @@ -311,28 +312,28 @@ } else { logutil.Logger(ctx).Error("analyze failed", zap.Error(err)) } - finishJobWithLog(e.ctx, results.Job, err) + finishJobWithLog(e.Ctx(), results.Job, err) continue } handleGlobalStats(needGlobalStats, globalStatsMap, results) tableIDs[results.TableID.GetStatisticsID()] = struct{}{} - if err1 := statsHandle.SaveTableStatsToStorage(results, e.ctx.GetSessionVars().EnableAnalyzeSnapshot, handle.StatsMetaHistorySourceAnalyze); err1 != nil { + if err1 := statsHandle.SaveTableStatsToStorage(results, e.Ctx().GetSessionVars().EnableAnalyzeSnapshot, handle.StatsMetaHistorySourceAnalyze); err1 != nil { tableID := results.TableID.TableID err = err1 logutil.Logger(ctx).Error("save table stats to storage failed", zap.Error(err), zap.Int64("tableID", tableID)) - finishJobWithLog(e.ctx, results.Job, err) + finishJobWithLog(e.Ctx(), results.Job, err) } else { - finishJobWithLog(e.ctx, results.Job, nil) + finishJobWithLog(e.Ctx(), results.Job, nil) } - if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 { - finishJobWithLog(e.ctx, results.Job, exeerrors.ErrQueryInterrupted) + if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { + finishJobWithLog(e.Ctx(), results.Job, exeerrors.ErrQueryInterrupted) return errors.Trace(exeerrors.ErrQueryInterrupted) } } // Dump stats to historical storage. 
for tableID := range tableIDs { - if err := recordHistoricalStats(e.ctx, tableID); err != nil { + if err := recordHistoricalStats(e.Ctx(), tableID); err != nil { logutil.BgLogger().Error("record historical stats failed", zap.Error(err)) } } @@ -349,17 +350,17 @@ func (e *AnalyzeExec) handleResultsErrorWithConcurrency(ctx context.Context, sta saveResultsCh := make(chan *statistics.AnalyzeResults, partitionStatsConcurrency) errCh := make(chan error, partitionStatsConcurrency) for i := 0; i < partitionStatsConcurrency; i++ { - worker := newAnalyzeSaveStatsWorker(saveResultsCh, subSctxs[i], errCh, &e.ctx.GetSessionVars().Killed) + worker := newAnalyzeSaveStatsWorker(saveResultsCh, subSctxs[i], errCh, &e.Ctx().GetSessionVars().Killed) ctx1 := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) wg.Run(func() { - worker.run(ctx1, e.ctx.GetSessionVars().EnableAnalyzeSnapshot) + worker.run(ctx1, e.Ctx().GetSessionVars().EnableAnalyzeSnapshot) }) } tableIDs := map[int64]struct{}{} panicCnt := 0 var err error for panicCnt < statsConcurrency { - if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 { + if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { close(saveResultsCh) return errors.Trace(exeerrors.ErrQueryInterrupted) } @@ -374,7 +375,7 @@ func (e *AnalyzeExec) handleResultsErrorWithConcurrency(ctx context.Context, sta } else { logutil.Logger(ctx).Error("analyze failed", zap.Error(err)) } - finishJobWithLog(e.ctx, results.Job, err) + finishJobWithLog(e.Ctx(), results.Job, err) continue } handleGlobalStats(needGlobalStats, globalStatsMap, results) @@ -393,7 +394,7 @@ func (e *AnalyzeExec) handleResultsErrorWithConcurrency(ctx context.Context, sta } for tableID := range tableIDs { // Dump stats to historical storage. - if err := recordHistoricalStats(e.ctx, tableID); err != nil { + if err := recordHistoricalStats(e.Ctx(), tableID); err != nil { logutil.BgLogger().Error("record historical stats failed", zap.Error(err)) } } @@ -418,7 +419,7 @@ func (e *AnalyzeExec) analyzeWorker(taskCh <-chan *analyzeTask, resultsCh chan<- if !ok { break } - StartAnalyzeJob(e.ctx, task.job) + StartAnalyzeJob(e.Ctx(), task.job) switch task.taskType { case colTask: resultsCh <- analyzeColumnsPushDownEntry(task.colExec) diff --git a/executor/analyze_global_stats.go b/executor/analyze_global_stats.go index 47e3434f692cf..50b58172307e7 100644 --- a/executor/analyze_global_stats.go +++ b/executor/analyze_global_stats.go @@ -53,7 +53,7 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo for globalStatsID := range globalStatsMap { globalStatsTableIDs[globalStatsID.tableID] = struct{}{} } - statsHandle := domain.GetDomain(e.ctx).StatsHandle() + statsHandle := domain.GetDomain(e.Ctx()).StatsHandle() tableIDs := map[int64]struct{}{} for tableID := range globalStatsTableIDs { tableIDs[tableID] = struct{}{} @@ -67,8 +67,8 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo logutil.BgLogger().Warn("cannot find the partitioned table, skip merging global stats", zap.Int64("tableID", globalStatsID.tableID)) continue } - AddNewAnalyzeJob(e.ctx, job) - StartAnalyzeJob(e.ctx, job) + AddNewAnalyzeJob(e.Ctx(), job) + StartAnalyzeJob(e.Ctx(), job) mergeStatsErr := func() error { globalOpts := e.opts if e.OptionsMap != nil { @@ -76,7 +76,7 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo globalOpts = v2Options.FilledOpts } } - globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.ctx, 
globalOpts, e.ctx.GetInfoSchema().(infoschema.InfoSchema), + globalStats, err := statsHandle.MergePartitionStats2GlobalStatsByTableID(e.Ctx(), globalOpts, e.Ctx().GetInfoSchema().(infoschema.InfoSchema), globalStatsID.tableID, info.isIndex, info.histIDs, tableAllPartitionStats) if err != nil { @@ -84,7 +84,7 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo zap.String("info", job.JobInfo), zap.Error(err), zap.Int64("tableID", tableID)) if types.ErrPartitionStatsMissing.Equal(err) || types.ErrPartitionColumnStatsMissing.Equal(err) { // When we find some partition-level stats are missing, we need to report warning. - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } return err } @@ -110,12 +110,12 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo } return err }() - FinishAnalyzeMergeJob(e.ctx, job, mergeStatsErr) + FinishAnalyzeMergeJob(e.Ctx(), job, mergeStatsErr) } } for tableID := range tableIDs { // Dump stats to historical storage. - if err := recordHistoricalStats(e.ctx, tableID); err != nil { + if err := recordHistoricalStats(e.Ctx(), tableID); err != nil { logutil.BgLogger().Error("record historical stats failed", zap.Error(err)) } } @@ -123,7 +123,7 @@ func (e *AnalyzeExec) handleGlobalStats(ctx context.Context, needGlobalStats boo } func (e *AnalyzeExec) newAnalyzeHandleGlobalStatsJob(key globalStatsKey) *statistics.AnalyzeJob { - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) is := dom.InfoSchema() table, ok := is.TableByID(key.tableID) if !ok { diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index d447f06e7af7b..4c74ce991322b 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -20,6 +20,7 @@ import ( "sync/atomic" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -41,7 +42,7 @@ import ( // BatchPointGetExec executes a bunch of point select queries. type BatchPointGetExec struct { - baseExecutor + exec.BaseExecutor tblInfo *model.TableInfo idxInfo *model.IndexInfo @@ -82,29 +83,29 @@ func (e *BatchPointGetExec) buildVirtualColumnInfo() { if len(e.virtualColumnIndex) > 0 { e.virtualColumnRetFieldTypes = make([]*types.FieldType, len(e.virtualColumnIndex)) for i, idx := range e.virtualColumnIndex { - e.virtualColumnRetFieldTypes[i] = e.schema.Columns[idx].RetType + e.virtualColumnRetFieldTypes[i] = e.Schema().Columns[idx].RetType } } } // Open implements the Executor interface. 
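Further down, `BatchPointGetExec.Close` swaps the private `e.runtimeStats` field for the exported `e.RuntimeStats()` accessor before registering stats under `e.ID()`. A distilled version of that Close-time registration (collector and stats types are stand-ins):

```go
package main

import "fmt"

// RuntimeStats and statsColl are stand-ins for the execdetails types.
type RuntimeStats struct{ loops int }

type statsColl struct{ byID map[int]*RuntimeStats }

func (c *statsColl) RegisterStats(id int, s *RuntimeStats) { c.byID[id] = s }

type pointGetLike struct {
	id    int
	stats *RuntimeStats
}

func (e *pointGetLike) ID() int                     { return e.id }
func (e *pointGetLike) RuntimeStats() *RuntimeStats { return e.stats }

// Close mirrors BatchPointGetExec.Close: consult the exported RuntimeStats()
// accessor instead of the old private runtimeStats field.
func (e *pointGetLike) Close(coll *statsColl) error {
	if e.RuntimeStats() != nil {
		defer coll.RegisterStats(e.ID(), e.stats)
	}
	return nil
}

func main() {
	coll := &statsColl{byID: map[int]*RuntimeStats{}}
	e := &pointGetLike{id: 7, stats: &RuntimeStats{loops: 1}}
	fmt.Println(e.Close(coll), coll.byID[7] != nil)
}
```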
func (e *BatchPointGetExec) Open(context.Context) error { - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() txnCtx := sessVars.TxnCtx - txn, err := e.ctx.Txn(false) + txn, err := e.Ctx().Txn(false) if err != nil { return err } e.txn = txn - setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, e.snapshot) + setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, e.snapshot) var batchGetter kv.BatchGetter = e.snapshot if txn.Valid() { lock := e.tblInfo.Lock if e.lock { batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), &PessimisticLockCacheGetter{txnCtx: txnCtx}, e.snapshot) - } else if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) && e.ctx.GetSessionVars().EnablePointGetCache { - batchGetter = newCacheBatchGetter(e.ctx, e.tblInfo.ID, e.snapshot) + } else if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) && e.Ctx().GetSessionVars().EnablePointGetCache { + batchGetter = newCacheBatchGetter(e.Ctx(), e.tblInfo.ID, e.snapshot) } else { batchGetter = driver.NewBufferBatchGetter(txn.GetMemBuffer(), nil, e.snapshot) } @@ -157,10 +158,10 @@ func MockNewCacheTableSnapShot(snapshot kv.Snapshot, memBuffer kv.MemBuffer) *ca // Close implements the Executor interface. func (e *BatchPointGetExec) Close() error { - if e.runtimeStats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + if e.RuntimeStats() != nil { + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } - if e.runtimeStats != nil && e.snapshot != nil { + if e.RuntimeStats() != nil && e.snapshot != nil { e.snapshot.SetOption(kv.CollectRuntimeStats, nil) } e.inited = 0 @@ -176,7 +177,7 @@ func (e *BatchPointGetExec) Next(ctx context.Context, req *chunk.Chunk) error { return err } if e.lock { - e.updateDeltaForTableID(e.tblInfo.ID) + e.UpdateDeltaForTableID(e.tblInfo.ID) } } @@ -185,14 +186,14 @@ func (e *BatchPointGetExec) Next(ctx context.Context, req *chunk.Chunk) error { } for !req.IsFull() && e.index < len(e.values) { handle, val := e.handles[e.index], e.values[e.index] - err := DecodeRowValToChunk(e.base().ctx, e.schema, e.tblInfo, handle, val, req, e.rowDecoder) + err := DecodeRowValToChunk(e.Base().Ctx(), e.Schema(), e.tblInfo, handle, val, req, e.rowDecoder) if err != nil { return err } e.index++ } - err := table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.schema.Columns, e.columns, e.ctx, req) + err := table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.Schema().Columns, e.columns, e.Ctx(), req) if err != nil { return err } @@ -213,7 +214,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { var indexKeys []kv.Key var err error batchGetter := e.batchGetter - rc := e.ctx.GetSessionVars().IsPessimisticReadConsistency() + rc := e.Ctx().GetSessionVars().IsPessimisticReadConsistency() if e.idxInfo != nil && !isCommonHandleRead(e.tblInfo, e.idxInfo) { // `SELECT a, b FROM t WHERE (a, b) IN ((1, 2), (1, 2), (2, 1), (1, 2))` should not return duplicated rows dedup := make(map[hack.MutableString]struct{}) @@ -238,7 +239,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { if e.singlePart && e.partTblID != physID { continue } - idxKey, err1 := EncodeUniqueIndexKey(e.ctx, e.tblInfo, e.idxInfo, idxVals, physID) + idxKey, err1 := EncodeUniqueIndexKey(e.Ctx(), e.tblInfo, e.idxInfo, idxVals, physID) if err1 != nil && !kv.ErrNotExist.Equal(err1) { return err1 } @@ 
-302,7 +303,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { pid := tablecodec.DecodeTableID(key) e.physIDs = append(e.physIDs, pid) if e.lock { - e.updateDeltaForTableID(pid) + e.UpdateDeltaForTableID(pid) } } } @@ -394,7 +395,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { lockKeys := make([]kv.Key, len(keys)+len(indexKeys)) copy(lockKeys, keys) copy(lockKeys[len(keys):], indexKeys) - err = LockKeys(ctx, e.ctx, e.waitTime, lockKeys...) + err = LockKeys(ctx, e.Ctx(), e.waitTime, lockKeys...) if err != nil { return err } @@ -414,7 +415,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { val := values[string(key)] if len(val) == 0 { if e.idxInfo != nil && (!e.tblInfo.IsCommonHandle || !e.idxInfo.Primary) && - !e.ctx.GetSessionVars().StmtCtx.WeakConsistency { + !e.Ctx().GetSessionVars().StmtCtx.WeakConsistency { return (&consistency.Reporter{ HandleEncode: func(_ kv.Handle) kv.Key { return key @@ -424,7 +425,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { }, Tbl: e.tblInfo, Idx: e.idxInfo, - Sctx: e.ctx, + Sctx: e.Ctx(), }).ReportLookupInconsistent(ctx, 1, 0, e.handles[i:i+1], @@ -448,7 +449,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { } // Lock exists keys only for Read Committed Isolation. if e.lock && rc { - err = LockKeys(ctx, e.ctx, e.waitTime, existKeys...) + err = LockKeys(ctx, e.Ctx(), e.waitTime, existKeys...) if err != nil { return err } diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index 3f64164332ce7..688fc35711b6f 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/parser/ast" @@ -48,7 +49,7 @@ import ( ) var ( - _ Executor = &mockDataSource{} + _ exec.Executor = &mockDataSource{} _ core.PhysicalPlan = &mockDataPhysicalPlan{} wideString = strings.Repeat("x", 5*1024) ) @@ -63,7 +64,7 @@ type mockDataSourceParameters struct { } type mockDataSource struct { - baseExecutor + exec.BaseExecutor p mockDataSourceParameters genData []*chunk.Chunk chunks []*chunk.Chunk @@ -73,10 +74,10 @@ type mockDataSource struct { type mockDataPhysicalPlan struct { MockPhysicalPlan schema *expression.Schema - exec Executor + exec exec.Executor } -func (mp *mockDataPhysicalPlan) GetExecutor() Executor { +func (mp *mockDataPhysicalPlan) GetExecutor() exec.Executor { return mp.exec } @@ -107,7 +108,7 @@ func (mp *mockDataPhysicalPlan) MemoryUsage() (sum int64) { return } -func buildMockDataPhysicalPlan(ctx sessionctx.Context, srcExec Executor) *mockDataPhysicalPlan { +func buildMockDataPhysicalPlan(ctx sessionctx.Context, srcExec exec.Executor) *mockDataPhysicalPlan { return &mockDataPhysicalPlan{ schema: srcExec.Schema(), exec: srcExec, @@ -115,7 +116,7 @@ func buildMockDataPhysicalPlan(ctx sessionctx.Context, srcExec Executor) *mockDa } func (mds *mockDataSource) genColDatums(col int) (results []interface{}) { - typ := mds.retFieldTypes[col] + typ := mds.RetFieldTypes()[col] order := false if col < len(mds.p.orders) { order = mds.p.orders[col] @@ -212,7 +213,7 @@ func (mds *mockDataSource) Next(ctx context.Context, req *chunk.Chunk) error { } func buildMockDataSource(opt mockDataSourceParameters) *mockDataSource { - baseExec := newBaseExecutor(opt.ctx, opt.schema, 0) + baseExec := 
exec.NewBaseExecutor(opt.ctx, opt.schema, 0) m := &mockDataSource{baseExec, opt, nil, nil, 0} rTypes := retTypes(m) colData := make([][]interface{}, len(rTypes)) @@ -220,13 +221,13 @@ func buildMockDataSource(opt mockDataSourceParameters) *mockDataSource { colData[i] = m.genColDatums(i) } - m.genData = make([]*chunk.Chunk, (m.p.rows+m.maxChunkSize-1)/m.maxChunkSize) + m.genData = make([]*chunk.Chunk, (m.p.rows+m.MaxChunkSize()-1)/m.MaxChunkSize()) for i := range m.genData { - m.genData[i] = chunk.NewChunkWithCapacity(retTypes(m), m.maxChunkSize) + m.genData[i] = chunk.NewChunkWithCapacity(retTypes(m), m.MaxChunkSize()) } for i := 0; i < m.p.rows; i++ { - idx := i / m.maxChunkSize + idx := i / m.MaxChunkSize() retTypes := retTypes(m) for colIdx := 0; colIdx < len(rTypes); colIdx++ { switch retTypes[colIdx].GetType() { @@ -287,8 +288,8 @@ func defaultAggTestCase(exec string) *aggTestCase { return &aggTestCase{exec, ast.AggFuncSum, 1000, false, 10000000, 4, true, ctx} } -func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expression.Schema, - aggFuncs []*aggregation.AggFuncDesc, groupItems []expression.Expression) Executor { +func buildHashAggExecutor(ctx sessionctx.Context, src exec.Executor, schema *expression.Schema, + aggFuncs []*aggregation.AggFuncDesc, groupItems []expression.Expression) exec.Executor { plan := new(core.PhysicalHashAgg) plan.AggFuncs = aggFuncs plan.GroupByItems = groupItems @@ -298,12 +299,12 @@ func buildHashAggExecutor(ctx sessionctx.Context, src Executor, schema *expressi b := newExecutorBuilder(ctx, nil, nil) exec := b.build(plan) hashAgg := exec.(*HashAggExec) - hashAgg.children[0] = src + hashAgg.SetChildren(0, src) return exec } -func buildStreamAggExecutor(ctx sessionctx.Context, srcExec Executor, schema *expression.Schema, - aggFuncs []*aggregation.AggFuncDesc, groupItems []expression.Expression, concurrency int, dataSourceSorted bool) Executor { +func buildStreamAggExecutor(ctx sessionctx.Context, srcExec exec.Executor, schema *expression.Schema, + aggFuncs []*aggregation.AggFuncDesc, groupItems []expression.Expression, concurrency int, dataSourceSorted bool) exec.Executor { src := buildMockDataPhysicalPlan(ctx, srcExec) sg := new(core.PhysicalStreamAgg) @@ -351,7 +352,7 @@ func buildStreamAggExecutor(ctx sessionctx.Context, srcExec Executor, schema *ex return b.build(plan) } -func buildAggExecutor(b *testing.B, testCase *aggTestCase, child Executor) Executor { +func buildAggExecutor(b *testing.B, testCase *aggTestCase, child exec.Executor) exec.Executor { ctx := testCase.ctx if testCase.execType == "stream" { if err := ctx.GetSessionVars().SetSystemVar(variable.TiDBStreamAggConcurrency, fmt.Sprintf("%v", testCase.concurrency)); err != nil { @@ -375,7 +376,7 @@ func buildAggExecutor(b *testing.B, testCase *aggTestCase, child Executor) Execu } aggFuncs := []*aggregation.AggFuncDesc{aggFunc} - var aggExec Executor + var aggExec exec.Executor switch testCase.execType { case "hash": aggExec = buildHashAggExecutor(testCase.ctx, child, schema, aggFuncs, groupBy) @@ -507,7 +508,7 @@ func BenchmarkAggDistinct(b *testing.B) { } } -func buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, frame *core.WindowFrame, srcExec Executor, schema *expression.Schema, partitionBy []*expression.Column, concurrency int, dataSourceSorted bool) Executor { +func buildWindowExecutor(ctx sessionctx.Context, windowFunc string, funcs int, frame *core.WindowFrame, srcExec exec.Executor, schema *expression.Schema, partitionBy []*expression.Column, 
concurrency int, dataSourceSorted bool) exec.Executor { src := buildMockDataPhysicalPlan(ctx, srcExec) win := new(core.PhysicalWindow) win.WindowFuncDescs = make([]*aggregation.WindowFuncDesc, 0) @@ -882,7 +883,7 @@ func defaultHashJoinTestCase(cols []*types.FieldType, joinType core.JoinType, us return tc } -func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) *HashJoinExec { +func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec exec.Executor) *HashJoinExec { if testCase.useOuterToBuild { innerExec, outerExec = outerExec, innerExec } @@ -911,7 +912,7 @@ func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) probeKeysColIdx := make([]int, 0, len(testCase.keyIdx)) probeKeysColIdx = append(probeKeysColIdx, testCase.keyIdx...) e := &HashJoinExec{ - baseExecutor: newBaseExecutor(testCase.ctx, joinSchema, 5, innerExec, outerExec), + BaseExecutor: exec.NewBaseExecutor(testCase.ctx, joinSchema, 5, innerExec, outerExec), hashJoinCtx: &hashJoinCtx{ sessCtx: testCase.ctx, joinType: testCase.joinType, // 0 for InnerJoin, 1 for LeftOutersJoin, 2 for RightOuterJoin @@ -931,7 +932,7 @@ func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) }, } - childrenUsedSchema := markChildrenUsedCols(e.Schema(), e.children[0].Schema(), e.children[1].Schema()) + childrenUsedSchema := markChildrenUsedCols(e.Schema(), e.Children(0).Schema(), e.Children(1).Schema()) defaultValues := make([]types.Datum, e.buildWorker.buildSideExec.Schema().Len()) lhsTypes, rhsTypes := retTypes(innerExec), retTypes(outerExec) for i := uint(0); i < e.concurrency; i++ { @@ -951,10 +952,10 @@ func prepare4HashJoin(testCase *hashJoinTestCase, innerExec, outerExec Executor) t := memory.NewTracker(-1, memLimit) t.SetActionOnExceed(nil) t2 := disk.NewTracker(-1, -1) - e.ctx.GetSessionVars().MemTracker = t - e.ctx.GetSessionVars().StmtCtx.MemTracker.AttachTo(t) - e.ctx.GetSessionVars().DiskTracker = t2 - e.ctx.GetSessionVars().StmtCtx.DiskTracker.AttachTo(t2) + e.Ctx().GetSessionVars().MemTracker = t + e.Ctx().GetSessionVars().StmtCtx.MemTracker.AttachTo(t) + e.Ctx().GetSessionVars().DiskTracker = t2 + e.Ctx().GetSessionVars().StmtCtx.DiskTracker.AttachTo(t2) return e } @@ -1315,7 +1316,7 @@ func (tc indexJoinTestCase) getMockDataSourceOptByRows(rows int) mockDataSourceP } } -func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (Executor, error) { +func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (exec.Executor, error) { outerCols, innerCols := tc.columns(), tc.columns() joinSchema := expression.NewSchema(outerCols...) joinSchema.Append(innerCols...) 
@@ -1337,7 +1338,7 @@ func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, } e := &IndexLookUpJoin{ - baseExecutor: newBaseExecutor(tc.ctx, joinSchema, 1, outerDS), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, joinSchema, 1, outerDS), outerCtx: outerCtx{ rowTypes: leftTypes, keyCols: tc.outerJoinKeyIdx, @@ -1360,7 +1361,7 @@ func prepare4IndexInnerHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, return e, nil } -func prepare4IndexOuterHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (Executor, error) { +func prepare4IndexOuterHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (exec.Executor, error) { e, err := prepare4IndexInnerHashJoin(tc, outerDS, innerDS) if err != nil { return nil, err @@ -1374,7 +1375,7 @@ func prepare4IndexOuterHashJoin(tc *indexJoinTestCase, outerDS *mockDataSource, return idxHash, nil } -func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (Executor, error) { +func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, innerDS *mockDataSource) (exec.Executor, error) { outerCols, innerCols := tc.columns(), tc.columns() joinSchema := expression.NewSchema(outerCols...) joinSchema.Append(innerCols...) @@ -1411,7 +1412,7 @@ func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, inne } e := &IndexLookUpMergeJoin{ - baseExecutor: newBaseExecutor(tc.ctx, joinSchema, 2, outerDS), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, joinSchema, 2, outerDS), outerMergeCtx: outerMergeCtx{ rowTypes: leftTypes, keyCols: tc.outerJoinKeyIdx, @@ -1432,7 +1433,7 @@ func prepare4IndexMergeJoin(tc *indexJoinTestCase, outerDS *mockDataSource, inne keyOff2IdxOff: keyOff2IdxOff, lastColHelper: nil, } - concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency() + concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency() joiners := make([]joiner, concurrency) for i := 0; i < concurrency; i++ { joiners[i] = newJoiner(tc.ctx, 0, false, defaultValues, nil, leftTypes, rightTypes, nil, false) @@ -1459,7 +1460,7 @@ func benchmarkIndexJoinExecWithCase( b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - var exec Executor + var exec exec.Executor var err error switch execType { case indexInnerHashJoin: @@ -1535,12 +1536,12 @@ type mergeJoinTestCase struct { childrenUsedSchema [][]bool } -func prepareMergeJoinExec(tc *mergeJoinTestCase, joinSchema *expression.Schema, leftExec, rightExec Executor, defaultValues []types.Datum, +func prepareMergeJoinExec(tc *mergeJoinTestCase, joinSchema *expression.Schema, leftExec, rightExec exec.Executor, defaultValues []types.Datum, compareFuncs []expression.CompareFunc, innerJoinKeys []*expression.Column, outerJoinKeys []*expression.Column) *MergeJoinExec { // only benchmark inner join mergeJoinExec := &MergeJoinExec{ stmtCtx: tc.ctx.GetSessionVars().StmtCtx, - baseExecutor: newBaseExecutor(tc.ctx, joinSchema, 3, leftExec, rightExec), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, joinSchema, 3, leftExec, rightExec), compareFuncs: compareFuncs, isOuterJoin: false, } @@ -1572,7 +1573,7 @@ func prepareMergeJoinExec(tc *mergeJoinTestCase, joinSchema *expression.Schema, return mergeJoinExec } -func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, sorted bool, concurrency int) Executor { +func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, sorted bool, concurrency int) exec.Executor { outerCols, innerCols := 
tc.columns(), tc.columns() joinSchema := expression.NewSchema() @@ -1607,12 +1608,12 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, defaultValues := make([]types.Datum, len(innerCols)) - var leftExec, rightExec Executor + var leftExec, rightExec exec.Executor if sorted { leftSortExec := &SortExec{ - baseExecutor: newBaseExecutor(tc.ctx, innerDS.schema, 3, innerDS), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, innerDS.Schema(), 3, innerDS), ByItems: make([]*util.ByItems, 0, len(tc.innerJoinKeyIdx)), - schema: innerDS.schema, + schema: innerDS.Schema(), } for _, key := range innerJoinKeys { leftSortExec.ByItems = append(leftSortExec.ByItems, &util.ByItems{Expr: key}) @@ -1620,9 +1621,9 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, leftExec = leftSortExec rightSortExec := &SortExec{ - baseExecutor: newBaseExecutor(tc.ctx, outerDS.schema, 4, outerDS), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, outerDS.Schema(), 4, outerDS), ByItems: make([]*util.ByItems, 0, len(tc.outerJoinKeyIdx)), - schema: outerDS.schema, + schema: outerDS.Schema(), } for _, key := range outerJoinKeys { rightSortExec.ByItems = append(rightSortExec.ByItems, &util.ByItems{Expr: key}) @@ -1633,12 +1634,12 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, rightExec = outerDS } - var e Executor + var e exec.Executor if concurrency == 1 { e = prepareMergeJoinExec(tc, joinSchema, leftExec, rightExec, defaultValues, compareFuncs, innerJoinKeys, outerJoinKeys) } else { // build dataSources - dataSources := []Executor{leftExec, rightExec} + dataSources := []exec.Executor{leftExec, rightExec} // build splitters innerByItems := make([]expression.Expression, 0, len(innerJoinKeys)) for _, innerJoinKey := range innerJoinKeys { @@ -1660,7 +1661,7 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, } // build ShuffleMergeJoinExec shuffle := &ShuffleExec{ - baseExecutor: newBaseExecutor(tc.ctx, joinSchema, 4), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, joinSchema, 4), concurrency: concurrency, dataSources: dataSources, splitters: splitters, @@ -1670,10 +1671,10 @@ func prepare4MergeJoin(tc *mergeJoinTestCase, innerDS, outerDS *mockDataSource, shuffle.workers = make([]*shuffleWorker, shuffle.concurrency) for i := range shuffle.workers { leftReceiver := shuffleReceiver{ - baseExecutor: newBaseExecutor(tc.ctx, leftExec.Schema(), 0), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, leftExec.Schema(), 0), } rightReceiver := shuffleReceiver{ - baseExecutor: newBaseExecutor(tc.ctx, rightExec.Schema(), 0), + BaseExecutor: exec.NewBaseExecutor(tc.ctx, rightExec.Schema(), 0), } w := &shuffleWorker{ receivers: []*shuffleReceiver{&leftReceiver, &rightReceiver}, @@ -1763,7 +1764,7 @@ func benchmarkMergeJoinExecWithCase(b *testing.B, tc *mergeJoinTestCase, innerDS b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - var exec Executor + var exec exec.Executor switch joinType { case innerMergeJoin: exec = prepare4MergeJoin(tc, innerDS, outerDS, true, 2) @@ -1870,9 +1871,9 @@ func benchmarkSortExec(b *testing.B, cas *sortCase) { } dataSource := buildMockDataSource(opt) exec := &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 4, dataSource), + BaseExecutor: exec.NewBaseExecutor(cas.ctx, dataSource.Schema(), 4, dataSource), ByItems: make([]*util.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, + schema: dataSource.Schema(), } for _, idx := range cas.orderByIdx { exec.ByItems = 
append(exec.ByItems, &util.ByItems{Expr: cas.columns()[idx]}) @@ -1980,9 +1981,9 @@ func benchmarkLimitExec(b *testing.B, cas *limitCase) { ctx: cas.ctx, } dataSource := buildMockDataSource(opt) - var exec Executor + var exe exec.Executor limit := &LimitExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 4, dataSource), + BaseExecutor: exec.NewBaseExecutor(cas.ctx, dataSource.Schema(), 4, dataSource), begin: uint64(cas.offset), end: uint64(cas.offset + cas.count), } @@ -1995,7 +1996,7 @@ func benchmarkLimitExec(b *testing.B, cas *limitCase) { } } } - exec = limit + exe = limit } else { columns := cas.columns() usedCols := make([]*expression.Column, 0, len(columns)) @@ -2007,26 +2008,26 @@ func benchmarkLimitExec(b *testing.B, cas *limitCase) { } } proj := &ProjectionExec{ - baseExecutor: newBaseExecutor(cas.ctx, expression.NewSchema(usedCols...), 0, limit), + BaseExecutor: exec.NewBaseExecutor(cas.ctx, expression.NewSchema(usedCols...), 0, limit), numWorkers: 1, evaluatorSuit: expression.NewEvaluatorSuite(exprs, false), } - exec = proj + exe = proj } b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() tmpCtx := context.Background() - chk := newFirstChunk(exec) + chk := newFirstChunk(exe) dataSource.prepareChunks() b.StartTimer() - if err := exec.Open(tmpCtx); err != nil { + if err := exe.Open(tmpCtx); err != nil { b.Fatal(err) } for { - if err := exec.Next(tmpCtx, chk); err != nil { + if err := exe.Next(tmpCtx, chk); err != nil { b.Fatal(err) } if chk.NumRows() == 0 { @@ -2034,7 +2035,7 @@ func benchmarkLimitExec(b *testing.B, cas *limitCase) { } } - if err := exec.Close(); err != nil { + if err := exe.Close(); err != nil { b.Fatal(err) } b.StopTimer() diff --git a/executor/bind.go b/executor/bind.go index 90272e6878620..53fd6c7a664ad 100644 --- a/executor/bind.go +++ b/executor/bind.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/parser/ast" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/util/chunk" @@ -27,7 +28,7 @@ import ( // SQLBindExec represents a bind executor. 
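`SQLBindExec` below resolves two different handles, and the patch only changes how the session is reached (`e.Ctx()` instead of the private field): session-scoped bindings come from a value stashed on the session context under `bindinfo.SessionBindInfoKeyType`, global ones from the domain's BindHandle. A distilled sketch of the two paths (all types simplified stand-ins):

```go
package main

import "fmt"

type bindKeyType struct{}

// sessionBindInfoKey plays the role of bindinfo.SessionBindInfoKeyType.
var sessionBindInfoKey bindKeyType

type bindHandle struct{ dropped []string }

func (h *bindHandle) DropBindRecord(sql string) { h.dropped = append(h.dropped, sql) }

// sessionCtx stands in for sessionctx.Context: a value bag plus a domain hook.
type sessionCtx struct {
	values map[any]any
	domain *domain
}

func (c *sessionCtx) Value(k any) any { return c.values[k] }

type domain struct{ global *bindHandle }

func getDomain(c *sessionCtx) *domain { return c.domain }

func dropSQLBind(ctx *sessionCtx, isGlobal bool, sql string) {
	if !isGlobal {
		// Session path: the handle lives on the session context itself.
		ctx.Value(sessionBindInfoKey).(*bindHandle).DropBindRecord(sql)
		return
	}
	// Global path: the handle is owned by the domain.
	getDomain(ctx).global.DropBindRecord(sql)
}

func main() {
	ctx := &sessionCtx{
		values: map[any]any{sessionBindInfoKey: &bindHandle{}},
		domain: &domain{global: &bindHandle{}},
	}
	dropSQLBind(ctx, false, "select * from t")
	dropSQLBind(ctx, true, "select * from t")
	fmt.Println(len(ctx.Value(sessionBindInfoKey).(*bindHandle).dropped), len(ctx.domain.global.dropped))
}
```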
type SQLBindExec struct { - baseExecutor + exec.BaseExecutor sqlBindOp plannercore.SQLBindOpType normdOrigSQL string @@ -81,12 +82,12 @@ func (e *SQLBindExec) dropSQLBind() error { } } if !e.isGlobal { - handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) + handle := e.Ctx().Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) err := handle.DropBindRecord(e.normdOrigSQL, e.db, bindInfo) return err } - affectedRows, err := domain.GetDomain(e.ctx).BindHandle().DropBindRecord(e.normdOrigSQL, e.db, bindInfo) - e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(affectedRows) + affectedRows, err := domain.GetDomain(e.Ctx()).BindHandle().DropBindRecord(e.normdOrigSQL, e.db, bindInfo) + e.Ctx().GetSessionVars().StmtCtx.AddAffectedRows(affectedRows) return err } @@ -95,12 +96,12 @@ func (e *SQLBindExec) dropSQLBindByDigest() error { return errors.New("sql digest is empty") } if !e.isGlobal { - handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) + handle := e.Ctx().Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) err := handle.DropBindRecordByDigest(e.sqlDigest) return err } - affectedRows, err := domain.GetDomain(e.ctx).BindHandle().DropBindRecordByDigest(e.sqlDigest) - e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(affectedRows) + affectedRows, err := domain.GetDomain(e.Ctx()).BindHandle().DropBindRecordByDigest(e.sqlDigest) + e.Ctx().GetSessionVars().StmtCtx.AddAffectedRows(affectedRows) return err } @@ -113,19 +114,19 @@ func (e *SQLBindExec) setBindingStatus() error { Collation: e.collation, } } - ok, err := domain.GetDomain(e.ctx).BindHandle().SetBindRecordStatus(e.normdOrigSQL, bindInfo, e.newStatus) + ok, err := domain.GetDomain(e.Ctx()).BindHandle().SetBindRecordStatus(e.normdOrigSQL, bindInfo, e.newStatus) if err == nil && !ok { warningMess := errors.New("There are no bindings can be set the status. Please check the SQL text") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(warningMess) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(warningMess) } return err } func (e *SQLBindExec) setBindingStatusByDigest() error { - ok, err := domain.GetDomain(e.ctx).BindHandle().SetBindRecordStatusByDigest(e.newStatus, e.sqlDigest) + ok, err := domain.GetDomain(e.Ctx()).BindHandle().SetBindRecordStatusByDigest(e.newStatus, e.sqlDigest) if err == nil && !ok { warningMess := errors.New("There are no bindings can be set the status. Please check the SQL text") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(warningMess) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(warningMess) } return err } @@ -133,9 +134,9 @@ func (e *SQLBindExec) setBindingStatusByDigest() error { func (e *SQLBindExec) createSQLBind() error { // For audit log, SQLBindExec execute "explain" statement internally, save and recover stmtctx // is necessary to avoid 'create binding' been recorded as 'explain'. 
- saveStmtCtx := e.ctx.GetSessionVars().StmtCtx + saveStmtCtx := e.Ctx().GetSessionVars().StmtCtx defer func() { - e.ctx.GetSessionVars().StmtCtx = saveStmtCtx + e.Ctx().GetSessionVars().StmtCtx = saveStmtCtx }() bindInfo := bindinfo.Binding{ @@ -153,24 +154,24 @@ func (e *SQLBindExec) createSQLBind() error { Bindings: []bindinfo.Binding{bindInfo}, } if !e.isGlobal { - handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) - return handle.CreateBindRecord(e.ctx, record) + handle := e.Ctx().Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) + return handle.CreateBindRecord(e.Ctx(), record) } - return domain.GetDomain(e.ctx).BindHandle().CreateBindRecord(e.ctx, record) + return domain.GetDomain(e.Ctx()).BindHandle().CreateBindRecord(e.Ctx(), record) } func (e *SQLBindExec) flushBindings() error { - return domain.GetDomain(e.ctx).BindHandle().FlushBindings() + return domain.GetDomain(e.Ctx()).BindHandle().FlushBindings() } func (e *SQLBindExec) captureBindings() { - domain.GetDomain(e.ctx).BindHandle().CaptureBaselines() + domain.GetDomain(e.Ctx()).BindHandle().CaptureBaselines() } func (e *SQLBindExec) evolveBindings() error { - return domain.GetDomain(e.ctx).BindHandle().HandleEvolvePlanTask(e.ctx, true) + return domain.GetDomain(e.Ctx()).BindHandle().HandleEvolvePlanTask(e.Ctx(), true) } func (e *SQLBindExec) reloadBindings() error { - return domain.GetDomain(e.ctx).BindHandle().ReloadBindings() + return domain.GetDomain(e.Ctx()).BindHandle().ReloadBindings() } diff --git a/executor/brie.go b/executor/brie.go index 478ccbab46eb4..76d470ea41320 100644 --- a/executor/brie.go +++ b/executor/brie.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" @@ -246,30 +247,30 @@ func (b *executorBuilder) parseTSString(ts string) (uint64, error) { return oracle.GoTimeToTS(t1), nil } -func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) Executor { +func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) exec.Executor { if s.Kind == ast.BRIEKindShowBackupMeta { return execOnce(&showMetaExec{ - baseExecutor: newBaseExecutor(b.ctx, schema, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, schema, 0), showConfig: buildShowMetadataConfigFrom(s), }) } if s.Kind == ast.BRIEKindShowQuery { return execOnce(&showQueryExec{ - baseExecutor: newBaseExecutor(b.ctx, schema, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, schema, 0), targetID: uint64(s.JobID), }) } if s.Kind == ast.BRIEKindCancelJob { return &cancelJobExec{ - baseExecutor: newBaseExecutor(b.ctx, schema, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, schema, 0), targetID: uint64(s.JobID), } } e := &BRIEExec{ - baseExecutor: newBaseExecutor(b.ctx, schema, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, schema, 0), info: &brieTaskInfo{ kind: s.Kind, }, @@ -409,7 +410,7 @@ func (b *executorBuilder) buildBRIE(s *ast.BRIEStmt, schema *expression.Schema) // oneshotExecutor wraps a executor, making its `Next` would only be called once. 
type oneshotExecutor struct { - Executor + exec.Executor finished bool } @@ -426,12 +427,12 @@ func (o *oneshotExecutor) Next(ctx context.Context, req *chunk.Chunk) error { return nil } -func execOnce(ex Executor) Executor { +func execOnce(ex exec.Executor) exec.Executor { return &oneshotExecutor{Executor: ex} } type showQueryExec struct { - baseExecutor + exec.BaseExecutor targetID uint64 } @@ -449,7 +450,7 @@ func (s *showQueryExec) Next(ctx context.Context, req *chunk.Chunk) error { } type cancelJobExec struct { - baseExecutor + exec.BaseExecutor targetID uint64 } @@ -457,20 +458,20 @@ type cancelJobExec struct { func (s cancelJobExec) Next(ctx context.Context, req *chunk.Chunk) error { req.Reset() if !globalBRIEQueue.cancelTask(s.targetID) { - s.ctx.GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrLoadDataJobNotFound.FastGenByArgs(s.targetID)) + s.Ctx().GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrLoadDataJobNotFound.FastGenByArgs(s.targetID)) } return nil } type showMetaExec struct { - baseExecutor + exec.BaseExecutor showConfig show.Config } // BRIEExec represents an executor for BRIE statements (BACKUP, RESTORE, etc) type BRIEExec struct { - baseExecutor + exec.BaseExecutor backupCfg *task.BackupConfig restoreCfg *task.RestoreConfig @@ -529,9 +530,9 @@ func (e *BRIEExec) Next(ctx context.Context, req *chunk.Chunk) error { } bq := globalBRIEQueue - bq.clearTask(e.ctx.GetSessionVars().StmtCtx) + bq.clearTask(e.Ctx().GetSessionVars().StmtCtx) - e.info.connID = e.ctx.GetSessionVars().ConnectionID + e.info.connID = e.Ctx().GetSessionVars().ConnectionID e.info.queueTime = types.CurrentTime(mysql.TypeDatetime) taskCtx, taskID := bq.registerTask(ctx, e.info) defer bq.cancelTask(taskID) @@ -549,7 +550,7 @@ func (e *BRIEExec) Next(ctx context.Context, req *chunk.Chunk) error { for { select { case <-ticker.C: - if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 { + if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { bq.cancelTask(taskID) return } @@ -566,7 +567,7 @@ func (e *BRIEExec) Next(ctx context.Context, req *chunk.Chunk) error { defer bq.releaseTask() e.info.execTime = types.CurrentTime(mysql.TypeDatetime) - glue := &tidbGlueSession{se: e.ctx, progress: progress, info: e.info} + glue := &tidbGlueSession{se: e.Ctx(), progress: progress, info: e.info} switch e.info.kind { case ast.BRIEKindBackup: @@ -630,7 +631,7 @@ func (e *ShowExec) fetchShowBRIE(kind ast.BRIEKind) error { } return true }) - globalBRIEQueue.clearTask(e.ctx.GetSessionVars().StmtCtx) + globalBRIEQueue.clearTask(e.Ctx().GetSessionVars().StmtCtx) return nil } diff --git a/executor/brie_test.go b/executor/brie_test.go index 05c3224275182..785e73a1cdde6 100644 --- a/executor/brie_test.go +++ b/executor/brie_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" @@ -83,7 +84,7 @@ func TestFetchShowBRIE(t *testing.T) { // Compose executor. 
e := &ShowExec{ - baseExecutor: newBaseExecutor(sctx, schema, 0), + BaseExecutor: exec.NewBaseExecutor(sctx, schema, 0), Tp: ast.ShowBackups, } require.NoError(t, e.Open(ctx)) @@ -98,7 +99,7 @@ func TestFetchShowBRIE(t *testing.T) { // Register brie task info info1 := &brieTaskInfo{ kind: ast.BRIEKindBackup, - connID: e.ctx.GetSessionVars().ConnectionID, + connID: e.Ctx().GetSessionVars().ConnectionID, queueTime: lateTime, execTime: lateTime, finishTime: lateTime, @@ -127,7 +128,7 @@ func TestFetchShowBRIE(t *testing.T) { info2 := &brieTaskInfo{ id: 2, kind: ast.BRIEKindBackup, - connID: e.ctx.GetSessionVars().ConnectionID, + connID: e.Ctx().GetSessionVars().ConnectionID, queueTime: currTime, execTime: currTime, finishTime: currTime, @@ -136,6 +137,6 @@ func TestFetchShowBRIE(t *testing.T) { } globalBRIEQueue.registerTask(ctx, info2) info2Res := brieTaskInfoToResult(info2) - globalBRIEQueue.clearTask(e.ctx.GetSessionVars().StmtCtx) + globalBRIEQueue.clearTask(e.Ctx().GetSessionVars().StmtCtx) require.Equal(t, info2Res, fetchShowBRIEResult(t, e, brieColTypes)) } diff --git a/executor/builder.go b/executor/builder.go index 06b553377070f..873efc799c2fc 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/executor/internal/builder" + "github.com/pingcap/tidb/executor/internal/exec" internalutil "github.com/pingcap/tidb/executor/internal/util" executor_metrics "github.com/pingcap/tidb/executor/metrics" "github.com/pingcap/tidb/expression" @@ -130,7 +131,7 @@ func newExecutorBuilder(ctx sessionctx.Context, is infoschema.InfoSchema, ti *Te // It is mainly used for testing. type MockPhysicalPlan interface { plannercore.PhysicalPlan - GetExecutor() Executor + GetExecutor() exec.Executor } // MockExecutorBuilder is a wrapper for executorBuilder. @@ -146,11 +147,11 @@ func NewMockExecutorBuilderForTest(ctx sessionctx.Context, is infoschema.InfoSch } // Build builds an executor tree according to `p`. 
-func (b *MockExecutorBuilder) Build(p plannercore.Plan) Executor { +func (b *MockExecutorBuilder) Build(p plannercore.Plan) exec.Executor { return b.build(p) } -func (b *executorBuilder) build(p plannercore.Plan) Executor { +func (b *executorBuilder) build(p plannercore.Plan) exec.Executor { switch v := p.(type) { case nil: return nil @@ -318,10 +319,10 @@ func (b *executorBuilder) build(p plannercore.Plan) Executor { } } -func (b *executorBuilder) buildCancelDDLJobs(v *plannercore.CancelDDLJobs) Executor { +func (b *executorBuilder) buildCancelDDLJobs(v *plannercore.CancelDDLJobs) exec.Executor { e := &CancelDDLJobsExec{ CommandDDLJobsExec: &CommandDDLJobsExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), jobIDs: v.JobIDs, execute: ddl.CancelJobs, }, @@ -329,10 +330,10 @@ func (b *executorBuilder) buildCancelDDLJobs(v *plannercore.CancelDDLJobs) Execu return e } -func (b *executorBuilder) buildPauseDDLJobs(v *plannercore.PauseDDLJobs) Executor { +func (b *executorBuilder) buildPauseDDLJobs(v *plannercore.PauseDDLJobs) exec.Executor { e := &PauseDDLJobsExec{ CommandDDLJobsExec: &CommandDDLJobsExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), jobIDs: v.JobIDs, execute: ddl.PauseJobs, }, @@ -340,10 +341,10 @@ func (b *executorBuilder) buildPauseDDLJobs(v *plannercore.PauseDDLJobs) Executo return e } -func (b *executorBuilder) buildResumeDDLJobs(v *plannercore.ResumeDDLJobs) Executor { +func (b *executorBuilder) buildResumeDDLJobs(v *plannercore.ResumeDDLJobs) exec.Executor { e := &ResumeDDLJobsExec{ CommandDDLJobsExec: &CommandDDLJobsExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), jobIDs: v.JobIDs, execute: ddl.ResumeJobs, }, @@ -351,31 +352,31 @@ func (b *executorBuilder) buildResumeDDLJobs(v *plannercore.ResumeDDLJobs) Execu return e } -func (b *executorBuilder) buildChange(v *plannercore.Change) Executor { +func (b *executorBuilder) buildChange(v *plannercore.Change) exec.Executor { return &ChangeExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), ChangeStmt: v.ChangeStmt, } } -func (b *executorBuilder) buildShowNextRowID(v *plannercore.ShowNextRowID) Executor { +func (b *executorBuilder) buildShowNextRowID(v *plannercore.ShowNextRowID) exec.Executor { e := &ShowNextRowIDExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), tblName: v.TableName, } return e } -func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { +func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) exec.Executor { // We get Info here because for Executors that returns result set, // next will be called after transaction has been committed. // We need the transaction to get Info. 
e := &ShowDDLExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), } var err error - ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager() + ownerManager := domain.GetDomain(e.Ctx()).DDL().OwnerManager() ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) e.ddlOwnerID, err = ownerManager.GetOwnerID(ctx) cancel() @@ -384,13 +385,13 @@ func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { return nil } - session, err := e.getSysSession() + session, err := e.GetSysSession() if err != nil { b.err = err return nil } ddlInfo, err := ddl.GetDDLInfoWithNewTxn(session) - e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) + e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session) if err != nil { b.err = err return nil @@ -400,38 +401,38 @@ func (b *executorBuilder) buildShowDDL(v *plannercore.ShowDDL) Executor { return e } -func (b *executorBuilder) buildShowDDLJobs(v *plannercore.PhysicalShowDDLJobs) Executor { +func (b *executorBuilder) buildShowDDLJobs(v *plannercore.PhysicalShowDDLJobs) exec.Executor { loc := b.ctx.GetSessionVars().Location() ddlJobRetriever := DDLJobRetriever{TZLoc: loc} e := &ShowDDLJobsExec{ jobNumber: int(v.JobNumber), is: b.is, - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), DDLJobRetriever: ddlJobRetriever, } return e } -func (b *executorBuilder) buildShowDDLJobQueries(v *plannercore.ShowDDLJobQueries) Executor { +func (b *executorBuilder) buildShowDDLJobQueries(v *plannercore.ShowDDLJobQueries) exec.Executor { e := &ShowDDLJobQueriesExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), jobIDs: v.JobIDs, } return e } -func (b *executorBuilder) buildShowDDLJobQueriesWithRange(v *plannercore.ShowDDLJobQueriesWithRange) Executor { +func (b *executorBuilder) buildShowDDLJobQueriesWithRange(v *plannercore.ShowDDLJobQueriesWithRange) exec.Executor { e := &ShowDDLJobQueriesWithRangeExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), offset: v.Offset, limit: v.Limit, } return e } -func (b *executorBuilder) buildShowSlow(v *plannercore.ShowSlow) Executor { +func (b *executorBuilder) buildShowSlow(v *plannercore.ShowSlow) exec.Executor { e := &ShowSlowExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), ShowSlow: v.ShowSlow, } return e @@ -478,7 +479,7 @@ func buildIndexLookUpChecker(b *executorBuilder, p *plannercore.PhysicalIndexLoo } } -func (b *executorBuilder) buildCheckTable(v *plannercore.CheckTable) Executor { +func (b *executorBuilder) buildCheckTable(v *plannercore.CheckTable) exec.Executor { noMVIndexOrPrefixIndex := true for _, idx := range v.IndexInfos { if idx.MVIndex { @@ -497,7 +498,7 @@ func (b *executorBuilder) buildCheckTable(v *plannercore.CheckTable) Executor { } if b.ctx.GetSessionVars().FastCheckTable && noMVIndexOrPrefixIndex { e := &FastCheckTableExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dbName: v.DBName, table: v.Table, indexInfos: v.IndexInfos, @@ -520,7 +521,7 @@ func (b *executorBuilder) buildCheckTable(v *plannercore.CheckTable) Executor { } e := 
&CheckTableExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), dbName: v.DBName, table: v.Table, indexInfos: v.IndexInfos, @@ -573,7 +574,7 @@ func buildIdxColsConcatHandleCols(tblInfo *model.TableInfo, indexInfo *model.Ind return columns } -func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) Executor { +func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) exec.Executor { tblInfo := v.Table.TableInfo t, err := b.is.TableByName(v.Table.Schema, tblInfo.Name) if err != nil { @@ -594,14 +595,14 @@ func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) Executo } cols := buildIdxColsConcatHandleCols(tblInfo, index.Meta(), hasGenedCol) e := &RecoverIndexExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), columns: cols, containsGenedCol: hasGenedCol, index: index, table: t, physicalID: t.Meta().ID, } - sessCtx := e.ctx.GetSessionVars().StmtCtx + sessCtx := e.Ctx().GetSessionVars().StmtCtx e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns) return e } @@ -631,7 +632,7 @@ func buildHandleColsForExec(sctx *stmtctx.StatementContext, tblInfo *model.Table return plannercore.NewCommonHandleCols(sctx, tblInfo, pkIdx, tblCols) } -func (b *executorBuilder) buildCleanupIndex(v *plannercore.CleanupIndex) Executor { +func (b *executorBuilder) buildCleanupIndex(v *plannercore.CleanupIndex) exec.Executor { tblInfo := v.Table.TableInfo t, err := b.is.TableByName(v.Table.Schema, tblInfo.Name) if err != nil { @@ -655,26 +656,26 @@ func (b *executorBuilder) buildCleanupIndex(v *plannercore.CleanupIndex) Executo return nil } e := &CleanupIndexExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), columns: buildIdxColsConcatHandleCols(tblInfo, index.Meta(), false), index: index, table: t, physicalID: t.Meta().ID, batchSize: 20000, } - sessCtx := e.ctx.GetSessionVars().StmtCtx + sessCtx := e.Ctx().GetSessionVars().StmtCtx e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns) return e } -func (b *executorBuilder) buildCheckIndexRange(v *plannercore.CheckIndexRange) Executor { +func (b *executorBuilder) buildCheckIndexRange(v *plannercore.CheckIndexRange) exec.Executor { tb, err := b.is.TableByName(v.Table.Schema, v.Table.Name) if err != nil { b.err = err return nil } e := &CheckIndexRangeExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), handleRanges: v.HandleRanges, table: tb.Meta(), is: b.is, @@ -690,9 +691,9 @@ func (b *executorBuilder) buildCheckIndexRange(v *plannercore.CheckIndexRange) E return e } -func (b *executorBuilder) buildChecksumTable(v *plannercore.ChecksumTable) Executor { +func (b *executorBuilder) buildChecksumTable(v *plannercore.ChecksumTable) exec.Executor { e := &ChecksumTableExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), tables: make(map[int64]*checksumContext), done: false, } @@ -707,29 +708,32 @@ func (b *executorBuilder) buildChecksumTable(v *plannercore.ChecksumTable) Execu return e } -func (b *executorBuilder) buildReloadExprPushdownBlacklist(v *plannercore.ReloadExprPushdownBlacklist) Executor { - return &ReloadExprPushdownBlacklistExec{baseExecutor{ctx: b.ctx}} +func (b 
*executorBuilder) buildReloadExprPushdownBlacklist(_ *plannercore.ReloadExprPushdownBlacklist) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, nil, 0) + return &ReloadExprPushdownBlacklistExec{base} } -func (b *executorBuilder) buildReloadOptRuleBlacklist(v *plannercore.ReloadOptRuleBlacklist) Executor { - return &ReloadOptRuleBlacklistExec{baseExecutor{ctx: b.ctx}} +func (b *executorBuilder) buildReloadOptRuleBlacklist(_ *plannercore.ReloadOptRuleBlacklist) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, nil, 0) + return &ReloadOptRuleBlacklistExec{BaseExecutor: base} } -func (b *executorBuilder) buildAdminPlugins(v *plannercore.AdminPlugins) Executor { - return &AdminPluginsExec{baseExecutor: baseExecutor{ctx: b.ctx}, Action: v.Action, Plugins: v.Plugins} +func (b *executorBuilder) buildAdminPlugins(v *plannercore.AdminPlugins) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, nil, 0) + return &AdminPluginsExec{BaseExecutor: base, Action: v.Action, Plugins: v.Plugins} } -func (b *executorBuilder) buildDeallocate(v *plannercore.Deallocate) Executor { - base := newBaseExecutor(b.ctx, nil, v.ID()) - base.initCap = chunk.ZeroCapacity +func (b *executorBuilder) buildDeallocate(v *plannercore.Deallocate) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, nil, v.ID()) + base.SetInitCap(chunk.ZeroCapacity) e := &DeallocateExec{ - baseExecutor: base, + BaseExecutor: base, Name: v.Name, } return e } -func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) Executor { +func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) exec.Executor { if !b.inSelectLockStmt { b.inSelectLockStmt = true defer func() { b.inSelectLockStmt = false }() @@ -751,14 +755,14 @@ func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) Executor return src } e := &SelectLockExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), src), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src), Lock: v.Lock, tblID2Handle: v.TblID2Handle, tblID2PhysTblIDCol: v.TblID2PhysTblIDCol, } // filter out temporary tables because they do not store any record in tikv and should not write any lock - is := e.ctx.GetInfoSchema().(infoschema.InfoSchema) + is := e.Ctx().GetInfoSchema().(infoschema.InfoSchema) for tblID := range e.tblID2Handle { tblInfo, ok := is.TableByID(tblID) if !ok { @@ -773,16 +777,16 @@ func (b *executorBuilder) buildSelectLock(v *plannercore.PhysicalLock) Executor return e } -func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) Executor { +func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) exec.Executor { childExec := b.build(v.Children()[0]) if b.err != nil { return nil } n := int(mathutil.Min(v.Count, uint64(b.ctx.GetSessionVars().MaxChunkSize))) - base := newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec) - base.initCap = n + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec) + base.SetInitCap(n) e := &LimitExec{ - baseExecutor: base, + BaseExecutor: base, begin: v.Offset, end: v.Offset + v.Count, } @@ -800,19 +804,19 @@ func (b *executorBuilder) buildLimit(v *plannercore.PhysicalLimit) Executor { return e } -func (b *executorBuilder) buildPrepare(v *plannercore.Prepare) Executor { - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) - base.initCap = chunk.ZeroCapacity +func (b *executorBuilder) buildPrepare(v *plannercore.Prepare) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) + base.SetInitCap(chunk.ZeroCapacity) return &PrepareExec{ - baseExecutor: base, + 
BaseExecutor: base, name: v.Name, sqlText: v.SQLText, } } -func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { +func (b *executorBuilder) buildExecute(v *plannercore.Execute) exec.Executor { e := &ExecuteExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), is: b.is, name: v.Name, usingVars: v.Params, @@ -839,9 +843,9 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { return e } -func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) Executor { +func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) exec.Executor { e := &ShowExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), Tp: v.Tp, CountWarningsOrErrors: v.CountWarningsOrErrors, DBName: model.NewCIStr(v.DBName), @@ -863,14 +867,14 @@ func (b *executorBuilder) buildShow(v *plannercore.PhysicalShow) Executor { } if e.Tp == ast.ShowMasterStatus { // show master status need start ts. - if _, err := e.ctx.Txn(true); err != nil { + if _, err := e.Ctx().Txn(true); err != nil { b.err = err } } return e } -func (b *executorBuilder) buildSimple(v *plannercore.Simple) Executor { +func (b *executorBuilder) buildSimple(v *plannercore.Simple) exec.Executor { switch s := v.Statement.(type) { case *ast.GrantStmt: return b.buildGrant(s) @@ -903,27 +907,27 @@ func (b *executorBuilder) buildSimple(v *plannercore.Simple) Executor { } case *ast.CalibrateResourceStmt: return &calibrateResourceExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), 0), workloadType: s.Tp, optionList: s.DynamicCalibrateResourceOptionList, } case *ast.LoadDataActionStmt: return &LoadDataActionExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0), tp: s.Tp, jobID: s.JobID, } case *ast.ImportIntoActionStmt: return &ImportIntoActionExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0), tp: s.Tp, jobID: s.JobID, } } - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) - base.initCap = chunk.ZeroCapacity + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) + base.SetInitCap(chunk.ZeroCapacity) e := &SimpleExec{ - baseExecutor: base, + BaseExecutor: base, Statement: v.Statement, IsFromRemote: v.IsFromRemote, is: b.is, @@ -932,24 +936,24 @@ func (b *executorBuilder) buildSimple(v *plannercore.Simple) Executor { return e } -func (b *executorBuilder) buildSet(v *plannercore.Set) Executor { - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) - base.initCap = chunk.ZeroCapacity +func (b *executorBuilder) buildSet(v *plannercore.Set) exec.Executor { + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) + base.SetInitCap(chunk.ZeroCapacity) e := &SetExecutor{ - baseExecutor: base, + BaseExecutor: base, vars: v.VarAssigns, } return e } -func (b *executorBuilder) buildSetConfig(v *plannercore.SetConfig) Executor { +func (b *executorBuilder) buildSetConfig(v *plannercore.SetConfig) exec.Executor { return &SetConfigExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), p: v, } } -func (b *executorBuilder) buildInsert(v *plannercore.Insert) Executor { +func (b *executorBuilder) buildInsert(v *plannercore.Insert) exec.Executor { b.inInsertStmt = true if b.err = b.updateForUpdateTS(); b.err != nil { return nil @@ -959,16 +963,16 
@@ func (b *executorBuilder) buildInsert(v *plannercore.Insert) Executor { if b.err != nil { return nil } - var baseExec baseExecutor + var baseExec exec.BaseExecutor if selectExec != nil { - baseExec = newBaseExecutor(b.ctx, nil, v.ID(), selectExec) + baseExec = exec.NewBaseExecutor(b.ctx, nil, v.ID(), selectExec) } else { - baseExec = newBaseExecutor(b.ctx, nil, v.ID()) + baseExec = exec.NewBaseExecutor(b.ctx, nil, v.ID()) } - baseExec.initCap = chunk.ZeroCapacity + baseExec.SetInitCap(chunk.ZeroCapacity) ivs := &InsertValues{ - baseExecutor: baseExec, + BaseExecutor: baseExec, Table: v.Table, Columns: v.Columns, Lists: v.Lists, @@ -1002,7 +1006,7 @@ func (b *executorBuilder) buildInsert(v *plannercore.Insert) Executor { return insert } -func (b *executorBuilder) buildImportInto(v *plannercore.ImportInto) Executor { +func (b *executorBuilder) buildImportInto(v *plannercore.ImportInto) exec.Executor { tbl, ok := b.is.TableByID(v.Table.TableInfo.ID) if !ok { b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID) @@ -1013,7 +1017,7 @@ func (b *executorBuilder) buildImportInto(v *plannercore.ImportInto) Executor { return nil } - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) exec, err := newImportIntoExec(base, b.ctx, v, tbl) if err != nil { b.err = err @@ -1023,7 +1027,7 @@ func (b *executorBuilder) buildImportInto(v *plannercore.ImportInto) Executor { return exec } -func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) Executor { +func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) exec.Executor { tbl, ok := b.is.TableByID(v.Table.TableInfo.ID) if !ok { b.err = errors.Errorf("Can not get table %d", v.Table.TableInfo.ID) @@ -1034,7 +1038,7 @@ func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) Executor { return nil } - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) worker, err := NewLoadDataWorker(b.ctx, v, tbl) if err != nil { b.err = err @@ -1042,39 +1046,39 @@ func (b *executorBuilder) buildLoadData(v *plannercore.LoadData) Executor { } return &LoadDataExec{ - baseExecutor: base, + BaseExecutor: base, loadDataWorker: worker, FileLocRef: v.FileLocRef, } } -func (b *executorBuilder) buildLoadStats(v *plannercore.LoadStats) Executor { +func (b *executorBuilder) buildLoadStats(v *plannercore.LoadStats) exec.Executor { e := &LoadStatsExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), info: &LoadStatsInfo{v.Path, b.ctx}, } return e } -func (b *executorBuilder) buildLockStats(v *plannercore.LockStats) Executor { +func (b *executorBuilder) buildLockStats(v *plannercore.LockStats) exec.Executor { e := &LockStatsExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), Tables: v.Tables, } return e } -func (b *executorBuilder) buildUnlockStats(v *plannercore.UnlockStats) Executor { +func (b *executorBuilder) buildUnlockStats(v *plannercore.UnlockStats) exec.Executor { e := &UnlockStatsExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), Tables: v.Tables, } return e } -func (b *executorBuilder) buildIndexAdvise(v *plannercore.IndexAdvise) Executor { +func (b *executorBuilder) buildIndexAdvise(v *plannercore.IndexAdvise) exec.Executor { e := &IndexAdviseExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: 
exec.NewBaseExecutor(b.ctx, nil, v.ID()), IsLocal: v.IsLocal, indexAdviseInfo: &IndexAdviseInfo{ Path: v.Path, @@ -1087,17 +1091,17 @@ func (b *executorBuilder) buildIndexAdvise(v *plannercore.IndexAdvise) Executor return e } -func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) Executor { +func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) exec.Executor { if v.Load { e := &PlanReplayerLoadExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), info: &PlanReplayerLoadInfo{Path: v.File, Ctx: b.ctx}, } return e } if v.Capture { e := &PlanReplayerExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), CaptureInfo: &PlanReplayerCaptureInfo{ SQLDigest: v.SQLDigest, PlanDigest: v.PlanDigest, @@ -1107,7 +1111,7 @@ func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) Executo } if v.Remove { e := &PlanReplayerExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, v.ID()), CaptureInfo: &PlanReplayerCaptureInfo{ SQLDigest: v.SQLDigest, PlanDigest: v.PlanDigest, @@ -1118,7 +1122,7 @@ func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) Executo } e := &PlanReplayerExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), DumpInfo: &PlanReplayerDumpInfo{ Analyze: v.Analyze, Path: v.File, @@ -1128,21 +1132,21 @@ func (b *executorBuilder) buildPlanReplayer(v *plannercore.PlanReplayer) Executo if v.ExecStmt != nil { e.DumpInfo.ExecStmts = []ast.StmtNode{v.ExecStmt} } else { - e.baseExecutor = newBaseExecutor(b.ctx, nil, v.ID()) + e.BaseExecutor = exec.NewBaseExecutor(b.ctx, nil, v.ID()) } return e } -func (b *executorBuilder) buildReplace(vals *InsertValues) Executor { +func (b *executorBuilder) buildReplace(vals *InsertValues) exec.Executor { replaceExec := &ReplaceExec{ InsertValues: vals, } return replaceExec } -func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor { +func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) exec.Executor { e := &GrantExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0), Privs: grant.Privs, ObjectType: grant.ObjectType, Level: grant.Level, @@ -1154,9 +1158,9 @@ func (b *executorBuilder) buildGrant(grant *ast.GrantStmt) Executor { return e } -func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) Executor { +func (b *executorBuilder) buildRevoke(revoke *ast.RevokeStmt) exec.Executor { e := &RevokeExec{ - baseExecutor: newBaseExecutor(b.ctx, nil, 0), + BaseExecutor: exec.NewBaseExecutor(b.ctx, nil, 0), ctx: b.ctx, Privs: revoke.Privs, ObjectType: revoke.ObjectType, @@ -1249,11 +1253,11 @@ func (b *executorBuilder) setTelemetryInfo(v *plannercore.DDL) { } } -func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor { +func (b *executorBuilder) buildDDL(v *plannercore.DDL) exec.Executor { b.setTelemetryInfo(v) e := &DDLExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), stmt: v.Statement, is: b.is, tempTableDDL: temptable.GetTemporaryTableDDL(b.ctx), @@ -1263,9 +1267,9 @@ func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor { // buildTrace builds a TraceExec for future executing. This method will be called // at build(). 
-func (b *executorBuilder) buildTrace(v *plannercore.Trace) Executor { +func (b *executorBuilder) buildTrace(v *plannercore.Trace) exec.Executor { t := &TraceExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), stmtNode: v.StmtNode, builder: b, format: v.Format, @@ -1275,7 +1279,7 @@ func (b *executorBuilder) buildTrace(v *plannercore.Trace) Executor { } if t.format == plannercore.TraceFormatLog && !t.optimizerTrace { return &SortExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), t), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), t), ByItems: []*plannerutil.ByItems{ {Expr: &expression.Column{ Index: 0, @@ -1289,9 +1293,9 @@ func (b *executorBuilder) buildTrace(v *plannercore.Trace) Executor { } // buildExplain builds a explain executor. `e.rows` collects final result to `ExplainExec`. -func (b *executorBuilder) buildExplain(v *plannercore.Explain) Executor { +func (b *executorBuilder) buildExplain(v *plannercore.Explain) exec.Executor { explainExec := &ExplainExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), explain: v, } if v.Analyze { @@ -1319,19 +1323,19 @@ func (b *executorBuilder) buildExplain(v *plannercore.Explain) Executor { return explainExec } -func (b *executorBuilder) buildSelectInto(v *plannercore.SelectInto) Executor { +func (b *executorBuilder) buildSelectInto(v *plannercore.SelectInto) exec.Executor { child := b.build(v.TargetPlan) if b.err != nil { return nil } return &SelectIntoExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), child), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), child), intoOpt: v.IntoOpt, LineFieldsInfo: v.LineFieldsInfo, } } -func (b *executorBuilder) buildUnionScanExec(v *plannercore.PhysicalUnionScan) Executor { +func (b *executorBuilder) buildUnionScanExec(v *plannercore.PhysicalUnionScan) exec.Executor { oriEncounterUnionScan := b.encounterUnionScan b.encounterUnionScan = true defer func() { @@ -1348,18 +1352,18 @@ func (b *executorBuilder) buildUnionScanExec(v *plannercore.PhysicalUnionScan) E // buildUnionScanFromReader builds union scan executor from child executor. // Note that this function may be called by inner workers of index lookup join concurrently. // Be careful to avoid data race. -func (b *executorBuilder) buildUnionScanFromReader(reader Executor, v *plannercore.PhysicalUnionScan) Executor { +func (b *executorBuilder) buildUnionScanFromReader(reader exec.Executor, v *plannercore.PhysicalUnionScan) exec.Executor { // If reader is union, it means a partition table and we should transfer as above. if x, ok := reader.(*UnionExec); ok { - for i, child := range x.children { - x.children[i] = b.buildUnionScanFromReader(child, v) + for i, child := range x.AllChildren() { + x.SetChildren(i, b.buildUnionScanFromReader(child, v)) if b.err != nil { return nil } } return x } - us := &UnionScanExec{baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), reader)} + us := &UnionScanExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), reader)} // Get the handle column index of the below Plan. 
us.belowHandleCols = v.HandleCols us.mutableRow = chunk.MutRowFromTypes(retTypes(us)) @@ -1367,7 +1371,7 @@ func (b *executorBuilder) buildUnionScanFromReader(reader Executor, v *plannerco // If the push-downed condition contains virtual column, we may build a selection upon reader originReader := reader if sel, ok := reader.(*SelectionExec); ok { - reader = sel.children[0] + reader = sel.Children(0) } us.collators = make([]collate.Collator, 0, len(us.columns)) @@ -1467,7 +1471,7 @@ func (us *UnionScanExec) handleCachedTable(b *executorBuilder, x bypassDataSourc } // buildMergeJoin builds MergeJoinExec executor. -func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) Executor { +func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) exec.Executor { leftExec := b.build(v.Children()[0]) if b.err != nil { return nil @@ -1489,7 +1493,7 @@ func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) Execu e := &MergeJoinExec{ stmtCtx: b.ctx.GetSessionVars().StmtCtx, - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec), compareFuncs: v.CompareFuncs, joiner: newJoiner( b.ctx, @@ -1537,7 +1541,7 @@ func (b *executorBuilder) buildMergeJoin(v *plannercore.PhysicalMergeJoin) Execu return e } -func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executor { +func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) exec.Executor { leftExec := b.build(v.Children()[0]) if b.err != nil { return nil @@ -1549,7 +1553,7 @@ func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executo } e := &HashJoinExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), leftExec, rightExec), probeSideTupleFetcher: &probeSideTupleFetcher{}, probeWorkers: make([]*probeWorker, v.Concurrency), buildWorker: &buildWorker{}, @@ -1580,7 +1584,7 @@ func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executo e.isNullEQ = v.IsNullEQ var probeKeys, probeNAKeys, buildKeys, buildNAKeys []*expression.Column - var buildSideExec Executor + var buildSideExec exec.Executor if v.UseOuterToBuild { // update the buildSideEstCount due to changing the build side if v.InnerChildIdx == 1 { @@ -1692,14 +1696,14 @@ func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executo return e } -func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) Executor { +func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) exec.Executor { src := b.build(v.Children()[0]) if b.err != nil { return nil } sessionVars := b.ctx.GetSessionVars() e := &HashAggExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), src), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src), sc: sessionVars.StmtCtx, PartialAggFuncs: make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)), GroupByItems: v.GroupByItems, @@ -1727,7 +1731,7 @@ func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) Executor e.defaultVal = nil } else { if v.IsFinalAgg() { - e.defaultVal = e.ctx.GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool) + e.defaultVal = e.Ctx().GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool) } } for _, aggDesc := range v.AggFuncs { @@ -1774,13 +1778,13 @@ func (b *executorBuilder) buildHashAgg(v *plannercore.PhysicalHashAgg) Executor 
return e } -func (b *executorBuilder) buildStreamAgg(v *plannercore.PhysicalStreamAgg) Executor { +func (b *executorBuilder) buildStreamAgg(v *plannercore.PhysicalStreamAgg) exec.Executor { src := b.build(v.Children()[0]) if b.err != nil { return nil } e := &StreamAggExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), src), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), src), groupChecker: newVecGroupChecker(b.ctx, v.GroupByItems), aggFuncs: make([]aggfuncs.AggFunc, 0, len(v.AggFuncs)), } @@ -1790,7 +1794,7 @@ func (b *executorBuilder) buildStreamAgg(v *plannercore.PhysicalStreamAgg) Execu } else { // Only do this for final agg, see issue #35295, #30923 if v.IsFinalAgg() { - e.defaultVal = e.ctx.GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool) + e.defaultVal = e.Ctx().GetSessionVars().GetNewChunkWithCapacity(retTypes(e), 1, 1, e.AllocPool) } } for i, aggDesc := range v.AggFuncs { @@ -1806,25 +1810,25 @@ func (b *executorBuilder) buildStreamAgg(v *plannercore.PhysicalStreamAgg) Execu return e } -func (b *executorBuilder) buildSelection(v *plannercore.PhysicalSelection) Executor { +func (b *executorBuilder) buildSelection(v *plannercore.PhysicalSelection) exec.Executor { childExec := b.build(v.Children()[0]) if b.err != nil { return nil } e := &SelectionExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), filters: v.Conditions, } return e } -func (b *executorBuilder) buildProjection(v *plannercore.PhysicalProjection) Executor { +func (b *executorBuilder) buildProjection(v *plannercore.PhysicalProjection) exec.Executor { childExec := b.build(v.Children()[0]) if b.err != nil { return nil } e := &ProjectionExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), numWorkers: int64(b.ctx.GetSessionVars().ProjectionConcurrency()), evaluatorSuit: expression.NewEvaluatorSuite(v.Exprs, v.AvoidColumnEvaluator), calculateNoDelay: v.CalculateNoDelay, @@ -1845,15 +1849,15 @@ func (b *executorBuilder) buildProjection(v *plannercore.PhysicalProjection) Exe return e } -func (b *executorBuilder) buildTableDual(v *plannercore.PhysicalTableDual) Executor { +func (b *executorBuilder) buildTableDual(v *plannercore.PhysicalTableDual) exec.Executor { if v.RowCount != 0 && v.RowCount != 1 { b.err = errors.Errorf("buildTableDual failed, invalid row count for dual table: %v", v.RowCount) return nil } - base := newBaseExecutor(b.ctx, v.Schema(), v.ID()) - base.initCap = v.RowCount + base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()) + base.SetInitCap(v.RowCount) e := &TableDualExec{ - baseExecutor: base, + BaseExecutor: base, numDualRows: v.RowCount, } return e @@ -1906,11 +1910,11 @@ func (b *executorBuilder) getSnapshot() (kv.Snapshot, error) { return snapshot, nil } -func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executor { +func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) exec.Executor { switch v.DBName.L { case util.MetricSchemaName.L: return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricRetriever{ table: v.Table, @@ -1921,7 +1925,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo switch v.Table.Name.L { case 
strings.ToLower(infoschema.TableClusterConfig): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterConfigRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), @@ -1929,7 +1933,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableClusterLoad): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), @@ -1938,7 +1942,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableClusterHardware): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), @@ -1947,7 +1951,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableClusterSystemInfo): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterServerInfoRetriever{ extractor: v.Extractor.(*plannercore.ClusterTableExtractor), @@ -1956,7 +1960,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableClusterLog): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &clusterLogRetriever{ extractor: v.Extractor.(*plannercore.ClusterLogTableExtractor), @@ -1964,7 +1968,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableTiDBHotRegionsHistory): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &hotRegionsHistoryRetriver{ extractor: v.Extractor.(*plannercore.HotRegionsHistoryTableExtractor), @@ -1972,7 +1976,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableInspectionResult): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &inspectionResultRetriever{ extractor: v.Extractor.(*plannercore.InspectionResultTableExtractor), @@ -1981,7 +1985,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableInspectionSummary): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &inspectionSummaryRetriever{ table: v.Table, @@ -1991,7 +1995,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableInspectionRules): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + 
BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &inspectionRuleRetriever{ extractor: v.Extractor.(*plannercore.InspectionRuleTableExtractor), @@ -1999,7 +2003,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableMetricSummary): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricsSummaryRetriever{ table: v.Table, @@ -2009,7 +2013,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableMetricSummaryByLabel): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &MetricsSummaryByLabelRetriever{ table: v.Table, @@ -2019,7 +2023,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableTiKVRegionPeers): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tikvRegionPeersRetriever{ extractor: v.Extractor.(*plannercore.TikvRegionPeersExtractor), @@ -2067,7 +2071,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo strings.ToLower(infoschema.ClusterTableMemoryUsageOpsHistory), strings.ToLower(infoschema.TableResourceGroups): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &memtableRetriever{ table: v.Table, @@ -2078,7 +2082,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo case strings.ToLower(infoschema.TableTiDBTrx), strings.ToLower(infoschema.ClusterTableTiDBTrx): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tidbTrxTableRetriever{ table: v.Table, @@ -2087,7 +2091,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableDataLockWaits): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &dataLockWaitsTableRetriever{ table: v.Table, @@ -2097,7 +2101,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo case strings.ToLower(infoschema.TableDeadlocks), strings.ToLower(infoschema.ClusterTableDeadlocks): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &deadlocksTableRetriever{ table: v.Table, @@ -2115,13 +2119,13 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo extractor = v.Extractor.(*plannercore.StatementsSummaryExtractor) } return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: buildStmtSummaryRetriever(b.ctx, v.Table, v.Columns, extractor), } case strings.ToLower(infoschema.TableColumns): return &MemTableReaderExec{ - 
baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &hugeMemTableRetriever{ table: v.Table, @@ -2135,7 +2139,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo memTracker := memory.NewTracker(v.ID(), -1) memTracker.AttachTo(b.ctx.GetSessionVars().StmtCtx.MemTracker) return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &slowQueryRetriever{ table: v.Table, @@ -2146,7 +2150,7 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } case strings.ToLower(infoschema.TableStorageStats): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &tableStorageStatsRetriever{ table: v.Table, @@ -2158,14 +2162,14 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo loc := b.ctx.GetSessionVars().Location() ddlJobRetriever := DDLJobRetriever{TZLoc: loc} return &DDLJobsReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), is: b.is, DDLJobRetriever: ddlJobRetriever, } case strings.ToLower(infoschema.TableTiFlashTables), strings.ToLower(infoschema.TableTiFlashSegments): return &MemTableReaderExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), table: v.Table, retriever: &TiFlashSystemTableRetriever{ table: v.Table, @@ -2177,19 +2181,19 @@ func (b *executorBuilder) buildMemTable(v *plannercore.PhysicalMemTable) Executo } tb, _ := b.is.TableByID(v.Table.ID) return &TableScanExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()), t: tb, columns: v.Columns, } } -func (b *executorBuilder) buildSort(v *plannercore.PhysicalSort) Executor { +func (b *executorBuilder) buildSort(v *plannercore.PhysicalSort) exec.Executor { childExec := b.build(v.Children()[0]) if b.err != nil { return nil } sortExec := SortExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), ByItems: v.ByItems, schema: v.Schema(), } @@ -2197,13 +2201,13 @@ func (b *executorBuilder) buildSort(v *plannercore.PhysicalSort) Executor { return &sortExec } -func (b *executorBuilder) buildTopN(v *plannercore.PhysicalTopN) Executor { +func (b *executorBuilder) buildTopN(v *plannercore.PhysicalTopN) exec.Executor { childExec := b.build(v.Children()[0]) if b.err != nil { return nil } sortExec := SortExec{ - baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), + BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec), ByItems: v.ByItems, schema: v.Schema(), } @@ -2214,7 +2218,7 @@ func (b *executorBuilder) buildTopN(v *plannercore.PhysicalTopN) Executor { } } -func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) Executor { +func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) exec.Executor { var ( innerPlan plannercore.PhysicalPlan outerPlan plannercore.PhysicalPlan @@ -2252,7 +2256,7 @@ func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) Executor { tupleJoiner := newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, 
 		defaultValues, otherConditions, retTypes(leftChild), retTypes(rightChild), nil, false)
 	serialExec := &NestedLoopApplyExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec, innerExec),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec, innerExec),
 		innerExec:    innerExec,
 		outerExec:    outerExec,
 		outerFilter:  outerFilter,
@@ -2267,7 +2271,7 @@ func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) Executor {

 	// try parallel mode
 	if v.Concurrency > 1 {
-		innerExecs := make([]Executor, 0, v.Concurrency)
+		innerExecs := make([]exec.Executor, 0, v.Concurrency)
 		innerFilters := make([]expression.CNFExprs, 0, v.Concurrency)
 		corCols := make([][]*expression.CorrelatedColumn, 0, v.Concurrency)
 		joiners := make([]joiner, 0, v.Concurrency)
@@ -2290,10 +2294,10 @@ func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) Executor {
 				defaultValues, otherConditions, retTypes(leftChild), retTypes(rightChild), nil, false))
 		}
-		allExecs := append([]Executor{outerExec}, innerExecs...)
+		allExecs := append([]exec.Executor{outerExec}, innerExecs...)
 		return &ParallelNestedLoopApplyExec{
-			baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), allExecs...),
+			BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), allExecs...),
 			innerExecs:   innerExecs,
 			outerExec:    outerExec,
 			outerFilter:  outerFilter,
@@ -2308,20 +2312,20 @@ func (b *executorBuilder) buildApply(v *plannercore.PhysicalApply) Executor {
 	return serialExec
 }

-func (b *executorBuilder) buildMaxOneRow(v *plannercore.PhysicalMaxOneRow) Executor {
+func (b *executorBuilder) buildMaxOneRow(v *plannercore.PhysicalMaxOneRow) exec.Executor {
 	childExec := b.build(v.Children()[0])
 	if b.err != nil {
 		return nil
 	}
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
-	base.initCap = 2
-	base.maxChunkSize = 2
-	e := &MaxOneRowExec{baseExecutor: base}
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
+	base.SetInitCap(2)
+	base.SetMaxChunkSize(2)
+	e := &MaxOneRowExec{BaseExecutor: base}
 	return e
 }

-func (b *executorBuilder) buildUnionAll(v *plannercore.PhysicalUnionAll) Executor {
-	childExecs := make([]Executor, len(v.Children()))
+func (b *executorBuilder) buildUnionAll(v *plannercore.PhysicalUnionAll) exec.Executor {
+	childExecs := make([]exec.Executor, len(v.Children()))
 	for i, child := range v.Children() {
 		childExecs[i] = b.build(child)
 		if b.err != nil {
@@ -2329,7 +2333,7 @@ func (b *executorBuilder) buildUnionAll(v *plannercore.PhysicalUnionAll) Executo
 		}
 	}
 	e := &UnionExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExecs...),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExecs...),
 		concurrency:  b.ctx.GetSessionVars().UnionConcurrency(),
 	}
 	return e
@@ -2356,13 +2360,13 @@ func buildHandleColsForSplit(sc *stmtctx.StatementContext, tbInfo *model.TableIn
 	return plannercore.NewIntHandleCols(intCol)
 }

-func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) Executor {
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID())
-	base.initCap = 1
-	base.maxChunkSize = 1
+func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) exec.Executor {
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
+	base.SetInitCap(1)
+	base.SetMaxChunkSize(1)
 	if v.IndexInfo != nil {
 		return &SplitIndexRegionExec{
-			baseExecutor:   base,
+			BaseExecutor:   base,
 			tableInfo:      v.TableInfo,
 			partitionNames: v.PartitionNames,
 			indexInfo:      v.IndexInfo,
@@ -2375,7 +2379,7 @@ func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) Executor
 	handleCols := buildHandleColsForSplit(b.ctx.GetSessionVars().StmtCtx, v.TableInfo)
 	if len(v.ValueLists) > 0 {
 		return &SplitTableRegionExec{
-			baseExecutor:   base,
+			BaseExecutor:   base,
 			tableInfo:      v.TableInfo,
 			partitionNames: v.PartitionNames,
 			handleCols:     handleCols,
@@ -2383,7 +2387,7 @@ func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) Executor
 		}
 	}
 	return &SplitTableRegionExec{
-		baseExecutor:   base,
+		BaseExecutor:   base,
 		tableInfo:      v.TableInfo,
 		partitionNames: v.PartitionNames,
 		handleCols:     handleCols,
@@ -2393,7 +2397,7 @@ func (b *executorBuilder) buildSplitRegion(v *plannercore.SplitRegion) Executor
 	}
 }

-func (b *executorBuilder) buildUpdate(v *plannercore.Update) Executor {
+func (b *executorBuilder) buildUpdate(v *plannercore.Update) exec.Executor {
 	b.inUpdateStmt = true
 	tblID2table := make(map[int64]table.Table, len(v.TblColPosInfos))
 	multiUpdateOnSameTable := make(map[int64]bool)
@@ -2423,8 +2427,8 @@ func (b *executorBuilder) buildUpdate(v *plannercore.Update) Executor {
 	if b.err != nil {
 		return nil
 	}
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID(), selExec)
-	base.initCap = chunk.ZeroCapacity
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), selExec)
+	base.SetInitCap(chunk.ZeroCapacity)
 	var assignFlag []int
 	assignFlag, b.err = getAssignFlag(b.ctx, v, selExec.Schema().Len())
 	if b.err != nil {
@@ -2436,7 +2440,7 @@ func (b *executorBuilder) buildUpdate(v *plannercore.Update) Executor {
 		return nil
 	}
 	updateExec := &UpdateExec{
-		baseExecutor:              base,
+		BaseExecutor:              base,
 		OrderedList:               v.OrderedList,
 		allAssignmentsAreConstant: v.AllAssignmentsAreConstant,
 		virtualAssignmentsOffset:  v.VirtualAssignmentsOffset,
@@ -2474,7 +2478,7 @@ func getAssignFlag(ctx sessionctx.Context, v *plannercore.Update, schemaLen int)
 	return assignFlag, nil
 }

-func (b *executorBuilder) buildDelete(v *plannercore.Delete) Executor {
+func (b *executorBuilder) buildDelete(v *plannercore.Delete) exec.Executor {
 	b.inDeleteStmt = true
 	tblID2table := make(map[int64]table.Table, len(v.TblColPosInfos))
 	for _, info := range v.TblColPosInfos {
@@ -2489,10 +2493,10 @@ func (b *executorBuilder) buildDelete(v *plannercore.Delete) Executor {
 	if b.err != nil {
 		return nil
 	}
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID(), selExec)
-	base.initCap = chunk.ZeroCapacity
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), selExec)
+	base.SetInitCap(chunk.ZeroCapacity)
 	deleteExec := &DeleteExec{
-		baseExecutor:   base,
+		BaseExecutor:   base,
 		tblID2Table:    tblID2table,
 		IsMultiTable:   v.IsMultiTable,
 		tblColPosInfos: v.TblColPosInfos,
@@ -2925,7 +2929,7 @@ func (b *executorBuilder) buildAnalyzeFastColumn(e *AnalyzeExec, task plannercor
 	if !findTask {
 		job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: task.PartitionName, JobInfo: "fast analyze columns"}
 		var concurrency int
-		concurrency, b.err = getBuildStatsConcurrency(e.ctx)
+		concurrency, b.err = getBuildStatsConcurrency(e.Ctx())
 		if b.err != nil {
 			return
 		}
@@ -2973,7 +2977,7 @@ func (b *executorBuilder) buildAnalyzeFastIndex(e *AnalyzeExec, task plannercore
 	if !findTask {
 		job := &statistics.AnalyzeJob{DBName: task.DBName, TableName: task.TableName, PartitionName: "fast analyze index " + task.IndexInfo.Name.O}
 		var concurrency int
-		concurrency, b.err = getBuildStatsConcurrency(e.ctx)
+		concurrency, b.err = getBuildStatsConcurrency(e.Ctx())
 		if b.err != nil {
 			return
 		}
@@ -3008,9 +3012,9 @@ func (b *executorBuilder) buildAnalyzeFastIndex(e *AnalyzeExec, task plannercore
 	}
 }
-func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) Executor {
+func (b *executorBuilder) buildAnalyze(v *plannercore.Analyze) exec.Executor {
 	e := &AnalyzeExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		tasks:        make([]*analyzeTask, 0, len(v.ColTasks)+len(v.IdxTasks)),
 		opts:         v.Opts,
 		OptionsMap:   v.OptionsMap,
@@ -3115,7 +3119,7 @@ func (b *executorBuilder) newDataReaderBuilder(p plannercore.PhysicalPlan) (*dat
 	}, nil
 }

-func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin) Executor {
+func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin) exec.Executor {
 	outerExec := b.build(v.Children()[1-v.InnerChildIdx])
 	if b.err != nil {
 		return nil
@@ -3194,7 +3198,7 @@ func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin)
 	}

 	e := &IndexLookUpJoin{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec),
 		outerCtx: outerCtx{
 			rowTypes:  outerTypes,
 			hashTypes: outerHashTypes,
@@ -3251,7 +3255,7 @@ func (b *executorBuilder) buildIndexLookUpJoin(v *plannercore.PhysicalIndexJoin)
 	return e
 }

-func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndexMergeJoin) Executor {
+func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndexMergeJoin) exec.Executor {
 	outerExec := b.build(v.Children()[1-v.InnerChildIdx])
 	if b.err != nil {
 		return nil
@@ -3310,7 +3314,7 @@ func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndex
 	}

 	e := &IndexLookUpMergeJoin{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), outerExec),
 		outerMergeCtx: outerMergeCtx{
 			rowTypes: outerTypes,
 			filter:   outerFilter,
@@ -3337,7 +3341,7 @@ func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndex
 		lastColHelper: v.CompareFilters,
 	}
 	childrenUsedSchema := markChildrenUsedCols(v.Schema(), v.Children()[0].Schema(), v.Children()[1].Schema())
-	joiners := make([]joiner, e.ctx.GetSessionVars().IndexLookupJoinConcurrency())
+	joiners := make([]joiner, e.Ctx().GetSessionVars().IndexLookupJoinConcurrency())
 	for i := 0; i < len(joiners); i++ {
 		joiners[i] = newJoiner(b.ctx, v.JoinType, v.InnerChildIdx == 0, defaultValues, v.OtherConditions, leftTypes, rightTypes, childrenUsedSchema, false)
 	}
@@ -3345,7 +3349,7 @@ func (b *executorBuilder) buildIndexLookUpMergeJoin(v *plannercore.PhysicalIndex
 	return e
 }

-func (b *executorBuilder) buildIndexNestedLoopHashJoin(v *plannercore.PhysicalIndexHashJoin) Executor {
+func (b *executorBuilder) buildIndexNestedLoopHashJoin(v *plannercore.PhysicalIndexHashJoin) exec.Executor {
 	join := b.buildIndexLookUpJoin(&(v.PhysicalIndexJoin))
 	if b.err != nil {
 		return nil
@@ -3355,7 +3359,7 @@ func (b *executorBuilder) buildIndexNestedLoopHashJoin(v *plannercore.PhysicalIn
 		IndexLookUpJoin: *e,
 		keepOuterOrder:  v.KeepOuterOrder,
 	}
-	concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency()
+	concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency()
 	idxHash.joiners = make([]joiner, concurrency)
 	for i := 0; i < concurrency; i++ {
 		idxHash.joiners[i] = e.joiner.Clone()
@@ -3403,7 +3407,7 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea
 	}
 	paging := b.ctx.GetSessionVars().EnablePaging
 	e := &TableReaderExecutor{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		dagPB:        dagReq,
 		startTS:      startTS,
 		txnScope:     b.txnScope,
@@ -3427,7 +3431,7 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea
 	if containsLimit(dagReq.Executors) {
 		e.feedback = statistics.NewQueryFeedback(0, nil, 0, ts.Desc)
 	} else {
-		e.feedback = statistics.NewQueryFeedback(getFeedbackStatsTableID(e.ctx, tbl), ts.Hist, int64(ts.StatsCount()), ts.Desc)
+		e.feedback = statistics.NewQueryFeedback(getFeedbackStatsTableID(e.Ctx(), tbl), ts.Hist, int64(ts.StatsCount()), ts.Desc)
 	}
 	collect := statistics.CollectFeedback(b.ctx.GetSessionVars().StmtCtx, e.feedback, len(ts.Ranges))
 	// Do not collect the feedback when the table is the partition table.
@@ -3453,7 +3457,7 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea
 	return e, nil
 }

-func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Executor {
+func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) exec.Executor {
 	startTs, err := b.getSnapshotTS()
 	if err != nil {
 		b.err = err
@@ -3461,7 +3465,7 @@ func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Exe
 	}
 	gather := &MPPGather{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		is:           b.is,
 		originalPlan: v.GetTablePlan(),
 		startTS:      startTs,
@@ -3524,7 +3528,7 @@ func (b *executorBuilder) buildMPPGather(v *plannercore.PhysicalTableReader) Exe
 // buildTableReader builds a table reader executor. It first build a no range table reader,
 // and then update it ranges from table scan plan.
-func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) Executor {
+func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) exec.Executor {
 	failpoint.Inject("checkUseMPP", func(val failpoint.Value) {
 		if !b.ctx.GetSessionVars().InRestrictedSQL && val.(bool) != useMPPExecution(b.ctx, v) {
 			if val.(bool) {
@@ -3586,7 +3590,7 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E
 	}

 	if len(partitions) == 0 {
-		return &TableDualExec{baseExecutor: *ret.base()}
+		return &TableDualExec{BaseExecutor: *ret.Base()}
 	}

 	// Sort the partition is necessary to make the final multiple partition key ranges ordered.
@@ -3740,7 +3744,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea
 	}
 	paging := b.ctx.GetSessionVars().EnablePaging
 	e := &IndexReaderExecutor{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		dagPB:        dagReq,
 		startTS:      startTS,
 		txnScope:     b.txnScope,
@@ -3788,7 +3792,7 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea
 	return e, nil
 }

-func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) Executor {
+func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) exec.Executor {
 	is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan)
 	if err := b.validCanReadTemporaryOrCacheTable(is.Table); err != nil {
 		b.err = err
@@ -3945,7 +3949,7 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn
 	}

 	e := &IndexLookUpExecutor{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		dagPB:        indexReq,
 		startTS:      startTS,
 		table:        tbl,
@@ -3972,7 +3976,7 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn
 	if containsLimit(indexReq.Executors) {
 		e.feedback = statistics.NewQueryFeedback(0, nil, 0, is.Desc)
 	} else {
-		e.feedback = statistics.NewQueryFeedback(getFeedbackStatsTableID(e.ctx, tbl), is.Hist, int64(is.StatsCount()), is.Desc)
+		e.feedback = statistics.NewQueryFeedback(getFeedbackStatsTableID(e.Ctx(), tbl), is.Hist, int64(is.StatsCount()), is.Desc)
 	}
 	// Do not collect the feedback for table request.
 	collectTable := false
@@ -3999,7 +4003,7 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn
 	return e, nil
 }

-func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLookUpReader) Executor {
+func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLookUpReader) exec.Executor {
 	if b.Ti != nil {
 		b.Ti.UseTableLookUp.Store(true)
 	}
@@ -4129,7 +4133,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
 	paging := b.ctx.GetSessionVars().EnablePaging
 	e := &IndexMergeReaderExecutor{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		dagPBs:       partialReqs,
 		startTS:      startTS,
 		table:        tblInfo,
@@ -4158,7 +4162,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd
 	return e, nil
 }

-func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMergeReader) Executor {
+func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMergeReader) exec.Executor {
 	if b.Ti != nil {
 		b.Ti.UseIndexMerge = true
 		b.Ti.UseTableLookUp.Store(true)
@@ -4229,7 +4233,7 @@ type dataReaderBuilder struct {

 type mockPhysicalIndexReader struct {
 	plannercore.PhysicalPlan

-	e Executor
+	e exec.Executor
 }

 // MemoryUsage of mockPhysicalIndexReader is only for testing
@@ -4238,12 +4242,12 @@ func (p *mockPhysicalIndexReader) MemoryUsage() (sum int64) {
 }

 func (builder *dataReaderBuilder) buildExecutorForIndexJoin(ctx context.Context, lookUpContents []*indexJoinLookUpContent,
-	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	return builder.buildExecutorForIndexJoinInternal(ctx, builder.Plan, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
 }

 func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.Context, plan plannercore.Plan, lookUpContents []*indexJoinLookUpContent,
-	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	IndexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	switch v := plan.(type) {
 	case *plannercore.PhysicalTableReader:
 		return builder.buildTableReaderForIndexJoin(ctx, v, lookUpContents, IndexRanges, keyOff2IdxOff, cwc, canReorderHandles, memTracker, interruptSignal)
@@ -4269,7 +4273,7 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.
 			return nil, err
 		}
 		exec := &SelectionExec{
-			baseExecutor: newBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
+			BaseExecutor: exec.NewBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
 			filters:      v.Conditions,
 		}
 		err = exec.open(ctx)
@@ -4282,7 +4286,7 @@ func (builder *dataReaderBuilder) buildExecutorForIndexJoinInternal(ctx context.

 func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context, v *plannercore.PhysicalUnionScan,
 	values []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int,
-	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	childBuilder, err := builder.newDataReaderBuilder(v.Children()[0])
 	if err != nil {
 		return nil, err
@@ -4302,7 +4306,7 @@ func (builder *dataReaderBuilder) buildUnionScanForIndexJoin(ctx context.Context

 func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalTableReader,
 	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int,
-	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	cwc *plannercore.ColWithCmpFuncManager, canReorderHandles bool, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	e, err := buildNoRangeTableReader(builder.executorBuilder, v)
 	if !canReorderHandles {
 		// `canReorderHandles` is set to false only in IndexMergeJoin. IndexMergeJoin will trigger a dead loop problem
@@ -4316,7 +4320,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 	tbInfo := e.table.Meta()
 	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
 		if v.IsCommonHandle {
-			kvRanges, err := buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
+			kvRanges, err := buildKvRangesForIndexJoin(e.Ctx(), getPhysicalTableID(e.table), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
 			if err != nil {
 				return nil, err
 			}
@@ -4351,7 +4355,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 			for i, data := range content.keys {
 				locateKey[keyColOffsets[i]] = data
 			}
-			p, err := pt.GetPartitionByRow(e.ctx, locateKey)
+			p, err := pt.GetPartitionByRow(e.Ctx(), locateKey)
 			if table.ErrNoPartitionForGivenValue.Equal(err) {
 				continue
 			}
@@ -4366,7 +4370,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 		}
 		for pid, contents := range lookUpContentsByPID {
 			// buildKvRanges for each partition.
-			tmp, err := buildKvRangesForIndexJoin(e.ctx, pid, -1, contents, indexRanges, keyOff2IdxOff, cwc, nil, interruptSignal)
+			tmp, err := buildKvRangesForIndexJoin(e.Ctx(), pid, -1, contents, indexRanges, keyOff2IdxOff, cwc, nil, interruptSignal)
 			if err != nil {
 				return nil, err
 			}
@@ -4375,7 +4379,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 	} else {
 		kvRanges = make([]kv.KeyRange, 0, len(usedPartitions)*len(lookUpContents))
 		for _, p := range usedPartitionList {
-			tmp, err := buildKvRangesForIndexJoin(e.ctx, p.GetPhysicalID(), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
+			tmp, err := buildKvRangesForIndexJoin(e.Ctx(), p.GetPhysicalID(), -1, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
 			if err != nil {
 				return nil, err
 			}
@@ -4398,7 +4402,7 @@ func (builder *dataReaderBuilder) buildTableReaderForIndexJoin(ctx context.Conte
 		for i, data := range content.keys {
 			locateKey[keyColOffsets[i]] = data
 		}
-		p, err := pt.GetPartitionByRow(e.ctx, locateKey)
+		p, err := pt.GetPartitionByRow(e.Ctx(), locateKey)
 		if table.ErrNoPartitionForGivenValue.Equal(err) {
 			continue
 		}
@@ -4522,18 +4526,18 @@ func (builder *dataReaderBuilder) buildTableReaderBase(ctx context.Context, e *T
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
 		SetIsStaleness(e.isStaleness).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(e.ctx.GetInfoSchema()).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &reqBuilderWithRange.Request, e.netDataSize)).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &reqBuilderWithRange.Request, e.netDataSize)).
 		SetPaging(e.paging).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID).
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID).
 		Build()
 	if err != nil {
 		return nil, err
 	}
 	e.kvRanges = kvReq.KeyRanges.AppendSelfTo(e.kvRanges)
 	e.resultHandler = &tableResultHandler{}
-	result, err := builder.SelectResult(ctx, builder.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+	result, err := builder.SelectResult(ctx, builder.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 	if err != nil {
 		return nil, err
 	}
@@ -4560,21 +4564,21 @@ func (builder *dataReaderBuilder) buildTableReaderFromHandles(ctx context.Contex
 	return builder.buildTableReaderBase(ctx, e, b)
 }

-func (builder *dataReaderBuilder) buildTableReaderFromKvRanges(ctx context.Context, e *TableReaderExecutor, ranges []kv.KeyRange) (Executor, error) {
+func (builder *dataReaderBuilder) buildTableReaderFromKvRanges(ctx context.Context, e *TableReaderExecutor, ranges []kv.KeyRange) (exec.Executor, error) {
 	var b distsql.RequestBuilder
 	b.SetKeyRanges(ranges)
 	return builder.buildTableReaderBase(ctx, e, b)
 }

 func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexReader,
-	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memoryTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memoryTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	e, err := buildNoRangeIndexReader(builder.executorBuilder, v)
 	if err != nil {
 		return nil, err
 	}
 	tbInfo := e.table.Meta()
 	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-		kvRanges, err := buildKvRangesForIndexJoin(e.ctx, e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal)
+		kvRanges, err := buildKvRangesForIndexJoin(e.Ctx(), e.physicalTableID, e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memoryTracker, interruptSignal)
 		if err != nil {
 			return nil, err
 		}
@@ -4597,7 +4601,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
 			return nil, err
 		}
-		if e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
+		if e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
 			return nil, err
 		}
 		if err := e.Open(ctx); err != nil {
@@ -4613,7 +4617,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
 		}
 		if len(usedPartition) != 0 {
 			if canPrune {
-				rangeMap, err := buildIndexRangeForEachPartition(e.ctx, usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
+				rangeMap, err := buildIndexRangeForEachPartition(e.Ctx(), usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
 				if err != nil {
 					return nil, err
 				}
@@ -4622,7 +4626,7 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
 				e.partRangeMap = rangeMap
 			} else {
 				e.partitions = usedPartition
-				if e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
+				if e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc); err != nil {
 					return nil, err
 				}
 			}
@@ -4631,13 +4635,13 @@ func (builder *dataReaderBuilder) buildIndexReaderForIndexJoin(ctx context.Conte
 		}
 		return e, nil
 	}
-	ret := &TableDualExec{baseExecutor: *e.base()}
+	ret := &TableDualExec{BaseExecutor: *e.Base()}
 	err = ret.Open(ctx)
 	return ret, err
 }

 func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context.Context, v *plannercore.PhysicalIndexLookUpReader,
-	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	if builder.Ti != nil {
 		builder.Ti.UseTableLookUp.Store(true)
 	}
@@ -4648,7 +4652,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 	tbInfo := e.table.Meta()
 	if tbInfo.GetPartitionInfo() == nil || !builder.ctx.GetSessionVars().StmtCtx.UseDynamicPartitionPrune() {
-		e.kvRanges, err = buildKvRangesForIndexJoin(e.ctx, getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
+		e.kvRanges, err = buildKvRangesForIndexJoin(e.Ctx(), getPhysicalTableID(e.table), e.index.ID, lookUpContents, indexRanges, keyOff2IdxOff, cwc, memTracker, interruptSignal)
 		if err != nil {
 			return nil, err
 		}
@@ -4671,7 +4675,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 		if err != nil {
 			return nil, err
 		}
-		e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
+		e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc)
 		if err != nil {
 			return nil, err
 		}
@@ -4688,7 +4692,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 		}
 		if len(usedPartition) != 0 {
 			if canPrune {
-				rangeMap, err := buildIndexRangeForEachPartition(e.ctx, usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
+				rangeMap, err := buildIndexRangeForEachPartition(e.Ctx(), usedPartition, contentPos, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
 				if err != nil {
 					return nil, err
 				}
@@ -4697,7 +4701,7 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 				e.partitionRangeMap = rangeMap
 			} else {
 				e.prunedPartitions = usedPartition
-				e.ranges, err = buildRangesForIndexJoin(e.ctx, lookUpContents, indexRanges, keyOff2IdxOff, cwc)
+				e.ranges, err = buildRangesForIndexJoin(e.Ctx(), lookUpContents, indexRanges, keyOff2IdxOff, cwc)
 				if err != nil {
 					return nil, err
 				}
@@ -4708,15 +4712,15 @@ func (builder *dataReaderBuilder) buildIndexLookUpReaderForIndexJoin(ctx context
 		}
 		return e, err
 	}
-	ret := &TableDualExec{baseExecutor: *e.base()}
+	ret := &TableDualExec{BaseExecutor: *e.Base()}
 	err = ret.Open(ctx)
 	return ret, err
 }

 func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Context, v *plannercore.PhysicalProjection,
-	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (Executor, error) {
+	lookUpContents []*indexJoinLookUpContent, indexRanges []*ranger.Range, keyOff2IdxOff []int, cwc *plannercore.ColWithCmpFuncManager, memTracker *memory.Tracker, interruptSignal *atomic.Value) (exec.Executor, error) {
 	var (
-		childExec Executor
+		childExec exec.Executor
 		err       error
 	)
 	switch op := v.Children()[0].(type) {
@@ -4733,7 +4737,7 @@ func (builder *dataReaderBuilder) buildProjectionForIndexJoin(ctx context.Contex
 	}
 	e := &ProjectionExec{
-		baseExecutor:     newBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
+		BaseExecutor:     exec.NewBaseExecutor(builder.ctx, v.Schema(), v.ID(), childExec),
 		numWorkers:       int64(builder.ctx.GetSessionVars().ProjectionConcurrency()),
 		evaluatorSuit:    expression.NewEvaluatorSuite(v.Exprs, v.AvoidColumnEvaluator),
 		calculateNoDelay: v.CalculateNoDelay,
@@ -4866,12 +4870,12 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l
 	return tmpKeyRanges.FirstPartitionRange(), err
 }

-func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor {
+func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) exec.Executor {
 	childExec := b.build(v.Children()[0])
 	if b.err != nil {
 		return nil
 	}
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID(), childExec)
 	groupByItems := make([]expression.Expression, 0, len(v.PartitionBy))
 	for _, item := range v.PartitionBy {
 		groupByItems = append(groupByItems, item.Col)
@@ -4898,7 +4902,7 @@ func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor {

 	if b.ctx.GetSessionVars().EnablePipelinedWindowExec {
 		exec := &PipelinedWindowExec{
-			baseExecutor:   base,
+			BaseExecutor:   base,
 			groupChecker:   newVecGroupChecker(b.ctx, groupByItems),
 			numWindowFuncs: len(v.WindowFuncDescs),
 		}
@@ -4956,7 +4960,7 @@ func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor {
 			expectedCmpResult: cmpResult,
 		}
 	}
-	return &WindowExec{baseExecutor: base,
+	return &WindowExec{BaseExecutor: base,
 		processor:      processor,
 		groupChecker:   newVecGroupChecker(b.ctx, groupByItems),
 		numWindowFuncs: len(v.WindowFuncDescs),
@@ -4964,9 +4968,9 @@ func (b *executorBuilder) buildWindow(v *plannercore.PhysicalWindow) Executor {
 }

 func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleExec {
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID())
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
 	shuffle := &ShuffleExec{
-		baseExecutor: base,
+		BaseExecutor: base,
 		concurrency:  v.Concurrency,
 	}

@@ -4987,7 +4991,7 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
 	shuffle.splitters = splitters

 	// 2. initialize the data sources (build the data sources from physical plan to executors)
-	shuffle.dataSources = make([]Executor, len(v.DataSources))
+	shuffle.dataSources = make([]exec.Executor, len(v.DataSources))
 	for i, dataSource := range v.DataSources {
 		shuffle.dataSources[i] = b.build(dataSource)
 		if b.err != nil {
@@ -5012,7 +5016,7 @@ func (b *executorBuilder) buildShuffle(v *plannercore.PhysicalShuffle) *ShuffleE
 	receivers := make([]*shuffleReceiver, len(v.DataSources))
 	for j, dataSource := range v.DataSources {
 		receivers[j] = &shuffleReceiver{
-			baseExecutor: newBaseExecutor(b.ctx, dataSource.Schema(), stubs[j].ID()),
+			BaseExecutor: exec.NewBaseExecutor(b.ctx, dataSource.Schema(), stubs[j].ID()),
 		}
 	}

@@ -5041,12 +5045,12 @@ func (b *executorBuilder) buildShuffleReceiverStub(v *plannercore.PhysicalShuffl
 	return (*shuffleReceiver)(v.Receiver)
 }

-func (b *executorBuilder) buildSQLBindExec(v *plannercore.SQLBindPlan) Executor {
-	base := newBaseExecutor(b.ctx, v.Schema(), v.ID())
-	base.initCap = chunk.ZeroCapacity
+func (b *executorBuilder) buildSQLBindExec(v *plannercore.SQLBindPlan) exec.Executor {
+	base := exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())
+	base.SetInitCap(chunk.ZeroCapacity)

 	e := &SQLBindExec{
-		baseExecutor: base,
+		BaseExecutor: base,
 		sqlBindOp:    v.SQLBindOp,
 		normdOrigSQL: v.NormdOrigSQL,
 		bindSQL:      v.BindSQL,
@@ -5109,7 +5113,7 @@ func NewRowDecoder(ctx sessionctx.Context, schema *expression.Schema, tbl *model
 	return rowcodec.NewChunkDecoder(reqCols, pkCols, defVal, ctx.GetSessionVars().Location())
 }

-func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan) Executor {
+func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan) exec.Executor {
 	var err error
 	if err = b.validCanReadTemporaryOrCacheTable(plan.TblInfo); err != nil {
 		b.err = err
@@ -5125,7 +5129,7 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan
 	decoder := NewRowDecoder(b.ctx, plan.Schema(), plan.TblInfo)
 	e := &BatchPointGetExec{
-		baseExecutor: newBaseExecutor(b.ctx, plan.Schema(), plan.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, plan.Schema(), plan.ID()),
 		tblInfo:      plan.TblInfo,
 		idxInfo:      plan.IndexInfo,
 		rowDecoder:   decoder,
@@ -5146,11 +5150,11 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan
 		b.err = err
 		return nil
 	}
-	if e.ctx.GetSessionVars().IsReplicaReadClosestAdaptive() {
-		e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.ctx, plan.GetAvgRowSize()))
+	if e.Ctx().GetSessionVars().IsReplicaReadClosestAdaptive() {
+		e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.Ctx(), plan.GetAvgRowSize()))
 	}
 	e.snapshot.SetOption(kv.ResourceGroupName, b.ctx.GetSessionVars().ResourceGroupName)
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		snapshotStats := &txnsnapshot.SnapshotRuntimeStats{}
 		e.stats = &runtimeStatsWithSnapshot{
 			SnapshotRuntimeStats: snapshotStats,
@@ -5165,7 +5169,7 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan
 	failpoint.Inject("assertBatchPointReplicaOption", func(val failpoint.Value) {
 		assertScope := val.(string)
-		if e.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != b.readReplicaScope {
+		if e.Ctx().GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != b.readReplicaScope {
 			panic("batch point get replica option fail")
 		}
 	})
@@ -5212,7 +5216,7 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan
 			if datumsContainNull(value) {
 				continue
 			}
-			handleBytes, err := EncodeUniqueIndexValuesForKey(e.ctx, e.tblInfo, plan.IndexInfo, value)
+			handleBytes, err := EncodeUniqueIndexValuesForKey(e.Ctx(), e.tblInfo, plan.IndexInfo, value)
 			if err != nil {
 				if kv.ErrNotExist.Equal(err) {
 					continue
@@ -5235,8 +5239,8 @@ func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan
 		e.handles = handles
 		capacity = len(e.handles)
 	}
-	e.base().initCap = capacity
-	e.base().maxChunkSize = capacity
+	e.Base().SetInitCap(capacity)
+	e.Base().SetMaxChunkSize(capacity)
 	e.buildVirtualColumnInfo()
 	return e
 }
@@ -5274,12 +5278,12 @@ func getFeedbackStatsTableID(ctx sessionctx.Context, t table.Table) int64 {
 	return t.Meta().ID
 }

-func (b *executorBuilder) buildAdminShowTelemetry(v *plannercore.AdminShowTelemetry) Executor {
-	return &AdminShowTelemetryExec{baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID())}
+func (b *executorBuilder) buildAdminShowTelemetry(v *plannercore.AdminShowTelemetry) exec.Executor {
+	return &AdminShowTelemetryExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())}
 }

-func (b *executorBuilder) buildAdminResetTelemetryID(v *plannercore.AdminResetTelemetryID) Executor {
-	return &AdminResetTelemetryIDExec{baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID())}
+func (b *executorBuilder) buildAdminResetTelemetryID(v *plannercore.AdminResetTelemetryID) exec.Executor {
+	return &AdminResetTelemetryIDExec{BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID())}
 }

 func (builder *dataReaderBuilder) partitionPruning(tbl table.PartitionedTable, conds []expression.Expression, partitionNames []model.CIStr,
@@ -5366,7 +5370,7 @@ func (b *executorBuilder) buildTableSample(v *plannercore.PhysicalTableSample) *
 		return nil
 	}
 	e := &TableSampleExecutor{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		table:        v.TableInfo,
 		startTS:      startTS,
 	}
@@ -5381,13 +5385,13 @@ func (b *executorBuilder) buildTableSample(v *plannercore.PhysicalTableSample) *
 	} else if v.TableSampleInfo.AstNode.SampleMethod == ast.SampleMethodTypeTiDBRegion {
 		e.sampler = newTableRegionSampler(
 			b.ctx, v.TableInfo, startTS, v.TableSampleInfo.Partitions, v.Schema(),
-			v.TableSampleInfo.FullSchema, e.retFieldTypes, v.Desc)
+			v.TableSampleInfo.FullSchema, e.RetFieldTypes(), v.Desc)
 	}

 	return e
 }

-func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) Executor {
+func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) exec.Executor {
 	if b.Ti != nil {
 		b.Ti.UseNonRecursive = true
 	}
@@ -5425,7 +5429,7 @@ func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) Executor {
 		}

 		// Setup storages.
-		tps := seedExec.base().retFieldTypes
+		tps := seedExec.Base().RetFieldTypes()
 		resTbl = cteutil.NewStorageRowContainer(tps, chkSize)
 		if err := resTbl.OpenAndRef(); err != nil {
 			b.err = err
@@ -5439,7 +5443,7 @@ func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) Executor {
 		storageMap[v.CTE.IDForStorage] = &CTEStorages{ResTbl: resTbl, IterInTbl: iterInTbl}

 		// Build recursive part.
-	var recursiveExec Executor
+	var recursiveExec exec.Executor
 	if v.RecurPlan != nil {
 		recursiveExec = b.build(v.RecurPlan)
 		if b.err != nil {
@@ -5479,12 +5483,12 @@ func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) Executor {
 	}

 	return &CTEExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		producer:     producer,
 	}
 }

-func (b *executorBuilder) buildCTETableReader(v *plannercore.PhysicalCTETable) Executor {
+func (b *executorBuilder) buildCTETableReader(v *plannercore.PhysicalCTETable) exec.Executor {
 	storageMap, ok := b.ctx.GetSessionVars().StmtCtx.CTEStorageMap.(map[int]*CTEStorages)
 	if !ok {
 		b.err = errors.New("type assertion for CTEStorageMap failed")
@@ -5496,7 +5500,7 @@ func (b *executorBuilder) buildCTETableReader(v *plannercore.PhysicalCTETable) E
 		return nil
 	}
 	return &CTETableReaderExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		iterInTbl:    storages.IterInTbl,
 		chkIdx:       0,
 	}
@@ -5567,7 +5571,7 @@ func (b *executorBuilder) getCacheTable(tblInfo *model.TableInfo, startTS uint64
 	return nil
 }

-func (b *executorBuilder) buildCompactTable(v *plannercore.CompactTable) Executor {
+func (b *executorBuilder) buildCompactTable(v *plannercore.CompactTable) exec.Executor {
 	if v.ReplicaKind != ast.CompactReplicaKindTiFlash && v.ReplicaKind != ast.CompactReplicaKindAll {
 		b.err = errors.Errorf("compact %v replica is not supported", strings.ToLower(string(v.ReplicaKind)))
 		return nil
@@ -5607,7 +5611,7 @@ func (b *executorBuilder) buildCompactTable(v *plannercore.CompactTable) Executo
 	}

 	return &CompactTableTiFlashExec{
-		baseExecutor: newBaseExecutor(b.ctx, v.Schema(), v.ID()),
+		BaseExecutor: exec.NewBaseExecutor(b.ctx, v.Schema(), v.ID()),
 		tableInfo:    v.TableInfo,
 		partitionIDs: partitionIDs,
 		tikvStore:    tikvStore,
diff --git a/executor/calibrate_resource.go b/executor/calibrate_resource.go
index f61c9ae5702b9..0f0691017dcae 100644
--- a/executor/calibrate_resource.go
+++ b/executor/calibrate_resource.go
@@ -24,6 +24,7 @@ import (
 	"github.com/docker/go-units"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/domain"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
@@ -113,7 +114,7 @@ const (
 )

 type calibrateResourceExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	optionList   []*ast.DynamicCalibrateResourceOption
 	workloadType ast.CalibrateResourceType
 	done         bool
@@ -125,13 +126,13 @@ func (e *calibrateResourceExec) parseCalibrateDuration(ctx context.Context) (sta
 	for _, op := range e.optionList {
 		switch op.Tp {
 		case ast.CalibrateStartTime:
-			ts, err = staleread.CalculateAsOfTsExpr(ctx, e.ctx, op.Ts)
+			ts, err = staleread.CalculateAsOfTsExpr(ctx, e.Ctx(), op.Ts)
 			if err != nil {
 				return
 			}
 			startTime = oracle.GetTimeFromTS(ts)
 		case ast.CalibrateEndTime:
-			ts, err = staleread.CalculateAsOfTsExpr(ctx, e.ctx, op.Ts)
+			ts, err = staleread.CalculateAsOfTsExpr(ctx, e.Ctx(), op.Ts)
 			if err != nil {
 				return
 			}
@@ -175,7 +176,7 @@ func (e *calibrateResourceExec) Next(ctx context.Context, req *chunk.Chunk) erro
 	}
 	e.done = true

-	exec := e.ctx.(sqlexec.RestrictedSQLExecutor)
+	exec := e.Ctx().(sqlexec.RestrictedSQLExecutor)
 	ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnOthers)
 	if len(e.optionList) > 0 {
 		return e.dynamicCalibrate(ctx, req, exec)
@@ -193,8 +194,8 @@ func (e *calibrateResourceExec) dynamicCalibrate(ctx context.Context, req *chunk
 	if err != nil {
 		return err
 	}
-	startTime := startTs.In(e.ctx.GetSessionVars().Location()).Format(time.DateTime)
-	endTime := endTs.In(e.ctx.GetSessionVars().Location()).Format(time.DateTime)
+	startTime := startTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)
+	endTime := endTs.In(e.Ctx().GetSessionVars().Location()).Format(time.DateTime)

 	totalKVCPUQuota, err := getTiKVTotalCPUQuota(ctx, exec)
 	if err != nil {
@@ -204,15 +205,15 @@ func (e *calibrateResourceExec) dynamicCalibrate(ctx context.Context, req *chunk
 	if err != nil {
 		return errNoCPUQuotaMetrics.FastGenByArgs(err.Error())
 	}
-	rus, err := getRUPerSec(ctx, e.ctx, exec, startTime, endTime)
+	rus, err := getRUPerSec(ctx, e.Ctx(), exec, startTime, endTime)
 	if err != nil {
 		return err
 	}
-	tikvCPUs, err := getComponentCPUUsagePerSec(ctx, e.ctx, exec, "tikv", startTime, endTime)
+	tikvCPUs, err := getComponentCPUUsagePerSec(ctx, e.Ctx(), exec, "tikv", startTime, endTime)
 	if err != nil {
 		return err
 	}
-	tidbCPUs, err := getComponentCPUUsagePerSec(ctx, e.ctx, exec, "tidb", startTime, endTime)
+	tidbCPUs, err := getComponentCPUUsagePerSec(ctx, e.Ctx(), exec, "tidb", startTime, endTime)
 	if err != nil {
 		return err
 	}
@@ -271,7 +272,7 @@ func (e *calibrateResourceExec) staticCalibrate(ctx context.Context, req *chunk.
 	if !variable.EnableResourceControl.Load() {
 		return infoschema.ErrResourceGroupSupportDisabled
 	}
-	resourceGroupCtl := domain.GetDomain(e.ctx).ResourceGroupsController()
+	resourceGroupCtl := domain.GetDomain(e.Ctx()).ResourceGroupsController()
 	// first fetch the ru settings config.
 	if resourceGroupCtl == nil {
 		return errors.New("resource group controller is not initialized")
diff --git a/executor/change.go b/executor/change.go
index df2a211d141fd..e61854b318286 100644
--- a/executor/change.go
+++ b/executor/change.go
@@ -20,6 +20,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/config"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/tidb-binlog/node"
 	"github.com/pingcap/tidb/util/chunk"
@@ -27,7 +28,7 @@ import (

 // ChangeExec represents a change executor.
 type ChangeExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	*ast.ChangeStmt
 }

diff --git a/executor/checksum.go b/executor/checksum.go
index 845c1b85d4c66..bb6a64bf9cbee 100644
--- a/executor/checksum.go
+++ b/executor/checksum.go
@@ -19,6 +19,7 @@ import (
 	"strconv"

 	"github.com/pingcap/tidb/distsql"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/sessionctx"
@@ -30,11 +31,11 @@ import (
 	"go.uber.org/zap"
 )

-var _ Executor = &ChecksumTableExec{}
+var _ exec.Executor = &ChecksumTableExec{}

 // ChecksumTableExec represents ChecksumTable executor.
 type ChecksumTableExec struct {
-	baseExecutor
+	exec.BaseExecutor

 	tables map[int64]*checksumContext
 	done   bool
@@ -42,11 +43,11 @@ type ChecksumTableExec struct {

 // Open implements the Executor Open interface.
 func (e *ChecksumTableExec) Open(ctx context.Context) error {
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}

-	concurrency, err := getChecksumTableConcurrency(e.ctx)
+	concurrency, err := getChecksumTableConcurrency(e.Ctx())
 	if err != nil {
 		return err
 	}
@@ -84,7 +85,7 @@ func (e *ChecksumTableExec) Open(ctx context.Context) error {
 }

 // Next implements the Executor Next interface.
-func (e *ChecksumTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
+func (e *ChecksumTableExec) Next(_ context.Context, req *chunk.Chunk) error {
 	req.Reset()
 	if e.done {
 		return nil
@@ -103,7 +104,7 @@ func (e *ChecksumTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
 func (e *ChecksumTableExec) buildTasks() ([]*checksumTask, error) {
 	var tasks []*checksumTask
 	for id, t := range e.tables {
-		reqs, err := t.BuildRequests(e.ctx)
+		reqs, err := t.BuildRequests(e.Ctx())
 		if err != nil {
 			return nil, err
 		}
@@ -128,8 +129,8 @@ func (e *ChecksumTableExec) checksumWorker(taskCh <-chan *checksumTask, resultCh
 }

 func (e *ChecksumTableExec) handleChecksumRequest(req *kv.Request) (resp *tipb.ChecksumResponse, err error) {
-	ctx := distsql.WithSQLKvExecCounterInterceptor(context.TODO(), e.ctx.GetSessionVars().StmtCtx)
-	res, err := distsql.Checksum(ctx, e.ctx.GetClient(), req, e.ctx.GetSessionVars().KVVars)
+	ctx := distsql.WithSQLKvExecCounterInterceptor(context.TODO(), e.Ctx().GetSessionVars().StmtCtx)
+	res, err := distsql.Checksum(ctx, e.Ctx().GetClient(), req, e.Ctx().GetSessionVars().KVVars)
 	if err != nil {
 		return nil, err
 	}
diff --git a/executor/compact_table.go b/executor/compact_table.go
index c5960619c54b0..2d88dd0293e23 100644
--- a/executor/compact_table.go
+++ b/executor/compact_table.go
@@ -23,6 +23,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/kvrpcpb"
 	"github.com/pingcap/log"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
@@ -38,7 +39,7 @@ import (
 	"google.golang.org/grpc/status"
 )

-var _ Executor = &CompactTableTiFlashExec{}
+var _ exec.Executor = &CompactTableTiFlashExec{}

 const (
 	compactRequestTimeout = time.Minute * 60 // A single compact request may take at most 1 hour.
@@ -64,7 +65,7 @@ func getTiFlashStores(ctx sessionctx.Context) ([]infoschema.ServerInfo, error) {

 // CompactTableTiFlashExec represents an executor for "ALTER TABLE [NAME] COMPACT TIFLASH REPLICA" statement.
 type CompactTableTiFlashExec struct {
-	baseExecutor
+	exec.BaseExecutor

 	tableInfo    *model.TableInfo
 	partitionIDs []int64
@@ -84,7 +85,7 @@ func (e *CompactTableTiFlashExec) Next(ctx context.Context, chk *chunk.Chunk) er
 }

 func (e *CompactTableTiFlashExec) doCompact(execCtx context.Context) error {
-	vars := e.ctx.GetSessionVars()
+	vars := e.Ctx().GetSessionVars()
 	if e.tableInfo.TiFlashReplica == nil || e.tableInfo.TiFlashReplica.Count == 0 {
 		vars.StmtCtx.AppendWarning(errors.Errorf("compact skipped: no tiflash replica in the table"))
 		return nil
@@ -95,7 +96,7 @@ func (e *CompactTableTiFlashExec) doCompact(execCtx context.Context) error {
 	//   For each partition (in series):
 	//     Send a series of compact request for this partition. <--- Handled by "compactOnePhysicalTable"
-	tiFlashStores, err := getTiFlashStores(e.ctx)
+	tiFlashStores, err := getTiFlashStores(e.Ctx())
 	if err != nil {
 		return err
 	}
@@ -257,7 +258,7 @@ func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bo
 			// Even after backoff, the request is still failed.., or the request is cancelled or timed out
 			// For example, the store is down. Let's simply don't compact other partitions.
 			warn := errors.Errorf("compact on store %s failed: %v", task.targetStore.Address, err)
-			task.parentExec.ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
 			task.logFailure(
 				zap.Int64("physical-table-id", physicalTableID),
 				zap.Error(err))
@@ -267,7 +268,7 @@ func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bo
 		switch resp.GetError().GetError().(type) {
 		case *kvrpcpb.CompactError_ErrCompactInProgress:
 			warn := errors.Errorf("compact on store %s failed: table is compacting in progress", task.targetStore.Address)
-			task.parentExec.ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
 			task.logFailure(
 				zap.Int64("physical-table-id", physicalTableID),
 				zap.Error(warn))
@@ -278,7 +279,7 @@ func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bo
 		case *kvrpcpb.CompactError_ErrTooManyPendingTasks:
 			// The store is already very busy, don't retry and don't compact other partitions.
 			warn := errors.Errorf("compact on store %s failed: store is too busy", task.targetStore.Address)
-			task.parentExec.ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
 			task.logFailure(
 				zap.Int64("physical-table-id", physicalTableID),
 				zap.Error(warn))
@@ -296,7 +297,7 @@ func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bo
 		default:
 			// Others are unexpected errors, don't retry and don't compact other partitions.
 			warn := errors.Errorf("compact on store %s failed: internal error (check logs for details)", task.targetStore.Address)
-			task.parentExec.ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
 			task.logFailure(
 				zap.Int64("physical-table-id", physicalTableID),
 				zap.Any("response-error", resp.GetError().GetError()))
@@ -314,7 +315,7 @@ func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bo
 		// The TiFlash server returned an invalid compacted end key.
 		// This is unexpected...
 		warn := errors.Errorf("compact on store %s failed: internal error (check logs for details)", task.targetStore.Address)
-		task.parentExec.ctx.GetSessionVars().StmtCtx.AppendWarning(warn)
+		task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
 		task.logFailure(
 			zap.Int64("physical-table-id", physicalTableID),
 			zap.String("compacted-start-key", hex.EncodeToString(resp.GetCompactedStartKey())),
diff --git a/executor/coprocessor.go b/executor/coprocessor.go
index 7a3389026c561..c700dc01e0795 100644
--- a/executor/coprocessor.go
+++ b/executor/coprocessor.go
@@ -21,6 +21,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/kvproto/pkg/coprocessor"
 	"github.com/pingcap/kvproto/pkg/tikvpb"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/auth"
@@ -60,7 +61,7 @@ func (h *CoprocessorDAGHandler) HandleRequest(ctx context.Context, req *coproces
 	}

 	chk := tryNewCacheChunk(e)
-	tps := e.base().retFieldTypes
+	tps := e.Base().RetFieldTypes()
 	var totalChunks, partChunks []tipb.Chunk
 	memTracker := h.sctx.GetSessionVars().StmtCtx.MemTracker
 	for {
@@ -100,7 +101,7 @@ func (h *CoprocessorDAGHandler) HandleStreamRequest(ctx context.Context, req *co
 	}

 	chk := tryNewCacheChunk(e)
-	tps := e.base().retFieldTypes
+	tps := e.Base().RetFieldTypes()
 	for {
 		chk.Reset()
 		if err = Next(ctx, e, chk); err != nil {
@@ -130,7 +131,7 @@ func (h *CoprocessorDAGHandler) buildResponseAndSendToStream(chk *chunk.Chunk, t
 	return nil
 }

-func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (Executor, error) {
+func (h *CoprocessorDAGHandler) buildDAGExecutor(req *coprocessor.Request) (exec.Executor, error) {
 	if req.GetTp() != kv.ReqTypeDAG {
 		return nil, errors.Errorf("unsupported request type %d", req.GetTp())
 	}
diff --git a/executor/cte.go b/executor/cte.go
index 233ee4678204b..5164ffb52b8ab 100644
--- a/executor/cte.go
+++ b/executor/cte.go
@@ -20,6 +20,7 @@ import (

 	"github.com/pingcap/errors"
 	"github.com/pingcap/failpoint"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/sessionctx"
 	"github.com/pingcap/tidb/sessionctx/variable"
@@ -31,7 +32,7 @@ import (
 	"github.com/pingcap/tidb/util/memory"
 )

-var _ Executor = &CTEExec{}
+var _ exec.Executor = &CTEExec{}

 // CTEExec implements CTE.
 // Following diagram describes how CTEExec works.
@@ -63,7 +64,7 @@ var _ Executor = &CTEExec{}
                 +----------+
 */
 type CTEExec struct {
-	baseExecutor
+	exec.BaseExecutor

 	chkIdx   int
 	producer *cteProducer
@@ -76,7 +77,7 @@ type CTEExec struct {
 // Open implements the Executor interface.
 func (e *CTEExec) Open(ctx context.Context) (err error) {
 	e.reset()
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}

@@ -106,7 +107,7 @@ func (e *CTEExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
 			return err
 		}
 	}
-	return e.producer.getChunk(ctx, e, req)
+	return e.producer.getChunk(e, req)
 }

 // Close implements the Executor interface.
@@ -123,7 +124,7 @@ func (e *CTEExec) Close() (err error) {
 		if err != nil {
 			return err
 		}
-	return e.baseExecutor.Close()
+	return e.BaseExecutor.Close()
 }

 func (e *CTEExec) reset() {
@@ -139,8 +140,8 @@ type cteProducer struct {

 	ctx sessionctx.Context

-	seedExec      Executor
-	recursiveExec Executor
+	seedExec      exec.Executor
+	recursiveExec exec.Executor

 	// `resTbl` and `iterInTbl` are shared by all CTEExec which reference to same the CTE.
 	// `iterInTbl` is also shared by CTETableReaderExec.
@@ -180,9 +181,9 @@ func (p *cteProducer) openProducer(ctx context.Context, cteExec *CTEExec) (err e
 	if p.memTracker != nil {
 		p.memTracker.Reset()
 	} else {
-		p.memTracker = memory.NewTracker(cteExec.id, -1)
+		p.memTracker = memory.NewTracker(cteExec.ID(), -1)
 	}
-	p.diskTracker = disk.NewTracker(cteExec.id, -1)
+	p.diskTracker = disk.NewTracker(cteExec.ID(), -1)
 	p.memTracker.AttachTo(p.ctx.GetSessionVars().StmtCtx.MemTracker)
 	p.diskTracker.AttachTo(p.ctx.GetSessionVars().StmtCtx.DiskTracker)

@@ -193,8 +194,8 @@ func (p *cteProducer) openProducer(ctx context.Context, cteExec *CTEExec) (err e
 		// For non-recursive CTE, the result will be put into resTbl directly.
 		// So no need to build iterOutTbl.
 		// Construct iterOutTbl in Open() instead of buildCTE(), because its destruct is in Close().
-		recursiveTypes := p.recursiveExec.base().retFieldTypes
-		p.iterOutTbl = cteutil.NewStorageRowContainer(recursiveTypes, cteExec.maxChunkSize)
+		recursiveTypes := p.recursiveExec.Base().RetFieldTypes()
+		p.iterOutTbl = cteutil.NewStorageRowContainer(recursiveTypes, cteExec.MaxChunkSize())
 		if err = p.iterOutTbl.OpenAndRef(); err != nil {
 			return err
 		}
@@ -203,7 +204,7 @@ func (p *cteProducer) openProducer(ctx context.Context, cteExec *CTEExec) (err e
 	if p.isDistinct {
 		p.hashTbl = newConcurrentMapHashTable()
 		p.hCtx = &hashContext{
-			allTypes: cteExec.base().retFieldTypes,
+			allTypes: cteExec.Base().RetFieldTypes(),
 		}
 		// We use all columns to compute hash.
 		p.hCtx.keyColIdx = make([]int, len(p.hCtx.allTypes))
@@ -235,7 +236,7 @@ func (p *cteProducer) closeProducer() (err error) {
 	return nil
 }

-func (p *cteProducer) getChunk(ctx context.Context, cteExec *CTEExec, req *chunk.Chunk) (err error) {
+func (p *cteProducer) getChunk(cteExec *CTEExec, req *chunk.Chunk) (err error) {
 	req.Reset()
 	if p.hasLimit {
 		return p.nextChunkLimit(cteExec, req)
@@ -302,11 +303,11 @@ func (p *cteProducer) produce(ctx context.Context, cteExec *CTEExec) (err error)
 	if p.resTbl.Error() != nil {
 		return p.resTbl.Error()
 	}
-	resAction := setupCTEStorageTracker(p.resTbl, cteExec.ctx, p.memTracker, p.diskTracker)
-	iterInAction := setupCTEStorageTracker(p.iterInTbl, cteExec.ctx, p.memTracker, p.diskTracker)
+	resAction := setupCTEStorageTracker(p.resTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
+	iterInAction := setupCTEStorageTracker(p.iterInTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
 	var iterOutAction *chunk.SpillDiskAction
 	if p.iterOutTbl != nil {
-		iterOutAction = setupCTEStorageTracker(p.iterOutTbl, cteExec.ctx, p.memTracker, p.diskTracker)
+		iterOutAction = setupCTEStorageTracker(p.iterOutTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
 	}

 	failpoint.Inject("testCTEStorageSpill", func(val failpoint.Value) {
diff --git a/executor/cte_table_reader.go b/executor/cte_table_reader.go
index 261a99f21f9c5..2eb7aa91eb7c7 100644
--- a/executor/cte_table_reader.go
+++ b/executor/cte_table_reader.go
@@ -18,13 +18,14 @@ import (
 	"context"

 	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/util/chunk"
 	"github.com/pingcap/tidb/util/cteutil"
 )

 // CTETableReaderExec scans data in iterInTbl, which is filled by corresponding CTEExec.
 type CTETableReaderExec struct {
-	baseExecutor
+	exec.BaseExecutor

 	iterInTbl cteutil.Storage
 	chkIdx    int
@@ -34,7 +35,7 @@ type CTETableReaderExec struct {

 // Open implements the Executor interface.
 func (e *CTETableReaderExec) Open(ctx context.Context) error {
 	e.reset()
-	return e.baseExecutor.Open(ctx)
+	return e.BaseExecutor.Open(ctx)
 }

 // Next implements the Executor interface.
@@ -67,7 +68,7 @@ func (e *CTETableReaderExec) Next(ctx context.Context, req *chunk.Chunk) (err er
 // Close implements the Executor interface.
 func (e *CTETableReaderExec) Close() (err error) {
 	e.reset()
-	return e.baseExecutor.Close()
+	return e.BaseExecutor.Close()
 }

 func (e *CTETableReaderExec) reset() {
diff --git a/executor/ddl.go b/executor/ddl.go
index f8659065bd5bd..0a23696e188a5 100644
--- a/executor/ddl.go
+++ b/executor/ddl.go
@@ -23,6 +23,7 @@ import (
 	"github.com/pingcap/tidb/config"
 	"github.com/pingcap/tidb/ddl"
 	"github.com/pingcap/tidb/domain"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
@@ -46,7 +47,7 @@ import (
 // DDLExec represents a DDL executor.
 // It grabs a DDL instance from Domain, calling the DDL methods to do the work.
 type DDLExec struct {
-	baseExecutor
+	exec.BaseExecutor

 	stmt ast.StmtNode
 	is   infoschema.InfoSchema
@@ -57,9 +58,9 @@ type DDLExec struct {
 // toErr converts the error to the ErrInfoSchemaChanged when the schema is outdated.
 func (e *DDLExec) toErr(err error) error {
 	// The err may be cause by schema changed, here we distinguish the ErrInfoSchemaChanged error from other errors.
-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	checker := domain.NewSchemaChecker(dom, e.is.SchemaMetaVersion(), nil, true)
-	txn, err1 := e.ctx.Txn(true)
+	txn, err1 := e.Ctx().Txn(true)
 	if err1 != nil {
 		logutil.BgLogger().Error("active txn failed", zap.Error(err1))
 		return err
@@ -72,7 +73,7 @@ func (e *DDLExec) toErr(err error) error {
 }

 func (e *DDLExec) getLocalTemporaryTable(schema model.CIStr, table model.CIStr) (table.Table, bool) {
-	tbl, err := e.ctx.GetInfoSchema().(infoschema.InfoSchema).TableByName(schema, table)
+	tbl, err := e.Ctx().GetInfoSchema().(infoschema.InfoSchema).TableByName(schema, table)
 	if infoschema.ErrTableNotExists.Equal(err) {
 		return nil, false
 	}
@@ -85,7 +86,7 @@ func (e *DDLExec) getLocalTemporaryTable(schema model.CIStr, table model.CIStr)
 }

 // Next implements the Executor Next interface.
-func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
+func (e *DDLExec) Next(ctx context.Context, _ *chunk.Chunk) (err error) {
 	if e.done {
 		return nil
 	}
@@ -120,7 +121,7 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
 			}
 			err = infoschema.ErrTableDropExists.GenWithStackByArgs(strings.Join(nonExistsTables, ","))
 			if s.IfExists {
-				e.ctx.GetSessionVars().StmtCtx.AppendNote(err)
+				e.Ctx().GetSessionVars().StmtCtx.AppendNote(err)
 				return nil
 			}
 			return err
@@ -132,13 +133,13 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
 		}
 	}

-	if err = sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil {
+	if err = sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil {
 		return err
 	}

 	defer func() {
-		e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue = false
-		e.ctx.GetSessionVars().StmtCtx.DDLJobID = 0
+		e.Ctx().GetSessionVars().StmtCtx.IsDDLJobInQueue = false
+		e.Ctx().GetSessionVars().StmtCtx.DDLJobID = 0
 	}()

 	switch x := e.stmt.(type) {
@@ -215,20 +216,20 @@ func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
 	if err != nil {
 		// If the owner return ErrTableNotExists error when running this DDL, it may be caused by schema changed,
 		// otherwise, ErrTableNotExists can be returned before putting this DDL job to the job queue.
-		if (e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue && infoschema.ErrTableNotExists.Equal(err)) ||
-			!e.ctx.GetSessionVars().StmtCtx.IsDDLJobInQueue {
+		if (e.Ctx().GetSessionVars().StmtCtx.IsDDLJobInQueue && infoschema.ErrTableNotExists.Equal(err)) ||
+			!e.Ctx().GetSessionVars().StmtCtx.IsDDLJobInQueue {
 			return e.toErr(err)
 		}
 		return err
 	}

-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	// Update InfoSchema in TxnCtx, so it will pass schema check.
 	is := dom.InfoSchema()
-	txnCtx := e.ctx.GetSessionVars().TxnCtx
+	txnCtx := e.Ctx().GetSessionVars().TxnCtx
 	txnCtx.InfoSchema = is
 	// DDL will force commit old transaction, after DDL, in transaction status should be false.
-	e.ctx.GetSessionVars().SetInTxn(false)
+	e.Ctx().GetSessionVars().SetInTxn(false)
 	return nil
 }
 
@@ -237,7 +238,7 @@ func (e *DDLExec) executeTruncateTable(s *ast.TruncateTableStmt) error {
 	if _, exist := e.getLocalTemporaryTable(s.Table.Schema, s.Table.Name); exist {
 		return e.tempTableDDL.TruncateLocalTemporaryTable(s.Table.Schema, s.Table.Name)
 	}
-	err := domain.GetDomain(e.ctx).DDL().TruncateTable(e.ctx, ident)
+	err := domain.GetDomain(e.Ctx()).DDL().TruncateTable(e.Ctx(), ident)
 	return err
 }
 
@@ -247,26 +248,26 @@ func (e *DDLExec) executeRenameTable(s *ast.RenameTableStmt) error {
 			return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("RENAME TABLE")
 		}
 	}
-	return domain.GetDomain(e.ctx).DDL().RenameTable(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().RenameTable(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeCreateDatabase(s *ast.CreateDatabaseStmt) error {
-	err := domain.GetDomain(e.ctx).DDL().CreateSchema(e.ctx, s)
+	err := domain.GetDomain(e.Ctx()).DDL().CreateSchema(e.Ctx(), s)
 	return err
 }
 
 func (e *DDLExec) executeAlterDatabase(s *ast.AlterDatabaseStmt) error {
-	err := domain.GetDomain(e.ctx).DDL().AlterSchema(e.ctx, s)
+	err := domain.GetDomain(e.Ctx()).DDL().AlterSchema(e.Ctx(), s)
 	return err
 }
 
 func (e *DDLExec) executeCreateTable(s *ast.CreateTableStmt) error {
-	err := domain.GetDomain(e.ctx).DDL().CreateTable(e.ctx, s)
+	err := domain.GetDomain(e.Ctx()).DDL().CreateTable(e.Ctx(), s)
 	return err
 }
 
 func (e *DDLExec) createSessionTemporaryTable(s *ast.CreateTableStmt) error {
-	is := e.ctx.GetInfoSchema().(infoschema.InfoSchema)
+	is := e.Ctx().GetInfoSchema().(infoschema.InfoSchema)
 	dbInfo, ok := is.SchemaByName(s.Table.Schema)
 	if !ok {
 		return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(s.Table.Schema.O)
@@ -276,13 +277,13 @@ func (e *DDLExec) createSessionTemporaryTable(s *ast.CreateTableStmt) error {
 	if exists {
 		err := infoschema.ErrTableExists.GenWithStackByArgs(ast.Ident{Schema: s.Table.Schema, Name: s.Table.Name})
 		if s.IfNotExists {
-			e.ctx.GetSessionVars().StmtCtx.AppendNote(err)
+			e.Ctx().GetSessionVars().StmtCtx.AppendNote(err)
 			return nil
 		}
 		return err
 	}
 
-	tbInfo, err := ddl.BuildSessionTemporaryTableInfo(e.ctx, is, s, dbInfo.Charset, dbInfo.Collate, dbInfo.PlacementPolicyRef)
+	tbInfo, err := ddl.BuildSessionTemporaryTableInfo(e.Ctx(), is, s, dbInfo.Charset, dbInfo.Collate, dbInfo.PlacementPolicyRef)
 	if err != nil {
 		return err
 	}
@@ -291,13 +292,13 @@ func (e *DDLExec) createSessionTemporaryTable(s *ast.CreateTableStmt) error {
 		return err
 	}
 
-	sessiontxn.GetTxnManager(e.ctx).OnLocalTemporaryTableCreated()
+	sessiontxn.GetTxnManager(e.Ctx()).OnLocalTemporaryTableCreated()
 	return nil
 }
 
 func (e *DDLExec) executeCreateView(ctx context.Context, s *ast.CreateViewStmt) error {
 	ret := &core.PreprocessorReturn{}
-	err := core.Preprocess(ctx, e.ctx, s.Select, core.WithPreprocessorReturn(ret))
+	err := core.Preprocess(ctx, e.Ctx(), s.Select, core.WithPreprocessorReturn(ret))
 	if err != nil {
 		return errors.Trace(err)
 	}
@@ -305,7 +306,7 @@ func (e *DDLExec) executeCreateView(ctx context.Context, s *ast.CreateViewStmt)
 		return exeerrors.ErrViewInvalid.GenWithStackByArgs(s.ViewName.Schema.L, s.ViewName.Name.L)
 	}
 
-	return domain.GetDomain(e.ctx).DDL().CreateView(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().CreateView(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error {
@@ -313,7 +314,7 @@ func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error {
 		return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("CREATE INDEX")
 	}
-	return domain.GetDomain(e.ctx).DDL().CreateIndex(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().CreateIndex(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error {
@@ -325,8 +326,8 @@ func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error {
 		return errors.New("Drop 'mysql' database is forbidden")
 	}
 
-	err := domain.GetDomain(e.ctx).DDL().DropSchema(e.ctx, s)
-	sessionVars := e.ctx.GetSessionVars()
+	err := domain.GetDomain(e.Ctx()).DDL().DropSchema(e.Ctx(), s)
+	sessionVars := e.Ctx().GetSessionVars()
 	if err == nil && strings.ToLower(sessionVars.CurrentDB) == dbName.L {
 		sessionVars.CurrentDB = ""
 		err = sessionVars.SetSystemVar(variable.CharsetDatabase, mysql.DefaultCharset)
@@ -342,15 +343,15 @@ func (e *DDLExec) executeDropDatabase(s *ast.DropDatabaseStmt) error {
 }
 
 func (e *DDLExec) executeDropTable(s *ast.DropTableStmt) error {
-	return domain.GetDomain(e.ctx).DDL().DropTable(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropTable(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeDropView(s *ast.DropTableStmt) error {
-	return domain.GetDomain(e.ctx).DDL().DropView(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropView(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeDropSequence(s *ast.DropSequenceStmt) error {
-	return domain.GetDomain(e.ctx).DDL().DropSequence(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropSequence(e.Ctx(), s)
 }
 
 func (e *DDLExec) dropLocalTemporaryTables(localTempTables []*ast.TableName) error {
@@ -373,7 +374,7 @@ func (e *DDLExec) executeDropIndex(s *ast.DropIndexStmt) error {
 		return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("DROP INDEX")
 	}
 
-	return domain.GetDomain(e.ctx).DDL().DropIndex(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropIndex(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeAlterTable(ctx context.Context, s *ast.AlterTableStmt) error {
@@ -381,14 +382,14 @@ func (e *DDLExec) executeAlterTable(ctx context.Context, s *ast.AlterTableStmt)
 		return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("ALTER TABLE")
 	}
 
-	return domain.GetDomain(e.ctx).DDL().AlterTable(ctx, e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().AlterTable(ctx, e.Ctx(), s)
 }
 
 // executeRecoverTable represents a recover table executor.
 // It is built from "recover table" statement,
 // is used to recover the table that deleted by mistake.
 func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error {
-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	var job *model.Job
 	var err error
 	var tblInfo *model.TableInfo
@@ -406,7 +407,7 @@ func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error {
 		return infoschema.ErrTableExists.GenWithStack("Table '%-.192s' already been recover to '%-.192s', can't be recover repeatedly", s.Table.Name.O, tbl.Meta().Name.O)
 	}
 
-	m, err := domain.GetDomain(e.ctx).GetSnapshotMeta(job.StartTS)
+	m, err := domain.GetDomain(e.Ctx()).GetSnapshotMeta(job.StartTS)
 	if err != nil {
 		return err
 	}
@@ -425,17 +426,17 @@ func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error {
 		OldTableName: tblInfo.Name.L,
 	}
 	// Call DDL RecoverTable.
-	err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, recoverInfo)
+	err = domain.GetDomain(e.Ctx()).DDL().RecoverTable(e.Ctx(), recoverInfo)
 	return err
 }
 
 func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, dom *domain.Domain) (*model.Job, *model.TableInfo, error) {
-	se, err := e.getSysSession()
+	se, err := e.GetSysSession()
 	if err != nil {
 		return nil, nil, err
 	}
 	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL)
-	defer e.releaseSysSession(ctx, se)
+	defer e.ReleaseSysSession(ctx, se)
 
 	job, err := ddl.GetHistoryJobByID(se, s.JobID)
 	if err != nil {
 		return nil, nil, err
@@ -448,7 +449,7 @@ func (e *DDLExec) getRecoverTableByJobID(s *ast.RecoverTableStmt, dom *domain.Do
 	}
 
 	// Check GC safe point for getting snapshot infoSchema.
-	err = gcutil.ValidateSnapshot(e.ctx, job.StartTS)
+	err = gcutil.ValidateSnapshot(e.Ctx(), job.StartTS)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -484,24 +485,24 @@ func GetDropOrTruncateTableInfoFromJobs(jobs []*model.Job, gcSafePoint uint64, d
 }
 
 func (e *DDLExec) getRecoverTableByTableName(tableName *ast.TableName) (*model.Job, *model.TableInfo, error) {
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return nil, nil, err
 	}
 	schemaName := tableName.Schema.L
 	if schemaName == "" {
-		schemaName = strings.ToLower(e.ctx.GetSessionVars().CurrentDB)
+		schemaName = strings.ToLower(e.Ctx().GetSessionVars().CurrentDB)
 	}
 	if schemaName == "" {
 		return nil, nil, errors.Trace(core.ErrNoDB)
 	}
-	gcSafePoint, err := gcutil.GetGCSafePoint(e.ctx)
+	gcSafePoint, err := gcutil.GetGCSafePoint(e.Ctx())
 	if err != nil {
 		return nil, nil, err
 	}
 	var jobInfo *model.Job
 	var tableInfo *model.TableInfo
-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	handleJobAndTableInfo := func(job *model.Job, tblInfo *model.TableInfo) (bool, error) {
 		if tblInfo.Name.L != tableName.Name.L {
 			return false, nil
@@ -538,12 +539,12 @@ func (e *DDLExec) getRecoverTableByTableName(tableName *ast.TableName) (*model.J
 }
 
 func (e *DDLExec) executeFlashBackCluster(s *ast.FlashBackToTimestampStmt) error {
-	flashbackTS, err := staleread.CalculateAsOfTsExpr(context.Background(), e.ctx, s.FlashbackTS)
+	flashbackTS, err := staleread.CalculateAsOfTsExpr(context.Background(), e.Ctx(), s.FlashbackTS)
 	if err != nil {
 		return err
 	}
 
-	return domain.GetDomain(e.ctx).DDL().FlashbackCluster(e.ctx, flashbackTS)
+	return domain.GetDomain(e.Ctx()).DDL().FlashbackCluster(e.Ctx(), flashbackTS)
 }
 
 func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error {
@@ -555,13 +556,13 @@ func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error {
 		tblInfo.Name = model.NewCIStr(s.NewName)
 	}
 	// Check the table ID was not exists.
-	is := domain.GetDomain(e.ctx).InfoSchema()
+	is := domain.GetDomain(e.Ctx()).InfoSchema()
 	tbl, ok := is.TableByID(tblInfo.ID)
 	if ok {
 		return infoschema.ErrTableExists.GenWithStack("Table '%-.192s' already been flashback to '%-.192s', can't be flashback repeatedly", s.Table.Name.O, tbl.Meta().Name.O)
 	}
 
-	m, err := domain.GetDomain(e.ctx).GetSnapshotMeta(job.StartTS)
+	m, err := domain.GetDomain(e.Ctx()).GetSnapshotMeta(job.StartTS)
 	if err != nil {
 		return err
 	}
@@ -580,7 +581,7 @@ func (e *DDLExec) executeFlashbackTable(s *ast.FlashBackTableStmt) error {
 		OldTableName: s.Table.Name.L,
 	}
 	// Call DDL RecoverTable.
-	err = domain.GetDomain(e.ctx).DDL().RecoverTable(e.ctx, recoverInfo)
+	err = domain.GetDomain(e.Ctx()).DDL().RecoverTable(e.Ctx(), recoverInfo)
 	return err
 }
 
@@ -593,7 +594,7 @@ func (e *DDLExec) executeFlashbackDatabase(s *ast.FlashBackDatabaseStmt) error {
 		dbName = model.NewCIStr(s.NewName)
 	}
 	// Check the Schema Name was not exists.
-	is := domain.GetDomain(e.ctx).InfoSchema()
+	is := domain.GetDomain(e.Ctx()).InfoSchema()
 	if is.SchemaExists(dbName) {
 		return infoschema.ErrDatabaseExists.GenWithStackByArgs(dbName)
 	}
@@ -607,20 +608,20 @@ func (e *DDLExec) executeFlashbackDatabase(s *ast.FlashBackDatabaseStmt) error {
 	}
 	recoverSchemaInfo.Name = dbName
 	// Call DDL RecoverSchema.
-	err = domain.GetDomain(e.ctx).DDL().RecoverSchema(e.ctx, recoverSchemaInfo)
+	err = domain.GetDomain(e.Ctx()).DDL().RecoverSchema(e.Ctx(), recoverSchemaInfo)
 	return err
 }
 
 func (e *DDLExec) getRecoverDBByName(schemaName model.CIStr) (recoverSchemaInfo *ddl.RecoverSchemaInfo, err error) {
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return nil, err
 	}
-	gcSafePoint, err := gcutil.GetGCSafePoint(e.ctx)
+	gcSafePoint, err := gcutil.GetGCSafePoint(e.Ctx())
 	if err != nil {
 		return nil, err
 	}
-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	fn := func(jobs []*model.Job) (bool, error) {
 		for _, job := range jobs {
 			// Check GC safe point for getting snapshot infoSchema.
@@ -688,7 +689,7 @@ func (e *DDLExec) getRecoverDBByName(schemaName model.CIStr) (recoverSchemaInfo
 }
 
 func (e *DDLExec) executeLockTables(s *ast.LockTablesStmt) error {
 	if !config.TableLockEnabled() {
-		e.ctx.GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrFuncNotEnabled.GenWithStackByArgs("LOCK TABLES", "enable-table-lock"))
+		e.Ctx().GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrFuncNotEnabled.GenWithStackByArgs("LOCK TABLES", "enable-table-lock"))
 		return nil
 	}
 
@@ -698,16 +699,16 @@ func (e *DDLExec) executeLockTables(s *ast.LockTablesStmt) error {
 		}
 	}
 
-	return domain.GetDomain(e.ctx).DDL().LockTables(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().LockTables(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeUnlockTables(_ *ast.UnlockTablesStmt) error {
 	if !config.TableLockEnabled() {
-		e.ctx.GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrFuncNotEnabled.GenWithStackByArgs("UNLOCK TABLES", "enable-table-lock"))
+		e.Ctx().GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrFuncNotEnabled.GenWithStackByArgs("UNLOCK TABLES", "enable-table-lock"))
 		return nil
 	}
-	lockedTables := e.ctx.GetAllTableLocks()
-	return domain.GetDomain(e.ctx).DDL().UnlockTables(e.ctx, lockedTables)
+	lockedTables := e.Ctx().GetAllTableLocks()
+	return domain.GetDomain(e.Ctx()).DDL().UnlockTables(e.Ctx(), lockedTables)
 }
 
 func (e *DDLExec) executeCleanupTableLock(s *ast.CleanupTableLockStmt) error {
@@ -716,50 +717,50 @@ func (e *DDLExec) executeCleanupTableLock(s *ast.CleanupTableLockStmt) error {
 			return dbterror.ErrUnsupportedLocalTempTableDDL.GenWithStackByArgs("ADMIN CLEANUP TABLE LOCK")
 		}
 	}
-	return domain.GetDomain(e.ctx).DDL().CleanupTableLock(e.ctx, s.Tables)
+	return domain.GetDomain(e.Ctx()).DDL().CleanupTableLock(e.Ctx(), s.Tables)
 }
 
 func (e *DDLExec) executeRepairTable(s *ast.RepairTableStmt) error {
-	return domain.GetDomain(e.ctx).DDL().RepairTable(e.ctx, s.CreateStmt)
+	return domain.GetDomain(e.Ctx()).DDL().RepairTable(e.Ctx(), s.CreateStmt)
 }
 func (e *DDLExec) executeCreateSequence(s *ast.CreateSequenceStmt) error {
-	return domain.GetDomain(e.ctx).DDL().CreateSequence(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().CreateSequence(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeAlterSequence(s *ast.AlterSequenceStmt) error {
-	return domain.GetDomain(e.ctx).DDL().AlterSequence(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().AlterSequence(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeCreatePlacementPolicy(s *ast.CreatePlacementPolicyStmt) error {
-	return domain.GetDomain(e.ctx).DDL().CreatePlacementPolicy(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().CreatePlacementPolicy(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeDropPlacementPolicy(s *ast.DropPlacementPolicyStmt) error {
-	return domain.GetDomain(e.ctx).DDL().DropPlacementPolicy(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropPlacementPolicy(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeAlterPlacementPolicy(s *ast.AlterPlacementPolicyStmt) error {
-	return domain.GetDomain(e.ctx).DDL().AlterPlacementPolicy(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().AlterPlacementPolicy(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeCreateResourceGroup(s *ast.CreateResourceGroupStmt) error {
-	if !variable.EnableResourceControl.Load() && !e.ctx.GetSessionVars().InRestrictedSQL {
+	if !variable.EnableResourceControl.Load() && !e.Ctx().GetSessionVars().InRestrictedSQL {
 		return infoschema.ErrResourceGroupSupportDisabled
 	}
-	return domain.GetDomain(e.ctx).DDL().AddResourceGroup(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().AddResourceGroup(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeAlterResourceGroup(s *ast.AlterResourceGroupStmt) error {
-	if !variable.EnableResourceControl.Load() && !e.ctx.GetSessionVars().InRestrictedSQL {
+	if !variable.EnableResourceControl.Load() && !e.Ctx().GetSessionVars().InRestrictedSQL {
 		return infoschema.ErrResourceGroupSupportDisabled
 	}
-	return domain.GetDomain(e.ctx).DDL().AlterResourceGroup(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().AlterResourceGroup(e.Ctx(), s)
 }
 
 func (e *DDLExec) executeDropResourceGroup(s *ast.DropResourceGroupStmt) error {
-	if !variable.EnableResourceControl.Load() && !e.ctx.GetSessionVars().InRestrictedSQL {
+	if !variable.EnableResourceControl.Load() && !e.Ctx().GetSessionVars().InRestrictedSQL {
 		return infoschema.ErrResourceGroupSupportDisabled
 	}
-	return domain.GetDomain(e.ctx).DDL().DropResourceGroup(e.ctx, s)
+	return domain.GetDomain(e.Ctx()).DDL().DropResourceGroup(e.Ctx(), s)
 }
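Each `executeXxx` method above is a one-line delegation: resolve the `Domain` from the session and hand the statement to the DDL layer via `domain.GetDomain(e.Ctx()).DDL().Xxx(e.Ctx(), s)`. A self-contained toy model of that shape (all types below are hypothetical stand-ins, not TiDB APIs):

```Go
package main

import "fmt"

// ddlIface mimics the role of domain.GetDomain(...).DDL(): the executor owns
// no DDL logic and only forwards the parsed statement.
type ddlIface interface {
	CreateTable(stmt string) error
}

type fakeDomain struct{}

func (fakeDomain) CreateTable(stmt string) error {
	fmt.Println("enqueue DDL job for:", stmt)
	return nil
}

type ddlExec struct{ dom ddlIface }

// executeCreateTable mirrors the thin-delegation shape of the methods above.
func (e *ddlExec) executeCreateTable(stmt string) error {
	return e.dom.CreateTable(stmt)
}

func main() {
	e := &ddlExec{dom: fakeDomain{}}
	_ = e.executeCreateTable("CREATE TABLE t (a INT)")
}
```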
diff --git a/executor/delete.go b/executor/delete.go
index e65547f8543df..ca4f8953aca66 100644
--- a/executor/delete.go
+++ b/executor/delete.go
@@ -18,6 +18,7 @@ import (
 	"context"
 
 	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
 	plannercore "github.com/pingcap/tidb/planner/core"
@@ -36,7 +37,7 @@ import (
 // DeleteExec represents a delete executor.
 // See https://dev.mysql.com/doc/refman/5.7/en/delete.html
 type DeleteExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	IsMultiTable bool
 	tblID2Table  map[int64]table.Table
@@ -69,7 +70,7 @@ func (e *DeleteExec) deleteOneRow(tbl table.Table, handleCols plannercore.Handle
 	if err != nil {
 		return err
 	}
-	err = e.removeRow(e.ctx, tbl, handle, row[:end])
+	err = e.removeRow(e.Ctx(), tbl, handle, row[:end])
 	if err != nil {
 		return err
 	}
@@ -91,13 +92,13 @@ func (e *DeleteExec) deleteSingleTableByChunk(ctx context.Context) error {
 		}
 	}
 
-	batchDMLSize := e.ctx.GetSessionVars().DMLBatchSize
+	batchDMLSize := e.Ctx().GetSessionVars().DMLBatchSize
 	// If tidb_batch_delete is ON and not in a transaction, we could use BatchDelete mode.
-	batchDelete := e.ctx.GetSessionVars().BatchDelete && !e.ctx.GetSessionVars().InTxn() &&
+	batchDelete := e.Ctx().GetSessionVars().BatchDelete && !e.Ctx().GetSessionVars().InTxn() &&
 		variable.EnableBatchDML.Load() && batchDMLSize > 0
-	fields := retTypes(e.children[0])
-	chk := tryNewCacheChunk(e.children[0])
-	columns := e.children[0].Schema().Columns
+	fields := retTypes(e.Children(0))
+	chk := tryNewCacheChunk(e.Children(0))
+	columns := e.Children(0).Schema().Columns
 	if len(columns) != len(fields) {
 		logutil.BgLogger().Error("schema columns and fields mismatch",
 			zap.Int("len(columns)", len(columns)),
@@ -109,7 +110,7 @@ func (e *DeleteExec) deleteSingleTableByChunk(ctx context.Context) error {
 	for {
 		e.memTracker.Consume(-memUsageOfChk)
 		iter := chunk.NewIterator4Chunk(chk)
-		err := Next(ctx, e.children[0], chk)
+		err := Next(ctx, e.Children(0), chk)
 		if err != nil {
 			return err
 		}
@@ -142,20 +143,20 @@ func (e *DeleteExec) deleteSingleTableByChunk(ctx context.Context) error {
 			}
 			rowCount++
 		}
-		chk = chunk.Renew(chk, e.maxChunkSize)
+		chk = chunk.Renew(chk, e.MaxChunkSize())
 	}
 
 	return nil
 }
 
 func (e *DeleteExec) doBatchDelete(ctx context.Context) error {
-	txn, err := e.ctx.Txn(false)
+	txn, err := e.Ctx().Txn(false)
 	if err != nil {
 		return exeerrors.ErrBatchInsertFail.GenWithStack("BatchDelete failed with error: %v", err)
 	}
 	e.memTracker.Consume(-int64(txn.Size()))
-	e.ctx.StmtCommit(ctx)
-	if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil {
+	e.Ctx().StmtCommit(ctx)
+	if err := sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil {
 		// We should return a special error for batch insert.
 		return exeerrors.ErrBatchInsertFail.GenWithStack("BatchDelete failed with error: %v", err)
 	}
@@ -196,14 +197,14 @@ func (e *DeleteExec) composeTblRowMap(tblRowMap tableRowMapType, colPosInfos []p
 func (e *DeleteExec) deleteMultiTablesByChunk(ctx context.Context) error {
 	colPosInfos := e.tblColPosInfos
 	tblRowMap := make(tableRowMapType)
-	fields := retTypes(e.children[0])
-	chk := tryNewCacheChunk(e.children[0])
+	fields := retTypes(e.Children(0))
+	chk := tryNewCacheChunk(e.Children(0))
 	memUsageOfChk := int64(0)
 	joinedDatumRowBuffer := make([]types.Datum, len(fields))
 	for {
 		e.memTracker.Consume(-memUsageOfChk)
 		iter := chunk.NewIterator4Chunk(chk)
-		err := Next(ctx, e.children[0], chk)
+		err := Next(ctx, e.Children(0), chk)
 		if err != nil {
 			return err
 		}
@@ -220,7 +221,7 @@ func (e *DeleteExec) deleteMultiTablesByChunk(ctx context.Context) error {
 				return err
 			}
 		}
-		chk = tryNewCacheChunk(e.children[0])
+		chk = tryNewCacheChunk(e.Children(0))
 	}
 
 	return e.removeRowsInTblRowMap(tblRowMap)
@@ -230,7 +231,7 @@ func (e *DeleteExec) removeRowsInTblRowMap(tblRowMap tableRowMapType) error {
 	for id, rowMap := range tblRowMap {
 		var err error
 		rowMap.Range(func(h kv.Handle, val []types.Datum) bool {
-			err = e.removeRow(e.ctx, e.tblID2Table[id], h, val)
+			err = e.removeRow(e.Ctx(), e.tblID2Table[id], h, val)
 			return err == nil
 		})
 		if err != nil {
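`doBatchDelete` above is the interesting step in `deleteSingleTableByChunk`: when `tidb_batch_delete` applies, the statement's buffered mutations are committed and a fresh transaction is opened every `batchDMLSize` rows, so a huge DELETE never accumulates into one giant transaction. A toy model of that loop (the `session` type and its methods are hypothetical stand-ins for the session/txn machinery):

```Go
package main

import "fmt"

type session struct{ pending int }

func (s *session) removeRow()    { s.pending++ }
func (s *session) stmtCommit()   { fmt.Printf("commit %d rows\n", s.pending); s.pending = 0 }
func (s *session) newTxn() error { return nil }

func main() {
	const batchDMLSize = 3 // plays the role of e.Ctx().GetSessionVars().DMLBatchSize
	s := &session{}
	for row := 1; row <= 7; row++ {
		s.removeRow()
		// Mirrors: if batchDelete && rowCount >= batchDMLSize { doBatchDelete(ctx) }
		if s.pending >= batchDMLSize {
			s.stmtCommit()
			if err := s.newTxn(); err != nil { // a failure here surfaces as ErrBatchInsertFail
				return
			}
		}
	}
	s.stmtCommit() // the remainder commits with the statement as usual
}
```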
@@ -275,15 +276,15 @@ func onRemoveRowForFK(ctx sessionctx.Context, data []types.Datum, fkChecks []*FK
 
 // Close implements the Executor Close interface.
 func (e *DeleteExec) Close() error {
 	defer e.memTracker.ReplaceBytesUsed(0)
-	return e.children[0].Close()
+	return e.Children(0).Close()
 }
 
 // Open implements the Executor Open interface.
 func (e *DeleteExec) Open(ctx context.Context) error {
-	e.memTracker = memory.NewTracker(e.id, -1)
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker = memory.NewTracker(e.ID(), -1)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 
-	return e.children[0].Open(ctx)
+	return e.Children(0).Open(ctx)
 }
 
 // GetFKChecks implements WithForeignKeyTrigger interface.
diff --git a/executor/distsql.go b/executor/distsql.go
index 0bffe6af6a3d6..71d24a78848af 100644
--- a/executor/distsql.go
+++ b/executor/distsql.go
@@ -29,6 +29,7 @@ import (
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/distsql"
 	"github.com/pingcap/tidb/executor/internal/builder"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
@@ -62,9 +63,9 @@
 )
 
 var (
-	_ Executor = &TableReaderExecutor{}
-	_ Executor = &IndexReaderExecutor{}
-	_ Executor = &IndexLookUpExecutor{}
+	_ exec.Executor = &TableReaderExecutor{}
+	_ exec.Executor = &IndexReaderExecutor{}
+	_ exec.Executor = &IndexLookUpExecutor{}
 )
 
 // LookupTableTaskChannelSize represents the channel size of the index double read taskChan.
@@ -165,7 +166,7 @@ func rebuildIndexRanges(ctx sessionctx.Context, is *plannercore.PhysicalIndexSca
 // IndexReaderExecutor sends dag request and reads index data from kv layer.
 type IndexReaderExecutor struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	// For a partitioned table, the IndexReaderExecutor works on a partition, so
 	// the type of this table field is actually `table.PhysicalTable`.
@@ -234,7 +235,7 @@ func (e *IndexReaderExecutor) Close() (err error) {
 	if e.dummy {
 		return nil
 	}
-	e.ctx.StoreQueryFeedback(e.feedback)
+	e.Ctx().StoreQueryFeedback(e.feedback)
 	return err
 }
 
@@ -270,13 +271,13 @@ func (e *IndexReaderExecutor) buildKeyRanges(sc *stmtctx.StatementContext, range
 func (e *IndexReaderExecutor) Open(ctx context.Context) error {
 	var err error
 	if e.corColInAccess {
-		e.ranges, err = rebuildIndexRanges(e.ctx, e.plans[0].(*plannercore.PhysicalIndexScan), e.idxCols, e.colLens)
+		e.ranges, err = rebuildIndexRanges(e.Ctx(), e.plans[0].(*plannercore.PhysicalIndexScan), e.idxCols, e.colLens)
 		if err != nil {
 			return err
 		}
 	}
 
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	var kvRanges []kv.KeyRange
 	if len(e.partitions) > 0 {
 		for _, p := range e.partitions {
@@ -310,11 +311,11 @@ func (e *IndexReaderExecutor) buildKVReq(ctx context.Context, r []kv.KeyRange) (
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
 		SetIsStaleness(e.isStaleness).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(e.ctx.GetInfoSchema()).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
 		SetMemTracker(e.memTracker).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &builder.Request, e.netDataSize)).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID)
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &builder.Request, e.netDataSize)).
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID)
 	kvReq, err := builder.Build()
 	return kvReq, err
 }
@@ -322,7 +323,7 @@ func (e *IndexReaderExecutor) buildKVReq(ctx context.Context, r []kv.KeyRange) (
 func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange) error {
 	var err error
 	if e.corColInFilter {
-		e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.plans)
+		e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.plans)
 		if err != nil {
 			return err
 		}
@@ -342,11 +343,11 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 			args = append(args, expression.NewInt64Const(pid))
 		}
 
-		inCondition, err := expression.NewFunction(e.ctx, ast.In, types.NewFieldType(mysql.TypeLonglong), args...)
+		inCondition, err := expression.NewFunction(e.Ctx(), ast.In, types.NewFieldType(mysql.TypeLonglong), args...)
 		if err != nil {
 			return err
 		}
-		pbConditions, err := expression.ExpressionsToPBList(e.ctx.GetSessionVars().StmtCtx, []expression.Expression{inCondition}, e.ctx.GetClient())
+		pbConditions, err := expression.ExpressionsToPBList(e.Ctx().GetSessionVars().StmtCtx, []expression.Expression{inCondition}, e.Ctx().GetClient())
 		if err != nil {
 			return err
 		}
@@ -364,7 +365,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 		}
 	}
 
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		collExec := true
 		e.dagPB.CollectExecutionSummaries = &collExec
 	}
@@ -379,9 +380,9 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 	if e.memTracker != nil {
 		e.memTracker.Reset()
 	} else {
-		e.memTracker = memory.NewTracker(e.id, -1)
+		e.memTracker = memory.NewTracker(e.ID(), -1)
 	}
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 	slices.SortFunc(kvRanges, func(i, j kv.KeyRange) bool {
 		return bytes.Compare(i.StartKey, j.StartKey) < 0
 	})
@@ -392,7 +393,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 			e.feedback.Invalidate()
 			return err
 		}
-		e.result, err = e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+		e.result, err = e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 		if err != nil {
 			e.feedback.Invalidate()
 			return err
@@ -409,7 +410,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 		}
 		var results []distsql.SelectResult
 		for _, kvReq := range kvReqs {
-			result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+			result, err := e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 			if err != nil {
 				e.feedback.Invalidate()
 				return err
@@ -423,7 +424,7 @@ func (e *IndexReaderExecutor) open(ctx context.Context, kvRanges []kv.KeyRange)
 }
 
 // IndexLookUpExecutor implements double read for index scan.
 type IndexLookUpExecutor struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	table table.Table
 	index *model.IndexInfo
@@ -519,7 +520,7 @@ func (e *IndexLookUpExecutor) setDummy() {
 func (e *IndexLookUpExecutor) Open(ctx context.Context) error {
 	var err error
 	if e.corColInAccess {
-		e.ranges, err = rebuildIndexRanges(e.ctx, e.idxPlans[0].(*plannercore.PhysicalIndexScan), e.idxCols, e.colLens)
+		e.ranges, err = rebuildIndexRanges(e.Ctx(), e.idxPlans[0].(*plannercore.PhysicalIndexScan), e.idxCols, e.colLens)
 		if err != nil {
 			return err
 		}
@@ -543,7 +544,7 @@ func (e *IndexLookUpExecutor) Open(ctx context.Context) error {
 }
 
 func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) {
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	if e.partitionTableMode {
 		e.feedback.Invalidate() // feedback for partition tables is not ready
 		e.partitionKVRanges = make([][]kv.KeyRange, 0, len(e.prunedPartitions))
@@ -580,28 +581,28 @@ func (e *IndexLookUpExecutor) buildTableKeyRanges() (err error) {
 	return err
 }
 
-func (e *IndexLookUpExecutor) open(ctx context.Context) error {
+func (e *IndexLookUpExecutor) open(_ context.Context) error {
 	// We have to initialize "memTracker" and other execution resources in here
 	// instead of in function "Open", because this "IndexLookUpExecutor" may be
 	// constructed by a "IndexLookUpJoin" and "Open" will not be called in that
 	// situation.
 	e.initRuntimeStats()
-	e.memTracker = memory.NewTracker(e.id, -1)
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker = memory.NewTracker(e.ID(), -1)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 
 	e.finished = make(chan struct{})
 	e.resultCh = make(chan *lookupTableTask, atomic.LoadInt32(&LookupTableTaskChannelSize))
 
 	var err error
 	if e.corColInIdxSide {
-		e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.idxPlans)
+		e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.idxPlans)
 		if err != nil {
 			return err
 		}
 	}
 
 	if e.corColInTblSide {
-		e.tableRequest.Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.tblPlans)
+		e.tableRequest.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.tblPlans)
 		if err != nil {
 			return err
 		}
@@ -683,7 +684,7 @@ func (e *IndexLookUpExecutor) getRetTpsForIndexReader() []*types.FieldType {
 
 // startIndexWorker launch a background goroutine to fetch handles, send the results to workCh.
 func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<- *lookupTableTask, initBatchSize int) error {
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		collExec := true
 		e.dagPB.CollectExecutionSummaries = &collExec
 	}
@@ -712,8 +713,8 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 		resultCh:        e.resultCh,
 		keepOrder:       e.keepOrder,
 		checkIndexValue: e.checkIndexValue,
-		maxBatchSize:    e.ctx.GetSessionVars().IndexLookupSize,
-		maxChunkSize:    e.maxChunkSize,
+		maxBatchSize:    e.Ctx().GetSessionVars().IndexLookupSize,
+		maxChunkSize:    e.MaxChunkSize(),
 		PushedLimit:     e.PushedLimit,
 	}
 	var builder distsql.RequestBuilder
@@ -725,11 +726,11 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
 		SetIsStaleness(e.isStaleness).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(e.ctx.GetInfoSchema()).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &builder.Request, e.idxNetDataSize/float64(len(kvRanges)))).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &builder.Request, e.idxNetDataSize/float64(len(kvRanges)))).
 		SetMemTracker(tracker).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID)
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID)
 
 	results := make([]distsql.SelectResult, 0, len(kvRanges))
 	for _, kvRange := range kvRanges {
@@ -754,7 +755,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 				worker.syncErr(err)
 				break
 			}
-			result, err := distsql.SelectWithRuntimeStats(ctx, e.ctx, kvReq, tps, e.feedback, getPhysicalPlanIDs(e.idxPlans), idxID)
+			result, err := distsql.SelectWithRuntimeStats(ctx, e.Ctx(), kvReq, tps, e.feedback, getPhysicalPlanIDs(e.idxPlans), idxID)
 			if err != nil {
 				worker.syncErr(err)
 				break
@@ -778,7 +779,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 				logutil.Logger(ctx).Error("close Select result failed", zap.Error(err))
 			}
 		}
-		e.ctx.StoreQueryFeedback(e.feedback)
+		e.Ctx().StoreQueryFeedback(e.feedback)
 		close(workCh)
 		close(e.resultCh)
 		e.idxWorkerWg.Done()
@@ -788,7 +789,7 @@ func (e *IndexLookUpExecutor) startIndexWorker(ctx context.Context, workCh chan<
 
 // startTableWorker launchs some background goroutines which pick tasks from workCh and execute the task.
 func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-chan *lookupTableTask) {
-	lookupConcurrencyLimit := e.ctx.GetSessionVars().IndexLookupConcurrency()
+	lookupConcurrencyLimit := e.Ctx().GetSessionVars().IndexLookupConcurrency()
 	e.tblWorkerWg.Add(lookupConcurrencyLimit)
 	for i := 0; i < lookupConcurrencyLimit; i++ {
 		workerID := i
@@ -812,13 +813,13 @@ func (e *IndexLookUpExecutor) startTableWorker(ctx context.Context, workCh <-cha
 	}
 }
 
-func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookupTableTask) (Executor, error) {
+func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookupTableTask) (exec.Executor, error) {
 	table := e.table
 	if e.partitionTableMode && task.partitionTable != nil {
 		table = task.partitionTable
 	}
 	tableReaderExec := &TableReaderExecutor{
-		baseExecutor: newBaseExecutor(e.ctx, e.schema, e.getTableRootPlanID()),
+		BaseExecutor: exec.NewBaseExecutor(e.Ctx(), e.Schema(), e.getTableRootPlanID()),
 		table:        table,
 		dagPB:        e.tableRequest,
 		startTS:      e.startTS,
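`buildTableReader` above shows the only constructor call sites now use: `exec.NewBaseExecutor(ctx, schema, id, children...)`. Judging by the `newBaseExecutor` body deleted from executor.go later in this diff, the new constructor is essentially the same code moved behind the package boundary; a condensed sketch under that assumption (field names assumed to carry over unexported):

```Go
// Condensed sketch of exec.NewBaseExecutor, assuming it keeps the body of the
// deleted newBaseExecutor (see the executor.go hunks below) with the fields
// now hidden behind accessors.
func NewBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) BaseExecutor {
	e := BaseExecutor{
		children:     children,
		ctx:          ctx,
		id:           id,
		schema:       schema,
		initCap:      ctx.GetSessionVars().InitChunkSize,
		maxChunkSize: ctx.GetSessionVars().MaxChunkSize,
	}
	// retFieldTypes is still derived from the output schema, column by column.
	if schema != nil {
		cols := schema.Columns
		e.retFieldTypes = make([]*types.FieldType, len(cols))
		for i := range cols {
			e.retFieldTypes[i] = cols[i].RetType
		}
	}
	return e
}
```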
@@ -844,7 +845,7 @@ func (e *IndexLookUpExecutor) buildTableReader(ctx context.Context, task *lookup
 
 // Close implements Exec Close interface.
 func (e *IndexLookUpExecutor) Close() error {
 	if e.stats != nil {
-		defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
+		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
 	}
 	e.kvRanges = e.kvRanges[:0]
 	if e.dummy {
@@ -944,10 +945,10 @@ func (e *IndexLookUpExecutor) getResultTask() (*lookupTableTask, error) {
 }
 
 func (e *IndexLookUpExecutor) initRuntimeStats() {
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		e.stats = &IndexLookUpRunTimeStats{
 			indexScanBasicStats: &execdetails.BasicRuntimeStats{},
-			Concurrency:         e.ctx.GetSessionVars().IndexLookupConcurrency(),
+			Concurrency:         e.Ctx().GetSessionVars().IndexLookupConcurrency(),
 		}
 	}
 }
@@ -956,14 +957,14 @@ func (e *IndexLookUpExecutor) getIndexPlanRootID() int {
 	if len(e.idxPlans) > 0 {
 		return e.idxPlans[len(e.idxPlans)-1].ID()
 	}
-	return e.id
+	return e.ID()
 }
 
 func (e *IndexLookUpExecutor) getTableRootPlanID() int {
 	if len(e.tblPlans) > 0 {
 		return e.tblPlans[len(e.tblPlans)-1].ID()
 	}
-	return e.id
+	return e.ID()
 }
 
 // indexWorker is used by IndexLookUpExecutor to maintain index lookup background goroutines.
@@ -1009,11 +1010,11 @@ func (w *indexWorker) fetchHandles(ctx context.Context, results []distsql.Select
 			}
 		}
 	}()
-	chk := w.idxLookup.ctx.GetSessionVars().GetNewChunkWithCapacity(w.idxLookup.getRetTpsForIndexReader(), w.idxLookup.maxChunkSize, w.idxLookup.maxChunkSize, w.idxLookup.AllocPool)
+	chk := w.idxLookup.Ctx().GetSessionVars().GetNewChunkWithCapacity(w.idxLookup.getRetTpsForIndexReader(), w.idxLookup.MaxChunkSize(), w.idxLookup.MaxChunkSize(), w.idxLookup.AllocPool)
 	idxID := w.idxLookup.getIndexPlanRootID()
-	if w.idxLookup.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
-		if idxID != w.idxLookup.id && w.idxLookup.stats != nil {
-			w.idxLookup.stats.indexScanBasicStats = w.idxLookup.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(idxID)
+	if w.idxLookup.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
+		if idxID != w.idxLookup.ID() && w.idxLookup.stats != nil {
+			w.idxLookup.stats.indexScanBasicStats = w.idxLookup.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(idxID)
 		}
 	}
 	for i := 0; i < len(results); {
@@ -1237,7 +1238,7 @@ func (e *IndexLookUpExecutor) getHandle(row chunk.Row, handleIdx []int,
 			datums = append(datums, row.GetDatum(idx, e.handleCols[i].RetType))
 		}
 		tablecodec.TruncateIndexValues(e.table.Meta(), e.primaryKeyIndex, datums)
-		handleEncoded, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, nil, datums...)
+		handleEncoded, err = codec.EncodeKey(e.Ctx().GetSessionVars().StmtCtx, nil, datums...)
 		if err != nil {
 			return nil, err
 		}
@@ -1339,7 +1340,7 @@ func (e *IndexLookUpRunTimeStats) Tp() int {
 	return execdetails.TpIndexLookUpRunTimeStats
 }
 
-func (w *tableWorker) compareData(ctx context.Context, task *lookupTableTask, tableReader Executor) error {
+func (w *tableWorker) compareData(ctx context.Context, task *lookupTableTask, tableReader exec.Executor) error {
 	chk := tryNewCacheChunk(tableReader)
 	tblInfo := w.idxLookup.table.Meta()
 	vals := make([]types.Datum, 0, len(w.idxTblCols))
@@ -1366,7 +1367,7 @@ func (w *tableWorker) compareData(ctx context.Context, task *lookupTableTask, ta
 				if idx == nil {
 					return nil
 				}
-				k, _, err := idx.GenIndexKey(w.idxLookup.ctx.GetSessionVars().StmtCtx, idxRow.Values[:len(idx.Meta().Columns)], idxRow.Handle, nil)
+				k, _, err := idx.GenIndexKey(w.idxLookup.Ctx().GetSessionVars().StmtCtx, idxRow.Values[:len(idx.Meta().Columns)], idxRow.Handle, nil)
 				if err != nil {
 					return nil
 				}
@@ -1374,7 +1375,7 @@ func (w *tableWorker) compareData(ctx context.Context, task *lookupTableTask, ta
 			},
 			Tbl:  tblInfo,
 			Idx:  w.idxLookup.index,
-			Sctx: w.idxLookup.ctx,
+			Sctx: w.idxLookup.Ctx(),
 		}
 	}
 
@@ -1422,7 +1423,7 @@ func (w *tableWorker) compareData(ctx context.Context, task *lookupTableTask, ta
 			vals = append(vals, row.GetDatum(i, &col.FieldType))
 		}
 		tablecodec.TruncateIndexValues(tblInfo, w.idxLookup.index, vals)
-		sctx := w.idxLookup.ctx.GetSessionVars().StmtCtx
+		sctx := w.idxLookup.Ctx().GetSessionVars().StmtCtx
 		for i := range vals {
 			col := w.idxTblCols[i]
 			idxVal := idxRow.GetDatum(i, w.idxColTps[i])
@@ -1527,7 +1528,7 @@ func (w *tableWorker) executeTask(ctx context.Context, task *lookupTableTask) er
 	}
 
 	if handleCnt != len(task.rows) && !util.HasCancelled(ctx) &&
-		!w.idxLookup.ctx.GetSessionVars().StmtCtx.WeakConsistency {
+		!w.idxLookup.Ctx().GetSessionVars().StmtCtx.WeakConsistency {
 		if len(w.idxLookup.tblPlans) == 1 {
 			obtainedHandlesMap := kv.NewHandleMap()
 			for _, row := range task.rows {
@@ -1544,7 +1545,7 @@ func (w *tableWorker) executeTask(ctx context.Context, task *lookupTableTask) er
 				},
 				Tbl:  w.idxLookup.table.Meta(),
 				Idx:  w.idxLookup.index,
-				Sctx: w.idxLookup.ctx,
+				Sctx: w.idxLookup.Ctx(),
 			}).ReportLookupInconsistent(ctx,
 				handleCnt,
 				len(task.rows),
diff --git a/executor/executor.go b/executor/executor.go
index 5b3dc444c3f7a..ad8691530f691 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -34,6 +34,7 @@ import (
 	"github.com/pingcap/tidb/ddl/schematracker"
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/domain/infosync"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
@@ -84,30 +85,29 @@
 )
 
 var (
-	_ Executor = &baseExecutor{}
-	_ Executor = &CheckTableExec{}
-	_ Executor = &HashAggExec{}
-	_ Executor = &HashJoinExec{}
-	_ Executor = &IndexLookUpExecutor{}
-	_ Executor = &IndexReaderExecutor{}
-	_ Executor = &LimitExec{}
-	_ Executor = &MaxOneRowExec{}
-	_ Executor = &MergeJoinExec{}
-	_ Executor = &ProjectionExec{}
-	_ Executor = &SelectionExec{}
-	_ Executor = &SelectLockExec{}
-	_ Executor = &ShowNextRowIDExec{}
-	_ Executor = &ShowDDLExec{}
-	_ Executor = &ShowDDLJobsExec{}
-	_ Executor = &ShowDDLJobQueriesExec{}
-	_ Executor = &SortExec{}
-	_ Executor = &StreamAggExec{}
-	_ Executor = &TableDualExec{}
-	_ Executor = &TableReaderExecutor{}
-	_ Executor = &TableScanExec{}
-	_ Executor = &TopNExec{}
-	_ Executor = &UnionExec{}
-	_ Executor = &FastCheckTableExec{}
+	_ exec.Executor = &CheckTableExec{}
+	_ exec.Executor = &HashAggExec{}
+	_ exec.Executor = &HashJoinExec{}
+	_ exec.Executor = &IndexLookUpExecutor{}
+	_ exec.Executor = &IndexReaderExecutor{}
+	_ exec.Executor = &LimitExec{}
+	_ exec.Executor = &MaxOneRowExec{}
+	_ exec.Executor = &MergeJoinExec{}
+	_ exec.Executor = &ProjectionExec{}
+	_ exec.Executor = &SelectionExec{}
+	_ exec.Executor = &SelectLockExec{}
+	_ exec.Executor = &ShowNextRowIDExec{}
+	_ exec.Executor = &ShowDDLExec{}
+	_ exec.Executor = &ShowDDLJobsExec{}
+	_ exec.Executor = &ShowDDLJobQueriesExec{}
+	_ exec.Executor = &SortExec{}
+	_ exec.Executor = &StreamAggExec{}
+	_ exec.Executor = &TableDualExec{}
+	_ exec.Executor = &TableReaderExecutor{}
+	_ exec.Executor = &TableScanExec{}
+	_ exec.Executor = &TopNExec{}
+	_ exec.Executor = &UnionExec{}
+	_ exec.Executor = &FastCheckTableExec{}
 
 	// GlobalMemoryUsageTracker is the ancestor of all the Executors' memory tracker and GlobalMemory Tracker
 	GlobalMemoryUsageTracker *memory.Tracker
@@ -131,22 +131,10 @@
 // Currently, there are TableReader/IndexReader/IndexLookUp/IndexMergeReader.
 // Note, partition reader is special and the caller should handle it carefully.
 type dataSourceExecutor interface {
-	Executor
+	exec.Executor
 	Table() table.Table
 }
 
-type baseExecutor struct {
-	ctx           sessionctx.Context
-	id            int
-	schema        *expression.Schema // output schema
-	initCap       int
-	maxChunkSize  int
-	children      []Executor
-	retFieldTypes []*types.FieldType
-	runtimeStats  *execdetails.BasicRuntimeStats
-	AllocPool     chunk.Allocator
-}
-
 const (
 	// globalPanicStorageExceed represents the panic message when out of storage quota.
 	globalPanicStorageExceed string = "Out Of Global Storage Quota!"
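The rewritten `var` block above keeps the standard Go compile-time assertion idiom, now pinned to the moved interface. A self-contained illustration:

```Go
package main

import "context"

type Executor interface {
	Open(context.Context) error
	Close() error
}

type LimitExec struct{}

func (*LimitExec) Open(context.Context) error { return nil }
func (*LimitExec) Close() error               { return nil }

// If LimitExec ever stops satisfying Executor, the build breaks right here
// instead of at a distant call site, which is the same role the
// `_ exec.Executor = &...{}` lines play above.
var _ Executor = &LimitExec{}

func main() {}
```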
@@ -206,126 +194,38 @@ func (a *globalPanicOnExceed) GetPriority() int64 {
 	return memory.DefPanicPriority
 }
 
-// base returns the baseExecutor of an executor, don't override this method!
-func (e *baseExecutor) base() *baseExecutor {
-	return e
-}
-
-// Open initializes children recursively and "childrenResults" according to children's schemas.
-func (e *baseExecutor) Open(ctx context.Context) error {
-	for _, child := range e.children {
-		err := child.Open(ctx)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Close closes all executors and release all resources.
-func (e *baseExecutor) Close() error {
-	var firstErr error
-	for _, src := range e.children {
-		if err := src.Close(); err != nil && firstErr == nil {
-			firstErr = err
-		}
-	}
-	return firstErr
-}
-
-// Schema returns the current baseExecutor's schema. If it is nil, then create and return a new one.
-func (e *baseExecutor) Schema() *expression.Schema {
-	if e.schema == nil {
-		return expression.NewSchema()
-	}
-	return e.schema
-}
-
 // newFirstChunk creates a new chunk to buffer current executor's result.
-func newFirstChunk(e Executor) *chunk.Chunk {
-	base := e.base()
-	return chunk.New(base.retFieldTypes, base.initCap, base.maxChunkSize)
+func newFirstChunk(e exec.Executor) *chunk.Chunk {
+	base := e.Base()
+	return chunk.New(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize())
 }
 
-func tryNewCacheChunk(e Executor) *chunk.Chunk {
-	base := e.base()
-	s := base.ctx.GetSessionVars()
-	return s.GetNewChunkWithCapacity(base.retFieldTypes, base.initCap, base.maxChunkSize, base.AllocPool)
+func tryNewCacheChunk(e exec.Executor) *chunk.Chunk {
+	base := e.Base()
+	s := base.Ctx().GetSessionVars()
+	return s.GetNewChunkWithCapacity(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize(), base.AllocPool)
 }
 
 // newList creates a new List to buffer current executor's result.
-func newList(e Executor) *chunk.List {
-	base := e.base()
-	return chunk.NewList(base.retFieldTypes, base.initCap, base.maxChunkSize)
+func newList(e exec.Executor) *chunk.List {
+	base := e.Base()
+	return chunk.NewList(base.RetFieldTypes(), base.InitCap(), base.MaxChunkSize())
 }
 
 // retTypes returns all output column types.
-func retTypes(e Executor) []*types.FieldType {
-	base := e.base()
-	return base.retFieldTypes
-}
-
-// Next fills multiple rows into a chunk.
-func (e *baseExecutor) Next(ctx context.Context, req *chunk.Chunk) error {
-	return nil
-}
-
-func (e *baseExecutor) updateDeltaForTableID(id int64) {
-	txnCtx := e.ctx.GetSessionVars().TxnCtx
-	txnCtx.UpdateDeltaForTable(id, 0, 0, map[int64]int64{})
-}
-
-func newBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) baseExecutor {
-	e := baseExecutor{
-		children:     children,
-		ctx:          ctx,
-		id:           id,
-		schema:       schema,
-		initCap:      ctx.GetSessionVars().InitChunkSize,
-		maxChunkSize: ctx.GetSessionVars().MaxChunkSize,
-		AllocPool:    ctx.GetSessionVars().ChunkPool.Alloc,
-	}
-	if ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
-		if e.id > 0 {
-			e.runtimeStats = e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(id)
-		}
-	}
-	if schema != nil {
-		cols := schema.Columns
-		e.retFieldTypes = make([]*types.FieldType, len(cols))
-		for i := range cols {
-			e.retFieldTypes[i] = cols[i].RetType
-		}
-	}
-	return e
-}
-
-// Executor is the physical implementation of a algebra operator.
-//
-// In TiDB, all algebra operators are implemented as iterators, i.e., they
-// support a simple Open-Next-Close protocol. See this paper for more details:
-//
-// "Volcano-An Extensible and Parallel Query Evaluation System"
-//
-// Different from Volcano's execution model, a "Next" function call in TiDB will
-// return a batch of rows, other than a single row in Volcano.
-// NOTE: Executors must call "chk.Reset()" before appending their results to it.
-type Executor interface {
-	base() *baseExecutor
-	Open(context.Context) error
-	Next(ctx context.Context, req *chunk.Chunk) error
-	Close() error
-	Schema() *expression.Schema
+func retTypes(e exec.Executor) []*types.FieldType {
+	base := e.Base()
+	return base.RetFieldTypes()
 }
 
 // Next is a wrapper function on e.Next(), it handles some common codes.
-func Next(ctx context.Context, e Executor, req *chunk.Chunk) error {
-	base := e.base()
-	if base.runtimeStats != nil {
+func Next(ctx context.Context, e exec.Executor, req *chunk.Chunk) error {
+	base := e.Base()
+	if base.RuntimeStats() != nil {
 		start := time.Now()
-		defer func() { base.runtimeStats.Record(time.Since(start), req.NumRows()) }()
+		defer func() { base.RuntimeStats().Record(time.Since(start), req.NumRows()) }()
 	}
-	sessVars := base.ctx.GetSessionVars()
+	sessVars := base.Ctx().GetSessionVars()
 	if atomic.LoadUint32(&sessVars.Killed) == 2 {
 		return exeerrors.ErrMaxExecTimeExceeded
 	}
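The package-level `Next` wrapper kept above is how every caller drives an executor, and exhaustion is signalled by an empty chunk rather than a sentinel error. A self-contained toy of the Open-Next-Close protocol (`chunk` here is a stand-in for `chunk.Chunk`):

```Go
package main

import (
	"context"
	"fmt"
)

type chunk struct{ rows int }

type executor interface {
	Open(context.Context) error
	Next(ctx context.Context, req *chunk) error
	Close() error
}

// oneShot produces a single non-empty batch, then reports exhaustion.
type oneShot struct{ done bool }

func (e *oneShot) Open(context.Context) error { return nil }
func (e *oneShot) Close() error               { return nil }
func (e *oneShot) Next(_ context.Context, req *chunk) error {
	if e.done {
		req.rows = 0 // empty chunk: no more rows
		return nil
	}
	e.done = true
	req.rows = 42
	return nil
}

// drain mirrors the consumer loops in this file: Open once, call Next until
// the returned chunk is empty, then Close.
func drain(ctx context.Context, e executor) error {
	if err := e.Open(ctx); err != nil {
		return err
	}
	defer e.Close()
	for {
		req := &chunk{}
		if err := e.Next(ctx, req); err != nil {
			return err
		}
		if req.rows == 0 {
			return nil
		}
		fmt.Println("got", req.rows, "rows")
	}
}

func main() { _ = drain(context.Background(), &oneShot{}) }
```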
@@ -358,7 +258,7 @@ func Next(ctx context.Context, e Executor, req *chunk.Chunk) error {
 // DDL jobs. These command currently by admin have the very similar struct and
 // operations, it should be a better idea to have them in the same struct.
 type CommandDDLJobsExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	cursor int
 	jobIDs []int64
@@ -371,19 +271,19 @@ type CommandDDLJobsExec struct {
 // just with different processes. And, it should not be called directly by the
 // Executor.
 func (e *CommandDDLJobsExec) Open(ctx context.Context) error {
-	// We want to use a global transaction to execute the admin command, so we don't use e.ctx here.
-	newSess, err := e.getSysSession()
+	// We want to use a global transaction to execute the admin command, so we don't use e.Ctx() here.
+	newSess, err := e.GetSysSession()
 	if err != nil {
 		return err
 	}
 	e.errs, err = e.execute(newSess, e.jobIDs)
-	e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), newSess)
+	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), newSess)
 	return err
 }
 
 // Next implements the Executor Next interface for Cancel/Pause/Resume
 func (e *CommandDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
+	req.GrowAndReset(e.MaxChunkSize())
 	if e.cursor >= len(e.jobIDs) {
 		return nil
 	}
@@ -417,7 +317,7 @@ type ResumeDDLJobsExec struct {
 
 // ShowNextRowIDExec represents a show the next row ID executor.
 type ShowNextRowIDExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	tblName *ast.TableName
 	done    bool
 }
@@ -428,14 +328,14 @@ func (e *ShowNextRowIDExec) Next(ctx context.Context, req *chunk.Chunk) error {
 	if e.done {
 		return nil
 	}
-	is := domain.GetDomain(e.ctx).InfoSchema()
+	is := domain.GetDomain(e.Ctx()).InfoSchema()
 	tbl, err := is.TableByName(e.tblName.Schema, e.tblName.Name)
 	if err != nil {
 		return err
 	}
 	tblMeta := tbl.Meta()
 
-	allocators := tbl.Allocators(e.ctx)
+	allocators := tbl.Allocators(e.Ctx())
 	for _, alloc := range allocators.Allocs {
 		nextGlobalID, err := alloc.NextGlobalAutoID()
 		if err != nil {
@@ -485,7 +385,7 @@ func (e *ShowNextRowIDExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 // ShowDDLExec represents a show DDL executor.
 type ShowDDLExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	ddlOwnerID string
 	selfID     string
@@ -533,7 +433,7 @@ func (e *ShowDDLExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 // ShowDDLJobsExec represent a show DDL jobs executor.
 type ShowDDLJobsExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	DDLJobRetriever
 
 	jobNumber int
@@ -691,7 +591,7 @@ func ts2Time(timestamp uint64, loc *time.Location) types.Time {
 // The jobs id that is given by 'admin show ddl job queries' statement,
 // only be searched in the latest 10 history jobs.
 type ShowDDLJobQueriesExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	cursor int
 	jobs   []*model.Job
@@ -702,10 +602,10 @@ type ShowDDLJobQueriesExec struct {
 func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
 	var err error
 	var jobs []*model.Job
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
-	session, err := e.getSysSession()
+	session, err := e.GetSysSession()
 	if err != nil {
 		return err
 	}
@@ -714,8 +614,8 @@ func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
 		return err
 	}
 	defer func() {
-		// releaseSysSession will rollbacks txn automatically.
-		e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
+		// ReleaseSysSession will roll back the txn automatically.
+		e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
 	}()
 	txn, err := session.Txn(true)
 	if err != nil {
@@ -755,7 +655,7 @@ func (e *ShowDDLJobQueriesExec) Open(ctx context.Context) error {
 
 // Next implements the Executor Next interface.
 func (e *ShowDDLJobQueriesExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
+	req.GrowAndReset(e.MaxChunkSize())
 	if e.cursor >= len(e.jobs) {
 		return nil
 	}
@@ -778,7 +678,7 @@ func (e *ShowDDLJobQueriesExec) Next(ctx context.Context, req *chunk.Chunk) erro
 // The jobs id that is given by 'admin show ddl job queries' statement,
 // can be searched within a specified range in history jobs using offset and limit.
 type ShowDDLJobQueriesWithRangeExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	cursor int
 	jobs   []*model.Job
@@ -790,10 +690,10 @@ type ShowDDLJobQueriesWithRangeExec struct {
 func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error {
 	var err error
 	var jobs []*model.Job
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
-	session, err := e.getSysSession()
+	session, err := e.GetSysSession()
 	if err != nil {
 		return err
 	}
@@ -802,8 +702,8 @@ func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error {
 		return err
 	}
 	defer func() {
-		// releaseSysSession will rollbacks txn automatically.
-		e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
+		// ReleaseSysSession will roll back the txn automatically.
+		e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), session)
 	}()
 	txn, err := session.Txn(true)
 	if err != nil {
@@ -847,7 +747,7 @@ func (e *ShowDDLJobQueriesWithRangeExec) Open(ctx context.Context) error {
 
 // Next implements the Executor Next interface.
 func (e *ShowDDLJobQueriesWithRangeExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
+	req.GrowAndReset(e.MaxChunkSize())
 	if e.cursor >= len(e.jobs) {
 		return nil
 	}
@@ -869,14 +769,14 @@ func (e *ShowDDLJobQueriesWithRangeExec) Next(ctx context.Context, req *chunk.Ch
 
 // Open implements the Executor Open interface.
 func (e *ShowDDLJobsExec) Open(ctx context.Context) error {
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
 	e.DDLJobRetriever.is = e.is
 	if e.jobNumber == 0 {
 		e.jobNumber = ddl.DefNumHistoryJobs
 	}
-	sess, err := e.getSysSession()
+	sess, err := e.GetSysSession()
 	if err != nil {
 		return err
 	}
@@ -896,7 +796,7 @@ func (e *ShowDDLJobsExec) Open(ctx context.Context) error {
 
 // Next implements the Executor Next interface.
 func (e *ShowDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
+	req.GrowAndReset(e.MaxChunkSize())
 	if (e.cursor - len(e.runningJobs)) >= e.jobNumber {
 		return nil
 	}
@@ -932,8 +832,8 @@ func (e *ShowDDLJobsExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 // Close implements the Executor Close interface.
 func (e *ShowDDLJobsExec) Close() error {
-	e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess)
-	return e.baseExecutor.Close()
+	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess)
+	return e.BaseExecutor.Close()
 }
 
 func getSchemaName(is infoschema.InfoSchema, id int64) string {
@@ -962,7 +862,7 @@ func getTableName(is infoschema.InfoSchema, id int64) string {
 // It is built from the "admin check table" statement, and it checks if the
 // index matches the records in the table.
 type CheckTableExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	dbName string
 	table  table.Table
@@ -977,7 +877,7 @@ type CheckTableExec struct {
 
 // Open implements the Executor Open interface.
 func (e *CheckTableExec) Open(ctx context.Context) error {
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
 	for _, src := range e.srcs {
@@ -1015,12 +915,12 @@ func (e *CheckTableExec) checkTableIndexHandle(ctx context.Context, idxInfo *mod
 }
 
 func (e *CheckTableExec) checkIndexHandle(ctx context.Context, src *IndexLookUpExecutor) error {
-	cols := src.schema.Columns
+	cols := src.Schema().Columns
 	retFieldTypes := make([]*types.FieldType, len(cols))
 	for i := range cols {
 		retFieldTypes[i] = cols[i].RetType
 	}
-	chk := chunk.New(retFieldTypes, e.initCap, e.maxChunkSize)
+	chk := chunk.New(retFieldTypes, e.InitCap(), e.MaxChunkSize())
 
 	var err error
 	for {
@@ -1056,7 +956,7 @@ func (e *CheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		}
 		idxNames = append(idxNames, idx.Name.O)
 	}
-	greater, idxOffset, err := admin.CheckIndicesCount(e.ctx, e.dbName, e.table.Meta().Name.O, idxNames)
+	greater, idxOffset, err := admin.CheckIndicesCount(e.Ctx(), e.dbName, e.table.Meta().Name.O, idxNames)
 	if err != nil {
 		// For admin check index statement, for speed up and compatibility, doesn't do below checks.
 		if e.checkIndex {
@@ -1131,13 +1031,13 @@ func (e *CheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 func (e *CheckTableExec) checkTableRecord(ctx context.Context, idxOffset int) error {
 	idxInfo := e.indexInfos[idxOffset]
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return err
 	}
 	if e.table.Meta().GetPartitionInfo() == nil {
 		idx := tables.NewIndex(e.table.Meta().ID, e.table.Meta(), idxInfo)
-		return admin.CheckRecordAndIndex(ctx, e.ctx, txn, e.table, idx)
+		return admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, e.table, idx)
 	}
 
 	info := e.table.Meta().GetPartitionInfo()
@@ -1145,7 +1045,7 @@ func (e *CheckTableExec) checkTableRecord(ctx context.Context, idxOffset int) er
 		pid := def.ID
 		partition := e.table.(table.PartitionedTable).GetPartition(pid)
 		idx := tables.NewIndex(def.ID, e.table.Meta(), idxInfo)
-		if err := admin.CheckRecordAndIndex(ctx, e.ctx, txn, partition, idx); err != nil {
+		if err := admin.CheckRecordAndIndex(ctx, e.Ctx(), txn, partition, idx); err != nil {
 			return errors.Trace(err)
 		}
 	}
@@ -1158,7 +1058,7 @@ func (e *CheckTableExec) checkTableRecord(ctx context.Context, idxOffset int) er
 // admin show slow top [internal | all] N
 // admin show slow recent N
 type ShowSlowExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	ShowSlow *ast.ShowSlow
 	result   []*domain.SlowQueryInfo
@@ -1167,11 +1067,11 @@ type ShowSlowExec struct {
 
 // Open implements the Executor Open interface.
 func (e *ShowSlowExec) Open(ctx context.Context) error {
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
 
-	dom := domain.GetDomain(e.ctx)
+	dom := domain.GetDomain(e.Ctx())
 	e.result = dom.ShowSlowQuery(e.ShowSlow)
 	return nil
 }
@@ -1183,7 +1083,7 @@ func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		return nil
 	}
 
-	for e.cursor < len(e.result) && req.NumRows() < e.maxChunkSize {
+	for e.cursor < len(e.result) && req.NumRows() < e.MaxChunkSize() {
 		slow := e.result[e.cursor]
 		req.AppendString(0, slow.SQL)
 		req.AppendTime(1, types.NewTime(types.FromGoTime(slow.Start), mysql.TypeTimestamp, types.MaxFsp))
@@ -1218,7 +1118,7 @@ func (e *ShowSlowExec) Next(ctx context.Context, req *chunk.Chunk) error {
 // when doing commit. If there is any key already locked by another transaction,
 // the transaction will rollback and retry.
 type SelectLockExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	Lock *ast.SelectLockInfo
 	keys []kv.Key
@@ -1264,13 +1164,13 @@ func (e *SelectLockExec) Open(ctx context.Context) error {
 			}
 		}
 	}
-	return e.baseExecutor.Open(ctx)
+	return e.BaseExecutor.Open(ctx)
 }
 
 // Next implements the Executor Next interface.
 func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
-	err := Next(ctx, e.children[0], req)
+	req.GrowAndReset(e.MaxChunkSize())
+	err := Next(ctx, e.Children(0), req)
 	if err != nil {
 		return err
 	}
@@ -1307,7 +1207,7 @@ func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		}
 		return nil
 	}
-	lockWaitTime := e.ctx.GetSessionVars().LockWaitTimeout
+	lockWaitTime := e.Ctx().GetSessionVars().LockWaitTimeout
 	if e.Lock.LockType == ast.SelectLockForUpdateNoWait {
 		lockWaitTime = tikvstore.LockNoWait
 	} else if e.Lock.LockType == ast.SelectLockForUpdateWaitN {
@@ -1315,13 +1215,13 @@ func (e *SelectLockExec) Next(ctx context.Context, req *chunk.Chunk) error {
 	}
 
 	for id := range e.tblID2Handle {
-		e.updateDeltaForTableID(id)
+		e.UpdateDeltaForTableID(id)
 	}
-	lockCtx, err := newLockCtx(e.ctx, lockWaitTime, len(e.keys))
+	lockCtx, err := newLockCtx(e.Ctx(), lockWaitTime, len(e.keys))
 	if err != nil {
 		return err
 	}
-	return doLockKeys(ctx, e.ctx, lockCtx, e.keys...)
+	return doLockKeys(ctx, e.Ctx(), lockCtx, e.keys...)
 }
 
 func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikvstore.LockCtx, error) {
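`SelectLockExec.Next` above maps the lock clause to a wait policy before any key is locked: plain FOR UPDATE uses the session's `LockWaitTimeout`, NOWAIT substitutes `tikvstore.LockNoWait`, and WAIT N applies a per-statement cap (its body is elided in the hunk). A toy version of that branch (constants and units here are illustrative, not TiKV's):

```Go
package main

import "fmt"

const lockNoWait = int64(-1) // illustrative stand-in for tikvstore.LockNoWait

type lockClause int

const (
	forUpdate lockClause = iota
	forUpdateNoWait
	forUpdateWaitN
)

func lockWaitTime(c lockClause, sessionTimeoutMS, waitNMS int64) int64 {
	switch c {
	case forUpdateNoWait:
		return lockNoWait // fail immediately instead of queueing on a conflict
	case forUpdateWaitN:
		return waitNMS // per-statement cap from WAIT N
	default:
		return sessionTimeoutMS // session-level lock wait timeout
	}
}

func main() {
	fmt.Println(lockWaitTime(forUpdateNoWait, 50000, 3000)) // -1
}
```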
type LimitExec struct { - baseExecutor + exec.BaseExecutor begin uint64 end uint64 @@ -1455,8 +1355,8 @@ func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { } for !e.meetFirstBatch { // transfer req's requiredRows to childResult and then adjust it in childResult - e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize) - err := Next(ctx, e.children[0], e.adjustRequiredRows(e.childResult)) + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) + err := Next(ctx, e.Children(0), e.adjustRequiredRows(e.childResult)) if err != nil { return err } @@ -1485,9 +1385,9 @@ func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { e.cursor += batchSize } e.childResult.Reset() - e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.maxChunkSize) + e.childResult = e.childResult.SetRequiredRows(req.RequiredRows(), e.MaxChunkSize()) e.adjustRequiredRows(e.childResult) - err := Next(ctx, e.children[0], e.childResult) + err := Next(ctx, e.Children(0), e.childResult) if err != nil { return err } @@ -1516,10 +1416,10 @@ func (e *LimitExec) Next(ctx context.Context, req *chunk.Chunk) error { // Open implements the Executor Open interface. func (e *LimitExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } - e.childResult = tryNewCacheChunk(e.children[0]) + e.childResult = tryNewCacheChunk(e.Children(0)) e.cursor = 0 e.meetFirstBatch = e.begin == 0 if span := opentracing.SpanFromContext(ctx); span != nil && span.Tracer() != nil { @@ -1533,7 +1433,7 @@ func (e *LimitExec) Close() error { start := time.Now() e.childResult = nil - err := e.baseExecutor.Close() + err := e.BaseExecutor.Close() elapsed := time.Since(start) if elapsed > time.Millisecond { @@ -1562,7 +1462,7 @@ func (e *LimitExec) adjustRequiredRows(chk *chunk.Chunk) *chunk.Chunk { limitRequired = chk.RequiredRows() } - return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.maxChunkSize) + return chk.SetRequiredRows(mathutil.Min(limitTotal, limitRequired), e.MaxChunkSize()) } func init() { @@ -1610,7 +1510,7 @@ func init() { // TableDualExec represents a dual table executor. type TableDualExec struct { - baseExecutor + exec.BaseExecutor // numDualRows can only be 0 or 1. numDualRows int @@ -1642,7 +1542,7 @@ func (e *TableDualExec) Next(ctx context.Context, req *chunk.Chunk) error { // SelectionExec represents a filter executor. type SelectionExec struct { - baseExecutor + exec.BaseExecutor batched bool filters []expression.Expression @@ -1656,7 +1556,7 @@ type SelectionExec struct { // Open implements the Executor Open interface. 
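As context for the SelectionExec methods that follow: when the filters are vectorizable, Next evaluates them once per child chunk into a reusable boolean selection vector and appends only the selected rows. A stripped-down sketch of that batched loop, with int rows and plain predicates standing in for chunks and expressions:

```Go
package sketch

// filterBatch evaluates every predicate over the batch and records, per
// row, whether all predicates passed. The selected slice is reused across
// batches, mirroring how e.selected is retained between Next calls.
func filterBatch(rows []int, preds []func(int) bool, selected []bool) []bool {
	selected = selected[:0]
	for _, r := range rows {
		keep := true
		for _, p := range preds {
			if !p(r) {
				keep = false
				break
			}
		}
		selected = append(selected, keep)
	}
	return selected
}
```

The unBatchedNext path in the diff is the row-at-a-time fallback for filters that cannot be vectorized.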
func (e *SelectionExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } failpoint.Inject("mockSelectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) { @@ -1671,10 +1571,10 @@ func (e *SelectionExec) open(ctx context.Context) error { if e.memTracker != nil { e.memTracker.Reset() } else { - e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker = memory.NewTracker(e.ID(), -1) } - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) - e.childResult = tryNewCacheChunk(e.children[0]) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) + e.childResult = tryNewCacheChunk(e.Children(0)) e.memTracker.Consume(e.childResult.MemoryUsage()) e.batched = expression.Vectorizable(e.filters) if e.batched { @@ -1692,12 +1592,12 @@ func (e *SelectionExec) Close() error { e.childResult = nil } e.selected = nil - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } // Next implements the Executor Next interface. func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if !e.batched { return e.unBatchedNext(ctx, req) @@ -1716,7 +1616,7 @@ func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { req.AppendRow(e.inputRow) } mSize := e.childResult.MemoryUsage() - err := Next(ctx, e.children[0], e.childResult) + err := Next(ctx, e.Children(0), e.childResult) e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) if err != nil { return err @@ -1725,7 +1625,7 @@ func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { if e.childResult.NumRows() == 0 { return nil } - e.selected, err = expression.VectorizedFilter(e.ctx, e.filters, e.inputIter, e.selected) + e.selected, err = expression.VectorizedFilter(e.Ctx(), e.filters, e.inputIter, e.selected) if err != nil { return err } @@ -1739,7 +1639,7 @@ func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) error { for { for ; e.inputRow != e.inputIter.End(); e.inputRow = e.inputIter.Next() { - selected, _, err := expression.EvalBool(e.ctx, e.filters, e.inputRow) + selected, _, err := expression.EvalBool(e.Ctx(), e.filters, e.inputRow) if err != nil { return err } @@ -1750,7 +1650,7 @@ func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) err } } mSize := e.childResult.MemoryUsage() - err := Next(ctx, e.children[0], e.childResult) + err := Next(ctx, e.Children(0), e.childResult) e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) if err != nil { return err @@ -1765,7 +1665,7 @@ func (e *SelectionExec) unBatchedNext(ctx context.Context, chk *chunk.Chunk) err // TableScanExec is a table scan executor without result fields. type TableScanExec struct { - baseExecutor + exec.BaseExecutor t table.Table columns []*model.ColumnInfo @@ -1775,15 +1675,15 @@ type TableScanExec struct { // Next implements the Executor Next interface. 
func (e *TableScanExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) return e.nextChunk4InfoSchema(ctx, req) } func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chunk) error { - chk.GrowAndReset(e.maxChunkSize) + chk.GrowAndReset(e.MaxChunkSize()) if e.virtualTableChunkList == nil { - e.virtualTableChunkList = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) - columns := make([]*table.Column, e.schema.Len()) + e.virtualTableChunkList = chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize()) + columns := make([]*table.Column, e.Schema().Len()) for i, colInfo := range e.columns { columns[i] = table.ToColumn(colInfo) } @@ -1791,7 +1691,7 @@ func (e *TableScanExec) nextChunk4InfoSchema(ctx context.Context, chk *chunk.Chu type tableIter interface { IterRecords(ctx context.Context, sctx sessionctx.Context, cols []*table.Column, fn table.RecordIterFunc) error } - err := (e.t.(tableIter)).IterRecords(ctx, e.ctx, columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) { + err := (e.t.(tableIter)).IterRecords(ctx, e.Ctx(), columns, func(_ kv.Handle, rec []types.Datum, cols []*table.Column) (bool, error) { mutableRow.SetDatums(rec...) e.virtualTableChunkList.AppendRow(mutableRow.ToRow()) return true, nil @@ -1819,14 +1719,14 @@ func (e *TableScanExec) Open(ctx context.Context) error { // MaxOneRowExec checks if the number of rows that a query returns is at maximum one. // It's built from subquery expression. type MaxOneRowExec struct { - baseExecutor + exec.BaseExecutor evaluated bool } // Open implements the Executor Open interface. func (e *MaxOneRowExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } e.evaluated = false @@ -1840,13 +1740,13 @@ func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.evaluated = true - err := Next(ctx, e.children[0], req) + err := Next(ctx, e.Children(0), req) if err != nil { return err } if num := req.NumRows(); num == 0 { - for i := range e.schema.Columns { + for i := range e.Schema().Columns { req.AppendNull(i) } return nil @@ -1854,8 +1754,8 @@ func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { return exeerrors.ErrSubqueryMoreThan1Row } - childChunk := tryNewCacheChunk(e.children[0]) - err = Next(ctx, e.children[0], childChunk) + childChunk := tryNewCacheChunk(e.Children(0)) + err = Next(ctx, e.Children(0), childChunk) if err != nil { return err } @@ -1886,7 +1786,7 @@ func (e *MaxOneRowExec) Next(ctx context.Context, req *chunk.Chunk) error { // |--------------------------| main thread | <---------------------+ // +-------------+ type UnionExec struct { - baseExecutor + exec.BaseExecutor concurrency int childIDChan chan int @@ -1933,22 +1833,22 @@ func (e *UnionExec) Open(ctx context.Context) error { } func (e *UnionExec) initialize(ctx context.Context) { - if e.concurrency > len(e.children) { - e.concurrency = len(e.children) + if e.concurrency > e.ChildrenLen() { + e.concurrency = e.ChildrenLen() } for i := 0; i < e.concurrency; i++ { - e.results = append(e.results, newFirstChunk(e.children[0])) + e.results = append(e.results, newFirstChunk(e.Children(0))) } e.resultPool = make(chan *unionWorkerResult, e.concurrency) e.resourcePools = make([]chan *chunk.Chunk, e.concurrency) - e.childIDChan = make(chan int, len(e.children)) + e.childIDChan = make(chan int, 
e.ChildrenLen()) for i := 0; i < e.concurrency; i++ { e.resourcePools[i] = make(chan *chunk.Chunk, 1) e.resourcePools[i] <- e.results[i] e.wg.Add(1) go e.resultPuller(ctx, i) } - for i := 0; i < len(e.children); i++ { + for i := 0; i < e.ChildrenLen(); i++ { e.childIDChan <- i } close(e.childIDChan) @@ -1976,7 +1876,7 @@ func (e *UnionExec) resultPuller(ctx context.Context, workerID int) { e.mu.maxOpenedChildID = childID } e.mu.Unlock() - if err := e.children[childID].Open(ctx); err != nil { + if err := e.Children(childID).Open(ctx); err != nil { result.err = err e.stopFetchData.Store(true) e.resultPool <- result @@ -1993,7 +1893,7 @@ func (e *UnionExec) resultPuller(ctx context.Context, workerID int) { return case result.chk = <-e.resourcePools[workerID]: } - result.err = Next(ctx, e.children[childID], result.chk) + result.err = Next(ctx, e.Children(childID), result.chk) if result.err == nil && result.chk.NumRows() == 0 { e.resourcePools[workerID] <- result.chk break @@ -2017,7 +1917,7 @@ func (e *UnionExec) resultPuller(ctx context.Context, workerID int) { // Next implements the Executor Next interface. func (e *UnionExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if !e.initialized { e.initialize(ctx) e.initialized = true @@ -2056,7 +1956,7 @@ func (e *UnionExec) Close() error { // promised to exit when reaching here (e.childIDChan has been closed). var firstErr error for i := 0; i <= e.mu.maxOpenedChildID; i++ { - if err := e.children[i].Close(); err != nil && firstErr == nil { + if err := e.Children(i).Close(); err != nil && firstErr == nil { firstErr = err } } @@ -2368,7 +2268,7 @@ func isWeakConsistencyRead(ctx sessionctx.Context, node ast.Node) bool { // index matches the records in the table. // It uses a new algorithm to check table data, which is faster than the old one (CheckTableExec). type FastCheckTableExec struct { - baseExecutor + exec.BaseExecutor dbName string table table.Table @@ -2382,7 +2282,7 @@ type FastCheckTableExec struct { // Open implements the Executor Open interface.
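Stepping back to the UnionExec hunks just above: initialize sets up a classic bounded fan-in, where child IDs go into a closed channel, a fixed number of pullers drain it, and all results funnel into one pool read by Next. A self-contained sketch of the same shape, minus chunk recycling and error plumbing (children are reduced to plain functions):

```Go
package sketch

import "sync"

// fanIn runs at most `concurrency` pullers over the children, in the same
// spirit as UnionExec.initialize: a shared, closed channel of child IDs
// hands out work, and a single results channel feeds the consumer.
func fanIn(children []func() int, concurrency int) <-chan int {
	if concurrency > len(children) {
		concurrency = len(children) // same clamp as in the diff above
	}
	childIDs := make(chan int, len(children))
	for i := range children {
		childIDs <- i
	}
	close(childIDs)

	results := make(chan int, concurrency)
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range childIDs {
				results <- children[id]() // open and drain one child at a time
			}
		}()
	}
	go func() { wg.Wait(); close(results) }()
	return results
}
```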
func (e *FastCheckTableExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } @@ -2445,7 +2345,7 @@ func (w *checkIndexWorker) HandleTask(task checkIndexTask) { w.e.err.CompareAndSwap(nil, &err) } - se, err := w.e.base().getSysSession() + se, err := w.e.Base().GetSysSession() if err != nil { trySaveErr(err) return @@ -2453,7 +2353,7 @@ func (w *checkIndexWorker) HandleTask(task checkIndexTask) { se.GetSessionVars().OptimizerUseInvisibleIndexes = true defer func() { se.GetSessionVars().OptimizerUseInvisibleIndexes = false - w.e.base().releaseSysSession(ctx, se) + w.e.Base().ReleaseSysSession(ctx, se) }() var pkCols []string @@ -2521,8 +2421,8 @@ func (w *checkIndexWorker) HandleTask(task checkIndexTask) { lookupCheckThreshold := int64(100) checkOnce := false - if w.e.ctx.GetSessionVars().SnapshotTS != 0 { - se.GetSessionVars().SnapshotTS = w.e.ctx.GetSessionVars().SnapshotTS + if w.e.Ctx().GetSessionVars().SnapshotTS != 0 { + se.GetSessionVars().SnapshotTS = w.e.Ctx().GetSessionVars().SnapshotTS defer func() { se.GetSessionVars().SnapshotTS = 0 }() @@ -2779,7 +2679,7 @@ func (w *checkIndexWorker) Close() { } func (e *FastCheckTableExec) createWorker() workerpool.Worker[checkIndexTask] { - return &checkIndexWorker{sctx: e.ctx, dbName: e.dbName, table: e.table, indexInfos: e.indexInfos, e: e} + return &checkIndexWorker{sctx: e.Ctx(), dbName: e.dbName, table: e.table, indexInfos: e.indexInfos, e: e} } // Next implements the Executor Next interface. @@ -2790,9 +2690,9 @@ func (e *FastCheckTableExec) Next(ctx context.Context, req *chunk.Chunk) error { defer func() { e.done = true }() // Here we need to check all indexes, including invisible indexes - e.ctx.GetSessionVars().OptimizerUseInvisibleIndexes = true + e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = true defer func() { - e.ctx.GetSessionVars().OptimizerUseInvisibleIndexes = false + e.Ctx().GetSessionVars().OptimizerUseInvisibleIndexes = false }() workerPool, err := workerpool.NewWorkerPool[checkIndexTask]("checkIndex", diff --git a/executor/executor_pkg_test.go b/executor/executor_pkg_test.go index 28c75c9abcaad..f5234f7ab75c0 100644 --- a/executor/executor_pkg_test.go +++ b/executor/executor_pkg_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" plannerutil "github.com/pingcap/tidb/planner/util" @@ -231,41 +232,41 @@ func TestSortSpillDisk(t *testing.T) { ndvs: cas.ndvs, } dataSource := buildMockDataSource(opt) - exec := &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), + exe := &SortExec{ + BaseExecutor: exec.NewBaseExecutor(cas.ctx, dataSource.Schema(), 0, dataSource), ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, + schema: dataSource.Schema(), } for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) + exe.ByItems = append(exe.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) } tmpCtx := context.Background() - chk := newFirstChunk(exec) + chk := newFirstChunk(exe) dataSource.prepareChunks() - err := exec.Open(tmpCtx) + err := exe.Open(tmpCtx) require.NoError(t, err) for { - err = exec.Next(tmpCtx, chk) + err = exe.Next(tmpCtx, chk) require.NoError(t, err) if chk.NumRows() == 0 { break } } // Test only 1
partition and all data in memory. - require.Len(t, exec.partitionList, 1) - require.Equal(t, false, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() + require.Len(t, exe.partitionList, 1) + require.Equal(t, false, exe.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exe.partitionList[0].NumRow()) + err = exe.Close() require.NoError(t, err) ctx.GetSessionVars().MemTracker = memory.NewTracker(memory.LabelForSession, 1) ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(memory.LabelForSQLText, -1) ctx.GetSessionVars().StmtCtx.MemTracker.AttachTo(ctx.GetSessionVars().MemTracker) dataSource.prepareChunks() - err = exec.Open(tmpCtx) + err = exe.Open(tmpCtx) require.NoError(t, err) for { - err = exec.Next(tmpCtx, chk) + err = exe.Next(tmpCtx, chk) require.NoError(t, err) if chk.NumRows() == 0 { break @@ -275,39 +276,39 @@ func TestSortSpillDisk(t *testing.T) { // Now spilling is in parallel. // Maybe the second add() will be called before spilling, depending on // Golang goroutine scheduling. So the result has two possibilities. - if len(exec.partitionList) == 2 { - require.Len(t, exec.partitionList, 2) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, true, exec.partitionList[1].AlreadySpilledSafeForTest()) - require.Equal(t, 1024, exec.partitionList[0].NumRow()) - require.Equal(t, 1024, exec.partitionList[1].NumRow()) + if len(exe.partitionList) == 2 { + require.Len(t, exe.partitionList, 2) + require.Equal(t, true, exe.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, true, exe.partitionList[1].AlreadySpilledSafeForTest()) + require.Equal(t, 1024, exe.partitionList[0].NumRow()) + require.Equal(t, 1024, exe.partitionList[1].NumRow()) } else { - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) + require.Len(t, exe.partitionList, 1) + require.Equal(t, true, exe.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exe.partitionList[0].NumRow()) } - err = exec.Close() + err = exe.Close() require.NoError(t, err) ctx.GetSessionVars().MemTracker = memory.NewTracker(memory.LabelForSession, 28000) ctx.GetSessionVars().StmtCtx.MemTracker = memory.NewTracker(memory.LabelForSQLText, -1) ctx.GetSessionVars().StmtCtx.MemTracker.AttachTo(ctx.GetSessionVars().MemTracker) dataSource.prepareChunks() - err = exec.Open(tmpCtx) + err = exe.Open(tmpCtx) require.NoError(t, err) for { - err = exec.Next(tmpCtx, chk) + err = exe.Next(tmpCtx, chk) require.NoError(t, err) if chk.NumRows() == 0 { break } } // Test only 1 partition but spill disk. - require.Len(t, exec.partitionList, 1) - require.Equal(t, true, exec.partitionList[0].AlreadySpilledSafeForTest()) - require.Equal(t, 2048, exec.partitionList[0].NumRow()) - err = exec.Close() + require.Len(t, exe.partitionList, 1) + require.Equal(t, true, exe.partitionList[0].AlreadySpilledSafeForTest()) + require.Equal(t, 2048, exe.partitionList[0].NumRow()) + err = exe.Close() require.NoError(t, err) // Test partition nums.
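The spill phases of the test above are driven purely by tracker quotas: a session-level tracker with a small byte budget is created, the statement tracker attaches to it, and the sort operator's Consume calls eventually overflow the budget and trigger spilling. A toy version of that parent-chain accounting, with hypothetical types (the real implementation is util/memory.Tracker with registered actions):

```Go
package sketch

// tracker is a minimal stand-in for a hierarchical memory tracker:
// consumption propagates to the parent, and crossing the quota fires
// an action (for SortExec, spilling partitions to disk).
type tracker struct {
	quota    int64 // <= 0 means no limit
	consumed int64
	parent   *tracker
	onExceed func()
}

func (t *tracker) attachTo(p *tracker) { t.parent = p }

func (t *tracker) consume(n int64) {
	for cur := t; cur != nil; cur = cur.parent {
		cur.consumed += n
		if cur.quota > 0 && cur.consumed > cur.quota && cur.onExceed != nil {
			cur.onExceed()
		}
	}
}
```

That is why the test's phases — no effective quota, a 1-byte quota, and a 28000-byte quota — yield an in-memory partition list, eagerly spilled partitions, and a single spilled partition, respectively.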
@@ -326,28 +327,28 @@ func TestSortSpillDisk(t *testing.T) { ndvs: cas.ndvs, } dataSource = buildMockDataSource(opt) - exec = &SortExec{ - baseExecutor: newBaseExecutor(cas.ctx, dataSource.schema, 0, dataSource), + exe = &SortExec{ + BaseExecutor: exec.NewBaseExecutor(cas.ctx, dataSource.Schema(), 0, dataSource), ByItems: make([]*plannerutil.ByItems, 0, len(cas.orderByIdx)), - schema: dataSource.schema, + schema: dataSource.Schema(), } for _, idx := range cas.orderByIdx { - exec.ByItems = append(exec.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) + exe.ByItems = append(exe.ByItems, &plannerutil.ByItems{Expr: cas.columns()[idx]}) } tmpCtx = context.Background() - chk = newFirstChunk(exec) + chk = newFirstChunk(exe) dataSource.prepareChunks() - err = exec.Open(tmpCtx) + err = exe.Open(tmpCtx) require.NoError(t, err) for { - err = exec.Next(tmpCtx, chk) + err = exe.Next(tmpCtx, chk) require.NoError(t, err) if chk.NumRows() == 0 { break } } // Don't spill too many partitions. - require.True(t, len(exec.partitionList) <= 4) - err = exec.Close() + require.True(t, len(exe.partitionList) <= 4) + err = exe.Close() require.NoError(t, err) } diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index c3ac762050d24..983c1e2dbe105 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" "github.com/pingcap/tidb/parser/ast" @@ -41,7 +42,7 @@ import ( ) type requiredRowsDataSource struct { - baseExecutor + exec.BaseExecutor totalRows int count int ctx sessionctx.Context @@ -67,7 +68,7 @@ func newRequiredRowsDataSource(ctx sessionctx.Context, totalRows int, expectedRo cols[i] = &expression.Column{Index: i, RetType: retTypes[i]} } schema := expression.NewSchema(cols...) 
- baseExec := newBaseExecutor(ctx, schema, 0) + baseExec := exec.NewBaseExecutor(ctx, schema, 0) return &requiredRowsDataSource{baseExec, totalRows, 0, ctx, expectedRowsRet, 0, defaultGenerator} } @@ -180,25 +181,25 @@ func TestLimitRequiredRows(t *testing.T) { sctx := defaultCtx() ctx := context.Background() ds := newRequiredRowsDataSource(sctx, testCase.totalRows, testCase.expectedRowsDS) - exec := buildLimitExec(sctx, ds, testCase.limitOffset, testCase.limitCount) - require.NoError(t, exec.Open(ctx)) - chk := newFirstChunk(exec) + exe := buildLimitExec(sctx, ds, testCase.limitOffset, testCase.limitCount) + require.NoError(t, exe.Open(ctx)) + chk := newFirstChunk(exe) for i := range testCase.requiredRows { chk.SetRequiredRows(testCase.requiredRows[i], sctx.GetSessionVars().MaxChunkSize) - require.NoError(t, exec.Next(ctx, chk)) + require.NoError(t, exe.Next(ctx, chk)) require.Equal(t, testCase.expectedRows[i], chk.NumRows()) } - require.NoError(t, exec.Close()) + require.NoError(t, exe.Close()) require.NoError(t, ds.checkNumNextCalled()) } } -func buildLimitExec(ctx sessionctx.Context, src Executor, offset, count int) Executor { +func buildLimitExec(ctx sessionctx.Context, src exec.Executor, offset, count int) exec.Executor { n := mathutil.Min(count, ctx.GetSessionVars().MaxChunkSize) - base := newBaseExecutor(ctx, src.Schema(), 0, src) - base.initCap = n + base := exec.NewBaseExecutor(ctx, src.Schema(), 0, src) + base.SetInitCap(n) limitExec := &LimitExec{ - baseExecutor: base, + BaseExecutor: base, begin: uint64(offset), end: uint64(offset + count), } @@ -277,9 +278,9 @@ func TestSortRequiredRows(t *testing.T) { } } -func buildSortExec(sctx sessionctx.Context, byItems []*util.ByItems, src Executor) Executor { +func buildSortExec(sctx sessionctx.Context, byItems []*util.ByItems, src exec.Executor) exec.Executor { sortExec := SortExec{ - baseExecutor: newBaseExecutor(sctx, src.Schema(), 0, src), + BaseExecutor: exec.NewBaseExecutor(sctx, src.Schema(), 0, src), ByItems: byItems, schema: src.Schema(), } @@ -384,9 +385,9 @@ func TestTopNRequiredRows(t *testing.T) { } } -func buildTopNExec(ctx sessionctx.Context, offset, count int, byItems []*util.ByItems, src Executor) Executor { +func buildTopNExec(ctx sessionctx.Context, offset, count int, byItems []*util.ByItems, src exec.Executor) exec.Executor { sortExec := SortExec{ - baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src), + BaseExecutor: exec.NewBaseExecutor(ctx, src.Schema(), 0, src), ByItems: byItems, schema: src.Schema(), } @@ -477,9 +478,9 @@ func TestSelectionRequiredRows(t *testing.T) { } } -func buildSelectionExec(ctx sessionctx.Context, filters []expression.Expression, src Executor) Executor { +func buildSelectionExec(ctx sessionctx.Context, filters []expression.Expression, src exec.Executor) exec.Executor { return &SelectionExec{ - baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src), + BaseExecutor: exec.NewBaseExecutor(ctx, src.Schema(), 0, src), filters: filters, } } @@ -595,9 +596,9 @@ func TestProjectionParallelRequiredRows(t *testing.T) { } } -func buildProjectionExec(ctx sessionctx.Context, exprs []expression.Expression, src Executor, numWorkers int) Executor { +func buildProjectionExec(ctx sessionctx.Context, exprs []expression.Expression, src exec.Executor, numWorkers int) exec.Executor { return &ProjectionExec{ - baseExecutor: newBaseExecutor(ctx, src.Schema(), 0, src), + BaseExecutor: exec.NewBaseExecutor(ctx, src.Schema(), 0, src), numWorkers: int64(numWorkers), evaluatorSuit: 
expression.NewEvaluatorSuite(exprs, false), } @@ -827,7 +828,7 @@ func TestVecGroupChecker4GroupCount(t *testing.T) { } } -func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, innerSrc, outerSrc Executor) Executor { +func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, innerSrc, outerSrc exec.Executor) exec.Executor { if joinType == plannercore.RightOuterJoin { innerSrc, outerSrc = outerSrc, innerSrc } @@ -852,10 +853,10 @@ func buildMergeJoinExec(ctx sessionctx.Context, joinType plannercore.JoinType, i type mockPlan struct { MockPhysicalPlan - exec Executor + exec exec.Executor } -func (mp *mockPlan) GetExecutor() Executor { +func (mp *mockPlan) GetExecutor() exec.Executor { return mp.exec } diff --git a/executor/explain.go b/executor/explain.go index ed55ebcc5c75b..8683d141ae538 100644 --- a/executor/explain.go +++ b/executor/explain.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/execdetails" @@ -40,10 +41,10 @@ import ( // ExplainExec represents an explain executor. type ExplainExec struct { - baseExecutor + exec.BaseExecutor explain *core.Explain - analyzeExec Executor + analyzeExec exec.Executor executed bool ruRuntimeStats *clientutil.RURuntimeStats rows [][]string @@ -78,7 +79,7 @@ func (e *ExplainExec) Next(ctx context.Context, req *chunk.Chunk) error { } } - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if e.cursor >= len(e.rows) { return nil } @@ -105,7 +106,7 @@ func (e *ExplainExec) executeAnalyzeExec(ctx context.Context) (err error) { } } }() - if minHeapInUse, alarmRatio := e.ctx.GetSessionVars().MemoryDebugModeMinHeapInUse, e.ctx.GetSessionVars().MemoryDebugModeAlarmRatio; minHeapInUse != 0 && alarmRatio != 0 { + if minHeapInUse, alarmRatio := e.Ctx().GetSessionVars().MemoryDebugModeMinHeapInUse, e.Ctx().GetSessionVars().MemoryDebugModeAlarmRatio; minHeapInUse != 0 && alarmRatio != 0 { memoryDebugModeCtx, cancel := context.WithCancel(ctx) waitGroup := sync.WaitGroup{} waitGroup.Add(1) @@ -119,7 +120,7 @@ func (e *ExplainExec) executeAnalyzeExec(ctx context.Context) (err error) { minHeapInUse: mathutil.Abs(minHeapInUse), alarmRatio: alarmRatio, autoGC: minHeapInUse > 0, - memTracker: e.ctx.GetSessionVars().MemTracker, + memTracker: e.Ctx().GetSessionVars().MemTracker, wg: &waitGroup, }).run() } @@ -134,7 +135,7 @@ func (e *ExplainExec) executeAnalyzeExec(ctx context.Context) (err error) { } // Register the RU runtime stats to the runtime stats collection after the analyze executor has been executed. if e.analyzeExec != nil && e.executed { - if coll := e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl; coll != nil { + if coll := e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl; coll != nil { coll.RegisterStats(e.explain.TargetPlan.ID(), &ruRuntimeStats{e.ruRuntimeStats}) } } @@ -156,7 +157,7 @@ func (e *ExplainExec) generateExplainInfo(ctx context.Context) (rows [][]string, // function and then commit transaction if needed. // Otherwise, in autocommit transaction, the table record change of analyze executor(insert/update/delete...) // will not be committed. 
-func (e *ExplainExec) getAnalyzeExecToExecutedNoDelay() Executor { +func (e *ExplainExec) getAnalyzeExecToExecutedNoDelay() exec.Executor { if e.analyzeExec != nil && !e.executed && e.analyzeExec.Schema().Len() == 0 { e.executed = true return e.analyzeExec diff --git a/executor/explain_unit_test.go b/executor/explain_unit_test.go index b1aacb6d5cb7f..1b3c50d656ed6 100644 --- a/executor/explain_unit_test.go +++ b/executor/explain_unit_test.go @@ -19,6 +19,7 @@ import ( "errors" "testing" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/variable" @@ -29,11 +30,11 @@ import ( ) var ( - _ Executor = &mockErrorOperator{} + _ exec.Executor = &mockErrorOperator{} ) type mockErrorOperator struct { - baseExecutor + exec.BaseExecutor toPanic bool closed bool } @@ -67,9 +68,9 @@ func TestExplainAnalyzeInvokeNextAndClose(t *testing.T) { ctx.GetSessionVars().InitChunkSize = variable.DefInitChunkSize ctx.GetSessionVars().MaxChunkSize = variable.DefMaxChunkSize schema := expression.NewSchema(getColumns()...) - baseExec := newBaseExecutor(ctx, schema, 0) + baseExec := exec.NewBaseExecutor(ctx, schema, 0) explainExec := &ExplainExec{ - baseExecutor: baseExec, + BaseExecutor: baseExec, explain: nil, } // mockErrorOperator returns errors @@ -82,7 +83,7 @@ func TestExplainAnalyzeInvokeNextAndClose(t *testing.T) { // mockErrorOperator panic explainExec = &ExplainExec{ - baseExecutor: baseExec, + BaseExecutor: baseExec, explain: nil, } mockOpr = mockErrorOperator{baseExec, true, false} diff --git a/executor/foreign_key.go b/executor/foreign_key.go index 658808e714de2..df32b95210f81 100644 --- a/executor/foreign_key.go +++ b/executor/foreign_key.go @@ -22,6 +22,7 @@ import ( "time" "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -701,7 +702,7 @@ func (fkc *FKCascadeExec) onUpdateRow(sc *stmtctx.StatementContext, oldRow, newR return nil } -func (fkc *FKCascadeExec) buildExecutor(ctx context.Context) (Executor, error) { +func (fkc *FKCascadeExec) buildExecutor(ctx context.Context) (exec.Executor, error) { p, err := fkc.buildFKCascadePlan(ctx) if err != nil || p == nil { return nil, err diff --git a/executor/grant.go b/executor/grant.go index 8022542a08edf..6193982594f8a 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -45,12 +46,12 @@ import ( * See https://dev.mysql.com/doc/refman/5.7/en/grant.html ************************************************************************************/ var ( - _ Executor = (*GrantExec)(nil) + _ exec.Executor = (*GrantExec)(nil) ) // GrantExec executes GrantStmt. 
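GrantExec.Next, whose diff follows, runs every privilege-table write on a dedicated internal session inside a single transaction: begin, mutate the mysql.* tables, commit, with the deferred cleanup rolling back unless a commit flag was set. A hedged sketch of that commit-flag pattern (the session interface here is invented for the example; the real code drives sqlexec.SQLExecutor.ExecuteInternal):

```Go
package sketch

import (
	"context"
	"fmt"
)

// sqlRunner is a stand-in for the internal session's SQL executor.
type sqlRunner interface {
	Exec(ctx context.Context, sql string) error
}

// runInTxn mirrors GrantExec.Next's isCommit bookkeeping: the deferred
// rollback is a no-op only if every statement and the commit succeeded.
func runInTxn(ctx context.Context, se sqlRunner, stmts []string) (err error) {
	if err = se.Exec(ctx, "begin"); err != nil {
		return err
	}
	committed := false
	defer func() {
		if committed {
			return
		}
		if rbErr := se.Exec(ctx, "rollback"); rbErr != nil {
			err = fmt.Errorf("rollback failed: %v (original error: %w)", rbErr, err)
		}
	}()
	for _, s := range stmts {
		if err = se.Exec(ctx, s); err != nil {
			return err
		}
	}
	if err = se.Exec(ctx, "commit"); err != nil {
		return err
	}
	committed = true
	return nil
}
```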
type GrantExec struct { - baseExecutor + exec.BaseExecutor Privs []*ast.PrivElem ObjectType ast.ObjectTypeType @@ -73,7 +74,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { dbName := e.Level.DBName if len(dbName) == 0 { - dbName = e.ctx.GetSessionVars().CurrentDB + dbName = e.Ctx().GetSessionVars().CurrentDB } // For table & column level, check whether table exists and privilege is valid @@ -91,7 +92,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } } dbNameStr := model.NewCIStr(dbName) - schema := e.ctx.GetInfoSchema().(infoschema.InfoSchema) + schema := e.Ctx().GetInfoSchema().(infoschema.InfoSchema) tbl, err := schema.TableByName(dbNameStr, model.NewCIStr(e.Level.TableName)) // Allow GRANT on non-existent table with at least create privilege, see issue #28533 #29268 if err != nil { @@ -123,15 +124,15 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } // Commit the old transaction, like DDL. - if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil { + if err := sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil { return err } - defer func() { e.ctx.GetSessionVars().SetInTxn(false) }() + defer func() { e.Ctx().GetSessionVars().SetInTxn(false) }() // Create internal session to start internal transaction. isCommit := false - internalSession, err := e.getSysSession() - internalSession.GetSessionVars().User = e.ctx.GetSessionVars().User + internalSession, err := e.GetSysSession() + internalSession.GetSessionVars().User = e.Ctx().GetSessionVars().User if err != nil { return err } @@ -142,7 +143,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalCtx, internalSession) + e.ReleaseSysSession(internalCtx, internalSession) }() _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") @@ -152,11 +153,11 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { // Check which users do not exist. for _, user := range e.Users { - exists, err := userExists(ctx, e.ctx, user.User.Username, user.User.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.User.Username, user.User.Hostname) if err != nil { return err } - if !exists && e.ctx.GetSessionVars().SQLMode.HasNoAutoCreateUserMode() { + if !exists && e.Ctx().GetSessionVars().SQLMode.HasNoAutoCreateUserMode() { return exeerrors.ErrCantCreateUserWithGrant } else if !exists { // This code path only applies if mode NO_AUTO_CREATE_USER is unset. @@ -244,7 +245,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { return err } isCommit = true - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func containsNonDynamicPriv(privList []*ast.PrivElem) bool { @@ -301,7 +302,7 @@ func checkAndInitTablePriv(ctx sessionctx.Context, dbName, tblName string, is in // checkAndInitColumnPriv checks if column scope privilege entry exists in mysql.Columns_priv. // If it does not exist, insert a new one.
func (e *GrantExec) checkAndInitColumnPriv(user string, host string, cols []*ast.ColumnName, internalSession sessionctx.Context) error { - dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) + dbName, tbl, err := getTargetSchemaAndTable(e.Ctx(), e.Level.DBName, e.Level.TableName, e.is) if err != nil { return err } @@ -477,7 +478,7 @@ func (e *GrantExec) grantDynamicPriv(privName string, user *ast.UserSpec, intern if e.Level.Level != ast.GrantLevelGlobal { // DYNAMIC can only be *.* return exeerrors.ErrIllegalPrivilegeLevel.GenWithStackByArgs(privName) } - if !privilege.GetPrivilegeManager(e.ctx).IsDynamicPrivilege(privName) { + if !privilege.GetPrivilegeManager(e.Ctx()).IsDynamicPrivilege(privName) { // In GRANT context, MySQL returns a syntax error if the privilege has not been registered with the server: // ERROR 1149 (42000): You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use // But in REVOKE context, it returns a warning ErrDynamicPrivilegeNotRegistered. It is not strictly compatible, @@ -518,7 +519,7 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna dbName := e.Level.DBName if len(dbName) == 0 { - dbName = e.ctx.GetSessionVars().CurrentDB + dbName = e.Ctx().GetSessionVars().CurrentDB } sql := new(strings.Builder) @@ -538,7 +539,7 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error { dbName := e.Level.DBName if len(dbName) == 0 { - dbName = e.ctx.GetSessionVars().CurrentDB + dbName = e.Ctx().GetSessionVars().CurrentDB } tblName := e.Level.TableName @@ -557,7 +558,7 @@ func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, inte // grantColumnLevel manipulates mysql.tables_priv table. func (e *GrantExec) grantColumnLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error { - dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) + dbName, tbl, err := getTargetSchemaAndTable(e.Ctx(), e.Level.DBName, e.Level.TableName, e.is) if err != nil { return err } diff --git a/executor/import_into.go b/executor/import_into.go index 92f16fb13f611..71483f8c0ebcb 100644 --- a/executor/import_into.go +++ b/executor/import_into.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/disttask/importinto" "github.com/pingcap/tidb/executor/asyncloaddata" "github.com/pingcap/tidb/executor/importer" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" @@ -54,7 +55,7 @@ const unknownImportedRowCount = -1 // ImportIntoExec represents an IMPORT INTO executor.
type ImportIntoExec struct { - baseExecutor + exec.BaseExecutor userSctx sessionctx.Context importPlan *importer.Plan controller *importer.LoadDataController @@ -64,10 +65,10 @@ type ImportIntoExec struct { } var ( - _ Executor = (*ImportIntoExec)(nil) + _ exec.Executor = (*ImportIntoExec)(nil) ) -func newImportIntoExec(b baseExecutor, userSctx sessionctx.Context, plan *plannercore.ImportInto, tbl table.Table) ( +func newImportIntoExec(b exec.BaseExecutor, userSctx sessionctx.Context, plan *plannercore.ImportInto, tbl table.Table) ( *ImportIntoExec, error) { importPlan, err := importer.NewImportPlan(userSctx, plan, tbl) if err != nil { @@ -79,7 +80,7 @@ func newImportIntoExec(b baseExecutor, userSctx sessionctx.Context, plan *planne return nil, err } return &ImportIntoExec{ - baseExecutor: b, + BaseExecutor: b, userSctx: userSctx, importPlan: importPlan, controller: controller, @@ -89,7 +90,7 @@ func newImportIntoExec(b baseExecutor, userSctx sessionctx.Context, plan *planne // Next implements the Executor Next interface. func (e *ImportIntoExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) ctx = kv.WithInternalSourceType(ctx, kv.InternalImportInto) if e.dataFilled { // need to return an empty req to indicate all results have been written @@ -176,7 +177,7 @@ func (e *ImportIntoExec) fillJobInfo(ctx context.Context, jobID int64, req *chun if err = globalTaskManager.WithNewSession(func(se sessionctx.Context) error { sqlExec := se.(sqlexec.SQLExecutor) var err2 error - info, err2 = importer.GetJob(ctx, sqlExec, jobID, e.ctx.GetSessionVars().User.String(), false) + info, err2 = importer.GetJob(ctx, sqlExec, jobID, e.Ctx().GetSessionVars().User.String(), false) return err2 }); err != nil { return err @@ -228,13 +229,13 @@ func (e *ImportIntoExec) doImport(ctx context.Context, se sessionctx.Context, di // ImportIntoActionExec represents an IMPORT INTO action executor. type ImportIntoActionExec struct { - baseExecutor + exec.BaseExecutor tp ast.ImportIntoActionTp jobID int64 } var ( - _ Executor = (*ImportIntoActionExec)(nil) + _ exec.Executor = (*ImportIntoActionExec)(nil) ) // Next implements the Executor Next interface. @@ -242,8 +243,8 @@ func (e *ImportIntoActionExec) Next(ctx context.Context, _ *chunk.Chunk) error { ctx = kv.WithInternalSourceType(ctx, kv.InternalImportInto) var hasSuperPriv bool - if pm := privilege.GetPrivilegeManager(e.ctx); pm != nil { - hasSuperPriv = pm.RequestVerification(e.ctx.GetSessionVars().ActiveRoles, "", "", "", mysql.SuperPriv) + if pm := privilege.GetPrivilegeManager(e.Ctx()); pm != nil { + hasSuperPriv = pm.RequestVerification(e.Ctx().GetSessionVars().ActiveRoles, "", "", "", mysql.SuperPriv) } // we use sessionCtx from GetTaskManager, user ctx might not have enough privileges.
globalTaskManager, err := fstorage.GetTaskManager() @@ -263,7 +264,7 @@ func (e *ImportIntoActionExec) checkPrivilegeAndStatus(ctx context.Context, mana if err := manager.WithNewSession(func(se sessionctx.Context) error { exec := se.(sqlexec.SQLExecutor) var err2 error - info, err2 = importer.GetJob(ctx, exec, e.jobID, e.ctx.GetSessionVars().User.String(), hasSuperPriv) + info, err2 = importer.GetJob(ctx, exec, e.jobID, e.Ctx().GetSessionVars().User.String(), hasSuperPriv) return err2 }); err != nil { return err diff --git a/executor/index_advise.go b/executor/index_advise.go index c5e71de08ad26..42b0a4209b8e4 100644 --- a/executor/index_advise.go +++ b/executor/index_advise.go @@ -19,6 +19,7 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/planner/core" @@ -29,7 +30,7 @@ import ( // IndexAdviseExec represents an index advise executor. type IndexAdviseExec struct { - baseExecutor + exec.BaseExecutor IsLocal bool indexAdviseInfo *IndexAdviseInfo @@ -47,11 +48,11 @@ func (e *IndexAdviseExec) Next(ctx context.Context, req *chunk.Chunk) error { return errors.New("Index Advise: don't support advise index for SQL terminated by nil") } - if val := e.ctx.Value(IndexAdviseVarKey); val != nil { - e.ctx.SetValue(IndexAdviseVarKey, nil) + if val := e.Ctx().Value(IndexAdviseVarKey); val != nil { + e.Ctx().SetValue(IndexAdviseVarKey, nil) return errors.New("Index Advise: previous index advise option isn't closed normally") } - e.ctx.SetValue(IndexAdviseVarKey, e.indexAdviseInfo) + e.Ctx().SetValue(IndexAdviseVarKey, e.indexAdviseInfo) return nil } diff --git a/executor/index_lookup_hash_join.go b/executor/index_lookup_hash_join.go index 2224dcb2a1583..e850f472c9c9c 100644 --- a/executor/index_lookup_hash_join.go +++ b/executor/index_lookup_hash_join.go @@ -124,19 +124,19 @@ type indexHashJoinTask struct { // Open implements the IndexNestedLoopHashJoin Executor interface. func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error { - err := e.children[0].Open(ctx) + err := e.Children(0).Open(ctx) if err != nil { return err } if e.memTracker != nil { e.memTracker.Reset() } else { - e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker = memory.NewTracker(e.ID(), -1) } - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) e.cancelFunc = nil e.innerPtrBytes = make([][]byte, 0, 8) - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { e.stats = &indexLookUpJoinRuntimeStats{} } e.finished.Store(false) @@ -144,7 +144,7 @@ func (e *IndexNestedLoopHashJoin) Open(ctx context.Context) error { } func (e *IndexNestedLoopHashJoin) startWorkers(ctx context.Context) { - concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency() + concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency() if e.stats != nil { e.stats.concurrency = concurrency } @@ -292,7 +292,7 @@ func (e *IndexNestedLoopHashJoin) isDryUpTasks(ctx context.Context) bool { // Close implements the IndexNestedLoopHashJoin Executor interface.
func (e *IndexNestedLoopHashJoin) Close() error { if e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } if e.cancelFunc != nil { e.cancelFunc() @@ -311,7 +311,7 @@ func (e *IndexNestedLoopHashJoin) Close() error { e.joinChkResourceCh = nil e.finished.Store(false) e.prepared = false - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } func (ow *indexHashJoinOuterWorker) run(ctx context.Context) { @@ -395,10 +395,10 @@ func (e *IndexNestedLoopHashJoin) newOuterWorker(innerCh chan *indexHashJoinTask ow := &indexHashJoinOuterWorker{ outerWorker: outerWorker{ outerCtx: e.outerCtx, - ctx: e.ctx, - executor: e.children[0], + ctx: e.Ctx(), + executor: e.Children(0), batchSize: 32, - maxBatchSize: e.ctx.GetSessionVars().IndexJoinBatchSize, + maxBatchSize: e.Ctx().GetSessionVars().IndexJoinBatchSize, parentMemTracker: e.memTracker, lookup: &e.IndexLookUpJoin, }, @@ -423,8 +423,8 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, innerWorker: innerWorker{ innerCtx: e.innerCtx, outerCtx: e.outerCtx, - ctx: e.ctx, - executorChk: e.ctx.GetSessionVars().GetNewChunkWithCapacity(e.innerCtx.rowTypes, e.maxChunkSize, e.maxChunkSize, e.AllocPool), + ctx: e.Ctx(), + executorChk: e.Ctx().GetSessionVars().GetNewChunkWithCapacity(e.innerCtx.rowTypes, e.MaxChunkSize(), e.MaxChunkSize(), e.AllocPool), indexRanges: copiedRanges, keyOff2IdxOff: e.keyOff2IdxOff, stats: innerStats, @@ -435,9 +435,9 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, joiner: e.joiners[workerID], joinChkResourceCh: e.joinChkResourceCh[workerID], resultCh: e.resultCh, - matchedOuterPtrs: make([]chunk.RowPtr, 0, e.maxChunkSize), + matchedOuterPtrs: make([]chunk.RowPtr, 0, e.MaxChunkSize()), joinKeyBuf: make([]byte, 1), - outerRowStatus: make([]outerRowStatusFlag, 0, e.maxChunkSize), + outerRowStatus: make([]outerRowStatusFlag, 0, e.MaxChunkSize()), rowIter: chunk.NewIterator4Slice([]chunk.Row{}).(*chunk.Iterator4Slice), } iw.memTracker.AttachTo(e.memTracker) @@ -446,7 +446,7 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, // memory usage of inner worker will be reset at the end of iw.handleTask. // While the life cycle of this memory consumption exists throughout the // whole active period of the inner worker. - e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges))) + e.Ctx().GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges))) if e.lastColHelper != nil { // nextCwf.TmpConstant needs to be reset for every individual diff --git a/executor/index_lookup_join.go b/executor/index_lookup_join.go index 187e83cc0f763..0c19fe4ea5bd4 100644 --- a/executor/index_lookup_join.go +++ b/executor/index_lookup_join.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -45,7 +46,7 @@ import ( "golang.org/x/exp/slices" ) -var _ Executor = &IndexLookUpJoin{} +var _ exec.Executor = &IndexLookUpJoin{} // IndexLookUpJoin employs one outer worker and N innerWorkers to execute concurrently. // It preserves the order of the outer table and supports batch lookup.
@@ -56,7 +57,7 @@ var _ Executor = &IndexLookUpJoin{} // 3. main thread receives the task, waits for the inner worker to finish handling the task. // 4. main thread joins each outer row by looking up the inner rows hash map in the task. type IndexLookUpJoin struct { - baseExecutor + exec.BaseExecutor resultCh <-chan *lookUpJoinTask cancelFunc context.CancelFunc @@ -132,7 +133,7 @@ type outerWorker struct { lookup *IndexLookUpJoin ctx sessionctx.Context - executor Executor + executor exec.Executor maxBatchSize int batchSize int @@ -161,15 +162,15 @@ type innerWorker struct { // Open implements the Executor interface. func (e *IndexLookUpJoin) Open(ctx context.Context) error { - err := e.children[0].Open(ctx) + err := e.Children(0).Open(ctx) if err != nil { return err } - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) e.innerPtrBytes = make([][]byte, 0, 8) e.finished.Store(false) - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { e.stats = &indexLookUpJoinRuntimeStats{} } e.cancelFunc = nil @@ -177,7 +178,7 @@ func (e *IndexLookUpJoin) Open(ctx context.Context) error { } func (e *IndexLookUpJoin) startWorkers(ctx context.Context) { - concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency() + concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency() if e.stats != nil { e.stats.concurrency = concurrency } @@ -197,12 +198,12 @@ func (e *IndexLookUpJoin) startWorkers(ctx context.Context) { func (e *IndexLookUpJoin) newOuterWorker(resultCh, innerCh chan *lookUpJoinTask) *outerWorker { ow := &outerWorker{ outerCtx: e.outerCtx, - ctx: e.ctx, - executor: e.children[0], + ctx: e.Ctx(), + executor: e.Children(0), resultCh: resultCh, innerCh: innerCh, batchSize: 32, - maxBatchSize: e.ctx.GetSessionVars().IndexJoinBatchSize, + maxBatchSize: e.Ctx().GetSessionVars().IndexJoinBatchSize, parentMemTracker: e.memTracker, lookup: e, } @@ -224,8 +225,8 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork innerCtx: e.innerCtx, outerCtx: e.outerCtx, taskCh: taskCh, - ctx: e.ctx, - executorChk: e.ctx.GetSessionVars().GetNewChunkWithCapacity(e.innerCtx.rowTypes, e.maxChunkSize, e.maxChunkSize, e.AllocPool), + ctx: e.Ctx(), + executorChk: e.Ctx().GetSessionVars().GetNewChunkWithCapacity(e.innerCtx.rowTypes, e.MaxChunkSize(), e.MaxChunkSize(), e.AllocPool), indexRanges: copiedRanges, keyOff2IdxOff: e.keyOff2IdxOff, stats: innerStats, @@ -238,7 +239,7 @@ func (e *IndexLookUpJoin) newInnerWorker(taskCh chan *lookUpJoinTask) *innerWork // memory usage of inner worker will be reset at the end of iw.handleTask. // While the life cycle of this memory consumption exists throughout the // whole active period of the inner worker.
- e.ctx.GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges))) + e.Ctx().GetSessionVars().StmtCtx.MemTracker.Consume(2 * types.EstimatedMemUsage(copiedRanges[0].LowVal, len(copiedRanges))) } if e.lastColHelper != nil { // nextCwf.TmpConstant needs to be reset for every individual @@ -430,7 +431,7 @@ func (ow *outerWorker) buildTask(ctx context.Context) (*lookUpJoinTask, error) { } maxChunkSize := ow.ctx.GetSessionVars().MaxChunkSize for requiredRows > task.outerResult.Len() { - chk := ow.ctx.GetSessionVars().GetNewChunkWithCapacity(ow.outerCtx.rowTypes, maxChunkSize, maxChunkSize, ow.executor.base().AllocPool) + chk := ow.ctx.GetSessionVars().GetNewChunkWithCapacity(ow.outerCtx.rowTypes, maxChunkSize, maxChunkSize, ow.executor.Base().AllocPool) chk = chk.SetRequiredRows(requiredRows, maxChunkSize) err := Next(ctx, ow.executor, chk) if err != nil { @@ -461,7 +462,7 @@ func (ow *outerWorker) buildTask(ctx context.Context) (*lookUpJoinTask, error) { } task.encodedLookUpKeys = make([]*chunk.Chunk, task.outerResult.NumChunks()) for i := range task.encodedLookUpKeys { - task.encodedLookUpKeys[i] = ow.ctx.GetSessionVars().GetNewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeBlob)}, task.outerResult.GetChunk(i).NumRows(), task.outerResult.GetChunk(i).NumRows(), ow.executor.base().AllocPool) + task.encodedLookUpKeys[i] = ow.ctx.GetSessionVars().GetNewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeBlob)}, task.outerResult.GetChunk(i).NumRows(), task.outerResult.GetChunk(i).NumRows(), ow.executor.Base().AllocPool) } return task, nil } @@ -765,7 +766,7 @@ func (iw *innerWorker) hasNullInJoinKey(row chunk.Row) bool { // Close implements the Executor interface. func (e *IndexLookUpJoin) Close() error { if e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } if e.cancelFunc != nil { e.cancelFunc() @@ -775,7 +776,7 @@ func (e *IndexLookUpJoin) Close() error { e.task = nil e.finished.Store(false) e.prepared = false - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } type indexLookUpJoinRuntimeStats struct { diff --git a/executor/index_lookup_merge_join.go b/executor/index_lookup_merge_join.go index e0fb176fff589..ed90c9b7baa99 100644 --- a/executor/index_lookup_merge_join.go +++ b/executor/index_lookup_merge_join.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -49,7 +50,7 @@ import ( // 3. main thread receives the task and fetches results from the channel in the task one by one. // 4. If the channel has been closed, the main thread receives the next task.
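The four-step doc comment above is the key to how this operator keeps merge-join output ordered despite concurrent inner workers: tasks reach the main thread in outer order, and each task carries its own results channel that is closed only once the inner worker has finished it. A reduced sketch of steps 3 and 4, with simplified stand-in types:

```Go
package sketch

// task mirrors lookUpMergeJoinTask: the inner worker streams joined rows
// into results and closes it when the task is done.
type task struct {
	results chan string
}

// mainThread drains tasks strictly in the order the outer worker produced
// them, so global output order follows the outer table even though inner
// workers fill the per-task channels concurrently.
func mainThread(resultCh <-chan *task, emit func(string)) {
	for t := range resultCh {
		for row := range t.results { // blocks until this task is finished
			emit(row)
		}
	}
}
```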
type IndexLookUpMergeJoin struct { - baseExecutor + exec.BaseExecutor resultCh <-chan *lookUpMergeJoinTask cancelFunc context.CancelFunc @@ -120,7 +121,7 @@ type outerMergeWorker struct { lookup *IndexLookUpMergeJoin ctx sessionctx.Context - executor Executor + executor exec.Executor maxBatchSize int batchSize int @@ -140,7 +141,7 @@ type innerMergeWorker struct { joinChkResourceCh chan *chunk.Chunk outerMergeCtx outerMergeCtx ctx sessionctx.Context - innerExec Executor + innerExec exec.Executor joiner joiner retFieldTypes []*types.FieldType @@ -157,23 +158,23 @@ type indexMergeJoinResult struct { // Open implements the Executor interface func (e *IndexLookUpMergeJoin) Open(ctx context.Context) error { - err := e.children[0].Open(ctx) + err := e.Children(0).Open(ctx) if err != nil { return err } - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) return nil } func (e *IndexLookUpMergeJoin) startWorkers(ctx context.Context) { // TODO: consider another session concurrency variable for index merge join. // Because its parallelization is not complete. - concurrency := e.ctx.GetSessionVars().IndexLookupJoinConcurrency() - if e.runtimeStats != nil { + concurrency := e.Ctx().GetSessionVars().IndexLookupJoinConcurrency() + if e.RuntimeStats() != nil { runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{} runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", concurrency)) - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) + e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats) } resultCh := make(chan *lookUpMergeJoinTask, concurrency) @@ -182,7 +183,7 @@ func (e *IndexLookUpMergeJoin) startWorkers(ctx context.Context) { for i := 0; i < concurrency; i++ { e.joinChkResourceCh[i] = make(chan *chunk.Chunk, numResChkHold) for j := 0; j < numResChkHold; j++ { - e.joinChkResourceCh[i] <- chunk.NewChunkWithCapacity(e.retFieldTypes, e.maxChunkSize) + e.joinChkResourceCh[i] <- chunk.NewChunkWithCapacity(e.RetFieldTypes(), e.MaxChunkSize()) } } workerCtx, cancelFunc := context.WithCancel(ctx) @@ -199,13 +200,13 @@ func (e *IndexLookUpMergeJoin) startWorkers(ctx context.Context) { func (e *IndexLookUpMergeJoin) newOuterWorker(resultCh, innerCh chan *lookUpMergeJoinTask) *outerMergeWorker { omw := &outerMergeWorker{ outerMergeCtx: e.outerMergeCtx, - ctx: e.ctx, + ctx: e.Ctx(), lookup: e, - executor: e.children[0], + executor: e.Children(0), resultCh: resultCh, innerCh: innerCh, batchSize: 32, - maxBatchSize: e.ctx.GetSessionVars().IndexJoinBatchSize, + maxBatchSize: e.Ctx().GetSessionVars().IndexJoinBatchSize, parentMemTracker: e.memTracker, nextColCompareFilters: e.lastColHelper, } @@ -225,13 +226,13 @@ func (e *IndexLookUpMergeJoin) newInnerMergeWorker(taskCh chan *lookUpMergeJoinT innerMergeCtx: e.innerMergeCtx, outerMergeCtx: e.outerMergeCtx, taskCh: taskCh, - ctx: e.ctx, + ctx: e.Ctx(), indexRanges: copiedRanges, keyOff2IdxOff: e.keyOff2IdxOff, joiner: e.joiners[workID], joinChkResourceCh: e.joinChkResourceCh[workID], - retFieldTypes: e.retFieldTypes, - maxChunkSize: e.maxChunkSize, + retFieldTypes: e.RetFieldTypes(), + maxChunkSize: e.MaxChunkSize(), } if e.lastColHelper != nil { // nextCwf.TmpConstant needs to be reset for every individual @@ -343,7 +344,7 @@ func (omw *outerMergeWorker) pushToChan(ctx context.Context, task *lookUpMergeJo
func (omw *outerMergeWorker) buildTask(ctx context.Context) (*lookUpMergeJoinTask, error) { task := &lookUpMergeJoinTask{ results: make(chan *indexMergeJoinResult, numResChkHold), - outerResult: chunk.NewList(omw.rowTypes, omw.executor.base().initCap, omw.executor.base().maxChunkSize), + outerResult: chunk.NewList(omw.rowTypes, omw.executor.Base().InitCap(), omw.executor.Base().MaxChunkSize()), } task.memTracker = memory.NewTracker(memory.LabelForSimpleTask, -1) task.memTracker.AttachTo(omw.parentMemTracker) @@ -706,7 +707,7 @@ func (imw *innerMergeWorker) dedupDatumLookUpKeys(lookUpContents []*indexJoinLoo // fetchNextInnerResult collects a chunk of inner results from inner child executor. func (imw *innerMergeWorker) fetchNextInnerResult(ctx context.Context, task *lookUpMergeJoinTask) (beginRow chunk.Row, err error) { - task.innerResult = imw.ctx.GetSessionVars().GetNewChunkWithCapacity(retTypes(imw.innerExec), imw.ctx.GetSessionVars().MaxChunkSize, imw.ctx.GetSessionVars().MaxChunkSize, imw.innerExec.base().AllocPool) + task.innerResult = imw.ctx.GetSessionVars().GetNewChunkWithCapacity(retTypes(imw.innerExec), imw.ctx.GetSessionVars().MaxChunkSize, imw.ctx.GetSessionVars().MaxChunkSize, imw.innerExec.Base().AllocPool) err = Next(ctx, imw.innerExec, task.innerResult) task.innerIter = chunk.NewIterator4Chunk(task.innerResult) beginRow = task.innerIter.Begin() @@ -715,8 +716,8 @@ func (imw *innerMergeWorker) fetchNextInnerResult(ctx context.Context, task *loo // Close implements the Executor interface. func (e *IndexLookUpMergeJoin) Close() error { - if e.runtimeStats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.runtimeStats) + if e.RuntimeStats() != nil { + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.RuntimeStats()) } if e.cancelFunc != nil { e.cancelFunc() @@ -733,5 +734,5 @@ func (e *IndexLookUpMergeJoin) Close() error { e.workerWg.Wait() e.memTracker = nil e.prepared = false - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } diff --git a/executor/index_merge_reader.go b/executor/index_merge_reader.go index 3ca465a46f062..2a8f14f46533e 100644 --- a/executor/index_merge_reader.go +++ b/executor/index_merge_reader.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/executor/internal/builder" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -54,7 +55,7 @@ import ( ) var ( - _ Executor = &IndexMergeReaderExecutor{} + _ exec.Executor = &IndexMergeReaderExecutor{} ) const ( @@ -81,7 +82,7 @@ const ( // 2. if not, record it and send it to the indexMergeTableScanWorker. // 3. if accessed, just ignore it. 
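Steps 2 and 3 above describe handle-level deduplication across the partial workers. A minimal sketch of that union step, with handles reduced to int64 (the real code unions kv handles, optionally keyed by partition):

```Go
package sketch

// dedupHandles forwards each handle to the table-scan side at most once,
// which is exactly the "if accessed, just ignore it" rule above.
func dedupHandles(in <-chan int64, out chan<- int64) {
	seen := make(map[int64]struct{})
	for h := range in {
		if _, dup := seen[h]; dup {
			continue
		}
		seen[h] = struct{}{}
		out <- h
	}
	close(out)
}
```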
 type IndexMergeReaderExecutor struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	table   table.Table
 	indexes []*model.IndexInfo
@@ -163,7 +164,7 @@ func (e *IndexMergeReaderExecutor) Open(ctx context.Context) (err error) {
 	e.keyRanges = make([][]kv.KeyRange, 0, len(e.partialPlans))
 	e.initRuntimeStats()
 	if e.isCorColInTableFilter {
-		e.tableRequest.Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.tblPlans)
+		e.tableRequest.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.tblPlans)
 		if err != nil {
 			return err
 		}
@@ -192,9 +193,9 @@ func (e *IndexMergeReaderExecutor) Open(ctx context.Context) (err error) {
 	if e.memTracker != nil {
 		e.memTracker.Reset()
 	} else {
-		e.memTracker = memory.NewTracker(e.id, -1)
+		e.memTracker = memory.NewTracker(e.ID(), -1)
 	}
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 	return nil
 }
 
@@ -208,7 +209,7 @@ func (e *IndexMergeReaderExecutor) rebuildRangeForCorCol() (err error) {
 		if e.isCorColInPartialAccess[i] {
 			switch x := plan[0].(type) {
 			case *plannercore.PhysicalIndexScan:
-				e.ranges[i], err = rebuildIndexRanges(e.ctx, x, x.IdxCols, x.IdxColLens)
+				e.ranges[i], err = rebuildIndexRanges(e.Ctx(), x, x.IdxCols, x.IdxColLens)
 			case *plannercore.PhysicalTableScan:
 				e.ranges[i], err = x.ResolveCorrelatedColumns()
 			default:
@@ -223,7 +224,7 @@
 }
 
 func (e *IndexMergeReaderExecutor) buildKeyRangesForTable(tbl table.Table) (ranges [][]kv.KeyRange, err error) {
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	for i, plan := range e.partialPlans {
 		_, ok := plan[0].(*plannercore.PhysicalIndexScan)
 		if !ok {
@@ -315,7 +316,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 		// Should use fetchCh instead of resultCh to send error.
 		syncErr(ctx, e.finished, fetchCh, errors.New("testIndexMergeResultChCloseEarly"))
 	})
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		collExec := true
 		e.dagPBs[workID].CollectExecutionSummaries = &collExec
 	}
@@ -342,12 +343,12 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 		worker := &partialIndexWorker{
 			stats:              e.stats,
 			idxID:              e.getPartitalPlanID(workID),
-			sc:                 e.ctx,
+			sc:                 e.Ctx(),
 			dagPB:              e.dagPBs[workID],
 			plan:               e.partialPlans[workID],
-			batchSize:          e.maxChunkSize,
-			maxBatchSize:       e.ctx.GetSessionVars().IndexLookupSize,
-			maxChunkSize:       e.maxChunkSize,
+			batchSize:          e.MaxChunkSize(),
+			maxBatchSize:       e.Ctx().GetSessionVars().IndexLookupSize,
+			maxChunkSize:       e.MaxChunkSize(),
 			memTracker:         e.memTracker,
 			partitionTableMode: e.partitionTableMode,
 			prunedPartitions:   e.prunedPartitions,
@@ -358,7 +359,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 		if e.isCorColInPartialFilters[workID] {
 			// We got a correlated column, so we need to refresh the Selection operator.
 			var err error
-			if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.partialPlans[workID]); err != nil {
+			if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.partialPlans[workID]); err != nil {
 				syncErr(ctx, e.finished, fetchCh, err)
 				return
 			}
@@ -372,12 +373,12 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 			SetTxnScope(e.txnScope).
 			SetReadReplicaScope(e.readReplicaScope).
 			SetIsStaleness(e.isStaleness).
-			SetFromSessionVars(e.ctx.GetSessionVars()).
+			SetFromSessionVars(e.Ctx().GetSessionVars()).
 			SetMemTracker(e.memTracker).
 			SetPaging(e.paging).
-			SetFromInfoSchema(e.ctx.GetInfoSchema()).
-			SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &builder.Request, e.partialNetDataSizes[workID])).
-			SetConnID(e.ctx.GetSessionVars().ConnectionID)
+			SetFromInfoSchema(e.Ctx().GetInfoSchema()).
+			SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &builder.Request, e.partialNetDataSizes[workID])).
+			SetConnID(e.Ctx().GetSessionVars().ConnectionID)
 
 		tps := worker.getRetTpsForIndexScan(e.handleCols)
 		results := make([]distsql.SelectResult, 0, len(keyRanges))
@@ -409,7 +410,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 				syncErr(ctx, e.finished, fetchCh, err)
 				return
 			}
-			result, err := distsql.SelectWithRuntimeStats(ctx, e.ctx, kvReq, tps, e.feedbacks[workID], getPhysicalPlanIDs(e.partialPlans[workID]), e.getPartitalPlanID(workID))
+			result, err := distsql.SelectWithRuntimeStats(ctx, e.Ctx(), kvReq, tps, e.feedbacks[workID], getPhysicalPlanIDs(e.partialPlans[workID]), e.getPartitalPlanID(workID))
 			if err != nil {
 				syncErr(ctx, e.finished, fetchCh, err)
 				return
@@ -417,7 +418,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 			results = append(results, result)
 			failpoint.Inject("testIndexMergePartialIndexWorkerCoprLeak", nil)
 		}
-		worker.batchSize = mathutil.Min(e.maxChunkSize, worker.maxBatchSize)
+		worker.batchSize = mathutil.Min(e.MaxChunkSize(), worker.maxBatchSize)
 		if len(results) > 1 && len(e.byItems) != 0 {
 			// e.Schema() is not the output schema for partialIndexReader, and we put the byItems-related columns first in `buildIndexReq`, so use nil here.
 			ssr := distsql.NewSortedSelectResults(results, nil, e.byItems, e.memTracker)
@@ -429,7 +430,7 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 			e.feedbacks[workID].Invalidate()
 		}
 		cancel()
-		e.ctx.StoreQueryFeedback(e.feedbacks[workID])
+		e.Ctx().StoreQueryFeedback(e.feedbacks[workID])
 	}, handleWorkerPanic(ctx, e.finished, fetchCh, nil, partialIndexWorkerType),
 	)
@@ -458,7 +459,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 		failpoint.Inject("testIndexMergePanicPartialTableWorker", nil)
 		var err error
 		partialTableReader := &TableReaderExecutor{
-			baseExecutor: newBaseExecutor(e.ctx, ts.Schema(), e.getPartitalPlanID(workID)),
+			BaseExecutor: exec.NewBaseExecutor(e.Ctx(), ts.Schema(), e.getPartitalPlanID(workID)),
 			dagPB:        e.dagPBs[workID],
 			startTS:      e.startTS,
 			txnScope:     e.txnScope,
@@ -474,10 +475,10 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 		worker := &partialTableWorker{
 			stats:              e.stats,
-			sc:                 e.ctx,
-			batchSize:          e.maxChunkSize,
-			maxBatchSize:       e.ctx.GetSessionVars().IndexLookupSize,
-			maxChunkSize:       e.maxChunkSize,
+			sc:                 e.Ctx(),
+			batchSize:          e.MaxChunkSize(),
+			maxBatchSize:       e.Ctx().GetSessionVars().IndexLookupSize,
+			maxChunkSize:       e.MaxChunkSize(),
 			tableReader:        partialTableReader,
 			memTracker:         e.memTracker,
 			partitionTableMode: e.partitionTableMode,
@@ -491,13 +492,13 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 				return i.GetPhysicalID() < j.GetPhysicalID()
 			})
 			partialTableReader.kvRangeBuilder = kvRangeBuilderFromRangeAndPartition{
-				sctx:       e.ctx,
+				sctx:       e.Ctx(),
 				partitions: worker.prunedPartitions,
 			}
 		}
 
 		if e.isCorColInPartialFilters[workID] {
-			if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.partialPlans[workID]); err != nil {
+			if e.dagPBs[workID].Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.partialPlans[workID]); err != nil {
 				syncErr(ctx, e.finished, fetchCh, err)
 				return
 			}
 		}
@@ -530,7 +531,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 			}
 			failpoint.Inject("testIndexMergePartialTableWorkerCoprLeak", nil)
 			tableReaderClosed = false
-			worker.batchSize = e.maxChunkSize
+			worker.batchSize = e.MaxChunkSize()
 			if worker.batchSize > worker.maxBatchSize {
 				worker.batchSize = worker.maxBatchSize
 			}
@@ -548,7 +549,7 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 			if err = worker.tableReader.Close(); err != nil {
 				logutil.Logger(ctx).Error("close Select result failed:", zap.Error(err))
 			}
-			e.ctx.StoreQueryFeedback(e.feedbacks[workID])
+			e.Ctx().StoreQueryFeedback(e.feedbacks[workID])
 			if fetchErr != nil {
 				break
 			}
@@ -561,9 +562,9 @@ func (e *IndexMergeReaderExecutor) startPartialTableWorker(ctx context.Context,
 }
 
 func (e *IndexMergeReaderExecutor) initRuntimeStats() {
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		e.stats = &IndexMergeRuntimeStat{
-			Concurrency: e.ctx.GetSessionVars().IndexLookupConcurrency(),
+			Concurrency: e.Ctx().GetSessionVars().IndexLookupConcurrency(),
 		}
 	}
 }
@@ -579,7 +580,7 @@ func (e *IndexMergeReaderExecutor) getTablePlanRootID() int {
 	if len(e.tblPlans) > 0 {
 		return e.tblPlans[len(e.tblPlans)-1].ID()
 	}
-	return e.id
+	return e.ID()
 }
 
 type partialTableWorker struct {
@@ -588,7 +589,7 @@ type partialTableWorker struct {
 	batchSize    int
 	maxBatchSize int
 	maxChunkSize int
-	tableReader  Executor
+	tableReader  exec.Executor
 	memTracker   *memory.Tracker
 	partitionTableMode bool
 	prunedPartitions   []table.PhysicalTable
@@ -621,7 +622,7 @@ func (w *partialTableWorker) needPartitionHandle() (bool, error) {
 
 func (w *partialTableWorker) fetchHandles(ctx context.Context, exitCh <-chan struct{}, fetchCh chan<- *indexMergeTableTask,
 	finished <-chan struct{}, handleCols plannercore.HandleCols, parTblIdx int, partialPlanIndex int) (count int64, err error) {
-	chk := w.sc.GetSessionVars().GetNewChunkWithCapacity(w.getRetTpsForTableScan(), w.maxChunkSize, w.maxChunkSize, w.tableReader.base().AllocPool)
+	chk := w.sc.GetSessionVars().GetNewChunkWithCapacity(w.getRetTpsForTableScan(), w.maxChunkSize, w.maxChunkSize, w.tableReader.Base().AllocPool)
 	for {
 		start := time.Now()
 		handles, retChunk, err := w.extractTaskHandles(ctx, chk, handleCols)
@@ -672,8 +673,8 @@ func (w *partialTableWorker) extractTaskHandles(ctx context.Context, chk *chunk.
 		if err != nil {
 			return handles, nil, err
 		}
-		if be := w.tableReader.base(); be != nil && be.runtimeStats != nil {
-			be.runtimeStats.Record(time.Since(start), chk.NumRows())
+		if be := w.tableReader.Base(); be != nil && be.RuntimeStats() != nil {
+			be.RuntimeStats().Record(time.Since(start), chk.NumRows())
 		}
 		if chk.NumRows() == 0 {
 			failpoint.Inject("testIndexMergeErrorPartialTableWorker", func(v failpoint.Value) {
@@ -741,7 +742,7 @@ func (w *partialTableWorker) buildTableTask(handles []kv.Handle, retChk *chunk.C
 }
 
 func (e *IndexMergeReaderExecutor) startIndexMergeTableScanWorker(ctx context.Context, workCh <-chan *indexMergeTableTask) {
-	lookupConcurrencyLimit := e.ctx.GetSessionVars().IndexLookupConcurrency()
+	lookupConcurrencyLimit := e.Ctx().GetSessionVars().IndexLookupConcurrency()
 	e.tblWorkerWg.Add(lookupConcurrencyLimit)
 	for i := 0; i < lookupConcurrencyLimit; i++ {
 		worker := &indexMergeTableScanWorker{
@@ -771,9 +772,9 @@ func (e *IndexMergeReaderExecutor) startIndexMergeTableScanWorker(ctx context.Co
 	}
 }
 
-func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tbl table.Table, handles []kv.Handle) (_ Executor, err error) {
+func (e *IndexMergeReaderExecutor) buildFinalTableReader(ctx context.Context, tbl table.Table, handles []kv.Handle) (_ exec.Executor, err error) {
 	tableReaderExec := &TableReaderExecutor{
-		baseExecutor: newBaseExecutor(e.ctx, e.schema, e.getTablePlanRootID()),
+		BaseExecutor: exec.NewBaseExecutor(e.Ctx(), e.Schema(), e.getTablePlanRootID()),
 		table:        tbl,
 		dagPB:        e.tableRequest,
 		startTS:      e.startTS,
@@ -814,10 +815,10 @@ func (e *IndexMergeReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) e
 		return nil
 	}
 	if resultTask.cursor < len(resultTask.rows) {
-		numToAppend := mathutil.Min(len(resultTask.rows)-resultTask.cursor, e.maxChunkSize-req.NumRows())
+		numToAppend := mathutil.Min(len(resultTask.rows)-resultTask.cursor, e.MaxChunkSize()-req.NumRows())
 		req.AppendRows(resultTask.rows[resultTask.cursor : resultTask.cursor+numToAppend])
 		resultTask.cursor += numToAppend
-		if req.NumRows() >= e.maxChunkSize {
+		if req.NumRows() >= e.MaxChunkSize() {
 			return nil
 		}
 	}
@@ -889,7 +890,7 @@ func handleWorkerPanic(ctx context.Context, finished <-chan struct{}, ch chan<-
 
 // Close implements the Executor Close interface.
 func (e *IndexMergeReaderExecutor) Close() error {
 	if e.stats != nil {
-		defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
+		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
 	}
 	if e.finished == nil {
 		return nil
 	}
@@ -998,7 +999,7 @@ func (w *indexMergeProcessWorker) pruneTableWorkerTaskIdxRows(task *indexMergeTa
 
 func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderByAndPushedLimit(ctx context.Context, fetchCh <-chan *indexMergeTableTask,
 	workCh chan<- *indexMergeTableTask, resultCh chan<- *indexMergeTableTask, finished <-chan struct{}) {
-	memTracker := memory.NewTracker(w.indexMerge.id, -1)
+	memTracker := memory.NewTracker(w.indexMerge.ID(), -1)
 	memTracker.AttachTo(w.indexMerge.memTracker)
 	defer memTracker.Detach()
 	defer close(workCh)
@@ -1070,7 +1071,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnionWithOrderByAndPushedLimit(ctx co
 		fhs[i] = taskMap[idx.partialID][idx.taskID].handles[idx.rowID]
 	}
 
-	batchSize := w.indexMerge.ctx.GetSessionVars().IndexLookupSize
+	batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize
 	tasks := make([]*indexMergeTableTask, 0, len(fhs)/batchSize+1)
 	for len(fhs) > 0 {
 		l := mathutil.Min(len(fhs), batchSize)
@@ -1112,7 +1113,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnion(ctx context.Context, fetchCh <-
 	failpoint.Inject("testIndexMergeResultChCloseEarly", func(_ failpoint.Value) {
 		failpoint.Return()
 	})
-	memTracker := memory.NewTracker(w.indexMerge.id, -1)
+	memTracker := memory.NewTracker(w.indexMerge.ID(), -1)
 	memTracker.AttachTo(w.indexMerge.memTracker)
 	defer memTracker.Detach()
 	defer close(workCh)
@@ -1342,9 +1343,9 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet
 
 	// One goroutine may handle one or multiple partitions.
 	// The max number of partitions is 8192; we use ExecutorConcurrency to avoid too many goroutines.
-	maxWorkerCnt := w.indexMerge.ctx.GetSessionVars().IndexMergeIntersectionConcurrency()
+	maxWorkerCnt := w.indexMerge.Ctx().GetSessionVars().IndexMergeIntersectionConcurrency()
 	maxChannelSize := atomic.LoadInt32(&LookupTableTaskChannelSize)
-	batchSize := w.indexMerge.ctx.GetSessionVars().IndexLookupSize
+	batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize
 
 	partCnt := 1
 	if w.indexMerge.partitionTableMode {
@@ -1362,7 +1363,7 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet
 	wg := util.WaitGroupWrapper{}
 	errCh := make(chan bool, workerCnt)
 	for i := 0; i < workerCnt; i++ {
-		tracker := memory.NewTracker(w.indexMerge.id, -1)
+		tracker := memory.NewTracker(w.indexMerge.ID(), -1)
 		tracker.AttachTo(w.indexMerge.memTracker)
 		worker := &intersectionProcessWorker{
 			workerID: i,
diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go
index 1f6f560c618a2..88f3b51675719 100644
--- a/executor/infoschema_reader.go
+++ b/executor/infoschema_reader.go
@@ -36,6 +36,7 @@ import (
 	"github.com/pingcap/tidb/domain/infosync"
 	"github.com/pingcap/tidb/domain/resourcegroup"
 	"github.com/pingcap/tidb/errno"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	internalutil "github.com/pingcap/tidb/executor/internal/util"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/infoschema"
@@ -1208,7 +1209,7 @@ func (e *memtableRetriever) dataForTiKVStoreStatus(ctx sessionctx.Context) (err
 
 // DDLJobsReaderExec executes DDLJobs information retrieving.
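`Close` above registers the collected runtime statistics with a deferred call before tearing down workers, the same pattern `IndexLookUpMergeJoin.Close` uses earlier in this patch. A small sketch of that pattern with toy stand-ins (not the real `execdetails` types):

```Go
package main

import "fmt"

// Sketch: stats are registered via defer so they are recorded even if the
// rest of Close returns early or fails. statsColl is an illustrative stand-in.
type statsColl map[int]string

func (s statsColl) RegisterStats(id int, stats string) { s[id] = stats }

func closeExec(id int, stats string, coll statsColl) error {
	if stats != "" {
		defer coll.RegisterStats(id, stats)
	}
	// ... drain channels, wait for workers, release trackers ...
	return nil
}

func main() {
	coll := statsColl{}
	_ = closeExec(7, "time:1ms, rows:42", coll)
	fmt.Println(coll[7]) // time:1ms, rows:42
}
```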
 type DDLJobsReaderExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	DDLJobRetriever
 
 	cacheJobs []*model.Job
@@ -1218,12 +1219,12 @@ type DDLJobsReaderExec struct {
 
 // Open implements the Executor Open interface.
 func (e *DDLJobsReaderExec) Open(ctx context.Context) error {
-	if err := e.baseExecutor.Open(ctx); err != nil {
+	if err := e.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
 	e.DDLJobRetriever.is = e.is
-	e.activeRoles = e.ctx.GetSessionVars().ActiveRoles
-	sess, err := e.getSysSession()
+	e.activeRoles = e.Ctx().GetSessionVars().ActiveRoles
+	sess, err := e.GetSysSession()
 	if err != nil {
 		return err
 	}
@@ -1246,8 +1247,8 @@ func (e *DDLJobsReaderExec) Open(ctx context.Context) error {
 
 // Next implements the Executor Next interface.
 func (e *DDLJobsReaderExec) Next(ctx context.Context, req *chunk.Chunk) error {
-	req.GrowAndReset(e.maxChunkSize)
-	checker := privilege.GetPrivilegeManager(e.ctx)
+	req.GrowAndReset(e.MaxChunkSize())
+	checker := privilege.GetPrivilegeManager(e.Ctx())
 	count := 0
 
 	// Append running DDL jobs.
@@ -1289,8 +1290,8 @@ func (e *DDLJobsReaderExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 // Close implements the Executor Close interface.
 func (e *DDLJobsReaderExec) Close() error {
-	e.releaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess)
-	return e.baseExecutor.Close()
+	e.ReleaseSysSession(kv.WithInternalSourceType(context.Background(), kv.InternalTxnDDL), e.sess)
+	return e.BaseExecutor.Close()
 }
 
 func (e *memtableRetriever) setDataFromEngines() {
diff --git a/executor/insert.go b/executor/insert.go
index a13783f91f4a7..302b2bdd5ac67 100644
--- a/executor/insert.go
+++ b/executor/insert.go
@@ -62,11 +62,11 @@ func (e *InsertExec) exec(ctx context.Context, rows [][]types.Datum) error {
 		return tblName
 	}))
 	// If tidb_batch_insert is ON and not in a transaction, we could use BatchInsert mode.
-	sessVars := e.ctx.GetSessionVars()
+	sessVars := e.Ctx().GetSessionVars()
 	defer sessVars.CleanBuffers()
 	ignoreErr := sessVars.StmtCtx.DupKeyAsWarning
 
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return err
 	}
@@ -187,20 +187,20 @@ func (e *InsertValues) prefetchDataCache(ctx context.Context, txn kv.Transaction
 
 // updateDupRow updates a duplicate row to a new row.
 func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Transaction, row toBeCheckedRow, handle kv.Handle,
 	onDuplicate []*expression.Assignment) error {
-	oldRow, err := getOldRow(ctx, e.ctx, txn, row.t, handle, e.GenExprs)
+	oldRow, err := getOldRow(ctx, e.Ctx(), txn, row.t, handle, e.GenExprs)
 	if err != nil {
 		return err
 	}
 	// get the extra columns from the SELECT clause.
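`DDLJobsReaderExec` pairs `GetSysSession` in `Open` with `ReleaseSysSession` in `Close`, the classic acquire-in-Open, release-in-Close lifecycle. A toy sketch of that contract (the channel-backed pool here is an illustrative stand-in for `domain.SysSessionPool()`):

```Go
package main

import "fmt"

// Sketch: a pooled session is taken when the executor opens and must be
// returned when it closes, otherwise the pool leaks capacity.
type pool chan int

func (p pool) get() int  { return <-p }
func (p pool) put(s int) { p <- s }

type ddlJobsReader struct {
	p    pool
	sess int
}

func (e *ddlJobsReader) Open() error  { e.sess = e.p.get(); return nil }
func (e *ddlJobsReader) Close() error { e.p.put(e.sess); return nil }

func main() {
	p := make(pool, 1)
	p <- 1
	e := &ddlJobsReader{p: p}
	_ = e.Open()
	defer e.Close()
	fmt.Println("using sys session", e.sess)
}
```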
 	var extraCols []types.Datum
-	if len(e.ctx.GetSessionVars().CurrInsertBatchExtraCols) > 0 {
-		extraCols = e.ctx.GetSessionVars().CurrInsertBatchExtraCols[idxInBatch]
+	if len(e.Ctx().GetSessionVars().CurrInsertBatchExtraCols) > 0 {
+		extraCols = e.Ctx().GetSessionVars().CurrInsertBatchExtraCols[idxInBatch]
 	}
 
 	err = e.doDupRowUpdate(ctx, handle, oldRow, row.row, extraCols, e.OnDuplicate, idxInBatch)
-	if e.ctx.GetSessionVars().StmtCtx.DupKeyAsWarning && (kv.ErrKeyExists.Equal(err) ||
+	if e.Ctx().GetSessionVars().StmtCtx.DupKeyAsWarning && (kv.ErrKeyExists.Equal(err) ||
 		table.ErrCheckConstraintViolated.Equal(err)) {
-		e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
+		e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err)
 		return nil
 	}
 	return err
@@ -210,12 +210,12 @@ func (e *InsertExec) updateDupRow(ctx context.Context, idxInBatch int, txn kv.Tr
 func (e *InsertExec) batchUpdateDupRows(ctx context.Context, newRows [][]types.Datum) error {
 	// Get keys need to be checked.
 	start := time.Now()
-	toBeCheckedRows, err := getKeysNeedCheck(ctx, e.ctx, e.Table, newRows)
+	toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, newRows)
 	if err != nil {
 		return err
 	}
 
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return err
 	}
@@ -301,16 +301,16 @@ func (e *InsertExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		ctx = context.WithValue(ctx, autoid.AllocatorRuntimeStatsCtxKey, e.stats.AllocatorRuntimeStats)
 	}
 
-	if len(e.children) > 0 && e.children[0] != nil {
+	if !e.EmptyChildren() && e.Children(0) != nil {
 		return insertRowsFromSelect(ctx, e)
 	}
 	err := insertRows(ctx, e)
 	if err != nil {
 		terr, ok := errors.Cause(err).(*terror.Error)
 		if ok && len(e.OnDuplicate) == 0 &&
-			e.ctx.GetSessionVars().StmtCtx.ErrAutoincReadFailedAsWarning &&
+			e.Ctx().GetSessionVars().StmtCtx.ErrAutoincReadFailedAsWarning &&
 			terr.Code() == errno.ErrAutoincReadFailed {
-			e.ctx.GetSessionVars().StmtCtx.AppendWarning(err)
+			e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err)
 			return nil
 		}
 		return err
@@ -320,8 +320,8 @@ func (e *InsertExec) Next(ctx context.Context, req *chunk.Chunk) error {
 
 // Close implements the Executor Close interface.
 func (e *InsertExec) Close() error {
-	if e.runtimeStats != nil && e.stats != nil {
-		defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
+	if e.RuntimeStats() != nil && e.stats != nil {
+		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
 	}
 	defer e.memTracker.ReplaceBytesUsed(0)
 	e.setMessage()
@@ -333,8 +333,8 @@ func (e *InsertExec) Close() error {
 
 // Open implements the Executor Open interface.
 func (e *InsertExec) Open(ctx context.Context) error {
-	e.memTracker = memory.NewTracker(e.id, -1)
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker = memory.NewTracker(e.ID(), -1)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 
 	if e.OnDuplicate != nil {
 		e.initEvalBuffer4Dup()
@@ -366,7 +366,7 @@ func (e *InsertExec) initEvalBuffer4Dup() {
 		evalBufferTypes = append(evalBufferTypes, &(col.FieldType))
 	}
 	if extraLen > 0 {
-		evalBufferTypes = append(evalBufferTypes, e.SelectExec.base().retFieldTypes[e.rowLen:]...)
+		evalBufferTypes = append(evalBufferTypes, e.SelectExec.Base().RetFieldTypes()[e.rowLen:]...)
 	}
 	for _, col := range e.Table.Cols() {
 		evalBufferTypes = append(evalBufferTypes, &(col.FieldType))
@@ -385,7 +385,7 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 	assignFlag := make([]bool, len(e.Table.WritableCols()))
 	// See http://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
 	e.curInsertVals.SetDatums(newRow...)
-	e.ctx.GetSessionVars().CurrInsertValues = e.curInsertVals.ToRow()
+	e.Ctx().GetSessionVars().CurrInsertValues = e.curInsertVals.ToRow()
 
 	// NOTE: In order to execute the expression inside the column assignment,
 	// we have to put the value of "oldRow" and "extraCols" before "newRow" in
 	// "row4Update" to be consistent with "Schema4OnDuplicate" in the "Insert"
@@ -397,7 +397,7 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 
 	// Update old row when the key is duplicated.
 	e.evalBuffer4Dup.SetDatums(e.row4Update...)
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	warnCnt := int(sc.WarningCount())
 	for _, col := range cols {
 		if col.LazyErr != nil {
@@ -409,7 +409,7 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 		}
 		c := col.Col.ToInfo()
 		c.Name = col.ColName
-		e.row4Update[col.Col.Index], err1 = table.CastValue(e.ctx, val, c, false, false)
+		e.row4Update[col.Col.Index], err1 = table.CastValue(e.Ctx(), val, c, false, false)
 		if err1 != nil {
 			return err1
 		}
@@ -426,7 +426,7 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 	}
 
 	newData := e.row4Update[:len(oldRow)]
-	_, err := updateRecord(ctx, e.ctx, handle, oldRow, newData, assignFlag, e.Table, true, e.memTracker, e.fkChecks, e.fkCascades)
+	_, err := updateRecord(ctx, e.Ctx(), handle, oldRow, newData, assignFlag, e.Table, true, e.memTracker, e.fkChecks, e.fkCascades)
 	if err != nil {
 		return err
 	}
@@ -435,7 +435,7 @@ func (e *InsertExec) doDupRowUpdate(ctx context.Context, handle kv.Handle, oldRo
 
 // setMessage sets info message(ERR_INSERT_INFO) generated by INSERT statement
 func (e *InsertExec) setMessage() {
-	stmtCtx := e.ctx.GetSessionVars().StmtCtx
+	stmtCtx := e.Ctx().GetSessionVars().StmtCtx
 	numRecords := stmtCtx.RecordRows()
 	if e.SelectExec != nil || numRecords > 1 {
 		numWarnings := stmtCtx.WarningCount()
@@ -444,7 +444,7 @@ func (e *InsertExec) setMessage() {
 			// if ignoreErr
 			numDuplicates = numRecords - stmtCtx.CopiedRows()
 		} else {
-			if e.ctx.GetSessionVars().ClientCapability&mysql.ClientFoundRows > 0 {
+			if e.Ctx().GetSessionVars().ClientCapability&mysql.ClientFoundRows > 0 {
 				numDuplicates = stmtCtx.TouchedRows()
 			} else {
 				numDuplicates = stmtCtx.UpdatedRows()
diff --git a/executor/insert_common.go b/executor/insert_common.go
index 0881c09c9e13e..5ff765019784b 100644
--- a/executor/insert_common.go
+++ b/executor/insert_common.go
@@ -23,6 +23,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/ddl"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
@@ -53,14 +54,14 @@ import (
 
 // InsertValues is the data to insert.
 // nolint:structcheck
 type InsertValues struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	rowCount       uint64
 	curBatchCnt    uint64
 	maxRowsInBatch uint64
 	lastInsertID   uint64
 
-	SelectExec Executor
+	SelectExec exec.Executor
 
 	Table   table.Table
 	Columns []*ast.ColumnName
@@ -152,7 +153,7 @@ func (e *InsertValues) initInsertColumns() error {
 			e.insertColumns = append(e.insertColumns, col)
 		}
 		if col.Name.L == model.ExtraHandleName.L {
-			if !e.ctx.GetSessionVars().AllowWriteRowID {
+			if !e.Ctx().GetSessionVars().AllowWriteRowID {
 				return errors.Errorf("insert, update and replace statements for _tidb_rowid are not supported")
 			}
 			e.hasExtraHandle = true
@@ -200,7 +201,7 @@ func (e *InsertValues) lazilyInitColDefaultValBuf() (ok bool) {
 // insertRows processes `insert|replace into values ()` or `insert|replace into set x=y`
 func insertRows(ctx context.Context, base insertCommon) (err error) {
 	e := base.insertCommon()
-	sessVars := e.ctx.GetSessionVars()
+	sessVars := e.Ctx().GetSessionVars()
 	batchSize := sessVars.DMLBatchSize
 	batchInsert := sessVars.BatchInsert && !sessVars.InTxn() && variable.EnableBatchDML.Load() && batchSize > 0
@@ -315,13 +316,13 @@ func (e *InsertValues) handleErr(col *table.Column, val *types.Datum, rowIdx int
 	if col != nil {
 		c = col.ColumnInfo
 	}
 
-	if e.ctx.GetSessionVars().StmtCtx.InLoadDataStmt {
+	if e.Ctx().GetSessionVars().StmtCtx.InLoadDataStmt {
 		err = completeLoadErr(c, rowIdx, err)
 	} else {
 		err = completeInsertErr(c, val, rowIdx, err)
 	}
 
-	if !e.ctx.GetSessionVars().StmtCtx.DupKeyAsWarning {
+	if !e.Ctx().GetSessionVars().StmtCtx.DupKeyAsWarning {
 		return err
 	}
 	// TODO: should not filter all types of errors here.
@@ -347,14 +348,14 @@ func (e *InsertValues) evalRow(ctx context.Context, list []expression.Expression
 	}
 
 	e.evalBuffer.SetDatums(row...)
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	warnCnt := int(sc.WarningCount())
 	for i, expr := range list {
 		val, err := expr.Eval(e.evalBuffer.ToRow())
 		if err != nil {
 			return nil, err
 		}
-		val1, err := table.CastValue(e.ctx, val, e.insertColumns[i].ToInfo(), false, false)
+		val1, err := table.CastValue(e.Ctx(), val, e.insertColumns[i].ToInfo(), false, false)
 		if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
 			return nil, err
 		}
@@ -386,7 +387,7 @@ func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expres
 	}
 	row := make([]types.Datum, rowLen)
 	hasValue := make([]bool, rowLen)
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	warnCnt := int(sc.WarningCount())
 	for i, expr := range list {
 		con := expr.(*expression.Constant)
@@ -394,7 +395,7 @@ func (e *InsertValues) fastEvalRow(ctx context.Context, list []expression.Expres
 		if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
 			return nil, err
 		}
-		val1, err := table.CastValue(e.ctx, val, e.insertColumns[i].ToInfo(), false, false)
+		val1, err := table.CastValue(e.Ctx(), val, e.insertColumns[i].ToInfo(), false, false)
 		if err = e.handleErr(e.insertColumns[i], &val, rowIdx, err); err != nil {
 			return nil, err
 		}
@@ -444,13 +445,13 @@ func (e *InsertValues) setValueForRefColumn(row []types.Datum, hasValue []bool)
 func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
 	// process `insert|replace into ... select ... from ...`
 	e := base.insertCommon()
-	selectExec := e.children[0]
+	selectExec := e.Children(0)
 	fields := retTypes(selectExec)
 	chk := tryNewCacheChunk(selectExec)
 	iter := chunk.NewIterator4Chunk(chk)
 	rows := make([][]types.Datum, 0, chk.Capacity())
 
-	sessVars := e.ctx.GetSessionVars()
+	sessVars := e.Ctx().GetSessionVars()
 	batchSize := sessVars.DMLBatchSize
 	batchInsert := sessVars.BatchInsert && !sessVars.InTxn() && variable.EnableBatchDML.Load() && batchSize > 0
 	memUsageOfRows := int64(0)
@@ -459,7 +460,7 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
 	extraColsInSel := make([][]types.Datum, 0, chk.Capacity())
 	// In order to ensure the correctness of the `transaction write throughput` SLI statistics,
 	// just ignore the transaction which contain `insert|replace into ... select ... from ...` statement.
-	e.ctx.GetTxnWriteThroughputSLI().SetInvalid()
+	e.Ctx().GetTxnWriteThroughputSLI().SetInvalid()
 	for {
 		err := Next(ctx, selectExec, chk)
 		if err != nil {
@@ -483,7 +484,7 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
 				memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows))
 				memUsageOfExtraCols = types.EstimatedMemUsage(extraColsInSel[0], len(extraColsInSel))
 				memTracker.Consume(memUsageOfRows + memUsageOfExtraCols)
-				e.ctx.GetSessionVars().CurrInsertBatchExtraCols = extraColsInSel
+				e.Ctx().GetSessionVars().CurrInsertBatchExtraCols = extraColsInSel
 				if err = base.exec(ctx, rows); err != nil {
 					return err
 				}
@@ -502,7 +503,7 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
 			memUsageOfRows = types.EstimatedMemUsage(rows[0], len(rows))
 			memUsageOfExtraCols = types.EstimatedMemUsage(extraColsInSel[0], len(extraColsInSel))
 			memTracker.Consume(memUsageOfRows + memUsageOfExtraCols)
-			e.ctx.GetSessionVars().CurrInsertBatchExtraCols = extraColsInSel
+			e.Ctx().GetSessionVars().CurrInsertBatchExtraCols = extraColsInSel
 		}
 		err = base.exec(ctx, rows)
 		if err != nil {
@@ -518,13 +519,13 @@ func insertRowsFromSelect(ctx context.Context, base insertCommon) error {
 }
 
 func (e *InsertValues) doBatchInsert(ctx context.Context) error {
-	txn, err := e.ctx.Txn(false)
+	txn, err := e.Ctx().Txn(false)
 	if err != nil {
 		return exeerrors.ErrBatchInsertFail.GenWithStack("BatchInsert failed with error: %v", err)
 	}
 	e.memTracker.Consume(-int64(txn.Size()))
-	e.ctx.StmtCommit(ctx)
-	if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil {
+	e.Ctx().StmtCommit(ctx)
+	if err := sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil {
 		// We should return a special error for batch insert.
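The `insertRowsFromSelect` loop above pulls chunks from the child executor, buffers the rows, and flushes them whenever the DML batch size is reached, with `doBatchInsert` committing in between. A compact sketch of that control flow (the `next`/`exec` callbacks are illustrative stand-ins for the child executor and `base.exec`):

```Go
package main

import "fmt"

// Sketch: pull batches from the child, buffer rows, and flush whenever the
// configured batch size is reached; flush the remainder at the end.
func insertFromSelect(next func() []int, exec func([]int), batchSize int) {
	rows := make([]int, 0, batchSize)
	for {
		chk := next()
		if len(chk) == 0 {
			break
		}
		for _, r := range chk {
			rows = append(rows, r)
			if batchSize > 0 && len(rows) >= batchSize {
				exec(rows)
				rows = rows[:0] // mimics doBatchInsert committing and starting fresh
			}
		}
	}
	if len(rows) > 0 {
		exec(rows)
	}
}

func main() {
	data := [][]int{{1, 2, 3}, {4, 5}, {}}
	i := 0
	next := func() []int { chk := data[i]; i++; return chk }
	insertFromSelect(next, func(b []int) { fmt.Println("flush", b) }, 2)
	// flush [1 2]; flush [3 4]; flush [5]
}
```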
 		return exeerrors.ErrBatchInsertFail.GenWithStack("BatchInsert failed with error: %v", err)
 	}
@@ -537,14 +538,14 @@ func (e *InsertValues) doBatchInsert(ctx context.Context) error {
 func (e *InsertValues) getRow(ctx context.Context, vals []types.Datum) ([]types.Datum, error) {
 	row := make([]types.Datum, len(e.Table.Cols()))
 	hasValue := make([]bool, len(e.Table.Cols()))
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	warnCnt := int(sc.WarningCount())
-	inLoadData := e.ctx.GetSessionVars().StmtCtx.InLoadDataStmt
+	inLoadData := e.Ctx().GetSessionVars().StmtCtx.InLoadDataStmt
 	for i := 0; i < e.rowLen; i++ {
 		col := e.insertColumns[i].ToInfo()
-		casted, err := table.CastValue(e.ctx, vals[i], col, false, false)
+		casted, err := table.CastValue(e.Ctx(), vals[i], col, false, false)
 		if newErr := e.handleErr(e.insertColumns[i], &vals[i], int(e.rowCount), err); newErr != nil {
 			if inLoadData {
 				return nil, newErr
@@ -578,9 +579,9 @@ func (e *InsertValues) getColDefaultValue(idx int, col *table.Column) (d types.D
 
 	var defaultVal types.Datum
 	if col.DefaultIsExpr && col.DefaultExpr != nil {
-		defaultVal, err = table.EvalColDefaultExpr(e.ctx, col.ToInfo(), col.DefaultExpr)
+		defaultVal, err = table.EvalColDefaultExpr(e.Ctx(), col.ToInfo(), col.DefaultExpr)
 	} else {
-		defaultVal, err = table.GetColDefaultValue(e.ctx, col.ToInfo())
+		defaultVal, err = table.GetColDefaultValue(e.Ctx(), col.ToInfo())
 	}
 	if err != nil {
 		return types.Datum{}, err
@@ -603,7 +604,7 @@ func (e *InsertValues) fillColValue(
 ) (types.Datum, error) {
 	if mysql.HasAutoIncrementFlag(column.GetFlag()) {
 		if !hasValue && mysql.HasNoDefaultValueFlag(column.ToInfo().GetFlag()) {
-			vars := e.ctx.GetSessionVars()
+			vars := e.Ctx().GetSessionVars()
 			sc := vars.StmtCtx
 			if vars.StrictSQLMode {
 				return datum, table.ErrNoDefaultValue.FastGenByArgs(column.ToInfo().Name)
@@ -668,7 +669,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 		tCols = append(tCols, col)
 	}
 	rowCntInLoadData := uint64(0)
-	if e.ctx.GetSessionVars().StmtCtx.InLoadDataStmt {
+	if e.Ctx().GetSessionVars().StmtCtx.InLoadDataStmt {
 		rowCntInLoadData = e.rowCount
 	}
 
@@ -683,7 +684,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 				return nil, err
 			}
 			if !e.lazyFillAutoID || (e.lazyFillAutoID && !mysql.HasAutoIncrementFlag(c.GetFlag())) {
-				if err = c.HandleBadNull(&row[i], e.ctx.GetSessionVars().StmtCtx, rowCntInLoadData); err != nil {
+				if err = c.HandleBadNull(&row[i], e.Ctx().GetSessionVars().StmtCtx, rowCntInLoadData); err != nil {
 					return nil, err
 				}
 			}
@@ -692,7 +693,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 	tbl := e.Table.Meta()
 	// Handle exchange partition
 	if tbl.ExchangePartitionInfo != nil && tbl.ExchangePartitionInfo.ExchangePartitionFlag {
-		is := e.ctx.GetDomainInfoSchema().(infoschema.InfoSchema)
+		is := e.Ctx().GetDomainInfoSchema().(infoschema.InfoSchema)
 		pt, tableFound := is.TableByID(tbl.ExchangePartitionInfo.ExchangePartitionID)
 		if !tableFound {
 			return nil, errors.Errorf("exchange partition process table by id failed")
@@ -702,7 +703,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 			return nil, errors.Errorf("exchange partition process assert table partition failed")
 		}
 		err := p.CheckForExchangePartition(
-			e.ctx,
+			e.Ctx(),
 			pt.Meta().Partition,
 			row,
 			tbl.ExchangePartitionInfo.ExchangePartitionDefID,
@@ -711,7 +712,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 			return nil, err
 		}
 	}
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	warnCnt := int(sc.WarningCount())
 	for i, gCol := range gCols {
 		colIdx := gCol.ColumnInfo.Offset
@@ -719,10 +720,10 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 		if err != nil && gCol.FieldType.IsArray() {
 			return nil, completeError(tbl, gCol.Offset, rowIdx, err)
 		}
-		if e.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) != nil {
+		if e.Ctx().GetSessionVars().StmtCtx.HandleTruncate(err) != nil {
 			return nil, err
 		}
-		row[colIdx], err = table.CastValue(e.ctx, val, gCol.ToInfo(), false, false)
+		row[colIdx], err = table.CastValue(e.Ctx(), val, gCol.ToInfo(), false, false)
 		if err = e.handleErr(gCol, &val, rowIdx, err); err != nil {
 			return nil, err
 		}
@@ -734,7 +735,7 @@ func (e *InsertValues) fillRow(ctx context.Context, row []types.Datum, hasValue
 			warnCnt += len(newWarnings)
 		}
 		// Handle the bad null error.
-		if err = gCol.HandleBadNull(&row[colIdx], e.ctx.GetSessionVars().StmtCtx, rowCntInLoadData); err != nil {
+		if err = gCol.HandleBadNull(&row[colIdx], e.Ctx().GetSessionVars().StmtCtx, rowCntInLoadData); err != nil {
 			return nil, err
 		}
 	}
@@ -766,7 +767,7 @@ func completeError(tbl *model.TableInfo, offset int, rowIdx int, err error) erro
 
 // isAutoNull can help judge whether a datum is AutoIncrement Null quickly.
 // This used to help lazyFillAutoIncrement to find consecutive N datum backwards for batch autoID alloc.
-func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table.Column) bool {
+func (e *InsertValues) isAutoNull(_ context.Context, d types.Datum, col *table.Column) bool {
 	var err error
 	var recordID int64
 	if !d.IsNull() {
@@ -781,7 +782,7 @@ func (e *InsertValues) isAutoNull(ctx context.Context, d types.Datum, col *table
 	}
 	// Change NULL to auto id.
 	// Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set.
-	if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
+	if d.IsNull() || e.Ctx().GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
 		return true
 	}
 	return false
@@ -830,7 +831,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 	if !found {
 		return rows, nil
 	}
-	retryInfo := e.ctx.GetSessionVars().RetryInfo
+	retryInfo := e.Ctx().GetSessionVars().RetryInfo
 	rowCount := len(rows)
 	for processedIdx := 0; processedIdx < rowCount; processedIdx++ {
 		autoDatum := rows[processedIdx][idx]
@@ -845,26 +846,26 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 		}
 		// Use the value if it's not null and not 0.
 		if recordID != 0 {
-			alloc := e.Table.Allocators(e.ctx).Get(autoid.AutoIncrementType)
+			alloc := e.Table.Allocators(e.Ctx()).Get(autoid.AutoIncrementType)
 			err = alloc.Rebase(ctx, recordID, true)
 			if err != nil {
 				return nil, err
 			}
-			e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID)
+			e.Ctx().GetSessionVars().StmtCtx.InsertID = uint64(recordID)
 			retryInfo.AddAutoIncrementID(recordID)
 			continue
 		}
 
 		// Change NULL to auto id.
 		// Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set.
-		if autoDatum.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
+		if autoDatum.IsNull() || e.Ctx().GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
 			// Consume the auto IDs in RetryInfo first.
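`lazyAdjustAutoIncrementDatum` below allocates a batch of consecutive auto IDs and derives the j-th row's ID as `min + j*increment`, computed in uint64 space exactly as the hunk shows. A self-contained sketch of that arithmetic:

```Go
package main

import "fmt"

// Sketch of the batch allocation arithmetic: given the first allocated ID and
// the session's auto_increment_increment, the j-th row in the batch receives
// min + j*increment, computed in uint64 space as in the patched code.
func batchIDs(min, increment int64, cnt int) []int64 {
	ids := make([]int64, cnt)
	for j := 0; j < cnt; j++ {
		ids[j] = int64(uint64(min) + uint64(j)*uint64(increment))
	}
	return ids
}

func main() {
	fmt.Println(batchIDs(100, 2, 4)) // [100 102 104 106]
}
```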
 			for retryInfo.Retrying && processedIdx < rowCount {
 				nextID, ok := retryInfo.GetCurrAutoIncrementID()
 				if !ok {
 					break
 				}
-				err = setDatumAutoIDAndCast(e.ctx, &rows[processedIdx][idx], nextID, col)
+				err = setDatumAutoIDAndCast(e.Ctx(), &rows[processedIdx][idx], nextID, col)
 				if err != nil {
 					return nil, err
 				}
@@ -882,7 +883,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 			}
 			// AllocBatchAutoIncrementValue allocates batch N consecutive autoIDs.
 			// The max value can be derived from adding the increment value to min for cnt-1 times.
-			min, increment, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.ctx, cnt)
+			min, increment, err := table.AllocBatchAutoIncrementValue(ctx, e.Table, e.Ctx(), cnt)
 			if e.handleErr(col, &autoDatum, cnt, err) != nil {
 				return nil, err
 			}
@@ -895,7 +896,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 			for j := 0; j < cnt; j++ {
 				offset := j + start
 				id := int64(uint64(min) + uint64(j)*uint64(increment))
-				err = setDatumAutoIDAndCast(e.ctx, &rows[offset][idx], id, col)
+				err = setDatumAutoIDAndCast(e.Ctx(), &rows[offset][idx], id, col)
 				if err != nil {
 					return nil, err
 				}
@@ -904,7 +905,7 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 			continue
 		}
 
-		err = setDatumAutoIDAndCast(e.ctx, &rows[processedIdx][idx], recordID, col)
+		err = setDatumAutoIDAndCast(e.Ctx(), &rows[processedIdx][idx], recordID, col)
 		if err != nil {
 			return nil, err
 		}
@@ -916,11 +917,11 @@ func (e *InsertValues) lazyAdjustAutoIncrementDatum(ctx context.Context, rows []
 func (e *InsertValues) adjustAutoIncrementDatum(
 	ctx context.Context, d types.Datum, hasValue bool, c *table.Column,
 ) (types.Datum, error) {
-	retryInfo := e.ctx.GetSessionVars().RetryInfo
+	retryInfo := e.Ctx().GetSessionVars().RetryInfo
 	if retryInfo.Retrying {
 		id, ok := retryInfo.GetCurrAutoIncrementID()
 		if ok {
-			err := setDatumAutoIDAndCast(e.ctx, &d, id, c)
+			err := setDatumAutoIDAndCast(e.Ctx(), &d, id, c)
 			if err != nil {
 				return types.Datum{}, err
 			}
@@ -941,19 +942,19 @@ func (e *InsertValues) adjustAutoIncrementDatum(
 	}
 	// Use the value if it's not null and not 0.
 	if recordID != 0 {
-		err = e.Table.Allocators(e.ctx).Get(autoid.AutoIncrementType).Rebase(ctx, recordID, true)
+		err = e.Table.Allocators(e.Ctx()).Get(autoid.AutoIncrementType).Rebase(ctx, recordID, true)
 		if err != nil {
 			return types.Datum{}, err
 		}
-		e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID)
+		e.Ctx().GetSessionVars().StmtCtx.InsertID = uint64(recordID)
 		retryInfo.AddAutoIncrementID(recordID)
 		return d, nil
 	}
 
 	// Change NULL to auto id.
 	// Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set.
-	if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
-		recordID, err = table.AllocAutoIncrementValue(ctx, e.Table, e.ctx)
+	if d.IsNull() || e.Ctx().GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
+		recordID, err = table.AllocAutoIncrementValue(ctx, e.Table, e.Ctx())
 		if e.handleErr(c, &d, 0, err) != nil {
 			return types.Datum{}, err
 		}
@@ -964,7 +965,7 @@ func (e *InsertValues) adjustAutoIncrementDatum(
 		}
 	}
 
-	err = setDatumAutoIDAndCast(e.ctx, &d, recordID, c)
+	err = setDatumAutoIDAndCast(e.Ctx(), &d, recordID, c)
 	if err != nil {
 		return types.Datum{}, err
 	}
@@ -994,11 +995,11 @@ func getAutoRecordID(d types.Datum, target *types.FieldType, isInsert bool) (int
 func (e *InsertValues) adjustAutoRandomDatum(
 	ctx context.Context, d types.Datum, hasValue bool, c *table.Column,
 ) (types.Datum, error) {
-	retryInfo := e.ctx.GetSessionVars().RetryInfo
+	retryInfo := e.Ctx().GetSessionVars().RetryInfo
 	if retryInfo.Retrying {
 		autoRandomID, ok := retryInfo.GetCurrAutoRandomID()
 		if ok {
-			err := setDatumAutoIDAndCast(e.ctx, &d, autoRandomID, c)
+			err := setDatumAutoIDAndCast(e.Ctx(), &d, autoRandomID, c)
 			if err != nil {
 				return types.Datum{}, err
 			}
@@ -1019,15 +1020,15 @@ func (e *InsertValues) adjustAutoRandomDatum(
 	}
 	// Use the value if it's not null and not 0.
 	if recordID != 0 {
-		if !e.ctx.GetSessionVars().AllowAutoRandExplicitInsert {
+		if !e.Ctx().GetSessionVars().AllowAutoRandExplicitInsert {
 			return types.Datum{}, dbterror.ErrInvalidAutoRandom.GenWithStackByArgs(autoid.AutoRandomExplicitInsertDisabledErrMsg)
 		}
 		err = e.rebaseAutoRandomID(ctx, recordID, &c.FieldType)
 		if err != nil {
 			return types.Datum{}, err
 		}
-		e.ctx.GetSessionVars().StmtCtx.InsertID = uint64(recordID)
-		err = setDatumAutoIDAndCast(e.ctx, &d, recordID, c)
+		e.Ctx().GetSessionVars().StmtCtx.InsertID = uint64(recordID)
+		err = setDatumAutoIDAndCast(e.Ctx(), &d, recordID, c)
 		if err != nil {
 			return types.Datum{}, err
 		}
@@ -1037,7 +1038,7 @@ func (e *InsertValues) adjustAutoRandomDatum(
 
 	// Change NULL to auto id.
 	// Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set.
-	if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
+	if d.IsNull() || e.Ctx().GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
 		recordID, err = e.allocAutoRandomID(ctx, &c.FieldType)
 		if err != nil {
 			return types.Datum{}, err
@@ -1049,7 +1050,7 @@ func (e *InsertValues) adjustAutoRandomDatum(
 		}
 	}
 
-	err = setDatumAutoIDAndCast(e.ctx, &d, recordID, c)
+	err = setDatumAutoIDAndCast(e.Ctx(), &d, recordID, c)
 	if err != nil {
 		return types.Datum{}, err
 	}
@@ -1059,10 +1060,10 @@ func (e *InsertValues) adjustAutoRandomDatum(
 
 // allocAutoRandomID allocates a random id for primary key column. It assumes tableInfo.AutoRandomBits > 0.
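`allocAutoRandomID` below checks the allocated value against `IncrementalMask()` and then composes it with the current shard. A rough sketch of what such a compose step looks like; the 5/58 bit split here is only an example layout, not TiDB's exact `ShardIDFormat` for every column type:

```Go
package main

import "fmt"

// Illustrative sketch: the allocated value must fit in the incremental-bits
// mask, then the shard is placed in the high bits of the final ID.
const (
	shardBits       = 5
	incrementalBits = 63 - shardBits
)

func incrementalMask() int64 { return (1 << incrementalBits) - 1 }

func compose(shard, id int64) (int64, error) {
	if id&incrementalMask() != id {
		return 0, fmt.Errorf("auto random read failed: id overflows %d bits", incrementalBits)
	}
	return shard<<incrementalBits | id, nil
}

func main() {
	v, err := compose(3, 42)
	fmt.Println(v, err)
}
```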
 func (e *InsertValues) allocAutoRandomID(ctx context.Context, fieldType *types.FieldType) (int64, error) {
-	alloc := e.Table.Allocators(e.ctx).Get(autoid.AutoRandomType)
+	alloc := e.Table.Allocators(e.Ctx()).Get(autoid.AutoRandomType)
 	tableInfo := e.Table.Meta()
-	increment := e.ctx.GetSessionVars().AutoIncrementIncrement
-	offset := e.ctx.GetSessionVars().AutoIncrementOffset
+	increment := e.Ctx().GetSessionVars().AutoIncrementIncrement
+	offset := e.Ctx().GetSessionVars().AutoIncrementOffset
 	_, autoRandomID, err := alloc.Alloc(ctx, 1, int64(increment), int64(offset))
 	if err != nil {
 		return 0, err
@@ -1071,11 +1072,11 @@ func (e *InsertValues) allocAutoRandomID(ctx context.Context, fieldType *types.F
 	if shardFmt.IncrementalMask()&autoRandomID != autoRandomID {
 		return 0, autoid.ErrAutoRandReadFailed
 	}
-	_, err = e.ctx.Txn(true)
+	_, err = e.Ctx().Txn(true)
 	if err != nil {
 		return 0, err
 	}
-	currentShard := e.ctx.GetSessionVars().GetCurrentShard(1)
+	currentShard := e.Ctx().GetSessionVars().GetCurrentShard(1)
 	return shardFmt.Compose(currentShard, autoRandomID), nil
 }
 
@@ -1083,7 +1084,7 @@ func (e *InsertValues) rebaseAutoRandomID(ctx context.Context, recordID int64, f
 	if recordID < 0 {
 		return nil
 	}
-	alloc := e.Table.Allocators(e.ctx).Get(autoid.AutoRandomType)
+	alloc := e.Table.Allocators(e.Ctx()).Get(autoid.AutoRandomType)
 	tableInfo := e.Table.Meta()
 
 	shardFmt := autoid.NewShardIDFormat(fieldType, tableInfo.AutoRandomBits, tableInfo.AutoRandomRangeBits)
@@ -1105,7 +1106,7 @@ func (e *InsertValues) adjustImplicitRowID(
 	}
 	// Use the value if it's not null and not 0.
 	if recordID != 0 {
-		if !e.ctx.GetSessionVars().AllowWriteRowID {
+		if !e.Ctx().GetSessionVars().AllowWriteRowID {
 			return types.Datum{}, errors.Errorf("insert, update and replace statements for _tidb_rowid are not supported")
 		}
 		err = e.rebaseImplicitRowID(ctx, recordID)
@@ -1117,18 +1118,18 @@ func (e *InsertValues) adjustImplicitRowID(
 	}
 	// Change NULL to auto id.
 	// Change value 0 to auto id, if NoAutoValueOnZero SQL mode is not set.
-	if d.IsNull() || e.ctx.GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
-		_, err := e.ctx.Txn(true)
+	if d.IsNull() || e.Ctx().GetSessionVars().SQLMode&mysql.ModeNoAutoValueOnZero == 0 {
+		_, err := e.Ctx().Txn(true)
 		if err != nil {
 			return types.Datum{}, errors.Trace(err)
 		}
-		intHandle, err := tables.AllocHandle(ctx, e.ctx, e.Table)
+		intHandle, err := tables.AllocHandle(ctx, e.Ctx(), e.Table)
 		if err != nil {
 			return types.Datum{}, err
 		}
 		recordID = intHandle.IntValue()
 	}
 
-	err = setDatumAutoIDAndCast(e.ctx, &d, recordID, c)
+	err = setDatumAutoIDAndCast(e.Ctx(), &d, recordID, c)
 	if err != nil {
 		return types.Datum{}, err
 	}
@@ -1139,7 +1140,7 @@ func (e *InsertValues) rebaseImplicitRowID(ctx context.Context, recordID int64)
 	if recordID < 0 {
 		return nil
 	}
-	alloc := e.Table.Allocators(e.ctx).Get(autoid.RowIDAllocType)
+	alloc := e.Table.Allocators(e.Ctx()).Get(autoid.RowIDAllocType)
 	tableInfo := e.Table.Meta()
 
 	shardFmt := autoid.NewShardIDFormat(
@@ -1153,16 +1154,16 @@ func (e *InsertValues) rebaseImplicitRowID(ctx context.Context, recordID int64)
 }
 
 func (e *InsertValues) handleWarning(err error) {
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	sc.AppendWarning(err)
 }
 
 func (e *InsertValues) collectRuntimeStatsEnabled() bool {
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		if e.stats == nil {
 			snapshotStats := &txnsnapshot.SnapshotRuntimeStats{}
 			e.stats = &InsertRuntimeStat{
-				BasicRuntimeStats:     e.runtimeStats,
+				BasicRuntimeStats:     e.RuntimeStats(),
 				SnapshotRuntimeStats:  snapshotStats,
 				AllocatorRuntimeStats: autoid.NewAllocatorRuntimeStats(),
 			}
@@ -1180,27 +1181,27 @@ func (e *InsertValues) batchCheckAndInsert(
 	replace bool,
 ) error {
 	// all the rows will be checked, so it is safe to set BatchCheck = true
-	e.ctx.GetSessionVars().StmtCtx.BatchCheck = true
+	e.Ctx().GetSessionVars().StmtCtx.BatchCheck = true
 	defer tracing.StartRegion(ctx, "InsertValues.batchCheckAndInsert").End()
 	start := time.Now()
 	// Get keys need to be checked.
-	toBeCheckedRows, err := getKeysNeedCheck(ctx, e.ctx, e.Table, rows)
+	toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, rows)
 	if err != nil {
 		return err
 	}
 
-	txn, err := e.ctx.Txn(true)
+	txn, err := e.Ctx().Txn(true)
 	if err != nil {
 		return err
 	}
-	setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn)
+	setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, txn)
 	if e.collectRuntimeStatsEnabled() {
 		if snapshot := txn.GetSnapshot(); snapshot != nil {
 			snapshot.SetOption(kv.CollectRuntimeStats, e.stats.SnapshotRuntimeStats)
 			defer snapshot.SetOption(kv.CollectRuntimeStats, nil)
 		}
 	}
-	sc := e.ctx.GetSessionVars().StmtCtx
+	sc := e.Ctx().GetSessionVars().StmtCtx
 	for _, fkc := range e.fkChecks {
 		err = fkc.checkRows(ctx, sc, txn, toBeCheckedRows)
 		if err != nil {
@@ -1231,9 +1232,9 @@ CheckAndInsert:
 			_, err := txn.Get(ctx, r.handleKey.newKey)
 			if err == nil {
 				if !replace {
-					e.ctx.GetSessionVars().StmtCtx.AppendWarning(r.handleKey.dupErr)
-					if txnCtx := e.ctx.GetSessionVars().TxnCtx; txnCtx.IsPessimistic &&
-						e.ctx.GetSessionVars().LockUnchangedKeys {
+					e.Ctx().GetSessionVars().StmtCtx.AppendWarning(r.handleKey.dupErr)
+					if txnCtx := e.Ctx().GetSessionVars().TxnCtx; txnCtx.IsPessimistic &&
+						e.Ctx().GetSessionVars().LockUnchangedKeys {
 						// lock duplicated row key on insert-ignore
 						txnCtx.AddUnchangedKeyForLock(r.handleKey.newKey)
 					}
@@ -1250,7 +1251,7 @@ CheckAndInsert:
 				if unchanged {
 					// we don't need to add the identical row again, but the
 					// counter should act as if we did.
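`collectRuntimeStatsEnabled` above is a lazy-initialization guard: the stats object is built only when the session is actually collecting runtime statistics, and only once per executor. A small sketch of that idiom with toy types:

```Go
package main

import "fmt"

// Sketch: stats are created lazily, only when collection is enabled, and at
// most once per executor instance.
type insertStat struct{ allocations int }

type insertExec struct {
	collecting bool // stands in for e.RuntimeStats() != nil
	stats      *insertStat
}

func (e *insertExec) collectRuntimeStatsEnabled() bool {
	if e.collecting {
		if e.stats == nil {
			e.stats = &insertStat{}
		}
		return true
	}
	return false
}

func main() {
	e := &insertExec{collecting: true}
	fmt.Println(e.collectRuntimeStatsEnabled(), e.stats != nil) // true true
}
```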
-					e.ctx.GetSessionVars().StmtCtx.AddCopiedRows(1)
+					e.Ctx().GetSessionVars().StmtCtx.AddCopiedRows(1)
 					continue
 				}
 			} else if !kv.IsErrNotFound(err) {
@@ -1263,9 +1264,9 @@ CheckAndInsert:
 			if err == nil {
 				if !replace {
 					// If duplicate keys were found in BatchGet, mark row = nil.
-					e.ctx.GetSessionVars().StmtCtx.AppendWarning(uk.dupErr)
-					if txnCtx := e.ctx.GetSessionVars().TxnCtx; txnCtx.IsPessimistic &&
-						e.ctx.GetSessionVars().LockUnchangedKeys {
+					e.Ctx().GetSessionVars().StmtCtx.AppendWarning(uk.dupErr)
+					if txnCtx := e.Ctx().GetSessionVars().TxnCtx; txnCtx.IsPessimistic &&
+						e.Ctx().GetSessionVars().LockUnchangedKeys {
 						// lock duplicated unique key on insert-ignore
 						txnCtx.AddUnchangedKeyForLock(uk.newKey)
 					}
@@ -1297,7 +1298,7 @@ CheckAndInsert:
 			// If row was checked with no duplicate keys,
 			// it should be added to values map for the further row check.
 			// There may be duplicate keys inside the insert statement.
-			e.ctx.GetSessionVars().StmtCtx.AddCopiedRows(1)
+			e.Ctx().GetSessionVars().StmtCtx.AddCopiedRows(1)
 			err = addRecord(ctx, rows[i])
 			if err != nil {
 				// throw warning when violate check constraint
@@ -1327,7 +1328,7 @@ func (e *InsertValues) removeRow(
 	inReplace bool,
 ) (bool, error) {
 	newRow := r.row
-	oldRow, err := getOldRow(ctx, e.ctx, txn, r.t, handle, e.GenExprs)
+	oldRow, err := getOldRow(ctx, e.Ctx(), txn, r.t, handle, e.GenExprs)
 	if err != nil {
 		logutil.BgLogger().Error(
 			"get old row failed when replace",
@@ -1346,30 +1347,30 @@ func (e *InsertValues) removeRow(
 	}
 	if identical {
 		if inReplace {
-			e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
+			e.Ctx().GetSessionVars().StmtCtx.AddAffectedRows(1)
 		}
 		keySet := lockRowKey
-		if e.ctx.GetSessionVars().LockUnchangedKeys {
+		if e.Ctx().GetSessionVars().LockUnchangedKeys {
 			keySet |= lockUniqueKeys
 		}
-		if _, err := addUnchangedKeysForLockByRow(e.ctx, r.t, handle, oldRow, keySet); err != nil {
+		if _, err := addUnchangedKeysForLockByRow(e.Ctx(), r.t, handle, oldRow, keySet); err != nil {
 			return false, err
 		}
 		return true, nil
 	}
 
-	err = r.t.RemoveRecord(e.ctx, handle, oldRow)
+	err = r.t.RemoveRecord(e.Ctx(), handle, oldRow)
 	if err != nil {
 		return false, err
 	}
-	err = onRemoveRowForFK(e.ctx, oldRow, e.fkChecks, e.fkCascades)
+	err = onRemoveRowForFK(e.Ctx(), oldRow, e.fkChecks, e.fkCascades)
 	if err != nil {
 		return false, err
 	}
 	if inReplace {
-		e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1)
+		e.Ctx().GetSessionVars().StmtCtx.AddAffectedRows(1)
 	} else {
-		e.ctx.GetSessionVars().StmtCtx.AddDeletedRows(1)
+		e.Ctx().GetSessionVars().StmtCtx.AddDeletedRows(1)
 	}
 
 	return false, nil
@@ -1381,7 +1382,7 @@ func (e *InsertValues) equalDatumsAsBinary(a []types.Datum, b []types.Datum) (bo
 		return false, nil
 	}
 	for i, ai := range a {
-		v, err := ai.Compare(e.ctx.GetSessionVars().StmtCtx, &b[i], collate.GetBinaryCollator())
+		v, err := ai.Compare(e.Ctx().GetSessionVars().StmtCtx, &b[i], collate.GetBinaryCollator())
 		if err != nil {
 			return false, errors.Trace(err)
 		}
@@ -1399,14 +1400,14 @@ func (e *InsertValues) addRecord(ctx context.Context, row []types.Datum) error {
 func (e *InsertValues) addRecordWithAutoIDHint(
 	ctx context.Context, row []types.Datum, reserveAutoIDCount int,
 ) (err error) {
-	vars := e.ctx.GetSessionVars()
+	vars := e.Ctx().GetSessionVars()
 	if !vars.ConstraintCheckInPlace {
 		vars.PresumeKeyNotExists = true
 	}
 	if reserveAutoIDCount > 0 {
-		_, err = e.Table.AddRecord(e.ctx, row, table.WithCtx(ctx), table.WithReserveAutoIDHint(reserveAutoIDCount))
+		_, err = e.Table.AddRecord(e.Ctx(), row, table.WithCtx(ctx), table.WithReserveAutoIDHint(reserveAutoIDCount))
 	} else {
-		_, err = e.Table.AddRecord(e.ctx, row, table.WithCtx(ctx))
+		_, err = e.Table.AddRecord(e.Ctx(), row, table.WithCtx(ctx))
 	}
 	vars.PresumeKeyNotExists = false
 	if err != nil {
diff --git a/executor/internal/exec/BUILD.bazel b/executor/internal/exec/BUILD.bazel
new file mode 100644
index 0000000000000..9d1ce7767d4f6
--- /dev/null
+++ b/executor/internal/exec/BUILD.bazel
@@ -0,0 +1,18 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "exec",
+    srcs = ["executor.go"],
+    importpath = "github.com/pingcap/tidb/executor/internal/exec",
+    visibility = ["//executor:__subpackages__"],
+    deps = [
+        "//domain",
+        "//expression",
+        "//sessionctx",
+        "//types",
+        "//util/chunk",
+        "//util/execdetails",
+        "//util/sqlexec",
+        "@com_github_ngaut_pools//:pools",
+    ],
+)
diff --git a/executor/internal/exec/executor.go b/executor/internal/exec/executor.go
new file mode 100644
index 0000000000000..711acd3bfeb60
--- /dev/null
+++ b/executor/internal/exec/executor.go
@@ -0,0 +1,233 @@
+// Copyright 2023 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exec
+
+import (
+	"context"
+
+	"github.com/ngaut/pools"
+	"github.com/pingcap/tidb/domain"
+	"github.com/pingcap/tidb/expression"
+	"github.com/pingcap/tidb/sessionctx"
+	"github.com/pingcap/tidb/types"
+	"github.com/pingcap/tidb/util/chunk"
+	"github.com/pingcap/tidb/util/execdetails"
+	"github.com/pingcap/tidb/util/sqlexec"
+)
+
+// Executor is the physical implementation of an algebra operator.
+//
+// In TiDB, all algebra operators are implemented as iterators, i.e., they
+// support a simple Open-Next-Close protocol. See this paper for more details:
+//
+// "Volcano-An Extensible and Parallel Query Evaluation System"
+//
+// Different from Volcano's execution model, a "Next" function call in TiDB will
+// return a batch of rows, rather than a single row as in Volcano.
+// NOTE: Executors must call "chk.Reset()" before appending their results to it.
+type Executor interface {
+	Base() *BaseExecutor
+	Open(context.Context) error
+	Next(ctx context.Context, req *chunk.Chunk) error
+	Close() error
+	Schema() *expression.Schema
+	RetFieldTypes() []*types.FieldType
+	InitCap() int
+	MaxChunkSize() int
+}
+
+var _ Executor = &BaseExecutor{}
+
+// BaseExecutor holds common information for executors.
+type BaseExecutor struct {
+	ctx           sessionctx.Context
+	AllocPool     chunk.Allocator
+	schema        *expression.Schema // output schema
+	runtimeStats  *execdetails.BasicRuntimeStats
+	children      []Executor
+	retFieldTypes []*types.FieldType
+	id            int
+	initCap       int
+	maxChunkSize  int
+}
+
+// NewBaseExecutor creates a new BaseExecutor instance.
+func NewBaseExecutor(ctx sessionctx.Context, schema *expression.Schema, id int, children ...Executor) BaseExecutor {
+	e := BaseExecutor{
+		children:     children,
+		ctx:          ctx,
+		id:           id,
+		schema:       schema,
+		initCap:      ctx.GetSessionVars().InitChunkSize,
+		maxChunkSize: ctx.GetSessionVars().MaxChunkSize,
+		AllocPool:    ctx.GetSessionVars().ChunkPool.Alloc,
+	}
+	if ctx.GetSessionVars().StmtCtx.RuntimeStatsColl != nil {
+		if e.id > 0 {
+			e.runtimeStats = e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.GetBasicRuntimeStats(id)
+		}
+	}
+	if schema != nil {
+		cols := schema.Columns
+		e.retFieldTypes = make([]*types.FieldType, len(cols))
+		for i := range cols {
+			e.retFieldTypes[i] = cols[i].RetType
+		}
+	}
+	return e
+}
+
+// RuntimeStats returns the runtime stats of an executor.
+func (e *BaseExecutor) RuntimeStats() *execdetails.BasicRuntimeStats {
+	return e.runtimeStats
+}
+
+// ID returns the id of an executor.
+func (e *BaseExecutor) ID() int {
+	return e.id
+}
+
+// AllChildren returns all children.
+func (e *BaseExecutor) AllChildren() []Executor {
+	return e.children
+}
+
+// ChildrenLen returns the length of children.
+func (e *BaseExecutor) ChildrenLen() int {
+	return len(e.children)
+}
+
+// EmptyChildren reports whether the executor has no children.
+func (e *BaseExecutor) EmptyChildren() bool {
+	return len(e.children) == 0
+}
+
+// SetChildren sets the children for an executor.
+func (e *BaseExecutor) SetChildren(idx int, ex Executor) {
+	e.children[idx] = ex
+}
+
+// Children returns the children for an executor.
+func (e *BaseExecutor) Children(idx int) Executor {
+	return e.children[idx]
+}
+
+// RetFieldTypes returns the return field types of an executor.
+func (e *BaseExecutor) RetFieldTypes() []*types.FieldType {
+	return e.retFieldTypes
+}
+
+// InitCap returns the initial capacity for chunks.
+func (e *BaseExecutor) InitCap() int {
+	return e.initCap
+}
+
+// SetInitCap sets the initial capacity for chunks.
+func (e *BaseExecutor) SetInitCap(c int) {
+	e.initCap = c
+}
+
+// MaxChunkSize returns the max chunk size.
+func (e *BaseExecutor) MaxChunkSize() int {
+	return e.maxChunkSize
+}
+
+// SetMaxChunkSize sets the max chunk size.
+func (e *BaseExecutor) SetMaxChunkSize(size int) {
+	e.maxChunkSize = size
+}
+
+// Base returns the BaseExecutor of an executor, don't override this method!
+func (e *BaseExecutor) Base() *BaseExecutor {
+	return e
+}
+
+// Open initializes children recursively and "childrenResults" according to children's schemas.
+func (e *BaseExecutor) Open(ctx context.Context) error {
+	for _, child := range e.children {
+		err := child.Open(ctx)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close closes all executors and releases all resources.
+func (e *BaseExecutor) Close() error {
+	var firstErr error
+	for _, src := range e.children {
+		if err := src.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+
+// Schema returns the current BaseExecutor's schema. If it is nil, then create and return a new one.
+func (e *BaseExecutor) Schema() *expression.Schema {
+	if e.schema == nil {
+		return expression.NewSchema()
+	}
+	return e.schema
+}
+
+// Next fills multiple rows into a chunk.
+func (*BaseExecutor) Next(_ context.Context, _ *chunk.Chunk) error {
+	return nil
+}
+
+// Ctx returns the sessionctx.Context of the executor.
+func (e *BaseExecutor) Ctx() sessionctx.Context {
+	return e.ctx
+}
+
+// GetSchema gets the schema.
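The new API above is what every executor in this patch migrates onto. A sketch (not part of the patch) of how a filtering executor inside the `executor` package would embed and use it; `SelectionExec` and `filter` here are illustrative names, not the real implementation:

```Go
package executor

import (
	"context"

	"github.com/pingcap/tidb/executor/internal/exec"
	"github.com/pingcap/tidb/util/chunk"
)

// SelectionExec embeds exec.BaseExecutor, so the promoted accessors
// (Children, MaxChunkSize, RetFieldTypes, ...) replace the old unexported fields.
type SelectionExec struct {
	exec.BaseExecutor
	filter func(chunk.Row) bool
}

// Next reads one batch from the single child through the exported accessors
// and copies through only the rows accepted by the filter.
func (e *SelectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
	req.Reset()
	childResult := chunk.NewChunkWithCapacity(e.Children(0).RetFieldTypes(), e.MaxChunkSize())
	if err := e.Children(0).Next(ctx, childResult); err != nil {
		return err
	}
	for i := 0; i < childResult.NumRows(); i++ {
		if row := childResult.GetRow(i); e.filter(row) {
			req.AppendRow(row)
		}
	}
	return nil
}
```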
+func (e *BaseExecutor) GetSchema() *expression.Schema { + return e.schema +} + +// UpdateDeltaForTableID updates the delta info for the table with tableID. +func (e *BaseExecutor) UpdateDeltaForTableID(id int64) { + txnCtx := e.ctx.GetSessionVars().TxnCtx + txnCtx.UpdateDeltaForTable(id, 0, 0, map[int64]int64{}) +} + +// GetSysSession gets a system session context from executor. +func (e *BaseExecutor) GetSysSession() (sessionctx.Context, error) { + dom := domain.GetDomain(e.Ctx()) + sysSessionPool := dom.SysSessionPool() + ctx, err := sysSessionPool.Get() + if err != nil { + return nil, err + } + restrictedCtx := ctx.(sessionctx.Context) + restrictedCtx.GetSessionVars().InRestrictedSQL = true + return restrictedCtx, nil +} + +// ReleaseSysSession releases a system session context to executor. +func (e *BaseExecutor) ReleaseSysSession(ctx context.Context, sctx sessionctx.Context) { + if sctx == nil { + return + } + dom := domain.GetDomain(e.Ctx()) + sysSessionPool := dom.SysSessionPool() + if _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback"); err != nil { + sctx.(pools.Resource).Close() + return + } + sysSessionPool.Put(sctx.(pools.Resource)) +} diff --git a/executor/join.go b/executor/join.go index 2406e1c61c584..ce93bfb255e0f 100644 --- a/executor/join.go +++ b/executor/join.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/terror" plannercore "github.com/pingcap/tidb/planner/core" @@ -43,8 +44,8 @@ import ( ) var ( - _ Executor = &HashJoinExec{} - _ Executor = &NestedLoopApplyExec{} + _ exec.Executor = &HashJoinExec{} + _ exec.Executor = &NestedLoopApplyExec{} ) type hashJoinCtx struct { @@ -76,7 +77,7 @@ type hashJoinCtx struct { type probeSideTupleFetcher struct { *hashJoinCtx - probeSideExec Executor + probeSideExec exec.Executor probeChkResourceCh chan *probeChkResource probeResultChs []chan *chunk.Chunk requiredRows int64 @@ -109,14 +110,14 @@ type probeWorker struct { type buildWorker struct { hashJoinCtx *hashJoinCtx - buildSideExec Executor + buildSideExec exec.Executor buildKeyColIdx []int buildNAKeyColIdx []int } // HashJoinExec implements the hash join algorithm. type HashJoinExec struct { - baseExecutor + exec.BaseExecutor *hashJoinCtx probeSideTupleFetcher *probeSideTupleFetcher @@ -190,15 +191,15 @@ func (e *HashJoinExec) Close() error { e.stats.hashStat = *e.rowContainer.stat } if e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } - err := e.baseExecutor.Close() + err := e.BaseExecutor.Close() return err } // Open implements the Executor Open interface. 
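
An aside before the join.go changes: `GetSysSession` and `ReleaseSysSession`, which close out executor.go above, are designed as a get/defer-release pair. A hedged usage sketch, assuming imports of context, exec, and sqlexec (`runInternalSQL` is hypothetical):

```Go
func runInternalSQL(ctx context.Context, e *exec.BaseExecutor, sql string) error {
	sysCtx, err := e.GetSysSession() // pooled session with InRestrictedSQL set
	if err != nil {
		return err
	}
	// ReleaseSysSession rolls back any open transaction before putting the
	// session back into the domain's pool, and closes it if the rollback fails.
	defer e.ReleaseSysSession(ctx, sysCtx)

	_, err = sysCtx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql)
	return err
}
```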
func (e *HashJoinExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { e.closeCh = nil e.prepared = false return err @@ -207,19 +208,19 @@ func (e *HashJoinExec) Open(ctx context.Context) error { if e.hashJoinCtx.memTracker != nil { e.hashJoinCtx.memTracker.Reset() } else { - e.hashJoinCtx.memTracker = memory.NewTracker(e.id, -1) + e.hashJoinCtx.memTracker = memory.NewTracker(e.ID(), -1) } - e.hashJoinCtx.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.hashJoinCtx.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) - e.diskTracker = disk.NewTracker(e.id, -1) - e.diskTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.DiskTracker) + e.diskTracker = disk.NewTracker(e.ID(), -1) + e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker) e.workerWg = util.WaitGroupWrapper{} e.waiterWg = util.WaitGroupWrapper{} e.closeCh = make(chan struct{}) e.finished.Store(false) - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { e.stats = &hashJoinRuntimeStats{ concurrent: int(e.concurrency), } @@ -329,7 +330,7 @@ func (w *buildWorker) fetchBuildSideRows(ctx context.Context, chkCh chan<- *chun if w.hashJoinCtx.finished.Load() { return } - chk := sessVars.GetNewChunkWithCapacity(w.buildSideExec.base().retFieldTypes, sessVars.MaxChunkSize, sessVars.MaxChunkSize, w.hashJoinCtx.allocPool) + chk := sessVars.GetNewChunkWithCapacity(w.buildSideExec.Base().RetFieldTypes(), sessVars.MaxChunkSize, sessVars.MaxChunkSize, w.hashJoinCtx.allocPool) err = Next(ctx, w.buildSideExec, chk) if err != nil { errCh <- errors.Trace(err) @@ -388,7 +389,7 @@ func (e *HashJoinExec) fetchAndProbeHashTable(ctx context.Context) { e.initializeForProbe() e.workerWg.RunWithRecover(func() { defer trace.StartRegion(ctx, "HashJoinProbeSideFetcher").End() - e.probeSideTupleFetcher.fetchProbeSideChunks(ctx, e.maxChunkSize) + e.probeSideTupleFetcher.fetchProbeSideChunks(ctx, e.MaxChunkSize()) }, e.probeSideTupleFetcher.handleProbeSideFetcherPanic) for i := uint(0); i < e.concurrency; i++ { @@ -1118,7 +1119,7 @@ func (e *HashJoinExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { keyColIdx: e.buildWorker.buildKeyColIdx, naKeyColIdx: e.buildWorker.buildNAKeyColIdx, } - e.rowContainer = newHashRowContainer(e.ctx, hCtx, retTypes(e.buildWorker.buildSideExec)) + e.rowContainer = newHashRowContainer(e.Ctx(), hCtx, retTypes(e.buildWorker.buildSideExec)) // we shallow copies rowContainer for each probe worker to avoid lock contention for i := uint(0); i < e.concurrency; i++ { if i == 0 { @@ -1253,13 +1254,13 @@ func (w *buildWorker) buildHashTableForList(buildSideResultCh <-chan *chunk.Chun // NestedLoopApplyExec is the executor for apply. 
type NestedLoopApplyExec struct { - baseExecutor + exec.BaseExecutor ctx sessionctx.Context innerRows []chunk.Row cursor int - innerExec Executor - outerExec Executor + innerExec exec.Executor + outerExec exec.Executor innerFilter expression.CNFExprs outerFilter expression.CNFExprs @@ -1292,7 +1293,7 @@ type NestedLoopApplyExec struct { func (e *NestedLoopApplyExec) Close() error { e.innerRows = nil e.memTracker = nil - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { runtimeStats := newJoinRuntimeStats() if e.canUseCache { var hitRatio float64 @@ -1304,7 +1305,7 @@ func (e *NestedLoopApplyExec) Close() error { runtimeStats.setCacheInfo(false, 0) } runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", 0)) - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats) } return e.outerExec.Close() } @@ -1319,10 +1320,10 @@ func (e *NestedLoopApplyExec) Open(ctx context.Context) error { e.innerRows = e.innerRows[:0] e.outerChunk = tryNewCacheChunk(e.outerExec) e.innerChunk = tryNewCacheChunk(e.innerExec) - e.innerList = chunk.NewList(retTypes(e.innerExec), e.initCap, e.maxChunkSize) + e.innerList = chunk.NewList(retTypes(e.innerExec), e.InitCap(), e.MaxChunkSize()) - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) e.innerList.GetMemTracker().SetLabel(memory.LabelForInnerList) e.innerList.GetMemTracker().AttachTo(e.memTracker) @@ -1341,8 +1342,8 @@ func (e *NestedLoopApplyExec) Open(ctx context.Context) error { // aggExecutorTreeInputEmpty checks whether the executor tree returns empty if without aggregate operators. // Note that, the prerequisite is that this executor tree has been executed already and it returns one row. -func aggExecutorTreeInputEmpty(e Executor) bool { - children := e.base().children +func aggExecutorTreeInputEmpty(e exec.Executor) bool { + children := e.Base().AllChildren() if len(children) == 0 { return false } @@ -1420,7 +1421,7 @@ func (e *NestedLoopApplyExec) fetchAllInners(ctx context.Context) error { if e.canUseCache { // create a new one since it may be in the cache - e.innerList = chunk.NewList(retTypes(e.innerExec), e.initCap, e.maxChunkSize) + e.innerList = chunk.NewList(retTypes(e.innerExec), e.InitCap(), e.MaxChunkSize()) } else { e.innerList.Reset() } @@ -1465,7 +1466,7 @@ func (e *NestedLoopApplyExec) Next(ctx context.Context, req *chunk.Chunk) (err e var key []byte for _, col := range e.outerSchema { *col.Data = e.outerRow.GetDatum(col.Index, col.RetType) - key, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, key, *col.Data) + key, err = codec.EncodeKey(e.Ctx().GetSessionVars().StmtCtx, key, *col.Data) if err != nil { return err } diff --git a/executor/load_data.go b/executor/load_data.go index 1c1241857e567..4e08a7eae41de 100644 --- a/executor/load_data.go +++ b/executor/load_data.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/executor/asyncloaddata" "github.com/pingcap/tidb/executor/importer" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -54,7 +55,7 @@ var ( // LoadDataExec represents a load data executor. 
type LoadDataExec struct { - baseExecutor + exec.BaseExecutor FileLocRef ast.FileLocRefTp loadDataWorker *LoadDataWorker @@ -235,7 +236,7 @@ func initEncodeCommitWorkers(e *LoadDataWorker) (*encodeWorker, *commitWorker, e if err2 != nil { return nil, nil, err2 } - colAssignExprs, exprWarnings, err2 := e.controller.CreateColAssignExprs(insertValues.ctx) + colAssignExprs, exprWarnings, err2 := e.controller.CreateColAssignExprs(insertValues.Ctx()) if err2 != nil { return nil, nil, err2 } @@ -268,7 +269,7 @@ func createInsertValues(e *LoadDataWorker) (insertVal *InsertValues, err error) } } ret := &InsertValues{ - baseExecutor: newBaseExecutor(e.UserSctx, nil, e.planInfo.ID), + BaseExecutor: exec.NewBaseExecutor(e.UserSctx, nil, e.planInfo.ID), Table: e.table, Columns: e.planInfo.Columns, GenExprs: e.planInfo.GenColExprs, @@ -411,7 +412,7 @@ func (w *encodeWorker) readOneBatchRows(ctx context.Context, parser mydump.Parse w.rows = append(w.rows, r) w.curBatchCnt++ if w.maxRowsInBatch != 0 && w.rowCount%w.maxRowsInBatch == 0 { - logutil.Logger(ctx).Info("batch limit hit when inserting rows", zap.Int("maxBatchRows", w.maxChunkSize), + logutil.Logger(ctx).Info("batch limit hit when inserting rows", zap.Int("maxBatchRows", w.MaxChunkSize()), zap.Uint64("totalRows", w.rowCount)) return nil } @@ -439,7 +440,7 @@ func (w *encodeWorker) parserData2TableData( } row := make([]types.Datum, 0, len(w.insertColumns)) - sessionVars := w.ctx.GetSessionVars() + sessionVars := w.Ctx().GetSessionVars() setVar := func(name string, col *types.Datum) { // User variable names are not case-sensitive // https://dev.mysql.com/doc/refman/8.0/en/user-variables.html @@ -529,10 +530,10 @@ func (w *commitWorker) commitWork(ctx context.Context, inCh <-chan commitTask) ( if err != nil { background := context.Background() - w.ctx.StmtRollback(background, false) - w.ctx.RollbackTxn(background) + w.Ctx().StmtRollback(background, false) + w.Ctx().RollbackTxn(background) } else { - if err = w.ctx.CommitTxn(ctx); err != nil { + if err = w.Ctx().CommitTxn(ctx); err != nil { logutil.Logger(ctx).Error("commit error refresh", zap.Error(err)) } } @@ -574,7 +575,7 @@ func (w *commitWorker) commitOneTask(ctx context.Context, task commitTask) error failpoint.Inject("commitOneTaskErr", func() { failpoint.Return(errors.New("mock commit one task error")) }) - w.ctx.StmtCommit(ctx) + w.Ctx().StmtCommit(ctx) return nil } @@ -591,7 +592,7 @@ func (w *commitWorker) checkAndInsertOneBatch(ctx context.Context, rows [][]type if cnt == 0 { return err } - w.ctx.GetSessionVars().StmtCtx.AddRecordRows(cnt) + w.Ctx().GetSessionVars().StmtCtx.AddRecordRows(cnt) switch w.controller.OnDuplicate { case ast.OnDuplicateKeyHandlingReplace: @@ -600,7 +601,7 @@ func (w *commitWorker) checkAndInsertOneBatch(ctx context.Context, rows [][]type return w.batchCheckAndInsert(ctx, rows[0:cnt], w.addRecordLD, false) case ast.OnDuplicateKeyHandlingError: for i, row := range rows[0:cnt] { - sizeHintStep := int(w.ctx.GetSessionVars().ShardAllocateStep) + sizeHintStep := int(w.Ctx().GetSessionVars().ShardAllocateStep) if sizeHintStep > 0 && i%sizeHintStep == 0 { sizeHint := sizeHintStep remain := len(rows[0:cnt]) - i @@ -614,7 +615,7 @@ func (w *commitWorker) checkAndInsertOneBatch(ctx context.Context, rows [][]type if err != nil { return err } - w.ctx.GetSessionVars().StmtCtx.AddCopiedRows(1) + w.Ctx().GetSessionVars().StmtCtx.AddCopiedRows(1) } return nil default: @@ -675,8 +676,8 @@ func (e *LoadDataWorker) TestLoadLocal(parser mydump.Parser) error { return err } 
encoder.resetBatch() - committer.ctx.StmtCommit(ctx) - err = committer.ctx.CommitTxn(ctx) + committer.Ctx().StmtCommit(ctx) + err = committer.Ctx().CommitTxn(ctx) if err != nil { return err } @@ -730,12 +731,12 @@ func (k loadDataVarKeyType) String() string { const LoadDataVarKey loadDataVarKeyType = 0 var ( - _ Executor = (*LoadDataActionExec)(nil) + _ exec.Executor = (*LoadDataActionExec)(nil) ) // LoadDataActionExec executes LoadDataActionStmt. type LoadDataActionExec struct { - baseExecutor + exec.BaseExecutor tp ast.LoadDataActionTp jobID int64 @@ -743,8 +744,8 @@ type LoadDataActionExec struct { // Next implements the Executor Next interface. func (e *LoadDataActionExec) Next(ctx context.Context, _ *chunk.Chunk) error { - sqlExec := e.ctx.(sqlexec.SQLExecutor) - user := e.ctx.GetSessionVars().User.String() + sqlExec := e.Ctx().(sqlexec.SQLExecutor) + user := e.Ctx().GetSessionVars().User.String() job := asyncloaddata.NewJob(e.jobID, sqlExec, user) switch e.tp { diff --git a/executor/load_stats.go b/executor/load_stats.go index e292eb817519d..c409aad12c1c6 100644 --- a/executor/load_stats.go +++ b/executor/load_stats.go @@ -20,17 +20,18 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/util/chunk" ) -var _ Executor = &LoadStatsExec{} +var _ exec.Executor = &LoadStatsExec{} // LoadStatsExec represents a load statistic executor. type LoadStatsExec struct { - baseExecutor + exec.BaseExecutor info *LoadStatsInfo } @@ -53,16 +54,16 @@ const LoadStatsVarKey loadStatsVarKeyType = 0 // Next implements the Executor Next interface. func (e *LoadStatsExec) Next(_ context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if len(e.info.Path) == 0 { return errors.New("Load Stats: file path is empty") } - val := e.ctx.Value(LoadStatsVarKey) + val := e.Ctx().Value(LoadStatsVarKey) if val != nil { - e.ctx.SetValue(LoadStatsVarKey, nil) + e.Ctx().SetValue(LoadStatsVarKey, nil) return errors.New("Load Stats: previous load stats option isn't closed normally") } - e.ctx.SetValue(LoadStatsVarKey, e.info) + e.Ctx().SetValue(LoadStatsVarKey, e.info) return nil } diff --git a/executor/lock_stats.go b/executor/lock_stats.go index ccf6df123db07..5d36b900887e7 100644 --- a/executor/lock_stats.go +++ b/executor/lock_stats.go @@ -19,16 +19,17 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/util/chunk" ) -var _ Executor = &LockStatsExec{} -var _ Executor = &UnlockStatsExec{} +var _ exec.Executor = &LockStatsExec{} +var _ exec.Executor = &UnlockStatsExec{} // LockStatsExec represents a lock statistic executor. type LockStatsExec struct { - baseExecutor + exec.BaseExecutor Tables []*ast.TableName } @@ -45,7 +46,7 @@ const LockStatsVarKey lockStatsVarKeyType = 0 // Next implements the Executor Next interface. 
func (e *LockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) is := do.InfoSchema() h := do.StatsHandle() if h == nil { @@ -76,7 +77,7 @@ func (e *LockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { } msg, err := h.AddLockedTables(tids, pids, e.Tables) if msg != "" { - e.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(msg)) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(errors.New(msg)) } return err } @@ -93,7 +94,7 @@ func (e *LockStatsExec) Open(_ context.Context) error { // UnlockStatsExec represents a unlock statistic executor. type UnlockStatsExec struct { - baseExecutor + exec.BaseExecutor Tables []*ast.TableName } @@ -110,7 +111,7 @@ const UnlockStatsVarKey unlockStatsVarKeyType = 0 // Next implements the Executor Next interface. func (e *UnlockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) is := do.InfoSchema() h := do.StatsHandle() if h == nil { @@ -141,7 +142,7 @@ func (e *UnlockStatsExec) Next(_ context.Context, _ *chunk.Chunk) error { } msg, err := h.RemoveLockedTables(tids, pids, e.Tables) if msg != "" { - e.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New(msg)) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(errors.New(msg)) } return err } diff --git a/executor/mem_reader.go b/executor/mem_reader.go index 0382f6b156691..2f9412b0960a1 100644 --- a/executor/mem_reader.go +++ b/executor/mem_reader.go @@ -72,7 +72,7 @@ func buildMemIndexReader(ctx context.Context, us *UnionScanExec, idxReader *Inde outputOffset = append(outputOffset, col.Index) } return &memIndexReader{ - ctx: us.ctx, + ctx: us.Ctx(), index: idxReader.index, table: idxReader.table.Meta(), kvRanges: kvRanges, @@ -212,15 +212,15 @@ func buildMemTableReader(ctx context.Context, us *UnionScanExec, kvRanges []kv.K } defVal := func(i int) ([]byte, error) { - d, err := table.GetColOriginDefaultValueWithoutStrictSQLMode(us.ctx, us.columns[i]) + d, err := table.GetColOriginDefaultValueWithoutStrictSQLMode(us.Ctx(), us.columns[i]) if err != nil { return nil, err } - return tablecodec.EncodeValue(us.ctx.GetSessionVars().StmtCtx, nil, d) + return tablecodec.EncodeValue(us.Ctx().GetSessionVars().StmtCtx, nil, d) } - rd := rowcodec.NewByteDecoder(colInfo, pkColIDs, defVal, us.ctx.GetSessionVars().Location()) + rd := rowcodec.NewByteDecoder(colInfo, pkColIDs, defVal, us.Ctx().GetSessionVars().Location()) return &memTableReader{ - ctx: us.ctx, + ctx: us.Ctx(), table: us.table.Meta(), columns: us.columns, kvRanges: kvRanges, @@ -588,7 +588,7 @@ func buildMemIndexLookUpReader(ctx context.Context, us *UnionScanExec, idxLookUp kvRanges := idxLookUpReader.kvRanges outputOffset := []int{len(idxLookUpReader.index.Columns)} memIdxReader := &memIndexReader{ - ctx: us.ctx, + ctx: us.Ctx(), index: idxLookUpReader.index, table: idxLookUpReader.table.Meta(), kvRanges: kvRanges, @@ -600,7 +600,7 @@ func buildMemIndexLookUpReader(ctx context.Context, us *UnionScanExec, idxLookUp } return &memIndexLookUpReader{ - ctx: us.ctx, + ctx: us.Ctx(), index: idxLookUpReader.index, columns: idxLookUpReader.columns, table: idxLookUpReader.table, @@ -705,9 +705,9 @@ func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMerge memReaders := make([]memReader, 0, indexCount) for i := 0; i < indexCount; i++ { if indexMergeReader.indexes[i] == nil { - colIDs, pkColIDs, rd := getColIDAndPkColIDs(indexMergeReader.ctx, indexMergeReader.table, indexMergeReader.columns) 
+ colIDs, pkColIDs, rd := getColIDAndPkColIDs(indexMergeReader.Ctx(), indexMergeReader.table, indexMergeReader.columns) memReaders = append(memReaders, &memTableReader{ - ctx: us.ctx, + ctx: us.Ctx(), table: indexMergeReader.table.Meta(), columns: indexMergeReader.columns, kvRanges: nil, @@ -724,7 +724,7 @@ func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMerge } else { outputOffset := []int{len(indexMergeReader.indexes[i].Columns)} memReaders = append(memReaders, &memIndexReader{ - ctx: us.ctx, + ctx: us.Ctx(), index: indexMergeReader.indexes[i], table: indexMergeReader.table.Meta(), kvRanges: nil, @@ -737,7 +737,7 @@ func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMerge } return &memIndexMergeReader{ - ctx: us.ctx, + ctx: us.Ctx(), table: indexMergeReader.table, columns: indexMergeReader.columns, conditions: us.conditions, diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index a299438da0777..8d2634ba76696 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/kvproto/pkg/diagnosticspb" "github.com/pingcap/sysutil" "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -69,7 +70,7 @@ type memTableRetriever interface { // MemTableReaderExec executes memTable information retrieving from the MemTable components type MemTableReaderExec struct { - baseExecutor + exec.BaseExecutor table *model.TableInfo retriever memTableRetriever // cacheRetrieved is used to indicate whether has the parent executor retrieved @@ -99,14 +100,14 @@ func (e *MemTableReaderExec) Next(ctx context.Context, req *chunk.Chunk) error { // The `InspectionTableCache` will be assigned in the begin of retrieving` and be // cleaned at the end of retrieving, so nil represents currently in non-inspection mode. - if cache, tbl := e.ctx.GetSessionVars().InspectionTableCache, e.table.Name.L; cache != nil && + if cache, tbl := e.Ctx().GetSessionVars().InspectionTableCache, e.table.Name.L; cache != nil && e.isInspectionCacheableTable(tbl) { // TODO: cached rows will be returned fully, we should refactor this part. if !e.cacheRetrieved { // Obtain data from cache first. cached, found := cache[tbl] if !found { - rows, err := e.retriever.retrieve(ctx, e.ctx) + rows, err := e.retriever.retrieve(ctx, e.Ctx()) cached = variable.TableSnapshot{Rows: rows, Err: err} cache[tbl] = cached } @@ -114,7 +115,7 @@ func (e *MemTableReaderExec) Next(ctx context.Context, req *chunk.Chunk) error { rows, err = cached.Rows, cached.Err } } else { - rows, err = e.retriever.retrieve(ctx, e.ctx) + rows, err = e.retriever.retrieve(ctx, e.Ctx()) } if err != nil { return err @@ -136,8 +137,8 @@ func (e *MemTableReaderExec) Next(ctx context.Context, req *chunk.Chunk) error { // Close implements the Executor Close interface. 
func (e *MemTableReaderExec) Close() error { - if stats := e.retriever.getRuntimeStats(); stats != nil && e.runtimeStats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, stats) + if stats := e.retriever.getRuntimeStats(); stats != nil && e.RuntimeStats() != nil { + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), stats) } return e.retriever.close() } @@ -662,7 +663,7 @@ type HistoryHotRegion struct { EndKey string `json:"end_key"` } -func (e *hotRegionsHistoryRetriver) initialize(ctx context.Context, sctx sessionctx.Context) ([]chan hotRegionsResult, error) { +func (e *hotRegionsHistoryRetriver) initialize(_ context.Context, sctx sessionctx.Context) ([]chan hotRegionsResult, error) { if !hasPriv(sctx, mysql.ProcessPriv) { return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS") } @@ -689,12 +690,10 @@ func (e *hotRegionsHistoryRetriver) initialize(ctx context.Context, sctx session IsLeaders: e.extractor.IsLeaders, } - return e.startRetrieving(ctx, sctx, pdServers, historyHotRegionsRequest) + return e.startRetrieving(pdServers, historyHotRegionsRequest) } func (e *hotRegionsHistoryRetriver) startRetrieving( - ctx context.Context, - sctx sessionctx.Context, pdServers []infoschema.ServerInfo, req *HistoryHotRegionsRequest, ) ([]chan hotRegionsResult, error) { @@ -870,7 +869,7 @@ type tikvRegionPeersRetriever struct { retrieved bool } -func (e *tikvRegionPeersRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { +func (e *tikvRegionPeersRetriever) retrieve(_ context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { if e.extractor.SkipRequest || e.retrieved { return nil, nil } diff --git a/executor/merge_join.go b/executor/merge_join.go index a64a9fa0c33dc..ae2a113ceb217 100644 --- a/executor/merge_join.go +++ b/executor/merge_join.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" @@ -34,7 +35,7 @@ import ( // 2. For other cases its preferred not to use SMJ and operator // will throw error. 
type MergeJoinExec struct { - baseExecutor + exec.BaseExecutor stmtCtx *stmtctx.StatementContext compareFuncs []expression.CompareFunc @@ -76,7 +77,7 @@ type mergeJoinTable struct { } func (t *mergeJoinTable) init(exec *MergeJoinExec) { - child := exec.children[t.childIndex] + child := exec.Children(t.childIndex) t.childChunk = tryNewCacheChunk(child) t.childChunkIter = chunk.NewIterator4Chunk(t.childChunk) @@ -84,11 +85,11 @@ func (t *mergeJoinTable) init(exec *MergeJoinExec) { for _, col := range t.joinKeys { items = append(items, col) } - t.groupChecker = newVecGroupChecker(exec.ctx, items) + t.groupChecker = newVecGroupChecker(exec.Ctx(), items) t.groupRowsIter = chunk.NewIterator4Chunk(t.childChunk) if t.isInner { - t.rowContainer = chunk.NewRowContainer(child.base().retFieldTypes, t.childChunk.Capacity()) + t.rowContainer = chunk.NewRowContainer(child.Base().RetFieldTypes(), t.childChunk.Capacity()) t.rowContainer.GetMemTracker().AttachTo(exec.memTracker) t.rowContainer.GetMemTracker().SetLabel(memory.LabelForInnerTable) t.rowContainer.GetDiskTracker().AttachTo(exec.diskTracker) @@ -100,11 +101,11 @@ func (t *mergeJoinTable) init(exec *MergeJoinExec) { actionSpill = t.rowContainer.ActionSpillForTest() } }) - exec.ctx.GetSessionVars().MemTracker.FallbackOldAndSetNewAction(actionSpill) + exec.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(actionSpill) } t.memTracker = memory.NewTracker(memory.LabelForInnerTable, -1) } else { - t.filtersSelected = make([]bool, 0, exec.maxChunkSize) + t.filtersSelected = make([]bool, 0, exec.MaxChunkSize()) t.memTracker = memory.NewTracker(memory.LabelForOuterTable, -1) } @@ -158,7 +159,7 @@ func (t *mergeJoinTable) selectNextGroup() { func (t *mergeJoinTable) fetchNextChunk(ctx context.Context, exec *MergeJoinExec) error { oldMemUsage := t.childChunk.MemoryUsage() - err := Next(ctx, exec.children[t.childIndex], t.childChunk) + err := Next(ctx, exec.Children(t.childIndex), t.childChunk) t.memTracker.Consume(t.childChunk.MemoryUsage() - oldMemUsage) if err != nil { return err @@ -247,7 +248,7 @@ func (t *mergeJoinTable) fetchNextOuterGroup(ctx context.Context, exec *MergeJoi // It's hard to calculate selectivity if there is any filter or it's inner join, // so we just push the requiredRows down when it's outer join and has no filter. if exec.isOuterJoin && len(t.filters) == 0 { - t.childChunk.SetRequiredRows(requiredRows, exec.maxChunkSize) + t.childChunk.SetRequiredRows(requiredRows, exec.MaxChunkSize()) } err := t.fetchNextChunk(ctx, exec) if err != nil || t.executed { @@ -255,7 +256,7 @@ func (t *mergeJoinTable) fetchNextOuterGroup(ctx context.Context, exec *MergeJoi } t.childChunkIter.Begin() - t.filtersSelected, err = expression.VectorizedFilter(exec.ctx, t.filters, t.childChunkIter, t.filtersSelected) + t.filtersSelected, err = expression.VectorizedFilter(exec.Ctx(), t.filters, t.childChunkIter, t.filtersSelected) if err != nil { return err } @@ -294,19 +295,19 @@ func (e *MergeJoinExec) Close() error { e.hasNull = false e.memTracker = nil e.diskTracker = nil - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } // Open implements the Executor Open interface. 
func (e *MergeJoinExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) - e.diskTracker = disk.NewTracker(e.id, -1) - e.diskTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.DiskTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) + e.diskTracker = disk.NewTracker(e.ID(), -1) + e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker) e.innerTable.init(e) e.outerTable.init(e) @@ -399,7 +400,7 @@ func (e *MergeJoinExec) compare(outerRow, innerRow chunk.Row) (int, error) { outerJoinKeys := e.outerTable.joinKeys innerJoinKeys := e.innerTable.joinKeys for i := range outerJoinKeys { - cmp, _, err := e.compareFuncs[i](e.ctx, outerJoinKeys[i], innerJoinKeys[i], outerRow, innerRow) + cmp, _, err := e.compareFuncs[i](e.Ctx(), outerJoinKeys[i], innerJoinKeys[i], outerRow, innerRow) if err != nil { return 0, err } diff --git a/executor/mpp_gather.go b/executor/mpp_gather.go index 89bfcba3e9955..84d5690ef6b9c 100644 --- a/executor/mpp_gather.go +++ b/executor/mpp_gather.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/executor/internal/mpp" "github.com/pingcap/tidb/executor/mppcoordmanager" "github.com/pingcap/tidb/infoschema" @@ -56,7 +57,7 @@ func getMPPQueryTS(ctx sessionctx.Context) uint64 { // MPPGather dispatch MPP tasks and read data from root tasks. type MPPGather struct { // following fields are construct needed - baseExecutor + exec.BaseExecutor is infoschema.InfoSchema originalPlan plannercore.PhysicalPlan startTS uint64 @@ -100,11 +101,11 @@ func (e *MPPGather) Open(ctx context.Context) (err error) { if !ok { return errors.Errorf("unexpected plan type, expect: PhysicalExchangeSender, got: %s", e.originalPlan.TP()) } - _, e.kvRanges, err = plannercore.GenerateRootMPPTasks(e.ctx, e.startTS, e.mppQueryID, sender, e.is) + _, e.kvRanges, err = plannercore.GenerateRootMPPTasks(e.Ctx(), e.startTS, e.mppQueryID, sender, e.is) return err } planIDs := collectPlanIDS(e.originalPlan, nil) - e.gatherID = allocMPPGatherID(e.ctx) + e.gatherID = allocMPPGatherID(e.Ctx()) coord := e.buildCoordinator(planIDs) err = mppcoordmanager.InstanceMPPCoordinatorManager.Register(mppcoordmanager.CoordinatorUniqueID{MPPQueryID: e.mppQueryID, GatherID: e.gatherID}, coord) if err != nil { @@ -115,13 +116,13 @@ func (e *MPPGather) Open(ctx context.Context) (err error) { if err != nil { return errors.Trace(err) } - e.respIter = distsql.GenSelectResultFromResponse(e.ctx, e.retFieldTypes, planIDs, e.id, resp) + e.respIter = distsql.GenSelectResultFromResponse(e.Ctx(), e.RetFieldTypes(), planIDs, e.ID(), resp) return nil } func (e *MPPGather) buildCoordinator(planIDs []int) kv.MppCoordinator { _, serverAddr := mppcoordmanager.InstanceMPPCoordinatorManager.GetServerAddr() - coord := mpp.NewLocalMPPCoordinator(e.ctx, e.is, e.originalPlan, planIDs, e.startTS, e.mppQueryID, e.gatherID, serverAddr, e.memTracker) + coord := mpp.NewLocalMPPCoordinator(e.Ctx(), e.is, e.originalPlan, planIDs, e.startTS, e.mppQueryID, e.gatherID, serverAddr, e.memTracker) return coord } @@ -135,7 +136,7 @@ func (e *MPPGather) Next(ctx context.Context, chk *chunk.Chunk) error { if err != nil { return err } - err = 
table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.schema.Columns, e.columns, e.ctx, chk) + err = table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.Schema().Columns, e.columns, e.Ctx(), chk) if err != nil { return err } diff --git a/executor/opt_rule_blacklist.go b/executor/opt_rule_blacklist.go index f711d2feacb03..1525c79594141 100644 --- a/executor/opt_rule_blacklist.go +++ b/executor/opt_rule_blacklist.go @@ -17,6 +17,7 @@ package executor import ( "context" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/kv" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/sessionctx" @@ -27,13 +28,13 @@ import ( // ReloadOptRuleBlacklistExec indicates ReloadOptRuleBlacklist executor. type ReloadOptRuleBlacklistExec struct { - baseExecutor + exec.BaseExecutor } // Next implements the Executor Next interface. func (e *ReloadOptRuleBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - return LoadOptRuleBlacklist(internalCtx, e.ctx) + return LoadOptRuleBlacklist(internalCtx, e.Ctx()) } // LoadOptRuleBlacklist loads the latest data from table mysql.opt_rule_blacklist. diff --git a/executor/parallel_apply.go b/executor/parallel_apply.go index a0d418cc0e441..c2c7500aa989b 100644 --- a/executor/parallel_apply.go +++ b/executor/parallel_apply.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/util/chunk" @@ -44,10 +45,10 @@ type outerRow struct { // ParallelNestedLoopApplyExec is the executor for apply. 
type ParallelNestedLoopApplyExec struct { - baseExecutor + exec.BaseExecutor // outer-side fields - outerExec Executor + outerExec exec.Executor outerFilter expression.CNFExprs outerList *chunk.List outer bool @@ -56,7 +57,7 @@ type ParallelNestedLoopApplyExec struct { // use slices since the inner side is paralleled corCols [][]*expression.CorrelatedColumn innerFilter []expression.CNFExprs - innerExecs []Executor + innerExecs []exec.Executor innerList []*chunk.List innerChunk []*chunk.Chunk innerSelected [][]bool @@ -92,10 +93,10 @@ func (e *ParallelNestedLoopApplyExec) Open(ctx context.Context) error { if err != nil { return err } - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) - e.outerList = chunk.NewList(retTypes(e.outerExec), e.initCap, e.maxChunkSize) + e.outerList = chunk.NewList(retTypes(e.outerExec), e.InitCap(), e.MaxChunkSize()) e.outerList.GetMemTracker().SetLabel(memory.LabelForOuterList) e.outerList.GetMemTracker().AttachTo(e.memTracker) @@ -108,7 +109,7 @@ func (e *ParallelNestedLoopApplyExec) Open(ctx context.Context) error { e.hasNull = make([]bool, e.concurrency) for i := 0; i < e.concurrency; i++ { e.innerChunk[i] = tryNewCacheChunk(e.innerExecs[i]) - e.innerList[i] = chunk.NewList(retTypes(e.innerExecs[i]), e.initCap, e.maxChunkSize) + e.innerList[i] = chunk.NewList(retTypes(e.innerExecs[i]), e.InitCap(), e.MaxChunkSize()) e.innerList[i].GetMemTracker().SetLabel(memory.LabelForInnerList) e.innerList[i].GetMemTracker().AttachTo(e.memTracker) } @@ -122,7 +123,7 @@ func (e *ParallelNestedLoopApplyExec) Open(ctx context.Context) error { } if e.useCache { - if e.cache, err = newApplyCache(e.ctx); err != nil { + if e.cache, err = newApplyCache(e.Ctx()); err != nil { return err } e.cache.GetMemTracker().AttachTo(e.memTracker) @@ -174,7 +175,7 @@ func (e *ParallelNestedLoopApplyExec) Close() error { // Otherwise we may got data race. 
err := e.outerExec.Close() - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { runtimeStats := newJoinRuntimeStats() if e.useCache { var hitRatio float64 @@ -186,7 +187,7 @@ func (e *ParallelNestedLoopApplyExec) Close() error { runtimeStats.setCacheInfo(false, 0) } runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", e.concurrency)) - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats) } return err } @@ -217,7 +218,7 @@ func (e *ParallelNestedLoopApplyExec) outerWorker(ctx context.Context) { } e.outerList.Add(chk) outerIter := chunk.NewIterator4Chunk(chk) - selected, err = expression.VectorizedFilter(e.ctx, e.outerFilter, outerIter, selected) + selected, err = expression.VectorizedFilter(e.Ctx(), e.outerFilter, outerIter, selected) if err != nil { e.putResult(nil, err) return @@ -280,7 +281,7 @@ func (e *ParallelNestedLoopApplyExec) fetchAllInners(ctx context.Context, id int for _, col := range e.corCols[id] { *col.Data = e.outerRow[id].GetDatum(col.Index, col.RetType) if e.useCache { - if key, err = codec.EncodeKey(e.ctx.GetSessionVars().StmtCtx, key, *col.Data); err != nil { + if key, err = codec.EncodeKey(e.Ctx().GetSessionVars().StmtCtx, key, *col.Data); err != nil { return err } } @@ -307,7 +308,7 @@ func (e *ParallelNestedLoopApplyExec) fetchAllInners(ctx context.Context, id int if e.useCache { // create a new one in this case since it may be in the cache - e.innerList[id] = chunk.NewList(retTypes(e.innerExecs[id]), e.initCap, e.maxChunkSize) + e.innerList[id] = chunk.NewList(retTypes(e.innerExecs[id]), e.InitCap(), e.MaxChunkSize()) } else { e.innerList[id].Reset() } @@ -322,7 +323,7 @@ func (e *ParallelNestedLoopApplyExec) fetchAllInners(ctx context.Context, id int break } - e.innerSelected[id], err = expression.VectorizedFilter(e.ctx, e.innerFilter[id], innerIter, e.innerSelected[id]) + e.innerSelected[id], err = expression.VectorizedFilter(e.Ctx(), e.innerFilter[id], innerIter, e.innerSelected[id]) if err != nil { return err } diff --git a/executor/pipelined_window.go b/executor/pipelined_window.go index cda1d9c389fd0..01164c878b7ee 100644 --- a/executor/pipelined_window.go +++ b/executor/pipelined_window.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/executor/aggfuncs" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/planner/core" @@ -35,7 +36,7 @@ type dataInfo struct { // PipelinedWindowExec is the executor for window functions. type PipelinedWindowExec struct { - baseExecutor + exec.BaseExecutor numWindowFuncs int windowFuncs []aggfuncs.AggFunc slidingWindowFuncs []aggfuncs.SlidingWindowAggFunc @@ -78,7 +79,7 @@ type PipelinedWindowExec struct { // Close implements the Executor Close interface. 
func (e *PipelinedWindowExec) Close() error { - return errors.Trace(e.baseExecutor.Close()) + return errors.Trace(e.BaseExecutor.Close()) } // Open implements the Executor Open interface @@ -96,7 +97,7 @@ func (e *PipelinedWindowExec) Open(ctx context.Context) (err error) { } } e.rows = make([]chunk.Row, 0) - return e.baseExecutor.Open(ctx) + return e.BaseExecutor.Open(ctx) } func (e *PipelinedWindowExec) firstResultChunkNotReady() bool { @@ -116,7 +117,7 @@ func (e *PipelinedWindowExec) Next(ctx context.Context, chk *chunk.Chunk) (err e // for unbounded frame, it needs consume the whole partition before being able to produce, in this case // e.p.enoughToProduce will be false until so. var enough bool - enough, err = e.enoughToProduce(e.ctx) + enough, err = e.enoughToProduce(e.Ctx()) if err != nil { return } @@ -130,7 +131,7 @@ func (e *PipelinedWindowExec) Next(ctx context.Context, chk *chunk.Chunk) (err e if e.done || e.newPartition { e.finish() // if we continued, the rows will not be consumed, so next time we should consume it instead of calling e.getRowsInPartition - enough, err = e.enoughToProduce(e.ctx) + enough, err = e.enoughToProduce(e.Ctx()) if err != nil { return } @@ -150,7 +151,7 @@ func (e *PipelinedWindowExec) Next(ctx context.Context, chk *chunk.Chunk) (err e // e.p is ready to produce data if len(e.data) > e.dataIdx && e.data[e.dataIdx].remaining != 0 { - produced, err := e.produce(e.ctx, e.data[e.dataIdx].chk, e.data[e.dataIdx].remaining) + produced, err := e.produce(e.Ctx(), e.data[e.dataIdx].chk, e.data[e.dataIdx].remaining) if err != nil { return err } @@ -205,8 +206,8 @@ func (e *PipelinedWindowExec) getRowsInPartition(ctx context.Context) (err error func (e *PipelinedWindowExec) fetchChild(ctx context.Context) (EOF bool, err error) { // TODO: reuse chunks - childResult := tryNewCacheChunk(e.children[0]) - err = Next(ctx, e.children[0], childResult) + childResult := tryNewCacheChunk(e.Children(0)) + err = Next(ctx, e.Children(0), childResult) if err != nil { return false, errors.Trace(err) } @@ -217,7 +218,7 @@ func (e *PipelinedWindowExec) fetchChild(ctx context.Context) (EOF bool, err err } // TODO: reuse chunks - resultChk := e.ctx.GetSessionVars().GetNewChunkWithCapacity(e.retFieldTypes, 0, numRows, e.AllocPool) + resultChk := e.Ctx().GetSessionVars().GetNewChunkWithCapacity(e.RetFieldTypes(), 0, numRows, e.AllocPool) err = e.copyChk(childResult, resultChk) if err != nil { return false, err diff --git a/executor/pkg_test.go b/executor/pkg_test.go index f91197250be7b..2ef7ebe085961 100644 --- a/executor/pkg_test.go +++ b/executor/pkg_test.go @@ -19,6 +19,7 @@ import ( "fmt" "testing" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" @@ -65,7 +66,7 @@ func TestNestedLoopApply(t *testing.T) { retTypes(outerExec), retTypes(innerExec), nil, false) joinSchema := expression.NewSchema(col0, col1) join := &NestedLoopApplyExec{ - baseExecutor: newBaseExecutor(sctx, joinSchema, 0), + BaseExecutor: exec.NewBaseExecutor(sctx, joinSchema, 0), outerExec: outerExec, innerExec: innerExec, outerFilter: []expression.Expression{outerFilter}, @@ -73,7 +74,7 @@ func TestNestedLoopApply(t *testing.T) { joiner: joiner, ctx: sctx, } - join.innerList = chunk.NewList(retTypes(innerExec), innerExec.initCap, innerExec.maxChunkSize) + join.innerList = chunk.NewList(retTypes(innerExec), innerExec.InitCap(), innerExec.MaxChunkSize()) join.innerChunk = newFirstChunk(innerExec) 
join.outerChunk = newFirstChunk(outerExec) joinChk := newFirstChunk(join) diff --git a/executor/plan_replayer.go b/executor/plan_replayer.go index f8a7ffe90fcca..c732ec1e6691e 100644 --- a/executor/plan_replayer.go +++ b/executor/plan_replayer.go @@ -26,6 +26,7 @@ import ( "github.com/BurntSushi/toml" "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -40,12 +41,12 @@ import ( "go.uber.org/zap" ) -var _ Executor = &PlanReplayerExec{} -var _ Executor = &PlanReplayerLoadExec{} +var _ exec.Executor = &PlanReplayerExec{} +var _ exec.Executor = &PlanReplayerLoadExec{} // PlanReplayerExec represents a plan replayer executor. type PlanReplayerExec struct { - baseExecutor + exec.BaseExecutor CaptureInfo *PlanReplayerCaptureInfo DumpInfo *PlanReplayerDumpInfo endFlag bool @@ -70,7 +71,7 @@ type PlanReplayerDumpInfo struct { // Next implements the Executor Next interface. func (e *PlanReplayerExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if e.endFlag { return nil } @@ -108,7 +109,7 @@ func (e *PlanReplayerExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *PlanReplayerExec) removeCaptureTask(ctx context.Context) error { ctx1 := kv.WithInternalSourceType(ctx, kv.InternalTxnStats) - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) _, _, err := exec.ExecRestrictedSQL(ctx1, nil, fmt.Sprintf("delete from mysql.plan_replayer_task where sql_digest = '%s' and plan_digest = '%s'", e.CaptureInfo.SQLDigest, e.CaptureInfo.PlanDigest)) if err != nil { @@ -116,7 +117,7 @@ func (e *PlanReplayerExec) removeCaptureTask(ctx context.Context) error { zap.Error(err)) return err } - err = domain.GetDomain(e.ctx).GetPlanReplayerHandle().CollectPlanReplayerTask() + err = domain.GetDomain(e.Ctx()).GetPlanReplayerHandle().CollectPlanReplayerTask() if err != nil { logutil.BgLogger().Warn("collect task failed", zap.Error(err)) } @@ -127,14 +128,14 @@ func (e *PlanReplayerExec) removeCaptureTask(ctx context.Context) error { func (e *PlanReplayerExec) registerCaptureTask(ctx context.Context) error { ctx1 := kv.WithInternalSourceType(ctx, kv.InternalTxnStats) - exists, err := domain.CheckPlanReplayerTaskExists(ctx1, e.ctx, e.CaptureInfo.SQLDigest, e.CaptureInfo.PlanDigest) + exists, err := domain.CheckPlanReplayerTaskExists(ctx1, e.Ctx(), e.CaptureInfo.SQLDigest, e.CaptureInfo.PlanDigest) if err != nil { return err } if exists { return errors.New("plan replayer capture task already exists") } - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) _, _, err = exec.ExecRestrictedSQL(ctx1, nil, fmt.Sprintf("insert into mysql.plan_replayer_task (sql_digest, plan_digest) values ('%s','%s')", e.CaptureInfo.SQLDigest, e.CaptureInfo.PlanDigest)) if err != nil { @@ -142,7 +143,7 @@ func (e *PlanReplayerExec) registerCaptureTask(ctx context.Context) error { zap.Error(err)) return err } - err = domain.GetDomain(e.ctx).GetPlanReplayerHandle().CollectPlanReplayerTask() + err = domain.GetDomain(e.Ctx()).GetPlanReplayerHandle().CollectPlanReplayerTask() if err != nil { logutil.BgLogger().Warn("collect task failed", zap.Error(err)) } @@ -185,12 +186,12 @@ func (e *PlanReplayerDumpInfo) dump(ctx context.Context) (err error) { } func (e *PlanReplayerExec) prepare() error { - val := 
e.ctx.Value(PlanReplayerDumpVarKey) + val := e.Ctx().Value(PlanReplayerDumpVarKey) if val != nil { - e.ctx.SetValue(PlanReplayerDumpVarKey, nil) + e.Ctx().SetValue(PlanReplayerDumpVarKey, nil) return errors.New("plan replayer: previous plan replayer dump option isn't closed normally, please try again") } - e.ctx.SetValue(PlanReplayerDumpVarKey, e.DumpInfo) + e.Ctx().SetValue(PlanReplayerDumpVarKey, e.DumpInfo) return nil } @@ -214,7 +215,7 @@ func (e *PlanReplayerDumpInfo) DumpSQLsFromFile(ctx context.Context, b []byte) e // PlanReplayerLoadExec represents a plan replayer load executor. type PlanReplayerLoadExec struct { - baseExecutor + exec.BaseExecutor info *PlanReplayerLoadInfo } @@ -244,16 +245,16 @@ const PlanReplayerDumpVarKey planReplayerDumpKeyType = 1 // Next implements the Executor Next interface. func (e *PlanReplayerLoadExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if len(e.info.Path) == 0 { return errors.New("plan replayer: file path is empty") } - val := e.ctx.Value(PlanReplayerLoadVarKey) + val := e.Ctx().Value(PlanReplayerLoadVarKey) if val != nil { - e.ctx.SetValue(PlanReplayerLoadVarKey, nil) + e.Ctx().SetValue(PlanReplayerLoadVarKey, nil) return errors.New("plan replayer: previous plan replayer load option isn't closed normally, please try again") } - e.ctx.SetValue(PlanReplayerLoadVarKey, e.info) + e.Ctx().SetValue(PlanReplayerLoadVarKey, e.info) return nil } diff --git a/executor/point_get.go b/executor/point_get.go index 55062cda824e8..81e1278b4848a 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -21,6 +21,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/distsql" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -41,7 +42,7 @@ import ( "github.com/tikv/client-go/v2/txnkv/txnsnapshot" ) -func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { +func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) exec.Executor { var err error if err = b.validCanReadTemporaryOrCacheTable(p.TblInfo); err != nil { b.err = err @@ -56,14 +57,14 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { } e := &PointGetExecutor{ - baseExecutor: newBaseExecutor(b.ctx, p.Schema(), p.ID()), + BaseExecutor: exec.NewBaseExecutor(b.ctx, p.Schema(), p.ID()), txnScope: b.txnScope, readReplicaScope: b.readReplicaScope, isStaleness: b.isStaleness, } - e.base().initCap = 1 - e.base().maxChunkSize = 1 + e.Base().SetInitCap(1) + e.Base().SetMaxChunkSize(1) e.Init(p) e.snapshot, err = b.getSnapshot() @@ -72,10 +73,10 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { return nil } if b.ctx.GetSessionVars().IsReplicaReadClosestAdaptive() { - e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.ctx, p.GetAvgRowSize())) + e.snapshot.SetOption(kv.ReplicaReadAdjuster, newReplicaReadAdjuster(e.Ctx(), p.GetAvgRowSize())) } e.snapshot.SetOption(kv.ResourceGroupName, b.ctx.GetSessionVars().ResourceGroupName) - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { snapshotStats := &txnsnapshot.SnapshotRuntimeStats{} e.stats = &runtimeStatsWithSnapshot{ SnapshotRuntimeStats: snapshotStats, @@ -90,7 +91,7 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { failpoint.Inject("assertPointReplicaOption", func(val 
failpoint.Value) { assertScope := val.(string) - if e.ctx.GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != e.readReplicaScope { + if e.Ctx().GetSessionVars().GetReplicaRead().IsClosestRead() && assertScope != e.readReplicaScope { panic("point get replica option fail") } }) @@ -115,7 +116,7 @@ func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { // PointGetExecutor executes point select query. type PointGetExecutor struct { - baseExecutor + exec.BaseExecutor tblInfo *model.TableInfo handle kv.Handle @@ -147,7 +148,7 @@ type PointGetExecutor struct { // Init set fields needed for PointGetExecutor reuse, this does NOT change baseExecutor field func (e *PointGetExecutor) Init(p *plannercore.PointGetPlan) { - decoder := NewRowDecoder(e.ctx, p.Schema(), p.TblInfo) + decoder := NewRowDecoder(e.Ctx(), p.Schema(), p.TblInfo) e.tblInfo = p.TblInfo e.handle = p.Handle e.idxInfo = p.IndexInfo @@ -173,7 +174,7 @@ func (e *PointGetExecutor) buildVirtualColumnInfo() { if len(e.virtualColumnIndex) > 0 { e.virtualColumnRetFieldTypes = make([]*types.FieldType, len(e.virtualColumnIndex)) for i, idx := range e.virtualColumnIndex { - e.virtualColumnRetFieldTypes[i] = e.schema.Columns[idx].RetType + e.virtualColumnRetFieldTypes[i] = e.Schema().Columns[idx].RetType } } } @@ -181,31 +182,31 @@ func (e *PointGetExecutor) buildVirtualColumnInfo() { // Open implements the Executor interface. func (e *PointGetExecutor) Open(context.Context) error { var err error - e.txn, err = e.ctx.Txn(false) + e.txn, err = e.Ctx().Txn(false) if err != nil { return err } if err := e.verifyTxnScope(); err != nil { return err } - setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, e.snapshot) + setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, e.snapshot) return nil } // Close implements the Executor interface. func (e *PointGetExecutor) Close() error { if e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } - if e.runtimeStats != nil && e.snapshot != nil { + if e.RuntimeStats() != nil && e.snapshot != nil { e.snapshot.SetOption(kv.CollectRuntimeStats, nil) } if e.idxInfo != nil && e.tblInfo != nil { actRows := int64(0) - if e.runtimeStats != nil { - actRows = e.runtimeStats.GetActRows() + if e.RuntimeStats() != nil { + actRows = e.RuntimeStats().GetActRows() } - e.ctx.StoreIndexUsage(e.tblInfo.ID, e.idxInfo.ID, actRows) + e.Ctx().StoreIndexUsage(e.tblInfo.ID, e.idxInfo.ID, actRows) } e.done = false return nil @@ -227,11 +228,11 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { tblID = e.tblInfo.ID } if e.lock { - e.updateDeltaForTableID(tblID) + e.UpdateDeltaForTableID(tblID) } if e.idxInfo != nil { if isCommonHandleRead(e.tblInfo, e.idxInfo) { - handleBytes, err := EncodeUniqueIndexValuesForKey(e.ctx, e.tblInfo, e.idxInfo, e.idxVals) + handleBytes, err := EncodeUniqueIndexValuesForKey(e.Ctx(), e.tblInfo, e.idxInfo, e.idxVals) if err != nil { if kv.ErrNotExist.Equal(err) { return nil @@ -243,13 +244,13 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { return err } } else { - e.idxKey, err = EncodeUniqueIndexKey(e.ctx, e.tblInfo, e.idxInfo, e.idxVals, tblID) + e.idxKey, err = EncodeUniqueIndexKey(e.Ctx(), e.tblInfo, e.idxInfo, e.idxVals, tblID) if err != nil && !kv.ErrNotExist.Equal(err) { return err } // lockNonExistIdxKey indicates the key will be locked regardless of its existence. 
- lockNonExistIdxKey := !e.ctx.GetSessionVars().IsPessimisticReadConsistency() + lockNonExistIdxKey := !e.Ctx().GetSessionVars().IsPessimisticReadConsistency() // Non-exist keys are also locked if the isolation level is not read consistency, // lock it before read here, then it's able to read from pessimistic lock cache. if lockNonExistIdxKey { @@ -313,7 +314,7 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { } if len(val) == 0 { if e.idxInfo != nil && !isCommonHandleRead(e.tblInfo, e.idxInfo) && - !e.ctx.GetSessionVars().StmtCtx.WeakConsistency { + !e.Ctx().GetSessionVars().StmtCtx.WeakConsistency { return (&consistency.Reporter{ HandleEncode: func(handle kv.Handle) kv.Key { return key @@ -323,7 +324,7 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { }, Tbl: e.tblInfo, Idx: e.idxInfo, - Sctx: e.ctx, + Sctx: e.Ctx(), }).ReportLookupInconsistent(ctx, 1, 0, []kv.Handle{e.handle}, @@ -333,13 +334,13 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { } return nil } - err = DecodeRowValToChunk(e.base().ctx, e.schema, e.tblInfo, e.handle, val, req, e.rowDecoder) + err = DecodeRowValToChunk(e.Base().Ctx(), e.Schema(), e.tblInfo, e.handle, val, req, e.rowDecoder) if err != nil { return err } err = table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, - e.schema.Columns, e.columns, e.ctx, req) + e.Schema().Columns, e.columns, e.Ctx(), req) if err != nil { return err } @@ -347,7 +348,7 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { } func (e *PointGetExecutor) getAndLock(ctx context.Context, key kv.Key) (val []byte, err error) { - if e.ctx.GetSessionVars().IsPessimisticReadConsistency() { + if e.Ctx().GetSessionVars().IsPessimisticReadConsistency() { // Only Lock the existing keys in RC isolation. 
if e.lock { val, err = e.lockKeyIfExists(ctx, key) @@ -399,14 +400,14 @@ func (e *PointGetExecutor) lockKeyBase(ctx context.Context, } if e.lock { - seVars := e.ctx.GetSessionVars() - lockCtx, err := newLockCtx(e.ctx, e.lockWaitTime, 1) + seVars := e.Ctx().GetSessionVars() + lockCtx, err := newLockCtx(e.Ctx(), e.lockWaitTime, 1) if err != nil { return nil, err } lockCtx.LockOnlyIfExists = LockOnlyIfExists lockCtx.InitReturnValues(1) - err = doLockKeys(ctx, e.ctx, lockCtx, key) + err = doLockKeys(ctx, e.Ctx(), lockCtx, key) if err != nil { return nil, err } @@ -470,7 +471,7 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) // key does not exist in mem buffer, check the lock cache if e.lock { var ok bool - val, ok = e.ctx.GetSessionVars().TxnCtx.GetKeyInPessimisticLockCache(key) + val, ok = e.Ctx().GetSessionVars().TxnCtx.GetKeyInPessimisticLockCache(key) if ok { return val, nil } @@ -480,8 +481,8 @@ func (e *PointGetExecutor) get(ctx context.Context, key kv.Key) ([]byte, error) lock := e.tblInfo.Lock if lock != nil && (lock.Tp == model.TableLockRead || lock.Tp == model.TableLockReadOnly) { - if e.ctx.GetSessionVars().EnablePointGetCache { - cacheDB := e.ctx.GetStore().GetMemCache() + if e.Ctx().GetSessionVars().EnablePointGetCache { + cacheDB := e.Ctx().GetStore().GetMemCache() val, err = cacheDB.UnionGet(ctx, e.tblInfo.ID, e.snapshot, key) if err != nil { return nil, err @@ -501,7 +502,7 @@ func (e *PointGetExecutor) verifyTxnScope() error { var tblID int64 var tblName string var partName string - is := e.ctx.GetInfoSchema().(infoschema.InfoSchema) + is := e.Ctx().GetInfoSchema().(infoschema.InfoSchema) if e.partInfo != nil { tblID = e.partInfo.ID tblInfo, _, partInfo := is.FindTableByPartitionID(tblID) diff --git a/executor/prepared.go b/executor/prepared.go index face131da31ac..b5fd7ebed1fe6 100644 --- a/executor/prepared.go +++ b/executor/prepared.go @@ -19,6 +19,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/log" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser" @@ -37,14 +38,14 @@ import ( ) var ( - _ Executor = &DeallocateExec{} - _ Executor = &ExecuteExec{} - _ Executor = &PrepareExec{} + _ exec.Executor = &DeallocateExec{} + _ exec.Executor = &ExecuteExec{} + _ exec.Executor = &PrepareExec{} ) // PrepareExec represents a PREPARE executor. type PrepareExec struct { - baseExecutor + exec.BaseExecutor name string sqlText string @@ -62,10 +63,10 @@ type PrepareExec struct { // NewPrepareExec creates a new PrepareExec. func NewPrepareExec(ctx sessionctx.Context, sqlTxt string) *PrepareExec { - base := newBaseExecutor(ctx, nil, 0) - base.initCap = chunk.ZeroCapacity + base := exec.NewBaseExecutor(ctx, nil, 0) + base.SetInitCap(chunk.ZeroCapacity) return &PrepareExec{ - baseExecutor: base, + BaseExecutor: base, sqlText: sqlTxt, needReset: true, } @@ -73,7 +74,7 @@ func NewPrepareExec(ctx sessionctx.Context, sqlTxt string) *PrepareExec { // Next implements the Executor Next interface. func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { - vars := e.ctx.GetSessionVars() + vars := e.Ctx().GetSessionVars() if e.ID != 0 { // Must be the case when we retry a prepare. // Make sure it is idempotent. 
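
`NewPrepareExec` above shows the construction idiom this patch settles on now that `initCap` is private: build the base with `exec.NewBaseExecutor`, then tune it through setters (`buildPointGet` earlier does the same with `SetInitCap(1)` and `SetMaxChunkSize(1)` for a single-row read). A condensed sketch of the pattern, with `StmtExec` as a hypothetical no-output statement executor and the usual exec/chunk imports assumed:

```Go
type StmtExec struct {
	exec.BaseExecutor
}

func newStmtExec(sctx sessionctx.Context) *StmtExec {
	base := exec.NewBaseExecutor(sctx, nil, 0)
	// Executors that produce no rows allocate zero-capacity chunks.
	base.SetInitCap(chunk.ZeroCapacity)
	return &StmtExec{BaseExecutor: base}
}
```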
@@ -87,7 +88,7 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { stmts []ast.StmtNode err error ) - if sqlParser, ok := e.ctx.(sqlexec.SQLParser); ok { + if sqlParser, ok := e.Ctx().(sqlexec.SQLParser); ok { // FIXME: ok... yet another parse API, may need some api interface clean. stmts, _, err = sqlParser.ParseSQL(ctx, e.sqlText, parser.CharsetConnection(charset), @@ -100,7 +101,7 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { parser.CharsetConnection(charset), parser.CollationConnection(collation)) for _, warn := range warns { - e.ctx.GetSessionVars().StmtCtx.AppendWarning(util.SyntaxWarn(warn)) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(util.SyntaxWarn(warn)) } } if err != nil { @@ -111,23 +112,23 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { } stmt0 := stmts[0] if e.needReset { - err = ResetContextOfStmt(e.ctx, stmt0) + err = ResetContextOfStmt(e.Ctx(), stmt0) if err != nil { return err } } - stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.ctx, true, stmt0.Text(), stmt0, nil) + stmt, p, paramCnt, err := plannercore.GeneratePlanCacheStmtWithAST(ctx, e.Ctx(), true, stmt0.Text(), stmt0, nil) if err != nil { return err } if topsqlstate.TopSQLEnabled() { - e.ctx.GetSessionVars().StmtCtx.IsSQLRegistered.Store(true) + e.Ctx().GetSessionVars().StmtCtx.IsSQLRegistered.Store(true) topsql.AttachAndRegisterSQLInfo(ctx, stmt.NormalizedSQL, stmt.SQLDigest, vars.InRestrictedSQL) } - e.ctx.GetSessionVars().PlanID.Store(0) - e.ctx.GetSessionVars().PlanColumnID.Store(0) - e.ctx.GetSessionVars().MapHashCode2UniqueID4ExtendedCol = nil + e.Ctx().GetSessionVars().PlanID.Store(0) + e.Ctx().GetSessionVars().PlanColumnID.Store(0) + e.Ctx().GetSessionVars().MapHashCode2UniqueID4ExtendedCol = nil // In MySQL prepare protocol, the server need to tell the client how many column the prepared statement would return when executing it. // For a query with on result, e.g. an insert statement, there will be no result, so 'e.Fields' is not set. // Usually, p.Schema().Len() == 0 means no result. A special case is the 'do' statement, it looks like 'select' but discard the result. @@ -150,12 +151,12 @@ func (e *PrepareExec) Next(ctx context.Context, req *chunk.Chunk) error { // It cannot be executed by itself, all it needs to do is to build // another Executor from a prepared statement. type ExecuteExec struct { - baseExecutor + exec.BaseExecutor is infoschema.InfoSchema name string usingVars []expression.Expression - stmtExec Executor + stmtExec exec.Executor stmt ast.StmtNode plan plannercore.Plan lowerPriority bool @@ -176,7 +177,7 @@ func (e *ExecuteExec) Build(b *executorBuilder) error { return errors.Trace(b.err) } e.stmtExec = stmtExec - if e.ctx.GetSessionVars().StmtCtx.Priority == mysql.NoPriority { + if e.Ctx().GetSessionVars().StmtCtx.Priority == mysql.NoPriority { e.lowerPriority = needLowerPriority(e.plan) } return nil @@ -184,14 +185,14 @@ func (e *ExecuteExec) Build(b *executorBuilder) error { // DeallocateExec represent a DEALLOCATE executor. type DeallocateExec struct { - baseExecutor + exec.BaseExecutor Name string } // Next implements the Executor Next interface. 
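The prepared.go hunks replace direct writes to unexported fields (`base.initCap = chunk.ZeroCapacity`) with setters on the exported base (`base.SetInitCap(...)`), which is what allows `BaseExecutor` to live in the internal `exec` package at all. A minimal sketch of that encapsulation pattern, with simplified types that are not TiDB's real signatures:

```Go
package main

import "fmt"

// BaseExecutor is a toy stand-in for exec.BaseExecutor: fields are
// unexported, so embedding packages must go through accessors.
type BaseExecutor struct {
	initCap      int
	maxChunkSize int
}

func NewBaseExecutor(initCap, maxChunkSize int) BaseExecutor {
	return BaseExecutor{initCap: initCap, maxChunkSize: maxChunkSize}
}

func (b *BaseExecutor) SetInitCap(c int) { b.initCap = c }
func (b *BaseExecutor) InitCap() int     { return b.initCap }

// PrepareExec embeds the exported base instead of a package-private one.
type PrepareExec struct {
	BaseExecutor
	sqlText string
}

func NewPrepareExec(sqlText string) *PrepareExec {
	base := NewBaseExecutor(1024, 1024)
	base.SetInitCap(0) // mirrors base.SetInitCap(chunk.ZeroCapacity)
	return &PrepareExec{BaseExecutor: base, sqlText: sqlText}
}

func main() {
	e := NewPrepareExec("PREPARE s FROM 'SELECT 1'")
	fmt.Println(e.InitCap()) // 0
}
```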
func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { - vars := e.ctx.GetSessionVars() + vars := e.Ctx().GetSessionVars() id, ok := vars.PreparedStmtNameToID[e.Name] if !ok { return errors.Trace(plannercore.ErrStmtNotFound) @@ -203,15 +204,15 @@ func (e *DeallocateExec) Next(ctx context.Context, req *chunk.Chunk) error { } prepared := preparedObj.PreparedAst delete(vars.PreparedStmtNameToID, e.Name) - if e.ctx.GetSessionVars().EnablePreparedPlanCache { - bindSQL, _ := plannercore.GetBindSQL4PlanCache(e.ctx, preparedObj) + if e.Ctx().GetSessionVars().EnablePreparedPlanCache { + bindSQL, _ := plannercore.GetBindSQL4PlanCache(e.Ctx(), preparedObj) cacheKey, err := plannercore.NewPlanCacheKey(vars, preparedObj.StmtText, preparedObj.StmtDB, prepared.SchemaVersion, 0, bindSQL, expression.ExprPushDownBlackListReloadTimeStamp.Load()) if err != nil { return err } if !vars.IgnorePreparedCacheCloseStmt { // keep the plan in cache - e.ctx.GetSessionPlanCache().Delete(cacheKey) + e.Ctx().GetSessionPlanCache().Delete(cacheKey) } } vars.RemovePreparedStmt(id) diff --git a/executor/projection.go b/executor/projection.go index aecb1d44cec58..2c8723efff881 100644 --- a/executor/projection.go +++ b/executor/projection.go @@ -23,6 +23,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/chunk" @@ -56,7 +57,7 @@ type projectionOutput struct { // ProjectionExec implements the physical Projection Operator: // https://en.wikipedia.org/wiki/Projection_(relational_algebra) type ProjectionExec struct { - baseExecutor + exec.BaseExecutor evaluatorSuit *expression.EvaluatorSuite @@ -83,7 +84,7 @@ type ProjectionExec struct { // Open implements the Executor Open interface. 
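The `DeallocateExec.Next` hunk above keeps the cache-eviction shape intact: drop the name-to-ID mapping, then evict the cached plan unless the session asked to keep it. A minimal sketch of that flow under a toy map-based cache (the key fields and `keepPlan` flag are simplifications of `plannercore.NewPlanCacheKey` and `IgnorePreparedCacheCloseStmt`):

```Go
package main

import "fmt"

// cacheKey stands in for the composite plan-cache key.
type cacheKey struct {
	stmtText      string
	schemaVersion int64
}

type planCache map[cacheKey]string

// deallocate mirrors DeallocateExec.Next: drop the name->key mapping and,
// unless the session asks to keep it, evict the cached plan too.
func deallocate(names map[string]cacheKey, cache planCache, name string, keepPlan bool) error {
	key, ok := names[name]
	if !ok {
		return fmt.Errorf("prepared statement %q not found", name)
	}
	delete(names, name)
	if !keepPlan {
		delete(cache, key)
	}
	return nil
}

func main() {
	key := cacheKey{"SELECT ?", 42}
	names := map[string]cacheKey{"s1": key}
	cache := planCache{key: "PointGet"}
	_ = deallocate(names, cache, "s1", false)
	fmt.Println(len(cache)) // 0: the plan is evicted with the statement
}
```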
func (e *ProjectionExec) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } failpoint.Inject("mockProjectionExecBaseExecutorOpenReturnedError", func(val failpoint.Value) { @@ -96,14 +97,14 @@ func (e *ProjectionExec) Open(ctx context.Context) error { func (e *ProjectionExec) open(_ context.Context) error { e.prepared = false - e.parentReqRows = int64(e.maxChunkSize) + e.parentReqRows = int64(e.MaxChunkSize()) if e.memTracker != nil { e.memTracker.Reset() } else { - e.memTracker = memory.NewTracker(e.id, -1) + e.memTracker = memory.NewTracker(e.ID(), -1) } - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) // For now a Projection can not be executed vectorially only because it // contains "SetVar" or "GetVar" functions, in this scenario this @@ -113,7 +114,7 @@ func (e *ProjectionExec) open(_ context.Context) error { } if e.isUnparallelExec() { - e.childResult = tryNewCacheChunk(e.children[0]) + e.childResult = tryNewCacheChunk(e.Children(0)) e.memTracker.Consume(e.childResult.MemoryUsage()) } @@ -178,7 +179,7 @@ func (e *ProjectionExec) open(_ context.Context) error { +------------------------------+ +----------------------+ */ func (e *ProjectionExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if e.isUnparallelExec() { return e.unParallelExecute(ctx, req) } @@ -191,9 +192,9 @@ func (e *ProjectionExec) isUnparallelExec() bool { func (e *ProjectionExec) unParallelExecute(ctx context.Context, chk *chunk.Chunk) error { // transmit the requiredRows - e.childResult.SetRequiredRows(chk.RequiredRows(), e.maxChunkSize) + e.childResult.SetRequiredRows(chk.RequiredRows(), e.MaxChunkSize()) mSize := e.childResult.MemoryUsage() - err := Next(ctx, e.children[0], e.childResult) + err := Next(ctx, e.Children(0), e.childResult) failpoint.Inject("ConsumeRandomPanic", nil) e.memTracker.Consume(e.childResult.MemoryUsage() - mSize) if err != nil { @@ -202,7 +203,7 @@ func (e *ProjectionExec) unParallelExecute(ctx context.Context, chk *chunk.Chunk if e.childResult.NumRows() == 0 { return nil } - err = e.evaluatorSuit.Run(e.ctx, e.childResult, chk) + err = e.evaluatorSuit.Run(e.Ctx(), e.childResult, chk) return err } @@ -237,7 +238,7 @@ func (e *ProjectionExec) prepare(ctx context.Context) { // Initialize projectionInputFetcher. e.fetcher = projectionInputFetcher{ proj: e, - child: e.children[0], + child: e.Children(0), globalFinishCh: e.finishCh, globalOutputCh: e.outputCh, inputCh: make(chan *projectionInput, e.numWorkers), @@ -249,7 +250,7 @@ func (e *ProjectionExec) prepare(ctx context.Context) { for i := int64(0); i < e.numWorkers; i++ { e.workers = append(e.workers, &projectionWorker{ proj: e, - sctx: e.ctx, + sctx: e.Ctx(), evaluatorSuit: e.evaluatorSuit, globalFinishCh: e.finishCh, inputGiveBackCh: e.fetcher.inputCh, @@ -257,7 +258,7 @@ func (e *ProjectionExec) prepare(ctx context.Context) { outputCh: make(chan *projectionOutput, 1), }) - inputChk := newFirstChunk(e.children[0]) + inputChk := newFirstChunk(e.Children(0)) failpoint.Inject("ConsumeRandomPanic", nil) e.memTracker.Consume(inputChk.MemoryUsage()) e.fetcher.inputCh <- &projectionInput{ @@ -302,7 +303,7 @@ func (e *ProjectionExec) drainOutputCh(ch chan *projectionOutput) { // Close implements the Executor Close interface. 
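`ProjectionExec.open` shows the memory-tracker idiom the refactor preserves: reuse the tracker across re-opens, otherwise create one labelled with the executor ID (now via `e.ID()`), then attach it to the statement-level tracker so quota accounting sees the whole query. A toy sketch, with `tracker` standing in for `memory.Tracker`:

```Go
package main

import "fmt"

// tracker is a toy stand-in for memory.Tracker: it only counts bytes and
// forwards consumption to its parent.
type tracker struct {
	label  int
	bytes  int64
	parent *tracker
}

func newTracker(label int) *tracker { return &tracker{label: label} }

func (t *tracker) AttachTo(p *tracker) { t.parent = p }
func (t *tracker) Reset()              { t.Consume(-t.bytes) }
func (t *tracker) Consume(n int64) {
	t.bytes += n
	if t.parent != nil {
		t.parent.bytes += n
	}
}

type projection struct {
	id         int
	memTracker *tracker
}

// open mirrors ProjectionExec.open: reset on re-open, create on first open,
// always attach to the statement tracker, then account the first chunk.
func (p *projection) open(stmtTracker *tracker) {
	if p.memTracker != nil {
		p.memTracker.Reset()
	} else {
		p.memTracker = newTracker(p.id)
	}
	p.memTracker.AttachTo(stmtTracker)
	p.memTracker.Consume(4096) // e.g. the first child chunk's memory usage
}

func main() {
	stmt := newTracker(-1)
	p := &projection{id: 7}
	p.open(stmt)
	fmt.Println(stmt.bytes) // 4096
}
```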
func (e *ProjectionExec) Close() error { - // if e.baseExecutor.Open returns error, e.childResult will be nil, see https://github.com/pingcap/tidb/issues/24210 + // if e.BaseExecutor.Open returns error, e.childResult will be nil, see https://github.com/pingcap/tidb/issues/24210 // for more information if e.isUnparallelExec() && e.childResult != nil { e.memTracker.Consume(-e.childResult.MemoryUsage()) @@ -322,21 +323,21 @@ func (e *ProjectionExec) Close() error { e.drainOutputCh(w.outputCh) } } - if e.baseExecutor.runtimeStats != nil { + if e.BaseExecutor.RuntimeStats() != nil { runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{} if e.isUnparallelExec() { runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", 0)) } else { runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("Concurrency", int(e.numWorkers))) } - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) + e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats) } - return e.baseExecutor.Close() + return e.BaseExecutor.Close() } type projectionInputFetcher struct { proj *ProjectionExec - child Executor + child exec.Executor globalFinishCh <-chan struct{} globalOutputCh chan<- *projectionOutput @@ -383,7 +384,7 @@ func (f *projectionInputFetcher) run(ctx context.Context) { f.globalOutputCh <- output requiredRows := atomic.LoadInt64(&f.proj.parentReqRows) - input.chk.SetRequiredRows(int(requiredRows), f.proj.maxChunkSize) + input.chk.SetRequiredRows(int(requiredRows), f.proj.MaxChunkSize()) mSize := input.chk.MemoryUsage() err := Next(ctx, f.child, input.chk) failpoint.Inject("ConsumeRandomPanic", nil) diff --git a/executor/reload_expr_pushdown_blacklist.go b/executor/reload_expr_pushdown_blacklist.go index 74cdf88bbfc6d..b6f1ef810756a 100644 --- a/executor/reload_expr_pushdown_blacklist.go +++ b/executor/reload_expr_pushdown_blacklist.go @@ -19,6 +19,7 @@ import ( "strings" "time" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -29,12 +30,12 @@ import ( // ReloadExprPushdownBlacklistExec indicates ReloadExprPushdownBlacklist executor. type ReloadExprPushdownBlacklistExec struct { - baseExecutor + exec.BaseExecutor } // Next implements the Executor Next interface. func (e *ReloadExprPushdownBlacklistExec) Next(ctx context.Context, _ *chunk.Chunk) error { - return LoadExprPushdownBlacklist(e.ctx) + return LoadExprPushdownBlacklist(e.Ctx()) } // LoadExprPushdownBlacklist loads the latest data from table mysql.expr_pushdown_blacklist. diff --git a/executor/replace.go b/executor/replace.go index 4f85cc7b1543e..e62ae05b018c0 100644 --- a/executor/replace.go +++ b/executor/replace.go @@ -39,8 +39,8 @@ type ReplaceExec struct { // Close implements the Executor Close interface. func (e *ReplaceExec) Close() error { e.setMessage() - if e.runtimeStats != nil && e.stats != nil { - defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats) + if e.RuntimeStats() != nil && e.stats != nil { + defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats) } if e.SelectExec != nil { return e.SelectExec.Close() @@ -50,8 +50,8 @@ func (e *ReplaceExec) Close() error { // Open implements the Executor Open interface. 
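`ProjectionExec.Close` is also representative of the accessor switch: the runtime-stats check moves from reading the embedded field (`e.baseExecutor.runtimeStats`) to calling `e.BaseExecutor.RuntimeStats()`, and concurrency info is only registered when stats collection is on. A minimal sketch of that guard, with a map standing in for `RuntimeStatsColl`:

```Go
package main

import "fmt"

// statsColl is a toy registry standing in for RuntimeStatsColl.
type statsColl map[int]string

type closer struct {
	id           int
	numWorkers   int
	runtimeStats *string // nil means stats collection is off
}

// RuntimeStats mirrors the accessor the diff switches to: callers no
// longer read the embedded field directly.
func (c *closer) RuntimeStats() *string { return c.runtimeStats }

// Close mirrors ProjectionExec.Close: only when stats are enabled do we
// record the concurrency this operator actually used.
func (c *closer) Close(coll statsColl) error {
	if c.RuntimeStats() != nil {
		coll[c.id] = fmt.Sprintf("Concurrency:%d", c.numWorkers)
	}
	return nil
}

func main() {
	coll := statsColl{}
	s := "on"
	_ = (&closer{id: 3, numWorkers: 4, runtimeStats: &s}).Close(coll)
	fmt.Println(coll[3]) // Concurrency:4
}
```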
func (e *ReplaceExec) Open(ctx context.Context) error { - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) if e.SelectExec != nil { return e.SelectExec.Open(ctx) @@ -62,7 +62,7 @@ func (e *ReplaceExec) Open(ctx context.Context) error { // replaceRow removes all duplicate rows for one row, then inserts it. func (e *ReplaceExec) replaceRow(ctx context.Context, r toBeCheckedRow) error { - txn, err := e.ctx.Txn(true) + txn, err := e.Ctx().Txn(true) if err != nil { return err } @@ -151,12 +151,12 @@ func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { defer trace.StartRegion(ctx, "ReplaceExec").End() // Get keys need to be checked. - toBeCheckedRows, err := getKeysNeedCheck(ctx, e.ctx, e.Table, newRows) + toBeCheckedRows, err := getKeysNeedCheck(ctx, e.Ctx(), e.Table, newRows) if err != nil { return err } - txn, err := e.ctx.Txn(true) + txn, err := e.Ctx().Txn(true) if err != nil { return err } @@ -168,7 +168,7 @@ func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { defer snapshot.SetOption(kv.CollectRuntimeStats, nil) } } - setOptionForTopSQL(e.ctx.GetSessionVars().StmtCtx, txn) + setOptionForTopSQL(e.Ctx().GetSessionVars().StmtCtx, txn) prefetchStart := time.Now() // Use BatchGet to fill cache. // It's an optimization and could be removed without affecting correctness. @@ -179,7 +179,7 @@ func (e *ReplaceExec) exec(ctx context.Context, newRows [][]types.Datum) error { if e.stats != nil { e.stats.Prefetch = time.Since(prefetchStart) } - e.ctx.GetSessionVars().StmtCtx.AddRecordRows(uint64(len(newRows))) + e.Ctx().GetSessionVars().StmtCtx.AddRecordRows(uint64(len(newRows))) for _, r := range toBeCheckedRows { err = e.replaceRow(ctx, r) if err != nil { @@ -197,7 +197,7 @@ func (e *ReplaceExec) Next(ctx context.Context, req *chunk.Chunk) error { ctx = context.WithValue(ctx, autoid.AllocatorRuntimeStatsCtxKey, e.stats.AllocatorRuntimeStats) } - if len(e.children) > 0 && e.children[0] != nil { + if !e.EmptyChildren() && e.Children(0) != nil { return insertRowsFromSelect(ctx, e) } return insertRows(ctx, e) @@ -205,7 +205,7 @@ func (e *ReplaceExec) Next(ctx context.Context, req *chunk.Chunk) error { // setMessage sets info message(ERR_INSERT_INFO) generated by REPLACE statement func (e *ReplaceExec) setMessage() { - stmtCtx := e.ctx.GetSessionVars().StmtCtx + stmtCtx := e.Ctx().GetSessionVars().StmtCtx numRecords := stmtCtx.RecordRows() if e.SelectExec != nil || numRecords > 1 { numWarnings := stmtCtx.WarningCount() diff --git a/executor/revoke.go b/executor/revoke.go index cd8f24ce168d3..1e74cd620202d 100644 --- a/executor/revoke.go +++ b/executor/revoke.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" @@ -41,12 +42,12 @@ import ( * See https://dev.mysql.com/doc/refman/5.7/en/revoke.html ************************************************************************************/ var ( - _ Executor = (*RevokeExec)(nil) + _ exec.Executor = (*RevokeExec)(nil) ) // RevokeExec executes RevokeStmt. 
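The replace.go hunk also shows the children accessors replacing direct slice reads: `len(e.children) > 0 && e.children[0] != nil` becomes `!e.EmptyChildren() && e.Children(0) != nil`. A small sketch of that accessor pair and the call site it changes (simplified types, not TiDB's real interface):

```Go
package main

import "fmt"

type Executor interface{ Name() string }

type leaf struct{}

func (leaf) Name() string { return "leaf" }

// BaseExecutor sketch: children are private and exposed through accessors,
// which is what forces call sites like ReplaceExec.Next to change.
type BaseExecutor struct {
	children []Executor
}

func (b *BaseExecutor) EmptyChildren() bool     { return len(b.children) == 0 }
func (b *BaseExecutor) Children(i int) Executor { return b.children[i] }

func insertSource(b *BaseExecutor) string {
	// Mirrors ReplaceExec.Next: rows come from a child SELECT if present,
	// otherwise from the statement's own VALUES lists.
	if !b.EmptyChildren() && b.Children(0) != nil {
		return "insertRowsFromSelect"
	}
	return "insertRows"
}

func main() {
	fmt.Println(insertSource(&BaseExecutor{}))                             // insertRows
	fmt.Println(insertSource(&BaseExecutor{children: []Executor{leaf{}}})) // insertRowsFromSelect
}
```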
type RevokeExec struct { - baseExecutor + exec.BaseExecutor Privs []*ast.PrivElem ObjectType ast.ObjectTypeType @@ -67,14 +68,14 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Commit the old transaction, like DDL. - if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil { + if err := sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil { return err } - defer func() { e.ctx.GetSessionVars().SetInTxn(false) }() + defer func() { e.Ctx().GetSessionVars().SetInTxn(false) }() // Create internal session to start internal transaction. isCommit := false - internalSession, err := e.getSysSession() + internalSession, err := e.GetSysSession() if err != nil { return err } @@ -85,7 +86,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { logutil.BgLogger().Error("rollback error occur at grant privilege", zap.Error(err)) } } - e.releaseSysSession(internalCtx, internalSession) + e.ReleaseSysSession(internalCtx, internalSession) }() _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(internalCtx, "begin") @@ -93,7 +94,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { return err } - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() // Revoke for each user. for _, user := range e.Users { if user.User.CurrentUser { @@ -102,7 +103,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { } // Check if user exists. - exists, err := userExists(ctx, e.ctx, user.User.Username, user.User.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.User.Username, user.User.Hostname) if err != nil { return err } @@ -124,7 +125,7 @@ func (e *RevokeExec) Next(ctx context.Context, req *chunk.Chunk) error { return err } isCommit = true - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } // Checks that dynamic privileges are only of global scope. @@ -145,7 +146,7 @@ func (e *RevokeExec) checkDynamicPrivilegeUsage() error { func (e *RevokeExec) revokeOneUser(internalSession sessionctx.Context, user, host string) error { dbName := e.Level.DBName if len(dbName) == 0 { - dbName = e.ctx.GetSessionVars().CurrentDB + dbName = e.Ctx().GetSessionVars().CurrentDB } // If there is no privilege entry in corresponding table, insert a new one. @@ -200,8 +201,8 @@ func (e *RevokeExec) revokePriv(internalSession sessionctx.Context, priv *ast.Pr func (e *RevokeExec) revokeDynamicPriv(internalSession sessionctx.Context, privName string, user, host string) error { privName = strings.ToUpper(privName) - if !privilege.GetPrivilegeManager(e.ctx).IsDynamicPrivilege(privName) { // for MySQL compatibility - e.ctx.GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrDynamicPrivilegeNotRegistered.GenWithStackByArgs(privName)) + if !privilege.GetPrivilegeManager(e.Ctx()).IsDynamicPrivilege(privName) { // for MySQL compatibility + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(exeerrors.ErrDynamicPrivilegeNotRegistered.GenWithStackByArgs(privName)) } ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "DELETE FROM mysql.global_grants WHERE user = %? AND host = %? AND priv = %?", user, host, privName) @@ -235,7 +236,7 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. 
ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) dbName := e.Level.DBName if len(dbName) == 0 { - dbName = e.ctx.GetSessionVars().CurrentDB + dbName = e.Ctx().GetSessionVars().CurrentDB } sql := new(strings.Builder) @@ -263,7 +264,7 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) + dbName, tbl, err := getTargetSchemaAndTable(e.Ctx(), e.Level.DBName, e.Level.TableName, e.is) if err != nil && !terror.ErrorEqual(err, infoschema.ErrTableNotExists) { return err } @@ -296,7 +297,7 @@ func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *a func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv *ast.PrivElem, user, host string) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - dbName, tbl, err := getTargetSchemaAndTable(e.ctx, e.Level.DBName, e.Level.TableName, e.is) + dbName, tbl, err := getTargetSchemaAndTable(e.Ctx(), e.Level.DBName, e.Level.TableName, e.is) if err != nil { return err } diff --git a/executor/sample.go b/executor/sample.go index 7f64c365d599e..c6d897d49865c 100644 --- a/executor/sample.go +++ b/executor/sample.go @@ -18,6 +18,7 @@ import ( "context" "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" @@ -32,12 +33,12 @@ import ( "golang.org/x/exp/slices" ) -var _ Executor = &TableSampleExecutor{} +var _ exec.Executor = &TableSampleExecutor{} // TableSampleExecutor fetches a few rows through kv.Scan // according to the specific sample method. type TableSampleExecutor struct { - baseExecutor + exec.BaseExecutor table table.Table startTS uint64 diff --git a/executor/select_into.go b/executor/select_into.go index 65a47ce4b6f2b..89ccb33d8cf16 100644 --- a/executor/select_into.go +++ b/executor/select_into.go @@ -23,6 +23,7 @@ import ( "strconv" "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/planner/core" @@ -32,7 +33,7 @@ import ( // SelectIntoExec represents a SelectInto executor. type SelectIntoExec struct { - baseExecutor + exec.BaseExecutor intoOpt *ast.SelectIntoOption core.LineFieldsInfo @@ -62,17 +63,17 @@ func (s *SelectIntoExec) Open(ctx context.Context) error { s.started = true s.dstFile = f s.writer = bufio.NewWriter(s.dstFile) - s.chk = tryNewCacheChunk(s.children[0]) + s.chk = tryNewCacheChunk(s.Children(0)) s.lineBuf = make([]byte, 0, 1024) s.fieldBuf = make([]byte, 0, 64) s.escapeBuf = make([]byte, 0, 64) - return s.baseExecutor.Open(ctx) + return s.BaseExecutor.Open(ctx) } // Next implements the Executor Next interface. 
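The `RevokeExec.Next` hunks above keep the internal-transaction shape while renaming `getSysSession`/`releaseSysSession` to the exported `GetSysSession`/`ReleaseSysSession`: begin on an internal session, run each revoke step, and roll back via `defer` unless every step succeeded and the commit was reached. A minimal sketch of that `isCommit` pattern, with a toy session type in place of `sessionctx.Context`:

```Go
package main

import (
	"errors"
	"fmt"
)

// session is a tiny stand-in for the internal session used by RevokeExec.
type session struct{ log []string }

func (s *session) Exec(sql string) error {
	s.log = append(s.log, sql)
	return nil
}

// revoke mirrors the shape of RevokeExec.Next: begin on an internal
// session, and roll back via defer unless every step succeeded.
func revoke(s *session, steps []func(*session) error) (err error) {
	isCommit := false
	defer func() {
		if !isCommit {
			if rbErr := s.Exec("rollback"); rbErr != nil {
				err = errors.Join(err, rbErr)
			}
		}
	}()
	if err = s.Exec("begin"); err != nil {
		return err
	}
	for _, step := range steps {
		if err = step(s); err != nil {
			return err
		}
	}
	if err = s.Exec("commit"); err != nil {
		return err
	}
	isCommit = true
	return nil
}

func main() {
	s := &session{}
	_ = revoke(s, []func(*session) error{
		func(s *session) error { return s.Exec("DELETE FROM mysql.db WHERE ...") },
	})
	fmt.Println(s.log) // [begin DELETE FROM mysql.db WHERE ... commit]
}
```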
func (s *SelectIntoExec) Next(ctx context.Context, req *chunk.Chunk) error { for { - if err := Next(ctx, s.children[0], s.chk); err != nil { + if err := Next(ctx, s.Children(0), s.chk); err != nil { return err } if s.chk.NumRows() == 0 { @@ -136,7 +137,7 @@ func (s *SelectIntoExec) dumpToOutfile() error { nullTerm = []byte("NULL") } - cols := s.children[0].Schema().Columns + cols := s.Children(0).Schema().Columns for i := 0; i < s.chk.NumRows(); i++ { row := s.chk.GetRow(i) s.lineBuf = s.lineBuf[:0] @@ -206,7 +207,7 @@ func (s *SelectIntoExec) dumpToOutfile() error { return errors.Trace(err) } } - s.ctx.GetSessionVars().StmtCtx.AddAffectedRows(uint64(s.chk.NumRows())) + s.Ctx().GetSessionVars().StmtCtx.AddAffectedRows(uint64(s.chk.NumRows())) return nil } @@ -217,7 +218,7 @@ func (s *SelectIntoExec) Close() error { } err1 := s.writer.Flush() err2 := s.dstFile.Close() - err3 := s.baseExecutor.Close() + err3 := s.BaseExecutor.Close() if err1 != nil { return errors.Trace(err1) } else if err2 != nil { diff --git a/executor/set.go b/executor/set.go index f9ac8abd38a1d..f52130fc9ac7e 100644 --- a/executor/set.go +++ b/executor/set.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/charset" @@ -41,7 +42,7 @@ import ( // SetExecutor executes set statement. type SetExecutor struct { - baseExecutor + exec.BaseExecutor vars []*expression.VarAssignment done bool @@ -54,7 +55,7 @@ func (e *SetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { return nil } e.done = true - sessionVars := e.ctx.GetSessionVars() + sessionVars := e.Ctx().GetSessionVars() for _, v := range e.vars { // Variable is case insensitive, we use lower case. 
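`SelectIntoExec.Close` (in the hunk below) illustrates the close-ordering the refactor leaves untouched: flush the buffered writer before closing the file, close the base executor last, and report the first failure while still attempting every step. A compact sketch of that ordering:

```Go
package main

import (
	"bufio"
	"fmt"
	"os"
)

// closeOutfile mirrors SelectIntoExec.Close: flush the buffered writer
// before closing the file, run the base Close last, and return the first
// error while still attempting every step.
func closeOutfile(w *bufio.Writer, f *os.File, closeBase func() error) error {
	err1 := w.Flush()
	err2 := f.Close()
	err3 := closeBase()
	if err1 != nil {
		return err1
	}
	if err2 != nil {
		return err2
	}
	return err3
}

func main() {
	f, err := os.CreateTemp("", "outfile-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	w := bufio.NewWriter(f)
	fmt.Fprintln(w, "1\thello") // one dumped row
	fmt.Println(closeOutfile(w, f, func() error { return nil })) // <nil>
}
```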
if v.Name == ast.SetNames || v.Name == ast.SetCharset { @@ -105,7 +106,7 @@ func (e *SetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { } func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expression.VarAssignment) error { - sessionVars := e.ctx.GetSessionVars() + sessionVars := e.Ctx().GetSessionVars() sysVar := variable.GetSysVar(name) if sysVar == nil { if variable.IsRemovedSysVar(name) { @@ -116,7 +117,7 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres if sysVar.RequireDynamicPrivileges != nil { semEnabled := sem.IsEnabled() - pm := privilege.GetPrivilegeManager(e.ctx) + pm := privilege.GetPrivilegeManager(e.Ctx()) privs := sysVar.RequireDynamicPrivileges(v.IsGlobal, semEnabled) for _, priv := range privs { if !pm.RequestDynamicVerification(sessionVars.ActiveRoles, priv, false) { @@ -154,7 +155,7 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres err = plugin.ForeachPlugin(plugin.Audit, func(p *plugin.Plugin) error { auditPlugin := plugin.DeclareAuditManifest(p.Manifest) if auditPlugin.OnGlobalVariableEvent != nil { - auditPlugin.OnGlobalVariableEvent(context.Background(), e.ctx.GetSessionVars(), name, valStr) + auditPlugin.OnGlobalVariableEvent(context.Background(), e.Ctx().GetSessionVars(), name, valStr) } return nil }) @@ -199,14 +200,14 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres newSnapshotIsSet := newSnapshotTS > 0 && newSnapshotTS != oldSnapshotTS if newSnapshotIsSet { if name == variable.TiDBTxnReadTS { - err = sessionctx.ValidateStaleReadTS(ctx, e.ctx, newSnapshotTS) + err = sessionctx.ValidateStaleReadTS(ctx, e.Ctx(), newSnapshotTS) } else { - err = sessionctx.ValidateSnapshotReadTS(ctx, e.ctx, newSnapshotTS) + err = sessionctx.ValidateSnapshotReadTS(ctx, e.Ctx(), newSnapshotTS) // Also check gc safe point for snapshot read. // We don't check snapshot with gc safe point for read_ts // Client-go will automatically check the snapshotTS with gc safe point. It's unnecessary to check gc safe point during set executor. 
if err == nil { - err = gcutil.ValidateSnapshot(e.ctx, newSnapshotTS) + err = gcutil.ValidateSnapshot(e.Ctx(), newSnapshotTS) } } if err != nil { @@ -228,7 +229,7 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres func (e *SetExecutor) setCharset(cs, co string, isSetName bool) error { var err error - sessionVars := e.ctx.GetSessionVars() + sessionVars := e.Ctx().GetSessionVars() if co == "" { if co, err = charset.GetDefaultCollation(cs); err != nil { return err @@ -279,7 +280,7 @@ func (e *SetExecutor) getVarValue(ctx context.Context, v *expression.VarAssignme if sysVar != nil { return sysVar.Value, nil } - return e.ctx.GetSessionVars().GetGlobalSystemVar(ctx, v.Name) + return e.Ctx().GetSessionVars().GetGlobalSystemVar(ctx, v.Name) } nativeVal, err := v.Expr.Eval(chunk.Row{}) if err != nil || nativeVal.IsNull() { @@ -301,7 +302,7 @@ func (e *SetExecutor) loadSnapshotInfoSchemaIfNeeded(name string, snapshotTS uin if name != variable.TiDBSnapshot && name != variable.TiDBTxnReadTS { return nil } - vars := e.ctx.GetSessionVars() + vars := e.Ctx().GetSessionVars() if snapshotTS == 0 { vars.SnapshotInfoschema = nil return nil @@ -309,12 +310,12 @@ func (e *SetExecutor) loadSnapshotInfoSchemaIfNeeded(name string, snapshotTS uin logutil.BgLogger().Info("load snapshot info schema", zap.Uint64("conn", vars.ConnectionID), zap.Uint64("SnapshotTS", snapshotTS)) - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) snapInfo, err := dom.GetSnapshotInfoSchema(snapshotTS) if err != nil { return err } - vars.SnapshotInfoschema = temptable.AttachLocalTemporaryTableInfoSchema(e.ctx, snapInfo) + vars.SnapshotInfoschema = temptable.AttachLocalTemporaryTableInfoSchema(e.Ctx(), snapInfo) return nil } diff --git a/executor/set_config.go b/executor/set_config.go index b508b55eb2fc7..0fb8c5d255ce1 100644 --- a/executor/set_config.go +++ b/executor/set_config.go @@ -24,6 +24,7 @@ import ( "strings" "github.com/pingcap/errors" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/parser/mysql" @@ -39,7 +40,7 @@ import ( // SetConfigExec executes 'SET CONFIG' statement. 
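The set.go hunks above end at `loadSnapshotInfoSchemaIfNeeded`, whose logic survives the rename: only the two snapshot variables touch the schema, a zero timestamp clears the pinned schema, and any other value loads the schema version that was current at that timestamp. A minimal sketch, assuming the variable names `tidb_snapshot` and `tidb_txn_read_ts` and a hypothetical `loadSchema` in place of `domain.GetSnapshotInfoSchema`:

```Go
package main

import "fmt"

type sessionVars struct {
	snapshotInfoschema any // nil means "read the current schema"
}

// loadSchema is a hypothetical stand-in for domain.GetSnapshotInfoSchema.
func loadSchema(ts uint64) (any, error) {
	return fmt.Sprintf("schema@%d", ts), nil
}

// loadSnapshotInfoSchemaIfNeeded mirrors the SetExecutor helper: only the
// two snapshot variables touch the schema, ts==0 clears the pin, anything
// else pins the schema that was current at ts.
func loadSnapshotInfoSchemaIfNeeded(vars *sessionVars, name string, ts uint64) error {
	if name != "tidb_snapshot" && name != "tidb_txn_read_ts" {
		return nil
	}
	if ts == 0 {
		vars.snapshotInfoschema = nil
		return nil
	}
	schema, err := loadSchema(ts)
	if err != nil {
		return err
	}
	vars.snapshotInfoschema = schema
	return nil
}

func main() {
	vars := &sessionVars{}
	_ = loadSnapshotInfoSchemaIfNeeded(vars, "tidb_snapshot", 425)
	fmt.Println(vars.snapshotInfoschema) // schema@425
}
```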
type SetConfigExec struct { - baseExecutor + exec.BaseExecutor p *core.SetConfig jsonBody string } @@ -71,7 +72,7 @@ func (s *SetConfigExec) Open(ctx context.Context) error { s.p.Name = strings.TrimPrefix(s.p.Name, "raftstore-proxy.") } - body, err := ConvertConfigItem2JSON(s.ctx, s.p.Name, s.p.Value) + body, err := ConvertConfigItem2JSON(s.Ctx(), s.p.Name, s.p.Value) s.jsonBody = body return err } @@ -86,11 +87,11 @@ var TestSetConfigHTTPHandlerKey stringutil.StringerStr = "TestSetConfigHTTPHandl func (s *SetConfigExec) Next(ctx context.Context, req *chunk.Chunk) error { req.Reset() getServerFunc := infoschema.GetClusterServerInfo - if v := s.ctx.Value(TestSetConfigServerInfoKey); v != nil { + if v := s.Ctx().Value(TestSetConfigServerInfoKey); v != nil { getServerFunc = v.(func(sessionctx.Context) ([]infoschema.ServerInfo, error)) } - serversInfo, err := getServerFunc(s.ctx) + serversInfo, err := getServerFunc(s.Ctx()) if err != nil { return err } @@ -122,7 +123,7 @@ func (s *SetConfigExec) Next(ctx context.Context, req *chunk.Chunk) error { return errors.Errorf("Unknown server type %s", serverInfo.ServerType) } if err := s.doRequest(url); err != nil { - s.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + s.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } } return nil @@ -135,7 +136,7 @@ func (s *SetConfigExec) doRequest(url string) (retErr error) { return err } var httpHandler func(req *http.Request) (*http.Response, error) - if v := s.ctx.Value(TestSetConfigHTTPHandlerKey); v != nil { + if v := s.Ctx().Value(TestSetConfigHTTPHandlerKey); v != nil { httpHandler = v.(func(*http.Request) (*http.Response, error)) } else { httpHandler = util.InternalHTTPClient().Do diff --git a/executor/show.go b/executor/show.go index f151f938a7ffd..b1293aed14ab6 100644 --- a/executor/show.go +++ b/executor/show.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/executor/importer" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" "github.com/pingcap/tidb/kv" @@ -85,7 +86,7 @@ var etcdDialTimeout = 5 * time.Second // ShowExec represents a show executor. type ShowExec struct { - baseExecutor + exec.BaseExecutor Tp ast.ShowStmtType // Databases/Tables/Columns/.... DBName model.CIStr @@ -122,7 +123,7 @@ type showTableRegionRowItem struct { // Next implements the Executor Next interface. func (e *ShowExec) Next(ctx context.Context, req *chunk.Chunk) error { - req.GrowAndReset(e.maxChunkSize) + req.GrowAndReset(e.MaxChunkSize()) if e.result == nil { e.result = newFirstChunk(e) err := e.fetchAll(ctx) @@ -163,11 +164,11 @@ func (e *ShowExec) fetchAll(ctx context.Context) error { // Finally, when above result set, may not include qualified record, is returned to up // level show stmt's selection, which really applies the filter operation on returned // result set, it may return empty result to client. - oldSelectLimit := e.ctx.GetSessionVars().SelectLimit - e.ctx.GetSessionVars().SelectLimit = math.MaxUint64 + oldSelectLimit := e.Ctx().GetSessionVars().SelectLimit + e.Ctx().GetSessionVars().SelectLimit = math.MaxUint64 defer func() { // Restore session Var SelectLimit value. 
- e.ctx.GetSessionVars().SelectLimit = oldSelectLimit + e.Ctx().GetSessionVars().SelectLimit = oldSelectLimit }() switch e.Tp { @@ -322,10 +323,10 @@ func (v *visibleChecker) Leave(in ast.Node) (out ast.Node, ok bool) { func (e *ShowExec) fetchShowBind() error { var tmp []*bindinfo.BindRecord if !e.GlobalScope { - handle := e.ctx.Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) + handle := e.Ctx().Value(bindinfo.SessionBindInfoKeyType).(*bindinfo.SessionHandle) tmp = handle.GetAllBindRecord() } else { - tmp = domain.GetDomain(e.ctx).BindHandle().GetAllBindRecord() + tmp = domain.GetDomain(e.Ctx()).BindHandle().GetAllBindRecord() } bindRecords := make([]*bindinfo.BindRecord, 0) for _, bindRecord := range tmp { @@ -369,9 +370,9 @@ func (e *ShowExec) fetchShowBind() error { } checker := visibleChecker{ defaultDB: bindData.Db, - ctx: e.ctx, + ctx: e.Ctx(), is: e.is, - manager: privilege.GetPrivilegeManager(e.ctx), + manager: privilege.GetPrivilegeManager(e.Ctx()), ok: true, } stmt.Accept(&checker) @@ -397,7 +398,7 @@ func (e *ShowExec) fetchShowBind() error { } func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnBindInfo) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, fmt.Sprintf("SELECT count(*) FROM mysql.bind_info where status = '%s' or status = '%s';", bindinfo.Enabled, bindinfo.Using)) @@ -405,7 +406,7 @@ func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { return errors.Trace(err) } - handle := domain.GetDomain(e.ctx).BindHandle() + handle := domain.GetDomain(e.Ctx()).BindHandle() bindRecords := handle.GetAllBindRecord() numBindings := 0 @@ -430,7 +431,7 @@ func (e *ShowExec) fetchShowBindingCacheStatus(ctx context.Context) error { func (e *ShowExec) fetchShowEngines(ctx context.Context) error { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnMeta) - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT * FROM information_schema.engines`) if err != nil { @@ -456,7 +457,7 @@ func moveInfoSchemaToFront(dbs []string) { func (e *ShowExec) fetchShowDatabases() error { dbs := e.is.AllSchemaNames() - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) slices.Sort(dbs) var ( fieldPatternsLike collate.WildcardPattern @@ -470,7 +471,7 @@ func (e *ShowExec) fetchShowDatabases() error { // let information_schema be the first database moveInfoSchemaToFront(dbs) for _, d := range dbs { - if checker != nil && !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, d) { + if checker != nil && !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, d) { continue } else if fieldFilter != "" && strings.ToLower(d) != fieldFilter { continue @@ -485,14 +486,14 @@ func (e *ShowExec) fetchShowDatabases() error { } func (e *ShowExec) fetchShowProcessList() error { - sm := e.ctx.GetSessionManager() + sm := e.Ctx().GetSessionManager() if sm == nil { return nil } - loginUser, activeRoles := e.ctx.GetSessionVars().User, e.ctx.GetSessionVars().ActiveRoles + loginUser, activeRoles := e.Ctx().GetSessionVars().User, e.Ctx().GetSessionVars().ActiveRoles var hasProcessPriv bool - if pm := privilege.GetPrivilegeManager(e.ctx); pm != nil { + if pm := privilege.GetPrivilegeManager(e.Ctx()); pm != nil { if pm.RequestVerification(activeRoles, "", "", "", 
mysql.ProcessPriv) { hasProcessPriv = true } @@ -518,9 +519,9 @@ func (e *ShowExec) fetchShowOpenTables() error { } func (e *ShowExec) fetchShowTables() error { - checker := privilege.GetPrivilegeManager(e.ctx) - if checker != nil && e.ctx.GetSessionVars().User != nil { - if !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, e.DBName.O) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + if checker != nil && e.Ctx().GetSessionVars().User != nil { + if !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, e.DBName.O) { return e.dbAccessDenied() } } @@ -530,7 +531,7 @@ func (e *ShowExec) fetchShowTables() error { // sort for tables schemaTables := e.is.SchemaTables(e.DBName) tableNames := make([]string, 0, len(schemaTables)) - activeRoles := e.ctx.GetSessionVars().ActiveRoles + activeRoles := e.Ctx().GetSessionVars().ActiveRoles var ( tableTypes = make(map[string]string) fieldPatternsLike collate.WildcardPattern @@ -574,9 +575,9 @@ func (e *ShowExec) fetchShowTables() error { } func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { - checker := privilege.GetPrivilegeManager(e.ctx) - if checker != nil && e.ctx.GetSessionVars().User != nil { - if !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, e.DBName.O) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + if checker != nil && e.Ctx().GetSessionVars().User != nil { + if !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, e.DBName.O) { return e.dbAccessDenied() } } @@ -584,19 +585,19 @@ func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { return exeerrors.ErrBadDB.GenWithStackByArgs(e.DBName) } - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnStats) var snapshot uint64 - txn, err := e.ctx.Txn(false) + txn, err := e.Ctx().Txn(false) if err != nil { return errors.Trace(err) } if txn.Valid() { snapshot = txn.StartTS() } - if e.ctx.GetSessionVars().SnapshotTS != 0 { - snapshot = e.ctx.GetSessionVars().SnapshotTS + if e.Ctx().GetSessionVars().SnapshotTS != 0 { + snapshot = e.Ctx().GetSessionVars().SnapshotTS } rows, _, err := exec.ExecRestrictedSQL(ctx, []sqlexec.OptionFuncAlias{sqlexec.ExecOptionWithSnapshot(snapshot), sqlexec.ExecOptionUseCurSession}, @@ -618,7 +619,7 @@ func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { fieldFilter = e.Extractor.Field() fieldPatternsLike = e.Extractor.FieldPatternLike() } - activeRoles := e.ctx.GetSessionVars().ActiveRoles + activeRoles := e.Ctx().GetSessionVars().ActiveRoles for _, row := range rows { tableName := row.GetString(0) if checker != nil && !checker.RequestVerification(activeRoles, e.DBName.O, tableName, "", mysql.AllPrivMask) { @@ -648,9 +649,9 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { fieldPatternsLike = e.Extractor.FieldPatternLike() } - checker := privilege.GetPrivilegeManager(e.ctx) - activeRoles := e.ctx.GetSessionVars().ActiveRoles - if checker != nil && e.ctx.GetSessionVars().User != nil && !checker.RequestVerification(activeRoles, e.DBName.O, tb.Meta().Name.O, "", mysql.InsertPriv|mysql.SelectPriv|mysql.UpdatePriv|mysql.ReferencesPriv) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + activeRoles := e.Ctx().GetSessionVars().ActiveRoles + if checker != nil && e.Ctx().GetSessionVars().User != nil && !checker.RequestVerification(activeRoles, e.DBName.O, tb.Meta().Name.O, "", mysql.InsertPriv|mysql.SelectPriv|mysql.UpdatePriv|mysql.ReferencesPriv) { return e.tableAccessDenied("SELECT", 
tb.Meta().Name.O) } @@ -662,7 +663,7 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { } else { cols = tb.VisibleCols() } - if err := tryFillViewColumnType(ctx, e.ctx, e.is, e.DBName, tb.Meta()); err != nil { + if err := tryFillViewColumnType(ctx, e.Ctx(), e.is, e.DBName, tb.Meta()); err != nil { return err } for _, col := range cols { @@ -678,7 +679,7 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { defaultValStr := fmt.Sprintf("%v", desc.DefaultValue) // If column is timestamp, and default value is not current_timestamp, should convert the default value to the current session time zone. if col.GetType() == mysql.TypeTimestamp && defaultValStr != types.ZeroDatetimeStr && !strings.HasPrefix(strings.ToUpper(defaultValStr), strings.ToUpper(ast.CurrentTimestamp)) { - timeValue, err := table.GetColDefaultValue(e.ctx, col.ToInfo()) + timeValue, err := table.GetColDefaultValue(e.Ctx(), col.ToInfo()) if err != nil { return errors.Trace(err) } @@ -721,7 +722,7 @@ func (e *ShowExec) fetchShowColumns(ctx context.Context) error { } func (e *ShowExec) fetchShowIndex() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() tb, err := e.getTable() @@ -731,9 +732,9 @@ func (e *ShowExec) fetchShowIndex() error { statsTbl := h.GetTableStats(tb.Meta()) - checker := privilege.GetPrivilegeManager(e.ctx) - activeRoles := e.ctx.GetSessionVars().ActiveRoles - if checker != nil && e.ctx.GetSessionVars().User != nil && !checker.RequestVerification(activeRoles, e.DBName.O, tb.Meta().Name.O, "", mysql.AllPrivMask) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + activeRoles := e.Ctx().GetSessionVars().ActiveRoles + if checker != nil && e.Ctx().GetSessionVars().User != nil && !checker.RequestVerification(activeRoles, e.DBName.O, tb.Meta().Name.O, "", mysql.AllPrivMask) { return e.tableAccessDenied("SELECT", tb.Meta().Name.O) } @@ -852,7 +853,7 @@ func (e *ShowExec) fetchShowCharset() error { } func (e *ShowExec) fetchShowMasterStatus() error { - tso := e.ctx.GetSessionVars().TxnCtx.StartTS + tso := e.Ctx().GetSessionVars().TxnCtx.StartTS e.appendRow([]interface{}{"tidb-binlog", tso, "", "", ""}) return nil } @@ -860,7 +861,7 @@ func (e *ShowExec) fetchShowMasterStatus() error { func (e *ShowExec) fetchShowVariables(ctx context.Context) (err error) { var ( value string - sessionVars = e.ctx.GetSessionVars() + sessionVars = e.Ctx().GetSessionVars() ) var ( fieldPatternsLike collate.WildcardPattern @@ -886,7 +887,7 @@ func (e *ShowExec) fetchShowVariables(ctx context.Context) (err error) { } else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) { continue } - if infoschema.SysVarHiddenForSem(e.ctx, v.Name) { + if infoschema.SysVarHiddenForSem(e.Ctx(), v.Name) { continue } value, err = sessionVars.GetGlobalSystemVar(ctx, v.Name) @@ -911,7 +912,7 @@ func (e *ShowExec) fetchShowVariables(ctx context.Context) (err error) { } else if fieldPatternsLike != nil && !fieldPatternsLike.DoMatch(v.Name) { continue } - if infoschema.SysVarHiddenForSem(e.ctx, v.Name) { + if infoschema.SysVarHiddenForSem(e.Ctx(), v.Name) { continue } value, err = sessionVars.GetSessionOrGlobalSystemVar(context.Background(), v.Name) @@ -924,12 +925,12 @@ func (e *ShowExec) fetchShowVariables(ctx context.Context) (err error) { } func (e *ShowExec) fetchShowStatus() error { - sessionVars := e.ctx.GetSessionVars() + sessionVars := e.Ctx().GetSessionVars() statusVars, err := variable.GetStatusVars(sessionVars) if err != nil { return errors.Trace(err) } - 
checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) for status, v := range statusVars { if e.GlobalScope && v.Scope == variable.ScopeSession { continue @@ -1394,7 +1395,7 @@ func (e *ShowExec) fetchShowCreateSequence() error { return exeerrors.ErrWrongObject.GenWithStackByArgs(e.DBName.O, tableInfo.Name.O, "SEQUENCE") } var buf bytes.Buffer - ConstructResultOfShowCreateSequence(e.ctx, tableInfo, &buf) + ConstructResultOfShowCreateSequence(e.Ctx(), tableInfo, &buf) e.appendRow([]interface{}{tableInfo.Name.O, buf.String()}) return nil } @@ -1409,10 +1410,10 @@ func (e *ShowExec) fetchShowClusterConfigs(ctx context.Context) error { emptySet := set.NewStringSet() var confItems [][]types.Datum var err error - if f := e.ctx.Value(TestShowClusterConfigKey); f != nil { + if f := e.Ctx().Value(TestShowClusterConfigKey); f != nil { confItems, err = f.(TestShowClusterConfigFunc)() } else { - confItems, err = fetchClusterConfig(e.ctx, emptySet, emptySet) + confItems, err = fetchClusterConfig(e.Ctx(), emptySet, emptySet) } if err != nil { return err @@ -1436,7 +1437,7 @@ func (e *ShowExec) fetchShowCreateTable() error { tableInfo := tb.Meta() var buf bytes.Buffer // TODO: let the result more like MySQL. - if err = constructResultOfShowCreateTable(e.ctx, &e.DBName, tableInfo, tb.Allocators(e.ctx), &buf); err != nil { + if err = constructResultOfShowCreateTable(e.Ctx(), &e.DBName, tableInfo, tb.Allocators(e.Ctx()), &buf); err != nil { return err } if tableInfo.IsView() { @@ -1464,7 +1465,7 @@ func (e *ShowExec) fetchShowCreateView() error { } var buf bytes.Buffer - fetchShowCreateTable4View(e.ctx, tb.Meta(), &buf) + fetchShowCreateTable4View(e.Ctx(), tb.Meta(), &buf) e.appendRow([]interface{}{tb.Meta().Name.O, buf.String(), tb.Meta().Charset, tb.Meta().Collate}) return nil } @@ -1538,9 +1539,9 @@ func constructResultOfShowCreateResourceGroup(resourceGroup *model.ResourceGroup // fetchShowCreateDatabase composes show create database result. func (e *ShowExec) fetchShowCreateDatabase() error { - checker := privilege.GetPrivilegeManager(e.ctx) - if checker != nil && e.ctx.GetSessionVars().User != nil { - if !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, e.DBName.String()) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + if checker != nil && e.Ctx().GetSessionVars().User != nil { + if !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, e.DBName.String()) { return e.dbAccessDenied() } } @@ -1550,7 +1551,7 @@ func (e *ShowExec) fetchShowCreateDatabase() error { } var buf bytes.Buffer - err := ConstructResultOfShowCreateDatabase(e.ctx, dbInfo, e.IfNotExists, &buf) + err := ConstructResultOfShowCreateDatabase(e.Ctx(), dbInfo, e.IfNotExists, &buf) if err != nil { return err } @@ -1615,14 +1616,14 @@ func (e *ShowExec) fetchShowCollation() error { // fetchShowCreateUser composes 'show create user' result. 
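`fetchShowClusterConfigs` (in the hunk above) uses a test hook threaded through the session's `Value` lookup: if the test key carries a replacement fetcher, use it, otherwise fall back to the real cluster call. A minimal sketch of that hook pattern over a plain `context.Context` (TiDB actually reads the value off the session context; the key and function names here are illustrative):

```Go
package main

import (
	"context"
	"fmt"
)

type ctxKey string

// testHookKey plays the role of TestShowClusterConfigKey: tests stash a
// replacement fetcher under it, production paths leave it unset.
const testHookKey ctxKey = "test-fetch-cluster-config"

type fetchFunc func() ([]string, error)

func fetchClusterConfig() ([]string, error) {
	return []string{"tikv: ..."}, nil // would hit the real cluster
}

// fetchConfigs mirrors fetchShowClusterConfigs: prefer the hook if the
// context carries one, otherwise use the real implementation.
func fetchConfigs(ctx context.Context) ([]string, error) {
	fetch := fetchClusterConfig
	if v := ctx.Value(testHookKey); v != nil {
		fetch = v.(fetchFunc)
	}
	return fetch()
}

func main() {
	ctx := context.WithValue(context.Background(), testHookKey,
		fetchFunc(func() ([]string, error) { return []string{"stub"}, nil }))
	rows, _ := fetchConfigs(ctx)
	fmt.Println(rows) // [stub]
}
```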
func (e *ShowExec) fetchShowCreateUser(ctx context.Context) error { - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) userName, hostName := e.User.Username, e.User.Hostname - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() if e.User.CurrentUser { userName = sessVars.User.AuthUsername hostName = sessVars.User.AuthHostname @@ -1635,7 +1636,7 @@ func (e *ShowExec) fetchShowCreateUser(ctx context.Context) error { } } - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, `SELECT plugin, Account_locked, user_attributes->>'$.metadata', Token_issuer, @@ -1746,8 +1747,8 @@ func (e *ShowExec) fetchShowCreateUser(ctx context.Context) error { } func (e *ShowExec) fetchShowGrants() error { - vars := e.ctx.GetSessionVars() - checker := privilege.GetPrivilegeManager(e.ctx) + vars := e.Ctx().GetSessionVars() + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } @@ -1774,11 +1775,11 @@ func (e *ShowExec) fetchShowGrants() error { if r.Hostname == "" { r.Hostname = "%" } - if !checker.FindEdge(e.ctx, r, e.User) { + if !checker.FindEdge(e.Ctx(), r, e.User) { return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(r.String(), e.User.String()) } } - gs, err := checker.ShowGrants(e.ctx, e.User, e.Roles) + gs, err := checker.ShowGrants(e.Ctx(), e.User, e.Roles) if err != nil { return errors.Trace(err) } @@ -1849,7 +1850,7 @@ func (e *ShowExec) fetchShowPlugins() error { } func (e *ShowExec) fetchShowWarnings(errOnly bool) error { - stmtCtx := e.ctx.GetSessionVars().StmtCtx + stmtCtx := e.Ctx().GetSessionVars().StmtCtx if e.CountWarningsOrErrors { errCount, warnCount := stmtCtx.NumErrorWarnings() if errOnly { @@ -1932,7 +1933,7 @@ func (e *ShowExec) getTable() (table.Table, error) { } func (e *ShowExec) dbAccessDenied() error { - user := e.ctx.GetSessionVars().User + user := e.Ctx().GetSessionVars().User u := user.Username h := user.Hostname if len(user.AuthUsername) > 0 && len(user.AuthHostname) > 0 { @@ -1943,7 +1944,7 @@ func (e *ShowExec) dbAccessDenied() error { } func (e *ShowExec) tableAccessDenied(access string, table string) error { - user := e.ctx.GetSessionVars().User + user := e.Ctx().GetSessionVars().User u := user.Username h := user.Hostname if len(user.AuthUsername) > 0 && len(user.AuthHostname) > 0 { @@ -1993,7 +1994,7 @@ func (e *ShowExec) appendRow(row []interface{}) { } func (e *ShowExec) fetchShowTableRegions(ctx context.Context) error { - store := e.ctx.GetStore() + store := e.Ctx().GetStore() tikvStore, ok := store.(helper.Storage) if !ok { return nil @@ -2174,7 +2175,7 @@ func (e *ShowExec) fetchShowBuiltins() error { func (e *ShowExec) fetchShowSessionStates(ctx context.Context) error { sessionStates := &sessionstates.SessionStates{} - err := e.ctx.EncodeSessionStates(ctx, e.ctx, sessionStates) + err := e.Ctx().EncodeSessionStates(ctx, e.Ctx(), sessionStates) if err != nil { return err } @@ -2189,7 +2190,7 @@ func (e *ShowExec) fetchShowSessionStates(ctx context.Context) error { // session token var token *sessionstates.SessionToken // In testing, user may be nil. 
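`dbAccessDenied` and `tableAccessDenied` (in the hunks below) share one detail worth calling out: error messages prefer the identity the user *authenticated as* (`AuthUsername@AuthHostname`, e.g. `root@%`) and fall back to the connection's literal user@host only when the auth identity is absent. A small sketch of that fallback:

```Go
package main

import "fmt"

type userIdentity struct {
	Username, Hostname         string
	AuthUsername, AuthHostname string
}

// deniedIdentity mirrors dbAccessDenied/tableAccessDenied: prefer the
// authenticated identity, fall back to the connection's user@host.
func deniedIdentity(u userIdentity) string {
	name, host := u.Username, u.Hostname
	if len(u.AuthUsername) > 0 && len(u.AuthHostname) > 0 {
		name, host = u.AuthUsername, u.AuthHostname
	}
	return fmt.Sprintf("%s@%s", name, host)
}

func main() {
	u := userIdentity{Username: "root", Hostname: "10.0.0.8",
		AuthUsername: "root", AuthHostname: "%"}
	fmt.Println("access denied for", deniedIdentity(u)) // root@%
}
```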
- if user := e.ctx.GetSessionVars().User; user != nil { + if user := e.Ctx().GetSessionVars().User; user != nil { // The token may be leaked without secure transport, but the cloud can ensure security in some situations, // so we don't enforce secure connections. if token, err = sessionstates.CreateSessionToken(user.Username); err != nil { @@ -2258,8 +2259,8 @@ func handleImportJobInfo(info *importer.JobInfo, result *chunk.Chunk) error { // "Result_Message", "Create_Time", "Start_Time", "End_Time", "Created_By"} func (e *ShowExec) fetchShowImportJobs(ctx context.Context) error { var hasSuperPriv bool - if pm := privilege.GetPrivilegeManager(e.ctx); pm != nil { - hasSuperPriv = pm.RequestVerification(e.ctx.GetSessionVars().ActiveRoles, "", "", "", mysql.SuperPriv) + if pm := privilege.GetPrivilegeManager(e.Ctx()); pm != nil { + hasSuperPriv = pm.RequestVerification(e.Ctx().GetSessionVars().ActiveRoles, "", "", "", mysql.SuperPriv) } // we use sessionCtx from GetTaskManager, user ctx might not have system table privileges. globalTaskManager, err := fstorage.GetTaskManager() @@ -2271,7 +2272,7 @@ func (e *ShowExec) fetchShowImportJobs(ctx context.Context) error { if err = globalTaskManager.WithNewSession(func(se sessionctx.Context) error { exec := se.(sqlexec.SQLExecutor) var err2 error - info, err2 = importer.GetJob(ctx, exec, *e.ImportJobID, e.ctx.GetSessionVars().User.String(), hasSuperPriv) + info, err2 = importer.GetJob(ctx, exec, *e.ImportJobID, e.Ctx().GetSessionVars().User.String(), hasSuperPriv) return err2 }); err != nil { return err @@ -2282,7 +2283,7 @@ func (e *ShowExec) fetchShowImportJobs(ctx context.Context) error { if err = globalTaskManager.WithNewSession(func(se sessionctx.Context) error { exec := se.(sqlexec.SQLExecutor) var err2 error - infos, err2 = importer.GetAllViewableJobs(ctx, exec, e.ctx.GetSessionVars().User.String(), hasSuperPriv) + infos, err2 = importer.GetAllViewableJobs(ctx, exec, e.Ctx().GetSessionVars().User.String(), hasSuperPriv) return err2 }); err != nil { return err @@ -2331,11 +2332,11 @@ func tryFillViewColumnType(ctx context.Context, sctx sessionctx.Context, is info } func runWithSystemSession(ctx context.Context, sctx sessionctx.Context, fn func(sessionctx.Context) error) error { - b := &baseExecutor{ctx: sctx} - sysCtx, err := b.getSysSession() + b := exec.NewBaseExecutor(sctx, nil, 0) + sysCtx, err := b.GetSysSession() if err != nil { return err } - defer b.releaseSysSession(ctx, sysCtx) + defer b.ReleaseSysSession(ctx, sysCtx) return fn(sysCtx) } diff --git a/executor/show_placement.go b/executor/show_placement.go index ae562105476be..f577c3bb2b6be 100644 --- a/executor/show_placement.go +++ b/executor/show_placement.go @@ -106,7 +106,7 @@ func (b *showPlacementLabelsResultBuilder) sortMapKeys(m map[string]interface{}) } func (e *ShowExec) fetchShowPlacementLabels(ctx context.Context) error { - exec := e.ctx.(sqlexec.RestrictedSQLExecutor) + exec := e.Ctx().(sqlexec.RestrictedSQLExecutor) rows, _, err := exec.ExecRestrictedSQL(ctx, nil, "SELECT DISTINCT LABEL FROM %n.%n", "INFORMATION_SCHEMA", infoschema.TableTiKVStoreStatus) if err != nil { return errors.Trace(err) @@ -133,9 +133,9 @@ func (e *ShowExec) fetchShowPlacementLabels(ctx context.Context) error { } func (e *ShowExec) fetchShowPlacementForDB(ctx context.Context) (err error) { - checker := privilege.GetPrivilegeManager(e.ctx) - if checker != nil && e.ctx.GetSessionVars().User != nil { - if !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, e.DBName.String()) { + checker := 
privilege.GetPrivilegeManager(e.Ctx()) + if checker != nil && e.Ctx().GetSessionVars().User != nil { + if !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, e.DBName.String()) { return e.dbAccessDenied() } } @@ -262,14 +262,14 @@ func (e *ShowExec) fetchAllPlacementPolicies() error { } func (e *ShowExec) fetchAllDBPlacements(ctx context.Context, scheduleState map[int64]infosync.PlacementScheduleState) error { - checker := privilege.GetPrivilegeManager(e.ctx) - activeRoles := e.ctx.GetSessionVars().ActiveRoles + checker := privilege.GetPrivilegeManager(e.Ctx()) + activeRoles := e.Ctx().GetSessionVars().ActiveRoles dbs := e.is.AllSchemas() slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { return i.Name.O < j.Name.O }) for _, dbInfo := range dbs { - if e.ctx.GetSessionVars().User != nil && checker != nil && !checker.DBIsVisible(activeRoles, dbInfo.Name.O) { + if e.Ctx().GetSessionVars().User != nil && checker != nil && !checker.DBIsVisible(activeRoles, dbInfo.Name.O) { continue } @@ -296,8 +296,8 @@ type tableRowSet struct { } func (e *ShowExec) fetchAllTablePlacements(ctx context.Context, scheduleState map[int64]infosync.PlacementScheduleState) error { - checker := privilege.GetPrivilegeManager(e.ctx) - activeRoles := e.ctx.GetSessionVars().ActiveRoles + checker := privilege.GetPrivilegeManager(e.Ctx()) + activeRoles := e.Ctx().GetSessionVars().ActiveRoles dbs := e.is.AllSchemas() slices.SortFunc(dbs, func(i, j *model.DBInfo) bool { return i.Name.O < j.Name.O }) diff --git a/executor/show_stats.go b/executor/show_stats.go index 6d38e9bcd4c88..babd61b3360e1 100644 --- a/executor/show_stats.go +++ b/executor/show_stats.go @@ -32,7 +32,7 @@ import ( ) func (e *ShowExec) fetchShowStatsExtended() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { @@ -104,13 +104,13 @@ func (e *ShowExec) appendTableForStatsExtended(dbName string, tbl *model.TableIn } func (e *ShowExec) fetchShowStatsMeta() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" if pi != nil { partitionName = "global" @@ -155,13 +155,13 @@ func (e *ShowExec) appendTableForStatsLocked(dbName, tblName, partitionName stri } func (e *ShowExec) fetchShowStatsLocked() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" if pi != nil { partitionName = "global" @@ -189,13 +189,13 @@ func (e *ShowExec) fetchShowStatsLocked() error { } func (e *ShowExec) fetchShowStatsHistogram() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" 
if pi != nil { partitionName = "global" @@ -263,13 +263,13 @@ func (e *ShowExec) versionToTime(version uint64) types.Time { } func (e *ShowExec) fetchShowStatsBuckets() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" if pi != nil { partitionName = "global" @@ -322,13 +322,13 @@ func (e *ShowExec) appendTableForStatsBuckets(dbName, tblName, partitionName str } func (e *ShowExec) fetchShowStatsTopN() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() for _, db := range dbs { for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" if pi != nil { partitionName = "global" @@ -403,7 +403,7 @@ func (e *ShowExec) topNToRows(dbName, tblName, partitionName, colName string, nu var tmpDatum types.Datum for i := 0; i < len(topN.TopN); i++ { tmpDatum.SetBytes(topN.TopN[i].Encoded) - valStr, err := statistics.ValueToString(e.ctx.GetSessionVars(), &tmpDatum, numOfCols, columnTypes) + valStr, err := statistics.ValueToString(e.Ctx().GetSessionVars(), &tmpDatum, numOfCols, columnTypes) if err != nil { return err } @@ -428,11 +428,11 @@ func (e *ShowExec) bucketsToRows(dbName, tblName, partitionName, colName string, isIndex = 1 } for i := 0; i < hist.Len(); i++ { - lowerBoundStr, err := statistics.ValueToString(e.ctx.GetSessionVars(), hist.GetLower(i), numOfCols, idxColumnTypes) + lowerBoundStr, err := statistics.ValueToString(e.Ctx().GetSessionVars(), hist.GetLower(i), numOfCols, idxColumnTypes) if err != nil { return errors.Trace(err) } - upperBoundStr, err := statistics.ValueToString(e.ctx.GetSessionVars(), hist.GetUpper(i), numOfCols, idxColumnTypes) + upperBoundStr, err := statistics.ValueToString(e.Ctx().GetSessionVars(), hist.GetUpper(i), numOfCols, idxColumnTypes) if err != nil { return errors.Trace(err) } @@ -454,7 +454,7 @@ func (e *ShowExec) bucketsToRows(dbName, tblName, partitionName, colName string, } func (e *ShowExec) fetchShowStatsHealthy() { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() dbs := do.InfoSchema().AllSchemas() var ( @@ -473,7 +473,7 @@ func (e *ShowExec) fetchShowStatsHealthy() { } for _, tbl := range db.Tables { pi := tbl.GetPartitionInfo() - if pi == nil || e.ctx.GetSessionVars().IsDynamicPartitionPruneEnabled() { + if pi == nil || e.Ctx().GetSessionVars().IsDynamicPartitionPruneEnabled() { partitionName := "" if pi != nil { partitionName = "global" @@ -511,7 +511,7 @@ func (e *ShowExec) fetchShowHistogramsInFlight() { } func (e *ShowExec) fetchShowAnalyzeStatus(ctx context.Context) error { - rows, err := dataForAnalyzeStatusHelper(ctx, e.baseExecutor.ctx) + rows, err := dataForAnalyzeStatusHelper(ctx, e.BaseExecutor.Ctx()) if err != nil { return err } @@ -524,9 +524,9 @@ func (e *ShowExec) fetchShowAnalyzeStatus(ctx context.Context) error { } func (e *ShowExec) fetchShowColumnStatsUsage() error { - do := domain.GetDomain(e.ctx) + do := domain.GetDomain(e.Ctx()) h := do.StatsHandle() - colStatsMap, err := 
h.LoadColumnStatsUsage(e.ctx.GetSessionVars().Location()) + colStatsMap, err := h.LoadColumnStatsUsage(e.Ctx().GetSessionVars().Location()) if err != nil { return err } diff --git a/executor/shuffle.go b/executor/shuffle.go index 7596d2cef1970..4d2c02cf73f60 100644 --- a/executor/shuffle.go +++ b/executor/shuffle.go @@ -20,6 +20,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/util/channel" @@ -79,7 +80,7 @@ import ( // +----------> | fetch data from DataSource | // +---------------------------------+ type ShuffleExec struct { - baseExecutor + exec.BaseExecutor concurrency int workers []*shuffleWorker @@ -88,7 +89,7 @@ type ShuffleExec struct { // each dataSource has a corresponding spliter splitters []partitionSplitter - dataSources []Executor + dataSources []exec.Executor finishCh chan struct{} outputCh chan *shuffleOutput @@ -107,7 +108,7 @@ func (e *ShuffleExec) Open(ctx context.Context) error { return err } } - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } @@ -179,10 +180,10 @@ func (e *ShuffleExec) Close() error { } e.executed = false - if e.runtimeStats != nil { + if e.RuntimeStats() != nil { runtimeStats := &execdetails.RuntimeStatsWithConcurrencyInfo{} runtimeStats.SetConcurrencyInfo(execdetails.NewConcurrencyInfo("ShuffleConcurrency", e.concurrency)) - e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, runtimeStats) + e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), runtimeStats) } // close dataSources @@ -192,7 +193,7 @@ func (e *ShuffleExec) Close() error { } } // close baseExecutor - if err := e.baseExecutor.Close(); err != nil && firstErr == nil { + if err := e.BaseExecutor.Close(); err != nil && firstErr == nil { firstErr = err } return errors.Trace(firstErr) @@ -283,7 +284,7 @@ func (e *ShuffleExec) fetchDataAndSplit(ctx context.Context, dataSourceIndex int break } - workerIndices, err = e.splitters[dataSourceIndex].split(e.ctx, chk, workerIndices) + workerIndices, err = e.splitters[dataSourceIndex].split(e.Ctx(), chk, workerIndices) if err != nil { e.outputCh <- &shuffleOutput{err: err} return @@ -316,11 +317,11 @@ func (e *ShuffleExec) fetchDataAndSplit(ctx context.Context, dataSourceIndex int } } -var _ Executor = &shuffleReceiver{} +var _ exec.Executor = &shuffleReceiver{} // shuffleReceiver receives chunk from dataSource through inputCh type shuffleReceiver struct { - baseExecutor + exec.BaseExecutor finishCh <-chan struct{} executed bool @@ -331,7 +332,7 @@ type shuffleReceiver struct { // Open implements the Executor Open interface. func (e *shuffleReceiver) Open(ctx context.Context) error { - if err := e.baseExecutor.Open(ctx); err != nil { + if err := e.BaseExecutor.Open(ctx); err != nil { return err } e.executed = false @@ -340,7 +341,7 @@ func (e *shuffleReceiver) Open(ctx context.Context) error { // Close implements the Executor Close interface. func (e *shuffleReceiver) Close() error { - return errors.Trace(e.baseExecutor.Close()) + return errors.Trace(e.BaseExecutor.Close()) } // Next implements the Executor Next interface. @@ -367,7 +368,7 @@ func (e *shuffleReceiver) Next(ctx context.Context, req *chunk.Chunk) error { // shuffleWorker is the multi-thread worker executing child executors within "partition". 
type shuffleWorker struct { - childExec Executor + childExec exec.Executor finishCh <-chan struct{} diff --git a/executor/simple.go b/executor/simple.go index 6efda2cbc513f..6d1f6955f9b1d 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/domain/resourcegroup" "github.com/pingcap/tidb/errno" + "github.com/pingcap/tidb/executor/internal/exec" executor_metrics "github.com/pingcap/tidb/executor/metrics" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -74,7 +75,7 @@ const notSpecified = -1 // `BeginStmt`, `CommitStmt`, `RollbackStmt`. // TODO: list all simple statements. type SimpleExec struct { - baseExecutor + exec.BaseExecutor Statement ast.StmtNode // IsFromRemote indicates whether the statement IS FROM REMOTE TiDB instance in cluster, @@ -115,31 +116,6 @@ type userInfo struct { authString string } -func (e *baseExecutor) getSysSession() (sessionctx.Context, error) { - dom := domain.GetDomain(e.ctx) - sysSessionPool := dom.SysSessionPool() - ctx, err := sysSessionPool.Get() - if err != nil { - return nil, err - } - restrictedCtx := ctx.(sessionctx.Context) - restrictedCtx.GetSessionVars().InRestrictedSQL = true - return restrictedCtx, nil -} - -func (e *baseExecutor) releaseSysSession(ctx context.Context, sctx sessionctx.Context) { - if sctx == nil { - return - } - dom := domain.GetDomain(e.ctx) - sysSessionPool := dom.SysSessionPool() - if _, err := sctx.(sqlexec.SQLExecutor).ExecuteInternal(ctx, "rollback"); err != nil { - sctx.(pools.Resource).Close() - return - } - sysSessionPool.Put(sctx.(pools.Resource)) -} - // clearSysSession closes the session without returning it to the pool. // Since the environment variables in the session are changed, the session object is not returned. func clearSysSession(ctx context.Context, sctx sessionctx.Context) { @@ -158,10 +134,10 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { if e.autoNewTxn() { // Commit the old transaction, like DDL.
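
The embedding change above is the heart of the PR: the package-private `baseExecutor` becomes `exec.BaseExecutor`, whose state is reachable only through exported accessors such as `Ctx()`, `ID()`, and `Children(i)`. A minimal standalone sketch of the pattern (toy types and constructor, not the real `executor/internal/exec` definitions):

```Go
package main

import "fmt"

// Executor mirrors the shape of the interface this PR migrates to
// exec.Executor; only the parts the sketch needs are shown.
type Executor interface {
	ID() int
	Children(idx int) Executor
}

// BaseExecutor keeps its fields unexported; embedders reach them
// only through exported accessors, as with exec.BaseExecutor.
type BaseExecutor struct {
	id       int
	children []Executor
}

func NewBaseExecutor(id int, children ...Executor) BaseExecutor {
	return BaseExecutor{id: id, children: children}
}

func (e *BaseExecutor) ID() int { return e.id }

func (e *BaseExecutor) Children(idx int) Executor { return e.children[idx] }

// ShuffleLikeExec stands in for executors such as ShuffleExec that
// now embed the exported base type instead of baseExecutor.
type ShuffleLikeExec struct {
	BaseExecutor
}

func main() {
	child := &ShuffleLikeExec{BaseExecutor: NewBaseExecutor(2)}
	parent := &ShuffleLikeExec{BaseExecutor: NewBaseExecutor(1, child)}
	// Call sites read e.ID() / e.Children(0) rather than e.id / e.children[0].
	fmt.Println(parent.ID(), parent.Children(0).ID())
}
```

Because the fields live unexported in a separate package, every caller outside `internal/exec` is forced through the accessors, which is what produces the mechanical `e.ctx` to `e.Ctx()` rewrites throughout this diff.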
- if err := sessiontxn.NewTxnInStmt(ctx, e.ctx); err != nil { + if err := sessiontxn.NewTxnInStmt(ctx, e.Ctx()); err != nil { return err } - defer func() { e.ctx.GetSessionVars().SetInTxn(false) }() + defer func() { e.Ctx().GetSessionVars().SetInTxn(false) }() } switch x := e.Statement.(type) { @@ -220,12 +196,12 @@ func (e *SimpleExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { } func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - defer e.releaseSysSession(ctx, restrictedCtx) + defer e.ReleaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { return err @@ -253,7 +229,7 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaultRoleStmt) error { for _, user := range s.UserList { - exists, err := userExists(ctx, e.ctx, user.Username, user.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.Username, user.Hostname) if err != nil { return err } @@ -262,7 +238,7 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul } } for _, role := range s.RoleList { - exists, err := userExists(ctx, e.ctx, role.Username, role.Hostname) + exists, err := userExists(ctx, e.Ctx(), role.Username, role.Hostname) if err != nil { return err } @@ -271,12 +247,12 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul } } - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - defer e.releaseSysSession(internalCtx, restrictedCtx) + defer e.ReleaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { return err @@ -296,8 +272,8 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul return err } for _, role := range s.RoleList { - checker := privilege.GetPrivilegeManager(e.ctx) - ok := checker.FindEdge(e.ctx, role, user) + checker := privilege.GetPrivilegeManager(e.Ctx()) + ok := checker.FindEdge(e.Ctx(), role, user) if !ok { if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { return rollbackErr @@ -323,7 +299,7 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRoleStmt) error { for _, user := range s.UserList { - exists, err := userExists(ctx, e.ctx, user.Username, user.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.Username, user.Hostname) if err != nil { return err } @@ -332,11 +308,11 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol } } internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } - defer e.releaseSysSession(internalCtx, restrictedCtx) + defer e.ReleaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.ExecuteInternal(internalCtx, 
"begin"); err != nil { return err @@ -372,17 +348,17 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol } func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (err error) { - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) user := s.UserList[0] if user.Hostname == "" { user.Hostname = "%" } - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - defer e.releaseSysSession(ctx, restrictedCtx) + defer e.ReleaseSysSession(ctx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.ExecuteInternal(ctx, "begin"); err != nil { @@ -411,7 +387,7 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er if i > 0 { sqlexec.MustFormatSQL(sql, ",") } - ok := checker.FindEdge(e.ctx, role, user) + ok := checker.FindEdge(e.Ctx(), role, user) if !ok { return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(role.String(), user.String()) } @@ -433,8 +409,8 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er } func (e *SimpleExec) executeSetDefaultRole(ctx context.Context, s *ast.SetDefaultRoleStmt) (err error) { - sessionVars := e.ctx.GetSessionVars() - checker := privilege.GetPrivilegeManager(e.ctx) + sessionVars := e.Ctx().GetSessionVars() + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } @@ -446,7 +422,7 @@ func (e *SimpleExec) executeSetDefaultRole(ctx context.Context, s *ast.SetDefaul if err != nil { return err } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } } @@ -468,7 +444,7 @@ func (e *SimpleExec) executeSetDefaultRole(ctx context.Context, s *ast.SetDefaul if err != nil { return } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func (e *SimpleExec) setRoleRegular(s *ast.SetRoleStmt) error { @@ -484,10 +460,10 @@ func (e *SimpleExec) setRoleRegular(s *ast.SetRoleStmt) error { roleList = append(roleList, v) } - checker := privilege.GetPrivilegeManager(e.ctx) - ok, roleName := checker.ActiveRoles(e.ctx, roleList) + checker := privilege.GetPrivilegeManager(e.Ctx()) + ok, roleName := checker.ActiveRoles(e.Ctx(), roleList) if !ok { - u := e.ctx.GetSessionVars().User + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } return nil @@ -495,12 +471,12 @@ func (e *SimpleExec) setRoleRegular(s *ast.SetRoleStmt) error { func (e *SimpleExec) setRoleAll(s *ast.SetRoleStmt) error { // Deal with SQL like `SET ROLE ALL;` - checker := privilege.GetPrivilegeManager(e.ctx) - user, host := e.ctx.GetSessionVars().User.AuthUsername, e.ctx.GetSessionVars().User.AuthHostname + checker := privilege.GetPrivilegeManager(e.Ctx()) + user, host := e.Ctx().GetSessionVars().User.AuthUsername, e.Ctx().GetSessionVars().User.AuthHostname roles := checker.GetAllRoles(user, host) - ok, roleName := checker.ActiveRoles(e.ctx, roles) + ok, roleName := checker.ActiveRoles(e.Ctx(), roles) if !ok { - u := e.ctx.GetSessionVars().User + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } return nil @@ -513,8 +489,8 @@ func (e *SimpleExec) setRoleAllExcept(s *ast.SetRoleStmt) error { 
r.Hostname = "%" } } - checker := privilege.GetPrivilegeManager(e.ctx) - user, host := e.ctx.GetSessionVars().User.AuthUsername, e.ctx.GetSessionVars().User.AuthHostname + checker := privilege.GetPrivilegeManager(e.Ctx()) + user, host := e.Ctx().GetSessionVars().User.AuthUsername, e.Ctx().GetSessionVars().User.AuthHostname roles := checker.GetAllRoles(user, host) filter := func(arr []*auth.RoleIdentity, f func(*auth.RoleIdentity) bool) []*auth.RoleIdentity { @@ -537,9 +513,9 @@ func (e *SimpleExec) setRoleAllExcept(s *ast.SetRoleStmt) error { } afterExcept := filter(roles, banned) - ok, roleName := checker.ActiveRoles(e.ctx, afterExcept) + ok, roleName := checker.ActiveRoles(e.Ctx(), afterExcept) if !ok { - u := e.ctx.GetSessionVars().User + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } return nil @@ -547,12 +523,12 @@ func (e *SimpleExec) setRoleAllExcept(s *ast.SetRoleStmt) error { func (e *SimpleExec) setRoleDefault(s *ast.SetRoleStmt) error { // Deal with SQL like `SET ROLE DEFAULT;` - checker := privilege.GetPrivilegeManager(e.ctx) - user, host := e.ctx.GetSessionVars().User.AuthUsername, e.ctx.GetSessionVars().User.AuthHostname + checker := privilege.GetPrivilegeManager(e.Ctx()) + user, host := e.Ctx().GetSessionVars().User.AuthUsername, e.Ctx().GetSessionVars().User.AuthHostname roles := checker.GetDefaultRoles(user, host) - ok, roleName := checker.ActiveRoles(e.ctx, roles) + ok, roleName := checker.ActiveRoles(e.Ctx(), roles) if !ok { - u := e.ctx.GetSessionVars().User + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } return nil @@ -560,11 +536,11 @@ func (e *SimpleExec) setRoleDefault(s *ast.SetRoleStmt) error { func (e *SimpleExec) setRoleNone(s *ast.SetRoleStmt) error { // Deal with SQL like `SET ROLE NONE;` - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) roles := make([]*auth.RoleIdentity, 0) - ok, roleName := checker.ActiveRoles(e.ctx, roles) + ok, roleName := checker.ActiveRoles(e.Ctx(), roles) if !ok { - u := e.ctx.GetSessionVars().User + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } return nil @@ -587,7 +563,7 @@ func (e *SimpleExec) executeSetRole(s *ast.SetRoleStmt) error { } func (e *SimpleExec) dbAccessDenied(dbname string) error { - user := e.ctx.GetSessionVars().User + user := e.Ctx().GetSessionVars().User u := user.Username h := user.Hostname if len(user.AuthUsername) > 0 && len(user.AuthHostname) > 0 { @@ -600,9 +576,9 @@ func (e *SimpleExec) dbAccessDenied(dbname string) error { func (e *SimpleExec) executeUse(s *ast.UseStmt) error { dbname := model.NewCIStr(s.DBName) - checker := privilege.GetPrivilegeManager(e.ctx) - if checker != nil && e.ctx.GetSessionVars().User != nil { - if !checker.DBIsVisible(e.ctx.GetSessionVars().ActiveRoles, dbname.String()) { + checker := privilege.GetPrivilegeManager(e.Ctx()) + if checker != nil && e.Ctx().GetSessionVars().User != nil { + if !checker.DBIsVisible(e.Ctx().GetSessionVars().ActiveRoles, dbname.String()) { return e.dbAccessDenied(dbname.O) } } @@ -611,9 +587,9 @@ func (e *SimpleExec) executeUse(s *ast.UseStmt) error { if !exists { return infoschema.ErrDatabaseNotExists.GenWithStackByArgs(dbname) } - e.ctx.GetSessionVars().CurrentDBChanged = dbname.O != e.ctx.GetSessionVars().CurrentDB - e.ctx.GetSessionVars().CurrentDB = dbname.O - sessionVars := e.ctx.GetSessionVars() + 
e.Ctx().GetSessionVars().CurrentDBChanged = dbname.O != e.Ctx().GetSessionVars().CurrentDB + e.Ctx().GetSessionVars().CurrentDB = dbname.O + sessionVars := e.Ctx().GetSessionVars() dbCollate := dbinfo.Collate if dbCollate == "" { dbCollate = getDefaultCollate(dbinfo.Charset) @@ -629,23 +605,23 @@ func (e *SimpleExec) executeBegin(ctx context.Context, s *ast.BeginStmt) error { // If `START TRANSACTION READ ONLY` is the first statement in TxnCtx, we should // always create a new Txn instead of reusing it. if s.ReadOnly { - noopFuncsMode := e.ctx.GetSessionVars().NoopFuncsMode + noopFuncsMode := e.Ctx().GetSessionVars().NoopFuncsMode if s.AsOf == nil && noopFuncsMode != variable.OnInt { err := expression.ErrFunctionsNoopImpl.GenWithStackByArgs("READ ONLY") if noopFuncsMode == variable.OffInt { return err } - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } if s.AsOf != nil { // start transaction read only as of failed due to we set tx_read_ts before - if e.ctx.GetSessionVars().TxnReadTS.PeakTxnReadTS() > 0 { + if e.Ctx().GetSessionVars().TxnReadTS.PeakTxnReadTS() > 0 { return errors.New("start transaction read only as of is forbidden after set transaction read only as of") } } } - return sessiontxn.GetTxnManager(e.ctx).EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ + return sessiontxn.GetTxnManager(e.Ctx()).EnterNewTxn(ctx, &sessiontxn.EnterNewTxnRequest{ Type: sessiontxn.EnterNewTxnWithBeginStmt, TxnMode: s.Mode, CausalConsistencyOnly: s.CausalConsistencyOnly, @@ -657,7 +633,7 @@ func (e *SimpleExec) executeBegin(ctx context.Context, s *ast.BeginStmt) error { var ErrSavepointNotSupportedWithBinlog = errors.New("SAVEPOINT is not supported when binlog is enabled") func (e *SimpleExec) executeSavepoint(s *ast.SavepointStmt) error { - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() txnCtx := sessVars.TxnCtx if !sessVars.InTxn() && sessVars.IsAutocommit() { return nil @@ -668,7 +644,7 @@ func (e *SimpleExec) executeSavepoint(s *ast.SavepointStmt) error { if !sessVars.ConstraintCheckInPlacePessimistic && sessVars.TxnCtx.IsPessimistic { return errors.New("savepoint is not supported in pessimistic transactions when in-place constraint check is disabled") } - txn, err := e.ctx.Txn(true) + txn, err := e.Ctx().Txn(true) if err != nil { return err } @@ -678,7 +654,7 @@ func (e *SimpleExec) executeSavepoint(s *ast.SavepointStmt) error { } func (e *SimpleExec) executeReleaseSavepoint(s *ast.ReleaseSavepointStmt) error { - deleted := e.ctx.GetSessionVars().TxnCtx.ReleaseSavepoint(s.Name) + deleted := e.Ctx().GetSessionVars().TxnCtx.ReleaseSavepoint(s.Name) if !deleted { return exeerrors.ErrSavepointNotExists.GenWithStackByArgs("SAVEPOINT", s.Name) } @@ -686,7 +662,7 @@ func (e *SimpleExec) executeReleaseSavepoint(s *ast.ReleaseSavepointStmt) error } func (e *SimpleExec) setCurrentUser(users []*auth.UserIdentity) { - sessionVars := e.ctx.GetSessionVars() + sessionVars := e.Ctx().GetSessionVars() for i, user := range users { if user.CurrentUser { users[i].Username = sessionVars.User.AuthUsername @@ -702,7 +678,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm e.setCurrentUser(s.Users) for _, role := range s.Roles { - exists, err := userExists(ctx, e.ctx, role.Username, role.Hostname) + exists, err := userExists(ctx, e.Ctx(), role.Username, role.Hostname) if err != nil { return errors.Trace(err) } @@ -711,11 +687,11 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s 
*ast.RevokeRoleStm } } - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } - defer e.releaseSysSession(internalCtx, restrictedCtx) + defer e.ReleaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. @@ -725,12 +701,12 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm sql := new(strings.Builder) // when an active role of current user is revoked, // it should be removed from activeRoles - activeRoles, curUser, curHost := e.ctx.GetSessionVars().ActiveRoles, "", "" - if user := e.ctx.GetSessionVars().User; user != nil { + activeRoles, curUser, curHost := e.Ctx().GetSessionVars().ActiveRoles, "", "" + if user := e.Ctx().GetSessionVars().User; user != nil { curUser, curHost = user.AuthUsername, user.AuthHostname } for _, user := range s.Users { - exists, err := userExists(ctx, e.ctx, user.Username, user.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.Username, user.Hostname) if err != nil { return errors.Trace(err) } @@ -776,25 +752,25 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } - if ok, roleName := checker.ActiveRoles(e.ctx, activeRoles); !ok { - u := e.ctx.GetSessionVars().User + if ok, roleName := checker.ActiveRoles(e.Ctx(), activeRoles); !ok { + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func (e *SimpleExec) executeCommit(s *ast.CommitStmt) { - e.ctx.GetSessionVars().SetInTxn(false) + e.Ctx().GetSessionVars().SetInTxn(false) } func (e *SimpleExec) executeRollback(s *ast.RollbackStmt) error { - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() logutil.BgLogger().Debug("execute rollback statement", zap.Uint64("conn", sessVars.ConnectionID)) - txn, err := e.ctx.Txn(false) + txn, err := e.Ctx().Txn(false) if err != nil { return err } @@ -1027,7 +1003,7 @@ func deletePasswordLockingAttribute(ctx context.Context, sqlExecutor sqlexec.SQL } func (e *SimpleExec) isValidatePasswordEnabled() bool { - validatePwdEnable, err := e.ctx.GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.ValidatePasswordEnable) + validatePwdEnable, err := e.Ctx().GetSessionVars().GlobalVarsAccessor.GetGlobalSysVar(variable.ValidatePasswordEnable) if err != nil { return false } @@ -1038,11 +1014,11 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check `CREATE USER` privilege. 
if !config.GetGlobalConfig().Security.SkipGrantTable { - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } - activeRoles := e.ctx.GetSessionVars().ActiveRoles + activeRoles := e.Ctx().GetSessionVars().ActiveRoles if !checker.RequestVerification(activeRoles, mysql.SystemDB, mysql.UserTable, "", mysql.InsertPriv) { if s.IsCreateRole { if !checker.RequestVerification(activeRoles, "", "", "", mysql.CreateRolePriv) && @@ -1149,7 +1125,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if len(users) > 0 { sqlexec.MustFormatSQL(sql, ",") } - exists, err1 := userExists(ctx, e.ctx, spec.User.Username, spec.User.Hostname) + exists, err1 := userExists(ctx, e.Ctx(), spec.User.Username, spec.User.Hostname) if err1 != nil { return err1 } @@ -1162,7 +1138,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm return exeerrors.ErrCannotUser.GenWithStackByArgs("CREATE USER", user) } err := infoschema.ErrUserAlreadyExists.GenWithStackByArgs(user) - e.ctx.GetSessionVars().StmtCtx.AppendNote(err) + e.Ctx().GetSessionVars().StmtCtx.AppendNote(err) continue } authPlugin := mysql.AuthNativePassword @@ -1175,7 +1151,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if spec.AuthOpt != nil { pwd = spec.AuthOpt.AuthString } - if err := pwdValidator.ValidatePassword(e.ctx.GetSessionVars(), pwd); err != nil { + if err := pwdValidator.ValidatePassword(e.Ctx().GetSessionVars(), pwd); err != nil { return err } } @@ -1194,11 +1170,11 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm recordTokenIssuer := tokenIssuer if len(recordTokenIssuer) > 0 && authPlugin != mysql.AuthTiDBAuthToken { err := fmt.Errorf("TOKEN_ISSUER is not needed for '%s' user", authPlugin) - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) recordTokenIssuer = "" } else if len(recordTokenIssuer) == 0 && authPlugin == mysql.AuthTiDBAuthToken { err := fmt.Errorf("TOKEN_ISSUER is needed for 'tidb_auth_token' user, please use 'alter user' to declare it") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } hostName := strings.ToLower(spec.User.Hostname) @@ -1233,11 +1209,11 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm return nil } - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } - defer e.releaseSysSession(internalCtx, restrictedCtx) + defer e.ReleaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) if _, err := sqlExecutor.ExecuteInternal(internalCtx, "begin"); err != nil { @@ -1282,7 +1258,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return errors.Trace(err) } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func getUserPasswordLimit(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, name string, host string, plOptions *passwordOrLockOptionsInfo) (pRI *passwordReuseInfo, err error) { @@ -1650,7 +1626,7 @@ func checkPasswordReusePolicy(ctx context.Context, sqlExecutor sqlexec.SQLExecut func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) error { 
disableSandBoxMode := false var err error - if e.ctx.InSandBoxMode() { + if e.Ctx().InSandBoxMode() { if err = e.checkSandboxMode(s.Specs); err != nil { return err } @@ -1658,7 +1634,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) if s.CurrentAuth != nil { - user := e.ctx.GetSessionVars().User + user := e.Ctx().GetSessionVars().User if user == nil { return errors.New("Session user is empty") } @@ -1695,11 +1671,11 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) failedUsers := make([]string, 0, len(s.Specs)) needRollback := false - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("could not load privilege checker") } - activeRoles := e.ctx.GetSessionVars().ActiveRoles + activeRoles := e.Ctx().GetSessionVars().ActiveRoles hasCreateUserPriv := checker.RequestVerification(activeRoles, "", "", "", mysql.CreateUserPriv) hasSystemUserPriv := checker.RequestDynamicVerification(activeRoles, "SYSTEM_USER", false) hasRestrictedUserPriv := checker.RequestDynamicVerification(activeRoles, "RESTRICTED_USER_ADMIN", false) @@ -1712,7 +1688,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } } - sysSession, err := e.getSysSession() + sysSession, err := e.GetSysSession() defer clearSysSession(ctx, sysSession) if err != nil { return err @@ -1731,7 +1707,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } for _, spec := range s.Specs { - user := e.ctx.GetSessionVars().User + user := e.Ctx().GetSessionVars().User if spec.User.CurrentUser || ((user != nil) && (user.Username == spec.User.Username) && (user.AuthHostname == spec.User.Hostname)) { spec.User.Username = user.Username spec.User.Hostname = user.AuthHostname @@ -1784,7 +1760,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) RequireAuthTokenOptions ) authTokenOptionHandler := NoNeedAuthTokenOptions - currentAuthPlugin, err := privilege.GetPrivilegeManager(e.ctx).GetAuthPlugin(spec.User.Username, spec.User.Hostname) + currentAuthPlugin, err := privilege.GetPrivilegeManager(e.Ctx()).GetAuthPlugin(spec.User.Username, spec.User.Hostname) if err != nil { return err } @@ -1824,7 +1800,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } } if e.isValidatePasswordEnabled() && spec.AuthOpt.ByAuthString && mysql.IsAuthPluginClearText(spec.AuthOpt.AuthPlugin) { - if err := pwdValidator.ValidatePassword(e.ctx.GetSessionVars(), spec.AuthOpt.AuthString); err != nil { + if err := pwdValidator.ValidatePassword(e.Ctx().GetSessionVars(), spec.AuthOpt.AuthString); err != nil { return err } } @@ -1843,7 +1819,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) pwd: pwd, authString: spec.AuthOpt.AuthString, } - err := checkPasswordReusePolicy(ctx, sqlExecutor, userDetail, e.ctx, spec.AuthOpt.AuthPlugin) + err := checkPasswordReusePolicy(ctx, sqlExecutor, userDetail, e.Ctx(), spec.AuthOpt.AuthPlugin) if err != nil { return err } @@ -1934,7 +1910,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) case NoNeedAuthTokenOptions: if len(authTokenOptions) > 0 { err := errors.New("TOKEN_ISSUER is not needed for the auth plugin") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } case 
OptionalAuthTokenOptions: if len(authTokenOptions) > 0 { @@ -1949,7 +1925,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } } else { err := errors.New("Auth plugin 'tidb_auth_plugin' needs TOKEN_ISSUER") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } } @@ -1996,17 +1972,17 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } for _, user := range failedUsers { err := infoschema.ErrUserDropExists.GenWithStackByArgs(user) - e.ctx.GetSessionVars().StmtCtx.AppendNote(err) + e.Ctx().GetSessionVars().StmtCtx.AppendNote(err) } } if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } - if err = domain.GetDomain(e.ctx).NotifyUpdatePrivilege(); err != nil { + if err = domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege(); err != nil { return err } if disableSandBoxMode { - e.ctx.DisableSandBoxMode() + e.Ctx().DisableSandBoxMode() } return nil } @@ -2017,7 +1993,7 @@ func (e *SimpleExec) checkSandboxMode(specs []*ast.UserSpec) error { continue } if spec.AuthOpt.ByAuthString || spec.AuthOpt.ByHashString { - if spec.User.CurrentUser || e.ctx.GetSessionVars().User.Username == spec.User.Username { + if spec.User.CurrentUser || e.Ctx().GetSessionVars().User.Username == spec.User.Username { return nil } } @@ -2031,7 +2007,7 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) e.setCurrentUser(s.Users) for _, role := range s.Roles { - exists, err := userExists(ctx, e.ctx, role.Username, role.Hostname) + exists, err := userExists(ctx, e.Ctx(), role.Username, role.Hostname) if err != nil { return err } @@ -2040,7 +2016,7 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) } } for _, user := range s.Users { - exists, err := userExists(ctx, e.ctx, user.Username, user.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.Username, user.Hostname) if err != nil { return err } @@ -2049,11 +2025,11 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) } } - restrictedCtx, err := e.getSysSession() + restrictedCtx, err := e.GetSysSession() if err != nil { return err } - defer e.releaseSysSession(internalCtx, restrictedCtx) + defer e.ReleaseSysSession(internalCtx, restrictedCtx) sqlExecutor := restrictedCtx.(sqlexec.SQLExecutor) // begin a transaction to insert role graph edges. 
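
`executeGrantRole` above borrows a system session through the now-exported `GetSysSession`, defers `ReleaseSysSession`, and wraps the role-graph updates in an explicit `begin`/`commit`, rolling back on any failure (the commit follows just below). A standalone sketch of that bracket, with the session pool reduced to a one-method interface; names and signatures are simplified, since the real `ExecuteInternal` takes variadic arguments and returns a record set:

```Go
package main

import (
	"context"
	"fmt"
)

// SQLExecutor is the one-method slice of sqlexec.SQLExecutor this sketch needs.
type SQLExecutor interface {
	ExecuteInternal(ctx context.Context, sql string) error
}

type fakeSession struct{}

func (fakeSession) ExecuteInternal(_ context.Context, sql string) error {
	fmt.Println("exec:", sql)
	return nil
}

// getSysSession / releaseSysSession stand in for the helpers this PR
// turns into exec.BaseExecutor's GetSysSession / ReleaseSysSession.
func getSysSession() (SQLExecutor, error) { return fakeSession{}, nil }

func releaseSysSession(ctx context.Context, s SQLExecutor) {
	// Like the removed helper: roll back before the session goes back
	// to the pool, so half-done work never leaks to the next borrower.
	_ = s.ExecuteInternal(ctx, "rollback")
}

// withSysTxn is the begin -> mutate -> rollback-on-error -> commit
// bracket used by executeGrantRole and the other privilege mutators.
func withSysTxn(ctx context.Context, update func(SQLExecutor) error) error {
	s, err := getSysSession()
	if err != nil {
		return err
	}
	defer releaseSysSession(ctx, s)

	if err := s.ExecuteInternal(ctx, "begin"); err != nil {
		return err
	}
	if err := update(s); err != nil {
		if rbErr := s.ExecuteInternal(ctx, "rollback"); rbErr != nil {
			return rbErr
		}
		return err
	}
	return s.ExecuteInternal(ctx, "commit")
}

func main() {
	ctx := context.Background()
	err := withSysTxn(ctx, func(s SQLExecutor) error {
		return s.ExecuteInternal(ctx, "INSERT INTO mysql.role_edges ...")
	})
	fmt.Println("err:", err)
}
```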
@@ -2078,15 +2054,15 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) if _, err := sqlExecutor.ExecuteInternal(internalCtx, "commit"); err != nil { return err } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } // Should cover same internal mysql.* tables as DROP USER, so this function is very similar func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) var failedUser string - sysSession, err := e.getSysSession() - defer e.releaseSysSession(ctx, sysSession) + sysSession, err := e.GetSysSession() + defer e.ReleaseSysSession(ctx, sysSession) if err != nil { return err } @@ -2193,7 +2169,7 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, usernameColumn, hostColumn string, users *ast.UserToUser) error { @@ -2211,11 +2187,11 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e internalCtx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) // Check privileges. // Check `CREATE USER` privilege. - checker := privilege.GetPrivilegeManager(e.ctx) + checker := privilege.GetPrivilegeManager(e.Ctx()) if checker == nil { return errors.New("miss privilege checker") } - activeRoles := e.ctx.GetSessionVars().ActiveRoles + activeRoles := e.Ctx().GetSessionVars().ActiveRoles if !checker.RequestVerification(activeRoles, mysql.SystemDB, mysql.UserTable, "", mysql.DeletePriv) { if s.IsDropRole { if !checker.RequestVerification(activeRoles, "", "", "", mysql.DropRolePriv) && @@ -2230,8 +2206,8 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e hasSystemUserPriv := checker.RequestDynamicVerification(activeRoles, "SYSTEM_USER", false) hasRestrictedUserPriv := checker.RequestDynamicVerification(activeRoles, "RESTRICTED_USER_ADMIN", false) failedUsers := make([]string, 0, len(s.UserList)) - sysSession, err := e.getSysSession() - defer e.releaseSysSession(internalCtx, sysSession) + sysSession, err := e.GetSysSession() + defer e.ReleaseSysSession(internalCtx, sysSession) if err != nil { return err } @@ -2243,7 +2219,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e sql := new(strings.Builder) for _, user := range s.UserList { - exists, err := userExists(ctx, e.ctx, user.Username, user.Hostname) + exists, err := userExists(ctx, e.Ctx(), user.Username, user.Hostname) if err != nil { return err } @@ -2252,7 +2228,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e failedUsers = append(failedUsers, user.String()) break } - e.ctx.GetSessionVars().StmtCtx.AppendNote(infoschema.ErrUserDropExists.GenWithStackByArgs(user)) + e.Ctx().GetSessionVars().StmtCtx.AppendNote(infoschema.ErrUserDropExists.GenWithStackByArgs(user)) } // Certain users require additional privileges in order to be modified. 
@@ -2381,12 +2357,12 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e } if s.IsDropRole { // apply new activeRoles - if ok, roleName := checker.ActiveRoles(e.ctx, activeRoles); !ok { - u := e.ctx.GetSessionVars().User + if ok, roleName := checker.ActiveRoles(e.Ctx(), activeRoles); !ok { + u := e.Ctx().GetSessionVars().User return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(roleName, u.String()) } } - return domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + return domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() } func userExists(ctx context.Context, sctx sessionctx.Context, name string, host string) (bool, error) { @@ -2422,7 +2398,7 @@ func userExistsInternal(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, na func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error { ctx = kv.WithInternalSourceType(ctx, kv.InternalTxnPrivilege) - sysSession, err := e.getSysSession() + sysSession, err := e.GetSysSession() defer clearSysSession(ctx, sysSession) if err != nil { return err @@ -2444,14 +2420,14 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error var u, h string disableSandboxMode := false if s.User == nil || s.User.CurrentUser { - if e.ctx.GetSessionVars().User == nil { + if e.Ctx().GetSessionVars().User == nil { return errors.New("Session error is empty") } - u = e.ctx.GetSessionVars().User.AuthUsername - h = e.ctx.GetSessionVars().User.AuthHostname + u = e.Ctx().GetSessionVars().User.AuthUsername + h = e.Ctx().GetSessionVars().User.AuthHostname } else { - checker := privilege.GetPrivilegeManager(e.ctx) - activeRoles := e.ctx.GetSessionVars().ActiveRoles + checker := privilege.GetPrivilegeManager(e.Ctx()) + activeRoles := e.Ctx().GetSessionVars().ActiveRoles if checker != nil && !checker.RequestVerification(activeRoles, "", "", "", mysql.SuperPriv) { return exeerrors.ErrDBaccessDenied.GenWithStackByArgs(u, h, "mysql") } @@ -2465,20 +2441,20 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error if !exists { return errors.Trace(exeerrors.ErrPasswordNoMatch) } - if e.ctx.InSandBoxMode() { + if e.Ctx().InSandBoxMode() { if !(s.User == nil || s.User.CurrentUser || - e.ctx.GetSessionVars().User.AuthUsername == u && e.ctx.GetSessionVars().User.AuthHostname == strings.ToLower(h)) { + e.Ctx().GetSessionVars().User.AuthUsername == u && e.Ctx().GetSessionVars().User.AuthHostname == strings.ToLower(h)) { return exeerrors.ErrMustChangePassword.GenWithStackByArgs() } disableSandboxMode = true } - authplugin, err := privilege.GetPrivilegeManager(e.ctx).GetAuthPlugin(u, h) + authplugin, err := privilege.GetPrivilegeManager(e.Ctx()).GetAuthPlugin(u, h) if err != nil { return err } if e.isValidatePasswordEnabled() { - if err := pwdValidator.ValidatePassword(e.ctx.GetSessionVars(), s.Password); err != nil { + if err := pwdValidator.ValidatePassword(e.Ctx().GetSessionVars(), s.Password); err != nil { return err } } @@ -2487,7 +2463,7 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error case mysql.AuthCachingSha2Password, mysql.AuthTiDBSM3Password: pwd = auth.NewHashPassword(s.Password, authplugin) case mysql.AuthSocket: - e.ctx.GetSessionVars().StmtCtx.AppendNote(exeerrors.ErrSetPasswordAuthPlugin.GenWithStackByArgs(u, h)) + e.Ctx().GetSessionVars().StmtCtx.AppendNote(exeerrors.ErrSetPasswordAuthPlugin.GenWithStackByArgs(u, h)) pwd = "" default: pwd = auth.EncodePassword(s.Password) @@ -2511,7 +2487,7 @@ func (e *SimpleExec) executeSetPwd(ctx 
context.Context, s *ast.SetPwdStmt) error pwd: pwd, authString: s.Password, } - err := checkPasswordReusePolicy(ctx, sqlExecutor, userDetail, e.ctx, authplugin) + err := checkPasswordReusePolicy(ctx, sqlExecutor, userDetail, e.Ctx(), authplugin) if err != nil { return err } @@ -2526,12 +2502,12 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error if _, err := sqlExecutor.ExecuteInternal(ctx, "commit"); err != nil { return err } - err = domain.GetDomain(e.ctx).NotifyUpdatePrivilege() + err = domain.GetDomain(e.Ctx()).NotifyUpdatePrivilege() if err != nil { return err } if disableSandboxMode { - e.ctx.DisableSandBoxMode() + e.Ctx().DisableSandBoxMode() } return nil } @@ -2539,8 +2515,8 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error func (e *SimpleExec) executeKillStmt(ctx context.Context, s *ast.KillStmt) error { if x, ok := s.Expr.(*ast.FuncCallExpr); ok { if x.FnName.L == ast.ConnectionID { - sm := e.ctx.GetSessionManager() - sm.Kill(e.ctx.GetSessionVars().ConnectionID, s.Query, false) + sm := e.Ctx().GetSessionManager() + sm.Kill(e.Ctx().GetSessionVars().ConnectionID, s.Query, false) return nil } return errors.New("Invalid operation. Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead") @@ -2548,25 +2524,25 @@ func (e *SimpleExec) executeKillStmt(ctx context.Context, s *ast.KillStmt) error if !config.GetGlobalConfig().EnableGlobalKill { conf := config.GetGlobalConfig() if s.TiDBExtension || conf.CompatibleKillQuery { - sm := e.ctx.GetSessionManager() + sm := e.Ctx().GetSessionManager() if sm == nil { return nil } sm.Kill(s.ConnectionID, s.Query, false) } else { err := errors.New("Invalid operation. Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead") - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) } return nil } - sm := e.ctx.GetSessionManager() + sm := e.Ctx().GetSessionManager() if sm == nil { return nil } if e.IsFromRemote { logutil.BgLogger().Info("Killing connection in current instance redirected from remote TiDB", zap.Uint64("conn", s.ConnectionID), zap.Bool("query", s.Query), - zap.String("sourceAddr", e.ctx.GetSessionVars().SourceAddr.IP.String())) + zap.String("sourceAddr", e.Ctx().GetSessionVars().SourceAddr.IP.String())) sm.Kill(s.ConnectionID, s.Query, false) return nil } @@ -2574,7 +2550,7 @@ func (e *SimpleExec) executeKillStmt(ctx context.Context, s *ast.KillStmt) error gcid, isTruncated, err := globalconn.ParseConnID(s.ConnectionID) if err != nil { err1 := errors.New("Parse ConnectionID failed: " + err.Error()) - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err1) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err1) return nil } if isTruncated { @@ -2583,14 +2559,14 @@ func (e *SimpleExec) executeKillStmt(ctx context.Context, s *ast.KillStmt) error // Notice that this warning cannot be seen if KILL is triggered by "CTRL-C" of mysql client, // as the KILL is sent by a new connection. 
err := errors.New(message) - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err) return nil } if gcid.ServerID != sm.ServerID() { - if err := killRemoteConn(ctx, e.ctx, &gcid, s.Query); err != nil { + if err := killRemoteConn(ctx, e.Ctx(), &gcid, s.Query); err != nil { err1 := errors.New("KILL remote connection failed: " + err.Error()) - e.ctx.GetSessionVars().StmtCtx.AppendWarning(err1) + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(err1) } } else { sm.Kill(s.ConnectionID, s.Query, false) @@ -2648,10 +2624,10 @@ func (e *SimpleExec) executeFlush(s *ast.FlushStmt) error { return errors.New("FLUSH TABLES WITH READ LOCK is not supported. Please use @@tidb_snapshot") } case ast.FlushPrivileges: - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) return dom.NotifyUpdatePrivilege() case ast.FlushTiDBPlugin: - dom := domain.GetDomain(e.ctx) + dom := domain.GetDomain(e.Ctx()) for _, pluginName := range s.Plugins { err := plugin.NotifyFlush(dom, pluginName) if err != nil { @@ -2667,7 +2643,7 @@ func (e *SimpleExec) executeFlush(s *ast.FlushStmt) error { func (e *SimpleExec) executeAlterInstance(s *ast.AlterInstanceStmt) error { if s.ReloadTLS { logutil.BgLogger().Info("execute reload tls", zap.Bool("NoRollbackOnError", s.NoRollbackOnError)) - sm := e.ctx.GetSessionManager() + sm := e.Ctx().GetSessionManager() tlsCfg, _, err := util.LoadTLSCertificates( variable.GetSysVar("ssl_ca").Value, variable.GetSysVar("ssl_key").Value, @@ -2687,7 +2663,7 @@ func (e *SimpleExec) executeAlterInstance(s *ast.AlterInstanceStmt) error { } func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) (err error) { - h := domain.GetDomain(e.ctx).StatsHandle() + h := domain.GetDomain(e.Ctx()).StatsHandle() var statsIDs []int64 // TODO: GLOBAL option will be deprecated. 
Also remove this condition when the syntax is removed if s.IsGlobalStats { @@ -2712,7 +2688,7 @@ func (e *SimpleExec) executeDropStats(s *ast.DropStatsStmt) (err error) { if err := h.DeleteTableStatsFromKV(statsIDs); err != nil { return err } - return h.Update(e.ctx.GetInfoSchema().(infoschema.InfoSchema)) + return h.Update(e.Ctx().GetInfoSchema().(infoschema.InfoSchema)) } func (e *SimpleExec) autoNewTxn() bool { @@ -2736,7 +2712,7 @@ func (e *SimpleExec) autoNewTxn() bool { } func (e *SimpleExec) executeShutdown(s *ast.ShutdownStmt) error { - sessVars := e.ctx.GetSessionVars() + sessVars := e.Ctx().GetSessionVars() logutil.BgLogger().Info("execute shutdown statement", zap.Uint64("conn", sessVars.ConnectionID)) p, err := os.FindProcess(os.Getpid()) if err != nil { @@ -2782,7 +2758,7 @@ func (e *SimpleExec) executeSetSessionStates(ctx context.Context, s *ast.SetSess if err := decoder.Decode(&sessionStates); err != nil { return errors.Trace(err) } - return e.ctx.DecodeSessionStates(ctx, e.ctx, &sessionStates) + return e.Ctx().DecodeSessionStates(ctx, e.Ctx(), &sessionStates) } func (e *SimpleExec) executeAdmin(s *ast.AdminStmt) error { @@ -2799,10 +2775,10 @@ func (e *SimpleExec) executeAdminReloadStatistics(s *ast.AdminStmt) error { if s.Tp != ast.AdminReloadStatistics { return errors.New("This AdminStmt is not ADMIN RELOAD STATS_EXTENDED") } - if !e.ctx.GetSessionVars().EnableExtendedStats { + if !e.Ctx().GetSessionVars().EnableExtendedStats { return errors.New("Extended statistics feature is not generally available now, and tidb_enable_extended_stats is OFF") } - return domain.GetDomain(e.ctx).StatsHandle().ReloadExtendedStatistics() + return domain.GetDomain(e.Ctx()).StatsHandle().ReloadExtendedStatistics() } func (e *SimpleExec) executeAdminFlushPlanCache(s *ast.AdminStmt) error { @@ -2812,17 +2788,17 @@ func (e *SimpleExec) executeAdminFlushPlanCache(s *ast.AdminStmt) error { if s.StatementScope == ast.StatementScopeGlobal { return errors.New("Do not support the 'admin flush global scope.'") } - if !e.ctx.GetSessionVars().EnablePreparedPlanCache { - e.ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("The plan cache is disable. So there no need to flush the plan cache")) + if !e.Ctx().GetSessionVars().EnablePreparedPlanCache { + e.Ctx().GetSessionVars().StmtCtx.AppendWarning(errors.New("The plan cache is disable. So there no need to flush the plan cache")) return nil } - now := types.NewTime(types.FromGoTime(time.Now().In(e.ctx.GetSessionVars().StmtCtx.TimeZone)), mysql.TypeTimestamp, 3) - e.ctx.GetSessionVars().LastUpdateTime4PC = now - e.ctx.GetSessionPlanCache().DeleteAll() + now := types.NewTime(types.FromGoTime(time.Now().In(e.Ctx().GetSessionVars().StmtCtx.TimeZone)), mysql.TypeTimestamp, 3) + e.Ctx().GetSessionVars().LastUpdateTime4PC = now + e.Ctx().GetSessionPlanCache().DeleteAll() if s.StatementScope == ast.StatementScopeInstance { // Record the timestamp. When other sessions want to use the plan cache, // it will check the timestamp first to decide whether the plan cache should be flushed. 
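
The timestamp mechanism described in the comment above can be modeled in a few lines: an instance-scope flush records `now`, and every session treats plan-cache entries older than that mark as stale. A toy model of what `SetExpiredTimeStamp4PC` enables, not TiDB's actual bookkeeping:

```Go
package main

import (
	"fmt"
	"time"
)

// expiredTimeStamp4PC models the domain-level timestamp written by
// ADMIN FLUSH INSTANCE PLAN_CACHE; sessions compare entries against it.
var expiredTimeStamp4PC time.Time

type sessionPlanCache struct {
	cachedAt map[string]time.Time
}

// get trusts an entry only if it was cached after the last instance flush.
func (c *sessionPlanCache) get(key string) bool {
	at, ok := c.cachedAt[key]
	return ok && at.After(expiredTimeStamp4PC)
}

func main() {
	s1 := &sessionPlanCache{cachedAt: map[string]time.Time{"q1": time.Now()}}
	time.Sleep(time.Millisecond)

	// Another session flushes: its own map is cleared directly, while s1
	// merely sees the timestamp move and stops trusting its stale entry.
	expiredTimeStamp4PC = time.Now()

	fmt.Println(s1.get("q1")) // false
}
```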
- domain.GetDomain(e.ctx).SetExpiredTimeStamp4PC(now) + domain.GetDomain(e.Ctx()).SetExpiredTimeStamp4PC(now) } return nil } @@ -2832,9 +2808,9 @@ func (e *SimpleExec) executeSetResourceGroupName(s *ast.SetResourceGroupStmt) er if _, ok := e.is.ResourceGroupByName(s.Name); !ok { return infoschema.ErrResourceGroupNotExists.GenWithStackByArgs(s.Name.O) } - e.ctx.GetSessionVars().ResourceGroupName = s.Name.L + e.Ctx().GetSessionVars().ResourceGroupName = s.Name.L } else { - e.ctx.GetSessionVars().ResourceGroupName = resourcegroup.DefaultResourceGroupName + e.Ctx().GetSessionVars().ResourceGroupName = resourcegroup.DefaultResourceGroupName } return nil } diff --git a/executor/sort.go b/executor/sort.go index 06241993e05f3..97ace1c18e124 100644 --- a/executor/sort.go +++ b/executor/sort.go @@ -20,6 +20,7 @@ import ( "errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/expression" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/planner/util" @@ -33,7 +34,7 @@ import ( // SortExec represents sorting executor. type SortExec struct { - baseExecutor + exec.BaseExecutor ByItems []*util.ByItems Idx int @@ -81,7 +82,7 @@ func (e *SortExec) Close() error { e.spillAction.SetFinished() } e.spillAction = nil - return e.children[0].Close() + return e.Children(0).Close() } // Open implements the Executor Open interface. @@ -91,13 +92,13 @@ func (e *SortExec) Open(ctx context.Context) error { // To avoid duplicated initialization for TopNExec. if e.memTracker == nil { - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) - e.diskTracker = memory.NewTracker(e.id, -1) - e.diskTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.DiskTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) + e.diskTracker = memory.NewTracker(e.ID(), -1) + e.diskTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.DiskTracker) } e.partitionList = e.partitionList[:0] - return e.children[0].Open(ctx) + return e.Children(0).Open(ctx) } // Next implements the Executor Next interface. 
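
`SortExec.Open` above now labels its memory and disk trackers with `e.ID()` and attaches them to the statement-level trackers reached through `e.Ctx()`. The attach/propagate mechanics look roughly like this toy tracker; the real `util/memory` implementation also carries byte limits and OOM actions such as the sort spill:

```Go
package main

import "fmt"

// Tracker is a toy memory.Tracker: consumption recorded on a child
// propagates to every ancestor it is attached to.
type Tracker struct {
	label    int
	consumed int64
	parent   *Tracker
}

func NewTracker(label int) *Tracker { return &Tracker{label: label} }

// AttachTo links t under parent, as in memTracker.AttachTo(stmt tracker).
func (t *Tracker) AttachTo(parent *Tracker) { t.parent = parent }

// Consume adds bytes here and in all ancestors; a real tracker would
// also fire its action (e.g. spill to disk) when a limit is exceeded.
func (t *Tracker) Consume(bytes int64) {
	for cur := t; cur != nil; cur = cur.parent {
		cur.consumed += bytes
	}
}

func main() {
	stmt := NewTracker(0)      // per-statement tracker from the session context
	sortExec := NewTracker(42) // labelled with the executor's ID()
	sortExec.AttachTo(stmt)

	sortExec.Consume(1 << 20) // sorting buffers 1 MiB of rows
	fmt.Println(stmt.consumed, sortExec.consumed)
}
```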
@@ -178,7 +179,7 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { for i, byItem := range e.ByItems { byItemsDesc[i] = byItem.Desc } - e.rowChunks = chunk.NewSortedRowContainer(fields, e.maxChunkSize, byItemsDesc, e.keyColumns, e.keyCmpFuncs) + e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs) e.rowChunks.GetMemTracker().AttachTo(e.memTracker) e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks) if variable.EnableTmpStorageOnOOM.Load() { @@ -189,13 +190,13 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { defer e.spillAction.WaitForTest() } }) - e.ctx.GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction) + e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction) e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker) e.rowChunks.GetDiskTracker().SetLabel(memory.LabelForRowChunks) } for { - chk := tryNewCacheChunk(e.children[0]) - err := Next(ctx, e.children[0], chk) + chk := tryNewCacheChunk(e.Children(0)) + err := Next(ctx, e.Children(0), chk) if err != nil { return err } @@ -206,7 +207,7 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { if err := e.rowChunks.Add(chk); err != nil { if errors.Is(err, chunk.ErrCannotAddBecauseSorted) { e.partitionList = append(e.partitionList, e.rowChunks) - e.rowChunks = chunk.NewSortedRowContainer(fields, e.maxChunkSize, byItemsDesc, e.keyColumns, e.keyCmpFuncs) + e.rowChunks = chunk.NewSortedRowContainer(fields, e.MaxChunkSize(), byItemsDesc, e.keyColumns, e.keyCmpFuncs) e.rowChunks.GetMemTracker().AttachTo(e.memTracker) e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks) e.rowChunks.GetDiskTracker().AttachTo(e.diskTracker) @@ -218,7 +219,7 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { defer e.spillAction.WaitForTest() } }) - e.ctx.GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction) + e.Ctx().GetSessionVars().MemTracker.FallbackOldAndSetNewAction(e.spillAction) err = e.rowChunks.Add(chk) } if err != nil { @@ -228,8 +229,8 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { } failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) { if val.(bool) { - if e.ctx.GetSessionVars().ConnectionID == 123456 { - e.ctx.GetSessionVars().MemTracker.NeedKill.Store(true) + if e.Ctx().GetSessionVars().ConnectionID == 123456 { + e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true) } } }) @@ -388,13 +389,13 @@ func (e *TopNExec) initPointers() { // Open implements the Executor Open interface. func (e *TopNExec) Open(ctx context.Context) error { - e.memTracker = memory.NewTracker(e.id, -1) - e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker) + e.memTracker = memory.NewTracker(e.ID(), -1) + e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker) e.fetched = false e.Idx = 0 - return e.children[0].Open(ctx) + return e.Children(0).Open(ctx) } // Next implements the Executor Next interface. 
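
`fetchRowChunks` above and `loadChunksUntilTotalLimit` below drain the child executor with the same pull loop, now written `e.Children(0)` instead of `e.children[0]`: fetch a chunk, stop on error or an empty chunk, otherwise hand the rows to the sort buffer. Its skeleton over pared-down stand-in types:

```Go
package main

import (
	"context"
	"fmt"
)

// Chunk stands in for chunk.Chunk; an empty chunk from Next means EOF.
type Chunk struct{ rows []int }

func (c *Chunk) NumRows() int { return len(c.rows) }
func (c *Chunk) Reset()       { c.rows = c.rows[:0] }

type Executor interface {
	Next(ctx context.Context, chk *Chunk) error
}

// drainChild is the pull loop shared by the sort paths: keep calling
// child.Next until it errors or returns an empty chunk.
func drainChild(ctx context.Context, child Executor, consume func(*Chunk) error) error {
	chk := &Chunk{}
	for {
		chk.Reset()
		if err := child.Next(ctx, chk); err != nil {
			return err
		}
		if chk.NumRows() == 0 {
			return nil // child exhausted
		}
		if err := consume(chk); err != nil {
			return err
		}
	}
}

// staticExec returns one preset chunk, then EOF.
type staticExec struct{ emitted bool }

func (e *staticExec) Next(_ context.Context, chk *Chunk) error {
	if !e.emitted {
		chk.rows = append(chk.rows, 1, 2, 3)
		e.emitted = true
	}
	return nil
}

func main() {
	total := 0
	_ = drainChild(context.Background(), &staticExec{}, func(c *Chunk) error {
		total += c.NumRows()
		return nil
	})
	fmt.Println("rows:", total)
}
```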
@@ -430,14 +431,14 @@ func (e *TopNExec) Next(ctx context.Context, req *chunk.Chunk) error { func (e *TopNExec) loadChunksUntilTotalLimit(ctx context.Context) error { e.chkHeap = &topNChunkHeap{e} - e.rowChunks = chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) + e.rowChunks = chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize()) e.rowChunks.GetMemTracker().AttachTo(e.memTracker) e.rowChunks.GetMemTracker().SetLabel(memory.LabelForRowChunks) for uint64(e.rowChunks.Len()) < e.totalLimit { - srcChk := tryNewCacheChunk(e.children[0]) + srcChk := tryNewCacheChunk(e.Children(0)) // adjust required rows by total limit - srcChk.SetRequiredRows(int(e.totalLimit-uint64(e.rowChunks.Len())), e.maxChunkSize) - err := Next(ctx, e.children[0], srcChk) + srcChk.SetRequiredRows(int(e.totalLimit-uint64(e.rowChunks.Len())), e.MaxChunkSize()) + err := Next(ctx, e.Children(0), srcChk) if err != nil { return err } @@ -460,9 +461,9 @@ func (e *TopNExec) executeTopN(ctx context.Context) error { // The number of rows we loaded may exceed the total limit, remove the greatest rows by Pop. heap.Pop(e.chkHeap) } - childRowChk := tryNewCacheChunk(e.children[0]) + childRowChk := tryNewCacheChunk(e.Children(0)) for { - err := Next(ctx, e.children[0], childRowChk) + err := Next(ctx, e.Children(0), childRowChk) if err != nil { return err } @@ -504,7 +505,7 @@ func (e *TopNExec) processChildChk(childRowChk *chunk.Chunk) error { // but we want descending top N, then we will keep all data in memory. // But if data is distributed randomly, this function will be called log(n) times. func (e *TopNExec) doCompaction() error { - newRowChunks := chunk.NewList(retTypes(e), e.initCap, e.maxChunkSize) + newRowChunks := chunk.NewList(retTypes(e), e.InitCap(), e.MaxChunkSize()) newRowPtrs := make([]chunk.RowPtr, 0, e.rowChunks.Len()) for _, rowPtr := range e.rowPtrs { newRowPtr := newRowChunks.AppendRow(e.rowChunks.GetRow(rowPtr)) diff --git a/executor/split.go b/executor/split.go index 4c7552737c404..10dfefea82310 100644 --- a/executor/split.go +++ b/executor/split.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/tidb/executor/internal/exec" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" @@ -43,7 +44,7 @@ import ( // SplitIndexRegionExec represents a split index regions executor. type SplitIndexRegionExec struct { - baseExecutor + exec.BaseExecutor tableInfo *model.TableInfo partitionNames []model.CIStr @@ -90,14 +91,14 @@ const checkScatterRegionFinishBackOff = 50 // splitIndexRegion is used to split index regions.
func (e *SplitIndexRegionExec) splitIndexRegion(ctx context.Context) error { - store := e.ctx.GetStore() + store := e.Ctx().GetStore() s, ok := store.(kv.SplittableStore) if !ok { return nil } start := time.Now() - ctxWithTimeout, cancel := context.WithTimeout(ctx, e.ctx.GetSessionVars().GetSplitRegionTimeout()) + ctxWithTimeout, cancel := context.WithTimeout(ctx, e.Ctx().GetSessionVars().GetSplitRegionTimeout()) defer cancel() regionIDs, err := s.SplitRegions(ctxWithTimeout, e.splitIdxKeys, true, &e.tableInfo.ID) if err != nil { @@ -111,10 +112,10 @@ func (e *SplitIndexRegionExec) splitIndexRegion(ctx context.Context) error { return nil } - if !e.ctx.GetSessionVars().WaitSplitRegionFinish { + if !e.Ctx().GetSessionVars().WaitSplitRegionFinish { return nil } - e.finishScatterNum = waitScatterRegionFinish(ctxWithTimeout, e.ctx, start, s, regionIDs, e.tableInfo.Name.L, e.indexInfo.Name.L) + e.finishScatterNum = waitScatterRegionFinish(ctxWithTimeout, e.Ctx(), start, s, regionIDs, e.tableInfo.Name.L, e.indexInfo.Name.L) return nil } @@ -165,7 +166,7 @@ func (e *SplitIndexRegionExec) getSplitIdxPhysicalKeysFromValueList(physicalID i keys = e.getSplitIdxPhysicalStartAndOtherIdxKeys(physicalID, keys) index := tables.NewIndex(physicalID, e.tableInfo, e.indexInfo) for _, v := range e.valueLists { - idxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, v, kv.IntHandle(math.MinInt64), nil) + idxKey, _, err := index.GenIndexKey(e.Ctx().GetSessionVars().StmtCtx, v, kv.IntHandle(math.MinInt64), nil) if err != nil { return nil, err } @@ -226,13 +227,13 @@ func (e *SplitIndexRegionExec) getSplitIdxPhysicalKeysFromBound(physicalID int64 keys = e.getSplitIdxPhysicalStartAndOtherIdxKeys(physicalID, keys) index := tables.NewIndex(physicalID, e.tableInfo, e.indexInfo) // Split index regions by lower, upper value and calculate the step by (upper - lower)/num. - lowerIdxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, e.lower, kv.IntHandle(math.MinInt64), nil) + lowerIdxKey, _, err := index.GenIndexKey(e.Ctx().GetSessionVars().StmtCtx, e.lower, kv.IntHandle(math.MinInt64), nil) if err != nil { return nil, err } // Use math.MinInt64 as handle_id for the upper index key to avoid affecting calculate split point. // If use math.MaxInt64 here, test of `TestSplitIndex` will report error. - upperIdxKey, _, err := index.GenIndexKey(e.ctx.GetSessionVars().StmtCtx, e.upper, kv.IntHandle(math.MinInt64), nil) + upperIdxKey, _, err := index.GenIndexKey(e.Ctx().GetSessionVars().StmtCtx, e.upper, kv.IntHandle(math.MinInt64), nil) if err != nil { return nil, err } @@ -321,7 +322,7 @@ func datumSliceToString(ds []types.Datum) string { // SplitTableRegionExec represents a split table regions executor. 
diff --git a/executor/split_test.go b/executor/split_test.go
index e346150ee241c..a557244cc407a 100644
--- a/executor/split_test.go
+++ b/executor/split_test.go
@@ -23,6 +23,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
@@ -125,7 +126,7 @@ func TestSplitIndex(t *testing.T) {
 	// region10: [90 ~ +inf)
 	ctx := mock.NewContext()
 	e := &SplitIndexRegionExec{
-		baseExecutor: newBaseExecutor(ctx, nil, 0),
+		BaseExecutor: exec.NewBaseExecutor(ctx, nil, 0),
 		tableInfo:    tbInfo,
 		indexInfo:    idxInfo,
 		lower:        []types.Datum{types.NewDatum(0)},
@@ -314,7 +315,7 @@ func TestSplitTable(t *testing.T) {
 	// region10: [90 ~ +inf)
 	ctx := mock.NewContext()
 	e := &SplitTableRegionExec{
-		baseExecutor: newBaseExecutor(ctx, nil, 0),
+		BaseExecutor: exec.NewBaseExecutor(ctx, nil, 0),
 		tableInfo:    tbInfo,
 		handleCols:   core.NewIntHandleCols(&expression.Column{RetType: types.NewFieldType(mysql.TypeLonglong)}),
 		lower:        []types.Datum{types.NewDatum(0)},
@@ -374,7 +375,7 @@ func TestStepShouldLargeThanMinStep(t *testing.T) {
 		},
 	}
 	e1 := &SplitTableRegionExec{
-		baseExecutor: newBaseExecutor(ctx, nil, 0),
+		BaseExecutor: exec.NewBaseExecutor(ctx, nil, 0),
 		tableInfo:    tbInfo,
 		handleCols:   core.NewIntHandleCols(&expression.Column{RetType: types.NewFieldType(mysql.TypeLonglong)}),
 		lower:        []types.Datum{types.NewDatum(0)},
@@ -433,7 +434,7 @@ func TestClusterIndexSplitTable(t *testing.T) {
 	ctx := mock.NewContext()
 	sc := &stmtctx.StatementContext{TimeZone: time.Local}
 	e := &SplitTableRegionExec{
-		baseExecutor: newBaseExecutor(ctx, nil, 0),
+		BaseExecutor: exec.NewBaseExecutor(ctx, nil, 0),
 		tableInfo:    tbInfo,
 		handleCols:   buildHandleColsForSplit(sc, tbInfo),
 		lower:        types.MakeDatums(1, 0),
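These test hunks show the payoff of exporting the base: a test can assemble an executor from `exec.NewBaseExecutor(ctx, nil, 0)` plus a handful of fields, with no session or plan builder involved. A simplified stand-in illustrating the embedding (sessionContext, baseExecutor, and splitExec here are sketches, not the real definitions):

```Go
package main

import "fmt"

// sessionContext stands in for the mock session context used in tests.
type sessionContext struct{ vars map[string]string }

// baseExecutor models the exported base: it carries what executor
// methods commonly need (context and id).
type baseExecutor struct {
	ctx *sessionContext
	id  int
}

func newBaseExecutor(ctx *sessionContext, id int) baseExecutor {
	return baseExecutor{ctx: ctx, id: id}
}

func (b baseExecutor) Ctx() *sessionContext { return b.ctx }
func (b baseExecutor) ID() int              { return b.id }

// splitExec embeds the base the same way SplitIndexRegionExec does, so
// Ctx() and ID() are promoted onto it.
type splitExec struct {
	baseExecutor
	lower, upper int
}

func main() {
	ctx := &sessionContext{vars: map[string]string{}}
	e := &splitExec{baseExecutor: newBaseExecutor(ctx, 0), lower: 0, upper: 100}
	fmt.Println(e.ID(), e.Ctx() != nil, e.upper) // 0 true 100
}
```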
diff --git a/executor/table_reader.go b/executor/table_reader.go
index 827500ab434d5..63f2d274348a4 100644
--- a/executor/table_reader.go
+++ b/executor/table_reader.go
@@ -24,6 +24,7 @@ import (
 	"github.com/pingcap/tidb/domain"
 	"github.com/pingcap/tidb/domain/infosync"
 	"github.com/pingcap/tidb/executor/internal/builder"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	internalutil "github.com/pingcap/tidb/executor/internal/util"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/infoschema"
@@ -47,7 +48,7 @@ import (
 )
 
 // make sure `TableReaderExecutor` implements `Executor`.
-var _ Executor = &TableReaderExecutor{}
+var _ exec.Executor = &TableReaderExecutor{}
 
 // selectResultHook is used to hack distsql.SelectWithRuntimeStats safely for testing.
 type selectResultHook struct {
@@ -70,7 +71,7 @@ type kvRangeBuilder interface {
 
 // TableReaderExecutor sends DAG request and reads table data from kv layer.
 type TableReaderExecutor struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	table table.Table
@@ -150,26 +151,26 @@ func (e *TableReaderExecutor) Open(ctx context.Context) error {
 	if e.memTracker != nil {
 		e.memTracker.Reset()
 	} else {
-		e.memTracker = memory.NewTracker(e.id, -1)
+		e.memTracker = memory.NewTracker(e.ID(), -1)
 	}
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 
 	var err error
 	if e.corColInFilter {
 		if e.storeType == kv.TiFlash {
-			execs, err := builder.ConstructTreeBasedDistExec(e.ctx, e.tablePlan)
+			execs, err := builder.ConstructTreeBasedDistExec(e.Ctx(), e.tablePlan)
 			if err != nil {
 				return err
 			}
 			e.dagPB.RootExecutor = execs[0]
 		} else {
-			e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.ctx, e.plans)
+			e.dagPB.Executors, err = builder.ConstructListBasedDistExec(e.Ctx(), e.plans)
 			if err != nil {
 				return err
 			}
 		}
 	}
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		collExec := true
 		e.dagPB.CollectExecutionSummaries = &collExec
 	}
@@ -263,7 +264,7 @@ func (e *TableReaderExecutor) Next(ctx context.Context, req *chunk.Chunk) error
 		return err
 	}
 
-	err := table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.schema.Columns, e.columns, e.ctx, req)
+	err := table.FillVirtualColumnValue(e.virtualColumnRetFieldTypes, e.virtualColumnIndex, e.Schema().Columns, e.columns, e.Ctx(), req)
 	if err != nil {
 		return err
 	}
@@ -281,7 +282,7 @@ func (e *TableReaderExecutor) Close() error {
 	if e.dummy {
 		return nil
 	}
-	e.ctx.StoreQueryFeedback(e.feedback)
+	e.Ctx().StoreQueryFeedback(e.feedback)
 	return err
 }
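The Open hunk above reuses the executor's memory tracker across re-opens and re-attaches it to the statement-level tracker, so consumption always rolls up to the statement. A minimal model of that wiring; Tracker here is a stand-in for the real memory.Tracker:

```Go
package main

import "fmt"

// Tracker is a simplified hierarchical byte counter.
type Tracker struct {
	label  int
	bytes  int64
	parent *Tracker
}

func NewTracker(label int) *Tracker        { return &Tracker{label: label} }
func (t *Tracker) AttachTo(p *Tracker)     { t.parent = p }
func (t *Tracker) Reset()                  { t.bytes = 0 }

// Consume propagates consumption up the chain, like the real tracker.
func (t *Tracker) Consume(b int64) {
	for c := t; c != nil; c = c.parent {
		c.bytes += b
	}
}

type readerExec struct {
	id         int
	memTracker *Tracker
}

func (e *readerExec) Open(stmtTracker *Tracker) {
	if e.memTracker != nil {
		e.memTracker.Reset() // reopened: keep the same tracker
	} else {
		e.memTracker = NewTracker(e.id)
	}
	e.memTracker.AttachTo(stmtTracker)
}

func main() {
	stmt := NewTracker(-1)
	e := &readerExec{id: 42}
	e.Open(stmt)
	e.memTracker.Consume(1 << 20)
	fmt.Println(stmt.bytes) // consumption is visible at the statement level
}
```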
@@ -297,7 +298,7 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Ra
 	}
 	var results []distsql.SelectResult
 	for _, kvReq := range kvReqs {
-		result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+		result, err := e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 		if err != nil {
 			return nil, err
 		}
@@ -310,7 +311,7 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Ra
 		if err != nil {
 			return nil, err
 		}
-		result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+		result, err := e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 		if err != nil {
 			return nil, err
 		}
@@ -325,7 +326,7 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Ra
 	}
 	var results []distsql.SelectResult
 	for _, kvReq := range kvReqs {
-		result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+		result, err := e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 		if err != nil {
 			return nil, err
 		}
@@ -346,7 +347,7 @@ func (e *TableReaderExecutor) buildResp(ctx context.Context, ranges []*ranger.Ra
 	})
 	e.kvRanges = kvReq.KeyRanges.AppendSelfTo(e.kvRanges)
-	result, err := e.SelectResult(ctx, e.ctx, kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.id)
+	result, err := e.SelectResult(ctx, e.Ctx(), kvReq, retTypes(e), e.feedback, getPhysicalPlanIDs(e.plans), e.ID())
 	if err != nil {
 		return nil, err
 	}
@@ -373,14 +374,14 @@ func (e *TableReaderExecutor) buildKVReqSeparately(ctx context.Context, ranges [
 		SetKeepOrder(e.keepOrder).
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(e.ctx.GetInfoSchema()).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
 		SetMemTracker(e.memTracker).
 		SetStoreType(e.storeType).
 		SetPaging(e.paging).
 		SetAllowBatchCop(e.batchCop).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &reqBuilder.Request, e.netDataSize)).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID).
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &reqBuilder.Request, e.netDataSize)).
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID).
 		Build()
 	if err != nil {
 		return nil, err
@@ -415,14 +416,14 @@ func (e *TableReaderExecutor) buildKVReqForPartitionTableScan(ctx context.Contex
 		SetKeepOrder(e.keepOrder).
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(e.ctx.GetInfoSchema()).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(e.Ctx().GetInfoSchema()).
 		SetMemTracker(e.memTracker).
 		SetStoreType(e.storeType).
 		SetPaging(e.paging).
 		SetAllowBatchCop(e.batchCop).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &reqBuilder.Request, e.netDataSize)).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID).
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &reqBuilder.Request, e.netDataSize)).
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID).
 		Build()
 	if err != nil {
 		return nil, err
@@ -440,12 +441,12 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R
 		}
 		reqBuilder = builder.SetPartitionKeyRanges(kvRange)
 	} else {
-		reqBuilder = builder.SetHandleRanges(e.ctx.GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.table.Meta() != nil && e.table.Meta().IsCommonHandle, ranges, e.feedback)
+		reqBuilder = builder.SetHandleRanges(e.Ctx().GetSessionVars().StmtCtx, getPhysicalTableID(e.table), e.table.Meta() != nil && e.table.Meta().IsCommonHandle, ranges, e.feedback)
 	}
 	if e.table != nil && e.table.Type().IsClusterTable() {
 		copDestination := infoschema.GetClusterTableCopDestination(e.table.Meta().Name.L)
 		if copDestination == infoschema.DDLOwner {
-			ownerManager := domain.GetDomain(e.ctx).DDL().OwnerManager()
+			ownerManager := domain.GetDomain(e.Ctx()).DDL().OwnerManager()
 			ddlOwnerID, err := ownerManager.GetOwnerID(ctx)
 			if err != nil {
 				return nil, err
@@ -465,14 +466,14 @@ func (e *TableReaderExecutor) buildKVReq(ctx context.Context, ranges []*ranger.R
 		SetTxnScope(e.txnScope).
 		SetReadReplicaScope(e.readReplicaScope).
 		SetIsStaleness(e.isStaleness).
-		SetFromSessionVars(e.ctx.GetSessionVars()).
-		SetFromInfoSchema(sessiontxn.GetTxnManager(e.ctx).GetTxnInfoSchema()).
+		SetFromSessionVars(e.Ctx().GetSessionVars()).
+		SetFromInfoSchema(sessiontxn.GetTxnManager(e.Ctx()).GetTxnInfoSchema()).
 		SetMemTracker(e.memTracker).
 		SetStoreType(e.storeType).
 		SetAllowBatchCop(e.batchCop).
-		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.ctx, &reqBuilder.Request, e.netDataSize)).
+		SetClosestReplicaReadAdjuster(newClosestReadAdjuster(e.Ctx(), &reqBuilder.Request, e.netDataSize)).
 		SetPaging(e.paging).
-		SetConnID(e.ctx.GetSessionVars().ConnectionID)
+		SetConnID(e.Ctx().GetSessionVars().ConnectionID)
 	return reqBuilder.Build()
 }
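The buildKVReq* hunks above all funnel through the same fluent builder, which is why the refactor is a long but mechanical chain of Set* call edits. A toy version of that pattern; Request and RequestBuilder are stand-ins for distsql.RequestBuilder, and the Build-time validation rule is invented for the sketch:

```Go
package main

import (
	"errors"
	"fmt"
)

type Request struct {
	KeepOrder bool
	Paging    bool
	ConnID    uint64
}

// RequestBuilder accumulates settings; each Set* returns the builder so
// calls can be chained, and Build validates the final request.
type RequestBuilder struct {
	req Request
	err error
}

func (b *RequestBuilder) SetKeepOrder(v bool) *RequestBuilder { b.req.KeepOrder = v; return b }
func (b *RequestBuilder) SetPaging(v bool) *RequestBuilder    { b.req.Paging = v; return b }
func (b *RequestBuilder) SetConnID(id uint64) *RequestBuilder { b.req.ConnID = id; return b }

func (b *RequestBuilder) Build() (*Request, error) {
	if b.err != nil {
		return nil, b.err
	}
	// A made-up consistency check, standing in for real validation.
	if b.req.KeepOrder && b.req.Paging {
		return nil, errors.New("keep-order and paging cannot be combined in this sketch")
	}
	return &b.req, nil
}

func main() {
	req, err := (&RequestBuilder{}).
		SetKeepOrder(false).
		SetPaging(true).
		SetConnID(7).
		Build()
	fmt.Println(req, err)
}
```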
diff --git a/executor/table_readers_required_rows_test.go b/executor/table_readers_required_rows_test.go
index 1c6a27329dc5a..2a18ae7a48cab 100644
--- a/executor/table_readers_required_rows_test.go
+++ b/executor/table_readers_required_rows_test.go
@@ -22,6 +22,7 @@ import (
 
 	"github.com/pingcap/tidb/distsql"
 	"github.com/pingcap/tidb/executor/internal/builder"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
@@ -122,9 +123,9 @@ func mockSelectResult(ctx context.Context, sctx sessionctx.Context, kvReq *kv.Re
 	}, nil
 }
 
-func buildTableReader(sctx sessionctx.Context) Executor {
+func buildTableReader(sctx sessionctx.Context) exec.Executor {
 	e := &TableReaderExecutor{
-		baseExecutor:     buildMockBaseExec(sctx),
+		BaseExecutor:     buildMockBaseExec(sctx),
 		table:            &tables.TableCommon{},
 		dagPB:            buildMockDAGRequest(sctx),
 		selectResultHook: selectResultHook{mockSelectResult},
@@ -144,14 +145,14 @@ func buildMockDAGRequest(sctx sessionctx.Context) *tipb.DAGRequest {
 	return req
 }
 
-func buildMockBaseExec(sctx sessionctx.Context) baseExecutor {
+func buildMockBaseExec(sctx sessionctx.Context) exec.BaseExecutor {
 	retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDouble), types.NewFieldType(mysql.TypeLonglong)}
 	cols := make([]*expression.Column, len(retTypes))
 	for i := range retTypes {
 		cols[i] = &expression.Column{Index: i, RetType: retTypes[i]}
 	}
 	schema := expression.NewSchema(cols...)
-	baseExec := newBaseExecutor(sctx, schema, 0)
+	baseExec := exec.NewBaseExecutor(sctx, schema, 0)
 	return baseExec
 }
@@ -197,9 +198,9 @@ func TestTableReaderRequiredRows(t *testing.T) {
 	}
 }
 
-func buildIndexReader(sctx sessionctx.Context) Executor {
+func buildIndexReader(sctx sessionctx.Context) exec.Executor {
 	e := &IndexReaderExecutor{
-		baseExecutor:     buildMockBaseExec(sctx),
+		BaseExecutor:     buildMockBaseExec(sctx),
 		dagPB:            buildMockDAGRequest(sctx),
 		index:            &model.IndexInfo{},
 		selectResultHook: selectResultHook{mockSelectResult},
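buildTableReader above wires in `selectResultHook{mockSelectResult}`, the test seam that lets the required-rows tests run without a real store. The shape of that seam, reduced to its essentials (all names here are stand-ins for the real hook in table_reader.go):

```Go
package main

import "fmt"

type SelectResult interface{ NextRows() int }

type realResult struct{}

func (realResult) NextRows() int { return 0 }

type mockResult struct{ rows int }

func (m mockResult) NextRows() int { return m.rows }

// selectResultHook mirrors the idea: a nil-able function field with a
// default, so production code and tests share one call site.
type selectResultHook struct {
	selectResultFunc func() SelectResult
}

func (h selectResultHook) SelectResult() SelectResult {
	if h.selectResultFunc == nil {
		return realResult{} // production path
	}
	return h.selectResultFunc() // test path
}

func main() {
	prod := selectResultHook{}
	test := selectResultHook{selectResultFunc: func() SelectResult { return mockResult{rows: 3} }}
	fmt.Println(prod.SelectResult().NextRows(), test.SelectResult().NextRows()) // 0 3
}
```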
diff --git a/executor/trace.go b/executor/trace.go
index 0a4c65b8e7e63..9006717c802ca 100644
--- a/executor/trace.go
+++ b/executor/trace.go
@@ -30,6 +30,7 @@ import (
 	"github.com/opentracing/opentracing-go"
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/domain"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/ast"
@@ -50,7 +51,7 @@ import (
 
 // TraceExec represents a root executor of trace query.
 type TraceExec struct {
-	baseExecutor
+	exec.BaseExecutor
 	// CollectedSpans collects all spans during execution. Spans are appended via
 	// a callback method which is passed into the tracer implementation.
 	CollectedSpans []basictracer.RawSpan
@@ -73,26 +74,26 @@ func (e *TraceExec) Next(ctx context.Context, req *chunk.Chunk) error {
 	if e.exhausted {
 		return nil
 	}
-	se, ok := e.ctx.(sqlexec.SQLExecutor)
+	se, ok := e.Ctx().(sqlexec.SQLExecutor)
 	if !ok {
 		e.exhausted = true
 		return nil
 	}
 
 	// For audit log plugin to set the correct statement.
-	stmtCtx := e.ctx.GetSessionVars().StmtCtx
+	stmtCtx := e.Ctx().GetSessionVars().StmtCtx
 	defer func() {
-		e.ctx.GetSessionVars().StmtCtx = stmtCtx
+		e.Ctx().GetSessionVars().StmtCtx = stmtCtx
 	}()
 
 	if e.optimizerTrace {
 		switch e.optimizerTraceTarget {
 		case core.TracePlanTargetEstimation:
-			return e.nextOptimizerCEPlanTrace(ctx, e.ctx, req)
+			return e.nextOptimizerCEPlanTrace(ctx, e.Ctx(), req)
 		case core.TracePlanTargetDebug:
-			return e.nextOptimizerDebugPlanTrace(ctx, e.ctx, req)
+			return e.nextOptimizerDebugPlanTrace(ctx, e.Ctx(), req)
 		default:
-			return e.nextOptimizerPlanTrace(ctx, e.ctx, req)
+			return e.nextOptimizerPlanTrace(ctx, e.Ctx(), req)
 		}
 	}
@@ -271,7 +272,7 @@ func (e *TraceExec) nextRowJSON(ctx context.Context, se sqlexec.SQLExecutor, req
 func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) {
 	// For audit log plugin to log the statement correctly.
 	// Should be logged as 'explain ...', instead of the executed SQL.
-	vars := e.ctx.GetSessionVars()
+	vars := e.Ctx().GetSessionVars()
 	origin := vars.InRestrictedSQL
 	vars.InRestrictedSQL = true
 	defer func() {
@@ -287,12 +288,12 @@ func (e *TraceExec) executeChild(ctx context.Context, se sqlexec.SQLExecutor) {
 		logutil.Eventf(ctx, "execute with error(%d): %s", errCode, err.Error())
 	}
 	if rs != nil {
-		drainRecordSet(ctx, e.ctx, rs)
+		drainRecordSet(ctx, e.Ctx(), rs)
 		if err = rs.Close(); err != nil {
 			logutil.Logger(ctx).Error("run trace close result with error", zap.Error(err))
 		}
 	}
-	logutil.Eventf(ctx, "execute done, modify row: %d", e.ctx.GetSessionVars().StmtCtx.AffectedRows())
+	logutil.Eventf(ctx, "execute done, modify row: %d", e.Ctx().GetSessionVars().StmtCtx.AffectedRows())
 }
 
 func drainRecordSet(ctx context.Context, sctx sessionctx.Context, rs sqlexec.RecordSet) {
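TraceExec.Next and executeChild above both use the stash-mutate-restore-in-defer idiom on session state, which survives early returns from any branch. Reduced to a sketch (sessionVars is a stand-in):

```Go
package main

import "fmt"

type sessionVars struct {
	InRestrictedSQL bool
}

// executeChild mirrors the pattern: save the original value, mutate for
// the duration of the call, and restore in a defer so every return path
// (including panics) puts the session back.
func executeChild(vars *sessionVars, run func()) {
	origin := vars.InRestrictedSQL
	vars.InRestrictedSQL = true
	defer func() {
		vars.InRestrictedSQL = origin
	}()
	run()
}

func main() {
	vars := &sessionVars{}
	executeChild(vars, func() { fmt.Println("during:", vars.InRestrictedSQL) }) // true
	fmt.Println("after:", vars.InRestrictedSQL)                                // false
}
```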
diff --git a/executor/union_scan.go b/executor/union_scan.go
index fb658112d104a..36cbbc61b3fa9 100644
--- a/executor/union_scan.go
+++ b/executor/union_scan.go
@@ -19,6 +19,7 @@ import (
 	"fmt"
 	"runtime/trace"
 
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/parser/model"
@@ -34,7 +35,7 @@ import (
 
 // UnionScanExec merges the rows from dirty table and the rows from distsql request.
 type UnionScanExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	memBuf     kv.MemBuffer
 	memBufSnap kv.Getter
@@ -74,7 +75,7 @@ func (us *UnionScanExec) Open(ctx context.Context) error {
 	r, ctx := tracing.StartRegionEx(ctx, "UnionScanExec.Open")
 	defer r.End()
 
-	if err := us.baseExecutor.Open(ctx); err != nil {
+	if err := us.BaseExecutor.Open(ctx); err != nil {
 		return err
 	}
 	return us.open(ctx)
@@ -82,16 +83,16 @@ func (us *UnionScanExec) Open(ctx context.Context) error {
 
 func (us *UnionScanExec) open(ctx context.Context) error {
 	var err error
-	reader := us.children[0]
+	reader := us.Children(0)
 
 	// If the pushed-down condition contains a virtual column, we may build a selection upon reader. Since unionScanExec
 	// already contains the condition, we can ignore the selection.
 	if sel, ok := reader.(*SelectionExec); ok {
-		reader = sel.children[0]
+		reader = sel.Children(0)
 	}
 
 	defer trace.StartRegion(ctx, "UnionScanBuildRows").End()
-	txn, err := us.ctx.Txn(false)
+	txn, err := us.Ctx().Txn(false)
 	if err != nil {
 		return err
 	}
@@ -140,7 +141,7 @@ func (us *UnionScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
 	// Assume req.Capacity() > 0 after GrowAndReset(); if this assumption fails,
 	// the for-loop may exit without reading a single row!
-	req.GrowAndReset(us.maxChunkSize)
+	req.GrowAndReset(us.MaxChunkSize())
 
 	mutableRow := chunk.MutRowFromTypes(retTypes(us))
 	for batchSize := req.Capacity(); req.NumRows() < batchSize; {
@@ -155,13 +156,13 @@ func (us *UnionScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		mutableRow.SetDatums(row...)
 
 		for _, idx := range us.virtualColumnIndex {
-			datum, err := us.schema.Columns[idx].EvalVirtualColumn(mutableRow.ToRow())
+			datum, err := us.Schema().Columns[idx].EvalVirtualColumn(mutableRow.ToRow())
 			if err != nil {
 				return err
 			}
 			// Because the expression might return a different type from
 			// the generated column, we should wrap a CAST on the result.
-			castDatum, err := table.CastValue(us.ctx, datum, us.columns[idx], false, true)
+			castDatum, err := table.CastValue(us.Ctx(), datum, us.columns[idx], false, true)
 			if err != nil {
 				return err
 			}
@@ -172,7 +173,7 @@ func (us *UnionScanExec) Next(ctx context.Context, req *chunk.Chunk) error {
 			mutableRow.SetDatum(idx, castDatum)
 		}
 
-		matched, _, err := expression.EvalBool(us.ctx, us.conditionsWithVirCol, mutableRow.ToRow())
+		matched, _, err := expression.EvalBool(us.Ctx(), us.conditionsWithVirCol, mutableRow.ToRow())
 		if err != nil {
 			return err
 		}
@@ -188,7 +189,7 @@ func (us *UnionScanExec) Close() error {
 	us.cursor4AddRows = nil
 	us.cursor4SnapshotRows = 0
 	us.snapshotRows = us.snapshotRows[:0]
-	return us.children[0].Close()
+	return us.Children(0).Close()
 }
 
 // getOneRow gets one result row from dirty table or child.
@@ -244,7 +245,7 @@ func (us *UnionScanExec) getSnapshotRow(ctx context.Context) ([]types.Datum, err
 	us.cursor4SnapshotRows = 0
 	us.snapshotRows = us.snapshotRows[:0]
 	for len(us.snapshotRows) == 0 {
-		err = Next(ctx, us.children[0], us.snapshotChunkBuffer)
+		err = Next(ctx, us.Children(0), us.snapshotChunkBuffer)
 		if err != nil || us.snapshotChunkBuffer.NumRows() == 0 {
 			return nil, err
 		}
@@ -267,7 +268,7 @@ func (us *UnionScanExec) getSnapshotRow(ctx context.Context) ([]types.Datum, err
 				// commit, but for simplicity, we don't handle it here.
 				continue
 			}
-			us.snapshotRows = append(us.snapshotRows, row.GetDatumRow(retTypes(us.children[0])))
+			us.snapshotRows = append(us.snapshotRows, row.GetDatumRow(retTypes(us.Children(0))))
 		}
 	}
 	return us.snapshotRows[0], nil
@@ -306,7 +307,7 @@ func (us *UnionScanExec) shouldPickFirstRow(a, b []types.Datum) (bool, error) {
 }
 
 func (us *UnionScanExec) compare(a, b []types.Datum) (int, error) {
-	sc := us.ctx.GetSessionVars().StmtCtx
+	sc := us.Ctx().GetSessionVars().StmtCtx
 	for _, colOff := range us.usedIndex {
 		aColumn := a[colOff]
 		bColumn := b[colOff]
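UnionScanExec.getOneRow interleaves two ordered sources, the transaction's dirty rows and the snapshot rows from the child, using compare/shouldPickFirstRow as shown above. The core of that merge, on ints instead of datum rows:

```Go
package main

import "fmt"

// mergeOrdered interleaves two already-ordered row streams, the way the
// union scan merges added (dirty) rows with snapshot rows.
func mergeOrdered(added, snapshot []int) []int {
	out := make([]int, 0, len(added)+len(snapshot))
	i, j := 0, 0
	for i < len(added) && j < len(snapshot) {
		// shouldPickFirstRow in the real code also resolves the case where
		// both sides hold the same handle; plain ints cannot collide here.
		if added[i] <= snapshot[j] {
			out = append(out, added[i])
			i++
		} else {
			out = append(out, snapshot[j])
			j++
		}
	}
	out = append(out, added[i:]...)
	return append(out, snapshot[j:]...)
}

func main() {
	fmt.Println(mergeOrdered([]int{2, 5}, []int{1, 3, 4})) // [1 2 3 4 5]
}
```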
diff --git a/executor/update.go b/executor/update.go
index 39fc0468136de..7929a97a79d2e 100644
--- a/executor/update.go
+++ b/executor/update.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"runtime/trace"
 
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/kv"
 	"github.com/pingcap/tidb/meta/autoid"
@@ -36,7 +37,7 @@ import (
 
 // UpdateExec represents a new update executor.
 type UpdateExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	OrderedList []*expression.Assignment
@@ -197,7 +198,7 @@ func (e *UpdateExec) exec(ctx context.Context, schema *expression.Schema, row, n
 			// Update row
 			fkChecks := e.fkChecks[content.TblID]
 			fkCascades := e.fkCascades[content.TblID]
-			changed, err1 := updateRecord(ctx, e.ctx, handle, oldData, newTableData, flags, tbl, false, e.memTracker, fkChecks, fkCascades)
+			changed, err1 := updateRecord(ctx, e.Ctx(), handle, oldData, newTableData, flags, tbl, false, e.memTracker, fkChecks, fkCascades)
 			if err1 == nil {
 				_, exist := e.updatedRowKeys[content.Start].Get(handle)
 				memDelta := e.updatedRowKeys[content.Start].Set(handle, changed)
@@ -208,7 +209,7 @@ func (e *UpdateExec) exec(ctx context.Context, schema *expression.Schema, row, n
 				continue
 			}
 
-			sc := e.ctx.GetSessionVars().StmtCtx
+			sc := e.Ctx().GetSessionVars().StmtCtx
 			if (kv.ErrKeyExists.Equal(err1) || table.ErrCheckConstraintViolated.Equal(err1)) && sc.DupKeyAsWarning {
 				sc.AppendWarning(err1)
 				continue
@@ -241,16 +242,16 @@ func (e *UpdateExec) Next(ctx context.Context, req *chunk.Chunk) error {
 			return err
 		}
 		e.drained = true
-		e.ctx.GetSessionVars().StmtCtx.AddRecordRows(uint64(numRows))
+		e.Ctx().GetSessionVars().StmtCtx.AddRecordRows(uint64(numRows))
 	}
 	return nil
 }
 
 func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
-	fields := retTypes(e.children[0])
+	fields := retTypes(e.Children(0))
 	colsInfo := plannercore.GetUpdateColumnsInfo(e.tblID2table, e.tblColPosInfos, len(fields))
 	globalRowIdx := 0
-	chk := tryNewCacheChunk(e.children[0])
+	chk := tryNewCacheChunk(e.Children(0))
 	if !e.allAssignmentsAreConstant {
 		e.evalBuffer = chunk.MutRowFromTypes(fields)
 	}
@@ -262,7 +263,7 @@ func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
 	totalNumRows := 0
 	for {
 		e.memTracker.Consume(-memUsageOfChk)
-		err := Next(ctx, e.children[0], chk)
+		err := Next(ctx, e.Children(0), chk)
 		if err != nil {
 			return 0, err
 		}
@@ -273,14 +274,14 @@ func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
 		memUsageOfChk = chk.MemoryUsage()
 		e.memTracker.Consume(memUsageOfChk)
 		if e.collectRuntimeStatsEnabled() {
-			txn, err := e.ctx.Txn(true)
+			txn, err := e.Ctx().Txn(true)
 			if err == nil && txn.GetSnapshot() != nil {
 				txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, e.stats.SnapshotRuntimeStats)
 			}
 		}
-		txn, err := e.ctx.Txn(true)
+		txn, err := e.Ctx().Txn(true)
 		if err == nil {
-			sc := e.ctx.GetSessionVars().StmtCtx
+			sc := e.Ctx().GetSessionVars().StmtCtx
 			txn.SetOption(kv.ResourceGroupTagger, sc.GetResourceGroupTagger())
 			if sc.KvExecCounter != nil {
 				// Bind an interceptor for client-go to count the number of SQL executions of each TiKV.
@@ -315,12 +316,12 @@ func (e *UpdateExec) updateRows(ctx context.Context) (int, error) {
 			}
 		}
 			// write to table
-			if err := e.exec(ctx, e.children[0].Schema(), datumRow, newRow); err != nil {
+			if err := e.exec(ctx, e.Children(0).Schema(), datumRow, newRow); err != nil {
 				return 0, err
 			}
 		}
 		totalNumRows += chk.NumRows()
-		chk = chunk.Renew(chk, e.maxChunkSize)
+		chk = chunk.Renew(chk, e.MaxChunkSize())
 	}
 	return totalNumRows, nil
 }
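The exec hunk above downgrades duplicate-key (and check-constraint) errors to warnings when DupKeyAsWarning is set, so an UPDATE with IGNORE semantics keeps processing the remaining rows. A compact model of that branch; errDupKey and stmtCtx are stand-ins for the real error and statement context:

```Go
package main

import (
	"errors"
	"fmt"
)

var errDupKey = errors.New("duplicate key")

type stmtCtx struct {
	DupKeyAsWarning bool
	warnings        []error
}

func (sc *stmtCtx) AppendWarning(err error) { sc.warnings = append(sc.warnings, err) }

// updateRows applies each row update, downgrading duplicate-key errors
// to warnings when the statement allows it.
func updateRows(sc *stmtCtx, updates []func() error) (int, error) {
	changed := 0
	for _, update := range updates {
		err := update()
		switch {
		case err == nil:
			changed++
		case errors.Is(err, errDupKey) && sc.DupKeyAsWarning:
			sc.AppendWarning(err) // downgrade and keep going
		default:
			return changed, err // everything else still fails the statement
		}
	}
	return changed, nil
}

func main() {
	sc := &stmtCtx{DupKeyAsWarning: true}
	n, err := updateRows(sc, []func() error{
		func() error { return nil },
		func() error { return errDupKey },
		func() error { return nil },
	})
	fmt.Println(n, err, len(sc.warnings)) // 2 <nil> 1
}
```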
@@ -357,7 +358,7 @@ func (e *UpdateExec) fastComposeNewRow(rowIdx int, oldRow []types.Datum, cols []
 		// info of `_tidb_rowid` column is nil.
 		// No need to cast `_tidb_rowid` column value.
 		if cols[assign.Col.Index] != nil {
-			val, err = table.CastValue(e.ctx, val, cols[assign.Col.Index].ColumnInfo, false, false)
+			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
 			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
 				return nil, err
 			}
@@ -384,7 +385,7 @@ func (e *UpdateExec) composeNewRow(rowIdx int, oldRow []types.Datum, cols []*tab
 		// info of `_tidb_rowid` column is nil.
 		// No need to cast `_tidb_rowid` column value.
 		if cols[assign.Col.Index] != nil {
-			val, err = table.CastValue(e.ctx, val, cols[assign.Col.Index].ColumnInfo, false, false)
+			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
 			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
 				return nil, err
 			}
@@ -413,7 +414,7 @@ func (e *UpdateExec) composeGeneratedColumns(rowIdx int, newRowData []types.Datu
 		// info of `_tidb_rowid` column is nil.
 		// No need to cast `_tidb_rowid` column value.
 		if cols[assign.Col.Index] != nil {
-			val, err = table.CastValue(e.ctx, val, cols[assign.Col.Index].ColumnInfo, false, false)
+			val, err = table.CastValue(e.Ctx(), val, cols[assign.Col.Index].ColumnInfo, false, false)
 			if err = e.handleErr(assign.ColName, rowIdx, err); err != nil {
 				return nil, err
 			}
@@ -429,27 +430,27 @@ func (e *UpdateExec) composeGeneratedColumns(rowIdx int, newRowData []types.Datu
 func (e *UpdateExec) Close() error {
 	defer e.memTracker.ReplaceBytesUsed(0)
 	e.setMessage()
-	if e.runtimeStats != nil && e.stats != nil {
-		txn, err := e.ctx.Txn(false)
+	if e.RuntimeStats() != nil && e.stats != nil {
+		txn, err := e.Ctx().Txn(false)
 		if err == nil && txn.Valid() && txn.GetSnapshot() != nil {
 			txn.GetSnapshot().SetOption(kv.CollectRuntimeStats, nil)
 		}
-		defer e.ctx.GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.id, e.stats)
+		defer e.Ctx().GetSessionVars().StmtCtx.RuntimeStatsColl.RegisterStats(e.ID(), e.stats)
 	}
-	return e.children[0].Close()
+	return e.Children(0).Close()
 }
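Close above registers runtime statistics only when collection is enabled, and does it in a defer so registration happens after the rest of Close (including closing the child) has run. A stripped-down model; statsColl stands in for RuntimeStatsColl and the string stats are illustrative:

```Go
package main

import "fmt"

type statsColl struct{ registered map[int]string }

func (c *statsColl) RegisterStats(id int, stats string) { c.registered[id] = stats }

type updateExec struct {
	id           int
	runtimeStats *string // nil means collection is disabled
	coll         *statsColl
	child        interface{ Close() error }
}

func (e *updateExec) Close() error {
	if e.runtimeStats != nil {
		// Deferred so it runs after the child has been closed, like the
		// defer ... RegisterStats call in the hunk above.
		defer e.coll.RegisterStats(e.id, *e.runtimeStats)
	}
	return e.child.Close()
}

type noopChild struct{}

func (noopChild) Close() error { return nil }

func main() {
	stats := "snapshot stats"
	coll := &statsColl{registered: map[int]string{}}
	e := &updateExec{id: 9, runtimeStats: &stats, coll: coll, child: noopChild{}}
	fmt.Println(e.Close(), coll.registered[9]) // <nil> snapshot stats
}
```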
 
 // Open implements the Executor Open interface.
 func (e *UpdateExec) Open(ctx context.Context) error {
-	e.memTracker = memory.NewTracker(e.id, -1)
-	e.memTracker.AttachTo(e.ctx.GetSessionVars().StmtCtx.MemTracker)
+	e.memTracker = memory.NewTracker(e.ID(), -1)
+	e.memTracker.AttachTo(e.Ctx().GetSessionVars().StmtCtx.MemTracker)
 
-	return e.children[0].Open(ctx)
+	return e.Children(0).Open(ctx)
 }
 
 // setMessage sets the info message (ERR_UPDATE_INFO) generated by the UPDATE statement.
 func (e *UpdateExec) setMessage() {
-	stmtCtx := e.ctx.GetSessionVars().StmtCtx
+	stmtCtx := e.Ctx().GetSessionVars().StmtCtx
 	numMatched := e.matched
 	numChanged := stmtCtx.UpdatedRows()
 	numWarnings := stmtCtx.WarningCount()
@@ -458,7 +459,7 @@
 }
 
 func (e *UpdateExec) collectRuntimeStatsEnabled() bool {
-	if e.runtimeStats != nil {
+	if e.RuntimeStats() != nil {
 		if e.stats == nil {
 			e.stats = &updateRuntimeStats{
 				SnapshotRuntimeStats: &txnsnapshot.SnapshotRuntimeStats{},
diff --git a/executor/utils_test.go b/executor/utils_test.go
index 8afcddb16d781..87e9385a43033 100644
--- a/executor/utils_test.go
+++ b/executor/utils_test.go
@@ -18,6 +18,7 @@ import (
 	"testing"
 
 	"github.com/pingcap/errors"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/planner/core"
 	"github.com/pingcap/tidb/types"
 	"github.com/stretchr/testify/require"
@@ -119,8 +120,8 @@ func TestEqualDatumsAsBinary(t *testing.T) {
 		{[]interface{}{1}, []interface{}{1, 1}, false},
 		{[]interface{}{nil}, []interface{}{1}, false},
 	}
-
-	e := &InsertValues{baseExecutor: baseExecutor{ctx: core.MockContext()}}
+	base := exec.NewBaseExecutor(core.MockContext(), nil, 0)
+	e := &InsertValues{BaseExecutor: base}
 	for _, tt := range tests {
 		res, err := e.equalDatumsAsBinary(types.MakeDatums(tt.a...), types.MakeDatums(tt.b...))
 		require.NoError(t, err)
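TestEqualDatumsAsBinary keeps its table-driven shape after the refactor; only the executor construction changes. For reference, the skeleton of such a table-driven test, with reflect.DeepEqual standing in for the binary datum comparison (this is a sketch, not the real test):

```Go
package executor_test

import (
	"reflect"
	"testing"
)

// TestEqualValuesSketch mirrors the table-driven layout above: each case
// lists two inputs and the expected equality result.
func TestEqualValuesSketch(t *testing.T) {
	tests := []struct {
		a, b []interface{}
		same bool
	}{
		{[]interface{}{1}, []interface{}{1}, true},
		{[]interface{}{1}, []interface{}{1, 1}, false},
		{[]interface{}{nil}, []interface{}{1}, false},
	}
	for i, tt := range tests {
		if got := reflect.DeepEqual(tt.a, tt.b); got != tt.same {
			t.Errorf("case %d: got %v, want %v", i, got, tt.same)
		}
	}
}
```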
diff --git a/executor/window.go b/executor/window.go
index aaa1e51cacc85..e585e2689c526 100644
--- a/executor/window.go
+++ b/executor/window.go
@@ -19,6 +19,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/executor/aggfuncs"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/parser/ast"
 	"github.com/pingcap/tidb/planner/core"
@@ -29,7 +30,7 @@ import (
 
 // WindowExec is the executor for window functions.
 type WindowExec struct {
-	baseExecutor
+	exec.BaseExecutor
 
 	groupChecker *vecGroupChecker
 	// childResult stores the child chunk
@@ -47,7 +48,7 @@ type WindowExec struct {
 
 // Close implements the Executor Close interface.
 func (e *WindowExec) Close() error {
-	return errors.Trace(e.baseExecutor.Close())
+	return errors.Trace(e.BaseExecutor.Close())
}
 
 // Next implements the Executor Next interface.
@@ -134,11 +135,11 @@ func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
 		// TODO: Combine these three methods.
 		// The old implementation required the processor to have these three methods,
 		// but now it does not have to.
-		groupRows, err = e.processor.consumeGroupRows(e.ctx, groupRows)
+		groupRows, err = e.processor.consumeGroupRows(e.Ctx(), groupRows)
 		if err != nil {
 			return errors.Trace(err)
 		}
-		_, err = e.processor.appendResult2Chunk(e.ctx, groupRows, e.resultChunks[i], remained)
+		_, err = e.processor.appendResult2Chunk(e.Ctx(), groupRows, e.resultChunks[i], remained)
 		if err != nil {
 			return errors.Trace(err)
 		}
@@ -151,8 +152,8 @@ func (e *WindowExec) consumeGroupRows(groupRows []chunk.Row) (err error) {
 }
 
 func (e *WindowExec) fetchChild(ctx context.Context) (EOF bool, err error) {
-	childResult := tryNewCacheChunk(e.children[0])
-	err = Next(ctx, e.children[0], childResult)
+	childResult := tryNewCacheChunk(e.Children(0))
+	err = Next(ctx, e.Children(0), childResult)
 	if err != nil {
 		return false, errors.Trace(err)
 	}
@@ -162,7 +163,7 @@ func (e *WindowExec) fetchChild(ctx context.Context) (EOF bool, err error) {
 		return true, nil
 	}
 
-	resultChk := e.ctx.GetSessionVars().GetNewChunkWithCapacity(e.retFieldTypes, 0, numRows, e.AllocPool)
+	resultChk := e.Ctx().GetSessionVars().GetNewChunkWithCapacity(e.RetFieldTypes(), 0, numRows, e.AllocPool)
 	err = e.copyChk(childResult, resultChk)
 	if err != nil {
 		return false, err
@@ -287,7 +288,7 @@ func (p *rowFrameWindowProcessor) getEndOffset(numRows uint64) uint64 {
 	return 0
 }
 
-func (p *rowFrameWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
+func (p *rowFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
 	return rows, nil
 }
@@ -522,7 +523,7 @@ func (p *rangeFrameWindowProcessor) appendResult2Chunk(ctx sessionctx.Context, r
 	return rows, nil
 }
 
-func (p *rangeFrameWindowProcessor) consumeGroupRows(ctx sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
+func (p *rangeFrameWindowProcessor) consumeGroupRows(_ sessionctx.Context, rows []chunk.Row) ([]chunk.Row, error) {
 	return rows, nil
 }
diff --git a/executor/write.go b/executor/write.go
index dcb8e60a2c6d3..29bc2adf401bd 100644
--- a/executor/write.go
+++ b/executor/write.go
@@ -20,6 +20,7 @@ import (
 
 	"github.com/pingcap/errors"
 	"github.com/pingcap/tidb/errno"
+	"github.com/pingcap/tidb/executor/internal/exec"
 	"github.com/pingcap/tidb/expression"
 	"github.com/pingcap/tidb/infoschema"
 	"github.com/pingcap/tidb/kv"
@@ -37,11 +38,11 @@ import (
 )
 
 var (
-	_ Executor = &UpdateExec{}
-	_ Executor = &DeleteExec{}
-	_ Executor = &InsertExec{}
-	_ Executor = &ReplaceExec{}
-	_ Executor = &LoadDataExec{}
+	_ exec.Executor = &UpdateExec{}
+	_ exec.Executor = &DeleteExec{}
+	_ exec.Executor = &InsertExec{}
+	_ exec.Executor = &ReplaceExec{}
+	_ exec.Executor = &LoadDataExec{}
)
 
 // updateRecord updates the row specified by the handle `h`, from `oldData` to `newData`.
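The write.go hunk keeps the compile-time assertions that each write executor still satisfies the (now exported) Executor interface. The idiom in isolation; the pointer-literal form `&UpdateExec{}` used above and the typed-nil form below are equivalent for this purpose:

```Go
package main

import "fmt"

type Executor interface {
	Open() error
	Close() error
}

type UpdateExec struct{}

func (*UpdateExec) Open() error  { return nil }
func (*UpdateExec) Close() error { return nil }

// Compile-time check: remove either method above and this line no longer
// compiles, at zero runtime cost.
var _ Executor = (*UpdateExec)(nil)

func main() { fmt.Println("UpdateExec implements Executor") }
```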