planner: skip all system tables when collecting predicate columns (#…
Rustin170506 authored May 23, 2024
1 parent fe5858b commit d6308af
Showing 3 changed files with 58 additions and 0 deletions.
30 changes: 30 additions & 0 deletions pkg/infoschema/infoschema.go
@@ -120,6 +120,36 @@ func MockInfoSchema(tbList []*model.TableInfo) InfoSchema {
		bucketIdx := tableBucketIdx(tb.ID)
		result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl)
	}
	// Add a system table.
	tables := []*model.TableInfo{
		{
			// Use a very big ID to avoid conflict with normal tables.
			ID:   9999,
			Name: model.NewCIStr("stats_meta"),
			Columns: []*model.ColumnInfo{
				{
					State:  model.StatePublic,
					Offset: 0,
					Name:   model.NewCIStr("a"),
					ID:     1,
				},
			},
			State: model.StatePublic,
		},
	}
	mysqlDBInfo := &model.DBInfo{ID: 2, Name: model.NewCIStr("mysql"), Tables: tables}
	tableNames = &schemaTables{
		dbInfo: mysqlDBInfo,
		tables: make(map[string]table.Table),
	}
	result.addSchema(tableNames)
	for _, tb := range tables {
		tb.DBID = mysqlDBInfo.ID
		tbl := table.MockTableFromMeta(tb)
		tableNames.tables[tb.Name.L] = tbl
		bucketIdx := tableBucketIdx(tb.ID)
		result.sortedTablesBuckets[bucketIdx] = append(result.sortedTablesBuckets[bucketIdx], tbl)
	}
	for i := range result.sortedTablesBuckets {
		slices.SortFunc(result.sortedTablesBuckets[i], func(i, j table.Table) int {
			return cmp.Compare(i.Meta().ID, j.Meta().ID)
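
Note: with this hunk, MockInfoSchema registers a mysql.stats_meta table next to the usual test schema, so planner unit tests can reference a system table without bootstrapping a real store. A minimal sketch of how a test might use the mocked table; the TableByName call is an assumption whose exact signature varies across TiDB versions:

	// Hedged sketch, not part of the commit: resolve the mocked system table.
	is := infoschema.MockInfoSchema(nil)
	tbl, err := is.TableByName(model.NewCIStr("mysql"), model.NewCIStr("stats_meta"))
	require.NoError(t, err)
	require.Equal(t, "stats_meta", tbl.Meta().Name.L)
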
5 changes: 5 additions & 0 deletions pkg/planner/core/collect_column_stats_usage.go
@@ -23,6 +23,7 @@ import (
"github.com/pingcap/tidb/pkg/planner/core/base"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/statistics/asyncload"
"github.com/pingcap/tidb/pkg/util/filter"
"github.com/pingcap/tidb/pkg/util/intset"
"golang.org/x/exp/maps"
)
@@ -123,6 +124,10 @@ func (c *columnStatsUsageCollector) updateColMapFromExpressions(col *expression.
}

func (c *columnStatsUsageCollector) collectPredicateColumnsForDataSource(ds *DataSource) {
	// Skip all system tables.
	if filter.IsSystemSchema(ds.DBName.L) {
		return
	}
	// For partition tables, no matter whether it is in static or dynamic pruning mode, we use the table ID rather than
	// the partition ID to set TableColumnID.TableID. In this way, the set of predicate columns stays consistent across
	// partitions and the global table.
	tblID := ds.TableInfo().ID
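
The new guard short-circuits collection for any schema that filter.IsSystemSchema recognizes, so system tables never feed the column-stats-usage machinery (note the statistics/asyncload import above). For readers outside the codebase, a self-contained sketch of the shape of that check; the schema list is an assumption for illustration, and pkg/util/filter holds the authoritative set:

	// Illustrative stand-in for filter.IsSystemSchema: reports whether a
	// lower-cased schema name is a built-in system schema. The list below
	// is an assumption; see pkg/util/filter for the real one.
	func isSystemSchema(dbLowerName string) bool {
		switch dbLowerName {
		case "mysql", "information_schema", "performance_schema", "metrics_schema", "sys":
			return true
		default:
			return false
		}
	}
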
23 changes: 23 additions & 0 deletions pkg/planner/core/collect_column_stats_usage_test.go
@@ -101,6 +101,29 @@ func checkColumnStatsUsageForStatsLoad(t *testing.T, is infoschema.InfoSchema, l
	require.Equal(t, expected, cols, comment+", we get %v", cols)
}

func TestSkipSystemTables(t *testing.T) {
	sql := "select * from mysql.stats_meta where a > 1"
	res := []string{}
	s := createPlannerSuite()
	defer s.Close()
	ctx := context.Background()
	stmt, err := s.p.ParseOneStmt(sql, "", "")
	require.NoError(t, err)
	err = Preprocess(context.Background(), s.sctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is}))
	require.NoError(t, err)
	builder, _ := NewPlanBuilder().Init(s.ctx, s.is, hint.NewQBHintHandler(nil))
	p, err := builder.Build(ctx, stmt)
	require.NoError(t, err)
	lp, ok := p.(base.LogicalPlan)
	require.True(t, ok)
	// We check predicate columns twice, before and after logical optimization. Some logical plan patterns may occur
	// before logical optimization while others may occur after logical optimization.
	checkColumnStatsUsageForPredicates(t, s.is, lp, res, sql)
	lp, err = logicalOptimize(ctx, builder.GetOptFlag(), lp)
	require.NoError(t, err)
	checkColumnStatsUsageForPredicates(t, s.is, lp, res, sql)
}
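
Since res is empty, both checks assert that no predicate columns are collected for mysql.stats_meta even though the query filters on column a. For contrast, a hypothetical case against a user table would expect the filter column to be reported; the table and column names below are assumptions modeled on the surrounding fixtures:

	// Hypothetical contrast, not part of the commit: a user table's filter
	// column is still collected as a predicate column.
	sql := "select * from t where a > 1"
	res := []string{"t.a"}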

func TestCollectPredicateColumns(t *testing.T) {
	tests := []struct {
		pruneMode string
