From bfc96821f185bb666900528038ddbd5d831bbd14 Mon Sep 17 00:00:00 2001
From: TonsnakeLin
Date: Wed, 10 Nov 2021 14:05:45 +0800
Subject: [PATCH 1/4] ddl: Do not consider the clustered index when checking the length of the secondary index

---
 ddl/db_integration_test.go | 11 +++---
 ddl/ddl_api.go             | 69 ++++++--------------------------
 ddl/index.go               |  4 +--
 3 files changed, 18 insertions(+), 66 deletions(-)

diff --git a/ddl/db_integration_test.go b/ddl/db_integration_test.go
index bb045d6ff80b0..b6f1106ad2fca 100644
--- a/ddl/db_integration_test.go
+++ b/ddl/db_integration_test.go
@@ -1530,7 +1530,7 @@ func (s *testSerialDBSuite1) TestCreateSecondaryIndexInCluster(c *C) {
 	tk.MustExec("use test")
 
 	// test create table with non-unique key
-	tk.MustGetErrCode(`
+	tk.MustExec(`
 CREATE TABLE t (
   c01 varchar(255) NOT NULL,
   c02 varchar(255) NOT NULL,
   c03 varchar(255) NOT NULL,
   c04 varchar(255) DEFAULT NULL,
   c05 varchar(255) DEFAULT NULL,
   c06 varchar(255) DEFAULT NULL,
   PRIMARY KEY (c01,c02,c03) clustered,
   KEY c04 (c04)
-)`, errno.ErrTooLongKey)
+)`)
+	tk.MustExec("drop table t")
 
 	// test create long clustered primary key.
 	tk.MustGetErrCode(`
@@ -1580,7 +1581,7 @@ CREATE TABLE t (
   PRIMARY KEY (c01,c02) clustered
 )`)
 	tk.MustExec("create index idx1 on t(c03)")
-	tk.MustGetErrCode("create index idx2 on t(c03, c04)", errno.ErrTooLongKey)
+	tk.MustExec("create index idx2 on t(c03, c04)")
 	tk.MustExec("create unique index uk2 on t(c03, c04)")
 	tk.MustExec("drop table t")
 
@@ -1599,9 +1600,9 @@ CREATE TABLE t (
 )`)
 	tk.MustExec("alter table t change c03 c10 varchar(256) default null")
 	tk.MustGetErrCode("alter table t change c10 c100 varchar(1024) default null", errno.ErrTooLongKey)
-	tk.MustGetErrCode("alter table t modify c10 varchar(600) default null", errno.ErrTooLongKey)
+	tk.MustExec("alter table t modify c10 varchar(600) default null")
 	tk.MustExec("alter table t modify c06 varchar(600) default null")
-	tk.MustGetErrCode("alter table t modify c01 varchar(510)", errno.ErrTooLongKey)
+	tk.MustExec("alter table t modify c01 varchar(510)")
 	tk.MustExec("create table t2 like t")
 }
 
diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index 4b47459203343..63f1c5afbdc61 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -1483,27 +1483,7 @@ func buildTableInfo(
 		idxInfo.ID = allocateIndexID(tbInfo)
 		tbInfo.Indices = append(tbInfo.Indices, idxInfo)
 	}
-	if tbInfo.IsCommonHandle {
-		// Ensure tblInfo's each non-unique secondary-index's len + primary-key's len <= MaxIndexLength for clustered index table.
-		var pkLen, idxLen int
-		pkLen, err = indexColumnsLen(tbInfo.Columns, tables.FindPrimaryIndex(tbInfo).Columns)
-		if err != nil {
-			return
-		}
-		for _, idx := range tbInfo.Indices {
-			if idx.Unique {
-				// Only need check for non-unique secondary-index.
-				continue
-			}
-			idxLen, err = indexColumnsLen(tbInfo.Columns, idx.Columns)
-			if err != nil {
-				return
-			}
-			if pkLen+idxLen > config.GetGlobalConfig().MaxIndexLength {
-				return nil, errTooLongKey.GenWithStackByArgs(config.GetGlobalConfig().MaxIndexLength)
-			}
-		}
-	}
+
 	return
 }
 
@@ -4124,40 +4104,30 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol
 	}
 
 	pkIndex := tables.FindPrimaryIndex(tbInfo)
-	var clusteredPkLen int
-	if tbInfo.IsCommonHandle {
-		var err error
-		clusteredPkLen, err = indexColumnsLen(columns, pkIndex.Columns)
-		if err != nil {
-			return err
-		}
-	}
 
-	checkOneIndex := func(indexInfo *model.IndexInfo, pkLenAppendToKey int, skipCheckIfNotModify bool) (modified bool, err error) {
+	checkOneIndex := func(indexInfo *model.IndexInfo) (modified bool, err error) {
 		for _, col := range indexInfo.Columns {
 			if col.Name.L == originalCol.Name.L {
 				modified = true
 				break
 			}
 		}
-		if skipCheckIfNotModify && !modified {
+		if !modified {
 			return
 		}
 		err = checkIndexInModifiableColumns(columns, indexInfo.Columns)
 		if err != nil {
 			return
 		}
-		err = checkIndexPrefixLength(columns, indexInfo.Columns, pkLenAppendToKey)
+		err = checkIndexPrefixLength(columns, indexInfo.Columns)
 		return
 	}
 
 	// Check primary key first and get "does primary key's column has be modified?" info.
-	var (
-		pkModified bool
-		err        error
-	)
+	var err error
+
 	if pkIndex != nil {
-		pkModified, err = checkOneIndex(pkIndex, 0, true)
+		_, err = checkOneIndex(pkIndex)
 		if err != nil {
 			return err
 		}
@@ -4168,12 +4138,9 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol
 		if indexInfo.Primary {
 			continue
 		}
-		var pkLenAppendToKey int
-		if !indexInfo.Unique {
-			pkLenAppendToKey = clusteredPkLen
-		}
-
-		_, err = checkOneIndex(indexInfo, pkLenAppendToKey, !tbInfo.IsCommonHandle || !pkModified)
+		// checkOneIndex validates the index (including its prefix length) only
+		// when it contains the modified column.
+		_, err = checkOneIndex(indexInfo)
 		if err != nil {
 			return err
 		}
@@ -5302,22 +5269,6 @@ func (d *ddl) CreateIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde
 		return errors.Trace(err)
 	}
 
-	if !unique && tblInfo.IsCommonHandle {
-		// Ensure new created non-unique secondary-index's len + primary-key's len <= MaxIndexLength in clustered index table.
-		var pkLen, idxLen int
-		pkLen, err = indexColumnsLen(tblInfo.Columns, tables.FindPrimaryIndex(tblInfo).Columns)
-		if err != nil {
-			return err
-		}
-		idxLen, err = indexColumnsLen(finalColumns, indexColumns)
-		if err != nil {
-			return err
-		}
-		if pkLen+idxLen > config.GetGlobalConfig().MaxIndexLength {
-			return errTooLongKey.GenWithStackByArgs(config.GetGlobalConfig().MaxIndexLength)
-		}
-	}
-
 	global := false
 	if unique && tblInfo.GetPartitionInfo() != nil {
 		ck, err := checkPartitionKeysConstraint(tblInfo.GetPartitionInfo(), indexColumns, tblInfo)
diff --git a/ddl/index.go b/ddl/index.go
index 8585487af10a0..e79b07af523db 100644
--- a/ddl/index.go
+++ b/ddl/index.go
@@ -110,12 +110,12 @@ func checkPKOnGeneratedColumn(tblInfo *model.TableInfo, indexPartSpecifications
 	return lastCol, nil
 }
 
-func checkIndexPrefixLength(columns []*model.ColumnInfo, idxColumns []*model.IndexColumn, pkLenAppendToKey int) error {
+func checkIndexPrefixLength(columns []*model.ColumnInfo, idxColumns []*model.IndexColumn) error {
 	idxLen, err := indexColumnsLen(columns, idxColumns)
 	if err != nil {
 		return err
 	}
-	if idxLen+pkLenAppendToKey > config.GetGlobalConfig().MaxIndexLength {
+	if idxLen > config.GetGlobalConfig().MaxIndexLength {
 		return errTooLongKey.GenWithStackByArgs(config.GetGlobalConfig().MaxIndexLength)
 	}
 	return nil

From 68a32a059dbdc05b7ffded591d7802d901d0ea89 Mon Sep 17 00:00:00 2001
From: TonsnakeLin
Date: Mon, 22 Nov 2021 09:44:52 +0800
Subject: [PATCH 2/4] ddl: Do not consider the clustered index when checking the length of the secondary index (#29660)

close #29658
---
 ddl/ddl_api.go | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go
index 7e3ab0ed003bb..e7408b34abd7d 100644
--- a/ddl/ddl_api.go
+++ b/ddl/ddl_api.go
@@ -4237,9 +4237,6 @@ func (d *ddl) getModifiableColumnJob(ctx context.Context, sctx sessionctx.Contex
 // Index has a max-prefix-length constraint. eg: a varchar(100), index idx(a), modifying column a to a varchar(4000)
 // will cause index idx to break the max-prefix-length constraint.
 //
-// For clustered index:
-// Change column in pk need recheck all non-unique index, new pk len + index len < maxIndexLength.
-// Change column in secondary only need related index, pk len + new index len < maxIndexLength.
 func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol *model.ColumnInfo) error {
 	columns := make([]*model.ColumnInfo, 0, len(tbInfo.Columns))
 	columns = append(columns, tbInfo.Columns...)
@@ -4255,7 +4252,8 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol
 
 	pkIndex := tables.FindPrimaryIndex(tbInfo)
 
-	checkOneIndex := func(indexInfo *model.IndexInfo) (modified bool, err error) {
+	checkOneIndex := func(indexInfo *model.IndexInfo) (err error) {
+		var modified bool
 		for _, col := range indexInfo.Columns {
 			if col.Name.L == originalCol.Name.L {
 				modified = true
@@ -4273,11 +4271,11 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol
 		return
 	}
 
-	// Check primary key first and get "does primary key's column has be modified?" info.
+	// Check primary key first.
 	var err error
 
 	if pkIndex != nil {
-		_, err = checkOneIndex(pkIndex)
+		err = checkOneIndex(pkIndex)
 		if err != nil {
 			return err
 		}
@@ -4290,7 +4288,7 @@ func checkColumnWithIndexConstraint(tbInfo *model.TableInfo, originalCol, newCol
 		}
 		// checkOneIndex validates the index (including its prefix length) only
 		// when it contains the modified column.
-		_, err = checkOneIndex(indexInfo)
+		err = checkOneIndex(indexInfo)
 		if err != nil {
 			return err
 		}

From 8f129903a46b7b6046580f917b9a2b392679d60e Mon Sep 17 00:00:00 2001
From: TonsnakeLin
Date: Tue, 7 Dec 2021 11:17:32 +0800
Subject: [PATCH 3/4] fix the bug that TiDB doesn't log any slow log if the log level is higher than warn (#30461)

close #30309

Signed-off-by: TonsnakeLin
---
 executor/adapter.go               | 8 ++++----
 util/logutil/log_test.go          | 8 ++++++--
 util/logutil/slow_query_logger.go | 3 +++
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/executor/adapter.go b/executor/adapter.go
index f8098751e5f08..03b0a5d964404 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -1022,12 +1022,12 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) {
 	if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
 		slowItems.PrevStmt = sessVars.PrevStmt.String()
 	}
+	slowLog := sessVars.SlowLogFormat(slowItems)
 	if trace.IsEnabled() {
-		trace.Log(a.GoCtx, "details", sessVars.SlowLogFormat(slowItems))
+		trace.Log(a.GoCtx, "details", slowLog)
 	}
-	if costTime < threshold {
-		logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(slowItems))
-	} else {
+	logutil.SlowQueryLogger.Warn(slowLog)
+	if costTime >= threshold {
 		logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(slowItems))
 		if sessVars.InRestrictedSQL {
 			totalQueryProcHistogramInternal.Observe(costTime.Seconds())
diff --git a/util/logutil/log_test.go b/util/logutil/log_test.go
index d4f671f05dc92..713c34de77ab5 100644
--- a/util/logutil/log_test.go
+++ b/util/logutil/log_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/log"
 	"github.com/stretchr/testify/require"
 	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
 )
 
 func TestZapLoggerWithKeys(t *testing.T) {
@@ -115,13 +116,16 @@ func TestGrpcLoggerCreation(t *testing.T) {
 }
 
 func TestSlowQueryLoggerCreation(t *testing.T) {
-	level := "warn"
+	level := "Error"
 	conf := NewLogConfig(level, DefaultLogFormat, "", EmptyFileLogConfig, false)
 	_, prop, err := newSlowQueryLogger(conf)
 	// assert after init slow query logger, the original conf is not changed
 	require.Equal(t, conf.Level, level)
 	require.Nil(t, err)
-	require.Equal(t, prop.Level.String(), conf.Level)
+	// The slow query logger doesn't use the level of the global log config, and its
+	// level should be no higher than WarnLevel, which it uses to log slow queries.
+	require.NotEqual(t, conf.Level, prop.Level.String())
+	require.True(t, prop.Level.Level() <= zapcore.WarnLevel)
 }
 
 func TestGlobalLoggerReplace(t *testing.T) {
diff --git a/util/logutil/slow_query_logger.go b/util/logutil/slow_query_logger.go
index 2588c36131fd9..0c7fd10982314 100644
--- a/util/logutil/slow_query_logger.go
+++ b/util/logutil/slow_query_logger.go
@@ -32,6 +32,9 @@ func newSlowQueryLogger(cfg *LogConfig) (*zap.Logger, *log.ZapProperties, error)
 	// copy the global log config to slow log config
 	// if the filename of slow log config is empty, slow log will behave the same as global log.
 	sqConfig := cfg.Config
+	// The level of the global log config doesn't affect the slow query logger,
+	// which decides whether to log a query based on its execution duration.
+	sqConfig.Level = LogConfig{}.Level
 	if len(cfg.SlowQueryFile) != 0 {
 		sqConfig.File = cfg.File
 		sqConfig.File.Filename = cfg.SlowQueryFile

From 5b0286278edc20629c819c14c93dc3a8acce2d7d Mon Sep 17 00:00:00 2001
From: TonsnakeLin
Date: Tue, 7 Dec 2021 15:11:32 +0800
Subject: [PATCH 4/4] fix the bug that TiDB doesn't log any slow log (#30461)

close #30309

Signed-off-by: TonsnakeLin
---
 executor/adapter.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/executor/adapter.go b/executor/adapter.go
index bacf2e1d4cc0e..87f87a9712516 100644
--- a/executor/adapter.go
+++ b/executor/adapter.go
@@ -1028,7 +1028,6 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool, hasMoreResults bool) {
 	}
 	logutil.SlowQueryLogger.Warn(slowLog)
 	if costTime >= threshold {
-		logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(slowItems))
 		if sessVars.InRestrictedSQL {
 			totalQueryProcHistogramInternal.Observe(costTime.Seconds())
			totalCopProcHistogramInternal.Observe(execDetail.TimeDetail.ProcessTime.Seconds())
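
As a rough standalone illustration of the behavior that patch 3 aims for (using plain go.uber.org/zap rather than TiDB's actual util/logutil wrappers; the logger names and the concrete levels below are assumptions for the sketch, not the project's code): the slow-query logger copies the global log config but resets the level to a default, so setting the global level above warn can no longer filter out the Warn-level slow-log entries.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// A global logger configured at "error", the situation described in the fix:
	// its level check drops Warn-level records.
	globalCfg := zap.NewProductionConfig()
	globalCfg.Level = zap.NewAtomicLevelAt(zapcore.ErrorLevel)
	globalLogger, _ := globalCfg.Build()
	defer globalLogger.Sync()

	// The slow-query logger reuses the global config but resets the level to a
	// default instead of inheriting the global one (compare
	// `sqConfig.Level = LogConfig{}.Level` in patch 3), so slow-query entries
	// written at Warn always pass its level check.
	sqCfg := globalCfg
	sqCfg.Level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
	slowQueryLogger, _ := sqCfg.Build()
	defer slowQueryLogger.Sync()

	globalLogger.Warn("dropped: the global level is error")
	slowQueryLogger.Warn("kept: the slow-query logger keeps its own, lower level")
}

The new assertions in util/logutil/log_test.go check this property from the other side: with the global level set to "Error", the slow-query logger's own level must still be at or below zapcore.WarnLevel.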