diff --git a/bindinfo/capture_serial_test.go b/bindinfo/capture_serial_test.go index bb73a3777a09d..e65697c3f2a21 100644 --- a/bindinfo/capture_serial_test.go +++ b/bindinfo/capture_serial_test.go @@ -332,9 +332,8 @@ func TestCapturedBindingCharset(t *testing.T) { require.Len(t, rows, 1) require.Equal(t, "update `test` . `t` set `name` = ? where `name` <= ?", rows[0][0]) require.Equal(t, "UPDATE /*+ use_index(@`upd_1` `test`.`t` `idx`)*/ `test`.`t` SET `name`='hello' WHERE `name` <= 'abc'", rows[0][1]) - // Charset and Collation are empty now, they are not used currently. - require.Equal(t, "", rows[0][6]) - require.Equal(t, "", rows[0][7]) + require.Equal(t, "utf8mb4", rows[0][6]) + require.Equal(t, "utf8mb4_bin", rows[0][7]) } func TestConcurrentCapture(t *testing.T) { diff --git a/br/pkg/lightning/backend/tidb/tidb_test.go b/br/pkg/lightning/backend/tidb/tidb_test.go index 3739bf7551db9..e37f53d12b27e 100644 --- a/br/pkg/lightning/backend/tidb/tidb_test.go +++ b/br/pkg/lightning/backend/tidb/tidb_test.go @@ -417,13 +417,10 @@ func (s *mysqlSuite) TestWriteRowsErrorDowngrading(c *C) { ExpectExec("INSERT INTO `tidb_lightning_errors`\\.type_error_v1.*"). WithArgs(sqlmock.AnyArg(), "`foo`.`bar`", "9.csv", int64(0), nonRetryableError.Error(), "(3)"). WillReturnResult(driver.ResultNoRows) + // the forth row will exceed the error threshold, won't record this error s.mockDB. ExpectExec("\\QINSERT INTO `foo`.`bar`(`a`) VALUES(4)\\E"). WillReturnError(nonRetryableError) - s.mockDB. - ExpectExec("INSERT INTO `tidb_lightning_errors`\\.type_error_v1.*"). - WithArgs(sqlmock.AnyArg(), "`foo`.`bar`", "10.csv", int64(0), nonRetryableError.Error(), "(4)"). 
- WillReturnResult(driver.ResultNoRows) ctx := context.Background() logger := log.L() diff --git a/br/pkg/lightning/config/config.go b/br/pkg/lightning/config/config.go index b0c926d561691..8f2e6f2dfa9ac 100644 --- a/br/pkg/lightning/config/config.go +++ b/br/pkg/lightning/config/config.go @@ -585,6 +585,48 @@ func (d *Duration) MarshalJSON() ([]byte, error) { return []byte(fmt.Sprintf(`"%s"`, d.Duration)), nil } +// Charset defines character set +type Charset int + +const ( + Binary Charset = iota + UTF8MB4 + GB18030 + GBK +) + +// String return the string value of charset +func (c Charset) String() string { + switch c { + case Binary: + return "binary" + case UTF8MB4: + return "utf8mb4" + case GB18030: + return "gb18030" + case GBK: + return "gbk" + default: + return "unknown_charset" + } +} + +// ParseCharset parser character set for string +func ParseCharset(dataCharacterSet string) (Charset, error) { + switch strings.ToLower(dataCharacterSet) { + case "", "binary": + return Binary, nil + case "utf8mb4": + return UTF8MB4, nil + case "gb18030": + return GB18030, nil + case "gbk": + return GBK, nil + default: + return Binary, errors.Errorf("found unsupported data-character-set: %s", dataCharacterSet) + } +} + func NewConfig() *Config { return &Config{ App: Lightning{ @@ -786,6 +828,16 @@ func (cfg *Config) Adjust(ctx context.Context) error { if len(cfg.Mydumper.DataCharacterSet) == 0 { cfg.Mydumper.DataCharacterSet = defaultCSVDataCharacterSet } + charset, err1 := ParseCharset(cfg.Mydumper.DataCharacterSet) + if err1 != nil { + return err1 + } + if charset == GBK || charset == GB18030 { + log.L().Warn( + "incompatible strings may be encountered during the transcoding process and will be replaced, please be aware of the risk of not being able to retain the original information", + zap.String("source-character-set", charset.String()), + zap.ByteString("invalid-char-replacement", []byte(cfg.Mydumper.DataInvalidCharReplace))) + } if cfg.TikvImporter.Backend == "" { 
return errors.New("tikv-importer.backend must not be empty!") diff --git a/br/pkg/lightning/config/config_test.go b/br/pkg/lightning/config/config_test.go index 8fb5f2f61ea41..1e7e751b20b3d 100644 --- a/br/pkg/lightning/config/config_test.go +++ b/br/pkg/lightning/config/config_test.go @@ -854,3 +854,23 @@ func (s *configTestSuite) TestCheckpointKeepStrategy(c *C) { c.Assert(res, DeepEquals, []byte(value)) } } + +func (s configTestSuite) TestLoadCharsetFromConfig(c *C) { + cases := map[string]config.Charset{ + "binary": config.Binary, + "BINARY": config.Binary, + "GBK": config.GBK, + "gbk": config.GBK, + "Gbk": config.GBK, + "gB18030": config.GB18030, + "GB18030": config.GB18030, + } + for k, v := range cases { + charset, err := config.ParseCharset(k) + c.Assert(err, IsNil) + c.Assert(charset, Equals, v) + } + + _, err := config.ParseCharset("Unknown") + c.Assert(err, ErrorMatches, "found unsupported data-character-set: Unknown") +} diff --git a/br/pkg/lightning/errormanager/errormanager.go b/br/pkg/lightning/errormanager/errormanager.go index aaa155576bb19..e301af21cb826 100644 --- a/br/pkg/lightning/errormanager/errormanager.go +++ b/br/pkg/lightning/errormanager/errormanager.go @@ -1,9 +1,24 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package errormanager import ( "context" "database/sql" "fmt" + "strings" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/lightning/common" @@ -94,6 +109,7 @@ type ErrorManager struct { taskID int64 schemaEscaped string remainingError config.MaxError + dupResolution config.DuplicateResolutionAlgorithm } // New creates a new error manager. @@ -111,7 +127,7 @@ func New(db *sql.DB, cfg *config.Config) *ErrorManager { // Init creates the schemas and tables to store the task information. func (em *ErrorManager) Init(ctx context.Context) error { - if em.db == nil { + if em.db == nil || (em.remainingError.Type.Load() == 0 && em.dupResolution == config.DupeResAlgNone) { return nil } @@ -120,15 +136,21 @@ func (em *ErrorManager) Init(ctx context.Context) error { Logger: log.L(), } - sqls := [][2]string{ - {"create task info schema", createSchema}, - {"create syntax error table", createSyntaxErrorTable}, - {"create type error table", createTypeErrorTable}, - {"create conflict error table", createConflictErrorTable}, + sqls := make([][2]string, 0) + sqls = append(sqls, [2]string{"create task info schema", createSchema}) + if em.remainingError.Syntax.Load() > 0 { + sqls = append(sqls, [2]string{"create syntax error table", createSyntaxErrorTable}) + } + if em.remainingError.Type.Load() > 0 { + sqls = append(sqls, [2]string{"create type error table", createTypeErrorTable}) + } + if em.dupResolution != config.DupeResAlgNone && em.remainingError.Conflict.Load() > 0 { + sqls = append(sqls, [2]string{"create conflict error table", createConflictErrorTable}) } for _, sql := range sqls { - err := exec.Exec(ctx, sql[0], fmt.Sprintf(sql[1], em.schemaEscaped)) + // trim spaces for unit test pattern matching + err := exec.Exec(ctx, sql[0], strings.TrimSpace(fmt.Sprintf(sql[1], em.schemaEscaped))) if err != nil { return err } @@ -148,6 +170,11 @@ func (em *ErrorManager) RecordTypeError( rowText string, encodeErr error, ) error { + // elide the encode error if needed. 
+ if em.remainingError.Type.Dec() < 0 { + return encodeErr + } + if em.db != nil { errMsg := encodeErr.Error() logger = logger.With( @@ -173,11 +200,6 @@ func (em *ErrorManager) RecordTypeError( return multierr.Append(encodeErr, err) } } - - // elide the encode error if needed. - if em.remainingError.Type.Dec() < 0 { - return encodeErr - } return nil } diff --git a/br/pkg/lightning/errormanager/errormanager_test.go b/br/pkg/lightning/errormanager/errormanager_test.go new file mode 100644 index 0000000000000..4434cff5012bc --- /dev/null +++ b/br/pkg/lightning/errormanager/errormanager_test.go @@ -0,0 +1,83 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errormanager + +import ( + "context" + "math" + "testing" + + "github.com/DATA-DOG/go-sqlmock" + . 
"github.com/pingcap/check" + "go.uber.org/atomic" + + "github.com/pingcap/tidb/br/pkg/lightning/config" +) + +var _ = Suite(errorManagerSuite{}) + +func TestErrorManager(t *testing.T) { + TestingT(t) +} + +type errorManagerSuite struct{} + +func (e errorManagerSuite) TestInit(c *C) { + db, mock, err := sqlmock.New() + c.Assert(err, IsNil) + + em := &ErrorManager{ + db: db, + schemaEscaped: "`lightning_errors`", + remainingError: config.MaxError{ + Charset: *atomic.NewInt64(math.MaxInt64), + Conflict: *atomic.NewInt64(math.MaxInt64), + }, + } + + ctx := context.Background() + err = em.Init(ctx) + c.Assert(err, IsNil) + + em.dupResolution = config.DupeResAlgRecord + mock.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_errors`;"). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.conflict_error_v1.*"). + WillReturnResult(sqlmock.NewResult(2, 1)) + err = em.Init(ctx) + c.Assert(err, IsNil) + + em.dupResolution = config.DupeResAlgNone + em.remainingError.Type.Store(1) + mock.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_errors`;"). + WillReturnResult(sqlmock.NewResult(3, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.type_error_v1.*"). + WillReturnResult(sqlmock.NewResult(4, 1)) + err = em.Init(ctx) + c.Assert(err, IsNil) + + em.dupResolution = config.DupeResAlgRecord + em.remainingError.Type.Store(1) + mock.ExpectExec("CREATE SCHEMA IF NOT EXISTS `lightning_errors`.*"). + WillReturnResult(sqlmock.NewResult(5, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.type_error_v1.*"). + WillReturnResult(sqlmock.NewResult(6, 1)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS `lightning_errors`\\.conflict_error_v1.*"). 
+ WillReturnResult(sqlmock.NewResult(7, 1)) + err = em.Init(ctx) + c.Assert(err, IsNil) + + c.Assert(mock.ExpectationsWereMet(), IsNil) +} diff --git a/br/pkg/lightning/mydump/charset_convertor.go b/br/pkg/lightning/mydump/charset_convertor.go index e4cd7a4e72c1d..81e57be681a18 100644 --- a/br/pkg/lightning/mydump/charset_convertor.go +++ b/br/pkg/lightning/mydump/charset_convertor.go @@ -19,41 +19,17 @@ import ( "unicode/utf8" "github.com/pingcap/errors" - "github.com/pingcap/tidb/br/pkg/lightning/log" - "go.uber.org/zap" "golang.org/x/text/encoding" "golang.org/x/text/encoding/simplifiedchinese" -) - -type Charset int -const ( - Binary Charset = iota - UTF8MB4 - GB18030 - GBK + "github.com/pingcap/tidb/br/pkg/lightning/config" ) -func (c Charset) String() string { - switch c { - case Binary: - return "binary" - case UTF8MB4: - return "utf8mb4" - case GB18030: - return "gb18030" - case GBK: - return "gbk" - default: - return "unknown_charset" - } -} - // CharsetConvertor is used to convert a character set to utf8mb4 encoding. // In Lightning, we mainly use it to do the GB18030/GBK -> UTF8MB4 conversion. type CharsetConvertor struct { // sourceCharacterSet represents the charset that the data source uses. - sourceCharacterSet Charset + sourceCharacterSet config.Charset // invalidCharReplacement is the default replacement character bytes for the invalid content, e.g "\ufffd". invalidCharReplacement string @@ -63,14 +39,10 @@ type CharsetConvertor struct { // NewCharsetConvertor creates a new CharsetConvertor. 
func NewCharsetConvertor(dataCharacterSet, dataInvalidCharReplace string) (*CharsetConvertor, error) { - sourceCharacterSet, err := loadCharsetFromConfig(dataCharacterSet) + sourceCharacterSet, err := config.ParseCharset(dataCharacterSet) if err != nil { return nil, err } - log.L().Warn( - "incompatible strings may be encountered during the transcoding process and will be replaced, please be aware of the risk of not being able to retain the original information", - zap.String("source-character-set", sourceCharacterSet.String()), - zap.ByteString("invalid-char-replacement", []byte(dataInvalidCharReplace))) cc := &CharsetConvertor{ sourceCharacterSet, dataInvalidCharReplace, @@ -87,29 +59,14 @@ func NewCharsetConvertor(dataCharacterSet, dataInvalidCharReplace string) (*Char return cc, nil } -func loadCharsetFromConfig(dataCharacterSet string) (Charset, error) { - switch dataCharacterSet { - case "", "binary": - return Binary, nil - case "utf8mb4": - return UTF8MB4, nil - case "gb18030": - return GB18030, nil - case "gbk": - return GBK, nil - default: - return Binary, errors.Errorf("found unsupported data-character-set: %s", dataCharacterSet) - } -} - func (cc *CharsetConvertor) initDecoder() error { switch cc.sourceCharacterSet { - case Binary, UTF8MB4: + case config.Binary, config.UTF8MB4: return nil - case GB18030: + case config.GB18030: cc.decoder = simplifiedchinese.GB18030.NewDecoder() return nil - case GBK: + case config.GBK: cc.decoder = simplifiedchinese.GBK.NewDecoder() return nil } @@ -118,12 +75,12 @@ func (cc *CharsetConvertor) initDecoder() error { func (cc *CharsetConvertor) initEncoder() error { switch cc.sourceCharacterSet { - case Binary, UTF8MB4: + case config.Binary, config.UTF8MB4: return nil - case GB18030: + case config.GB18030: cc.encoder = simplifiedchinese.GB18030.NewEncoder() return nil - case GBK: + case config.GBK: cc.encoder = simplifiedchinese.GBK.NewEncoder() return nil } @@ -151,7 +108,7 @@ func (cc *CharsetConvertor) Decode(src 
string) (string, error) { func (cc *CharsetConvertor) precheck(src string) bool { // No need to convert the charset encoding, just return the original data. if len(src) == 0 || cc == nil || - cc.sourceCharacterSet == Binary || cc.sourceCharacterSet == UTF8MB4 || + cc.sourceCharacterSet == config.Binary || cc.sourceCharacterSet == config.UTF8MB4 || cc.decoder == nil || cc.encoder == nil { return false } diff --git a/br/pkg/lightning/mydump/charset_convertor_test.go b/br/pkg/lightning/mydump/charset_convertor_test.go index 5220f0575360c..cf091c09b142e 100644 --- a/br/pkg/lightning/mydump/charset_convertor_test.go +++ b/br/pkg/lightning/mydump/charset_convertor_test.go @@ -12,14 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package mydump_test +package mydump import ( "io" "os" . "github.com/pingcap/check" - "github.com/pingcap/tidb/br/pkg/lightning/mydump" ) var _ = Suite(&testCharsetConvertorSuite{}) @@ -51,7 +50,7 @@ func (s testCharsetConvertorSuite) TestCharsetConvertor(c *C) { gbkData, err := io.ReadAll(gbkReader) c.Assert(err, IsNil) - cc, err := mydump.NewCharsetConvertor("gb18030", "\ufffd") + cc, err := NewCharsetConvertor("gb18030", "\ufffd") c.Assert(err, IsNil) gbkToUTF8Data, err := cc.Decode(string(gbkData)) c.Assert(err, IsNil) @@ -79,7 +78,7 @@ func (s testCharsetConvertorSuite) TestInvalidCharReplace(c *C) { c.Assert(err, IsNil) gbkData, err := io.ReadAll(gbkReader) c.Assert(err, IsNil) - cc, err := mydump.NewCharsetConvertor("gb18030", dataInvalidCharReplace) + cc, err := NewCharsetConvertor("gb18030", dataInvalidCharReplace) c.Assert(err, IsNil) gbkToUTF8Data, err := cc.Decode(string(gbkData)) c.Assert(err, IsNil) diff --git a/br/pkg/version/build/info_test.go b/br/pkg/version/build/info_test.go index b90deab036407..4714d21e78232 100644 --- a/br/pkg/version/build/info_test.go +++ b/br/pkg/version/build/info_test.go @@ -6,28 +6,22 @@ import ( "strings" "testing" - . 
"github.com/pingcap/check" + "github.com/stretchr/testify/require" ) -type infoSuite struct{} +func TestInfo(t *testing.T) { + t.Parallel() -var _ = Suite(&infoSuite{}) - -func TestT(t *testing.T) { - TestingT(t) -} - -func (*infoSuite) TestInfo(c *C) { info := Info() lines := strings.Split(info, "\n") - c.Assert(lines[0], Matches, "Release Version.*") - c.Assert(lines[1], Matches, "Git Commit Hash.*") - c.Assert(lines[2], Matches, "Git Branch.*") - c.Assert(lines[3], Matches, "Go Version.*") - c.Assert(lines[4], Matches, "UTC Build Time.*") + require.Regexp(t, "Release Version.*", lines[0]) + require.Regexp(t, "Git Commit Hash.*", lines[1]) + require.Regexp(t, "Git Branch.*", lines[2]) + require.Regexp(t, "Go Version.*", lines[3]) + require.Regexp(t, "UTC Build Time.*", lines[4]) } -func (*infoSuite) TestLogInfo(c *C) { +func TestLogInfo(t *testing.T) { LogInfo(BR) LogInfo(Lightning) } diff --git a/br/pkg/version/version_test.go b/br/pkg/version/version_test.go index e1f614c313699..a30668b6f99d6 100644 --- a/br/pkg/version/version_test.go +++ b/br/pkg/version/version_test.go @@ -11,20 +11,12 @@ import ( "github.com/DATA-DOG/go-sqlmock" "github.com/coreos/go-semver/semver" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/tidb/br/pkg/version/build" + "github.com/stretchr/testify/require" pd "github.com/tikv/pd/client" ) -type checkSuite struct{} - -var _ = Suite(&checkSuite{}) - -func TestT(t *testing.T) { - TestingT(t) -} - type mockPDClient struct { pd.Client getAllStores func() []*metapb.Store @@ -43,7 +35,9 @@ func tiflash(version string) []*metapb.Store { } } -func (s *checkSuite) TestCheckClusterVersion(c *C) { +func TestCheckClusterVersion(t *testing.T) { + t.Parallel() + mock := mockPDClient{ Client: nil, } @@ -54,7 +48,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return tiflash("v4.0.0-rc.1") } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, `incompatible.*version v4.0.0-rc.1, try update it to 4.0.0.*`) + require.Error(t, err) + require.Regexp(t, `incompatible.*version v4.0.0-rc.1, try update it to 4.0.0.*`, err.Error()) } { @@ -63,7 +58,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return tiflash("v3.1.0-beta.1") } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, `incompatible.*version v3.1.0-beta.1, try update it to 3.1.0.*`) + require.Error(t, err) + require.Regexp(t, `incompatible.*version v3.1.0-beta.1, try update it to 3.1.0.*`, err.Error()) } { @@ -72,7 +68,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return tiflash("v3.0.15") } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, `incompatible.*version v3.0.15, try update it to 3.1.0.*`) + require.Error(t, err) + require.Regexp(t, `incompatible.*version v3.0.15, try update it to 3.1.0.*`, err.Error()) } { @@ -81,7 +78,7 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: minTiKVVersion.String()}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, IsNil) 
+ require.NoError(t, err) } { @@ -91,7 +88,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: `v2.1.0`}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, ".*TiKV .* don't support BR, please upgrade cluster .*") + require.Error(t, err) + require.Regexp(t, ".*TiKV .* don't support BR, please upgrade cluster .*", err.Error()) } { @@ -101,7 +99,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: minTiKVVersion.String()}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, "TiKV .* mismatch, please .*") + require.Error(t, err) + require.Regexp(t, "TiKV .* mismatch, please .*", err.Error()) } { @@ -111,7 +110,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-rc"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, "TiKV .* major version mismatch, please .*") + require.Error(t, err) + require.Regexp(t, "TiKV .* major version mismatch, please .*", err.Error()) } { @@ -121,7 +121,8 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-beta.1"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, ErrorMatches, "TiKV .* mismatch, please .*") + require.Error(t, err) + require.Regexp(t, "TiKV .* mismatch, please .*", err.Error()) } { @@ -131,7 +132,7 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-rc.1"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, IsNil) + require.NoError(t, err) } { @@ -140,7 +141,7 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-rc.1"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBackup(semver.New("4.0.12"))) - 
c.Assert(err, IsNil) + require.NoError(t, err) } { @@ -149,7 +150,7 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-rc.1"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBackup(semver.New("5.0.0-rc"))) - c.Assert(err, Not(IsNil)) + require.Error(t, err) } { @@ -159,127 +160,123 @@ func (s *checkSuite) TestCheckClusterVersion(c *C) { return []*metapb.Store{{Version: "v4.0.0-rc.2"}} } err := CheckClusterVersion(context.Background(), &mock, CheckVersionForBR) - c.Assert(err, IsNil) + require.NoError(t, err) } } -func (s *checkSuite) TestCompareVersion(c *C) { - c.Assert(semver.New("4.0.0-rc").Compare(*semver.New("4.0.0-rc.2")), Equals, -1) - c.Assert(semver.New("4.0.0-beta.3").Compare(*semver.New("4.0.0-rc.2")), Equals, -1) - c.Assert(semver.New("4.0.0-rc.1").Compare(*semver.New("4.0.0")), Equals, -1) - c.Assert(semver.New("4.0.0-beta.1").Compare(*semver.New("4.0.0")), Equals, -1) - c.Assert(semver.New(removeVAndHash("4.0.0-rc-35-g31dae220")).Compare(*semver.New("4.0.0-rc.2")), Equals, -1) - c.Assert(semver.New(removeVAndHash("4.0.0-9-g30f0b014")).Compare(*semver.New("4.0.0-rc.1")), Equals, 1) - c.Assert(semver.New(removeVAndHash("v3.0.0-beta-211-g09beefbe0-dirty")). - Compare(*semver.New("3.0.0-beta")), Equals, 0) - c.Assert(semver.New(removeVAndHash("v3.0.5-dirty")). - Compare(*semver.New("3.0.5")), Equals, 0) - c.Assert(semver.New(removeVAndHash("v3.0.5-beta.12-dirty")). - Compare(*semver.New("3.0.5-beta.12")), Equals, 0) - c.Assert(semver.New(removeVAndHash("v2.1.0-rc.1-7-g38c939f-dirty")). 
- Compare(*semver.New("2.1.0-rc.1")), Equals, 0) +func TestCompareVersion(t *testing.T) { + t.Parallel() + + require.Equal(t, -1, semver.New("4.0.0-rc").Compare(*semver.New("4.0.0-rc.2"))) + require.Equal(t, -1, semver.New("4.0.0-beta.3").Compare(*semver.New("4.0.0-rc.2"))) + require.Equal(t, -1, semver.New("4.0.0-rc.1").Compare(*semver.New("4.0.0"))) + require.Equal(t, -1, semver.New("4.0.0-beta.1").Compare(*semver.New("4.0.0"))) + require.Equal(t, -1, semver.New(removeVAndHash("4.0.0-rc-35-g31dae220")).Compare(*semver.New("4.0.0-rc.2"))) + require.Equal(t, 1, semver.New(removeVAndHash("4.0.0-9-g30f0b014")).Compare(*semver.New("4.0.0-rc.1"))) + require.Equal(t, 0, semver.New(removeVAndHash("v3.0.0-beta-211-g09beefbe0-dirty")). + Compare(*semver.New("3.0.0-beta"))) + require.Equal(t, 0, semver.New(removeVAndHash("v3.0.5-dirty")). + Compare(*semver.New("3.0.5"))) + require.Equal(t, 0, semver.New(removeVAndHash("v3.0.5-beta.12-dirty")). + Compare(*semver.New("3.0.5-beta.12"))) + require.Equal(t, 0, semver.New(removeVAndHash("v2.1.0-rc.1-7-g38c939f-dirty")). 
+ Compare(*semver.New("2.1.0-rc.1"))) } -func (s *checkSuite) TestNextMajorVersion(c *C) { +func TestNextMajorVersion(t *testing.T) { + t.Parallel() + build.ReleaseVersion = "v4.0.0-rc.1" - c.Assert(NextMajorVersion().String(), Equals, "5.0.0") + require.Equal(t, "5.0.0", NextMajorVersion().String()) build.ReleaseVersion = "4.0.0-rc-35-g31dae220" - c.Assert(NextMajorVersion().String(), Equals, "5.0.0") + require.Equal(t, "5.0.0", NextMajorVersion().String()) build.ReleaseVersion = "4.0.0-9-g30f0b014" - c.Assert(NextMajorVersion().String(), Equals, "5.0.0") + require.Equal(t, "5.0.0", NextMajorVersion().String()) build.ReleaseVersion = "v5.0.0-rc.2" - c.Assert(NextMajorVersion().String(), Equals, "6.0.0") + require.Equal(t, "6.0.0", NextMajorVersion().String()) build.ReleaseVersion = "v5.0.0-master" - c.Assert(NextMajorVersion().String(), Equals, "6.0.0") + require.Equal(t, "6.0.0", NextMajorVersion().String()) } -func (s *checkSuite) TestExtractTiDBVersion(c *C) { +func TestExtractTiDBVersion(t *testing.T) { + t.Parallel() + vers, err := ExtractTiDBVersion("5.7.10-TiDB-v2.1.0-rc.1-7-g38c939f") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("2.1.0-rc.1")) + require.NoError(t, err) + require.Equal(t, *semver.New("2.1.0-rc.1"), *vers) vers, err = ExtractTiDBVersion("5.7.10-TiDB-v2.0.4-1-g06a0bf5") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("2.0.4")) + require.NoError(t, err) + require.Equal(t, *semver.New("2.0.4"), *vers) vers, err = ExtractTiDBVersion("5.7.10-TiDB-v2.0.7") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("2.0.7")) + require.NoError(t, err) + require.Equal(t, *semver.New("2.0.7"), *vers) vers, err = ExtractTiDBVersion("8.0.12-TiDB-v3.0.5-beta.12") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("3.0.5-beta.12")) + require.NoError(t, err) + require.Equal(t, *semver.New("3.0.5-beta.12"), *vers) vers, err = ExtractTiDBVersion("5.7.25-TiDB-v3.0.0-beta-211-g09beefbe0-dirty") - c.Assert(err, IsNil) - 
c.Assert(*vers, Equals, *semver.New("3.0.0-beta")) + require.NoError(t, err) + require.Equal(t, *semver.New("3.0.0-beta"), *vers) vers, err = ExtractTiDBVersion("8.0.12-TiDB-v3.0.5-dirty") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("3.0.5")) + require.NoError(t, err) + require.Equal(t, *semver.New("3.0.5"), *vers) vers, err = ExtractTiDBVersion("8.0.12-TiDB-v3.0.5-beta.12-dirty") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("3.0.5-beta.12")) + require.NoError(t, err) + require.Equal(t, *semver.New("3.0.5-beta.12"), *vers) vers, err = ExtractTiDBVersion("5.7.10-TiDB-v2.1.0-rc.1-7-g38c939f-dirty") - c.Assert(err, IsNil) - c.Assert(*vers, Equals, *semver.New("2.1.0-rc.1")) + require.NoError(t, err) + require.Equal(t, *semver.New("2.1.0-rc.1"), *vers) _, err = ExtractTiDBVersion("") - c.Assert(err, ErrorMatches, "not a valid TiDB version.*") + require.Error(t, err) + require.Regexp(t, "not a valid TiDB version.*", err.Error()) _, err = ExtractTiDBVersion("8.0.12") - c.Assert(err, ErrorMatches, "not a valid TiDB version.*") + require.Error(t, err) + require.Regexp(t, "not a valid TiDB version.*", err.Error()) _, err = ExtractTiDBVersion("not-a-valid-version") - c.Assert(err, NotNil) + require.Error(t, err) } -func (s *checkSuite) TestCheckVersion(c *C) { +func TestCheckVersion(t *testing.T) { + t.Parallel() + err := CheckVersion("TiNB", *semver.New("2.3.5"), *semver.New("2.1.0"), *semver.New("3.0.0")) - c.Assert(err, IsNil) + require.NoError(t, err) err = CheckVersion("TiNB", *semver.New("2.1.0"), *semver.New("2.3.5"), *semver.New("3.0.0")) - c.Assert(err, ErrorMatches, "TiNB version too old.*") + require.Error(t, err) + require.Regexp(t, "TiNB version too old.*", err.Error()) err = CheckVersion("TiNB", *semver.New("3.1.0"), *semver.New("2.3.5"), *semver.New("3.0.0")) - c.Assert(err, ErrorMatches, "TiNB version too new.*") + require.Error(t, err) + require.Regexp(t, "TiNB version too new.*", err.Error()) err = CheckVersion("TiNB", 
*semver.New("3.0.0-beta"), *semver.New("2.3.5"), *semver.New("3.0.0")) - c.Assert(err, ErrorMatches, "TiNB version too new.*") + require.Error(t, err) + require.Regexp(t, "TiNB version too new.*", err.Error()) } -type versionEqualsC struct{} - -func (v versionEqualsC) Info() *CheckerInfo { - return &CheckerInfo{ - Name: "VersionEquals", - Params: []string{"source", "target"}, - } -} - -func (v versionEqualsC) Check(params []interface{}, names []string) (result bool, error string) { - source := params[0].(*semver.Version) - target := params[1].(*semver.Version) +func versionEqualCheck(source *semver.Version, target *semver.Version) (result bool) { if source == nil || target == nil { - if target == source { - return true, "" - } - return false, fmt.Sprintf("one of version is nil but another is not (%s and %s)", params[0], params[1]) + return target == source } - if source.Equal(*target) { - return true, "" - } - return false, fmt.Sprintf("version not equal (%s vs %s)", source, target) + return source.Equal(*target) } -var versionEquals versionEqualsC +func TestNormalizeBackupVersion(t *testing.T) { + t.Parallel() -func (s *checkSuite) TestNormalizeBackupVersion(c *C) { cases := []struct { target string source string @@ -294,13 +291,15 @@ func (s *checkSuite) TestNormalizeBackupVersion(c *C) { for _, testCase := range cases { target, _ := semver.NewVersion(testCase.target) source := NormalizeBackupVersion(testCase.source) - c.Assert(source, versionEquals, target) + result := versionEqualCheck(source, target) + require.Truef(t, result, "source=%v, target=%v", source, target) } } -func (s *checkSuite) TestDetectServerInfo(c *C) { +func TestDetectServerInfo(t *testing.T) { + t.Parallel() db, mock, err := sqlmock.New() - c.Assert(err, IsNil) + require.NoError(t, err) defer db.Close() mkVer := makeVersion @@ -320,7 +319,7 @@ func (s *checkSuite) TestDetectServerInfo(c *C) { for _, datum := range data { tag, r, serverTp, expectVer := dec(datum) - cmt := Commentf("test case 
number: %d", tag) + cmt := fmt.Sprintf("test case number: %d", tag) tidbVersionQuery := mock.ExpectQuery("SELECT tidb_version\\(\\);") if strings.HasPrefix(r, "Release Version:") { @@ -332,17 +331,18 @@ func (s *checkSuite) TestDetectServerInfo(c *C) { } verStr, err := FetchVersion(context.Background(), db) - c.Assert(err, IsNil, cmt) + require.NoError(t, err, cmt) + info := ParseServerInfo(verStr) - c.Assert(info.ServerType, Equals, serverTp, cmt) - c.Assert(info.ServerVersion == nil, Equals, expectVer == nil, cmt) + require.Equal(t, serverTp, info.ServerType, cmt) + require.Equal(t, expectVer == nil, info.ServerVersion == nil, cmt) if info.ServerVersion == nil { - c.Assert(expectVer, IsNil, cmt) + require.Nil(t, expectVer, cmt) } else { fmt.Printf("%v, %v\n", *info.ServerVersion, *expectVer) - c.Assert(info.ServerVersion.Equal(*expectVer), IsTrue) + require.True(t, info.ServerVersion.Equal(*expectVer)) } - c.Assert(mock.ExpectationsWereMet(), IsNil, cmt) + require.NoError(t, mock.ExpectationsWereMet(), cmt) } } func makeVersion(major, minor, patch int64, preRelease string) *semver.Version { @@ -355,9 +355,9 @@ func makeVersion(major, minor, patch int64, preRelease string) *semver.Version { } } -func (s *checkSuite) TestFetchVersion(c *C) { +func TestFetchVersion(t *testing.T) { db, mock, err := sqlmock.New() - c.Assert(err, IsNil) + require.NoError(t, err) tidbVersion := `Release Version: v5.2.1 Edition: Community @@ -373,20 +373,21 @@ Check Table Before Drop: false` mock.ExpectQuery("SELECT tidb_version\\(\\);").WillReturnRows(sqlmock. NewRows([]string{""}).AddRow(tidbVersion)) versionStr, err := FetchVersion(ctx, db) - c.Assert(err, IsNil) - c.Assert(versionStr, Equals, tidbVersion) + require.NoError(t, err) + require.Equal(t, tidbVersion, versionStr) mock.ExpectQuery("SELECT tidb_version\\(\\);").WillReturnError(errors.New("mock failure")) mock.ExpectQuery("SELECT version\\(\\);").WillReturnRows(sqlmock. 
NewRows([]string{""}).AddRow("5.7.25")) versionStr, err = FetchVersion(ctx, db) - c.Assert(err, IsNil) - c.Assert(versionStr, Equals, "5.7.25") + require.NoError(t, err) + require.Equal(t, "5.7.25", versionStr) mock.ExpectQuery("SELECT tidb_version\\(\\);").WillReturnError(errors.New("mock failure")) mock.ExpectQuery("SELECT version\\(\\);").WillReturnError(errors.New("mock failure")) _, err = FetchVersion(ctx, db) - c.Assert(err, ErrorMatches, ".*mock failure") + require.Error(t, err) + require.Regexp(t, ".*mock failure", err.Error()) } diff --git a/cmd/explaintest/r/new_character_set_builtin.result b/cmd/explaintest/r/new_character_set_builtin.result index 75a3b89e5d192..2b9ce85ba776a 100644 --- a/cmd/explaintest/r/new_character_set_builtin.result +++ b/cmd/explaintest/r/new_character_set_builtin.result @@ -226,3 +226,35 @@ select md5(b) from t where md5(b) = 'a45d4af7b243e7f393fa09bed72ac73e'; md5(b) a45d4af7b243e7f393fa09bed72ac73e set @@tidb_enable_vectorized_expression = false; +drop table if exists t; +create table t (a char(20)); +insert into t values ('65'), ('123456'), ('123456789'); +select char(a using gbk), char(a using utf8), char(a) from t; +char(a using gbk) char(a using utf8) char(a) +A A A +釦 â@ â@ +NULL [Í [Í +select char(12345678 using gbk); +char(12345678 using gbk) +ç³°N +set @@tidb_enable_vectorized_expression = true; +select char(a using gbk), char(a using utf8), char(a) from t; +char(a using gbk) char(a using utf8) char(a) +A A A +釦 â@ â@ +NULL [Í [Í +select char(12345678 using gbk); +char(12345678 using gbk) +ç³°N +set @@tidb_enable_vectorized_expression = false; +drop table if exists t; +create table t (a char(20) charset utf8mb4, b char(20) charset gbk, c binary(20)); +insert into t values ('一二三', '一二三', '一二三'); +select md5(compress(a)), md5(compress(b)), md5(compress(c)) from t; +md5(compress(a)) md5(compress(b)) md5(compress(c)) +2198d4d3b06a6cba3f9275c7e364105c 5e587a14393aecf0629bb29dbd6b4379 905068487b6220f70bb71a48323826be +set 
@@tidb_enable_vectorized_expression = true; +select md5(compress(a)), md5(compress(b)), md5(compress(c)) from t; +md5(compress(a)) md5(compress(b)) md5(compress(c)) +2198d4d3b06a6cba3f9275c7e364105c 5e587a14393aecf0629bb29dbd6b4379 905068487b6220f70bb71a48323826be +set @@tidb_enable_vectorized_expression = false; diff --git a/cmd/explaintest/t/new_character_set_builtin.test b/cmd/explaintest/t/new_character_set_builtin.test index 4258c5742e308..a3efb3ec2a67f 100644 --- a/cmd/explaintest/t/new_character_set_builtin.test +++ b/cmd/explaintest/t/new_character_set_builtin.test @@ -122,3 +122,22 @@ set @@tidb_enable_vectorized_expression = true; select md5(b) from t where md5(b) = 'a45d4af7b243e7f393fa09bed72ac73e'; set @@tidb_enable_vectorized_expression = false; +-- test for builtin function char() +drop table if exists t; +create table t (a char(20)); +insert into t values ('65'), ('123456'), ('123456789'); +select char(a using gbk), char(a using utf8), char(a) from t; +select char(12345678 using gbk); +set @@tidb_enable_vectorized_expression = true; +select char(a using gbk), char(a using utf8), char(a) from t; +select char(12345678 using gbk); +set @@tidb_enable_vectorized_expression = false; + +-- test for builtin function compress() +drop table if exists t; +create table t (a char(20) charset utf8mb4, b char(20) charset gbk, c binary(20)); +insert into t values ('一二三', '一二三', '一二三'); +select md5(compress(a)), md5(compress(b)), md5(compress(c)) from t; +set @@tidb_enable_vectorized_expression = true; +select md5(compress(a)), md5(compress(b)), md5(compress(c)) from t; +set @@tidb_enable_vectorized_expression = false; diff --git a/ddl/attributes_sql_test.go b/ddl/attributes_sql_test.go index 58511b4cb9fa3..81349ed98d47a 100644 --- a/ddl/attributes_sql_test.go +++ b/ddl/attributes_sql_test.go @@ -15,10 +15,14 @@ package ddl_test import ( + "context" "fmt" + "math" . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/store/gcworker" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/util/gcutil" "github.com/pingcap/tidb/util/testkit" @@ -333,6 +337,132 @@ PARTITION BY RANGE (c) ( c.Assert(rows2[1][3], Equals, rows[1][3]) } +func (s *testDBSuite8) TestDropTable(c *C) { + store, err := mockstore.NewMockStore() + c.Assert(err, IsNil) + dom, err := session.BootstrapSession(store) + c.Assert(err, IsNil) + defer func() { + dom.Close() + err := store.Close() + c.Assert(err, IsNil) + }() + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test") + tk.MustExec(`create table t1 (c int) +PARTITION BY RANGE (c) ( + PARTITION p0 VALUES LESS THAN (6), + PARTITION p1 VALUES LESS THAN (11) +);`) + failpoint.Enable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed", `return`) + defer func() { + failpoint.Disable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed") + }() + + timeBeforeDrop, _, safePointSQL, resetGC := testkit.MockGC(tk) + defer resetGC() + + // Set GC safe point + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + // Set GC enable. 
+ err = gcutil.EnableGC(tk.Se) + c.Assert(err, IsNil) + + gcWorker, err := gcworker.NewMockGCWorker(store) + c.Assert(err, IsNil) + + // add rules + _, err = tk.Exec(`alter table t1 attributes="key=value";`) + c.Assert(err, IsNil) + _, err = tk.Exec(`alter table t1 partition p0 attributes="key1=value1";`) + c.Assert(err, IsNil) + rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 2) + // drop table + _, err = tk.Exec(`drop table t1;`) + c.Assert(err, IsNil) + + err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64)) + c.Assert(err, IsNil) + rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 0) +} + +func (s *testDBSuite8) TestCreateWithSameName(c *C) { + store, err := mockstore.NewMockStore() + c.Assert(err, IsNil) + dom, err := session.BootstrapSession(store) + c.Assert(err, IsNil) + defer func() { + dom.Close() + err := store.Close() + c.Assert(err, IsNil) + }() + tk := testkit.NewTestKit(c, store) + tk.MustExec("use test") + tk.MustExec(`create table t1 (c int) +PARTITION BY RANGE (c) ( + PARTITION p0 VALUES LESS THAN (6), + PARTITION p1 VALUES LESS THAN (11) +);`) + failpoint.Enable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed", `return`) + defer func() { + failpoint.Disable("github.com/pingcap/tidb/store/gcworker/ignoreDeleteRangeFailed") + }() + + timeBeforeDrop, _, safePointSQL, resetGC := testkit.MockGC(tk) + defer resetGC() + + // Set GC safe point + tk.MustExec(fmt.Sprintf(safePointSQL, timeBeforeDrop)) + // Set GC enable. 
+ err = gcutil.EnableGC(tk.Se) + c.Assert(err, IsNil) + + gcWorker, err := gcworker.NewMockGCWorker(store) + c.Assert(err, IsNil) + + // add rules + _, err = tk.Exec(`alter table t1 attributes="key=value";`) + c.Assert(err, IsNil) + _, err = tk.Exec(`alter table t1 partition p0 attributes="key1=value1";`) + c.Assert(err, IsNil) + rows := tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 2) + // drop table + _, err = tk.Exec(`drop table t1;`) + c.Assert(err, IsNil) + + rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 2) + + tk.MustExec(`create table t1 (c int) + PARTITION BY RANGE (c) ( + PARTITION p0 VALUES LESS THAN (6), + PARTITION p1 VALUES LESS THAN (11) + );`) + // add rules + _, err = tk.Exec(`alter table t1 attributes="key=value";`) + c.Assert(err, IsNil) + _, err = tk.Exec(`alter table t1 partition p1 attributes="key1=value1";`) + c.Assert(err, IsNil) + rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 3) + + err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64)) + c.Assert(err, IsNil) + rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 2) + + // drop table + _, err = tk.Exec(`drop table t1;`) + c.Assert(err, IsNil) + err = gcWorker.DeleteRanges(context.Background(), uint64(math.MaxInt64)) + c.Assert(err, IsNil) + rows = tk.MustQuery(`select * from information_schema.attributes;`).Sort().Rows() + c.Assert(len(rows), Equals, 0) +} + func (s *testDBSuite8) TestPartition(c *C) { store, err := mockstore.NewMockStore() c.Assert(err, IsNil) diff --git a/domain/sysvar_cache.go b/domain/sysvar_cache.go index a035889570f51..d89ba88a76ee0 100644 --- a/domain/sysvar_cache.go +++ b/domain/sysvar_cache.go @@ -168,23 +168,23 @@ func (do *Domain) checkEnableServerGlobalVar(name, sVal string) { var err error switch 
name { case variable.TiDBTSOClientBatchMaxWaitTime: - var val int64 - val, err = strconv.ParseInt(sVal, 10, 64) + var val float64 + val, err = strconv.ParseFloat(sVal, 64) if err != nil { break } - variable.MaxTSOBatchWaitInterval.Store(val) - err = do.SetPDClientDynamicOption(pd.MaxTSOBatchWaitInterval, time.Millisecond*time.Duration(val)) + err = do.SetPDClientDynamicOption(pd.MaxTSOBatchWaitInterval, time.Duration(float64(time.Millisecond)*val)) if err != nil { break } + variable.MaxTSOBatchWaitInterval.Store(val) case variable.TiDBEnableTSOFollowerProxy: val := variable.TiDBOptOn(sVal) - variable.EnableTSOFollowerProxy.Store(val) err = do.SetPDClientDynamicOption(pd.EnableTSOFollowerProxy, val) if err != nil { break } + variable.EnableTSOFollowerProxy.Store(val) case variable.TiDBEnableLocalTxn: variable.EnableLocalTxn.Store(variable.TiDBOptOn(sVal)) case variable.TiDBEnableStmtSummary: diff --git a/executor/admin_serial_test.go b/executor/admin_serial_test.go new file mode 100644 index 0000000000000..c9a60228ac7f8 --- /dev/null +++ b/executor/admin_serial_test.go @@ -0,0 +1,167 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package executor_test + +import ( + "context" + "testing" + + "github.com/pingcap/tidb/executor" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/parser/model" + "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/testkit" + "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" + "github.com/stretchr/testify/require" +) + +func TestAdminCheckTableFailed(t *testing.T) { + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists admin_test") + tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))") + tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')") + + // Make some corrupted index. Build the index information. + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() + dbName := model.NewCIStr("test") + tblName := model.NewCIStr("admin_test") + tbl, err := is.TableByName(dbName, tblName) + require.NoError(t, err) + tblInfo := tbl.Meta() + idxInfo := tblInfo.Indices[1] + indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) + sc := ctx.GetSessionVars().StmtCtx + tk.Session().GetSessionVars().IndexLookupSize = 3 + tk.Session().GetSessionVars().MaxChunkSize = 3 + + // Reduce one row of index. + // Table count > index count. + // Index c2 is missing 11. 
+ txn, err := store.Begin() + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index: != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}") + require.True(t, executor.ErrAdminCheckTable.Equal(err)) + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + r := tk.MustQuery("admin recover index admin_test c2") + r.Check(testkit.Rows("1 7")) + tk.MustExec("admin check table admin_test") + + // Add one row of index. + // Table count < index count. + // Index c2 has one more values than table data: 0, and the handle 0 hasn't correlative record. + txn, err = store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:") + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + + // Add one row of index. + // Table count < index count. 
+ // Index c2 has two more values than table data: 10, 13, and these handles have correlative record. + txn, err = store.Begin() + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0)) + require.NoError(t, err) + // Make sure the index value "19" is smaller "21". Then we scan to "19" before "21". + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil) + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + + // Table count = index count. + // Two indices have the same handle. 
+ txn, err = store.Begin() + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2)) + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + + // Table count = index count. + // Index c2 has one line of data is 19, the corresponding table data is 20. 
+ txn, err = store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil) + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10)) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") + tk.MustExec("set @@tidb_redact_log=1;") + err = tk.ExecToErr("admin check table admin_test") + require.Error(t, err) + require.EqualError(t, err, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") + tk.MustExec("set @@tidb_redact_log=0;") + + // Recover records. + txn, err = store.Begin() + require.NoError(t, err) + err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10)) + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + tk.MustExec("admin check table admin_test") +} diff --git a/executor/admin_test.go b/executor/admin_test.go index 8707332688ad2..e984177ab4c52 100644 --- a/executor/admin_test.go +++ b/executor/admin_test.go @@ -1,4 +1,4 @@ -// Copyright 2018 PingCAP, Inc. +// Copyright 2021 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,9 +17,9 @@ package executor_test import ( "context" "fmt" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/errors" mysql "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/executor" @@ -29,14 +29,19 @@ import ( "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" + "github.com/pingcap/tidb/testkit" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/mock" - "github.com/pingcap/tidb/util/testkit" - "github.com/pingcap/tidb/util/testutil" + "github.com/stretchr/testify/require" ) -func (s *testSuite1) TestAdminCheckIndexRange(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckIndexRange(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec(`drop table if exists check_index_test;`) tk.MustExec(`create table check_index_test (a int, b varchar(10), index a_b (a, b), index b (b))`) @@ -52,8 +57,13 @@ func (s *testSuite1) TestAdminCheckIndexRange(c *C) { result.Check(testkit.Rows("-1 hi 4", "2 cd 2")) } -func (s *testSuite5) TestAdminCheckIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckIndex(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") check := func() { tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL)") @@ -78,16 +88,21 @@ func (s *testSuite5) TestAdminCheckIndex(c *C) { check() } -func (s *testSuite5) TestAdminCheckIndexInTemporaryMode(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckIndexInTemporaryMode(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists temporary_admin_test;") tk.MustExec("create global temporary table temporary_admin_test (c1 int, c2 int, c3 int default 1, 
primary key (c1), index (c1), unique key(c2)) ON COMMIT DELETE ROWS;") tk.MustExec("insert temporary_admin_test (c1, c2) values (1, 1), (2, 2), (3, 3);") _, err := tk.Exec("admin check table temporary_admin_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check table").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check table").Error()) _, err = tk.Exec("admin check index temporary_admin_test c1;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check index").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check index").Error()) tk.MustExec("drop table if exists temporary_admin_test;") tk.MustExec("drop table if exists non_temporary_admin_test;") @@ -101,20 +116,25 @@ func (s *testSuite5) TestAdminCheckIndexInTemporaryMode(c *C) { tk.MustExec("create global temporary table temporary_admin_checksum_table_with_index_test (id int, count int, PRIMARY KEY(id), KEY(count)) ON COMMIT DELETE ROWS;") tk.MustExec("create global temporary table temporary_admin_checksum_table_without_index_test (id int, count int, PRIMARY KEY(id)) ON COMMIT DELETE ROWS;") _, err = tk.Exec("admin checksum table temporary_admin_checksum_table_with_index_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) _, err = tk.Exec("admin checksum table temporary_admin_checksum_table_without_index_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) tk.MustExec("drop table if exists temporary_admin_checksum_table_with_index_test,temporary_admin_checksum_table_without_index_test;") } -func 
(s *testSuite5) TestAdminCheckIndexInLocalTemporaryMode(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckIndexInLocalTemporaryMode(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists local_temporary_admin_test;") tk.MustExec("create temporary table local_temporary_admin_test (c1 int, c2 int, c3 int default 1, primary key (c1), index (c1), unique key(c2))") tk.MustExec("insert local_temporary_admin_test (c1, c2) values (1,1), (2,2), (3,3);") _, err := tk.Exec("admin check table local_temporary_admin_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check table").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin check table").Error()) tk.MustExec("drop table if exists temporary_admin_test;") tk.MustExec("drop table if exists local_temporary_admin_checksum_table_with_index_test;") @@ -122,14 +142,19 @@ func (s *testSuite5) TestAdminCheckIndexInLocalTemporaryMode(c *C) { tk.MustExec("create temporary table local_temporary_admin_checksum_table_with_index_test (id int, count int, PRIMARY KEY(id), KEY(count))") tk.MustExec("create temporary table local_temporary_admin_checksum_table_without_index_test (id int, count int, PRIMARY KEY(id))") _, err = tk.Exec("admin checksum table local_temporary_admin_checksum_table_with_index_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) + require.EqualError(t, err, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) _, err = tk.Exec("admin checksum table local_temporary_admin_checksum_table_without_index_test;") - c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) + require.EqualError(t, err, 
core.ErrOptOnTemporaryTable.GenWithStackByArgs("admin checksum table").Error()) tk.MustExec("drop table if exists local_temporary_admin_checksum_table_with_index_test,local_temporary_admin_checksum_table_without_index_test;") } -func (s *testSuite5) TestAdminCheckIndexInCacheTable(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckIndexInCacheTable(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists cache_admin_test;") tk.MustExec("create table cache_admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2))") @@ -160,9 +185,13 @@ func (s *testSuite5) TestAdminCheckIndexInCacheTable(c *C) { tk.MustExec("admin checksum table cache_admin_table_without_index_test;") tk.MustExec("drop table if exists cache_admin_table_with_index_test,cache_admin_table_without_index_test;") } +func TestAdminRecoverIndex(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() -func (s *testSuite5) TestAdminRecoverIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2))") @@ -183,36 +212,36 @@ func (s *testSuite5) TestAdminRecoverIndex(c *C) { // pk is handle, no additional unique index, no way to recover _, err := tk.Exec("admin recover index admin_test c1") // err:index is not found - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("admin recover index admin_test c2") r.Check(testkit.Rows("0 5")) tk.MustExec("admin check index admin_test c2") // Make some corrupted index. 
- s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("c2") indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - sc := s.ctx.GetSessionVars().StmtCtx - txn, err := s.store.Begin() - c.Assert(err, IsNil) + sc := ctx.GetSessionVars().StmtCtx + txn, err := store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(1), kv.IntHandle(1)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(executor.ErrAdminCheckTable.Equal(err), IsTrue) + require.Error(t, err) + require.True(t, executor.ErrAdminCheckTable.Equal(err)) err = tk.ExecToErr("admin check index admin_test c2") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)") r.Check(testkit.Rows("4")) @@ -225,39 +254,39 @@ func (s *testSuite5) TestAdminRecoverIndex(c *C) { tk.MustExec("admin check index admin_test c2") tk.MustExec("admin check table admin_test") - txn, err = s.store.Begin() - c.Assert(err, IsNil) + txn, err = store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(10), kv.IntHandle(10)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check index admin_test c2") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("admin recover index admin_test c2") r.Check(testkit.Rows("1 5")) tk.MustExec("admin check index admin_test c2") tk.MustExec("admin check table admin_test") - 
txn, err = s.store.Begin() - c.Assert(err, IsNil) + txn, err = store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(1), kv.IntHandle(1)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(2), kv.IntHandle(2)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(3), kv.IntHandle(3)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(10), kv.IntHandle(10)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(20)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c2") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)") r.Check(testkit.Rows("0")) @@ -275,12 +304,17 @@ func (s *testSuite5) TestAdminRecoverIndex(c *C) { tk.MustExec("admin check table admin_test") } -func (s *testSuite5) TestClusteredIndexAdminRecoverIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestClusteredIndexAdminRecoverIndex(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("drop database if exists test_cluster_index_admin_recover;") tk.MustExec("create database test_cluster_index_admin_recover;") tk.MustExec("use test_cluster_index_admin_recover;") - tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn dbName := model.NewCIStr("test_cluster_index_admin_recover") tblName := model.NewCIStr("t") @@ -291,24 +325,24 @@ func (s *testSuite5) 
TestClusteredIndexAdminRecoverIndex(c *C) { tk.MustQuery("admin recover index t `idx`;").Check(testkit.Rows("0 3")) tk.MustExec("admin check table t;") - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("idx") indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - sc := s.ctx.GetSessionVars().StmtCtx + sc := ctx.GetSessionVars().StmtCtx // Some index entries are missed. - txn, err := s.store.Begin() - c.Assert(err, IsNil) - cHandle := testutil.MustNewCommonHandle(c, "1", "3") + txn, err := store.Begin() + require.NoError(t, err) + cHandle := testkit.MustNewCommonHandle(t, "1", "3") err = indexOpr.Delete(sc, txn, types.MakeDatums(2), cHandle) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) tk.MustGetErrCode("admin check table t", mysql.ErrAdminCheckTable) tk.MustGetErrCode("admin check index t idx", mysql.ErrAdminCheckTable) @@ -318,33 +352,39 @@ func (s *testSuite5) TestClusteredIndexAdminRecoverIndex(c *C) { tk.MustExec("admin check table t;") } -func (s *testSuite5) TestAdminRecoverPartitionTableIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminRecoverPartitionTableIndex(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") getTable := func() table.Table { - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) return tbl 
} checkFunc := func(tbl table.Table, pid int64, idxValue int) { idxInfo := tbl.Meta().FindIndexByName("c2") indexOpr := tables.NewIndex(pid, tbl.Meta(), idxInfo) - sc := s.ctx.GetSessionVars().StmtCtx - txn, err := s.store.Begin() - c.Assert(err, IsNil) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + txn, err := store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(idxValue), kv.IntHandle(idxValue)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(executor.ErrAdminCheckTable.Equal(err), IsTrue) + require.Error(t, err) + require.True(t, executor.ErrAdminCheckTable.Equal(err)) r := tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)") r.Check(testkit.Rows("2")) @@ -365,7 +405,7 @@ func (s *testSuite5) TestAdminRecoverPartitionTableIndex(c *C) { r.Check(testkit.Rows("0 3")) tbl := getTable() pi := tbl.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) + require.NotNil(t, pi) for i, p := range pi.Definitions { checkFunc(tbl, p.ID, i) } @@ -381,49 +421,54 @@ func (s *testSuite5) TestAdminRecoverPartitionTableIndex(c *C) { r.Check(testkit.Rows("0 3")) tbl = getTable() pi = tbl.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) + require.NotNil(t, pi) for i, p := range pi.Definitions { checkFunc(tbl, p.ID, i*6) } } -func (s *testSuite5) TestAdminRecoverIndex1(c *C) { - tk := testkit.NewTestKit(c, s.store) - s.ctx = mock.NewContext() - s.ctx.Store = s.store +func TestAdminRecoverIndex1(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) + ctx := mock.NewContext() + ctx.Store = store dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") - sc := s.ctx.GetSessionVars().StmtCtx + sc := ctx.GetSessionVars().StmtCtx tk.MustExec("use 
test") tk.MustExec("drop table if exists admin_test") - tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly tk.MustExec("create table admin_test (c1 varchar(255), c2 int, c3 int default 1, primary key(c1), unique key(c2))") tk.MustExec("insert admin_test (c1, c2) values ('1', 1), ('2', 2), ('3', 3), ('10', 10), ('20', 20)") r := tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(`primary`)") r.Check(testkit.Rows("5")) - is := s.domain.InfoSchema() + is := domain.InfoSchema() tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("primary") - c.Assert(idxInfo, NotNil) + require.NotNil(t, idxInfo) indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums("1"), kv.IntHandle(1)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums("2"), kv.IntHandle(2)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums("3"), kv.IntHandle(3)) - c.Assert(err, IsNil) + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums("10"), kv.IntHandle(4)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(`primary`)") r.Check(testkit.Rows("1")) @@ -439,8 +484,13 @@ func (s *testSuite5) TestAdminRecoverIndex1(c *C) { tk.MustExec("admin check index admin_test `primary`") } -func (s *testSuite5) TestAdminCleanupIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCleanupIndex(t *testing.T) { + t.Parallel() + + store, domain, clean := 
testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, primary key (c1), unique key(c2), key (c3))") @@ -449,20 +499,20 @@ func (s *testSuite5) TestAdminCleanupIndex(c *C) { // pk is handle, no need to cleanup _, err := tk.Exec("admin cleanup index admin_test `primary`") - c.Assert(err, NotNil) + require.Error(t, err) r := tk.MustQuery("admin cleanup index admin_test c2") r.Check(testkit.Rows("0")) r = tk.MustQuery("admin cleanup index admin_test c3") r.Check(testkit.Rows("0")) // Make some dangling index. - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo2 := tblInfo.FindIndexByName("c2") @@ -470,31 +520,31 @@ func (s *testSuite5) TestAdminCleanupIndex(c *C) { idxInfo3 := tblInfo.FindIndexByName("c3") indexOpr3 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo3) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(1), kv.IntHandle(-100), nil) - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(6), kv.IntHandle(100), nil) - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(8), kv.IntHandle(100), nil) - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(nil), kv.IntHandle(101), nil) - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(nil), kv.IntHandle(102), nil) - c.Assert(err, IsNil) - _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(6), kv.IntHandle(200), nil) - c.Assert(err, IsNil) - _, err = indexOpr3.Create(s.ctx, txn, 
types.MakeDatums(6), kv.IntHandle(-200), nil) - c.Assert(err, IsNil) - _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(8), kv.IntHandle(-200), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(1), kv.IntHandle(-100), nil) + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(6), kv.IntHandle(100), nil) + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(8), kv.IntHandle(100), nil) + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(nil), kv.IntHandle(101), nil) + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(nil), kv.IntHandle(102), nil) + require.NoError(t, err) + _, err = indexOpr3.Create(ctx, txn, types.MakeDatums(6), kv.IntHandle(200), nil) + require.NoError(t, err) + _, err = indexOpr3.Create(ctx, txn, types.MakeDatums(6), kv.IntHandle(-200), nil) + require.NoError(t, err) + _, err = indexOpr3.Create(ctx, txn, types.MakeDatums(8), kv.IntHandle(-200), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c2") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)") r.Check(testkit.Rows("11")) r = tk.MustQuery("admin cleanup index admin_test c2") @@ -504,9 +554,9 @@ func (s *testSuite5) TestAdminCleanupIndex(c *C) { tk.MustExec("admin check index admin_test c2") err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c3") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c3)") r.Check(testkit.Rows("9")) r = tk.MustQuery("admin cleanup index 
admin_test c3") @@ -518,18 +568,23 @@ func (s *testSuite5) TestAdminCleanupIndex(c *C) { tk.MustExec("admin check table admin_test") } -func (s *testSuite5) TestAdminCleanupIndexForPartitionTable(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCleanupIndexForPartitionTable(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") getTable := func() table.Table { - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) return tbl } @@ -539,17 +594,18 @@ func (s *testSuite5) TestAdminCleanupIndexForPartitionTable(c *C) { idxInfo3 := tbl.Meta().FindIndexByName("c3") indexOpr3 := tables.NewIndex(pid, tbl.Meta(), idxInfo3) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil) - c.Assert(err, IsNil) - _, err = indexOpr3.Create(s.ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + ctx := mock.NewContext() + require.NoError(t, err) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil) + require.NoError(t, err) + _, err = indexOpr3.Create(ctx, txn, types.MakeDatums(idxValue), kv.IntHandle(handle), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) r := tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)") r.Check(testkit.Rows("4")) @@ -575,7 +631,7 @@ func (s *testSuite5) TestAdminCleanupIndexForPartitionTable(c *C) { 
r.Check(testkit.Rows("0")) tbl := getTable() pi := tbl.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) + require.NotNil(t, pi) for i, p := range pi.Definitions { checkFunc(tbl, p.ID, i+6, i+6) } @@ -591,17 +647,22 @@ func (s *testSuite5) TestAdminCleanupIndexForPartitionTable(c *C) { r.Check(testkit.Rows("0")) tbl = getTable() pi = tbl.Meta().GetPartitionInfo() - c.Assert(pi, NotNil) + require.NotNil(t, pi) for i, p := range pi.Definitions { checkFunc(tbl, p.ID, i*6+1, i*6+1) } } -func (s *testSuite5) TestAdminCleanupIndexPKNotHandle(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCleanupIndexPKNotHandle(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") - tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly tk.MustExec("create table admin_test (c1 int, c2 int, c3 int, primary key (c1, c2))") tk.MustExec("insert admin_test (c1, c2) values (1, 2), (3, 4), (-5, 5)") @@ -609,33 +670,33 @@ func (s *testSuite5) TestAdminCleanupIndexPKNotHandle(c *C) { r.Check(testkit.Rows("0")) // Make some dangling index. 
- s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("primary") indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(7, 10), kv.IntHandle(-100), nil) - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(4, 6), kv.IntHandle(100), nil) - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(-7, 4), kv.IntHandle(101), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(7, 10), kv.IntHandle(-100), nil) + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(4, 6), kv.IntHandle(100), nil) + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(-7, 4), kv.IntHandle(101), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test `primary`") - c.Assert(err, NotNil) + require.Error(t, err) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(`primary`)") r.Check(testkit.Rows("6")) r = tk.MustQuery("admin cleanup index admin_test `primary`") @@ -646,8 +707,13 @@ func (s *testSuite5) TestAdminCleanupIndexPKNotHandle(c *C) { tk.MustExec("admin check table admin_test") } -func (s *testSuite5) TestAdminCleanupIndexMore(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCleanupIndexMore(t *testing.T) { + t.Parallel() + + store, domain, clean := 
testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") tk.MustExec("create table admin_test (c1 int, c2 int, unique key (c1, c2), key (c2))") @@ -657,13 +723,13 @@ func (s *testSuite5) TestAdminCleanupIndexMore(c *C) { tk.MustExec("admin cleanup index admin_test c2") // Make some dangling index. - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo1 := tblInfo.FindIndexByName("c1") @@ -671,25 +737,25 @@ func (s *testSuite5) TestAdminCleanupIndexMore(c *C) { idxInfo2 := tblInfo.FindIndexByName("c2") indexOpr2 := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo2) - txn, err := s.store.Begin() - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) for i := 0; i < 2000; i++ { c1 := int64(2*i + 7) c2 := int64(2*i + 8) - _, err = indexOpr1.Create(s.ctx, txn, types.MakeDatums(c1, c2), kv.IntHandle(c1), nil) - c.Assert(err, IsNil, Commentf(errors.ErrorStack(err))) - _, err = indexOpr2.Create(s.ctx, txn, types.MakeDatums(c2), kv.IntHandle(c1), nil) - c.Assert(err, IsNil) + _, err = indexOpr1.Create(ctx, txn, types.MakeDatums(c1, c2), kv.IntHandle(c1), nil) + require.NoErrorf(t, err, errors.ErrorStack(err)) + _, err = indexOpr2.Create(ctx, txn, types.MakeDatums(c2), kv.IntHandle(c1), nil) + require.NoError(t, err) } err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c1") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c2") - 
c.Assert(err, NotNil) + require.Error(t, err) r := tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX()") r.Check(testkit.Rows("3")) r = tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c1)") @@ -709,11 +775,16 @@ func (s *testSuite5) TestAdminCleanupIndexMore(c *C) { tk.MustExec("admin check table admin_test") } -func (s *testSuite5) TestClusteredAdminCleanupIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestClusteredAdminCleanupIndex(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test") - tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("create table admin_test (c1 varchar(255), c2 int, c3 char(10) default 'c3', primary key (c1, c3), unique key(c2), key (c3))") tk.MustExec("insert admin_test (c1, c2) values ('c1_1', 2), ('c1_2', 4), ('c1_3', NULL)") tk.MustExec("insert admin_test (c1, c3) values ('c1_4', 'c3_4'), ('c1_5', 'c3_5'), ('c1_6', default)") @@ -724,10 +795,10 @@ func (s *testSuite5) TestClusteredAdminCleanupIndex(c *C) { tk.MustQuery("admin cleanup index admin_test `c3`").Check(testkit.Rows("0")) // Make some dangling index. - s.ctx = mock.NewContext() - s.ctx.Store = s.store - tbl, err := s.domain.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test")) - c.Assert(err, IsNil) + ctx := mock.NewContext() + ctx.Store = store + tbl, err := domain.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("admin_test")) + require.NoError(t, err) // cleanup clustered primary key takes no effect. 
tblInfo := tbl.Meta() @@ -740,44 +811,44 @@ func (s *testSuite5) TestClusteredAdminCleanupIndex(c *C) { handle kv.Handle idxVal []types.Datum }{ - {testutil.MustNewCommonHandle(c, "c1_10", "c3_10"), types.MakeDatums(10)}, - {testutil.MustNewCommonHandle(c, "c1_10", "c3_11"), types.MakeDatums(11)}, - {testutil.MustNewCommonHandle(c, "c1_12", "c3_12"), types.MakeDatums(12)}, + {testkit.MustNewCommonHandle(t, "c1_10", "c3_10"), types.MakeDatums(10)}, + {testkit.MustNewCommonHandle(t, "c1_10", "c3_11"), types.MakeDatums(11)}, + {testkit.MustNewCommonHandle(t, "c1_12", "c3_12"), types.MakeDatums(12)}, } c3DanglingIdx := []struct { handle kv.Handle idxVal []types.Datum }{ - {testutil.MustNewCommonHandle(c, "c1_13", "c3_13"), types.MakeDatums("c3_13")}, - {testutil.MustNewCommonHandle(c, "c1_14", "c3_14"), types.MakeDatums("c3_14")}, - {testutil.MustNewCommonHandle(c, "c1_15", "c3_15"), types.MakeDatums("c3_15")}, + {testkit.MustNewCommonHandle(t, "c1_13", "c3_13"), types.MakeDatums("c3_13")}, + {testkit.MustNewCommonHandle(t, "c1_14", "c3_14"), types.MakeDatums("c3_14")}, + {testkit.MustNewCommonHandle(t, "c1_15", "c3_15"), types.MakeDatums("c3_15")}, } - txn, err := s.store.Begin() - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) for _, di := range c2DanglingIdx { - _, err := indexOpr2.Create(s.ctx, txn, di.idxVal, di.handle, nil) - c.Assert(err, IsNil) + _, err := indexOpr2.Create(ctx, txn, di.idxVal, di.handle, nil) + require.NoError(t, err) } for _, di := range c3DanglingIdx { - _, err := indexOpr3.Create(s.ctx, txn, di.idxVal, di.handle, nil) - c.Assert(err, IsNil) + _, err := indexOpr3.Create(ctx, txn, di.idxVal, di.handle, nil) + require.NoError(t, err) } err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c2") - c.Assert(err, NotNil) + require.Error(t, 
err) tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)").Check(testkit.Rows("9")) tk.MustQuery("admin cleanup index admin_test c2").Check(testkit.Rows("3")) tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c2)").Check(testkit.Rows("6")) tk.MustExec("admin check index admin_test c2") err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_test c3") - c.Assert(err, NotNil) + require.Error(t, err) tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c3)").Check(testkit.Rows("9")) tk.MustQuery("admin cleanup index admin_test c3").Check(testkit.Rows("3")) tk.MustQuery("SELECT COUNT(*) FROM admin_test USE INDEX(c3)").Check(testkit.Rows("6")) @@ -785,8 +856,13 @@ func (s *testSuite5) TestClusteredAdminCleanupIndex(c *C) { tk.MustExec("admin check table admin_test") } -func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckPartitionTableFailed(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_test_p") tk.MustExec("create table admin_test_p (c1 int key,c2 int,c3 int,index idx(c2)) partition by hash(c1) partitions 4") @@ -794,45 +870,45 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) { tk.MustExec("admin check table admin_test_p") // Make some corrupted index. Build the index information. 
- s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_test_p") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.Indices[0] - sc := s.ctx.GetSessionVars().StmtCtx - tk.Se.GetSessionVars().IndexLookupSize = 3 - tk.Se.GetSessionVars().MaxChunkSize = 3 + sc := ctx.GetSessionVars().StmtCtx + tk.Session().GetSessionVars().IndexLookupSize = 3 + tk.Session().GetSessionVars().MaxChunkSize = 3 // Reduce one row of index on partitions. // Table count > index count. for i := 0; i <= 5; i++ { partitionIdx := i % len(tblInfo.GetPartitionInfo().Definitions) indexOpr := tables.NewIndex(tblInfo.GetPartitionInfo().Definitions[partitionIdx].ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(i), kv.IntHandle(i)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test_p") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, fmt.Sprintf("[executor:8003]admin_test_p err:[admin:8223]index: != record:&admin.RecordData{Handle:%d, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}", i, i)) - c.Assert(executor.ErrAdminCheckTable.Equal(err), IsTrue) + require.Error(t, err) + require.EqualError(t, err, fmt.Sprintf("[executor:8003]admin_test_p err:[admin:8223]index: != record:&admin.RecordData{Handle:%d, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}", i, i)) + require.True(t, executor.ErrAdminCheckTable.Equal(err)) // TODO: fix admin 
recover for partition table. // r := tk.MustQuery("admin recover index admin_test_p idx") // r.Check(testkit.Rows("0 0")) // tk.MustExec("admin check table admin_test_p") // Manual recover index. - txn, err = s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i), kv.IntHandle(i), nil) - c.Assert(err, IsNil) + txn, err = store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(i), kv.IntHandle(i), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) tk.MustExec("admin check table admin_test_p") } @@ -841,22 +917,21 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) { for i := 0; i <= 5; i++ { partitionIdx := i % len(tblInfo.GetPartitionInfo().Definitions) indexOpr := tables.NewIndex(tblInfo.GetPartitionInfo().Definitions[partitionIdx].ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i+8), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i+8), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test_p") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, fmt.Sprintf("[executor:8133]handle %d, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:", i+8, i+8)) + require.EqualError(t, err, fmt.Sprintf("[executor:8133]handle %d, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:", i+8, i+8)) // TODO: fix admin recover for partition table. 
- txn, err = s.store.Begin() - c.Assert(err, IsNil) + txn, err = store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(i+8), kv.IntHandle(i+8)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) tk.MustExec("admin check table admin_test_p") } @@ -864,165 +939,33 @@ func (s *testSuite3) TestAdminCheckPartitionTableFailed(c *C) { for i := 0; i <= 5; i++ { partitionIdx := i % len(tblInfo.GetPartitionInfo().Definitions) indexOpr := tables.NewIndex(tblInfo.GetPartitionInfo().Definitions[partitionIdx].ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) + _, err = indexOpr.Create(ctx, txn, types.MakeDatums(i+8), kv.IntHandle(i), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_test_p") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, fmt.Sprintf("[executor:8134]col c2, handle %d, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:", i, i+8, i)) + require.EqualError(t, err, fmt.Sprintf("[executor:8134]col c2, handle %d, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:%d, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:", i, i+8, i)) // TODO: fix admin recover for partition table. 
- txn, err = s.store.Begin() - c.Assert(err, IsNil) + txn, err = store.Begin() + require.NoError(t, err) err = indexOpr.Delete(sc, txn, types.MakeDatums(i+8), kv.IntHandle(i)) - c.Assert(err, IsNil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) tk.MustExec("admin check table admin_test_p") } } -func (s *testSuiteJoinSerial) TestAdminCheckTableFailed(c *C) { - tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists admin_test") - tk.MustExec("create table admin_test (c1 int, c2 int, c3 varchar(255) default '1', primary key(c1), key(c3), unique key(c2), key(c2, c3))") - tk.MustExec("insert admin_test (c1, c2, c3) values (-10, -20, 'y'), (-1, -10, 'z'), (1, 11, 'a'), (2, 12, 'b'), (5, 15, 'c'), (10, 20, 'd'), (20, 30, 'e')") - - // Make some corrupted index. Build the index information. - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() - dbName := model.NewCIStr("test") - tblName := model.NewCIStr("admin_test") - tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) - tblInfo := tbl.Meta() - idxInfo := tblInfo.Indices[1] - indexOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - sc := s.ctx.GetSessionVars().StmtCtx - tk.Se.GetSessionVars().IndexLookupSize = 3 - tk.Se.GetSessionVars().MaxChunkSize = 3 - - // Reduce one row of index. - // Table count > index count. - // Index c2 is missing 11. 
- txn, err := s.store.Begin() - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(-10), kv.IntHandle(-1)) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, - "[executor:8003]admin_test err:[admin:8223]index: != record:&admin.RecordData{Handle:-1, Values:[]types.Datum{types.Datum{k:0x1, decimal:0x0, length:0x0, i:-10, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}}}") - c.Assert(executor.ErrAdminCheckTable.Equal(err), IsTrue) - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8003]admin_test err:[admin:8223]index:\"?\" != record:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - r := tk.MustQuery("admin recover index admin_test c2") - r.Check(testkit.Rows("1 7")) - tk.MustExec("admin check table admin_test") - - // Add one row of index. - // Table count < index count. - // Index c2 has one more values than table data: 0, and the handle 0 hasn't correlative record. - txn, err = s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(0), kv.IntHandle(0), nil) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8133]handle 0, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:0, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8133]handle \"?\", index:\"?\" != record:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") +func TestAdminCheckTable(t *testing.T) { + // test NULL value. + t.Parallel() - // Add one row of index. 
- // Table count < index count. - // Index c2 has two more values than table data: 10, 13, and these handles have correlative record. - txn, err = s.store.Begin() - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(0), kv.IntHandle(0)) - c.Assert(err, IsNil) - // Make sure the index value "19" is smaller "21". Then we scan to "19" before "21". - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(19), kv.IntHandle(10), nil) - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(13), kv.IntHandle(2), nil) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle 2, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:13, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:12, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Table count = index count. - // Two indices have the same handle. 
- txn, err = s.store.Begin() - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(13), kv.IntHandle(2)) - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(12), kv.IntHandle(2)) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Table count = index count. - // Index c2 has one line of data is 19, the corresponding table data is 20. 
- txn, err = s.store.Begin() - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(12), kv.IntHandle(2), nil) - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(20), kv.IntHandle(10)) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle 10, index:types.Datum{k:0x1, decimal:0x0, length:0x0, i:19, collation:\"\", b:[]uint8(nil), x:interface {}(nil)} != record:types.Datum{k:0x1, decimal:0x0, length:0x0, i:20, collation:\"\", b:[]uint8(nil), x:interface {}(nil)}, compare err:") - tk.MustExec("set @@tidb_redact_log=1;") - err = tk.ExecToErr("admin check table admin_test") - c.Assert(err, NotNil) - c.Assert(err.Error(), Equals, "[executor:8134]col c2, handle \"?\", index:\"?\" != record:\"?\", compare err:\"?\"") - tk.MustExec("set @@tidb_redact_log=0;") - - // Recover records. - txn, err = s.store.Begin() - c.Assert(err, IsNil) - err = indexOpr.Delete(sc, txn, types.MakeDatums(19), kv.IntHandle(10)) - c.Assert(err, IsNil) - _, err = indexOpr.Create(s.ctx, txn, types.MakeDatums(20), kv.IntHandle(10), nil) - c.Assert(err, IsNil) - err = txn.Commit(context.Background()) - c.Assert(err, IsNil) - tk.MustExec("admin check table admin_test") -} + store, clean := testkit.CreateMockStore(t) + defer clean() -func (s *testSuite8) TestAdminCheckTable(c *C) { - // test NULL value. 
- tk := testkit.NewTestKit(c, s.store) + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec(`CREATE TABLE test_null ( a int(11) NOT NULL, @@ -1092,7 +1035,7 @@ func (s *testSuite8) TestAdminCheckTable(c *C) { tk.MustExec("use mysql") tk.MustExec(`admin check table test.t;`) err := tk.ExecToErr("admin check table t") - c.Assert(err, NotNil) + require.Error(t, err) // test add index on time type column which have default value tk.MustExec("use test") @@ -1132,21 +1075,31 @@ func (s *testSuite8) TestAdminCheckTable(c *C) { tk.MustExec(`create table t1 (a decimal(2,1), index(a))`) tk.MustExec(`insert into t1 set a='1.9'`) err = tk.ExecToErr(`alter table t1 modify column a decimal(3,2);`) - c.Assert(err, IsNil) + require.NoError(t, err) tk.MustExec(`delete from t1;`) tk.MustExec(`admin check table t1;`) } -func (s *testSuite1) TestAdminCheckPrimaryIndex(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckPrimaryIndex(t *testing.T) { + t.Parallel() + + store, clean := testkit.CreateMockStore(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("create table t(a bigint unsigned primary key, b int, c int, index idx(a, b));") tk.MustExec("insert into t values(1, 1, 1), (9223372036854775807, 2, 2);") tk.MustExec("admin check index t idx;") } -func (s *testSuite5) TestAdminCheckWithSnapshot(c *C) { - tk := testkit.NewTestKit(c, s.store) +func TestAdminCheckWithSnapshot(t *testing.T) { + t.Parallel() + + store, domain, clean := testkit.CreateMockStoreAndDomain(t) + defer clean() + + tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists admin_t_s") tk.MustExec("create table admin_t_s (a int, b int, key(a));") @@ -1156,27 +1109,27 @@ func (s *testSuite5) TestAdminCheckWithSnapshot(c *C) { snapshotTime := time.Now() - s.ctx = mock.NewContext() - s.ctx.Store = s.store - is := s.domain.InfoSchema() + ctx := mock.NewContext() + ctx.Store = store + is := 
domain.InfoSchema() dbName := model.NewCIStr("test") tblName := model.NewCIStr("admin_t_s") tbl, err := is.TableByName(dbName, tblName) - c.Assert(err, IsNil) + require.NoError(t, err) tblInfo := tbl.Meta() idxInfo := tblInfo.FindIndexByName("a") idxOpr := tables.NewIndex(tblInfo.ID, tblInfo, idxInfo) - txn, err := s.store.Begin() - c.Assert(err, IsNil) - _, err = idxOpr.Create(s.ctx, txn, types.MakeDatums(2), kv.IntHandle(100), nil) - c.Assert(err, IsNil) + txn, err := store.Begin() + require.NoError(t, err) + _, err = idxOpr.Create(ctx, txn, types.MakeDatums(2), kv.IntHandle(100), nil) + require.NoError(t, err) err = txn.Commit(context.Background()) - c.Assert(err, IsNil) + require.NoError(t, err) err = tk.ExecToErr("admin check table admin_t_s") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_t_s a") - c.Assert(err, NotNil) + require.Error(t, err) // For mocktikv, safe point is not initialized, we manually insert it for snapshot to use. safePointName := "tikv_gc_safe_point" @@ -1193,9 +1146,9 @@ func (s *testSuite5) TestAdminCheckWithSnapshot(c *C) { tk.MustExec("set @@tidb_snapshot = ''") err = tk.ExecToErr("admin check table admin_t_s") - c.Assert(err, NotNil) + require.Error(t, err) err = tk.ExecToErr("admin check index admin_t_s a") - c.Assert(err, NotNil) + require.Error(t, err) r := tk.MustQuery("admin cleanup index admin_t_s a") r.Check(testkit.Rows("1")) diff --git a/executor/aggfuncs/aggfunc_test.go b/executor/aggfuncs/aggfunc_test.go index fb6e0ad05c041..f5ed94e821d63 100644 --- a/executor/aggfuncs/aggfunc_test.go +++ b/executor/aggfuncs/aggfunc_test.go @@ -466,109 +466,6 @@ func testMergePartialResult(t *testing.T, p aggTest) { require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[2]) } -// Deprecated: migrating to testMergePartialResult(t *testing.T, p aggTest) -func (s *testSuite) testMergePartialResult(c *C, p aggTest) { - srcChk := p.genSrcChk() - iter := chunk.NewIterator4Chunk(srcChk) - - 
args := []expression.Expression{&expression.Column{RetType: p.dataType, Index: 0}} - if p.funcName == ast.AggFuncGroupConcat { - args = append(args, &expression.Constant{Value: types.NewStringDatum(separator), RetType: types.NewFieldType(mysql.TypeString)}) - } - desc, err := aggregation.NewAggFuncDesc(s.ctx, p.funcName, args, false) - c.Assert(err, IsNil) - if p.orderBy { - desc.OrderByItems = []*util.ByItems{ - {Expr: args[0], Desc: true}, - } - } - partialDesc, finalDesc := desc.Split([]int{0, 1}) - - // build partial func for partial phase. - partialFunc := aggfuncs.Build(s.ctx, partialDesc, 0) - partialResult, _ := partialFunc.AllocPartialResult() - - // build final func for final phase. - finalFunc := aggfuncs.Build(s.ctx, finalDesc, 0) - finalPr, _ := finalFunc.AllocPartialResult() - resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, 1) - if p.funcName == ast.AggFuncApproxCountDistinct { - resultChk = chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeString)}, 1) - } - if p.funcName == ast.AggFuncJsonArrayagg { - resultChk = chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeJSON)}, 1) - } - - // update partial result. 
- for row := iter.Begin(); row != iter.End(); row = iter.Next() { - _, err = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult) - c.Assert(err, IsNil) - } - p.messUpChunk(srcChk) - err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk) - c.Assert(err, IsNil) - dt := resultChk.GetRow(0).GetDatum(0, p.dataType) - if p.funcName == ast.AggFuncApproxCountDistinct { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeString)) - } - if p.funcName == ast.AggFuncJsonArrayagg { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) - } - result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[0])) - - _, err = finalFunc.MergePartialResult(s.ctx, partialResult, finalPr) - c.Assert(err, IsNil) - partialFunc.ResetPartialResult(partialResult) - - srcChk = p.genSrcChk() - iter = chunk.NewIterator4Chunk(srcChk) - iter.Begin() - iter.Next() - for row := iter.Next(); row != iter.End(); row = iter.Next() { - _, err = partialFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, partialResult) - c.Assert(err, IsNil) - } - p.messUpChunk(srcChk) - resultChk.Reset() - err = partialFunc.AppendFinalResult2Chunk(s.ctx, partialResult, resultChk) - c.Assert(err, IsNil) - dt = resultChk.GetRow(0).GetDatum(0, p.dataType) - if p.funcName == ast.AggFuncApproxCountDistinct { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeString)) - } - if p.funcName == ast.AggFuncJsonArrayagg { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) - } - result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[1])) - _, err = finalFunc.MergePartialResult(s.ctx, partialResult, finalPr) - c.Assert(err, IsNil) - - if p.funcName == ast.AggFuncApproxCountDistinct { - resultChk 
= chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeLonglong)}, 1) - } - if p.funcName == ast.AggFuncJsonArrayagg { - resultChk = chunk.NewChunkWithCapacity([]*types.FieldType{types.NewFieldType(mysql.TypeJSON)}, 1) - } - resultChk.Reset() - err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) - c.Assert(err, IsNil) - - dt = resultChk.GetRow(0).GetDatum(0, p.dataType) - if p.funcName == ast.AggFuncApproxCountDistinct { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeLonglong)) - } - if p.funcName == ast.AggFuncJsonArrayagg { - dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) - } - result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[2]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[2])) -} - func buildAggTester(funcName string, tp byte, numRows int, results ...interface{}) aggTest { return buildAggTesterWithFieldType(funcName, types.NewFieldType(tp), numRows, results...) 
} @@ -809,95 +706,6 @@ func testAggFunc(t *testing.T, p aggTest) { require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[0]) } -// Deprecated: migrating to func testAggFunc(t *testing.T, p aggTest) -func (s *testSuite) testAggFunc(c *C, p aggTest) { - srcChk := p.genSrcChk() - - args := []expression.Expression{&expression.Column{RetType: p.dataType, Index: 0}} - if p.funcName == ast.AggFuncGroupConcat { - args = append(args, &expression.Constant{Value: types.NewStringDatum(separator), RetType: types.NewFieldType(mysql.TypeString)}) - } - if p.funcName == ast.AggFuncApproxPercentile { - args = append(args, &expression.Constant{Value: types.NewIntDatum(50), RetType: types.NewFieldType(mysql.TypeLong)}) - } - desc, err := aggregation.NewAggFuncDesc(s.ctx, p.funcName, args, false) - c.Assert(err, IsNil) - if p.orderBy { - desc.OrderByItems = []*util.ByItems{ - {Expr: args[0], Desc: true}, - } - } - finalFunc := aggfuncs.Build(s.ctx, desc, 0) - finalPr, _ := finalFunc.AllocPartialResult() - resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{desc.RetTp}, 1) - - iter := chunk.NewIterator4Chunk(srcChk) - for row := iter.Begin(); row != iter.End(); row = iter.Next() { - _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr) - c.Assert(err, IsNil) - } - p.messUpChunk(srcChk) - err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) - c.Assert(err, IsNil) - dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err := dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[1])) - - // test the empty input - resultChk.Reset() - finalFunc.ResetPartialResult(finalPr) - err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) - c.Assert(err, IsNil) - dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) - c.Assert(err, IsNil) - c.Assert(result, 
Equals, 0, Commentf("%v != %v", dt.String(), p.results[0])) - - // test the agg func with distinct - desc, err = aggregation.NewAggFuncDesc(s.ctx, p.funcName, args, true) - c.Assert(err, IsNil) - if p.orderBy { - desc.OrderByItems = []*util.ByItems{ - {Expr: args[0], Desc: true}, - } - } - finalFunc = aggfuncs.Build(s.ctx, desc, 0) - finalPr, _ = finalFunc.AllocPartialResult() - - resultChk.Reset() - srcChk = p.genSrcChk() - iter = chunk.NewIterator4Chunk(srcChk) - for row := iter.Begin(); row != iter.End(); row = iter.Next() { - _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr) - c.Assert(err, IsNil) - } - p.messUpChunk(srcChk) - srcChk = p.genSrcChk() - iter = chunk.NewIterator4Chunk(srcChk) - for row := iter.Begin(); row != iter.End(); row = iter.Next() { - _, err = finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr) - c.Assert(err, IsNil) - } - p.messUpChunk(srcChk) - err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) - c.Assert(err, IsNil) - dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[1]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[1])) - - // test the empty input - resultChk.Reset() - finalFunc.ResetPartialResult(finalPr) - err = finalFunc.AppendFinalResult2Chunk(s.ctx, finalPr, resultChk) - c.Assert(err, IsNil) - dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.CompareDatum(s.ctx.GetSessionVars().StmtCtx, &p.results[0]) - c.Assert(err, IsNil) - c.Assert(result, Equals, 0, Commentf("%v != %v", dt.String(), p.results[0])) -} - func testAggFuncWithoutDistinct(t *testing.T, p aggTest) { srcChk := p.genSrcChk() @@ -975,37 +783,96 @@ func testAggMemFunc(t *testing.T, p aggMemTest) { } } -// Deprecated: migrating to testAggMemFunc(t *testing.T, p aggMemTest) -func (s *testSuite) testAggMemFunc(c *C, p aggMemTest) { - srcChk := p.aggTest.genSrcChk() +func 
testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTest) { + srcChk := p.genSrcChk() - args := []expression.Expression{&expression.Column{RetType: p.aggTest.dataType, Index: 0}} - if p.aggTest.funcName == ast.AggFuncGroupConcat { + args := make([]expression.Expression, len(p.dataTypes)) + for k := 0; k < len(p.dataTypes); k++ { + args[k] = &expression.Column{RetType: p.dataTypes[k], Index: k} + } + if p.funcName == ast.AggFuncGroupConcat { args = append(args, &expression.Constant{Value: types.NewStringDatum(separator), RetType: types.NewFieldType(mysql.TypeString)}) } - desc, err := aggregation.NewAggFuncDesc(s.ctx, p.aggTest.funcName, args, p.isDistinct) - c.Assert(err, IsNil) - if p.aggTest.orderBy { + + desc, err := aggregation.NewAggFuncDesc(ctx, p.funcName, args, false) + require.NoError(t, err) + if p.orderBy { desc.OrderByItems = []*util.ByItems{ {Expr: args[0], Desc: true}, } } - finalFunc := aggfuncs.Build(s.ctx, desc, 0) - finalPr, memDelta := finalFunc.AllocPartialResult() - c.Assert(memDelta, Equals, p.allocMemDelta) + finalFunc := aggfuncs.Build(ctx, desc, 0) + finalPr, _ := finalFunc.AllocPartialResult() + resultChk := chunk.NewChunkWithCapacity([]*types.FieldType{desc.RetTp}, 1) - updateMemDeltas, err := p.updateMemDeltaGens(srcChk, p.aggTest.dataType) - c.Assert(err, IsNil) iter := chunk.NewIterator4Chunk(srcChk) - i := 0 for row := iter.Begin(); row != iter.End(); row = iter.Next() { - memDelta, err := finalFunc.UpdatePartialResult(s.ctx, []chunk.Row{row}, finalPr) - c.Assert(err, IsNil) - c.Assert(memDelta, Equals, updateMemDeltas[i]) - i++ + // FIXME: cannot assert error since there are cases of error, e.g. 
rows were cut by GROUPCONCAT + _, _ = finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr) + } + p.messUpChunk(srcChk) + err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) + require.NoError(t, err) + dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err := dt.CompareDatum(ctx.GetSessionVars().StmtCtx, &p.results[1]) + require.NoError(t, err) + require.Zerof(t, result, "%v != %v", dt.String(), p.results[1]) + + // test the empty input + resultChk.Reset() + finalFunc.ResetPartialResult(finalPr) + err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) + require.NoError(t, err) + dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err = dt.CompareDatum(ctx.GetSessionVars().StmtCtx, &p.results[0]) + require.NoError(t, err) + require.Zerof(t, result, "%v != %v", dt.String(), p.results[0]) + + // test the agg func with distinct + desc, err = aggregation.NewAggFuncDesc(ctx, p.funcName, args, true) + require.NoError(t, err) + if p.orderBy { + desc.OrderByItems = []*util.ByItems{ + {Expr: args[0], Desc: true}, + } + } + finalFunc = aggfuncs.Build(ctx, desc, 0) + finalPr, _ = finalFunc.AllocPartialResult() + + resultChk.Reset() + srcChk = p.genSrcChk() + iter = chunk.NewIterator4Chunk(srcChk) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + // FIXME: cannot check error + _, _ = finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr) } + p.messUpChunk(srcChk) + srcChk = p.genSrcChk() + iter = chunk.NewIterator4Chunk(srcChk) + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + // FIXME: cannot check error + _, _ = finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr) + } + p.messUpChunk(srcChk) + err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) + require.NoError(t, err) + dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err = dt.CompareDatum(ctx.GetSessionVars().StmtCtx, &p.results[1]) + require.NoError(t, err) + require.Zerof(t, result, "%v != %v", 
dt.String(), p.results[1]) + + // test the empty input + resultChk.Reset() + finalFunc.ResetPartialResult(finalPr) + err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) + require.NoError(t, err) + dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) + result, err = dt.CompareDatum(ctx.GetSessionVars().StmtCtx, &p.results[0]) + require.NoError(t, err) + require.Zero(t, result) } +// Deprecated: migrating to testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTest) func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) { srcChk := p.genSrcChk() @@ -1095,6 +962,41 @@ func (s *testSuite) testMultiArgsAggFunc(c *C, p multiArgsAggTest) { c.Assert(result, Equals, 0) } +func testMultiArgsAggMemFunc(t *testing.T, p multiArgsAggMemTest) { + srcChk := p.multiArgsAggTest.genSrcChk() + ctx := mock.NewContext() + + args := make([]expression.Expression, len(p.multiArgsAggTest.dataTypes)) + for k := 0; k < len(p.multiArgsAggTest.dataTypes); k++ { + args[k] = &expression.Column{RetType: p.multiArgsAggTest.dataTypes[k], Index: k} + } + if p.multiArgsAggTest.funcName == ast.AggFuncGroupConcat { + args = append(args, &expression.Constant{Value: types.NewStringDatum(separator), RetType: types.NewFieldType(mysql.TypeString)}) + } + + desc, err := aggregation.NewAggFuncDesc(ctx, p.multiArgsAggTest.funcName, args, p.isDistinct) + require.NoError(t, err) + if p.multiArgsAggTest.orderBy { + desc.OrderByItems = []*util.ByItems{ + {Expr: args[0], Desc: true}, + } + } + finalFunc := aggfuncs.Build(ctx, desc, 0) + finalPr, memDelta := finalFunc.AllocPartialResult() + require.Equal(t, p.allocMemDelta, memDelta) + + updateMemDeltas, err := p.multiArgsUpdateMemDeltaGens(srcChk, p.multiArgsAggTest.dataTypes, desc.OrderByItems) + require.NoError(t, err) + iter := chunk.NewIterator4Chunk(srcChk) + i := 0 + for row := iter.Begin(); row != iter.End(); row = iter.Next() { + memDelta, _ := finalFunc.UpdatePartialResult(ctx, []chunk.Row{row}, finalPr) + 
require.Equal(t, updateMemDeltas[i], memDelta) + i++ + } +} + +// Deprecated: migrating to testMultiArgsAggMemFunc(t *testing.T, p multiArgsAggMemTest) func (s *testSuite) testMultiArgsAggMemFunc(c *C, p multiArgsAggMemTest) { srcChk := p.multiArgsAggTest.genSrcChk() @@ -1127,7 +1029,6 @@ func (s *testSuite) testMultiArgsAggMemFunc(c *C, p multiArgsAggMemTest) { i++ } } - func (s *testSuite) benchmarkAggFunc(b *testing.B, p aggTest) { srcChk := chunk.NewChunkWithCapacity([]*types.FieldType{p.dataType}, p.numRows) for i := 0; i < p.numRows; i++ { diff --git a/executor/aggfuncs/func_count_test.go b/executor/aggfuncs/func_count_test.go index a1a04c7438d79..603c46e4fba3b 100644 --- a/executor/aggfuncs/func_count_test.go +++ b/executor/aggfuncs/func_count_test.go @@ -16,16 +16,18 @@ package aggfuncs_test import ( "encoding/binary" + "fmt" "testing" "github.com/dgryski/go-farm" - . "github.com/pingcap/check" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" + "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/set" + "github.com/stretchr/testify/require" ) func genApproxDistinctMergePartialResult(begin, end uint64) string { @@ -39,15 +41,19 @@ func genApproxDistinctMergePartialResult(begin, end uint64) string { return string(o.Serialize()) } -func (s *testSuite) TestMergePartialResult4Count(c *C) { +func TestMergePartialResult4Count(t *testing.T) { + t.Parallel() + tester := buildAggTester(ast.AggFuncCount, mysql.TypeLonglong, 5, 5, 3, 8) - s.testMergePartialResult(c, tester) + testMergePartialResult(t, tester) tester = buildAggTester(ast.AggFuncApproxCountDistinct, mysql.TypeLonglong, 5, genApproxDistinctMergePartialResult(0, 5), genApproxDistinctMergePartialResult(2, 5), 5) - s.testMergePartialResult(c, tester) + testMergePartialResult(t, tester) } -func (s *testSuite) TestCount(c *C) { +func TestCount(t 
*testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncCount, mysql.TypeLonglong, 5, 0, 5), buildAggTester(ast.AggFuncCount, mysql.TypeFloat, 5, 0, 5), @@ -58,8 +64,10 @@ func (s *testSuite) TestCount(c *C) { buildAggTester(ast.AggFuncCount, mysql.TypeDuration, 5, 0, 5), buildAggTester(ast.AggFuncCount, mysql.TypeJSON, 5, 0, 5), } - for _, test := range tests { - s.testAggFunc(c, test) + for i, test := range tests { + t.Run(fmt.Sprintf("%s_%d", test.funcName, i), func(t *testing.T) { + testAggFunc(t, test) + }) } tests2 := []multiArgsAggTest{ buildMultiArgsAggTester(ast.AggFuncCount, []byte{mysql.TypeLonglong, mysql.TypeLonglong}, mysql.TypeLonglong, 5, 0, 5), @@ -71,8 +79,10 @@ func (s *testSuite) TestCount(c *C) { buildMultiArgsAggTester(ast.AggFuncCount, []byte{mysql.TypeDuration, mysql.TypeDuration}, mysql.TypeLonglong, 5, 0, 5), buildMultiArgsAggTester(ast.AggFuncCount, []byte{mysql.TypeJSON, mysql.TypeJSON}, mysql.TypeLonglong, 5, 0, 5), } - for _, test := range tests2 { - s.testMultiArgsAggFunc(c, test) + for i, test := range tests2 { + t.Run(fmt.Sprintf("%s_%d", test.funcName, i), func(t *testing.T) { + testMultiArgsAggFunc(t, mock.NewContext(), test) + }) } tests3 := []aggTest{ @@ -86,7 +96,7 @@ func (s *testSuite) TestCount(c *C) { buildAggTester(ast.AggFuncCount, mysql.TypeJSON, 5, 0, 5), } for _, test := range tests3 { - s.testAggFunc(c, test) + testAggFunc(t, test) } tests4 := []multiArgsAggTest{ @@ -100,12 +110,16 @@ func (s *testSuite) TestCount(c *C) { buildMultiArgsAggTester(ast.AggFuncApproxCountDistinct, []byte{mysql.TypeJSON, mysql.TypeJSON}, mysql.TypeLonglong, 5, 0, 5), } - for _, test := range tests4 { - s.testMultiArgsAggFunc(c, test) + for i, test := range tests4 { + t.Run(fmt.Sprintf("%s_%d", test.funcName, i), func(t *testing.T) { + testMultiArgsAggFunc(t, mock.NewContext(), test) + }) } } -func (s *testSuite) TestMemCount(c *C) { +func TestMemCount(t *testing.T) { + t.Parallel() + tests := []aggMemTest{ 
buildAggMemTester(ast.AggFuncCount, mysql.TypeLonglong, 5, aggfuncs.DefPartialResult4CountSize, defaultUpdateMemDeltaGens, false), @@ -142,22 +156,26 @@ func (s *testSuite) TestMemCount(c *C) { buildAggMemTester(ast.AggFuncApproxCountDistinct, mysql.TypeString, 5, aggfuncs.DefPartialResult4ApproxCountDistinctSize, approxCountDistinctUpdateMemDeltaGens, true), } - for _, test := range tests { - s.testAggMemFunc(c, test) + for i, test := range tests { + t.Run(fmt.Sprintf("%s_%d", test.aggTest.funcName, i), func(t *testing.T) { + testAggMemFunc(t, test) + }) } } -func (s *testSuite) TestWriteTime(c *C) { - t, err := types.ParseDate(&(stmtctx.StatementContext{}), "2020-11-11") - c.Assert(err, IsNil) +func TestWriteTime(t *testing.T) { + t.Parallel() + + tt, err := types.ParseDate(&(stmtctx.StatementContext{}), "2020-11-11") + require.NoError(t, err) buf := make([]byte, 16) for i := range buf { buf[i] = uint8(255) } - aggfuncs.WriteTime(buf, t) + aggfuncs.WriteTime(buf, tt) for i := range buf { - c.Assert(buf[i] == uint8(255), IsFalse) + require.False(t, buf[i] == uint8(255)) } } diff --git a/executor/aggfuncs/func_group_concat.go b/executor/aggfuncs/func_group_concat.go index 1718b22682cfd..a7fb3b7359ca5 100644 --- a/executor/aggfuncs/func_group_concat.go +++ b/executor/aggfuncs/func_group_concat.go @@ -21,15 +21,14 @@ import ( "sync/atomic" "unsafe" - mysql "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/expression" + plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" - "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/set" ) @@ -479,7 +478,7 @@ func (e *groupConcatOrder) UpdatePartialResult(sctx sessionctx.Context, rowsInGr func (e *groupConcatOrder) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) 
(memDelta int64, err error) { // If order by exists, the parallel hash aggregation is forbidden in executorBuilder.buildHashAgg. // So MergePartialResult will not be called. - return 0, dbterror.ClassOptimizer.NewStd(mysql.ErrInternal).GenWithStack("groupConcatOrder.MergePartialResult should not be called") + return 0, plannercore.ErrInternal.GenWithStack("groupConcatOrder.MergePartialResult should not be called") } // SetTruncated will be called in `executorBuilder#buildHashAgg` with duck-type. @@ -599,7 +598,7 @@ func (e *groupConcatDistinctOrder) UpdatePartialResult(sctx sessionctx.Context, func (e *groupConcatDistinctOrder) MergePartialResult(sctx sessionctx.Context, src, dst PartialResult) (memDelta int64, err error) { // If order by exists, the parallel hash aggregation is forbidden in executorBuilder.buildHashAgg. // So MergePartialResult will not be called. - return 0, dbterror.ClassOptimizer.NewStd(mysql.ErrInternal).GenWithStack("groupConcatDistinctOrder.MergePartialResult should not be called") + return 0, plannercore.ErrInternal.GenWithStack("groupConcatDistinctOrder.MergePartialResult should not be called") } // GetDatumMemSize calculates the memory size of each types.Datum in sortRow.byItems. diff --git a/executor/aggfuncs/func_group_concat_test.go b/executor/aggfuncs/func_group_concat_test.go index be51720e3db0d..f609de53f858e 100644 --- a/executor/aggfuncs/func_group_concat_test.go +++ b/executor/aggfuncs/func_group_concat_test.go @@ -17,8 +17,8 @@ package aggfuncs_test import ( "bytes" "fmt" + "testing" - . 
"github.com/pingcap/check" "github.com/pingcap/tidb/executor/aggfuncs" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" @@ -28,37 +28,47 @@ import ( "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/hack" + "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/set" + "github.com/stretchr/testify/require" ) -func (s *testSuite) TestMergePartialResult4GroupConcat(c *C) { +func TestMergePartialResult4GroupConcat(t *testing.T) { + t.Parallel() + test := buildAggTester(ast.AggFuncGroupConcat, mysql.TypeString, 5, "0 1 2 3 4", "2 3 4", "0 1 2 3 4 2 3 4") - s.testMergePartialResult(c, test) + testMergePartialResult(t, test) } -func (s *testSuite) TestGroupConcat(c *C) { +func TestGroupConcat(t *testing.T) { + t.Parallel() + + ctx := mock.NewContext() + test := buildAggTester(ast.AggFuncGroupConcat, mysql.TypeString, 5, nil, "0 1 2 3 4") - s.testAggFunc(c, test) + testAggFunc(t, test) test2 := buildMultiArgsAggTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5, nil, "44 33 22 11 00") test2.orderBy = true - s.testMultiArgsAggFunc(c, test2) + testMultiArgsAggFunc(t, ctx, test2) defer func() { - err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, "1024") - c.Assert(err, IsNil) + err := variable.SetSessionSystemVar(ctx.GetSessionVars(), variable.GroupConcatMaxLen, "1024") + require.NoError(t, err) }() // minimum GroupConcatMaxLen is 4 for i := 4; i <= 7; i++ { - err := variable.SetSessionSystemVar(s.ctx.GetSessionVars(), variable.GroupConcatMaxLen, fmt.Sprint(i)) - c.Assert(err, IsNil) + err := variable.SetSessionSystemVar(ctx.GetSessionVars(), variable.GroupConcatMaxLen, fmt.Sprint(i)) + require.NoError(t, err) test2 = buildMultiArgsAggTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5, nil, "44 33 22 11 00"[:i]) test2.orderBy = true - s.testMultiArgsAggFunc(c, 
test2) + testMultiArgsAggFunc(t, ctx, test2) } } -func (s *testSuite) TestMemGroupConcat(c *C) { +func TestMemGroupConcat(t *testing.T) { + t.Parallel() + multiArgsTest1 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5, aggfuncs.DefPartialResult4GroupConcatSize+aggfuncs.DefBytesBufferSize, groupConcatMultiArgsUpdateMemDeltaGens, false) multiArgsTest2 := buildMultiArgsAggMemTester(ast.AggFuncGroupConcat, []byte{mysql.TypeString, mysql.TypeString}, mysql.TypeString, 5, @@ -72,8 +82,10 @@ func (s *testSuite) TestMemGroupConcat(c *C) { multiArgsTest4.multiArgsAggTest.orderBy = true multiArgsTests := []multiArgsAggMemTest{multiArgsTest1, multiArgsTest2, multiArgsTest3, multiArgsTest4} - for _, test := range multiArgsTests { - s.testMultiArgsAggMemFunc(c, test) + for i, test := range multiArgsTests { + t.Run(fmt.Sprintf("%s_%d", test.multiArgsAggTest.funcName, i), func(t *testing.T) { + testMultiArgsAggMemFunc(t, test) + }) } } diff --git a/executor/aggfuncs/func_stddevpop_test.go b/executor/aggfuncs/func_stddevpop_test.go index c3f2d688e9a0f..3dc95820351cd 100644 --- a/executor/aggfuncs/func_stddevpop_test.go +++ b/executor/aggfuncs/func_stddevpop_test.go @@ -15,25 +15,30 @@ package aggfuncs_test import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" ) -func (s *testSuite) TestMergePartialResult4Stddevpop(c *C) { +func TestMergePartialResult4Stddevpop(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncStddevPop, mysql.TypeDouble, 5, 1.4142135623730951, 0.816496580927726, 1.3169567191065923), } for _, test := range tests { - s.testMergePartialResult(c, test) + testMergePartialResult(t, test) } } -func (s *testSuite) TestStddevpop(c *C) { +func TestStddevpop(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncStddevPop, mysql.TypeDouble, 5, nil, 1.4142135623730951), } for _, test := range tests { - s.testAggFunc(c, test) + testAggFunc(t, test) } } diff --git a/executor/aggfuncs/func_stddevsamp_test.go b/executor/aggfuncs/func_stddevsamp_test.go index e8a45f688ab64..dc0f972c30712 100644 --- a/executor/aggfuncs/func_stddevsamp_test.go +++ b/executor/aggfuncs/func_stddevsamp_test.go @@ -15,25 +15,30 @@ package aggfuncs_test import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" ) -func (s *testSuite) TestMergePartialResult4Stddevsamp(c *C) { +func TestMergePartialResult4Stddevsamp(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncStddevSamp, mysql.TypeDouble, 5, 1.5811388300841898, 1, 1.407885953173359), } for _, test := range tests { - s.testMergePartialResult(c, test) + testMergePartialResult(t, test) } } -func (s *testSuite) TestStddevsamp(c *C) { +func TestStddevsamp(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncStddevSamp, mysql.TypeDouble, 5, nil, 1.5811388300841898), } for _, test := range tests { - s.testAggFunc(c, test) + testAggFunc(t, test) } } diff --git a/executor/aggfuncs/func_varsamp_test.go b/executor/aggfuncs/func_varsamp_test.go index c2cba624336e2..fceb8a2a1a559 100644 --- a/executor/aggfuncs/func_varsamp_test.go +++ b/executor/aggfuncs/func_varsamp_test.go @@ -15,25 +15,30 @@ package aggfuncs_test import ( - . 
"github.com/pingcap/check" + "testing" + "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/mysql" ) -func (s *testSuite) TestMergePartialResult4Varsamp(c *C) { +func TestMergePartialResult4Varsamp(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, 2.5, 1, 1.9821428571428572), } for _, test := range tests { - s.testMergePartialResult(c, test) + testMergePartialResult(t, test) } } -func (s *testSuite) TestVarsamp(c *C) { +func TestVarsamp(t *testing.T) { + t.Parallel() + tests := []aggTest{ buildAggTester(ast.AggFuncVarSamp, mysql.TypeDouble, 5, nil, 2.5), } for _, test := range tests { - s.testAggFunc(c, test) + testAggFunc(t, test) } } diff --git a/executor/builder.go b/executor/builder.go index 29c8d1d0e029c..0c5f11b96015b 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -54,7 +54,6 @@ import ( "github.com/pingcap/tidb/util/admin" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/cteutil" - "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/ranger" @@ -1265,7 +1264,7 @@ func (b *executorBuilder) buildHashJoin(v *plannercore.PhysicalHashJoin) Executo // consider collations for i := range v.EqualConditions { - chs, coll := v.EqualConditions[i].CharsetAndCollation(e.ctx) + chs, coll := v.EqualConditions[i].CharsetAndCollation() leftTypes[i].Charset, leftTypes[i].Collate = chs, coll rightTypes[i].Charset, rightTypes[i].Collate = chs, coll } @@ -3136,8 +3135,7 @@ func prunePartitionForInnerExecutor(ctx sessionctx.Context, tbl table.Table, sch return nil, false, nil, nil } if lookUpContent[0].keyColIDs == nil { - return nil, false, nil, - dbterror.ClassOptimizer.NewStd(mysql.ErrInternal).GenWithStack("cannot get column IDs when dynamic pruning") + return nil, false, nil, plannercore.ErrInternal.GenWithStack("cannot get column IDs when dynamic pruning") } 
keyColOffsets := make([]int, len(lookUpContent[0].keyColIDs)) for i, colID := range lookUpContent[0].keyColIDs { @@ -3149,8 +3147,7 @@ func prunePartitionForInnerExecutor(ctx sessionctx.Context, tbl table.Table, sch } } if offset == -1 { - return nil, false, nil, - dbterror.ClassOptimizer.NewStd(mysql.ErrInternal).GenWithStack("invalid column offset when dynamic pruning") + return nil, false, nil, plannercore.ErrInternal.GenWithStack("invalid column offset when dynamic pruning") } keyColOffsets[i] = offset } diff --git a/executor/executor.go b/executor/executor.go index 572f30649a6ad..7383941acd046 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -1682,6 +1682,8 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.CTEStorageMap = map[int]*CTEStorages{} sc.IsStaleness = false sc.LockTableIDs = make(map[int64]struct{}) + sc.EnableOptimizeTrace = false + sc.LogicalOptimizeTrace = nil sc.InitMemTracker(memory.LabelForSQLText, vars.MemQuotaQuery) sc.InitDiskTracker(memory.LabelForSQLText, -1) diff --git a/executor/grant.go b/executor/grant.go index bf4582952f380..00cbee41123df 100644 --- a/executor/grant.go +++ b/executor/grant.go @@ -155,7 +155,7 @@ func (e *GrantExec) Next(ctx context.Context, req *chunk.Chunk) error { } _, err := internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, `INSERT INTO %n.%n (Host, User, authentication_string, plugin) VALUES (%?, %?, %?, %?);`, - mysql.SystemDB, mysql.UserTable, user.User.Hostname, user.User.Username, pwd, authPlugin) + mysql.SystemDB, mysql.UserTable, strings.ToLower(user.User.Hostname), user.User.Username, pwd, authPlugin) if err != nil { return err } @@ -476,7 +476,7 @@ func (e *GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, int if err != nil { return err } - sqlexec.MustFormatSQL(sql, ` WHERE User=%? AND Host=%?`, user.User.Username, user.User.Hostname) + sqlexec.MustFormatSQL(sql, ` WHERE User=%? 
AND Host=%?`, user.User.Username, strings.ToLower(user.User.Hostname)) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) return err diff --git a/executor/revoke.go b/executor/revoke.go index ac279e65828b2..5a66cf3e405ee 100644 --- a/executor/revoke.go +++ b/executor/revoke.go @@ -216,7 +216,7 @@ func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv * if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, host) + sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, strings.ToLower(host)) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(context.Background(), sql.String()) return err diff --git a/executor/set.go b/executor/set.go index 5ad8790a746f3..65e21d470cc78 100644 --- a/executor/set.go +++ b/executor/set.go @@ -124,8 +124,8 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres if err != nil { return err } - // Some PD client dynamic options need to be checked and set here. - err = e.checkPDClientDynamicOption(name, valStr, sessionVars) + // Some PD client dynamic options need to be checked first and set here. 
+ err = e.checkPDClientDynamicOption(name, sessionVars) if err != nil { return err } @@ -204,16 +204,30 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres return nil } -func (e *SetExecutor) checkPDClientDynamicOption(name, valStr string, sessionVars *variable.SessionVars) error { - var err error +func (e *SetExecutor) checkPDClientDynamicOption(name string, sessionVars *variable.SessionVars) error { + if name != variable.TiDBTSOClientBatchMaxWaitTime && + name != variable.TiDBEnableTSOFollowerProxy { + return nil + } + var ( + err error + valStr string + ) + valStr, err = sessionVars.GlobalVarsAccessor.GetGlobalSysVar(name) + if err != nil { + return err + } switch name { case variable.TiDBTSOClientBatchMaxWaitTime: - var val int64 - val, err = strconv.ParseInt(valStr, 10, 64) + var val float64 + val, err = strconv.ParseFloat(valStr, 64) if err != nil { return err } - err = domain.GetDomain(e.ctx).SetPDClientDynamicOption(pd.MaxTSOBatchWaitInterval, time.Millisecond*time.Duration(val)) + err = domain.GetDomain(e.ctx).SetPDClientDynamicOption( + pd.MaxTSOBatchWaitInterval, + time.Duration(float64(time.Millisecond)*val), + ) if err != nil { return err } diff --git a/executor/set_test.go b/executor/set_test.go index d53cd64b20966..2595b4a3131f5 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -559,14 +559,26 @@ func (s *testSerialSuite1) TestSetVar(c *C) { tk.MustQuery("select @@session.tidb_opt_prefer_range_scan").Check(testkit.Rows("0")) tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 0.5") + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("0.5")) tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 1") tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("1")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 1.5") + 
tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("1.5")) tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 10") tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("10")) tk.MustExec("set global tidb_tso_client_batch_max_wait_time = -1") tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tso_client_batch_max_wait_time value: '-1'")) - tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 11") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tso_client_batch_max_wait_time value: '11'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = -0.01") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tso_client_batch_max_wait_time value: '-0.01'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 10.01") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tso_client_batch_max_wait_time value: '10.01'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("10")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 10.1") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_tso_client_batch_max_wait_time value: '10.1'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time").Check(testkit.Rows("10")) c.Assert(tk.ExecToErr("set tidb_tso_client_batch_max_wait_time = 1"), NotNil) tk.MustQuery("select @@tidb_enable_tso_follower_proxy").Check(testkit.Rows("0")) diff --git a/executor/show.go b/executor/show.go index 0dfbf98843f3a..9fcbb67e7cf66 100644 --- a/executor/show.go +++ b/executor/show.go @@ -1433,7 +1433,7 @@ func (e *ShowExec) fetchShowCreateUser(ctx 
context.Context) error { exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - stmt, err := exec.ParseWithParams(ctx, `SELECT plugin FROM %n.%n WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.UserTable, userName, hostName) + stmt, err := exec.ParseWithParams(ctx, `SELECT plugin FROM %n.%n WHERE User=%? AND Host=%?`, mysql.SystemDB, mysql.UserTable, userName, strings.ToLower(hostName)) if err != nil { return errors.Trace(err) } diff --git a/executor/simple.go b/executor/simple.go index f42de4b927e96..519c02a37834b 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -944,7 +944,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) } stmt, err := exec.ParseWithParams(ctx, `UPDATE %n.%n SET authentication_string=%?, plugin=%? WHERE Host=%? and User=%?;`, - mysql.SystemDB, mysql.UserTable, pwd, spec.AuthOpt.AuthPlugin, spec.User.Hostname, spec.User.Username, + mysql.SystemDB, mysql.UserTable, pwd, spec.AuthOpt.AuthPlugin, strings.ToLower(spec.User.Hostname), spec.User.Username, ) if err != nil { return err @@ -1158,7 +1158,7 @@ func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, use sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? and %n = %?;`, mysql.SystemDB, tableName, usernameColumn, users.NewUser.Username, hostColumn, strings.ToLower(users.NewUser.Hostname), - usernameColumn, users.OldUser.Username, hostColumn, users.OldUser.Hostname) + usernameColumn, users.OldUser.Username, hostColumn, strings.ToLower(users.OldUser.Hostname)) _, err := sqlExecutor.ExecuteInternal(context.TODO(), sql.String()) return err } @@ -1225,7 +1225,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // begin a transaction to delete a user. sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.UserTable, user.Hostname, user.Username) + sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? 
and User = %?;`, mysql.SystemDB, mysql.UserTable, strings.ToLower(user.Hostname), user.Username) if _, err = sqlExecutor.ExecuteInternal(context.TODO(), sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -1417,7 +1417,7 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error // update mysql.user exec := e.ctx.(sqlexec.RestrictedSQLExecutor) - stmt, err := exec.ParseWithParams(ctx, `UPDATE %n.%n SET authentication_string=%? WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, pwd, u, h) + stmt, err := exec.ParseWithParams(ctx, `UPDATE %n.%n SET authentication_string=%? WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, pwd, u, strings.ToLower(h)) if err != nil { return err } diff --git a/executor/simple_test.go b/executor/simple_test.go index 47359392d7a1c..271b51d591008 100644 --- a/executor/simple_test.go +++ b/executor/simple_test.go @@ -523,6 +523,12 @@ func (s *testSuite7) TestUser(c *C) { Check(testkit.Rows("engineering india")) tk.MustQuery("select user,host from mysql.user where user='engineering' and host = 'us'"). 
Check(testkit.Rows("engineering us")) + + tk.MustExec("drop role engineering@INDIA;") + tk.MustExec("drop role engineering@US;") + + tk.MustQuery("select user from mysql.user where user='engineering' and host = 'india'").Check(testkit.Rows()) + tk.MustQuery("select user from mysql.user where user='engineering' and host = 'us'").Check(testkit.Rows()) } func (s *testSuite3) TestSetPwd(c *C) { diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go index b155370d64462..407982af11e31 100644 --- a/expression/builtin_cast.go +++ b/expression/builtin_cast.go @@ -1931,7 +1931,7 @@ func WrapWithCastAsString(ctx sessionctx.Context, expr Expression) Expression { } tp := types.NewFieldType(mysql.TypeVarString) if expr.Coercibility() == CoercibilityExplicit { - tp.Charset, tp.Collate = expr.CharsetAndCollation(ctx) + tp.Charset, tp.Collate = expr.CharsetAndCollation() } else { tp.Charset, tp.Collate = ctx.GetSessionVars().GetCharsetInfo() } diff --git a/expression/builtin_encryption.go b/expression/builtin_encryption.go index f6badf0fcc72c..621c9d5e68195 100644 --- a/expression/builtin_encryption.go +++ b/expression/builtin_encryption.go @@ -856,6 +856,11 @@ func (b *builtinCompressSig) evalString(row chunk.Row) (string, bool, error) { if isNull || err != nil { return "", true, err } + strTp := b.args[0].GetType() + str, err = charset.NewEncoding(strTp.Charset).EncodeString(str) + if err != nil { + return "", false, err + } // According to doc: Empty strings are stored as empty strings. 
if len(str) == 0 { diff --git a/expression/builtin_encryption_test.go b/expression/builtin_encryption_test.go index 40e1d19f95d07..54eccf42a93d9 100644 --- a/expression/builtin_encryption_test.go +++ b/expression/builtin_encryption_test.go @@ -462,17 +462,24 @@ func decodeHex(str string) []byte { func TestCompress(t *testing.T) { t.Parallel() ctx := createContext(t) + fc := funcs[ast.Compress] + gbkStr, _ := charset.NewEncoding("gbk").EncodeString("你好") tests := []struct { + chs string in interface{} expect interface{} }{ - {"hello world", string(decodeHex("0B000000789CCA48CDC9C95728CF2FCA4901040000FFFF1A0B045D"))}, - {"", ""}, - {nil, nil}, + {"", "hello world", string(decodeHex("0B000000789CCA48CDC9C95728CF2FCA4901040000FFFF1A0B045D"))}, + {"", "", ""}, + {"", nil, nil}, + {"utf8mb4", "hello world", string(decodeHex("0B000000789CCA48CDC9C95728CF2FCA4901040000FFFF1A0B045D"))}, + {"gbk", "hello world", string(decodeHex("0B000000789CCA48CDC9C95728CF2FCA4901040000FFFF1A0B045D"))}, + {"utf8mb4", "你好", string(decodeHex("06000000789C7AB277C1D3A57B01010000FFFF10450489"))}, + {"gbk", gbkStr, string(decodeHex("04000000789C3AF278D76140000000FFFF07F40325"))}, } - - fc := funcs[ast.Compress] for _, test := range tests { + err := ctx.GetSessionVars().SetSystemVar(variable.CharacterSetConnection, test.chs) + require.NoErrorf(t, err, "%v", test) arg := types.NewDatum(test.in) f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{arg})) require.NoErrorf(t, err, "%v", test) diff --git a/expression/builtin_encryption_vec.go b/expression/builtin_encryption_vec.go index 3b6c0b5cffd4d..2048614d7b518 100644 --- a/expression/builtin_encryption_vec.go +++ b/expression/builtin_encryption_vec.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/encrypt" - "github.com/pingcap/tidb/util/hack" ) func (b *builtinAesDecryptSig) vectorized() bool { @@ -584,6 +583,9 @@ func (b *builtinCompressSig) 
vecEvalString(input *chunk.Chunk, result *chunk.Col if err := b.args[0].VecEvalString(b.ctx, input, buf); err != nil { return err } + bufTp := b.args[0].GetType() + bufEnc := charset.NewEncoding(bufTp.Charset) + var encodedBuf []byte result.ReserveString(n) for i := 0; i < n; i++ { @@ -592,14 +594,19 @@ func (b *builtinCompressSig) vecEvalString(input *chunk.Chunk, result *chunk.Col continue } - str := buf.GetString(i) + str := buf.GetBytes(i) // According to doc: Empty strings are stored as empty strings. if len(str) == 0 { result.AppendString("") } - compressed, err := deflate(hack.Slice(str)) + strBuf, err := bufEnc.Encode(encodedBuf, str) + if err != nil { + return err + } + + compressed, err := deflate(strBuf) if err != nil { result.AppendNull() continue @@ -617,7 +624,7 @@ func (b *builtinCompressSig) vecEvalString(input *chunk.Chunk, result *chunk.Col defer deallocateByteSlice(buffer) buffer = buffer[:resultLength] - binary.LittleEndian.PutUint32(buffer, uint32(len(str))) + binary.LittleEndian.PutUint32(buffer, uint32(len(strBuf))) copy(buffer[4:], compressed) if shouldAppendSuffix { diff --git a/expression/builtin_string.go b/expression/builtin_string.go index b5b495321e16c..13733abd4cc10 100644 --- a/expression/builtin_string.go +++ b/expression/builtin_string.go @@ -2446,8 +2446,14 @@ func (b *builtinCharSig) evalString(row chunk.Row) (string, bool, error) { } bigints = append(bigints, val) } - result := string(b.convertToBytes(bigints)) - return result, false, nil + + dBytes := b.convertToBytes(bigints) + resultBytes, err := charset.NewEncoding(b.tp.Charset).Decode(nil, dBytes) + if err != nil { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + return "", true, nil + } + return string(resultBytes), false, nil } type charLengthFunctionClass struct { @@ -3629,10 +3635,9 @@ func (b *builtinToBase64Sig) evalString(row chunk.Row) (d string, isNull bool, e return "", isNull, err } argTp := b.args[0].GetType() - if !types.IsBinaryStr(argTp) { - if 
encodedStr, err := charset.NewEncoding(argTp.Charset).EncodeString(str); err == nil { - str = encodedStr - } + str, err = charset.NewEncoding(argTp.Charset).EncodeString(str) + if err != nil { + return "", false, err } needEncodeLen := base64NeededEncodedLength(len(str)) if needEncodeLen == -1 { diff --git a/expression/builtin_string_test.go b/expression/builtin_string_test.go index 6a98d367107e6..28d98d0215091 100644 --- a/expression/builtin_string_test.go +++ b/expression/builtin_string_test.go @@ -1430,34 +1430,37 @@ func TestChar(t *testing.T) { }() tbl := []struct { - str string - iNum int64 - fNum float64 - result string + str string + iNum int64 + fNum float64 + charset interface{} + result interface{} + warnings int }{ - {"65", 66, 67.5, "ABD"}, // float - {"65", 16740, 67.5, "AAdD"}, // large num - {"65", -1, 67.5, "A\xff\xff\xff\xffD"}, // nagtive int - {"a", -1, 67.5, "\x00\xff\xff\xff\xffD"}, // invalid 'a' + {"65", 66, 67.5, "utf8", "ABD", 0}, // float + {"65", 16740, 67.5, "utf8", "AAdD", 0}, // large num + {"65", -1, 67.5, nil, "A\xff\xff\xff\xffD", 0}, // nagtive int + {"a", -1, 67.5, nil, "\x00\xff\xff\xff\xffD", 0}, // invalid 'a' + // TODO: Uncomment it when issue #29685 be closed + // {"65", -1, 67.5, "utf8", nil, 1}, // with utf8, return nil + // {"a", -1, 67.5, "utf8", nil, 2}, // with utf8, return nil + // TODO: Uncomment it when gbk be added into charsetInfos + // {"1234567", 1234567, 1234567, "gbk", "謬謬謬", 0}, // test char for gbk + // {"123456789", 123456789, 123456789, "gbk", nil, 3}, // invalid 123456789 in gbk } for _, v := range tbl { - for _, char := range []interface{}{"utf8", nil} { - fc := funcs[ast.CharFunc] - f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(v.str, v.iNum, v.fNum, char))) - require.NoError(t, err) - require.NotNil(t, f) - r, err := evalBuiltinFunc(f, chunk.Row{}) - require.NoError(t, err) - trequire.DatumEqual(t, types.NewDatum(v.result), r) + fc := funcs[ast.CharFunc] + f, err := 
fc.getFunction(ctx, datumsToConstants(types.MakeDatums(v.str, v.iNum, v.fNum, v.charset))) + require.NoError(t, err) + require.NotNil(t, f) + r, err := evalBuiltinFunc(f, chunk.Row{}) + require.NoError(t, err) + trequire.DatumEqual(t, types.NewDatum(v.result), r) + if v.warnings != 0 { + warnings := ctx.GetSessionVars().StmtCtx.GetWarnings() + require.Equal(t, v.warnings, len(warnings)) } } - - fc := funcs[ast.CharFunc] - f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums("65", 66, nil))) - require.NoError(t, err) - r, err := evalBuiltinFunc(f, chunk.Row{}) - require.NoError(t, err) - trequire.DatumEqual(t, types.NewDatum("AB"), r) } func TestCharLength(t *testing.T) { diff --git a/expression/builtin_string_vec.go b/expression/builtin_string_vec.go index 531d2379e1135..ca21e724f0dfa 100644 --- a/expression/builtin_string_vec.go +++ b/expression/builtin_string_vec.go @@ -2301,16 +2301,26 @@ func (b *builtinCharSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) for i := 0; i < l-1; i++ { bufint[i] = buf[i].Int64s() } + var resultBytes []byte + enc := charset.NewEncoding(b.tp.Charset) for i := 0; i < n; i++ { bigints = bigints[0:0] for j := 0; j < l-1; j++ { if buf[j].IsNull(i) { + result.AppendNull() continue } bigints = append(bigints, bufint[j][i]) } - tempString := string(b.convertToBytes(bigints)) - result.AppendString(tempString) + dBytes := b.convertToBytes(bigints) + + resultBytes, err := enc.Decode(resultBytes, dBytes) + if err != nil { + b.ctx.GetSessionVars().StmtCtx.AppendWarning(err) + result.AppendNull() + continue + } + result.AppendString(string(resultBytes)) } return nil } @@ -2463,7 +2473,6 @@ func (b *builtinToBase64Sig) vecEvalString(input *chunk.Chunk, result *chunk.Col argTp := b.args[0].GetType() enc := charset.NewEncoding(argTp.Charset) - isBinaryStr := types.IsBinaryStr(argTp) result.ReserveString(n) for i := 0; i < n; i++ { @@ -2471,12 +2480,9 @@ func (b *builtinToBase64Sig) vecEvalString(input *chunk.Chunk, result 
*chunk.Col result.AppendNull() continue } - str := buf.GetString(i) - if !isBinaryStr { - str, err = enc.EncodeString(str) - if err != nil { - return err - } + str, err := enc.EncodeString(buf.GetString(i)) + if err != nil { + return err } needEncodeLen := base64NeededEncodedLength(len(str)) if needEncodeLen == -1 { diff --git a/expression/collation.go b/expression/collation.go index ed7d673c56d28..7db5645941601 100644 --- a/expression/collation.go +++ b/expression/collation.go @@ -68,17 +68,7 @@ func (c *collationInfo) SetCharsetAndCollation(chs, coll string) { c.charset, c.collation = chs, coll } -func (c *collationInfo) CharsetAndCollation(ctx sessionctx.Context) (string, string) { - if c.charset != "" || c.collation != "" { - return c.charset, c.collation - } - - if ctx != nil && ctx.GetSessionVars() != nil { - c.charset, c.collation = ctx.GetSessionVars().GetCharsetInfo() - } - if c.charset == "" || c.collation == "" { - c.charset, c.collation = charset.GetDefaultCharsetAndCollate() - } +func (c *collationInfo) CharsetAndCollation() (string, string) { return c.charset, c.collation } @@ -99,10 +89,10 @@ type CollationInfo interface { // SetRepertoire sets a specified repertoire for this expression. SetRepertoire(r Repertoire) - // CharsetAndCollation ... - CharsetAndCollation(ctx sessionctx.Context) (string, string) + // CharsetAndCollation gets charset and collation. + CharsetAndCollation() (string, string) - // SetCharsetAndCollation ... + // SetCharsetAndCollation sets charset and collation. 
SetCharsetAndCollation(chs, coll string) } diff --git a/expression/constant_propagation.go b/expression/constant_propagation.go index b007d4b79a2eb..70fea317fe57b 100644 --- a/expression/constant_propagation.go +++ b/expression/constant_propagation.go @@ -152,7 +152,7 @@ func tryToReplaceCond(ctx sessionctx.Context, src *Column, tgt *Column, cond Exp } for idx, expr := range sf.GetArgs() { if src.Equal(nil, expr) { - _, coll := cond.CharsetAndCollation(ctx) + _, coll := cond.CharsetAndCollation() if tgt.GetType().Collate != coll { continue } diff --git a/expression/distsql_builtin.go b/expression/distsql_builtin.go index 99693677091bc..d4f0d5eb84fd9 100644 --- a/expression/distsql_builtin.go +++ b/expression/distsql_builtin.go @@ -967,7 +967,7 @@ func getSignatureByPB(ctx sessionctx.Context, sigCode tipb.ScalarFuncSig, tp *ti case tipb.ScalarFuncSig_HexStrArg: chs, args := "utf-8", base.getArgs() if len(args) == 1 { - chs, _ = args[0].CharsetAndCollation(ctx) + chs, _ = args[0].CharsetAndCollation() } f = &builtinHexStrArgSig{base, charset.NewEncoding(chs)} case tipb.ScalarFuncSig_InsertUTF8: diff --git a/expression/errors.go b/expression/errors.go index 0d2714ad683ad..cfadcc6811e02 100644 --- a/expression/errors.go +++ b/expression/errors.go @@ -34,6 +34,8 @@ var ( ErrInvalidArgumentForLogarithm = dbterror.ClassExpression.NewStd(mysql.ErrInvalidArgumentForLogarithm) ErrIncorrectType = dbterror.ClassExpression.NewStd(mysql.ErrIncorrectType) ErrInvalidTableSample = dbterror.ClassExpression.NewStd(mysql.ErrInvalidTableSample) + ErrInternal = dbterror.ClassOptimizer.NewStd(mysql.ErrInternal) + ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB) // All the un-exported errors are defined here: errFunctionNotExists = dbterror.ClassExpression.NewStd(mysql.ErrSpDoesNotExist) diff --git a/expression/expr_to_pb.go b/expression/expr_to_pb.go index 569b8450397d3..5cb01638eae0b 100644 --- a/expression/expr_to_pb.go +++ b/expression/expr_to_pb.go @@ -26,7 +26,6 @@ import ( 
"github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" - "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tipb/go-tipb" "go.uber.org/zap" @@ -38,8 +37,7 @@ func ExpressionsToPBList(sc *stmtctx.StatementContext, exprs []Expression, clien for _, expr := range exprs { v := pc.ExprToPB(expr) if v == nil { - return nil, dbterror.ClassOptimizer.NewStd(mysql.ErrInternal). - GenWithStack("expression %v cannot be pushed down", expr) + return nil, ErrInternal.GenWithStack("expression %v cannot be pushed down", expr) } pbExpr = append(pbExpr, v) } @@ -274,7 +272,7 @@ func (pc PbConverter) scalarFuncToPBExpr(expr *ScalarFunction) *tipb.Expr { // put collation information into the RetType enforcedly and push it down to TiKV/MockTiKV tp := *expr.RetType if collate.NewCollationEnabled() { - _, tp.Collate = expr.CharsetAndCollation(expr.GetCtx()) + _, tp.Collate = expr.CharsetAndCollation() } // Construct expression ProtoBuf. diff --git a/expression/expr_to_pb_serial_test.go b/expression/expr_to_pb_serial_test.go index d4e21165f688d..624e5dcb90a23 100644 --- a/expression/expr_to_pb_serial_test.go +++ b/expression/expr_to_pb_serial_test.go @@ -50,7 +50,7 @@ func TestPushCollationDown(t *testing.T) { require.NoError(t, err) expr, err := PBToExpr(pbExpr[0], tps, sc) require.NoError(t, err) - _, eColl := expr.CharsetAndCollation(nil) + _, eColl := expr.CharsetAndCollation() require.Equal(t, coll, eColl) } } diff --git a/expression/scalar_function.go b/expression/scalar_function.go index dd7805a6c282f..220eeb090a5b1 100644 --- a/expression/scalar_function.go +++ b/expression/scalar_function.go @@ -30,15 +30,9 @@ import ( "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/codec" - "github.com/pingcap/tidb/util/dbterror" "github.com/pingcap/tidb/util/hack" ) -// error definitions. 
-var ( - ErrNoDB = dbterror.ClassOptimizer.NewStd(mysql.ErrNoDB) -) - // ScalarFunction is the function that returns a value. type ScalarFunction struct { FuncName model.CIStr @@ -286,7 +280,7 @@ func (sf *ScalarFunction) Clone() Expression { Function: sf.Function.Clone(), hashcode: sf.hashcode, } - c.SetCharsetAndCollation(sf.CharsetAndCollation(sf.GetCtx())) + c.SetCharsetAndCollation(sf.CharsetAndCollation()) c.SetCoercibility(sf.Coercibility()) return c } @@ -558,12 +552,12 @@ func (sf *ScalarFunction) SetCoercibility(val Coercibility) { sf.Function.SetCoercibility(val) } -// CharsetAndCollation ... -func (sf *ScalarFunction) CharsetAndCollation(ctx sessionctx.Context) (string, string) { - return sf.Function.CharsetAndCollation(ctx) +// CharsetAndCollation gets charset and collation. +func (sf *ScalarFunction) CharsetAndCollation() (string, string) { + return sf.Function.CharsetAndCollation() } -// SetCharsetAndCollation ... +// SetCharsetAndCollation sets charset and collation. func (sf *ScalarFunction) SetCharsetAndCollation(chs, coll string) { sf.Function.SetCharsetAndCollation(chs, coll) } diff --git a/expression/util_test.go b/expression/util_test.go index 5d44c8137eee9..75e6b9c67113f 100644 --- a/expression/util_test.go +++ b/expression/util_test.go @@ -608,7 +608,7 @@ func (m *MockExpr) SetCoercibility(Coercibility) func (m *MockExpr) Repertoire() Repertoire { return UNICODE } func (m *MockExpr) SetRepertoire(Repertoire) {} -func (m *MockExpr) CharsetAndCollation(ctx sessionctx.Context) (string, string) { +func (m *MockExpr) CharsetAndCollation() (string, string) { return "", "" } func (m *MockExpr) SetCharsetAndCollation(chs, coll string) {} diff --git a/go.mod b/go.mod index d6f04fbd750ad..d1a7543ff5720 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/pingcap/errors v0.11.5-0.20211009033009-93128226aaa3 github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 - 
github.com/pingcap/kvproto v0.0.0-20211011060348-d957056f1551 + github.com/pingcap/kvproto v0.0.0-20211029081837-3c7bd947cf9b github.com/pingcap/log v0.0.0-20210906054005-afc726e70354 github.com/pingcap/sysutil v0.0.0-20210730114356-fcd8a63f68c5 github.com/pingcap/tidb-tools v5.2.2-0.20211019062242-37a8bef2fa17+incompatible @@ -66,7 +66,7 @@ require ( github.com/stretchr/testify v1.7.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 github.com/tikv/client-go/v2 v2.0.0-alpha.0.20211103022933-5ae005dac331 - github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae + github.com/tikv/pd v1.1.0-beta.0.20211104095303-69c86d05d379 github.com/twmb/murmur3 v1.1.3 github.com/uber/jaeger-client-go v2.22.1+incompatible github.com/uber/jaeger-lib v2.4.1+incompatible // indirect diff --git a/go.sum b/go.sum index bdad95501e5f7..053fb24f063b3 100644 --- a/go.sum +++ b/go.sum @@ -585,8 +585,8 @@ github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210819164333-bd5706b9d9f2/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210915062418-0f5764a128ad/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20211011060348-d957056f1551 h1:aRx2l2TAeYNPPUc+lk5dEFCXfUGxR/C2fbt/YA5nqiQ= -github.com/pingcap/kvproto v0.0.0-20211011060348-d957056f1551/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20211029081837-3c7bd947cf9b h1:/aj6ITlHSJZmsm4hIMOgJAAZti+Dmq11tCyKedA6Dcs= +github.com/pingcap/kvproto v0.0.0-20211029081837-3c7bd947cf9b/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod 
h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= @@ -597,6 +597,7 @@ github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041 github.com/pingcap/sysutil v0.0.0-20210730114356-fcd8a63f68c5 h1:7rvAtZe/ZUzOKzgriNPQoBNvleJXBk4z7L3Z47+tS98= github.com/pingcap/sysutil v0.0.0-20210730114356-fcd8a63f68c5/go.mod h1:XsOaV712rUk63aOEKYP9PhXTIE3FMNHmC2r1wX5wElY= github.com/pingcap/tidb-dashboard v0.0.0-20211008050453-a25c25809529/go.mod h1:OCXbZTBTIMRcIt0jFsuCakZP+goYRv6IjawKbwLS2TQ= +github.com/pingcap/tidb-dashboard v0.0.0-20211031170437-08e58c069a2a/go.mod h1:OCXbZTBTIMRcIt0jFsuCakZP+goYRv6IjawKbwLS2TQ= github.com/pingcap/tidb-tools v5.2.2-0.20211019062242-37a8bef2fa17+incompatible h1:c7+izmker91NkjkZ6FgTlmD4k1A5FLOAq+li6Ki2/GY= github.com/pingcap/tidb-tools v5.2.2-0.20211019062242-37a8bef2fa17+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= github.com/pingcap/tipb v0.0.0-20211105090418-71142a4d40e3 h1:xnp/Qkk5gELlB8TaY6oro0JNXMBXTafNVxU/vbrNU8I= @@ -714,8 +715,9 @@ github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tikv/client-go/v2 v2.0.0-alpha.0.20211103022933-5ae005dac331 h1:bKvceeTA5ZwWMOGMbnNJXRCpf8QThxRgaUkFg5u9HBg= github.com/tikv/client-go/v2 v2.0.0-alpha.0.20211103022933-5ae005dac331/go.mod h1:iiwtsCxcbNLK5i9VRYGvdcihgHXTKy2ukWjoaJsrphg= -github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae h1:PmnkhiOopgMZYDQ7Htj1kt/zwW4MEOUL+Dem6WLZISY= github.com/tikv/pd v1.1.0-beta.0.20211029083450-e65f0c55b6ae/go.mod h1:varH0IE0jJ9E9WN2Ei/N6pajMlPkcXdDEf7f5mmsUVQ= +github.com/tikv/pd v1.1.0-beta.0.20211104095303-69c86d05d379 h1:nFm1jQDz1iRktoyV2SyM5zVk6+PJHQNunJZ7ZJcqzAo= +github.com/tikv/pd v1.1.0-beta.0.20211104095303-69c86d05d379/go.mod h1:y+09hAUXJbrd4c0nktL74zXDDuD7atGtfOKxL90PCOE= 
github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M= github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek= github.com/tklauser/numcpus v0.2.1 h1:ct88eFm+Q7m2ZfXJdan1xYoXKlmwsfP+k88q05KvlZc= diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index a0bb8a8070726..c800f5c577a4f 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -2071,3 +2071,58 @@ func (s *testPlanSuite) TestWindowLogicalPlanAmbiguous(c *C) { } } } + +func (s *testPlanSuite) TestLogicalOptimizeWithTraceEnabled(c *C) { + sql := "select * from t where a in (1,2)" + defer testleak.AfterTest(c)() + tt := []struct { + flags []uint64 + steps int + }{ + { + flags: []uint64{ + flagEliminateAgg, + flagPushDownAgg}, + steps: 2, + }, + { + flags: []uint64{ + flagEliminateAgg, + flagPushDownAgg, + flagPrunColumns, + flagBuildKeyInfo, + }, + steps: 4, + }, + { + flags: []uint64{}, + steps: 0, + }, + } + + for i, tc := range tt { + comment := Commentf("case:%v sql:%s", i, sql) + stmt, err := s.ParseOneStmt(sql, "", "") + c.Assert(err, IsNil, comment) + err = Preprocess(s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) + c.Assert(err, IsNil, comment) + sctx := MockContext() + sctx.GetSessionVars().StmtCtx.EnableOptimizeTrace = true + builder, _ := NewPlanBuilder().Init(sctx, s.is, &hint.BlockHintProcessor{}) + domain.GetDomain(sctx).MockInfoCacheAndLoadInfoSchema(s.is) + ctx := context.TODO() + p, err := builder.Build(ctx, stmt) + c.Assert(err, IsNil) + flag := uint64(0) + for _, f := range tc.flags { + flag = flag | f + } + p, err = logicalOptimize(ctx, flag, p.(LogicalPlan)) + c.Assert(err, IsNil) + _, ok := p.(*LogicalProjection) + c.Assert(ok, IsTrue) + otrace := sctx.GetSessionVars().StmtCtx.LogicalOptimizeTrace + c.Assert(otrace, NotNil) + c.Assert(len(otrace.Steps), Equals, tc.steps) + } +} diff --git a/planner/core/logical_plans.go 
b/planner/core/logical_plans.go index 0401ae967283d..2258e2b4de31d 100644 --- a/planner/core/logical_plans.go +++ b/planner/core/logical_plans.go @@ -179,7 +179,7 @@ func (p *LogicalJoin) GetJoinKeys() (leftKeys, rightKeys []*expression.Column, i // the join keys of EqualConditions func (p *LogicalJoin) GetPotentialPartitionKeys() (leftKeys, rightKeys []*property.MPPPartitionColumn) { for _, expr := range p.EqualConditions { - _, coll := expr.CharsetAndCollation(p.ctx) + _, coll := expr.CharsetAndCollation() collateID := property.GetCollateIDByNameForPartition(coll) leftKeys = append(leftKeys, &property.MPPPartitionColumn{Col: expr.GetArgs()[0].(*expression.Column), CollateID: collateID}) rightKeys = append(rightKeys, &property.MPPPartitionColumn{Col: expr.GetArgs()[1].(*expression.Column), CollateID: collateID}) diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 7cc352c9b8447..55127627b7477 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/types" utilhint "github.com/pingcap/tidb/util/hint" "github.com/pingcap/tidb/util/set" + "github.com/pingcap/tidb/util/tracing" "go.uber.org/atomic" ) @@ -83,9 +84,44 @@ var optRuleList = []logicalOptRule{ &columnPruner{}, // column pruning again at last, note it will mess up the results of buildKeySolver } +type logicalOptimizeOp struct { + // tracer is going to track optimize steps during rule optimizing + tracer *tracing.LogicalOptimizeTracer +} + +func defaultLogicalOptimizeOption() *logicalOptimizeOp { + return &logicalOptimizeOp{} +} + +func (op *logicalOptimizeOp) withEnableOptimizeTracer(tracer *tracing.LogicalOptimizeTracer) *logicalOptimizeOp { + op.tracer = tracer + return op +} + +func (op *logicalOptimizeOp) appendBeforeRuleOptimize(name string, before LogicalPlan) { + if op.tracer == nil { + return + } + op.tracer.AppendRuleTracerBeforeRuleOptimize(name, before.buildLogicalPlanTrace()) +} + +func (op *logicalOptimizeOp)
appendStepToCurrent(id int, tp, reason, action string) { + if op.tracer == nil { + return + } + op.tracer.AppendRuleTracerStepToCurrent(id, tp, reason, action) +} + +func (op *logicalOptimizeOp) trackAfterRuleOptimize(after LogicalPlan) { + if op.tracer == nil { + return + } + op.tracer.TrackLogicalPlanAfterRuleOptimize(after.buildLogicalPlanTrace()) +} + // logicalOptRule means a logical optimizing rule, which contains decorrelate, ppd, column pruning, etc. type logicalOptRule interface { - optimize(context.Context, LogicalPlan) (LogicalPlan, error) + optimize(context.Context, LogicalPlan, *logicalOptimizeOp) (LogicalPlan, error) name() string } @@ -335,6 +371,15 @@ func enableParallelApply(sctx sessionctx.Context, plan PhysicalPlan) PhysicalPla } func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (LogicalPlan, error) { + opt := defaultLogicalOptimizeOption() + stmtCtx := logic.SCtx().GetSessionVars().StmtCtx + if stmtCtx.EnableOptimizeTrace { + tracer := &tracing.LogicalOptimizeTracer{Steps: make([]*tracing.LogicalRuleOptimizeTracer, 0)} + opt = opt.withEnableOptimizeTracer(tracer) + defer func() { + stmtCtx.LogicalOptimizeTrace = tracer + }() + } var err error for i, rule := range optRuleList { // The order of flags is same as the order of optRule in the list. 
@@ -343,10 +388,12 @@ func logicalOptimize(ctx context.Context, flag uint64, logic LogicalPlan) (Logic if flag&(1<<uint(i)) == 0 { continue } - logic, err = rule.optimize(ctx, logic) + opt.appendBeforeRuleOptimize(rule.name(), logic) + logic, err = rule.optimize(ctx, logic, opt) + opt.trackAfterRuleOptimize(logic) if err != nil { return nil, err } } return logic, err } diff --git a/planner/core/rule_predicate_push_down.go b/planner/core/rule_predicate_push_down.go index 3b7be4fbee6be..793dbc0d31cbb 100644 --- a/planner/core/rule_predicate_push_down.go +++ b/planner/core/rule_predicate_push_down.go @@ -27,7 +27,7 @@ import ( type ppdSolver struct{} -func (s *ppdSolver) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { +func (s *ppdSolver) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { _, p := lp.PredicatePushDown(nil) return p, nil } diff --git a/planner/core/rule_result_reorder.go b/planner/core/rule_result_reorder.go index 3a4069e9945fc..7ea7d73556b4d 100644 --- a/planner/core/rule_result_reorder.go +++ b/planner/core/rule_result_reorder.go @@ -37,7 +37,7 @@ import ( type resultReorder struct { } -func (rs *resultReorder) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { +func (rs *resultReorder) optimize(ctx context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { ordered := rs.completeSort(lp) if !ordered { lp = rs.injectSort(lp) diff --git a/planner/core/rule_topn_push_down.go b/planner/core/rule_topn_push_down.go index 60a8fb92b39af..e6234bbc3f3dc 100644 --- a/planner/core/rule_topn_push_down.go +++ b/planner/core/rule_topn_push_down.go @@ -26,7 +26,7 @@ import ( type pushDownTopNOptimizer struct { } -func (s *pushDownTopNOptimizer) optimize(ctx context.Context, p LogicalPlan) (LogicalPlan, error) { +func (s *pushDownTopNOptimizer) optimize(ctx context.Context, p LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) { return p.pushDownTopN(nil), nil } diff --git a/planner/util/path.go b/planner/util/path.go index 8c4abd6a94ab5..694ea781959aa 100644 --- a/planner/util/path.go +++ b/planner/util/path.go @@ -116,7 +116,7 @@ func isColEqCorColOrConstant(ctx sessionctx.Context, filter expression.Expressio 
if !ok || f.FuncName.L != ast.EQ { return false } - _, collation := f.CharsetAndCollation(ctx) + _, collation := f.CharsetAndCollation() if c, ok := f.GetArgs()[0].(*expression.Column); ok { if c.RetType.EvalType() == types.ETString && !collate.CompatibleCollate(collation, c.RetType.Collate) { return false diff --git a/server/server.go b/server/server.go index 36b564f44d410..e656569a14a77 100644 --- a/server/server.go +++ b/server/server.go @@ -618,6 +618,9 @@ func (s *Server) ShowProcessList() map[uint64]*util.ProcessInfo { defer s.rwlock.RUnlock() rs := make(map[uint64]*util.ProcessInfo, len(s.clients)) for _, client := range s.clients { + if atomic.LoadInt32(&client.status) == connStatusWaitShutdown { + continue + } if pi := client.ctx.ShowProcess(); pi != nil { rs[pi.ID] = pi } diff --git a/session/session_test.go b/session/session_test.go index 4811eaac34038..ceca24f509680 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -5718,26 +5718,28 @@ func (s *testSessionSuite) TestSetPDClientDynmaicOption(c *C) { var err error tk := testkit.NewTestKit(c, s.store) tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 0.5;") + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("0.5")) tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 1;") tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("1")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 1.5;") + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("1.5")) tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 10;") tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("10")) err = tk.ExecToErr("set tidb_tso_client_batch_max_wait_time = 0;") c.Assert(err, NotNil) - if *withTiKV { - err = tk.ExecToErr("set global tidb_tso_client_batch_max_wait_time = 
-1;") - c.Assert(err, NotNil) - c.Assert(err, ErrorMatches, ".*invalid max TSO batch wait interval.*") - err = tk.ExecToErr("set global tidb_tso_client_batch_max_wait_time = 11;") - c.Assert(err, NotNil) - c.Assert(err, ErrorMatches, ".*invalid max TSO batch wait interval.*") - } else { - // Because the PD client in the unit test may be nil, so we only check the warning here. - tk.MustExec("set global tidb_tso_client_batch_max_wait_time = -1;") - tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '-1'")) - tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 11;") - tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '11'")) - } + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = -1;") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '-1'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = -0.1;") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '-0.1'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("0")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 10.1;") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '10.1'")) + tk.MustQuery("select @@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("10")) + tk.MustExec("set global tidb_tso_client_batch_max_wait_time = 11;") + tk.MustQuery("show warnings").Check(testutil.RowsWithSep("|", "Warning|1292|Truncated incorrect tidb_tso_client_batch_max_wait_time value: '11'")) + tk.MustQuery("select 
@@tidb_tso_client_batch_max_wait_time;").Check(testkit.Rows("10")) tk.MustQuery("select @@tidb_enable_tso_follower_proxy;").Check(testkit.Rows("0")) tk.MustExec("set global tidb_enable_tso_follower_proxy = on;") diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index df91df21ff3bb..03c488c883a0c 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/util/execdetails" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/resourcegrouptag" + "github.com/pingcap/tidb/util/tracing" "github.com/tikv/client-go/v2/util" atomic2 "go.uber.org/atomic" "go.uber.org/zap" @@ -191,6 +192,11 @@ type StatementContext struct { OptimInfo map[int]string // InVerboseExplain indicates the statement is "explain format='verbose' ...". InVerboseExplain bool + + // EnableOptimizeTrace indicates whether the statement is enable optimize trace + EnableOptimizeTrace bool + // LogicalOptimizeTrace indicates the trace for optimize + LogicalOptimizeTrace *tracing.LogicalOptimizeTracer } // StmtHints are SessionVars related sql hints. 
diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 913f09b93e486..c395d98c85356 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -38,6 +38,7 @@ import ( "github.com/pingcap/tidb/parser" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/auth" + "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/terror" @@ -1258,6 +1259,7 @@ func NewSessionVars() *SessionVars { if !EnableLocalTxn.Load() { vars.TxnScope = kv.NewGlobalTxnScopeVar() } + vars.systems[CharacterSetConnection], vars.systems[CollationConnection] = charset.GetDefaultCharsetAndCollate() return vars } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index b320e3a60fd66..4d0a77a5cb511 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -807,12 +807,14 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeGlobal, Name: InitConnect, Value: ""}, /* TiDB specific variables */ - {Scope: ScopeGlobal, Name: TiDBTSOClientBatchMaxWaitTime, Value: strconv.Itoa(DefTiDBTSOClientBatchMaxWaitTime), Type: TypeInt, MinValue: 0, MaxValue: 10, GetGlobal: func(sv *SessionVars) (string, error) { - return strconv.Itoa(int(MaxTSOBatchWaitInterval.Load())), nil - }, SetGlobal: func(s *SessionVars, val string) error { - MaxTSOBatchWaitInterval.Store(tidbOptInt64(val, DefTiDBTSOClientBatchMaxWaitTime)) - return nil - }}, + {Scope: ScopeGlobal, Name: TiDBTSOClientBatchMaxWaitTime, Value: strconv.FormatFloat(DefTiDBTSOClientBatchMaxWaitTime, 'f', -1, 64), Type: TypeFloat, MinValue: 0, MaxValue: 10, + GetGlobal: func(sv *SessionVars) (string, error) { + return strconv.FormatFloat(MaxTSOBatchWaitInterval.Load(), 'f', -1, 64), nil + }, + SetGlobal: func(s *SessionVars, val string) error { + MaxTSOBatchWaitInterval.Store(tidbOptFloat64(val, DefTiDBTSOClientBatchMaxWaitTime)) + return nil + }}, {Scope: 
ScopeGlobal, Name: TiDBEnableTSOFollowerProxy, Value: BoolToOnOff(DefTiDBEnableTSOFollowerProxy), Type: TypeBool, GetGlobal: func(sv *SessionVars) (string, error) { return BoolToOnOff(EnableTSOFollowerProxy.Load()), nil }, SetGlobal: func(s *SessionVars, val string) error { diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go index ea461d2864ed2..8ae408eee6d67 100644 --- a/sessionctx/variable/sysvar_test.go +++ b/sessionctx/variable/sysvar_test.go @@ -807,3 +807,13 @@ func TestDDLWorkers(t *testing.T) { require.NoError(t, err) require.Equal(t, val, "100") // unchanged } + +func TestDefaultCharsetAndCollation(t *testing.T) { + vars := NewSessionVars() + val, err := GetSessionOrGlobalSystemVar(vars, CharacterSetConnection) + require.NoError(t, err) + require.Equal(t, val, mysql.DefaultCharset) + val, err = GetSessionOrGlobalSystemVar(vars, CollationConnection) + require.NoError(t, err) + require.Equal(t, val, mysql.DefaultCollationName) +} diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index dcc7fc8eba57f..ef7c001841d13 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -764,7 +764,7 @@ const ( DefTiDBTopSQLReportIntervalSeconds = 60 DefTiDBTmpTableMaxSize = 64 << 20 // 64MB. 
DefTiDBEnableLocalTxn = false - DefTiDBTSOClientBatchMaxWaitTime = 0 // 0ms + DefTiDBTSOClientBatchMaxWaitTime = 0.0 // 0ms DefTiDBEnableTSOFollowerProxy = false DefTiDBEnableOrderedResultMode = false DefTiDBEnablePseudoForOutdatedStats = true @@ -802,7 +802,7 @@ var ( ReportIntervalSeconds: atomic.NewInt64(DefTiDBTopSQLReportIntervalSeconds), } EnableLocalTxn = atomic.NewBool(DefTiDBEnableLocalTxn) - MaxTSOBatchWaitInterval = atomic.NewInt64(DefTiDBTSOClientBatchMaxWaitTime) + MaxTSOBatchWaitInterval = atomic.NewFloat64(DefTiDBTSOClientBatchMaxWaitTime) EnableTSOFollowerProxy = atomic.NewBool(DefTiDBEnableTSOFollowerProxy) RestrictedReadOnly = atomic.NewBool(DefTiDBRestrictedReadOnly) ) diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go index 1a8da7c96ad05..293f57755ed77 100644 --- a/store/gcworker/gc_worker.go +++ b/store/gcworker/gc_worker.go @@ -18,6 +18,7 @@ import ( "bytes" "container/heap" "context" + "encoding/hex" "encoding/json" "fmt" "math" @@ -45,7 +46,9 @@ import ( "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/util/admin" + "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/logutil" tikverr "github.com/tikv/client-go/v2/error" tikvstore "github.com/tikv/client-go/v2/kv" @@ -703,6 +706,9 @@ func (w *GCWorker) deleteRanges(ctx context.Context, safePoint uint64, concurren startKey, endKey := r.Range() err = w.doUnsafeDestroyRangeRequest(ctx, startKey, endKey, concurrency) + failpoint.Inject("ignoreDeleteRangeFailed", func() { + err = nil + }) if err != nil { logutil.Logger(ctx).Error("[gc worker] delete range failed on range", zap.String("uuid", w.uuid), @@ -1938,17 +1944,51 @@ func (w *GCWorker) doGCLabelRules(dr util.DelRangeTask) (err error) { startKey kv.Key physicalTableIDs []int64 ruleIDs []string + rules map[string]*label.Rule ) if err = historyJob.DecodeArgs(&startKey, 
&physicalTableIDs, &ruleIDs); err != nil { return } + // TODO: Here we need to get rules from PD and filter the rules which is not elegant. We should find a better way. + rules, err = infosync.GetLabelRules(context.TODO(), ruleIDs) + if err != nil { + return + } + + ruleIDs = getGCRules(append(physicalTableIDs, historyJob.TableID), rules) patch := label.NewRulePatch([]*label.Rule{}, ruleIDs) err = infosync.UpdateLabelRules(context.TODO(), patch) } return } +func getGCRules(ids []int64, rules map[string]*label.Rule) []string { + oldRange := make(map[string]struct{}) + for _, id := range ids { + startKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(id))) + endKey := hex.EncodeToString(codec.EncodeBytes(nil, tablecodec.GenTableRecordPrefix(id+1))) + oldRange[startKey+endKey] = struct{}{} + } + + var gcRules []string + for _, rule := range rules { + find := false + for _, d := range rule.Data { + if r, ok := d.(map[string]interface{}); ok { + nowRange := fmt.Sprintf("%s%s", r["start_key"], r["end_key"]) + if _, ok := oldRange[nowRange]; ok { + find = true + } + } + } + if find { + gcRules = append(gcRules, rule.ID) + } + } + return gcRules +} + // RunGCJob sends GC command to KV. It is exported for kv api, do not use it with GCWorker at the same time. 
func RunGCJob(ctx context.Context, s tikv.Storage, pd pd.Client, safePoint uint64, identifier string, concurrency int) error { gcWorker := &GCWorker{ @@ -2053,6 +2093,7 @@ func NewMockGCWorker(store kv.Storage) (*MockGCWorker, error) { gcIsRunning: false, lastFinish: time.Now(), done: make(chan error), + pdClient: store.(tikv.Storage).GetRegionCache().PDClient(), } return &MockGCWorker{worker: worker}, nil } diff --git a/util/hint/hint_processor.go b/util/hint/hint_processor.go index 9cbf41e93c0a2..be0d5bcd02a2b 100644 --- a/util/hint/hint_processor.go +++ b/util/hint/hint_processor.go @@ -32,6 +32,7 @@ import ( ) var supportedHintNameForInsertStmt = map[string]struct{}{} +var errWarnConflictingHint = dbterror.ClassUtil.NewStd(errno.ErrWarnConflictingHint) func init() { supportedHintNameForInsertStmt["memory_quota"] = struct{}{} @@ -118,8 +119,7 @@ func checkInsertStmtHintDuplicated(node ast.Node, sctx sessionctx.Context) { } if duplicatedHint != nil { hint := fmt.Sprintf("%s(`%v`)", duplicatedHint.HintName.O, duplicatedHint.HintData) - err := dbterror.ClassUtil.NewStd(errno.ErrWarnConflictingHint).FastGenByArgs(hint) - sctx.GetSessionVars().StmtCtx.AppendWarning(err) + sctx.GetSessionVars().StmtCtx.AppendWarning(errWarnConflictingHint.FastGenByArgs(hint)) } } } diff --git a/util/ranger/checker.go b/util/ranger/checker.go index a61e31a13b24c..8431bc05abef6 100644 --- a/util/ranger/checker.go +++ b/util/ranger/checker.go @@ -46,7 +46,7 @@ func (c *conditionChecker) check(condition expression.Expression) bool { } func (c *conditionChecker) checkScalarFunction(scalar *expression.ScalarFunction) bool { - _, collation := scalar.CharsetAndCollation(scalar.GetCtx()) + _, collation := scalar.CharsetAndCollation() switch scalar.FuncName.L { case ast.LogicOr, ast.LogicAnd: return c.check(scalar.GetArgs()[0]) && c.check(scalar.GetArgs()[1]) @@ -111,7 +111,7 @@ func (c *conditionChecker) checkScalarFunction(scalar *expression.ScalarFunction } func (c *conditionChecker) 
checkLikeFunc(scalar *expression.ScalarFunction) bool { - _, collation := scalar.CharsetAndCollation(scalar.GetCtx()) + _, collation := scalar.CharsetAndCollation() if !collate.CompatibleCollate(scalar.GetArgs()[0].GetType().Collate, collation) { return false } diff --git a/util/ranger/detacher.go b/util/ranger/detacher.go index e19a777006876..9b03bb48f72db 100644 --- a/util/ranger/detacher.go +++ b/util/ranger/detacher.go @@ -103,7 +103,7 @@ func getPotentialEqOrInColOffset(expr expression.Expression, cols []*expression. if !ok { return -1 } - _, collation := expr.CharsetAndCollation(f.GetCtx()) + _, collation := expr.CharsetAndCollation() switch f.FuncName.L { case ast.LogicOr: dnfItems := expression.FlattenDNFConditions(f) diff --git a/util/ranger/points.go b/util/ranger/points.go index da6489f4d7c69..a02f77cc08909 100644 --- a/util/ranger/points.go +++ b/util/ranger/points.go @@ -627,7 +627,7 @@ func (r *builder) buildFromIn(expr *expression.ScalarFunction) ([]*point, bool) } func (r *builder) newBuildFromPatternLike(expr *expression.ScalarFunction) []*point { - _, collation := expr.CharsetAndCollation(expr.GetCtx()) + _, collation := expr.CharsetAndCollation() if !collate.CompatibleCollate(expr.GetArgs()[0].GetType().Collate, collation) { return getFullRange() } diff --git a/util/tracing/opt_trace.go b/util/tracing/opt_trace.go new file mode 100644 index 0000000000000..6c0e7243bcdf2 --- /dev/null +++ b/util/tracing/opt_trace.go @@ -0,0 +1,81 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tracing + +// LogicalPlanTrace indicates the trace information for the LogicalPlan +type LogicalPlanTrace struct { + ID int + TP string + Children []*LogicalPlanTrace + + // ExplainInfo should be implemented by each implemented LogicalPlan + ExplainInfo string +} + +// LogicalOptimizeTracer indicates the trace for the whole logicalOptimize processing +type LogicalOptimizeTracer struct { + Steps []*LogicalRuleOptimizeTracer + // curRuleTracer indicates the current rule Tracer during optimize by rule + curRuleTracer *LogicalRuleOptimizeTracer +} + +// AppendRuleTracerBeforeRuleOptimize adds the plan tracer before optimize +func (tracer *LogicalOptimizeTracer) AppendRuleTracerBeforeRuleOptimize(name string, before *LogicalPlanTrace) { + ruleTracer := buildLogicalRuleOptimizeTracerBeforeOptimize(name, before) + tracer.Steps = append(tracer.Steps, ruleTracer) + tracer.curRuleTracer = ruleTracer +} + +// AppendRuleTracerStepToCurrent adds a rule optimize step to the current rule tracer +func (tracer *LogicalOptimizeTracer) AppendRuleTracerStepToCurrent(id int, tp, reason, action string) { + tracer.curRuleTracer.Steps = append(tracer.curRuleTracer.Steps, LogicalRuleOptimizeTraceStep{ + ID: id, + TP: tp, + Reason: reason, + Action: action, + }) +} + +// TrackLogicalPlanAfterRuleOptimize adds the plan trace after optimize +func (tracer *LogicalOptimizeTracer) TrackLogicalPlanAfterRuleOptimize(after *LogicalPlanTrace) { + tracer.curRuleTracer.After = after +} + +// LogicalRuleOptimizeTracer indicates the trace for the LogicalPlan tree before and after +// logical rule optimize +type LogicalRuleOptimizeTracer struct { + Before *LogicalPlanTrace + After *LogicalPlanTrace + RuleName string + Steps []LogicalRuleOptimizeTraceStep +} + +// buildLogicalRuleOptimizeTracerBeforeOptimize builds the rule tracer before rule optimize +func buildLogicalRuleOptimizeTracerBeforeOptimize(name string, before 
*LogicalPlanTrace) *LogicalRuleOptimizeTracer { + return &LogicalRuleOptimizeTracer{ + Before: before, + RuleName: name, + Steps: make([]LogicalRuleOptimizeTraceStep, 0), + } +} + +// LogicalRuleOptimizeTraceStep indicates the trace for the detailed optimize changing in +// logical rule optimize +type LogicalRuleOptimizeTraceStep struct { + Action string + Reason string + ID int + TP string +}