Commit ca4762e

Merge branch 'update-contributing-guide' of github.com:dcalvin/tidb into update-contributing-guide

dcalvin committed Sep 29, 2020
2 parents 32f9c42 + e92a26a

Showing 51 changed files with 932 additions and 249 deletions.
17 changes: 12 additions & 5 deletions cmd/explaintest/r/explain_indexmerge.result
@@ -97,19 +97,26 @@ label = "cop"
 }

 set session tidb_enable_index_merge = off;
+explain select /*+ use_index_merge(t, primary, tb, tc) */ * from t where a <= 500000 or b <= 1000000 or c <= 3000000;
+id	estRows	task	access object	operator info
+IndexMerge_9	3560000.00	root
+├─TableRangeScan_5(Build)	500000.00	cop[tikv]	table:t	range:[-inf,500000], keep order:false
+├─IndexRangeScan_6(Build)	1000000.00	cop[tikv]	table:t, index:tb(b)	range:[-inf,1000000], keep order:false
+├─IndexRangeScan_7(Build)	3000000.00	cop[tikv]	table:t, index:tc(c)	range:[-inf,3000000], keep order:false
+└─TableRowIDScan_8(Probe)	3560000.00	cop[tikv]	table:t	keep order:false
 explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
 id	estRows	task	access object	operator info
-IndexMerge_8	5000000.00	root
+IndexMerge_8	4999999.00	root
 ├─IndexRangeScan_5(Build)	49.00	cop[tikv]	table:t, index:tb(b)	range:[-inf,50), keep order:false
 ├─IndexRangeScan_6(Build)	4999999.00	cop[tikv]	table:t, index:tc(c)	range:[-inf,5000000), keep order:false
-└─TableRowIDScan_7(Probe)	5000000.00	cop[tikv]	table:t	keep order:false
+└─TableRowIDScan_7(Probe)	4999999.00	cop[tikv]	table:t	keep order:false
 explain select /*+ use_index_merge(t, tb, tc) */ * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
 id	estRows	task	access object	operator info
 IndexMerge_9	0.00	root
 ├─IndexRangeScan_5(Build)	9999.00	cop[tikv]	table:t, index:tb(b)	range:[-inf,10000), keep order:false
 ├─IndexRangeScan_6(Build)	9999.00	cop[tikv]	table:t, index:tc(c)	range:[-inf,10000), keep order:false
 └─Selection_8(Probe)	0.00	cop[tikv]	lt(test.t.f, 10), or(lt(test.t.a, 10), lt(test.t.d, 10))
-  └─TableRowIDScan_7	19998.00	cop[tikv]	table:t	keep order:false
+  └─TableRowIDScan_7	19978.00	cop[tikv]	table:t	keep order:false
 explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
 id	estRows	task	access object	operator info
 TableReader_7	4999999.00	root	data:Selection_6
@@ -122,7 +129,7 @@ TableReader_7	4999999.00	root	data:Selection_6
 └─TableFullScan_5	5000000.00	cop[tikv]	table:t	keep order:false
 explain select /*+ use_index_merge(t, primary, tb) */ * from t where a < 50 or b < 5000000;
 id	estRows	task	access object	operator info
-IndexMerge_8	5000000.00	root
+IndexMerge_8	4999999.00	root
 ├─TableRangeScan_5(Build)	49.00	cop[tikv]	table:t	range:[-inf,50), keep order:false
 ├─IndexRangeScan_6(Build)	4999999.00	cop[tikv]	table:t, index:tb(b)	range:[-inf,5000000), keep order:false
-└─TableRowIDScan_7(Probe)	5000000.00	cop[tikv]	table:t	keep order:false
+└─TableRowIDScan_7(Probe)	4999999.00	cop[tikv]	table:t	keep order:false
1 change: 1 addition & 0 deletions cmd/explaintest/t/explain_indexmerge.test
@@ -20,6 +20,7 @@ explain select * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) an
 explain format="dot" select * from t where (a < 50 or b < 50) and f > 100;
 set session tidb_enable_index_merge = off;
 # be forced to use IndexMerge
+explain select /*+ use_index_merge(t, primary, tb, tc) */ * from t where a <= 500000 or b <= 1000000 or c <= 3000000;
 explain select /*+ use_index_merge(t, tb, tc) */ * from t where b < 50 or c < 5000000;
 explain select /*+ use_index_merge(t, tb, tc) */ * from t where (b < 10000 or c < 10000) and (a < 10 or d < 10) and f < 10;
 explain select /*+ use_index_merge(t, tb) */ * from t where b < 50 or c < 5000000;
25 changes: 18 additions & 7 deletions config/config.go
@@ -494,11 +494,8 @@ type TiKVClient struct {
 	// and if no activity is seen even after that the connection is closed.
 	GrpcKeepAliveTimeout uint `toml:"grpc-keepalive-timeout" json:"grpc-keepalive-timeout"`
 	// CommitTimeout is the max time which command 'commit' will wait.
-	CommitTimeout string `toml:"commit-timeout" json:"commit-timeout"`
-	// EnableAsyncCommit enables async commit for all transactions.
-	EnableAsyncCommit    bool `toml:"enable-async-commit" json:"enable-async-commit"`
-	AsyncCommitKeysLimit uint `toml:"async-commit-keys-limit" json:"async-commit-keys-limit"`
-
+	CommitTimeout string      `toml:"commit-timeout" json:"commit-timeout"`
+	AsyncCommit   AsyncCommit `toml:"async-commit" json:"async-commit"`
 	// MaxBatchSize is the max batch size when calling batch commands API.
 	MaxBatchSize uint `toml:"max-batch-size" json:"max-batch-size"`
 	// If TiKV load is greater than this, TiDB will wait for a while to avoid little batch.
@@ -522,6 +519,16 @@ type TiKVClient struct {
 	TTLRefreshedTxnSize int64 `toml:"ttl-refreshed-txn-size" json:"ttl-refreshed-txn-size"`
 }

+// AsyncCommit is the config for the async commit feature.
+type AsyncCommit struct {
+	// Whether to enable the async commit feature.
+	Enable bool `toml:"enable" json:"enable"`
+	// Use async commit only if the number of keys does not exceed KeysLimit.
+	KeysLimit uint `toml:"keys-limit" json:"keys-limit"`
+	// Use async commit only if the total size of keys does not exceed TotalKeySizeLimit.
+	TotalKeySizeLimit uint64 `toml:"total-key-size-limit" json:"total-key-size-limit"`
+}
+
 // CoprocessorCache is the config for coprocessor cache.
 type CoprocessorCache struct {
 	// Whether to enable the copr cache. The copr cache saves the result from TiKV Coprocessor in the memory and
@@ -687,8 +694,12 @@ var defaultConf = Config{
 		GrpcKeepAliveTime:    10,
 		GrpcKeepAliveTimeout: 3,
 		CommitTimeout:        "41s",
-		EnableAsyncCommit:    false,
-		AsyncCommitKeysLimit: 256,
+		AsyncCommit: AsyncCommit{
+			Enable: false,
+			// FIXME: Find an appropriate default limit.
+			KeysLimit:         256,
+			TotalKeySizeLimit: 4 * 1024, // 4 KiB
+		},

 		MaxBatchSize:       128,
 		OverloadThreshold:  200,
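The new AsyncCommit block replaces the two flat fields with a nested config and adds a byte-size bound alongside the key-count bound. A minimal sketch of how such limits might gate the commit path — the allowAsyncCommit helper and its caller are illustrative assumptions, not code from this commit:

package main

import "fmt"

// AsyncCommit mirrors the struct added in config/config.go.
type AsyncCommit struct {
	Enable            bool
	KeysLimit         uint
	TotalKeySizeLimit uint64
}

// allowAsyncCommit is a hypothetical helper: async commit applies only
// when the feature is enabled and the transaction stays within both the
// key-count and total-key-size limits; otherwise normal 2PC is used.
func allowAsyncCommit(cfg AsyncCommit, keys [][]byte) bool {
	if !cfg.Enable || uint(len(keys)) > cfg.KeysLimit {
		return false
	}
	var total uint64
	for _, k := range keys {
		total += uint64(len(k))
	}
	return total <= cfg.TotalKeySizeLimit
}

func main() {
	cfg := AsyncCommit{Enable: true, KeysLimit: 256, TotalKeySizeLimit: 4 * 1024}
	fmt.Println(allowAsyncCommit(cfg, [][]byte{[]byte("k1"), []byte("k2")})) // true
}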
16 changes: 10 additions & 6 deletions config/config.toml.example
@@ -356,12 +356,6 @@ grpc-keepalive-timeout = 3
 # Max time for commit command, must be twice bigger than raft election timeout.
 commit-timeout = "41s"

-# Enable async commit for all transactions.
-enable-async-commit = false
-# The maximum allowed keys in a async commit transaction. Transactions with more keys than the limit
-# will be committed with normal 2PC way.
-async-commit-keys-limit = 256
-
 # Max batch size in gRPC.
 max-batch-size = 128
 # Overload threshold of TiKV.
@@ -390,6 +384,16 @@ store-liveness-timeout = "5s"
 # If the size(in byte) of a transaction is large than `ttl-refreshed-txn-size`, it update the lock TTL during the 2PC.
 ttl-refreshed-txn-size = 33554432

+[tikv-client.async-commit]
+# Whether to enable the async commit feature. This feature reduces the latency of the two-phase commit.
+enable = false
+# The maximum allowed keys in an async commit transaction. Transactions with more keys than the limit
+# will be committed with normal 2PC way.
+keys-limit = 256
+# The maximum length total of keys in bytes. Transactions will be committed with the normal 2PC way
+# if the limit is exceeded.
+total-key-size-limit = 4096
+
 [tikv-client.copr-cache]
 # Whether to enable the copr cache. The copr cache saves the result from TiKV Coprocessor in the memory and
 # reuses the result when corresponding data in TiKV is unchanged, on a region basis.
11 changes: 7 additions & 4 deletions config/config_test.go
@@ -196,12 +196,14 @@ deprecate-integer-display-length = true
 txn-total-size-limit=2000
 [tikv-client]
 commit-timeout="41s"
-enable-async-commit=true
-async-commit-keys-limit=123
 max-batch-size=128
 region-cache-ttl=6000
 store-limit=0
 ttl-refreshed-txn-size=8192
+[tikv-client.async-commit]
+enable=true
+keys-limit=123
+total-key-size-limit=1024
 [stmt-summary]
 enable=false
 enable-internal-query=true
@@ -235,8 +237,9 @@ spilled-file-encryption-method = "plaintext"
 	c.Assert(conf.AlterPrimaryKey, Equals, true)

 	c.Assert(conf.TiKVClient.CommitTimeout, Equals, "41s")
-	c.Assert(conf.TiKVClient.EnableAsyncCommit, Equals, true)
-	c.Assert(conf.TiKVClient.AsyncCommitKeysLimit, Equals, uint(123))
+	c.Assert(conf.TiKVClient.AsyncCommit.Enable, Equals, true)
+	c.Assert(conf.TiKVClient.AsyncCommit.KeysLimit, Equals, uint(123))
+	c.Assert(conf.TiKVClient.AsyncCommit.TotalKeySizeLimit, Equals, uint64(1024))
 	c.Assert(conf.TiKVClient.MaxBatchSize, Equals, uint(128))
 	c.Assert(conf.TiKVClient.RegionCacheTTL, Equals, uint(6000))
 	c.Assert(conf.TiKVClient.StoreLimit, Equals, int64(0))
19 changes: 14 additions & 5 deletions ddl/ddl_api.go
@@ -30,6 +30,7 @@ import (

 	"github.com/cznic/mathutil"
 	"github.com/pingcap/errors"
+	"github.com/pingcap/failpoint"
 	"github.com/pingcap/parser/ast"
 	"github.com/pingcap/parser/charset"
 	"github.com/pingcap/parser/format"
@@ -1896,6 +1897,9 @@ func checkPartitionByHash(ctx sessionctx.Context, tbInfo *model.TableInfo, s *as

 // checkPartitionByRange checks validity of a "BY RANGE" partition.
 func checkPartitionByRange(ctx sessionctx.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) error {
+	failpoint.Inject("CheckPartitionByRangeErr", func() {
+		panic("Out Of Memory Quota!")
+	})
 	pi := tbInfo.Partition
 	if err := checkPartitionNameUnique(pi); err != nil {
 		return err
@@ -2758,11 +2762,11 @@ func (d *ddl) AddTablePartitions(ctx sessionctx.Context, ident ast.Ident, spec *

 	// partInfo contains only the new added partition, we have to combine it with the
 	// old partitions to check all partitions is strictly increasing.
+	clonedMeta := meta.Clone()
 	tmp := *partInfo
 	tmp.Definitions = append(pi.Definitions, tmp.Definitions...)
-	meta.Partition = &tmp
-	err = checkPartitionByRange(ctx, meta, nil)
-	meta.Partition = pi
+	clonedMeta.Partition = &tmp
+	err = checkPartitionByRange(ctx, clonedMeta, nil)
 	if err != nil {
 		if ErrSameNamePartition.Equal(err) && spec.IfNotExists {
 			ctx.GetSessionVars().StmtCtx.AppendNote(err)
@@ -5513,8 +5517,13 @@ func (d *ddl) AlterTablePartition(ctx sessionctx.Context, ident ast.Ident, spec
 			EndKeyHex:   endKey,
 		})
 	}
-	bundle.Index = placement.RuleIndexPartition
-	bundle.Override = true
+	if len(bundle.Rules) == 0 {
+		bundle.Index = 0
+		bundle.Override = false
+	} else {
+		bundle.Index = placement.RuleIndexPartition
+		bundle.Override = true
+	}

 	job := &model.Job{
 		SchemaID: schema.ID,
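The AddTablePartitions change replaces an in-place mutate-and-restore of the shared table meta with validation against a clone. The old restore step (meta.Partition = pi) would be skipped if checkPartitionByRange panicked — exactly what the new CheckPartitionByRangeErr failpoint makes it do — leaving the shared meta holding the unvalidated partition list. A standalone sketch of the pattern, using simplified stand-in types rather than TiDB's model package:

package main

import "fmt"

// PartitionInfo and TableInfo are simplified stand-ins for TiDB's model types.
type PartitionInfo struct{ Definitions []string }
type TableInfo struct {
	Name      string
	Partition *PartitionInfo
}

// Clone returns a shallow copy, mirroring the role model.TableInfo.Clone() plays here.
func (t *TableInfo) Clone() *TableInfo {
	cp := *t
	return &cp
}

func validate(t *TableInfo) error {
	fmt.Printf("validating %d partitions\n", len(t.Partition.Definitions))
	return nil
}

func main() {
	meta := &TableInfo{Name: "t", Partition: &PartitionInfo{Definitions: []string{"p0"}}}
	newParts := &PartitionInfo{Definitions: []string{"p1"}}

	// Combine old and new partitions on a clone; the shared meta is never
	// mutated, so no concurrent reader can observe "p1" before validation
	// passes, and a panic inside validate cannot skip a restore step.
	clonedMeta := meta.Clone()
	tmp := *newParts
	tmp.Definitions = append(meta.Partition.Definitions, tmp.Definitions...)
	clonedMeta.Partition = &tmp
	if err := validate(clonedMeta); err != nil {
		return
	}
	fmt.Println(len(meta.Partition.Definitions)) // still 1: meta untouched
}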
2 changes: 1 addition & 1 deletion ddl/ddl_worker.go
@@ -622,7 +622,7 @@ func (w *worker) runDDLJob(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64,
 	case model.ActionDropTable, model.ActionDropView, model.ActionDropSequence:
 		ver, err = onDropTableOrView(t, job)
 	case model.ActionDropTablePartition:
-		ver, err = onDropTablePartition(t, job)
+		ver, err = onDropTablePartition(d, t, job)
 	case model.ActionTruncateTablePartition:
 		ver, err = onTruncateTablePartition(d, t, job)
 	case model.ActionExchangeTablePartition:
18 changes: 18 additions & 0 deletions ddl/failtest/fail_db_test.go
@@ -436,6 +436,7 @@ func (s *testFailDBSuite) TestPartitionAddIndexGC(c *C) {
 func (s *testFailDBSuite) TestModifyColumn(c *C) {
 	tk := testkit.NewTestKit(c, s.store)
 	tk.MustExec("use test")
+	tk.MustExec("drop table if exists t;")

 	enableChangeColumnType := tk.Se.GetSessionVars().EnableChangeColumnType
 	tk.Se.GetSessionVars().EnableChangeColumnType = true
@@ -534,3 +535,20 @@ func (s *testFailDBSuite) TestModifyColumn(c *C) {

 	tk.MustExec("drop table t, t1, t2, t3, t4, t5")
 }
+
+func (s *testFailDBSuite) TestPartitionAddPanic(c *C) {
+	tk := testkit.NewTestKit(c, s.store)
+	tk.MustExec(`use test;`)
+	tk.MustExec(`drop table if exists t;`)
+	tk.MustExec(`create table t (a int) partition by range(a) (partition p0 values less than (10));`)
+	c.Assert(failpoint.Enable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr", `return(true)`), IsNil)
+	defer func() {
+		c.Assert(failpoint.Disable("github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr"), IsNil)
+	}()
+
+	_, err := tk.Exec(`alter table t add partition (partition p1 values less than (20));`)
+	c.Assert(err, NotNil)
+	result := tk.MustQuery("show create table t").Rows()[0][1]
+	c.Assert(result, Matches, `(?s).*PARTITION .p0. VALUES LESS THAN \(10\).*`)
+	c.Assert(result, Not(Matches), `(?s).*PARTITION .p0. VALUES LESS THAN \(20\).*`)
+}
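The test drives the panic through pingcap/failpoint. Note that failpoint.Inject is a marker that the failpoint-ctl tool rewrites into a runtime failpoint.Eval check before the failpoint build runs, so the injected closure does nothing under a plain build. A runnable approximation of the same pattern using the runtime API directly — the demo failpoint path and function names here are illustrative:

package main

import (
	"fmt"

	"github.com/pingcap/failpoint"
)

// checkSomething mimics the shape of checkPartitionByRange after the
// Inject marker has been rewritten: the panic body runs only while the
// failpoint is enabled.
func checkSomething() {
	if _, err := failpoint.Eval("demo/CheckErr"); err == nil {
		panic("Out Of Memory Quota!")
	}
	// ... normal validation would continue here ...
}

func main() {
	// Enable the failpoint by path, as TestPartitionAddPanic enables
	// github.com/pingcap/tidb/ddl/CheckPartitionByRangeErr.
	if err := failpoint.Enable("demo/CheckErr", `return(true)`); err != nil {
		panic(err)
	}
	defer func() {
		_ = failpoint.Disable("demo/CheckErr")
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // recovered: Out Of Memory Quota!
		}
	}()
	checkSomething()
}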
42 changes: 21 additions & 21 deletions ddl/partition.go
@@ -73,7 +73,7 @@ func checkAddPartition(t *meta.Meta, job *model.Job) (*model.TableInfo, *model.P
 func onAddTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
 	// Handle the rolling back job
 	if job.IsRollingback() {
-		ver, err := onDropTablePartition(t, job)
+		ver, err := onDropTablePartition(d, t, job)
 		if err != nil {
 			return ver, errors.Trace(err)
 		}
@@ -867,23 +867,14 @@ func getPartitionDef(tblInfo *model.TableInfo, partName string) (index int, def
 	return index, nil, table.ErrUnknownPartition.GenWithStackByArgs(partName, tblInfo.Name.O)
 }

-func buildPlacementDropRules(schemaID, tableID int64, partitionIDs []int64) []*placement.RuleOp {
-	rules := make([]*placement.RuleOp, 0, len(partitionIDs))
-	for _, partitionID := range partitionIDs {
-		rules = append(rules, &placement.RuleOp{
-			Action:           placement.RuleOpDel,
-			DeleteByIDPrefix: true,
-			Rule: &placement.Rule{
-				GroupID: placement.RuleDefaultGroupID,
-				ID:      fmt.Sprintf("%d_t%d_p%d", schemaID, tableID, partitionID),
-			},
-		})
-	}
-	return rules
+func buildPlacementDropBundle(partitionID int64) *placement.Bundle {
+	return &placement.Bundle{
+		ID: placement.GroupID(partitionID),
+	}
 }

 // onDropTablePartition deletes old partition meta.
-func onDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) {
+func onDropTablePartition(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) {
 	var partNames []string
 	if err := job.DecodeArgs(&partNames); err != nil {
 		job.State = model.JobStateCancelled
@@ -907,11 +898,20 @@ func onDropTablePartition(t *meta.Meta, job *model.Job) (ver int64, _ error) {
 		physicalTableIDs = removePartitionInfo(tblInfo, partNames)
 	}

-	rules := buildPlacementDropRules(job.SchemaID, tblInfo.ID, physicalTableIDs)
-	err = infosync.UpdatePlacementRules(nil, rules)
-	if err != nil {
-		job.State = model.JobStateCancelled
-		return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
+	if d.infoHandle != nil {
+		bundles := make([]*placement.Bundle, 0, len(physicalTableIDs))
+		for _, ID := range physicalTableIDs {
+			oldBundle, ok := d.infoHandle.Get().BundleByName(placement.GroupID(ID))
+			if ok && !oldBundle.IsEmpty() {
+				bundles = append(bundles, buildPlacementDropBundle(ID))
+			}
+		}
+
+		err = infosync.PutRuleBundles(nil, bundles)
+		if err != nil {
+			job.State = model.JobStateCancelled
+			return ver, errors.Wrapf(err, "failed to notify PD the placement rules")
+		}
 	}

 	ver, err = updateVersionAndTableInfo(t, job, tblInfo, true)
@@ -1536,7 +1536,7 @@ func onAlterTablePartition(t *meta.Meta, job *model.Job) (int64, error) {
 		return 0, errors.Trace(table.ErrUnknownPartition.GenWithStackByArgs("drop?", tblInfo.Name.O))
 	}

-	err = infosync.PutRuleBundle(nil, bundle)
+	err = infosync.PutRuleBundles(nil, []*placement.Bundle{bundle})
 	if err != nil {
 		job.State = model.JobStateCancelled
 		return 0, errors.Wrapf(err, "failed to notify PD the placement rules")
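The drop path now expresses rule removal declaratively: instead of issuing RuleOpDel operations, it PUTs a bundle that carries only the group ID and no rules, relying on the bundle API to treat an empty bundle as "clear the placement rules for this group" — the same emptiness the new Bundle.IsEmpty helper tests for. A minimal sketch of that shape, with simplified stand-in types (the real Bundle and GroupID live in ddl/placement, and the exact group-ID prefix is an assumption here):

package main

import "fmt"

// Bundle is a simplified stand-in for ddl/placement.Bundle.
type Bundle struct {
	ID       string
	Index    int
	Override bool
	Rules    []string
}

// IsEmpty mirrors the helper added in ddl/placement/types.go.
func (b *Bundle) IsEmpty() bool {
	return len(b.Rules) == 0 && b.Index == 0 && !b.Override
}

// GroupID approximates placement.GroupID's per-partition naming scheme.
func GroupID(id int64) string { return fmt.Sprintf("TiDB_DDL_%d", id) }

// buildPlacementDropBundle matches the new helper: a bundle with an ID
// but no rules, index, or override, which reads as a deletion request.
func buildPlacementDropBundle(partitionID int64) *Bundle {
	return &Bundle{ID: GroupID(partitionID)}
}

func main() {
	b := buildPlacementDropBundle(42)
	fmt.Println(b.ID, b.IsEmpty()) // TiDB_DDL_42 true
}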
5 changes: 5 additions & 0 deletions ddl/placement/types.go
@@ -108,6 +108,11 @@ func (b *Bundle) Clone() *Bundle {
 	return newBundle
 }

+// IsEmpty is used to check if a bundle is empty.
+func (b *Bundle) IsEmpty() bool {
+	return len(b.Rules) == 0 && b.Index == 0 && !b.Override
+}
+
 // RuleOpType indicates the operation type.
 type RuleOpType string

17 changes: 17 additions & 0 deletions ddl/placement/types_test.go
@@ -28,6 +28,23 @@ var _ = Suite(&testRuleSuite{})

 type testBundleSuite struct{}

+func (t *testBundleSuite) TestEmpty(c *C) {
+	bundle := &Bundle{ID: GroupID(1)}
+	c.Assert(bundle.IsEmpty(), IsTrue)
+
+	bundle = &Bundle{ID: GroupID(1), Index: 1}
+	c.Assert(bundle.IsEmpty(), IsFalse)
+
+	bundle = &Bundle{ID: GroupID(1), Override: true}
+	c.Assert(bundle.IsEmpty(), IsFalse)
+
+	bundle = &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}}
+	c.Assert(bundle.IsEmpty(), IsFalse)
+
+	bundle = &Bundle{ID: GroupID(1), Index: 1, Override: true}
+	c.Assert(bundle.IsEmpty(), IsFalse)
+}
+
 func (t *testBundleSuite) TestClone(c *C) {
 	bundle := &Bundle{ID: GroupID(1), Rules: []*Rule{{ID: "434"}}}
