kv: make txn entry size limit configurable #21843

Merged · 3 commits · Dec 25, 2020
config/config.go: 4 additions & 0 deletions
@@ -42,6 +42,8 @@ import (
// Config number limitations
const (
MaxLogFileSize = 4096 // MB
// DefTxnEntrySizeLimit is the default value of TxnEntrySizeLimit.
DefTxnEntrySizeLimit = 6 * 1024 * 1024
// DefTxnTotalSizeLimit is the default value of TxnTotalSizeLimit.
DefTxnTotalSizeLimit = 100 * 1024 * 1024
// DefMaxIndexLength is the maximum index length(in bytes). This value is consistent with MySQL.
@@ -379,6 +381,7 @@ type Performance struct {
PseudoEstimateRatio float64 `toml:"pseudo-estimate-ratio" json:"pseudo-estimate-ratio"`
ForcePriority string `toml:"force-priority" json:"force-priority"`
BindInfoLease string `toml:"bind-info-lease" json:"bind-info-lease"`
TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit" json:"txn-entry-size-limit"`
TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit" json:"txn-total-size-limit"`
TCPKeepAlive bool `toml:"tcp-keep-alive" json:"tcp-keep-alive"`
CrossJoin bool `toml:"cross-join" json:"cross-join"`
@@ -624,6 +627,7 @@ var defaultConf = Config{
PseudoEstimateRatio: 0.8,
ForcePriority: "NO_PRIORITY",
BindInfoLease: "3s",
TxnEntrySizeLimit: DefTxnEntrySizeLimit,
TxnTotalSizeLimit: DefTxnTotalSizeLimit,
DistinctAggPushDown: false,
CommitterConcurrency: 16,
config/config.toml.example: 3 additions & 0 deletions
@@ -264,6 +264,9 @@ distinct-agg-push-down = false
# If binlog is disabled or binlog is enabled without Kafka, this value should be less than 10737418240(10G).
txn-total-size-limit = 104857600

# The size limit in bytes for each entry (key-value pair) in one transaction.
txn-entry-size-limit = 6291456

# The max number of concurrently running two-phase committer requests for a SQL statement.
committer-concurrency = 16

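To see how the new `[performance]` key maps onto the struct field added in config/config.go, here is a small standalone sketch, not TiDB's actual config loader: the structs are hypothetical cut-downs, and it assumes the github.com/BurntSushi/toml package for decoding. The `toml:"txn-entry-size-limit"` tag is what lets the key in the TOML file populate the field.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// performance is a cut-down stand-in for config.Performance with only the
// two size-limit fields touched by this PR.
type performance struct {
	TxnEntrySizeLimit uint64 `toml:"txn-entry-size-limit"`
	TxnTotalSizeLimit uint64 `toml:"txn-total-size-limit"`
}

// config is a cut-down stand-in for config.Config.
type config struct {
	Performance performance `toml:"performance"`
}

func main() {
	const data = `
[performance]
txn-entry-size-limit = 6291456
txn-total-size-limit = 104857600
`
	var cfg config
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}
	// 6291456 bytes is exactly 6 * 1024 * 1024, i.e. DefTxnEntrySizeLimit.
	fmt.Println(cfg.Performance.TxnEntrySizeLimit)
}
```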
config/config_test.go: 2 additions & 0 deletions
@@ -192,6 +192,7 @@ mem-quota-query = 10000
max-index-length = 3080
skip-register-to-dashboard = true
[performance]
txn-entry-size-limit=1000
txn-total-size-limit=2000
[tikv-client]
commit-timeout="41s"
@@ -222,6 +223,7 @@ engines = ["tiflash"]
c.Assert(conf.Binlog.Enable, Equals, true)
c.Assert(conf.Binlog.Strategy, Equals, "hash")

c.Assert(conf.Performance.TxnEntrySizeLimit, Equals, uint64(1000))
// Test that the value will be overwritten by the config file.
c.Assert(conf.Performance.TxnTotalSizeLimit, Equals, uint64(2000))
c.Assert(conf.AlterPrimaryKey, Equals, true)
kv/kv.go: 1 addition & 1 deletion
@@ -120,7 +120,7 @@ func (r ReplicaReadType) IsFollowerRead() bool {
// These limits are enforced to make sure the transaction can be handled well by TiKV.
var (
// TxnEntrySizeLimit is the limit of a single entry's size (len(key) + len(value)).
TxnEntrySizeLimit = 6 * 1024 * 1024
TxnEntrySizeLimit uint64 = config.DefTxnEntrySizeLimit
// TxnTotalSizeLimit is the limit of the sum of all entry sizes.
TxnTotalSizeLimit uint64 = config.DefTxnTotalSizeLimit
)
kv/memdb_buffer.go: 4 additions & 4 deletions
@@ -27,7 +27,7 @@ import (
// memDbBuffer implements the MemBuffer interface.
type memDbBuffer struct {
sandbox *memdb.Sandbox
entrySizeLimit int
entrySizeLimit uint64
bufferLenLimit uint64
bufferSizeLimit uint64
}
@@ -43,7 +43,7 @@ type memDbIter struct {
func NewMemDbBuffer() MemBuffer {
return &memDbBuffer{
sandbox: memdb.NewSandbox(),
entrySizeLimit: TxnEntrySizeLimit,
entrySizeLimit: atomic.LoadUint64(&TxnEntrySizeLimit),
bufferSizeLimit: atomic.LoadUint64(&TxnTotalSizeLimit),
}
}
@@ -98,7 +98,7 @@ func (m *memDbBuffer) Set(k Key, v []byte) error {
if len(v) == 0 {
return errors.Trace(ErrCannotSetNilValue)
}
if len(k)+len(v) > m.entrySizeLimit {
if uint64(len(k)+len(v)) > m.entrySizeLimit {
return ErrEntryTooLarge.GenWithStackByArgs(m.entrySizeLimit, len(k)+len(v))
}

@@ -140,7 +140,7 @@ func (m *memDbBuffer) Len() int {
func (m *memDbBuffer) NewStagingBuffer() MemBuffer {
return &memDbBuffer{
sandbox: m.sandbox.Derive(),
entrySizeLimit: TxnEntrySizeLimit,
entrySizeLimit: m.entrySizeLimit,
bufferSizeLimit: m.bufferSizeLimit - uint64(m.sandbox.Size()),
}
}
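For illustration, here is a minimal, self-contained sketch of the pattern the memdb_buffer.go hunks above follow; the type and variable names are hypothetical stand-ins, not the actual TiDB code. The limit is read once with atomic.LoadUint64 when a buffer is created (since the server may overwrite the package-level value at startup), and each write rejects entries whose len(key)+len(value) exceeds that captured limit.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// txnEntrySizeLimit stands in for kv.TxnEntrySizeLimit: a package-level
// uint64 that may be overwritten at startup, so readers load it atomically.
var txnEntrySizeLimit uint64 = 6 * 1024 * 1024

// buffer is a toy stand-in for memDbBuffer: it captures the limit once at
// construction so every write into this buffer sees a consistent value.
type buffer struct {
	entrySizeLimit uint64
	data           map[string][]byte
}

func newBuffer() *buffer {
	return &buffer{
		entrySizeLimit: atomic.LoadUint64(&txnEntrySizeLimit),
		data:           make(map[string][]byte),
	}
}

// Set rejects any entry whose key+value size exceeds the captured limit,
// mirroring the uint64 comparison introduced in the diff above.
func (b *buffer) Set(k, v []byte) error {
	if uint64(len(k)+len(v)) > b.entrySizeLimit {
		return fmt.Errorf("entry too large: limit %d, size %d",
			b.entrySizeLimit, len(k)+len(v))
	}
	b.data[string(k)] = v
	return nil
}

func main() {
	b := newBuffer()
	fmt.Println(b.Set([]byte("k"), make([]byte, 8*1024*1024))) // exceeds 6 MiB: error
	fmt.Println(b.Set([]byte("k"), []byte("v")))               // nil
}
```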
store/tikv/2pc.go: 1 addition & 1 deletion
@@ -385,7 +385,7 @@ func (c *twoPhaseCommitter) initKeysAndMutations() error {
}
mutations.Push(op, k, value, isPessimisticLock)
entrySize := len(k) + len(v)
if entrySize > kv.TxnEntrySizeLimit {
if uint64(entrySize) > kv.TxnEntrySizeLimit {
return kv.ErrEntryTooLarge.GenWithStackByArgs(kv.TxnEntrySizeLimit, entrySize)
}
size += entrySize
tidb-server/main.go: 4 additions & 0 deletions
@@ -530,6 +530,10 @@ func setGlobalVars() {
plannercore.AllowCartesianProduct.Store(cfg.Performance.CrossJoin)
privileges.SkipWithGrant = cfg.Security.SkipGrantTable
kv.TxnTotalSizeLimit = cfg.Performance.TxnTotalSizeLimit
if cfg.Performance.TxnEntrySizeLimit > 120*1024*1024 {
log.Fatal("cannot set txn entry size limit larger than 120M")
}
kv.TxnEntrySizeLimit = cfg.Performance.TxnEntrySizeLimit

priority := mysql.Str2Priority(cfg.Performance.ForcePriority)
variable.ForcePriority = int32(priority)
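A condensed sketch of the startup wiring this PR adds; the names approximate those in tidb-server/main.go and kv/kv.go, but this is a standalone example, not the actual TiDB code. The configured value is checked against a 120 MiB hard cap before it overwrites the package-level limit, and the default 6291456 shown in config.toml.example is exactly 6 * 1024 * 1024 bytes (6 MiB).

```go
package main

import "log"

// Defaults mirror the constants added to config/config.go above.
const (
	defTxnEntrySizeLimit = 6 * 1024 * 1024   // 6291456 bytes, the value in config.toml.example
	maxTxnEntrySizeLimit = 120 * 1024 * 1024 // hard cap enforced at startup
)

// txnEntrySizeLimit stands in for kv.TxnEntrySizeLimit.
var txnEntrySizeLimit uint64 = defTxnEntrySizeLimit

// setGlobalVars sketches the check added in the diff: reject values above
// 120 MiB, otherwise copy the configured limit into the kv package variable.
func setGlobalVars(configured uint64) {
	if configured > maxTxnEntrySizeLimit {
		log.Fatal("cannot set txn entry size limit larger than 120M")
	}
	txnEntrySizeLimit = configured
}

func main() {
	setGlobalVars(10 * 1024 * 1024) // e.g. raise the per-entry limit to 10 MiB
	log.Printf("txn entry size limit is now %d bytes", txnEntrySizeLimit)
}
```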