
lightning: fix pebble panic caused by encoded KVs larger than 4.0G in one batch #1105

Merged 7 commits on May 20, 2021
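
In short: pebble rejects batches larger than 4.0 GB, and before this PR encodeLoop capped each batch only by pair count (maxKvPairsCnt), never by bytes. The fix adds a Size method to every encoded Row and flushes a batch early once its accumulated size passes minDeliverBytes. A quick back-of-the-envelope check of the failure mode — the concrete numbers below are assumptions for illustration, not lightning's real constants:

package main

import "fmt"

func main() {
	// If a batch is capped only by pair count, large rows can push it
	// past pebble's 4.0 GB limit. 4096 pairs and 1 MiB per row are
	// assumed values for illustration.
	const pairsPerBatch = 4096
	const avgRowBytes = 1 << 20 // 1 MiB per encoded row
	fmt.Println(uint64(pairsPerBatch * avgRowBytes)) // 4294967296 bytes > 4.0 GB
}
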
8 changes: 8 additions & 0 deletions pkg/lightning/backend/kv/sql2kv.go
@@ -408,6 +408,14 @@ func (kvcodec *tableKVEncoder) Encode(
return kvPairs(pairs), nil
}

func (kvs kvPairs) Size() uint64 {
size := uint64(0)
for _, kv := range kvs {
size += uint64(len(kv.Key) + len(kv.Val))
}
return size
}

func (kvs kvPairs) ClassifyAndAppend(
data *Rows,
dataChecksum *verification.KVChecksum,
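kvPairs is lightning's slice of encoded key/value pairs, so its size is simply the sum of key and value byte lengths. A self-contained sanity check of that arithmetic, using local stand-in types rather than the real ones from this package:

package main

import "fmt"

type kvPair struct{ Key, Val []byte }
type kvPairs []kvPair

// Size mirrors the method added in this PR: total bytes of all
// keys plus all values in the slice.
func (kvs kvPairs) Size() uint64 {
	var size uint64
	for _, kv := range kvs {
		size += uint64(len(kv.Key) + len(kv.Val))
	}
	return size
}

func main() {
	kvs := kvPairs{{Key: []byte("k1"), Val: []byte("value-1")}}
	fmt.Println(kvs.Size()) // 2 + 7 = 9
}
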
3 changes: 3 additions & 0 deletions pkg/lightning/backend/kv/types.go
@@ -35,6 +35,9 @@ type Row interface {
indices *Rows,
indexChecksum *verification.KVChecksum,
)

// Size returns the total KV size of this Row in bytes.
Size() uint64
}

// Rows represents a collection of encoded rows.
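Every Row implementation now has to provide Size as well, which the compiler enforces at each use site. A miniature, self-contained version of the widened interface and one conforming implementation — all names here are local illustrations, not the PR's identifiers:

package main

import "fmt"

// Row mirrors the relevant slice of lightning's kv.Row interface:
// after this PR every row must report its encoded byte size.
type Row interface {
	Size() uint64
}

// rawRow is an illustrative implementation backed by raw bytes,
// similar in spirit to the tidbRow change below.
type rawRow []byte

// Size returns the total byte length of the row.
func (r rawRow) Size() uint64 { return uint64(len(r)) }

// Compile-time assertion: the build fails if rawRow drops Size.
var _ Row = rawRow(nil)

func main() {
	var r Row = rawRow("INSERT INTO t VALUES (1,2,3)")
	fmt.Println(r.Size()) // 28
}
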
4 changes: 4 additions & 0 deletions pkg/lightning/backend/noop/noop.go
@@ -154,6 +154,10 @@ func (e noopEncoder) Encode(log.Logger, []types.Datum, int64, []int) (kv.Row, error) {

type noopRow struct{}

func (r noopRow) Size() uint64 {
return 0
}

func (r noopRow) ClassifyAndAppend(*kv.Rows, *verification.KVChecksum, *kv.Rows, *verification.KVChecksum) {
}

4 changes: 4 additions & 0 deletions pkg/lightning/backend/tidb/tidb.go
@@ -94,6 +94,10 @@ func NewTiDBBackend(db *sql.DB, onDuplicate string) backend.Backend {
return backend.MakeBackend(&tidbBackend{db: db, onDuplicate: onDuplicate})
}

func (row tidbRow) Size() uint64 {
return uint64(len(row))
}

func (row tidbRow) ClassifyAndAppend(data *kv.Rows, checksum *verification.KVChecksum, _ *kv.Rows, _ *verification.KVChecksum) {
rows := (*data).(tidbRows)
// Cannot do `rows := data.(*tidbRows); *rows = append(*rows, row)`.
12 changes: 12 additions & 0 deletions pkg/lightning/restore/restore.go
@@ -2601,6 +2601,7 @@ func (cr *chunkRestore) encodeLoop(
canDeliver := false
kvPacket := make([]deliveredKVs, 0, maxKvPairsCnt)
var newOffset, rowID int64
var kvSize uint64
outLoop:
for !canDeliver {
readDurStart := time.Now()
@@ -2636,6 +2637,17 @@
return
}
kvPacket = append(kvPacket, deliveredKVs{kvs: kvs, columns: columnNames, offset: newOffset, rowID: rowID})
kvSize += kvs.Size()
failpoint.Inject("mock-kv-size", func(val failpoint.Value) {
kvSize += uint64(val.(int))
})
// pebble does not allow more than 4.0 GB of KVs in one batch.
// Importing a SQL file in which each KV is larger than 4 GB / maxKvPairsCnt
// would hit a pebble panic, so also flush once the accumulated size grows too large.
if kvSize > minDeliverBytes {
canDeliver = true
kvSize = 0
}
if len(kvPacket) >= maxKvPairsCnt || newOffset == cr.chunk.Chunk.EndOffset {
canDeliver = true
}
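Isolated from the surrounding restore machinery, the new delivery condition amounts to flushing on whichever limit trips first: accumulated bytes (minDeliverBytes) or pair count (maxKvPairsCnt). A self-contained sketch of that loop shape, with illustrative constants in place of lightning's real ones:

package main

import "fmt"

const (
	maxPairsPerBatch = 4   // illustrative stand-in for maxKvPairsCnt
	maxBatchBytes    = 300 // illustrative stand-in for minDeliverBytes
)

type pair struct{ key, val []byte }

func (p pair) size() uint64 { return uint64(len(p.key) + len(p.val)) }

// deliverBatches mimics encodeLoop's flush policy: a batch is sent as
// soon as its byte size passes maxBatchBytes or it reaches
// maxPairsPerBatch pairs. With the byte threshold set well below
// pebble's hard limit, no batch can reach the size that caused the panic.
func deliverBatches(pairs []pair) [][]pair {
	var out [][]pair
	var batch []pair
	var batchBytes uint64
	for _, p := range pairs {
		batch = append(batch, p)
		batchBytes += p.size()
		if batchBytes > maxBatchBytes || len(batch) >= maxPairsPerBatch {
			out = append(out, batch)
			batch, batchBytes = nil, 0
		}
	}
	if len(batch) > 0 {
		out = append(out, batch)
	}
	return out
}

func main() {
	pairs := make([]pair, 6)
	for i := range pairs {
		pairs[i] = pair{key: []byte{byte(i)}, val: make([]byte, 200)}
	}
	for i, b := range deliverBatches(pairs) {
		fmt.Printf("batch %d: %d pairs\n", i, len(b)) // 3 batches of 2 pairs
	}
}
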
51 changes: 51 additions & 0 deletions pkg/lightning/restore/restore_test.go
@@ -1150,6 +1150,57 @@ func (s *chunkRestoreSuite) TestEncodeLoopForcedError(c *C) {
c.Assert(kvsCh, HasLen, 0)
}

func (s *chunkRestoreSuite) TestEncodeLoopDeliverLimit(c *C) {
ctx := context.Background()
kvsCh := make(chan []deliveredKVs, 4)
deliverCompleteCh := make(chan deliverResult)
kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
SQLMode: s.cfg.TiDB.SQLMode,
Timestamp: 1234567898,
})
c.Assert(err, IsNil)

dir := c.MkDir()
fileName := "db.limit.000.csv"
err = ioutil.WriteFile(filepath.Join(dir, fileName), []byte("1,2,3\r\n4,5,6\r\n7,8,9\r"), 0o644)
c.Assert(err, IsNil)

store, err := storage.NewLocalStorage(dir)
c.Assert(err, IsNil)
cfg := config.NewConfig()

reader, err := store.Open(ctx, fileName)
c.Assert(err, IsNil)
w := worker.NewPool(ctx, 1, "io")
p := mydump.NewCSVParser(&cfg.Mydumper.CSV, reader, 111, w, false)
s.cr.parser = p

rc := &Controller{pauser: DeliverPauser, cfg: cfg}
c.Assert(failpoint.Enable(
"github.com/pingcap/br/pkg/lightning/restore/mock-kv-size", "return(110000000)"), IsNil)
_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)

// We have 3 KVs in total. After the failpoint is injected,
// every encoded KV exceeds the delivery limit, so each batch carries one KV.
count := 0
for {
kvs, ok := <-kvsCh
if !ok {
break
}
count++
if count <= 3 {
c.Assert(kvs, HasLen, 1)
}
if count == 4 {
// An empty kvs batch is sent just before encodeLoop exits,
// so we receive 4 batches in total and the last one is empty.
c.Assert(kvs, HasLen, 0)
break
}
}
}
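
A note on the failpoint plumbing: Enable registers the failpoint under its full package path, while the Inject marker in encodeLoop uses the short name "mock-kv-size", and as I understand pingcap/failpoint the marker only fires in binaries built with the failpoint-ctl source rewrite. Tests conventionally pair Enable with a deferred Disable so the injection cannot leak into later tests; a fragment in this test's gocheck style (the deferred Disable is my addition, not necessarily part of this PR):

// Scope the failpoint to this test: enable it, then make sure it
// is disabled again on exit.
fpName := "github.com/pingcap/br/pkg/lightning/restore/mock-kv-size"
c.Assert(failpoint.Enable(fpName, "return(110000000)"), IsNil)
defer func() {
	c.Assert(failpoint.Disable(fpName), IsNil)
}()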

func (s *chunkRestoreSuite) TestEncodeLoopDeliverErrored(c *C) {
ctx := context.Background()
kvsCh := make(chan []deliveredKVs)