Skip to content

Commit cc127c1

Browse files
authored
ddl: change interface of lightning package wrappers (#53233)
Closes #53165
1 parent 6612270 commit cc127c1

16 files changed

+301
-282
lines changed

pkg/ddl/backfilling.go

+6
Original file line numberDiff line numberDiff line change
@@ -612,6 +612,12 @@ func (dc *ddlCtx) writePhysicalTableRecord(
612612
return errors.Trace(err)
613613
}
614614
defer scheduler.close(true)
615+
if lit, ok := scheduler.(*ingestBackfillScheduler); ok {
616+
if lit.finishedWritingNeedImport() {
617+
return nil
618+
}
619+
}
620+
615621
err = scheduler.setupWorkers()
616622
if err != nil {
617623
return errors.Trace(err)

pkg/ddl/backfilling_operators.go

+31-13
Original file line numberDiff line numberDiff line change
@@ -656,7 +656,7 @@ func (w *indexIngestLocalWorker) HandleTask(rs IndexRecordChunk, send func(Index
656656
}()
657657
w.indexIngestBaseWorker.HandleTask(rs, send)
658658
// needs to flush and import to avoid too much use of disk.
659-
_, _, _, err := ingest.TryFlushAllIndexes(w.backendCtx, ingest.FlushModeAuto, w.indexIDs)
659+
_, _, _, err := w.backendCtx.Flush(ingest.FlushModeAuto)
660660
if err != nil {
661661
w.ctx.onError(err)
662662
return
@@ -726,8 +726,14 @@ func (w *indexIngestBaseWorker) initSessCtx() {
726726
}
727727

728728
func (w *indexIngestBaseWorker) Close() {
729+
// TODO(lance6716): unify the real write action for engineInfo and external
730+
// writer.
729731
for _, writer := range w.writers {
730-
err := writer.Close(w.ctx)
732+
ew, ok := writer.(*external.Writer)
733+
if !ok {
734+
break
735+
}
736+
err := ew.Close(w.ctx)
731737
if err != nil {
732738
w.ctx.onError(err)
733739
}
@@ -827,24 +833,36 @@ func (s *indexWriteResultSink) flush() error {
827833
failpoint.Inject("mockFlushError", func(_ failpoint.Value) {
828834
failpoint.Return(errors.New("mock flush error"))
829835
})
830-
for _, index := range s.indexes {
831-
idxInfo := index.Meta()
832-
_, _, err := s.backendCtx.Flush(idxInfo.ID, ingest.FlushModeForceFlushAndImport)
833-
if err != nil {
834-
if common.ErrFoundDuplicateKeys.Equal(err) {
835-
err = convertToKeyExistsErr(err, idxInfo, s.tbl.Meta())
836-
return err
836+
_, _, errIdxID, err := s.backendCtx.Flush(ingest.FlushModeForceFlushAndImport)
837+
if err != nil {
838+
if common.ErrFoundDuplicateKeys.Equal(err) {
839+
var idxInfo table.Index
840+
for _, idx := range s.indexes {
841+
if idx.Meta().ID == errIdxID {
842+
idxInfo = idx
843+
break
844+
}
837845
}
838-
logutil.Logger(s.ctx).Error("flush error",
839-
zap.String("category", "ddl"), zap.Error(err))
840-
return err
846+
if idxInfo == nil {
847+
logutil.Logger(s.ctx).Error("index not found", zap.Int64("indexID", errIdxID))
848+
return kv.ErrKeyExists
849+
}
850+
return convertToKeyExistsErr(err, idxInfo.Meta(), s.tbl.Meta())
841851
}
852+
logutil.Logger(s.ctx).Error("flush error",
853+
zap.String("category", "ddl"), zap.Error(err))
854+
return err
842855
}
843856
return nil
844857
}
845858

846859
func (s *indexWriteResultSink) Close() error {
847-
return s.errGroup.Wait()
860+
err := s.errGroup.Wait()
861+
// for local pipeline
862+
if bc := s.backendCtx; bc != nil {
863+
bc.UnregisterEngines()
864+
}
865+
return err
848866
}
849867

850868
func (*indexWriteResultSink) String() string {

pkg/ddl/backfilling_read_index.go

+11-14
Original file line numberDiff line numberDiff line change
@@ -133,12 +133,7 @@ func (r *readIndexExecutor) RunSubtask(ctx context.Context, subtask *proto.Subta
133133
if opCtx.OperatorErr() != nil {
134134
return opCtx.OperatorErr()
135135
}
136-
if err != nil {
137-
return err
138-
}
139-
140-
r.bc.ResetWorkers(r.job.ID)
141-
return nil
136+
return err
142137
}
143138

144139
func (r *readIndexExecutor) RealtimeSummary() *execute.SubtaskSummary {
@@ -226,15 +221,17 @@ func (r *readIndexExecutor) buildLocalStorePipeline(
226221
return nil, err
227222
}
228223
d := r.d
229-
engines := make([]ingest.Engine, 0, len(r.indexes))
224+
indexIDs := make([]int64, 0, len(r.indexes))
230225
for _, index := range r.indexes {
231-
ei, err := r.bc.Register(r.job.ID, index.ID, r.job.SchemaName, r.job.TableName)
232-
if err != nil {
233-
tidblogutil.Logger(opCtx).Warn("cannot register new engine", zap.Error(err),
234-
zap.Int64("job ID", r.job.ID), zap.Int64("index ID", index.ID))
235-
return nil, err
236-
}
237-
engines = append(engines, ei)
226+
indexIDs = append(indexIDs, index.ID)
227+
}
228+
engines, err := r.bc.Register(indexIDs, r.job.TableName)
229+
if err != nil {
230+
tidblogutil.Logger(opCtx).Error("cannot register new engine",
231+
zap.Error(err),
232+
zap.Int64("job ID", r.job.ID),
233+
zap.Int64s("index IDs", indexIDs))
234+
return nil, err
238235
}
239236
counter := metrics.BackfillTotalCounter.WithLabelValues(
240237
metrics.GenerateReorgLabel("add_idx_rate", r.job.SchemaName, tbl.Meta().Name.O))

pkg/ddl/backfilling_scheduler.go

+32-26
Original file line numberDiff line numberDiff line change
@@ -354,6 +354,15 @@ func newIngestBackfillScheduler(
354354
}, nil
355355
}
356356

357+
func (b *ingestBackfillScheduler) finishedWritingNeedImport() bool {
358+
job := b.reorgInfo.Job
359+
bc, ok := ingest.LitBackCtxMgr.Load(job.ID)
360+
if !ok {
361+
return false
362+
}
363+
return bc.FinishedWritingNeedImport()
364+
}
365+
357366
func (b *ingestBackfillScheduler) setupWorkers() error {
358367
job := b.reorgInfo.Job
359368
bc, ok := ingest.LitBackCtxMgr.Load(job.ID)
@@ -371,10 +380,26 @@ func (b *ingestBackfillScheduler) setupWorkers() error {
371380
if err != nil {
372381
return errors.Trace(err)
373382
}
383+
384+
indexIDs := make([]int64, 0, len(b.reorgInfo.elements))
385+
for _, e := range b.reorgInfo.elements {
386+
indexIDs = append(indexIDs, e.ID)
387+
}
388+
engines, err := b.backendCtx.Register(indexIDs, job.TableName)
389+
if err != nil {
390+
return errors.Trace(err)
391+
}
392+
374393
b.copReqSenderPool = copReqSenderPool
375394
readerCnt, writerCnt := b.expectedWorkerSize()
376-
writerPool := workerpool.NewWorkerPool[IndexRecordChunk]("ingest_writer",
377-
poolutil.DDL, writerCnt, b.createWorker)
395+
writerPool := workerpool.NewWorkerPool[IndexRecordChunk](
396+
"ingest_writer",
397+
poolutil.DDL,
398+
writerCnt,
399+
func() workerpool.Worker[IndexRecordChunk, workerpool.None] {
400+
return b.createWorker(indexIDs, engines)
401+
},
402+
)
378403
writerPool.Start(b.ctx)
379404
b.writerPool = writerPool
380405
b.copReqSenderPool.chunkSender = writerPool
@@ -406,13 +431,9 @@ func (b *ingestBackfillScheduler) close(force bool) {
406431
})
407432
}
408433
close(b.resultCh)
409-
if intest.InTest && len(b.copReqSenderPool.srcChkPool) != copReadChunkPoolSize() {
434+
if intest.InTest && b.copReqSenderPool != nil && len(b.copReqSenderPool.srcChkPool) != copReadChunkPoolSize() {
410435
panic(fmt.Sprintf("unexpected chunk size %d", len(b.copReqSenderPool.srcChkPool)))
411436
}
412-
if !force {
413-
jobID := b.reorgInfo.ID
414-
b.backendCtx.ResetWorkers(jobID)
415-
}
416437
}
417438

418439
func (b *ingestBackfillScheduler) sendTask(task *reorgBackfillTask) error {
@@ -446,32 +467,17 @@ func (b *ingestBackfillScheduler) adjustWorkerSize() error {
446467
return nil
447468
}
448469

449-
func (b *ingestBackfillScheduler) createWorker() workerpool.Worker[IndexRecordChunk, workerpool.None] {
470+
func (b *ingestBackfillScheduler) createWorker(
471+
indexIDs []int64,
472+
engines []ingest.Engine,
473+
) workerpool.Worker[IndexRecordChunk, workerpool.None] {
450474
reorgInfo := b.reorgInfo
451475
job := reorgInfo.Job
452476
sessCtx, err := newSessCtx(reorgInfo.d.store, reorgInfo.ReorgMeta.SQLMode, reorgInfo.ReorgMeta.Location, reorgInfo.ReorgMeta.ResourceGroupName)
453477
if err != nil {
454478
b.sendResult(&backfillResult{err: err})
455479
return nil
456480
}
457-
bcCtx := b.backendCtx
458-
indexIDs := make([]int64, 0, len(reorgInfo.elements))
459-
engines := make([]ingest.Engine, 0, len(reorgInfo.elements))
460-
for _, elem := range reorgInfo.elements {
461-
ei, err := bcCtx.Register(job.ID, elem.ID, job.SchemaName, job.TableName)
462-
if err != nil {
463-
// Return an error only if it is the first worker.
464-
if b.writerMaxID == 0 {
465-
b.sendResult(&backfillResult{err: err})
466-
return nil
467-
}
468-
logutil.Logger(b.ctx).Warn("cannot create new writer", zap.Error(err),
469-
zap.Int64("job ID", reorgInfo.ID), zap.Int64("index ID", elem.ID))
470-
return nil
471-
}
472-
indexIDs = append(indexIDs, elem.ID)
473-
engines = append(engines, ei)
474-
}
475481

476482
worker, err := newAddIndexIngestWorker(
477483
b.ctx, b.tbl, reorgInfo.d, engines, b.resultCh, job.ID,

pkg/ddl/ingest/BUILD.bazel

-1
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@ go_library(
1111
"engine.go",
1212
"engine_mgr.go",
1313
"env.go",
14-
"flush.go",
1514
"mem_root.go",
1615
"message.go",
1716
"mock.go",

0 commit comments

Comments (0)