sink(cdc): handle sink errors more fast and light #8949

Merged · 23 commits · May 16, 2023

Changes from 6 commits
26 changes: 25 additions & 1 deletion cdc/model/errors.go
@@ -20,7 +20,7 @@ import (
cerror "github.com/pingcap/tiflow/pkg/errors"
)

// RunningError represents some running error from cdc components, such as processor.
// RunningError represents some running errors from cdc components, such as processor.
type RunningError struct {
Time time.Time `json:"time"`
Addr string `json:"addr"`
@@ -32,3 +32,27 @@ type RunningError struct {
func (r RunningError) IsChangefeedUnRetryableError() bool {
return cerror.IsChangefeedUnRetryableError(errors.New(r.Message + r.Code))
}

const (
// ComponentProcessorSink indicates the sink module in processor.
ComponentProcessorSink string = "processor/sink"
// ComponentOwnerSink indicates the sink module in owner.
ComponentOwnerSink string = "owner/sink"
)

// Warning is like an error, but has one difference:
// generally an error will stop and restart a changefeed, but a warning won't.
type Warning struct {
err error
Component string
}

// Error implements builtin `error` interface.
func (e Warning) Error() string {
return e.err.Error()
}

// NewWarning creates a Warning.
func NewWarning(e error, component string) Warning {
return Warning{e, component}
}
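As context for the new type, here is a minimal sketch (not part of this PR) of how a sink component could wrap a transient failure in a `Warning` so that upper layers surface it without restarting the changefeed. `flushOnce` and the `reportWarning` callback are hypothetical names; `model.NewWarning` and `model.ComponentProcessorSink` are the identifiers added above.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/pingcap/tiflow/cdc/model"
)

// flushOnce stands in for any retryable sink operation; it is illustrative only.
func flushOnce() error {
	return errors.New("kafka: request timed out")
}

func main() {
	// The light-weight path: the failure is surfaced, but the changefeed is
	// not stopped and restarted.
	reportWarning := func(err error) { fmt.Println("warning:", err) }

	if err := flushOnce(); err != nil {
		reportWarning(model.NewWarning(err, model.ComponentProcessorSink))
	}
}
```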
40 changes: 28 additions & 12 deletions cdc/owner/changefeed.go
@@ -129,7 +129,11 @@ type changefeed struct {
filter filter.Filter,
) (puller.DDLPuller, error)

newSink func(changefeedID model.ChangeFeedID, info *model.ChangeFeedInfo, reportErr func(error)) DDLSink
newSink func(
changefeedID model.ChangeFeedID, info *model.ChangeFeedInfo,
reportError func(err error), reportWarning func(err error),
) DDLSink

newScheduler func(
ctx cdcContext.Context, up *upstream.Upstream, epoch uint64, cfg *config.SchedulerConfig,
) (scheduler.Scheduler, error)
@@ -179,7 +183,10 @@ func newChangefeed4Test(
schemaStorage entry.SchemaStorage,
filter filter.Filter,
) (puller.DDLPuller, error),
newSink func(changefeedID model.ChangeFeedID, info *model.ChangeFeedInfo, reportErr func(err error)) DDLSink,
newSink func(
Review comment from a Member: Maybe we can give it a type, then we don't need to copy and paste it a couple of times. (One possible shape is sketched after this hunk.)
changefeedID model.ChangeFeedID, info *model.ChangeFeedInfo,
reportError func(err error), reportWarning func(err error),
) DDLSink,
newScheduler func(
ctx cdcContext.Context, up *upstream.Upstream, epoch uint64, cfg *config.SchedulerConfig,
) (scheduler.Scheduler, error),
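Picking up the review comment above, one possible shape for such a named type, assuming it would live in package `owner` next to the `DDLSink` interface; the name `ddlSinkFactory` is hypothetical and not part of this PR.

```go
// ddlSinkFactory spells the constructor signature out once, so changefeed and
// newChangefeed4Test can both refer to the same type instead of repeating it.
type ddlSinkFactory func(
	changefeedID model.ChangeFeedID, info *model.ChangeFeedInfo,
	reportError func(err error), reportWarning func(err error),
) DDLSink
```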
@@ -242,6 +249,14 @@ func (c *changefeed) handleErr(ctx cdcContext.Context, err error) {
} else {
code = string(cerror.ErrOwnerUnknown.RFCCode())
}

switch errors.Cause(err).(type) {
case model.Warning:
// TODO(qupeng): patch it into changefeed info.
return
default:
}

c.feedStateManager.handleError(&model.RunningError{
Time: time.Now(),
Addr: contextutil.CaptureAddrFromCtx(ctx),
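The type switch above is the only place where the two error classes diverge. Below is a self-contained sketch of the same classification; `isWarning` is a hypothetical helper name, while `errors.Cause`, `model.Warning`, `model.NewWarning`, and `model.ComponentOwnerSink` are the identifiers used in this PR.

```go
package main

import (
	"fmt"

	"github.com/pingcap/errors"
	"github.com/pingcap/tiflow/cdc/model"
)

// isWarning mirrors the switch in handleErr: a model.Warning in the cause
// chain means "log it / patch it into the changefeed info", anything else
// means "stop and restart the changefeed".
func isWarning(err error) bool {
	switch errors.Cause(err).(type) {
	case model.Warning:
		return true
	default:
		return false
	}
}

func main() {
	warn := model.NewWarning(errors.New("flush timeout"), model.ComponentOwnerSink)
	fmt.Println(isWarning(warn))                     // true
	fmt.Println(isWarning(errors.New("bad config"))) // false
}
```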
@@ -296,11 +311,7 @@ func (c *changefeed) tick(ctx cdcContext.Context, captures map[model.CaptureID]*
return errors.Trace(err)
default:
}
// we need to wait ddl ddlSink to be ready before we do the other things
// otherwise, we may cause a nil pointer panic when we try to write to the ddl ddlSink.
if !c.ddlSink.isInitialized() {
return nil
}

// TODO: pass table checkpointTs when we support concurrent process ddl
allPhysicalTables, minTableBarrierTs, barrier, err := c.ddlManager.tick(ctx, checkpointTs, nil)
if err != nil {
@@ -567,7 +578,13 @@ LOOP:
zap.String("changefeed", c.id.ID),
)

c.ddlSink = c.newSink(c.id, c.state.Info, ctx.Throw)
c.ddlSink = c.newSink(c.id, c.state.Info, ctx.Throw, func(err error) {
// TODO(qupeng): report the warning.
log.Info("ddlSink internal error",
zap.String("namespace", c.id.Namespace),
zap.String("changefeed", c.id.ID),
zap.Error(err))
})
c.ddlSink.run(cancelCtx)

c.ddlPuller, err = c.newDDLPuller(cancelCtx,
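To illustrate the split wired up above, here is a hypothetical background loop inside a sink implementation: unretryable failures go to `reportError` (which feeds `ctx.Throw`), everything else goes to `reportWarning` and the loop keeps running. `runSinkLoop` and `flush` are illustrative names, not part of this PR; `cerror.IsChangefeedUnRetryableError` is the real helper already used in cdc/model/errors.go.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	cerror "github.com/pingcap/tiflow/pkg/errors"
)

// runSinkLoop periodically flushes and classifies failures: unretryable ones
// go to reportError (the changefeed stops and restarts), everything else is
// handed to reportWarning and the loop keeps going.
func runSinkLoop(ctx context.Context, flush func() error, reportError, reportWarning func(error)) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := flush(); err != nil {
				if cerror.IsChangefeedUnRetryableError(err) {
					reportError(err)
					return
				}
				reportWarning(err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	runSinkLoop(ctx,
		func() error { return errors.New("transient flush failure") }, // always retryable here
		func(err error) { fmt.Println("fatal:", err) },
		func(err error) { fmt.Println("warning:", err) },
	)
}
```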
@@ -586,8 +603,7 @@ LOOP:
ctx.Throw(c.ddlPuller.Run(cancelCtx))
}()

c.downstreamObserver, err = c.newDownstreamObserver(
ctx, c.state.Info.SinkURI, c.state.Info.Config)
c.downstreamObserver, err = c.newDownstreamObserver(ctx, c.state.Info.SinkURI, c.state.Info.Config)
if err != nil {
return err
}
@@ -1013,8 +1029,8 @@ func (c *changefeed) tickDownstreamObserver(ctx context.Context) {
defer cancel()
if err := c.downstreamObserver.Tick(cctx); err != nil {
// Prometheus is not deployed, it happens in non production env.
if strings.Contains(err.Error(),
fmt.Sprintf(":%d", errno.ErrPrometheusAddrIsNotSet)) {
noPrometheusMsg := fmt.Sprintf(":%d", errno.ErrPrometheusAddrIsNotSet)
if strings.Contains(err.Error(), noPrometheusMsg) {
return
}
log.Warn("backend observer tick error", zap.Error(err))
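The refactor above only extracts the matched substring into a variable. For context, a self-contained sketch of the underlying pattern, suppressing one known benign error by matching the error number embedded in its message; the constant value and the sample messages below are made up for illustration and are not the real TiDB error code.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// errPrometheusAddrIsNotSet stands in for errno.ErrPrometheusAddrIsNotSet;
// the numeric value below is illustrative, NOT the real TiDB error code.
const errPrometheusAddrIsNotSet = 99999

func handleObserverErr(err error) {
	// The benign "Prometheus is not deployed" case is recognized by the error
	// number embedded in the message and silently skipped.
	noPrometheusMsg := fmt.Sprintf(":%d", errPrometheusAddrIsNotSet)
	if strings.Contains(err.Error(), noPrometheusMsg) {
		return
	}
	fmt.Println("backend observer tick error:", err)
}

func main() {
	handleObserverErr(errors.New("query failed: [tidb:99999] prometheus address is not set"))
	handleObserverErr(errors.New("connection refused"))
}
```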
4 changes: 0 additions & 4 deletions cdc/owner/changefeed_test.go
@@ -150,10 +150,6 @@ func (m *mockDDLSink) close(ctx context.Context) error {
return nil
}

func (m *mockDDLSink) isInitialized() bool {
return true
}

func (m *mockDDLSink) Barrier(ctx context.Context) error {
return nil
}