ticdc/mq: accurately demonstrate txn_batch_size metric for MQ sink (#3609) #3820

Merged
cdc/sink/codec/interface.go (31 additions, 14 deletions)

@@ -55,13 +55,14 @@ type EventBatchEncoder interface {
 
 // MQMessage represents an MQ message to the mqSink
 type MQMessage struct {
-	Key      []byte
-	Value    []byte
-	Ts       uint64              // reserved for possible output sorting
-	Schema   *string             // schema
-	Table    *string             // table
-	Type     model.MqMessageType // type
-	Protocol Protocol            // protocol
+	Key       []byte
+	Value     []byte
+	Ts        uint64              // reserved for possible output sorting
+	Schema    *string             // schema
+	Table     *string             // table
+	Type      model.MqMessageType // type
+	Protocol  Protocol            // protocol
+	rowsCount int                 // rows in one MQ Message
 }
 
 // maximumRecordOverhead is used to calculate ProducerMessage's byteSize by sarama kafka client.

@@ -81,6 +82,21 @@ func (m *MQMessage) PhysicalTime() time.Time {
 	return oracle.GetTimeFromTS(m.Ts)
 }
 
+// GetRowsCount returns the number of rows batched in one MQMessage
+func (m *MQMessage) GetRowsCount() int {
+	return m.rowsCount
+}
+
+// SetRowsCount set the number of rows
+func (m *MQMessage) SetRowsCount(cnt int) {
+	m.rowsCount = cnt
+}
+
+// IncRowsCount increase the number of rows
+func (m *MQMessage) IncRowsCount() {
+	m.rowsCount++
+}
+
 func newDDLMQMessage(proto Protocol, key, value []byte, event *model.DDLEvent) *MQMessage {
 	return NewMQMessage(proto, key, value, event.CommitTs, model.MqMessageTypeDDL, &event.TableInfo.Schema, &event.TableInfo.Table)
 }

@@ -93,13 +109,14 @@ func newResolvedMQMessage(proto Protocol, key, value []byte, ts uint64) *MQMessage {
 // It copies the input byte slices to avoid any surprises in asynchronous MQ writes.
 func NewMQMessage(proto Protocol, key []byte, value []byte, ts uint64, ty model.MqMessageType, schema, table *string) *MQMessage {
 	ret := &MQMessage{
-		Key:      nil,
-		Value:    nil,
-		Ts:       ts,
-		Schema:   schema,
-		Table:    table,
-		Type:     ty,
-		Protocol: proto,
+		Key:       nil,
+		Value:     nil,
+		Ts:        ts,
+		Schema:    schema,
+		Table:     table,
+		Type:      ty,
+		Protocol:  proto,
+		rowsCount: 0,
 	}
 
 	if key != nil {
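Taken together, the unexported rowsCount field and its three accessors let an encoder record how many row events were folded into a single MQ message, and let the sink read that count back later. A minimal sketch of the intended flow, assuming it lives inside the codec package (batchRows and encodedRows are hypothetical; only the MQMessage API comes from this diff):

```go
// batchRows is a hypothetical helper: it folds several pre-encoded rows
// into one MQ message while tracking how many rows the message carries.
func batchRows(encodedRows [][]byte) *MQMessage {
	msg := NewMQMessage(ProtocolDefault, nil, nil, 0, model.MqMessageTypeRow, nil, nil)
	for _, row := range encodedRows {
		msg.Value = append(msg.Value, row...) // append one encoded row payload
		msg.IncRowsCount()                    // count one row per appended event
	}
	return msg // msg.GetRowsCount() now equals len(encodedRows)
}
```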
cdc/sink/codec/json.go (1 addition, 0 deletions)

@@ -427,6 +427,7 @@ func (d *JSONEventBatchEncoder) AppendRowChangedEvent(e *model.RowChangedEvent)
 	message.Ts = e.CommitTs
 	message.Schema = &e.Table.Schema
 	message.Table = &e.Table.Table
+	message.IncRowsCount()
 
 	if message.Length() > d.maxKafkaMessageSize {
 		// `len(d.messageBuf) == 1` is implied
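This single call is what makes the counter meaningful: AppendRowChangedEvent increments the current message's row count for every row it batches, so a Kafka message packing, say, 32 rows is attributed 32 rows instead of being treated as one opaque unit.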
cdc/sink/mq.go (3 additions, 2 deletions)

@@ -302,8 +302,8 @@ func (k *mqSink) runWorker(ctx context.Context, partition int32) error {
 	flushToProducer := func(op codec.EncoderResult) error {
 		return k.statistics.RecordBatchExecution(func() (int, error) {
 			messages := encoder.Build()
-			thisBatchSize := len(messages)
-			if thisBatchSize == 0 {
+			thisBatchSize := 0
+			if len(messages) == 0 {
 				return 0, nil
 			}
 

@@ -312,6 +312,7 @@ func (k *mqSink) runWorker(ctx context.Context, partition int32) error {
 				if err != nil {
 					return 0, err
 				}
+				thisBatchSize += msg.GetRowsCount()
 			}
 
 			if op == codec.EncoderNeedSyncWrite {
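This is where the metric's meaning actually changes: RecordBatchExecution used to be handed the number of MQ messages built, and is now handed the total number of rows across those messages. A condensed restatement of the two versions (the real loop also hands each msg to the producer and checks the error, omitted here):

```go
// Before: txn_batch_size counted MQ messages, so a message batching
// many rows was still recorded as a single unit.
thisBatchSize := len(messages)

// After: sum the per-message row counts, so the metric reflects rows.
thisBatchSize := 0
for _, msg := range messages {
	thisBatchSize += msg.GetRowsCount()
}
```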