optimize(op-node): increase catching up speed when sequencer lagging #108

Merged: 2 commits, Jan 5, 2024
Changes from all commits
1 change: 1 addition & 0 deletions op-node/rollup/driver/origin_selector.go
@@ -81,6 +81,7 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc
_, _, err = los.l1.FetchReceipts(receiptsCtx, nextOrigin.Hash)
if err != nil {
receiptsCached = false
log.Warn("receipts cache miss while sequencer is building a block")
}

// If the next L2 block time is greater than the next origin block's time, we can choose to
24 changes: 21 additions & 3 deletions op-node/rollup/driver/sequencer.go
@@ -15,6 +15,12 @@ import (
"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
)

// When block production is interrupted by high L1 latency, the sequencer still builds a full block periodically so the chain does not get stuck
const buildFullBlockInterval = 20

// When block production lags by more than lagTimeWindow, the sequencer sets attrs.NoTxPool to true so it can catch up quickly
const lagTimeWindow = 2 * time.Minute

type Downloader interface {
InfoByHash(ctx context.Context, hash common.Hash) (eth.BlockInfo, error)
FetchReceipts(ctx context.Context, blockHash common.Hash) (eth.BlockInfo, types.Receipts, error)
@@ -47,7 +53,7 @@ type Sequencer struct {

nextAction time.Time

// if accEmptyBlocks>10, will delay nextAction 600ms for full block building
// if accEmptyBlocks >= buildFullBlockInterval, nextAction is delayed 600ms to allow a full block to be built
accEmptyBlocks int
}

@@ -93,7 +99,19 @@ func (d *Sequencer) StartBuildingBlock(ctx context.Context) error {
// empty blocks (other than the L1 info deposit and any user deposits). We handle this by
// setting NoTxPool to true, which will cause the Sequencer to not include any transactions
// from the transaction pool.
attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.config.MaxSequencerDrift
if uint64(attrs.Timestamp) > l1Origin.Time+d.config.MaxSequencerDrift {
attrs.NoTxPool = true
} else {
// This is a short-term solution to speed up sequencer catch-up.
// A long-term solution should optimize the op-geth payload building workflow.
attrsTime := time.Unix(int64(attrs.Timestamp), 0)
isCatchingUp := time.Since(attrsTime) > lagTimeWindow
if isCatchingUp && (d.accEmptyBlocks < buildFullBlockInterval) {
attrs.NoTxPool = true
} else {
attrs.NoTxPool = false
}
}

d.log.Debug("prepared attributes for new block",
"num", l2Head.Number+1, "time", uint64(attrs.Timestamp),
@@ -256,7 +274,7 @@ func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionP
}
} else {
parent, buildingID, _ := d.engine.BuildingPayload() // we should have a new payload ID now that we're building a block
if d.accEmptyBlocks > 10 {
if d.accEmptyBlocks >= buildFullBlockInterval {
d.nextAction = d.timeNow().Add(600 * time.Millisecond)
d.accEmptyBlocks = 0
d.log.Info("sequencer delaying next action by 600ms and resetting accEmptyBlocks")
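For reference, here is a minimal standalone sketch (not part of the diff) of the NoTxPool decision introduced above. The helper decideNoTxPool, its parameters, and the demo values in main are illustrative; only the two constants and the comparison logic mirror the change in sequencer.go.

package main

import (
	"fmt"
	"time"
)

const (
	buildFullBlockInterval = 20              // every Nth lagging block is still built with txs to avoid a stuck chain
	lagTimeWindow          = 2 * time.Minute // lag beyond this triggers catch-up (empty-block) mode
)

// decideNoTxPool reports whether the next block should skip the tx pool.
// Illustrative helper: the real logic lives inline in Sequencer.StartBuildingBlock.
func decideNoTxPool(attrsTimestamp, l1OriginTime, maxSequencerDrift uint64, accEmptyBlocks int, now time.Time) bool {
	// Past the sequencer drift limit: only deposits may be included, so never pull from the tx pool.
	if attrsTimestamp > l1OriginTime+maxSequencerDrift {
		return true
	}
	// If the block being built is more than lagTimeWindow behind wall-clock time, build empty
	// blocks to catch up quickly, but still allow a full block every buildFullBlockInterval blocks.
	isCatchingUp := now.Sub(time.Unix(int64(attrsTimestamp), 0)) > lagTimeWindow
	return isCatchingUp && accEmptyBlocks < buildFullBlockInterval
}

func main() {
	now := time.Now()
	lagging := uint64(now.Add(-5 * time.Minute).Unix()) // block timestamp 5 minutes behind wall clock

	fmt.Println(decideNoTxPool(lagging, lagging, 600, 3, now))  // true: catching up, skip the tx pool
	fmt.Println(decideNoTxPool(lagging, lagging, 600, 25, now)) // false: interval reached, build a full block
}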
3 changes: 2 additions & 1 deletion op-node/rollup/driver/sequencer_test.go
@@ -378,5 +378,6 @@ func TestSequencerChaosMonkey(t *testing.T) {
require.Less(t, l2Head.Time-l1Times[l2Head.L1Origin], uint64(100), "The L1 origin time is close to the L2 time")
require.Less(t, clockTime.Sub(time.Unix(int64(l2Head.Time), 0)).Abs(), 2*time.Second, "L2 time is accurate, within 2 seconds of wallclock")
require.Greater(t, engControl.avgBuildingTime(), time.Second, "With 2 second block time and 1 second error backoff and healthy-on-average errors, building time should at least be a second")
require.Greater(t, engControl.avgTxsPerBlock(), 3.0, "We expect at least 1 system tx per block, but with a mocked 0-10 txs we expect an higher avg")
// the sequencer catch-up optimization lowers the average txs per block, but it should still exceed 1
require.Greater(t, engControl.avgTxsPerBlock(), 1.0, "We expect at least 1 system tx per block, but with a mocked 0-10 txs we expect a higher avg")
}