Skip to content

Commit

Permalink
op-batcher: handle EIP-7623 in DA switching calculation (#13870)
Browse files Browse the repository at this point in the history
* WIP sketch out change in calldata calculation if l1 activated EIP-7623

* infer L1 Pectra activation from the nullity of header.requestsHash

pass this down to the TxData method so it can make an informed decision on DA type

* add godoc

* refactor and add unit test

* tweak godoc

* use much larger / more representative values in test

* don't forget version byte

* add another unit test

* fix bug

* typos

* typo

* make e2e test handle pectra

* tweak

* unit tests for DA switching cover L1 Pectra active and inactive

* prefer isPectra = true in tests generally

* tighten up test params

* Wire up Prague activation time in L1 genesis

* run da switching test on holocene and activate prague at genesis with HoloceneSystemConfig

* fix op-e2e fakepos to handle prague fork on L1

* update e2e tests to activate prague if they activate cancun

* Revert "update e2e tests to activate prague if they activate cancun"

This reverts commit f626d8d.

* only activate prague in AutoDA test for now

* default to no Prague

* increase numTxs

* fix godoc
  • Loading branch information
geoknee authored Jan 31, 2025
1 parent d06c7c6 commit 4bb98fa
Show file tree
Hide file tree
Showing 11 changed files with 207 additions and 88 deletions.
2 changes: 1 addition & 1 deletion op-batcher/batcher/channel_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ type ChannelConfig struct {

// ChannelConfig returns a copy of the receiver.
// This allows the receiver to be a static ChannelConfigProvider of itself.
func (cc ChannelConfig) ChannelConfig() ChannelConfig {
func (cc ChannelConfig) ChannelConfig(isPectra bool) ChannelConfig {
return cc
}

Expand Down
81 changes: 55 additions & 26 deletions op-batcher/batcher/channel_config_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,9 @@ import (
"github.com/ethereum/go-ethereum/params"
)

const randomByteCalldataGas = params.TxDataNonZeroGasEIP2028

type (
ChannelConfigProvider interface {
ChannelConfig() ChannelConfig
ChannelConfig(isPectra bool) ChannelConfig
}

GasPricer interface {
Expand Down Expand Up @@ -52,7 +50,7 @@ func NewDynamicEthChannelConfig(lgr log.Logger,
// calldata and for blobs, given current market conditions: it will return
// the appropriate ChannelConfig depending on which is cheaper. It makes
// assumptions about the typical makeup of channel data.
func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig {
func (dec *DynamicEthChannelConfig) ChannelConfig(isPectra bool) ChannelConfig {
ctx, cancel := context.WithTimeout(context.Background(), dec.timeout)
defer cancel()
tipCap, baseFee, blobBaseFee, err := dec.gasPricer.SuggestGasPriceCaps(ctx)
Expand All @@ -61,36 +59,36 @@ func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig {
return *dec.lastConfig
}

// We estimate the gas costs of a calldata and blob tx under the assumption that we'd fill
// a frame fully and compressed random channel data has few zeros, so they can be
// ignored in the calldata gas price estimation.
// It is also assumed that a calldata tx would contain exactly one full frame
// and a blob tx would contain target-num-frames many blobs.
// Channels built for blobs have higher capacity than channels built for calldata.
// If we have a channel built for calldata, we want to switch to blobs if the cost per byte is lower. Doing so
// will mean a new channel is built which will not be full but will eventually fill up with additional data.
// If we have a channel built for blobs, we similarly want to switch to calldata if the cost per byte is lower. Doing so
// will mean several new (full) channels will be built resulting in several calldata txs. We compute the cost per byte
// for a _single_ transaction in either case.

// It would be nicer to use core.IntrinsicGas, but we don't have the actual data at hand
calldataBytes := dec.calldataConfig.MaxFrameSize + 1 // + 1 version byte
calldataGas := big.NewInt(int64(calldataBytes*randomByteCalldataGas + params.TxGas))
calldataPrice := new(big.Int).Add(baseFee, tipCap)
calldataCost := new(big.Int).Mul(calldataGas, calldataPrice)
// We assume that compressed random channel data has few zeros so they can be ignored (in actuality,
// zero bytes are worth one token instead of four):
calldataBytesPerTx := dec.calldataConfig.MaxFrameSize + 1 // +1 for the version byte
tokensPerCalldataTx := uint64(calldataBytesPerTx * 4)
numBlobsPerTx := dec.blobConfig.TargetNumFrames

blobGas := big.NewInt(params.BlobTxBlobGasPerBlob * int64(dec.blobConfig.TargetNumFrames))
blobCost := new(big.Int).Mul(blobGas, blobBaseFee)
// blobs still have intrinsic calldata costs
blobCalldataCost := new(big.Int).Mul(big.NewInt(int64(params.TxGas)), calldataPrice)
blobCost = blobCost.Add(blobCost, blobCalldataCost)
// Compute the total absolute cost of submitting either a single calldata tx or a single blob tx.
calldataCost, blobCost := computeSingleCalldataTxCost(tokensPerCalldataTx, baseFee, tipCap, isPectra),
computeSingleBlobTxCost(numBlobsPerTx, baseFee, tipCap, blobBaseFee)

// Now we compare the absolute cost per tx divided by the number of bytes per tx:
blobDataBytesPerTx := big.NewInt(eth.MaxBlobDataSize * int64(numBlobsPerTx))

// Now we compare the prices divided by the number of bytes that can be
// submitted for that price.
blobDataBytes := big.NewInt(eth.MaxBlobDataSize * int64(dec.blobConfig.TargetNumFrames))
// The following will compare blobCost(a)/blobDataBytes(x) > calldataCost(b)/calldataBytes(y):
ay := new(big.Int).Mul(blobCost, big.NewInt(int64(calldataBytes)))
bx := new(big.Int).Mul(calldataCost, blobDataBytes)
ay := new(big.Int).Mul(blobCost, big.NewInt(int64(calldataBytesPerTx)))
bx := new(big.Int).Mul(calldataCost, blobDataBytesPerTx)

// ratio only used for logging, more correct multiplicative calculation used for comparison
ayf, bxf := new(big.Float).SetInt(ay), new(big.Float).SetInt(bx)
costRatio := new(big.Float).Quo(ayf, bxf)
lgr := dec.log.New("base_fee", baseFee, "blob_base_fee", blobBaseFee, "tip_cap", tipCap,
"calldata_bytes", calldataBytes, "calldata_cost", calldataCost,
"blob_data_bytes", blobDataBytes, "blob_cost", blobCost,
"calldata_bytes", calldataBytesPerTx, "calldata_cost", calldataCost,
"blob_data_bytes", blobDataBytesPerTx, "blob_cost", blobCost,
"cost_ratio", costRatio)

if ay.Cmp(bx) == 1 {
Expand All @@ -102,3 +100,34 @@ func (dec *DynamicEthChannelConfig) ChannelConfig() ChannelConfig {
dec.lastConfig = &dec.blobConfig
return dec.blobConfig
}

// computeSingleCalldataTxCost computes the total cost (in Wei) of submitting a single
// calldata transaction carrying numTokens calldata tokens (a zero byte is one token,
// a nonzero byte is four tokens) at the given baseFee and tipCap. If isPectra is true,
// the EIP-7623 calldata cost floor is applied.
func computeSingleCalldataTxCost(numTokens uint64, baseFee, tipCap *big.Int, isPectra bool) *big.Int {
	// We assume isContractCreation = false and execution_gas_used = 0 in https://eips.ethereum.org/EIPS/eip-7623
	// This is a safe assumption given how batcher transactions are constructed.
	const (
		standardTokenCost      = 4  // pre-Pectra gas per calldata token
		totalCostFloorPerToken = 10 // post-Pectra (EIP-7623) floor gas per calldata token
	)
	var multiplier uint64
	if isPectra {
		multiplier = totalCostFloorPerToken
	} else {
		multiplier = standardTokenCost
	}

	// Compute the gas entirely in big.Int to avoid overflowing the uint64
	// product (and its conversion to int64) for very large token counts.
	calldataGas := new(big.Int).SetUint64(numTokens)
	calldataGas.Mul(calldataGas, new(big.Int).SetUint64(multiplier))
	calldataGas.Add(calldataGas, new(big.Int).SetUint64(params.TxGas))

	calldataPrice := new(big.Int).Add(baseFee, tipCap)

	return calldataGas.Mul(calldataGas, calldataPrice)
}

// computeSingleBlobTxCost computes the total cost (in Wei) of submitting a single
// blob transaction carrying numBlobs blobs at the given gas price caps.
// There is no execution gas or contract creation cost for blob transactions.
func computeSingleBlobTxCost(numBlobs int, baseFee, tipCap, blobBaseFee *big.Int) *big.Int {
	// A blob tx still pays the intrinsic execution-layer transaction cost.
	execPrice := new(big.Int).Add(baseFee, tipCap)
	execCost := new(big.Int).Mul(big.NewInt(int64(params.TxGas)), execPrice)

	// Plus the blob-gas cost for every blob it carries.
	totalBlobGas := big.NewInt(params.BlobTxBlobGasPerBlob * int64(numBlobs))
	totalCost := new(big.Int).Mul(totalBlobGas, blobBaseFee)

	return totalCost.Add(totalCost, execCost)
}
57 changes: 53 additions & 4 deletions op-batcher/batcher/channel_config_provider_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
baseFee int64
blobBaseFee int64
wantCalldata bool
isL1Pectra bool
}{
{
name: "much-cheaper-blobs",
Expand Down Expand Up @@ -71,6 +72,36 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
blobBaseFee: 1e9,
wantCalldata: true,
},
{
name: "much-cheaper-blobs-l1-pectra",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 1,
isL1Pectra: true,
},
{
name: "close-cheaper-blobs-l1-pectra",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 398e5, // this value is just under the equilibrium point for 3 blobs
isL1Pectra: true,
},
{
name: "close-cheaper-calldata-l1-pectra",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 399e5, // this value is just over the equilibrium point for 3 blobs
wantCalldata: true,
isL1Pectra: true,
},
{
name: "much-cheaper-calldata-l1-pectra",
tipCap: 1e3,
baseFee: 1e6,
blobBaseFee: 1e9,
wantCalldata: true,
isL1Pectra: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
Expand All @@ -81,7 +112,7 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
blobBaseFee: tt.blobBaseFee,
}
dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg)
cc := dec.ChannelConfig()
cc := dec.ChannelConfig(tt.isL1Pectra)
if tt.wantCalldata {
require.Equal(t, cc, calldataCfg)
require.NotNil(t, ch.FindLog(testlog.NewMessageContainsFilter("calldata")))
Expand All @@ -103,24 +134,42 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
err: errors.New("gp-error"),
}
dec := NewDynamicEthChannelConfig(lgr, 1*time.Second, gp, blobCfg, calldataCfg)
require.Equal(t, dec.ChannelConfig(), blobCfg)
require.Equal(t, dec.ChannelConfig(false), blobCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelWarn),
testlog.NewMessageContainsFilter("returning last config"),
))

gp.err = nil
require.Equal(t, dec.ChannelConfig(), calldataCfg)
require.Equal(t, dec.ChannelConfig(false), calldataCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelInfo),
testlog.NewMessageContainsFilter("calldata"),
))

gp.err = errors.New("gp-error-2")
require.Equal(t, dec.ChannelConfig(), calldataCfg)
require.Equal(t, dec.ChannelConfig(false), calldataCfg)
require.NotNil(t, ch.FindLog(
testlog.NewLevelFilter(slog.LevelWarn),
testlog.NewMessageContainsFilter("returning last config"),
))
})
}

// TestComputeSingleCalldataTxCost checks the calldata tx cost pre- and
// post-Pectra for a representative payload size.
func TestComputeSingleCalldataTxCost(t *testing.T) {
	const numTokens = 120_000 // 30KB of data
	baseFee, tipCap := big.NewInt(1), big.NewInt(1)

	prePectra := computeSingleCalldataTxCost(numTokens, baseFee, tipCap, false)
	require.Equal(t, big.NewInt(1_002_000), prePectra) // (21_000 + 4*120_000) * (1+1)

	postPectra := computeSingleCalldataTxCost(numTokens, baseFee, tipCap, true)
	require.Equal(t, big.NewInt(2_442_000), postPectra) // (21_000 + 10*120_000) * (1+1)
}

// TestComputeSingleBlobTxCost checks the blob tx cost computation for a 5-blob tx.
func TestComputeSingleBlobTxCost(t *testing.T) {
	// This tx submits 655KB of data (21x the calldata example above)
	// Setting blobBaseFee to 16x (baseFee + tipCap) gives a cost which is ~21x higher
	// than the calldata example, showing the rough equilibrium point
	// of the two DA markets.
	const numBlobs = 5
	baseFee, tipCap, blobBaseFee := big.NewInt(1), big.NewInt(1), big.NewInt(32)

	cost := computeSingleBlobTxCost(numBlobs, baseFee, tipCap, blobBaseFee)
	require.Equal(t, big.NewInt(21_013_520), cost) // 21_000 * (1+1) + 131_072*5*32
}
6 changes: 3 additions & 3 deletions op-batcher/batcher/channel_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ func NewChannelManager(log log.Logger, metr metrics.Metricer, cfgProvider Channe
log: log,
metr: metr,
cfgProvider: cfgProvider,
defaultCfg: cfgProvider.ChannelConfig(),
defaultCfg: cfgProvider.ChannelConfig(false),
rollupCfg: rollupCfg,
outFactory: NewChannelOut,
txChannels: make(map[string]*channel),
Expand Down Expand Up @@ -190,7 +190,7 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) {
// It will decide whether to switch DA type automatically.
// When switching DA type, the channelManager state will be rebuilt
// with a new ChannelConfig.
func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
func (s *channelManager) TxData(l1Head eth.BlockID, isPectra bool) (txData, error) {
channel, err := s.getReadyChannel(l1Head)
if err != nil {
return emptyTxData, err
Expand All @@ -202,7 +202,7 @@ func (s *channelManager) TxData(l1Head eth.BlockID) (txData, error) {
}

// Call provider method to reassess optimal DA type
newCfg := s.cfgProvider.ChannelConfig()
newCfg := s.cfgProvider.ChannelConfig(isPectra)

// No change:
if newCfg.UseBlobs == s.defaultCfg.UseBlobs {
Expand Down
16 changes: 8 additions & 8 deletions op-batcher/batcher/channel_manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,9 @@ func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) {

require.NoError(t, m.AddL2Block(a))

_, err := m.TxData(eth.BlockID{})
_, err := m.TxData(eth.BlockID{}, false)
require.NoError(t, err)
_, err = m.TxData(eth.BlockID{})
_, err = m.TxData(eth.BlockID{}, false)
require.ErrorIs(t, err, io.EOF)

require.ErrorIs(t, m.AddL2Block(x), ErrReorg)
Expand Down Expand Up @@ -199,21 +199,21 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) {

require.NoError(m.AddL2Block(a))

txdata0, err := m.TxData(eth.BlockID{})
txdata0, err := m.TxData(eth.BlockID{}, false)
require.NoError(err)
txdata0bytes := txdata0.CallData()
data0 := make([]byte, len(txdata0bytes))
// make sure we have a clone for later comparison
copy(data0, txdata0bytes)

// ensure channel is drained
_, err = m.TxData(eth.BlockID{})
_, err = m.TxData(eth.BlockID{}, false)
require.ErrorIs(err, io.EOF)

// requeue frame
m.TxFailed(txdata0.ID())

txdata1, err := m.TxData(eth.BlockID{})
txdata1, err := m.TxData(eth.BlockID{}, false)
require.NoError(err)

data1 := txdata1.CallData()
Expand Down Expand Up @@ -276,7 +276,7 @@ type FakeDynamicEthChannelConfig struct {
assessments int
}

func (f *FakeDynamicEthChannelConfig) ChannelConfig() ChannelConfig {
func (f *FakeDynamicEthChannelConfig) ChannelConfig(isPectra bool) ChannelConfig {
f.assessments++
if f.chooseBlobs {
return f.blobConfig
Expand Down Expand Up @@ -356,7 +356,7 @@ func TestChannelManager_TxData(t *testing.T) {
m.blocks = []*types.Block{blockA}

// Call TxData a first time to trigger blocks->channels pipeline
_, err := m.TxData(eth.BlockID{})
_, err := m.TxData(eth.BlockID{}, false)
require.ErrorIs(t, err, io.EOF)

// The test requires us to have something in the channel queue
Expand All @@ -375,7 +375,7 @@ func TestChannelManager_TxData(t *testing.T) {
var data txData
for {
m.blocks = append(m.blocks, blockA)
data, err = m.TxData(eth.BlockID{})
data, err = m.TxData(eth.BlockID{}, false)
if err == nil && data.Len() > 0 {
break
}
Expand Down
15 changes: 9 additions & 6 deletions op-batcher/batcher/driver.go
Original file line number Diff line number Diff line change
Expand Up @@ -602,7 +602,7 @@ func (l *BatchSubmitter) waitNodeSync() error {
cCtx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout)
defer cancel()

l1Tip, err := l.l1Tip(cCtx)
l1Tip, _, err := l.l1Tip(cCtx)
if err != nil {
return fmt.Errorf("failed to retrieve l1 tip: %w", err)
}
Expand Down Expand Up @@ -700,7 +700,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) {
func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error {

// send all available transactions
l1tip, err := l.l1Tip(ctx)
l1tip, isPectra, err := l.l1Tip(ctx)
if err != nil {
l.Log.Error("Failed to query L1 tip", "err", err)
return err
Expand All @@ -710,7 +710,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t
// Collect next transaction data. This pulls data out of the channel, so we need to make sure
// to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx.
l.channelMgrMutex.Lock()
txdata, err := l.channelMgr.TxData(l1tip.ID())
txdata, err := l.channelMgr.TxData(l1tip.ID(), isPectra)
l.channelMgrMutex.Unlock()

if err == io.EOF {
Expand Down Expand Up @@ -917,14 +917,17 @@ func (l *BatchSubmitter) recordConfirmedTx(id txID, receipt *types.Receipt) {

// l1Tip gets the current L1 tip as a L1BlockRef. The passed context is assumed
// to be a lifetime context, so it is internally wrapped with a network timeout.
func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, error) {
// It also returns a boolean indicating if the tip is from a Pectra chain.
func (l *BatchSubmitter) l1Tip(ctx context.Context) (eth.L1BlockRef, bool, error) {
tctx, cancel := context.WithTimeout(ctx, l.Config.NetworkTimeout)
defer cancel()
head, err := l.L1Client.HeaderByNumber(tctx, nil)

if err != nil {
return eth.L1BlockRef{}, fmt.Errorf("getting latest L1 block: %w", err)
return eth.L1BlockRef{}, false, fmt.Errorf("getting latest L1 block: %w", err)
}
return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), nil
isPectra := head.RequestsHash != nil // See https://eips.ethereum.org/EIPS/eip-7685
return eth.InfoToL1BlockRef(eth.HeaderBlockInfo(head)), isPectra, nil
}

func (l *BatchSubmitter) checkTxpool(queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef]) bool {
Expand Down
2 changes: 1 addition & 1 deletion op-batcher/batcher/test_batch_submitter.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ func (l *TestBatchSubmitter) JamTxPool(ctx context.Context) error {
}
var candidate *txmgr.TxCandidate
var err error
cc := l.channelMgr.cfgProvider.ChannelConfig()
cc := l.channelMgr.cfgProvider.ChannelConfig(true)
if cc.UseBlobs {
candidate = l.calldataTxCandidate([]byte{})
} else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil {
Expand Down
2 changes: 2 additions & 0 deletions op-chain-ops/genesis/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -354,6 +354,8 @@ type UpgradeScheduleDeployConfig struct {

// When Cancun activates. Relative to L1 genesis.
L1CancunTimeOffset *hexutil.Uint64 `json:"l1CancunTimeOffset,omitempty"`
// When Prague activates. Relative to L1 genesis.
L1PragueTimeOffset *hexutil.Uint64 `json:"l1PragueTimeOffset,omitempty"`

// UseInterop is a flag that indicates if the system is using interop
UseInterop bool `json:"useInterop,omitempty"`
Expand Down
4 changes: 4 additions & 0 deletions op-chain-ops/genesis/genesis.go
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,10 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) {
cancunTime := uint64(timestamp) + uint64(*config.L1CancunTimeOffset)
chainConfig.CancunTime = &cancunTime
}
if config.L1PragueTimeOffset != nil {
pragueTime := uint64(timestamp) + uint64(*config.L1PragueTimeOffset)
chainConfig.PragueTime = &pragueTime
}

return &core.Genesis{
Config: &chainConfig,
Expand Down
Loading

0 comments on commit 4bb98fa

Please sign in to comment.