feat(op-node): add l1 cache size config #225

Merged: 5 commits, Aug 15, 2024
2 changes: 1 addition & 1 deletion in op-chain-ops/cmd/check-derivation/main.go
@@ -114,7 +114,7 @@ func newClientsFromContext(cliCtx *cli.Context) (*ethclient.Client, *sources.Eth
MethodResetDuration: time.Minute,
}
cl := ethclient.NewClient(clients.L2RpcClient)
ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg)
ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg, false)
if err != nil {
return nil, nil, err
}
1 change: 1 addition & 0 deletions in op-e2e/setup.go
@@ -923,6 +923,7 @@ func configureL1(rollupNodeCfg *rollupNode.Config, l1Node EthInstance) {
BatchSize: 20,
HttpPollInterval: time.Millisecond * 100,
MaxConcurrency: 10,
CacheSize: 1000,
}
}

8 changes: 8 additions & 0 deletions in op-node/flags/flags.go
@@ -180,6 +180,13 @@ var (
Value: 20,
Category: L1RPCCategory,
}
L1RPCMaxCacheSize = &cli.IntFlag{
Name: "l1.rpc-max-cache-size",
Usage: "The maximum cache size of the L1 client. it should be greater than or equal to the maximum height difference between the L1 blocks corresponding to the unsafe block height and the safe block height. Must be greater than or equal to 1",
EnvVars: prefixEnvVars("L1_RPC_MAX_CACHE_SIZE"),
Value: 1000,
Category: L1RPCCategory,
}
L1HTTPPollInterval = &cli.DurationFlag{
Name: "l1.http-poll-interval",
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
@@ -417,6 +424,7 @@ var optionalFlags = []cli.Flag{
L1RPCProviderKind,
L1RPCRateLimit,
L1RPCMaxBatchSize,
L1RPCMaxCacheSize,
L1RPCMaxConcurrency,
L1HTTPPollInterval,
L1ArchiveBlobRpcAddr,
15 changes: 15 additions & 0 deletions in op-node/node/client.go
@@ -117,6 +117,10 @@ type L1EndpointConfig struct {
// BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set).
BatchSize int

// CacheSize specifies the maximum cache size of the L1 client.
// It should be greater than or equal to the maximum height difference between the L1 blocks corresponding to the unsafe and safe block heights.
CacheSize int

// MaxConcurrency specifies the maximum number of concurrent requests to the L1 RPC.
MaxConcurrency int

@@ -133,6 +137,9 @@ func (cfg *L1EndpointConfig) Check() error {
if cfg.BatchSize < 1 || cfg.BatchSize > 500 {
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
}
if cfg.CacheSize < 1 {
return fmt.Errorf("cache size is invalid or unreasonable: %d", cfg.CacheSize)
}
if cfg.RateLimit < 0 {
return fmt.Errorf("rate limit cannot be negative")
}
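The new lower bound is easy to exercise in isolation. A possible test sketch (hypothetical, not part of this PR) that assumes the exported L1EndpointConfig and Check shown above and the monorepo's usual module path:

```go
// Hypothetical test sketch; the import path assumes the standard monorepo module layout.
package node_test

import (
	"testing"

	"github.com/ethereum-optimism/optimism/op-node/node"
)

func TestL1EndpointConfigRejectsZeroCacheSize(t *testing.T) {
	cfg := &node.L1EndpointConfig{
		BatchSize: 20, // within the accepted 1..500 range
		CacheSize: 0,  // below the new minimum of 1
	}
	if err := cfg.Check(); err == nil {
		t.Fatal("expected Check to reject a cache size below 1")
	}
}
```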
@@ -163,6 +170,10 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
rpcCfg.ReceiptsCacheSize = cfg.CacheSize
rpcCfg.HeadersCacheSize = cfg.CacheSize
rpcCfg.TransactionsCacheSize = cfg.CacheSize
rpcCfg.PayloadsCacheSize = cfg.CacheSize
return l1Node, rpcCfg, nil
}

@@ -177,6 +188,10 @@ func fallbackClientWrap(ctx context.Context, logger log.Logger, urlList []string
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
rpcCfg.ReceiptsCacheSize = cfg.CacheSize
rpcCfg.HeadersCacheSize = cfg.CacheSize
rpcCfg.TransactionsCacheSize = cfg.CacheSize
rpcCfg.PayloadsCacheSize = cfg.CacheSize
return l1Node, rpcCfg, nil
}
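Both Setup and fallbackClientWrap reuse the single CacheSize value for the receipts, headers, transactions, and payloads caches, so an operator only has to reason about one number: it must cover the L1 height gap between the unsafe and safe heads. A rough, illustrative sizing helper (hypothetical, not part of the PR):

```go
// Illustrative only: pick a cache size that satisfies the documented requirement,
// i.e. at least the L1 height gap between the unsafe and safe heads, plus headroom.
package main

import "fmt"

func minCacheSize(unsafeL1Origin, safeL1Origin, headroom uint64) uint64 {
	gap := uint64(0)
	if unsafeL1Origin > safeL1Origin {
		gap = unsafeL1Origin - safeL1Origin
	}
	return gap + 1 + headroom // +1 so both endpoints fit in the cache
}

func main() {
	// Example: unsafe head derives from L1 block 19_000_600, safe head from 19_000_000.
	fmt.Println(minCacheSize(19_000_600, 19_000_000, 64)) // prints 665
}
```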

1 change: 1 addition & 0 deletions in op-node/service.go
@@ -158,6 +158,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(flags.L1RPCProviderKind.Name))),
RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name),
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
CacheSize: ctx.Int(flags.L1RPCMaxCacheSize.Name),
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
}
5 changes: 4 additions & 1 deletion in op-service/sources/l1_client.go
@@ -206,7 +206,10 @@ func (s *L1Client) GoOrUpdatePreFetchReceipts(ctx context.Context, l1Start uint6
continue
}
if !isSuccess {
s.log.Debug("pre fetch receipts fail without error,need retry", "blockHash", blockInfo.Hash, "blockNumber", blockNumber)
s.log.Debug("The receipts cache may be full. "+
"please ensure the maximum height difference between the L1 blocks "+
"corresponding to the unsafe block height and the safe block height is less than or equal to the cache size.",
"blockHash", blockInfo.Hash, "blockNumber", blockNumber)
time.Sleep(1 * time.Second)
continue
}
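The surrounding loop (only partly visible in this hunk) treats a pre-fetch that returns no error but no success as a likely-full cache and retries the same block after a short pause rather than skipping it. A self-contained sketch of that behaviour, using hypothetical helper names:

```go
// Schematic retry loop (hypothetical helper, not the real pre-fetcher): a result
// of "no error but not successful" is read as "cache probably full", so the same
// block is retried after a one-second pause instead of being skipped.
package main

import (
	"fmt"
	"time"
)

// tryPrefetch stands in for the real receipts pre-fetch; here it succeeds on the
// third attempt so the loop terminates.
func tryPrefetch(attempt int) (bool, error) {
	return attempt >= 3, nil
}

func main() {
	for attempt := 1; ; attempt++ {
		ok, err := tryPrefetch(attempt)
		if err != nil {
			fmt.Println("pre-fetch error, retrying:", err)
			continue
		}
		if !ok {
			// Cache may be full: wait for it to drain and retry the same block.
			fmt.Println("cache may be full, retrying after 1s (attempt", attempt, ")")
			time.Sleep(1 * time.Second)
			continue
		}
		fmt.Println("pre-fetch succeeded on attempt", attempt)
		break
	}
}
```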