diff --git a/bindings/Makefile b/bindings/Makefile
index f17aca421..f3d728317 100644
--- a/bindings/Makefile
+++ b/bindings/Makefile
@@ -22,7 +22,6 @@ bindings: basefee-vault-bindings \
 	l2-output-oracle-bindings \
 	l2-standard-bridge-bindings \
 	l2-to-l1-message-passer-bindings \
-	proposer-fee-vault-bindings \
 	proxy-admin-bindings \
 	proxy-bindings \
 	security-council-bindings \
@@ -31,6 +30,7 @@ bindings: basefee-vault-bindings \
 	timelock-bindings \
 	upgrade-governor-bindings \
 	validator-pool-bindings \
+	validator-reward-vault-bindings \
 	weth9-bindings \
 	zk-merkle-trie-bindings \
 	zk-verifier-bindings
@@ -113,7 +113,7 @@ l1-erc721-bridge-bindings: compile
 	bash ./gen_bindings.sh contracts/L1/L1ERC721Bridge.sol:L1ERC721Bridge $(pkg)

 l1-fee-vault-bindings: compile
-	bash ./gen_bindings.sh contracts/L2/ProposerRewardVault.sol:ProposerRewardVault $(pkg)
+	bash ./gen_bindings.sh contracts/L2/L1FeeVault.sol:L1FeeVault $(pkg)

 l1-standard-bridge-bindings: compile
 	bash ./gen_bindings.sh contracts/L1/L1StandardBridge.sol:L1StandardBridge $(pkg)
@@ -133,7 +133,7 @@ l2-standard-bridge-bindings: compile
 l2-to-l1-message-passer-bindings: compile
 	bash ./gen_bindings.sh contracts/L2/L2ToL1MessagePasser.sol:L2ToL1MessagePasser $(pkg)

-proposer-fee-vault-bindings: compile
+validator-reward-vault-bindings: compile
 	bash ./gen_bindings.sh contracts/L2/ValidatorRewardVault.sol:ValidatorRewardVault $(pkg)

 proxy-admin-bindings: compile
diff --git a/bindings/bindings/proposerrewardvault.go b/bindings/bindings/l1feevault.go
similarity index 53%
rename from bindings/bindings/proposerrewardvault.go
rename to bindings/bindings/l1feevault.go
index eeffe60e7..de7c5ecdd 100644
--- a/bindings/bindings/proposerrewardvault.go
+++ b/bindings/bindings/l1feevault.go
@@ -29,23 +29,23 @@ var (
 	_ = abi.ConvertType
 )

-// ProposerRewardVaultMetaData contains all meta data concerning the ProposerRewardVault contract.
-var ProposerRewardVaultMetaData = &bind.MetaData{
+// L1FeeVaultMetaData contains all meta data concerning the L1FeeVault contract.
+var L1FeeVaultMetaData = &bind.MetaData{
 	ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_recipient\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"}],\"name\":\"Withdrawal\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"MIN_WITHDRAWAL_AMOUNT\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"RECIPIENT\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalProcessed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"withdraw\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]",
-	Bin: "0x61012060405234801561001157600080fd5b5060405161086c38038061086c8339810160408190526100309161005d565b678ac7230489e800006080526001600160a01b031660a052600160c052600060e08190526101005261008d565b60006020828403121561006f57600080fd5b81516001600160a01b038116811461008657600080fd5b9392505050565b60805160a05160c05160e051610100516107876100e560003960006103d3015260006103aa01526000610381015260008181607c015281816102570152610319015260008181610137015261015b01526107876000f3fe60806040526004361061005e5760003560e01c806354fd4d501161004357806354fd4d50146100df57806384411d6514610101578063d3e5792b1461012557600080fd5b80630d9019e11461006a5780633ccfd60b146100c857600080fd5b3661006557005b600080fd5b34801561007657600080fd5b5061009e7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b3480156100d457600080fd5b506100dd610159565b005b3480156100eb57600080fd5b506100f461037a565b6040516100bf9190610638565b34801561010d57600080fd5b5061011760005481565b6040519081526020016100bf565b34801561013157600080fd5b506101177f000000000000000000000000000000000000000000000000000000000000000081565b7f0000000000000000000000000000000000000000000000000000000000000000471015610233576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a40160405180910390fd5b6000479050806000808282546102499190610652565b9091555050604080518281527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166020820152338183015290517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1604080516020810182526000815290517fe11013dd0000000000000000000000000000000000000000000000000000000081527342000000000000000000000000000000000000099163e11013dd918491610345917f0000000000000000000000000000000000000000000000000000000000000000916188b891600401610691565b6000604051808303818588803b15801561035e57600080fd5b505af1158015610372573d6000803e3d6000fd5b505050505050565b60606103a57f000000000000000000000000000000000000000000000000000000000000000061041d565b6103ce7f000000000000000000000000000000000000000000000000000000000000000061041d565b6103f77f000000000000000000000000000000000000000000000000000000000000000061041d565b604051602001610409939291906106d5565b604051602081830303815290604052905090565b6060600061042a836104db565b600101905060008167ffffffffffffffff81111561044a5761044a61074b565b6040519080825280601f01601f191660200182016040528015610474576020820181803683370190505b5090508181016020015b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff017f3031323334353637383961626364656600000000000000000000000000000000600a86061a8153600a850494508461047e57509392505050565b6000807a184f03e93ff9f4daa797ed6e38ed64bf6a1f0100000000000000008310610524577a184f03e93ff9f4daa797ed6e38ed64bf6a1f010000000000000000830492506040015b6d04ee2d6d415b85acef81000000008310610550576d04ee2d6d415b85acef8100000000830492506020015b662386f26fc10000831061056e57662386f26fc10000830492506010015b6305f5e1008310610586576305f5e100830492506008015b612710831061059a57612710830492506004015b606483106105ac576064830492506002015b600a83106105b8576001015b92915050565b60005b838110156105d95781810151838201526020016105c1565b838111156105e8576000848401525b50505050565b600081518084526106068160208601602086016105be565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061064b60208301846105ee565b9392505050565b6000821982111561068c577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500190565b73ffffffffffffffffffffffffffffffffffffffff8416815263ffffffff831660208201526060604082015260006106cc60608301846105ee565b95945050505050565b600084516106e78184602089016105be565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610723816001850160208a016105be565b6001920191820152835161073e8160028401602088016105be565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c634300080f000a",
+	Bin: "0x61012060405234801561001157600080fd5b5060405161086c38038061086c8339810160408190526100309161005d565b678ac7230489e800006080526001600160a01b031660a052600160c0819052600060e0526101005261008d565b60006020828403121561006f57600080fd5b81516001600160a01b038116811461008657600080fd5b9392505050565b60805160a05160c05160e051610100516107876100e560003960006103d3015260006103aa01526000610381015260008181607c015281816102570152610319015260008181610137015261015b01526107876000f3fe60806040526004361061005e5760003560e01c806354fd4d501161004357806354fd4d50146100df57806384411d6514610101578063d3e5792b1461012557600080fd5b80630d9019e11461006a5780633ccfd60b146100c857600080fd5b3661006557005b600080fd5b34801561007657600080fd5b5061009e7f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b3480156100d457600080fd5b506100dd610159565b005b3480156100eb57600080fd5b506100f461037a565b6040516100bf9190610638565b34801561010d57600080fd5b5061011760005481565b6040519081526020016100bf565b34801561013157600080fd5b506101177f000000000000000000000000000000000000000000000000000000000000000081565b7f0000000000000000000000000000000000000000000000000000000000000000471015610233576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a40160405180910390fd5b6000479050806000808282546102499190610652565b9091555050604080518281527f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166020820152338183015290517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1604080516020810182526000815290517fe11013dd0000000000000000000000000000000000000000000000000000000081527342000000000000000000000000000000000000099163e11013dd918491610345917f0000000000000000000000000000000000000000000000000000000000000000916188b891600401610691565b6000604051808303818588803b15801561035e57600080fd5b505af1158015610372573d6000803e3d6000fd5b505050505050565b60606103a57f000000000000000000000000000000000000000000000000000000000000000061041d565b6103ce7f000000000000000000000000000000000000000000000000000000000000000061041d565b6103f77f000000000000000000000000000000000000000000000000000000000000000061041d565b604051602001610409939291906106d5565b604051602081830303815290604052905090565b6060600061042a836104db565b600101905060008167ffffffffffffffff81111561044a5761044a61074b565b6040519080825280601f01601f191660200182016040528015610474576020820181803683370190505b5090508181016020015b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff017f3031323334353637383961626364656600000000000000000000000000000000600a86061a8153600a850494508461047e57509392505050565b6000807a184f03e93ff9f4daa797ed6e38ed64bf6a1f0100000000000000008310610524577a184f03e93ff9f4daa797ed6e38ed64bf6a1f010000000000000000830492506040015b6d04ee2d6d415b85acef81000000008310610550576d04ee2d6d415b85acef8100000000830492506020015b662386f26fc10000831061056e57662386f26fc10000830492506010015b6305f5e1008310610586576305f5e100830492506008015b612710831061059a57612710830492506004015b606483106105ac576064830492506002015b600a83106105b8576001015b92915050565b60005b838110156105d95781810151838201526020016105c1565b838111156105e8576000848401525b50505050565b600081518084526106068160208601602086016105be565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061064b60208301846105ee565b9392505050565b6000821982111561068c577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500190565b73ffffffffffffffffffffffffffffffffffffffff8416815263ffffffff831660208201526060604082015260006106cc60608301846105ee565b95945050505050565b600084516106e78184602089016105be565b80830190507f2e000000000000000000000000000000000000000000000000000000000000008082528551610723816001850160208a016105be565b6001920191820152835161073e8160028401602088016105be565b0160020195945050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fdfea164736f6c634300080f000a",
 }

-// ProposerRewardVaultABI is the input ABI used to generate the binding from.
-// Deprecated: Use ProposerRewardVaultMetaData.ABI instead.
-var ProposerRewardVaultABI = ProposerRewardVaultMetaData.ABI
+// L1FeeVaultABI is the input ABI used to generate the binding from.
+// Deprecated: Use L1FeeVaultMetaData.ABI instead.
+var L1FeeVaultABI = L1FeeVaultMetaData.ABI

-// ProposerRewardVaultBin is the compiled bytecode used for deploying new contracts.
-// Deprecated: Use ProposerRewardVaultMetaData.Bin instead.
-var ProposerRewardVaultBin = ProposerRewardVaultMetaData.Bin
+// L1FeeVaultBin is the compiled bytecode used for deploying new contracts.
+// Deprecated: Use L1FeeVaultMetaData.Bin instead.
+var L1FeeVaultBin = L1FeeVaultMetaData.Bin

-// DeployProposerRewardVault deploys a new Ethereum contract, binding an instance of ProposerRewardVault to it.
-func DeployProposerRewardVault(auth *bind.TransactOpts, backend bind.ContractBackend, _recipient common.Address) (common.Address, *types.Transaction, *ProposerRewardVault, error) {
-	parsed, err := ProposerRewardVaultMetaData.GetAbi()
+// DeployL1FeeVault deploys a new Ethereum contract, binding an instance of L1FeeVault to it.
+func DeployL1FeeVault(auth *bind.TransactOpts, backend bind.ContractBackend, _recipient common.Address) (common.Address, *types.Transaction, *L1FeeVault, error) {
+	parsed, err := L1FeeVaultMetaData.GetAbi()
 	if err != nil {
 		return common.Address{}, nil, nil, err
 	}
@@ -53,111 +53,111 @@ func DeployProposerRewardVault(auth *bind.TransactOpts, backend bind.ContractBac
 		return common.Address{}, nil, nil, errors.New("GetABI returned nil")
 	}

-	address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ProposerRewardVaultBin), backend, _recipient)
+	address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(L1FeeVaultBin), backend, _recipient)
 	if err != nil {
 		return common.Address{}, nil, nil, err
 	}
-	return address, tx, &ProposerRewardVault{ProposerRewardVaultCaller: ProposerRewardVaultCaller{contract: contract}, ProposerRewardVaultTransactor: ProposerRewardVaultTransactor{contract: contract}, ProposerRewardVaultFilterer: ProposerRewardVaultFilterer{contract: contract}}, nil
+	return address, tx, &L1FeeVault{L1FeeVaultCaller: L1FeeVaultCaller{contract: contract}, L1FeeVaultTransactor: L1FeeVaultTransactor{contract: contract}, L1FeeVaultFilterer: L1FeeVaultFilterer{contract: contract}}, nil
 }

-// ProposerRewardVault is an auto generated Go binding around an Ethereum contract.
-type ProposerRewardVault struct {
-	ProposerRewardVaultCaller     // Read-only binding to the contract
-	ProposerRewardVaultTransactor // Write-only binding to the contract
-	ProposerRewardVaultFilterer   // Log filterer for contract events
+// L1FeeVault is an auto generated Go binding around an Ethereum contract.
+type L1FeeVault struct {
+	L1FeeVaultCaller     // Read-only binding to the contract
+	L1FeeVaultTransactor // Write-only binding to the contract
+	L1FeeVaultFilterer   // Log filterer for contract events
 }

-// ProposerRewardVaultCaller is an auto generated read-only Go binding around an Ethereum contract.
-type ProposerRewardVaultCaller struct {
+// L1FeeVaultCaller is an auto generated read-only Go binding around an Ethereum contract.
+type L1FeeVaultCaller struct {
 	contract *bind.BoundContract // Generic contract wrapper for the low level calls
 }

-// ProposerRewardVaultTransactor is an auto generated write-only Go binding around an Ethereum contract.
-type ProposerRewardVaultTransactor struct {
+// L1FeeVaultTransactor is an auto generated write-only Go binding around an Ethereum contract.
+type L1FeeVaultTransactor struct {
 	contract *bind.BoundContract // Generic contract wrapper for the low level calls
 }

-// ProposerRewardVaultFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
-type ProposerRewardVaultFilterer struct {
+// L1FeeVaultFilterer is an auto generated log filtering Go binding around an Ethereum contract events.
+type L1FeeVaultFilterer struct {
 	contract *bind.BoundContract // Generic contract wrapper for the low level calls
 }

-// ProposerRewardVaultSession is an auto generated Go binding around an Ethereum contract,
+// L1FeeVaultSession is an auto generated Go binding around an Ethereum contract,
 // with pre-set call and transact options.
-type ProposerRewardVaultSession struct {
-	Contract     *ProposerRewardVault // Generic contract binding to set the session for
-	CallOpts     bind.CallOpts        // Call options to use throughout this session
-	TransactOpts bind.TransactOpts    // Transaction auth options to use throughout this session
+type L1FeeVaultSession struct {
+	Contract     *L1FeeVault       // Generic contract binding to set the session for
+	CallOpts     bind.CallOpts     // Call options to use throughout this session
+	TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
 }

-// ProposerRewardVaultCallerSession is an auto generated read-only Go binding around an Ethereum contract,
+// L1FeeVaultCallerSession is an auto generated read-only Go binding around an Ethereum contract,
 // with pre-set call options.
-type ProposerRewardVaultCallerSession struct {
-	Contract *ProposerRewardVaultCaller // Generic contract caller binding to set the session for
-	CallOpts bind.CallOpts              // Call options to use throughout this session
+type L1FeeVaultCallerSession struct {
+	Contract *L1FeeVaultCaller // Generic contract caller binding to set the session for
+	CallOpts bind.CallOpts     // Call options to use throughout this session
 }

-// ProposerRewardVaultTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+// L1FeeVaultTransactorSession is an auto generated write-only Go binding around an Ethereum contract,
 // with pre-set transact options.
-type ProposerRewardVaultTransactorSession struct {
-	Contract     *ProposerRewardVaultTransactor // Generic contract transactor binding to set the session for
-	TransactOpts bind.TransactOpts              // Transaction auth options to use throughout this session
+type L1FeeVaultTransactorSession struct {
+	Contract     *L1FeeVaultTransactor // Generic contract transactor binding to set the session for
+	TransactOpts bind.TransactOpts     // Transaction auth options to use throughout this session
 }

-// ProposerRewardVaultRaw is an auto generated low-level Go binding around an Ethereum contract.
-type ProposerRewardVaultRaw struct {
-	Contract *ProposerRewardVault // Generic contract binding to access the raw methods on
+// L1FeeVaultRaw is an auto generated low-level Go binding around an Ethereum contract.
+type L1FeeVaultRaw struct {
+	Contract *L1FeeVault // Generic contract binding to access the raw methods on
 }

-// ProposerRewardVaultCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
-type ProposerRewardVaultCallerRaw struct {
-	Contract *ProposerRewardVaultCaller // Generic read-only contract binding to access the raw methods on
+// L1FeeVaultCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+type L1FeeVaultCallerRaw struct {
+	Contract *L1FeeVaultCaller // Generic read-only contract binding to access the raw methods on
 }

-// ProposerRewardVaultTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
-type ProposerRewardVaultTransactorRaw struct {
-	Contract *ProposerRewardVaultTransactor // Generic write-only contract binding to access the raw methods on
+// L1FeeVaultTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+type L1FeeVaultTransactorRaw struct {
+	Contract *L1FeeVaultTransactor // Generic write-only contract binding to access the raw methods on
 }

-// NewProposerRewardVault creates a new instance of ProposerRewardVault, bound to a specific deployed contract.
-func NewProposerRewardVault(address common.Address, backend bind.ContractBackend) (*ProposerRewardVault, error) {
-	contract, err := bindProposerRewardVault(address, backend, backend, backend)
+// NewL1FeeVault creates a new instance of L1FeeVault, bound to a specific deployed contract.
+func NewL1FeeVault(address common.Address, backend bind.ContractBackend) (*L1FeeVault, error) {
+	contract, err := bindL1FeeVault(address, backend, backend, backend)
 	if err != nil {
 		return nil, err
 	}
-	return &ProposerRewardVault{ProposerRewardVaultCaller: ProposerRewardVaultCaller{contract: contract}, ProposerRewardVaultTransactor: ProposerRewardVaultTransactor{contract: contract}, ProposerRewardVaultFilterer: ProposerRewardVaultFilterer{contract: contract}}, nil
+	return &L1FeeVault{L1FeeVaultCaller: L1FeeVaultCaller{contract: contract}, L1FeeVaultTransactor: L1FeeVaultTransactor{contract: contract}, L1FeeVaultFilterer: L1FeeVaultFilterer{contract: contract}}, nil
 }

-// NewProposerRewardVaultCaller creates a new read-only instance of ProposerRewardVault, bound to a specific deployed contract.
-func NewProposerRewardVaultCaller(address common.Address, caller bind.ContractCaller) (*ProposerRewardVaultCaller, error) {
-	contract, err := bindProposerRewardVault(address, caller, nil, nil)
+// NewL1FeeVaultCaller creates a new read-only instance of L1FeeVault, bound to a specific deployed contract.
+func NewL1FeeVaultCaller(address common.Address, caller bind.ContractCaller) (*L1FeeVaultCaller, error) {
+	contract, err := bindL1FeeVault(address, caller, nil, nil)
 	if err != nil {
 		return nil, err
 	}
-	return &ProposerRewardVaultCaller{contract: contract}, nil
+	return &L1FeeVaultCaller{contract: contract}, nil
 }

-// NewProposerRewardVaultTransactor creates a new write-only instance of ProposerRewardVault, bound to a specific deployed contract.
-func NewProposerRewardVaultTransactor(address common.Address, transactor bind.ContractTransactor) (*ProposerRewardVaultTransactor, error) {
-	contract, err := bindProposerRewardVault(address, nil, transactor, nil)
+// NewL1FeeVaultTransactor creates a new write-only instance of L1FeeVault, bound to a specific deployed contract.
+func NewL1FeeVaultTransactor(address common.Address, transactor bind.ContractTransactor) (*L1FeeVaultTransactor, error) {
+	contract, err := bindL1FeeVault(address, nil, transactor, nil)
 	if err != nil {
 		return nil, err
 	}
-	return &ProposerRewardVaultTransactor{contract: contract}, nil
+	return &L1FeeVaultTransactor{contract: contract}, nil
 }

-// NewProposerRewardVaultFilterer creates a new log filterer instance of ProposerRewardVault, bound to a specific deployed contract.
-func NewProposerRewardVaultFilterer(address common.Address, filterer bind.ContractFilterer) (*ProposerRewardVaultFilterer, error) {
-	contract, err := bindProposerRewardVault(address, nil, nil, filterer)
+// NewL1FeeVaultFilterer creates a new log filterer instance of L1FeeVault, bound to a specific deployed contract.
+func NewL1FeeVaultFilterer(address common.Address, filterer bind.ContractFilterer) (*L1FeeVaultFilterer, error) {
+	contract, err := bindL1FeeVault(address, nil, nil, filterer)
 	if err != nil {
 		return nil, err
 	}
-	return &ProposerRewardVaultFilterer{contract: contract}, nil
+	return &L1FeeVaultFilterer{contract: contract}, nil
 }

-// bindProposerRewardVault binds a generic wrapper to an already deployed contract.
-func bindProposerRewardVault(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
-	parsed, err := ProposerRewardVaultMetaData.GetAbi()
+// bindL1FeeVault binds a generic wrapper to an already deployed contract.
+func bindL1FeeVault(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+	parsed, err := L1FeeVaultMetaData.GetAbi()
 	if err != nil {
 		return nil, err
 	}
@@ -168,46 +168,46 @@ func bindProposerRewardVault(address common.Address, caller bind.ContractCaller,
 // sets the output to result. The result type might be a single field for simple
 // returns, a slice of interfaces for anonymous returns and a struct for named
 // returns.
-func (_ProposerRewardVault *ProposerRewardVaultRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
-	return _ProposerRewardVault.Contract.ProposerRewardVaultCaller.contract.Call(opts, result, method, params...)
+func (_L1FeeVault *L1FeeVaultRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _L1FeeVault.Contract.L1FeeVaultCaller.contract.Call(opts, result, method, params...)
 }

 // Transfer initiates a plain transaction to move funds to the contract, calling
 // its default method if one is available.
-func (_ProposerRewardVault *ProposerRewardVaultRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.ProposerRewardVaultTransactor.contract.Transfer(opts)
+func (_L1FeeVault *L1FeeVaultRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _L1FeeVault.Contract.L1FeeVaultTransactor.contract.Transfer(opts)
 }

 // Transact invokes the (paid) contract method with params as input values.
-func (_ProposerRewardVault *ProposerRewardVaultRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.ProposerRewardVaultTransactor.contract.Transact(opts, method, params...)
+func (_L1FeeVault *L1FeeVaultRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _L1FeeVault.Contract.L1FeeVaultTransactor.contract.Transact(opts, method, params...)
 }

 // Call invokes the (constant) contract method with params as input values and
 // sets the output to result. The result type might be a single field for simple
 // returns, a slice of interfaces for anonymous returns and a struct for named
 // returns.
-func (_ProposerRewardVault *ProposerRewardVaultCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
-	return _ProposerRewardVault.Contract.contract.Call(opts, result, method, params...)
+func (_L1FeeVault *L1FeeVaultCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+	return _L1FeeVault.Contract.contract.Call(opts, result, method, params...)
 }

 // Transfer initiates a plain transaction to move funds to the contract, calling
 // its default method if one is available.
-func (_ProposerRewardVault *ProposerRewardVaultTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.contract.Transfer(opts)
+func (_L1FeeVault *L1FeeVaultTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _L1FeeVault.Contract.contract.Transfer(opts)
 }

 // Transact invokes the (paid) contract method with params as input values.
-func (_ProposerRewardVault *ProposerRewardVaultTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.contract.Transact(opts, method, params...)
+func (_L1FeeVault *L1FeeVaultTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
+	return _L1FeeVault.Contract.contract.Transact(opts, method, params...)
 }

 // MINWITHDRAWALAMOUNT is a free data retrieval call binding the contract method 0xd3e5792b.
 //
 // Solidity: function MIN_WITHDRAWAL_AMOUNT() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultCaller) MINWITHDRAWALAMOUNT(opts *bind.CallOpts) (*big.Int, error) {
+func (_L1FeeVault *L1FeeVaultCaller) MINWITHDRAWALAMOUNT(opts *bind.CallOpts) (*big.Int, error) {
 	var out []interface{}
-	err := _ProposerRewardVault.contract.Call(opts, &out, "MIN_WITHDRAWAL_AMOUNT")
+	err := _L1FeeVault.contract.Call(opts, &out, "MIN_WITHDRAWAL_AMOUNT")

 	if err != nil {
 		return *new(*big.Int), err
@@ -222,23 +222,23 @@ func (_ProposerRewardVault *ProposerRewardVaultCaller) MINWITHDRAWALAMOUNT(opts
 // MINWITHDRAWALAMOUNT is a free data retrieval call binding the contract method 0xd3e5792b.
 //
 // Solidity: function MIN_WITHDRAWAL_AMOUNT() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultSession) MINWITHDRAWALAMOUNT() (*big.Int, error) {
-	return _ProposerRewardVault.Contract.MINWITHDRAWALAMOUNT(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultSession) MINWITHDRAWALAMOUNT() (*big.Int, error) {
+	return _L1FeeVault.Contract.MINWITHDRAWALAMOUNT(&_L1FeeVault.CallOpts)
 }

 // MINWITHDRAWALAMOUNT is a free data retrieval call binding the contract method 0xd3e5792b.
 //
 // Solidity: function MIN_WITHDRAWAL_AMOUNT() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultCallerSession) MINWITHDRAWALAMOUNT() (*big.Int, error) {
-	return _ProposerRewardVault.Contract.MINWITHDRAWALAMOUNT(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultCallerSession) MINWITHDRAWALAMOUNT() (*big.Int, error) {
+	return _L1FeeVault.Contract.MINWITHDRAWALAMOUNT(&_L1FeeVault.CallOpts)
 }

 // RECIPIENT is a free data retrieval call binding the contract method 0x0d9019e1.
 //
 // Solidity: function RECIPIENT() view returns(address)
-func (_ProposerRewardVault *ProposerRewardVaultCaller) RECIPIENT(opts *bind.CallOpts) (common.Address, error) {
+func (_L1FeeVault *L1FeeVaultCaller) RECIPIENT(opts *bind.CallOpts) (common.Address, error) {
 	var out []interface{}
-	err := _ProposerRewardVault.contract.Call(opts, &out, "RECIPIENT")
+	err := _L1FeeVault.contract.Call(opts, &out, "RECIPIENT")

 	if err != nil {
 		return *new(common.Address), err
@@ -253,23 +253,23 @@ func (_ProposerRewardVault *ProposerRewardVaultCaller) RECIPIENT(opts *bind.Call
 // RECIPIENT is a free data retrieval call binding the contract method 0x0d9019e1.
 //
 // Solidity: function RECIPIENT() view returns(address)
-func (_ProposerRewardVault *ProposerRewardVaultSession) RECIPIENT() (common.Address, error) {
-	return _ProposerRewardVault.Contract.RECIPIENT(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultSession) RECIPIENT() (common.Address, error) {
+	return _L1FeeVault.Contract.RECIPIENT(&_L1FeeVault.CallOpts)
 }

 // RECIPIENT is a free data retrieval call binding the contract method 0x0d9019e1.
 //
 // Solidity: function RECIPIENT() view returns(address)
-func (_ProposerRewardVault *ProposerRewardVaultCallerSession) RECIPIENT() (common.Address, error) {
-	return _ProposerRewardVault.Contract.RECIPIENT(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultCallerSession) RECIPIENT() (common.Address, error) {
+	return _L1FeeVault.Contract.RECIPIENT(&_L1FeeVault.CallOpts)
 }

 // TotalProcessed is a free data retrieval call binding the contract method 0x84411d65.
 //
 // Solidity: function totalProcessed() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultCaller) TotalProcessed(opts *bind.CallOpts) (*big.Int, error) {
+func (_L1FeeVault *L1FeeVaultCaller) TotalProcessed(opts *bind.CallOpts) (*big.Int, error) {
 	var out []interface{}
-	err := _ProposerRewardVault.contract.Call(opts, &out, "totalProcessed")
+	err := _L1FeeVault.contract.Call(opts, &out, "totalProcessed")

 	if err != nil {
 		return *new(*big.Int), err
@@ -284,23 +284,23 @@ func (_ProposerRewardVault *ProposerRewardVaultCaller) TotalProcessed(opts *bind
 // TotalProcessed is a free data retrieval call binding the contract method 0x84411d65.
 //
 // Solidity: function totalProcessed() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultSession) TotalProcessed() (*big.Int, error) {
-	return _ProposerRewardVault.Contract.TotalProcessed(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultSession) TotalProcessed() (*big.Int, error) {
+	return _L1FeeVault.Contract.TotalProcessed(&_L1FeeVault.CallOpts)
 }

 // TotalProcessed is a free data retrieval call binding the contract method 0x84411d65.
 //
 // Solidity: function totalProcessed() view returns(uint256)
-func (_ProposerRewardVault *ProposerRewardVaultCallerSession) TotalProcessed() (*big.Int, error) {
-	return _ProposerRewardVault.Contract.TotalProcessed(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultCallerSession) TotalProcessed() (*big.Int, error) {
+	return _L1FeeVault.Contract.TotalProcessed(&_L1FeeVault.CallOpts)
 }

 // Version is a free data retrieval call binding the contract method 0x54fd4d50.
 //
 // Solidity: function version() view returns(string)
-func (_ProposerRewardVault *ProposerRewardVaultCaller) Version(opts *bind.CallOpts) (string, error) {
+func (_L1FeeVault *L1FeeVaultCaller) Version(opts *bind.CallOpts) (string, error) {
 	var out []interface{}
-	err := _ProposerRewardVault.contract.Call(opts, &out, "version")
+	err := _L1FeeVault.contract.Call(opts, &out, "version")

 	if err != nil {
 		return *new(string), err
@@ -315,62 +315,62 @@ func (_ProposerRewardVault *ProposerRewardVaultCaller) Version(opts *bind.CallOp
 // Version is a free data retrieval call binding the contract method 0x54fd4d50.
 //
 // Solidity: function version() view returns(string)
-func (_ProposerRewardVault *ProposerRewardVaultSession) Version() (string, error) {
-	return _ProposerRewardVault.Contract.Version(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultSession) Version() (string, error) {
+	return _L1FeeVault.Contract.Version(&_L1FeeVault.CallOpts)
 }

 // Version is a free data retrieval call binding the contract method 0x54fd4d50.
 //
 // Solidity: function version() view returns(string)
-func (_ProposerRewardVault *ProposerRewardVaultCallerSession) Version() (string, error) {
-	return _ProposerRewardVault.Contract.Version(&_ProposerRewardVault.CallOpts)
+func (_L1FeeVault *L1FeeVaultCallerSession) Version() (string, error) {
+	return _L1FeeVault.Contract.Version(&_L1FeeVault.CallOpts)
 }

 // Withdraw is a paid mutator transaction binding the contract method 0x3ccfd60b.
 //
 // Solidity: function withdraw() returns()
-func (_ProposerRewardVault *ProposerRewardVaultTransactor) Withdraw(opts *bind.TransactOpts) (*types.Transaction, error) {
-	return _ProposerRewardVault.contract.Transact(opts, "withdraw")
+func (_L1FeeVault *L1FeeVaultTransactor) Withdraw(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _L1FeeVault.contract.Transact(opts, "withdraw")
 }

 // Withdraw is a paid mutator transaction binding the contract method 0x3ccfd60b.
 //
 // Solidity: function withdraw() returns()
-func (_ProposerRewardVault *ProposerRewardVaultSession) Withdraw() (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.Withdraw(&_ProposerRewardVault.TransactOpts)
+func (_L1FeeVault *L1FeeVaultSession) Withdraw() (*types.Transaction, error) {
+	return _L1FeeVault.Contract.Withdraw(&_L1FeeVault.TransactOpts)
 }

 // Withdraw is a paid mutator transaction binding the contract method 0x3ccfd60b.
 //
 // Solidity: function withdraw() returns()
-func (_ProposerRewardVault *ProposerRewardVaultTransactorSession) Withdraw() (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.Withdraw(&_ProposerRewardVault.TransactOpts)
+func (_L1FeeVault *L1FeeVaultTransactorSession) Withdraw() (*types.Transaction, error) {
+	return _L1FeeVault.Contract.Withdraw(&_L1FeeVault.TransactOpts)
 }

 // Receive is a paid mutator transaction binding the contract receive function.
 //
 // Solidity: receive() payable returns()
-func (_ProposerRewardVault *ProposerRewardVaultTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
-	return _ProposerRewardVault.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
+func (_L1FeeVault *L1FeeVaultTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
+	return _L1FeeVault.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
 }

 // Receive is a paid mutator transaction binding the contract receive function.
 //
 // Solidity: receive() payable returns()
-func (_ProposerRewardVault *ProposerRewardVaultSession) Receive() (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.Receive(&_ProposerRewardVault.TransactOpts)
+func (_L1FeeVault *L1FeeVaultSession) Receive() (*types.Transaction, error) {
+	return _L1FeeVault.Contract.Receive(&_L1FeeVault.TransactOpts)
 }

 // Receive is a paid mutator transaction binding the contract receive function.
 //
 // Solidity: receive() payable returns()
-func (_ProposerRewardVault *ProposerRewardVaultTransactorSession) Receive() (*types.Transaction, error) {
-	return _ProposerRewardVault.Contract.Receive(&_ProposerRewardVault.TransactOpts)
+func (_L1FeeVault *L1FeeVaultTransactorSession) Receive() (*types.Transaction, error) {
+	return _L1FeeVault.Contract.Receive(&_L1FeeVault.TransactOpts)
 }

-// ProposerRewardVaultWithdrawalIterator is returned from FilterWithdrawal and is used to iterate over the raw logs and unpacked data for Withdrawal events raised by the ProposerRewardVault contract.
-type ProposerRewardVaultWithdrawalIterator struct {
-	Event *ProposerRewardVaultWithdrawal // Event containing the contract specifics and raw log
+// L1FeeVaultWithdrawalIterator is returned from FilterWithdrawal and is used to iterate over the raw logs and unpacked data for Withdrawal events raised by the L1FeeVault contract.
+type L1FeeVaultWithdrawalIterator struct {
+	Event *L1FeeVaultWithdrawal // Event containing the contract specifics and raw log

 	contract *bind.BoundContract // Generic contract to use for unpacking event data
 	event    string              // Event name to use for unpacking event data
@@ -384,7 +384,7 @@ type ProposerRewardVaultWithdrawalIterator struct {
 // Next advances the iterator to the subsequent event, returning whether there
 // are any more events found. In case of a retrieval or parsing error, false is
 // returned and Error() can be queried for the exact failure.
-func (it *ProposerRewardVaultWithdrawalIterator) Next() bool {
+func (it *L1FeeVaultWithdrawalIterator) Next() bool {
 	// If the iterator failed, stop iterating
 	if it.fail != nil {
 		return false
@@ -393,7 +393,7 @@ func (it *ProposerRewardVaultWithdrawalIterator) Next() bool {
 	if it.done {
 		select {
 		case log := <-it.logs:
-			it.Event = new(ProposerRewardVaultWithdrawal)
+			it.Event = new(L1FeeVaultWithdrawal)
 			if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
 				it.fail = err
 				return false
@@ -408,7 +408,7 @@ func (it *ProposerRewardVaultWithdrawalIterator) Next() bool {
 	// Iterator still in progress, wait for either a data or an error event
 	select {
 	case log := <-it.logs:
-		it.Event = new(ProposerRewardVaultWithdrawal)
+		it.Event = new(L1FeeVaultWithdrawal)
 		if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
 			it.fail = err
 			return false
@@ -424,19 +424,19 @@ func (it *ProposerRewardVaultWithdrawalIterator) Next() bool {
 }

 // Error returns any retrieval or parsing error occurred during filtering.
-func (it *ProposerRewardVaultWithdrawalIterator) Error() error {
+func (it *L1FeeVaultWithdrawalIterator) Error() error {
 	return it.fail
 }

 // Close terminates the iteration process, releasing any pending underlying
 // resources.
-func (it *ProposerRewardVaultWithdrawalIterator) Close() error {
+func (it *L1FeeVaultWithdrawalIterator) Close() error {
 	it.sub.Unsubscribe()
 	return nil
 }

-// ProposerRewardVaultWithdrawal represents a Withdrawal event raised by the ProposerRewardVault contract.
-type ProposerRewardVaultWithdrawal struct {
+// L1FeeVaultWithdrawal represents a Withdrawal event raised by the L1FeeVault contract.
+type L1FeeVaultWithdrawal struct {
 	Value *big.Int
 	To    common.Address
 	From  common.Address
@@ -446,21 +446,21 @@ type ProposerRewardVaultWithdrawal struct {
 // FilterWithdrawal is a free log retrieval operation binding the contract event 0xc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba.
 //
 // Solidity: event Withdrawal(uint256 value, address to, address from)
-func (_ProposerRewardVault *ProposerRewardVaultFilterer) FilterWithdrawal(opts *bind.FilterOpts) (*ProposerRewardVaultWithdrawalIterator, error) {
+func (_L1FeeVault *L1FeeVaultFilterer) FilterWithdrawal(opts *bind.FilterOpts) (*L1FeeVaultWithdrawalIterator, error) {

-	logs, sub, err := _ProposerRewardVault.contract.FilterLogs(opts, "Withdrawal")
+	logs, sub, err := _L1FeeVault.contract.FilterLogs(opts, "Withdrawal")
 	if err != nil {
 		return nil, err
 	}
-	return &ProposerRewardVaultWithdrawalIterator{contract: _ProposerRewardVault.contract, event: "Withdrawal", logs: logs, sub: sub}, nil
+	return &L1FeeVaultWithdrawalIterator{contract: _L1FeeVault.contract, event: "Withdrawal", logs: logs, sub: sub}, nil
 }

 // WatchWithdrawal is a free log subscription operation binding the contract event 0xc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba.
 //
 // Solidity: event Withdrawal(uint256 value, address to, address from)
-func (_ProposerRewardVault *ProposerRewardVaultFilterer) WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *ProposerRewardVaultWithdrawal) (event.Subscription, error) {
+func (_L1FeeVault *L1FeeVaultFilterer) WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *L1FeeVaultWithdrawal) (event.Subscription, error) {

-	logs, sub, err := _ProposerRewardVault.contract.WatchLogs(opts, "Withdrawal")
+	logs, sub, err := _L1FeeVault.contract.WatchLogs(opts, "Withdrawal")
 	if err != nil {
 		return nil, err
 	}
@@ -470,8 +470,8 @@ func (_ProposerRewardVault *ProposerRewardVaultFilterer) WatchWithdrawal(opts *b
 			select {
 			case log := <-logs:
 				// New log arrived, parse the event and forward to the user
-				event := new(ProposerRewardVaultWithdrawal)
-				if err := _ProposerRewardVault.contract.UnpackLog(event, "Withdrawal", log); err != nil {
+				event := new(L1FeeVaultWithdrawal)
+				if err := _L1FeeVault.contract.UnpackLog(event, "Withdrawal", log); err != nil {
 					return err
 				}
 				event.Raw = log
@@ -495,9 +495,9 @@ func (_ProposerRewardVault *ProposerRewardVaultFilterer) WatchWithdrawal(opts *b
 // ParseWithdrawal is a log parse operation binding the contract event 0xc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba.
 //
 // Solidity: event Withdrawal(uint256 value, address to, address from)
-func (_ProposerRewardVault *ProposerRewardVaultFilterer) ParseWithdrawal(log types.Log) (*ProposerRewardVaultWithdrawal, error) {
-	event := new(ProposerRewardVaultWithdrawal)
-	if err := _ProposerRewardVault.contract.UnpackLog(event, "Withdrawal", log); err != nil {
+func (_L1FeeVault *L1FeeVaultFilterer) ParseWithdrawal(log types.Log) (*L1FeeVaultWithdrawal, error) {
+	event := new(L1FeeVaultWithdrawal)
+	if err := _L1FeeVault.contract.UnpackLog(event, "Withdrawal", log); err != nil {
 		return nil, err
 	}
 	event.Raw = log
diff --git a/bindings/predeploys/addresses.go b/bindings/predeploys/addresses.go
index 02fb3c621..76958567d 100644
--- a/bindings/predeploys/addresses.go
+++ b/bindings/predeploys/addresses.go
@@ -10,7 +10,7 @@ const (
 	L2CrossDomainMessenger = "0x4200000000000000000000000000000000000004"
 	GasPriceOracle         = "0x4200000000000000000000000000000000000005"
 	ProtocolVault          = "0x4200000000000000000000000000000000000006"
-	ProposerRewardVault    = "0x4200000000000000000000000000000000000007"
+	L1FeeVault             = "0x4200000000000000000000000000000000000007"
 	ValidatorRewardVault   = "0x4200000000000000000000000000000000000008"
 	L2StandardBridge       = "0x4200000000000000000000000000000000000009"
 	L2ERC721Bridge         = "0x420000000000000000000000000000000000000A"
@@ -26,7 +26,7 @@ var (
 	L2CrossDomainMessengerAddr = common.HexToAddress(L2CrossDomainMessenger)
 	GasPriceOracleAddr         = common.HexToAddress(GasPriceOracle)
 	ProtocolVaultAddr          = common.HexToAddress(ProtocolVault)
-	ProposerRewardVaultAddr    = common.HexToAddress(ProposerRewardVault)
+	L1FeeVaultAddr             = common.HexToAddress(L1FeeVault)
 	ValidatorRewardVaultAddr   = common.HexToAddress(ValidatorRewardVault)
 	L2StandardBridgeAddr       = common.HexToAddress(L2StandardBridge)
 	L2ERC721BridgeAddr         = common.HexToAddress(L2ERC721Bridge)
@@ -44,7 +44,7 @@ func init() {
 	Predeploys["L2CrossDomainMessenger"] = &L2CrossDomainMessengerAddr
 	Predeploys["GasPriceOracle"] = &GasPriceOracleAddr
 	Predeploys["ProtocolVault"] = &ProtocolVaultAddr
-	Predeploys["ProposerRewardVault"] = &ProposerRewardVaultAddr
+	Predeploys["L1FeeVault"] = &L1FeeVaultAddr
 	Predeploys["ValidatorRewardVault"] = &ValidatorRewardVaultAddr
 	Predeploys["L2StandardBridge"] = &L2StandardBridgeAddr
 	Predeploys["L2ERC721Bridge"] = &L2ERC721BridgeAddr
diff --git a/components/batcher/batch_submitter.go b/components/batcher/batch_submitter.go
index b17249ef2..a9d68c973 100644
--- a/components/batcher/batch_submitter.go
+++ b/components/batcher/batch_submitter.go
@@ -38,7 +38,7 @@ func NewBatchSubmitter(cfg Config, l log.Logger, m metrics.Metricer) (*BatchSubm

 // loadBlocksIntoState loads all blocks since the previous stored block
 // It does the following:
-// 1. Fetch the sync status of the proposer
+// 1. Fetch the sync status of the sequencer
 // 2. Check if the sync status is valid or if we are all the way up to date
 // 3. Check if it needs to initialize state OR it is lagging (todo: lagging just means race condition?)
 // 4. Load all new blocks into the local state.
diff --git a/components/batcher/channel_builder.go b/components/batcher/channel_builder.go
index 505f3441e..65567b20e 100644
--- a/components/batcher/channel_builder.go
+++ b/components/batcher/channel_builder.go
@@ -18,7 +18,7 @@ var (
 	ErrMaxFrameIndex       = errors.New("max frame index reached (uint16)")
 	ErrMaxDurationReached  = errors.New("max channel duration reached")
 	ErrChannelTimeoutClose = errors.New("close to channel timeout")
-	ErrProposerWindowClose = errors.New("close to proposer window timeout")
+	ErrSeqWindowClose      = errors.New("close to sequencer window timeout")
 	ErrTerminated          = errors.New("channel terminated")
 )

@@ -35,9 +35,9 @@ func (e *ChannelFullError) Unwrap() error {
 }

 type ChannelConfig struct {
-	// Number of epochs (L1 blocks) per proposing window, including the epoch
+	// Number of epochs (L1 blocks) per sequencing window, including the epoch
 	// L1 origin block itself
-	ProposerWindowSize uint64
+	SeqWindowSize uint64
 	// The maximum number of L1 blocks that the inclusion transactions of a
 	// channel's frames can span.
 	ChannelTimeout uint64
@@ -51,7 +51,7 @@
 	// If 0, duration checks are disabled.
 	MaxChannelDuration uint64
 	// The batcher tx submission safety margin (in #L1-blocks) to subtract from
-	// a channel's timeout and proposing window, to guarantee safe inclusion of
+	// a channel's timeout and sequencing window, to guarantee safe inclusion of
 	// a channel on L1.
 	SubSafetyMargin uint64
 	// The maximum byte-size a frame can have.
@@ -121,7 +121,7 @@ type channelBuilder struct {
 	// L1 block number timeout of combined
 	// - channel duration timeout,
 	// - consensus channel timeout,
-	// - proposing window timeout.
+	// - sequencing window timeout.
 	// 0 if no block number timeout set yet.
 	timeout uint64
 	// reason for currently set timeout
@@ -217,7 +217,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) (derive.L1BlockInfo, error
 		return l1info, fmt.Errorf("adding block to channel out: %w", err)
 	}
 	c.blocks = append(c.blocks, block)
-	c.updatePwTimeout(batch)
+	c.updateSwTimeout(batch)

 	if c.inputTargetReached() {
 		c.setFullErr(ErrInputTargetReached)
@@ -232,7 +232,7 @@ func (c *channelBuilder) AddBlock(block *types.Block) (derive.L1BlockInfo, error
 // RegisterL1Block should be called whenever a new L1-block is seen.
 //
 // It ensures proper tracking of all possible timeouts (max channel duration,
-// close to consensus channel timeout, close to end of proposing window).
+// close to consensus channel timeout, close to end of sequencing window).
 func (c *channelBuilder) RegisterL1Block(l1BlockNum uint64) {
 	c.updateDurationTimeout(l1BlockNum)
 	c.checkTimeout(l1BlockNum)
@@ -259,13 +259,13 @@ func (c *channelBuilder) updateDurationTimeout(l1BlockNum uint64) {
 	c.updateTimeout(timeout, ErrMaxDurationReached)
 }

-// updatePwTimeout updates the block timeout with the proposer window timeout
+// updateSwTimeout updates the block timeout with the sequencer window timeout
 // derived from the batch's origin L1 block. The timeout is only moved forward
-// if the derived proposer window timeout is earlier than the currently set
+// if the derived sequencer window timeout is earlier than the currently set
 // timeout.
-func (c *channelBuilder) updatePwTimeout(batch *derive.BatchData) {
-	timeout := uint64(batch.EpochNum) + c.cfg.ProposerWindowSize - c.cfg.SubSafetyMargin
-	c.updateTimeout(timeout, ErrProposerWindowClose)
+func (c *channelBuilder) updateSwTimeout(batch *derive.BatchData) {
+	timeout := uint64(batch.EpochNum) + c.cfg.SeqWindowSize - c.cfg.SubSafetyMargin
+	c.updateTimeout(timeout, ErrSeqWindowClose)
 }

 // updateTimeout updates the timeout block to the given block number if it is
@@ -318,7 +318,7 @@ func (c *channelBuilder) IsFull() bool {
 //     (uint16),
 //   - ErrMaxDurationReached if the max channel duration got reached,
 //   - ErrChannelTimeoutClose if the consensus channel timeout got too close,
-//   - ErrProposerWindowClose if the end of the proposer window got too close,
+//   - ErrSeqWindowClose if the end of the sequencer window got too close,
 //   - ErrTerminated if the channel was explicitly terminated.
 func (c *channelBuilder) FullErr() error {
 	return c.fullErr
diff --git a/components/batcher/channel_builder_test.go b/components/batcher/channel_builder_test.go
index 9d48cd398..0b4992e2e 100644
--- a/components/batcher/channel_builder_test.go
+++ b/components/batcher/channel_builder_test.go
@@ -22,7 +22,7 @@ import (
 )

 var defaultTestChannelConfig = ChannelConfig{
-	ProposerWindowSize: 15,
+	SeqWindowSize:      15,
 	ChannelTimeout:     40,
 	MaxChannelDuration: 1,
 	SubSafetyMargin:    4,
@@ -302,65 +302,65 @@ func FuzzChannelZeroCloseTimeout(f *testing.F) {
 	})
 }

-// FuzzProposerWindowClose ensures that the channel builder has a [ErrProposerWindowClose]
+// FuzzSeqWindowClose ensures that the channel builder has a [ErrSeqWindowClose]
 // as long as the timeout constraint is met and the builder's timeout is greater than
 // the calculated timeout.
-func FuzzProposerWindowClose(f *testing.F) {
+func FuzzSeqWindowClose(f *testing.F) {
 	// Set multiple seeds in case fuzzing isn't explicitly used
 	for i := range [10]int{} {
 		f.Add(uint64(i), uint64(i), uint64(i), uint64(i*5))
 	}
-	f.Fuzz(func(t *testing.T, epochNum uint64, proposerWindowSize uint64, subSafetyMargin uint64, timeout uint64) {
+	f.Fuzz(func(t *testing.T, epochNum uint64, seqWindowSize uint64, subSafetyMargin uint64, timeout uint64) {
 		// Create the channel builder
 		channelConfig := defaultTestChannelConfig
-		channelConfig.ProposerWindowSize = proposerWindowSize
+		channelConfig.SeqWindowSize = seqWindowSize
 		channelConfig.SubSafetyMargin = subSafetyMargin
 		cb, err := newChannelBuilder(channelConfig)
 		require.NoError(t, err)

 		// Check the timeout
 		cb.timeout = timeout
-		cb.updatePwTimeout(&derive.BatchData{
+		cb.updateSwTimeout(&derive.BatchData{
 			BatchV1: derive.BatchV1{
 				EpochNum: rollup.Epoch(epochNum),
 			},
 		})
-		calculatedTimeout := epochNum + proposerWindowSize - subSafetyMargin
+		calculatedTimeout := epochNum + seqWindowSize - subSafetyMargin
 		if timeout > calculatedTimeout && calculatedTimeout != 0 {
 			cb.checkTimeout(calculatedTimeout)
-			require.ErrorIs(t, cb.FullErr(), ErrProposerWindowClose)
+			require.ErrorIs(t, cb.FullErr(), ErrSeqWindowClose)
 		} else {
 			require.NoError(t, cb.FullErr())
 		}
 	})
 }

-// FuzzProposerWindowZeroTimeoutClose ensures that the channel builder has a [ErrProposerWindowClose]
+// FuzzSeqWindowZeroTimeoutClose ensures that the channel builder has a [ErrSeqWindowClose]
 // as long as the timeout constraint is met and the builder's timeout is set to zero.
-func FuzzProposerWindowZeroTimeoutClose(f *testing.F) {
+func FuzzSeqWindowZeroTimeoutClose(f *testing.F) {
 	// Set multiple seeds in case fuzzing isn't explicitly used
 	for i := range [10]int{} {
 		f.Add(uint64(i), uint64(i), uint64(i))
 	}
-	f.Fuzz(func(t *testing.T, epochNum uint64, proposerWindowSize uint64, subSafetyMargin uint64) {
+	f.Fuzz(func(t *testing.T, epochNum uint64, seqWindowSize uint64, subSafetyMargin uint64) {
 		// Create the channel builder
 		channelConfig := defaultTestChannelConfig
-		channelConfig.ProposerWindowSize = proposerWindowSize
+		channelConfig.SeqWindowSize = seqWindowSize
 		channelConfig.SubSafetyMargin = subSafetyMargin
 		cb, err := newChannelBuilder(channelConfig)
 		require.NoError(t, err)

 		// Check the timeout
 		cb.timeout = 0
-		cb.updatePwTimeout(&derive.BatchData{
+		cb.updateSwTimeout(&derive.BatchData{
 			BatchV1: derive.BatchV1{
 				EpochNum: rollup.Epoch(epochNum),
 			},
 		})
-		calculatedTimeout := epochNum + proposerWindowSize - subSafetyMargin
+		calculatedTimeout := epochNum + seqWindowSize - subSafetyMargin
 		cb.checkTimeout(calculatedTimeout)
 		if cb.timeout != 0 {
-			require.ErrorIs(t, cb.FullErr(), ErrProposerWindowClose, "Proposer window close should be reached")
+			require.ErrorIs(t, cb.FullErr(), ErrSeqWindowClose, "Sequence window close should be reached")
 		}
 	})
 }
@@ -583,8 +583,8 @@ func TestChannelBuilder_Reset(t *testing.T) {
 	// Check the fields reset in the Reset function
 	require.Equal(t, 1, len(cb.blocks))
 	require.Equal(t, 0, len(cb.frames))
-	// Timeout should be updated in the AddBlock internal call to `updatePwTimeout`
-	timeout := uint64(100) + cb.cfg.ProposerWindowSize - cb.cfg.SubSafetyMargin
+	// Timeout should be updated in the AddBlock internal call to `updateSwTimeout`
+	timeout := uint64(100) + cb.cfg.SeqWindowSize - cb.cfg.SubSafetyMargin
 	require.Equal(t, timeout, cb.timeout)
 	require.NoError(t, cb.fullErr)
diff --git a/components/batcher/config.go b/components/batcher/config.go
index 41321e197..a449e320e 100644
--- a/components/batcher/config.go
+++ b/components/batcher/config.go
@@ -71,7 +71,7 @@ type CLIConfig struct {
 	MaxChannelDuration uint64

 	// The batcher tx submission safety margin (in #L1-blocks) to subtract from
-	// a channel's timeout and proposing window, to guarantee safe inclusion of
+	// a channel's timeout and sequencing window, to guarantee safe inclusion of
 	// a channel on L1.
 	SubSafetyMargin uint64

@@ -183,7 +183,7 @@ func NewBatcherConfig(cfg CLIConfig, l log.Logger, m metrics.Metricer) (*Config,
 		TxManager: txManager,
 		Rollup:    rcfg,
 		Channel: ChannelConfig{
-			ProposerWindowSize: rcfg.ProposerWindowSize,
+			SeqWindowSize:      rcfg.SeqWindowSize,
 			ChannelTimeout:     rcfg.ChannelTimeout,
 			MaxChannelDuration: cfg.MaxChannelDuration,
 			SubSafetyMargin:    cfg.SubSafetyMargin,
diff --git a/components/batcher/flags/flags.go b/components/batcher/flags/flags.go
index df26d5048..e149bdd9e 100644
--- a/components/batcher/flags/flags.go
+++ b/components/batcher/flags/flags.go
@@ -38,7 +38,7 @@ var (
 	SubSafetyMarginFlag = &cli.Uint64Flag{
 		Name: "sub-safety-margin",
 		Usage: "The batcher tx submission safety margin (in #L1-blocks) to subtract " +
-			"from a channel's timeout and proposing window, to guarantee safe inclusion " +
+			"from a channel's timeout and sequencing window, to guarantee safe inclusion " +
 			"of a channel on L1.",
 		Required: true,
 		EnvVars:  kservice.PrefixEnvVar(envVarPrefix, "SUB_SAFETY_MARGIN"),
diff --git a/components/node/chaincfg/chains.go b/components/node/chaincfg/chains.go
index 87a319619..651194e9d 100644
--- a/components/node/chaincfg/chains.go
+++ b/components/node/chaincfg/chains.go
@@ -30,8 +30,8 @@ var Mainnet = rollup.Config{
 		},
 	},
 	BlockTime:          2,
-	MaxProposerDrift:   600,
-	ProposerWindowSize: 3600,
+	MaxSequencerDrift: 600,
+	SeqWindowSize:     3600,
 	ChannelTimeout:     300,
 	L1ChainID:          big.NewInt(1),
 	L2ChainID:          big.NewInt(255),
@@ -60,8 +60,8 @@ var Sepolia = rollup.Config{
 		},
 	},
 	BlockTime:          2,
-	MaxProposerDrift:   1200,
-	ProposerWindowSize: 3600,
+	MaxSequencerDrift: 1200,
+	SeqWindowSize:     3600,
 	ChannelTimeout:     120,
 	L1ChainID:          big.NewInt(11155111),
 	L2ChainID:          big.NewInt(2358),
diff --git a/components/node/eth/sync_status.go b/components/node/eth/sync_status.go
index d50e8aacd..66b6162df 100644
--- a/components/node/eth/sync_status.go
+++ b/components/node/eth/sync_status.go
@@ -23,7 +23,7 @@ type SyncStatus struct {
 	FinalizedL1 L1BlockRef `json:"finalized_l1"`
 	// UnsafeL2 is the absolute tip of the L2 chain,
 	// pointing to block data that has not been submitted to L1 yet.
-	// The proposer is building this, and verifiers may also be ahead of the
+	// The sequencer is building this, and verifiers may also be ahead of the
 	// SafeL2 block if they sync blocks via p2p or other offchain sources.
 	UnsafeL2 L2BlockRef `json:"unsafe_l2"`
 	// SafeL2 points to the L2 block that was derived from the L1 chain.
diff --git a/components/node/flags/flags.go b/components/node/flags/flags.go
index eab8db587..7709c050f 100644
--- a/components/node/flags/flags.go
+++ b/components/node/flags/flags.go
@@ -110,27 +110,27 @@ var (
 		Required: false,
 		Value:    0,
 	}
-	ProposerEnabledFlag = &cli.BoolFlag{
-		Name:    "proposer.enabled",
-		Usage:   "Enable proposing of new L2 blocks. A separate batch submitter has to be deployed to publish the data for syncers.",
-		EnvVars: prefixEnvVar("PROPOSER_ENABLED"),
-	}
-	ProposerStoppedFlag = &cli.BoolFlag{
-		Name:    "proposer.stopped",
-		Usage:   "Initialize the proposer in a stopped state. The proposer can be started using the admin_startProposer RPC",
-		EnvVars: prefixEnvVar("PROPOSER_STOPPED"),
-	}
-	ProposerMaxSafeLagFlag = &cli.Uint64Flag{
-		Name:  "proposer.max-safe-lag",
+	SequencerEnabledFlag = &cli.BoolFlag{
+		Name:    "sequencer.enabled",
+		Usage:   "Enable sequencing of new L2 blocks. A separate batch submitter has to be deployed to publish the data for syncers.",
+		EnvVars: prefixEnvVar("SEQUENCER_ENABLED"),
+	}
+	SequencerStoppedFlag = &cli.BoolFlag{
+		Name:    "sequencer.stopped",
+		Usage:   "Initialize the sequencer in a stopped state. The sequencer can be started using the admin_startSequencer RPC",
+		EnvVars: prefixEnvVar("SEQUENCER_STOPPED"),
+	}
+	SequencerMaxSafeLagFlag = &cli.Uint64Flag{
+		Name:  "sequencer.max-safe-lag",
 		Usage: "Maximum number of L2 blocks for restricting the distance between L2 safe and unsafe. Disabled if 0.",
-		EnvVars:  prefixEnvVar("PROPOSER_MAX_SAFE_LAG"),
+		EnvVars:  prefixEnvVar("SEQUENCER_MAX_SAFE_LAG"),
 		Required: false,
 		Value:    0,
 	}
-	ProposerL1Confs = &cli.Uint64Flag{
-		Name:  "proposer.l1-confs",
-		Usage: "Number of L1 blocks to keep distance from the L1 head as a proposer for picking an L1 origin.",
-		EnvVars:  prefixEnvVar("PROPOSER_L1_CONFS"),
+	SequencerL1Confs = &cli.Uint64Flag{
+		Name:  "sequencer.l1-confs",
+		Usage: "Number of L1 blocks to keep distance from the L1 head as a sequencer for picking an L1 origin.",
+		EnvVars:  prefixEnvVar("SEQUENCER_L1_CONFS"),
 		Required: false,
 		Value:    4,
 	}
@@ -228,10 +228,10 @@ var optionalFlags = []cli.Flag{
 	L1HTTPPollInterval,
 	L2EngineJWTSecret,
 	SyncerL1Confs,
-	ProposerEnabledFlag,
-	ProposerStoppedFlag,
-	ProposerMaxSafeLagFlag,
-	ProposerL1Confs,
+	SequencerEnabledFlag,
+	SequencerStoppedFlag,
+	SequencerMaxSafeLagFlag,
+	SequencerL1Confs,
 	L1EpochPollIntervalFlag,
 	RPCEnableAdmin,
 	MetricsEnabledFlag,
diff --git a/components/node/flags/p2p_flags.go b/components/node/flags/p2p_flags.go
index 2beac0531..60af7359b 100644
--- a/components/node/flags/p2p_flags.go
+++ b/components/node/flags/p2p_flags.go
@@ -244,12 +244,12 @@ var (
 		Value:   "kroma_node_discovery_db",
 		EnvVars: p2pEnv("DISCOVERY_PATH"),
 	}
-	ProposerP2PKeyFlag = &cli.StringFlag{
-		Name:  "p2p.proposer.key",
-		Usage: "Hex-encoded private key for signing off on p2p application messages as a proposer.",
+	SequencerP2PKeyFlag = &cli.StringFlag{
+		Name:  "p2p.sequencer.key",
+		Usage: "Hex-encoded private key for signing off on p2p application messages as sequencer.",
 		Required: false,
 		Value:    "",
-		EnvVars: p2pEnv("PROPOSER_KEY"),
+		EnvVars: p2pEnv("SEQUENCER_KEY"),
 	}
 	GossipMeshDFlag = &cli.UintFlag{
 		Name: "p2p.gossip.mesh.d",
@@ -333,7 +333,7 @@ var p2pFlags = []cli.Flag{
 	TimeoutDial,
 	PeerstorePath,
 	DiscoveryPath,
-	ProposerP2PKeyFlag,
+	SequencerP2PKeyFlag,
 	GossipMeshDFlag,
 	GossipMeshDloFlag,
 	GossipMeshDhiFlag,
diff --git a/components/node/metrics/metrics.go b/components/node/metrics/metrics.go
index b90f8260b..76ef5b634 100644
--- a/components/node/metrics/metrics.go
+++ b/components/node/metrics/metrics.go
@@ -52,16 +52,16 @@ type Metricer interface {
 	RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID)
 	CountSequencedTxs(count int)
 	RecordL1ReorgDepth(d uint64)
-	RecordProposerInconsistentL1Origin(from eth.BlockID, to eth.BlockID)
-	RecordProposerReset()
+	RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID)
+	RecordSequencerReset()
 	RecordGossipEvent(evType int32)
 	IncPeerCount()
 	DecPeerCount()
 	IncStreamCount()
 	DecStreamCount()
 	RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter)
-	RecordProposerBuildingDiffTime(duration time.Duration)
-	RecordProposerSealingTime(duration time.Duration)
+	RecordSequencerBuildingDiffTime(duration time.Duration)
+	RecordSequencerSealingTime(duration time.Duration)
 	Document() []metrics.DocumentedMetric
 	RecordChannelInputBytes(num int)
 	// P2P Metrics
@@ -103,16 +103,16 @@ type Metrics struct {

 	PayloadsQuarantineTotal prometheus.Gauge

-	ProposerInconsistentL1Origin *EventMetrics
-	ProposerResets               *EventMetrics
+	SequencerInconsistentL1Origin *EventMetrics
+	SequencerResets               *EventMetrics

 	L1RequestDurationSeconds *prometheus.HistogramVec

-	ProposerBuildingDiffDurationSeconds prometheus.Histogram
-	ProposerBuildingDiffTotal           prometheus.Counter
+	SequencerBuildingDiffDurationSeconds prometheus.Histogram
+	SequencerBuildingDiffTotal           prometheus.Counter

-	ProposerSealingDurationSeconds prometheus.Histogram
-	ProposerSealingTotal           prometheus.Counter
+	SequencerSealingDurationSeconds prometheus.Histogram
+	SequencerSealingTotal           prometheus.Counter

 	UnsafePayloadsBufferLen     prometheus.Gauge
 	UnsafePayloadsBufferMemSize prometheus.Gauge
@@ -233,8 +233,8 @@ func NewMetrics(procName string) *Metrics {
 		SequencingErrors: NewEventMetrics(factory, ns, "sequencing_errors", "sequencing errors"),
 		PublishingErrors: NewEventMetrics(factory, ns, "publishing_errors", "p2p publishing errors"),

-		ProposerInconsistentL1Origin: NewEventMetrics(factory, ns, "proposer_inconsistent_l1_origin", "events when the proposer selects an inconsistent L1 origin"),
-		ProposerResets:               NewEventMetrics(factory, ns, "proposer_resets", "proposer resets"),
+		SequencerInconsistentL1Origin: NewEventMetrics(factory, ns, "sequencer_inconsistent_l1_origin", "events when the sequencer selects an inconsistent L1 origin"),
+		SequencerResets:               NewEventMetrics(factory, ns, "sequencer_resets", "sequencer resets"),

 		UnsafePayloadsBufferLen: factory.NewGauge(prometheus.GaugeOpts{
 			Namespace: ns,
@@ -413,30 +413,30 @@ func NewMetrics(procName string) *Metrics {
 			Help: "Histogram of L1 request time",
 		}, []string{"request"}),

-		ProposerBuildingDiffDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
+		SequencerBuildingDiffDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
 			Namespace: ns,
-			Name:      "proposer_building_diff_seconds",
+			Name:      "sequencer_building_diff_seconds",
 			Buckets: []float64{
 				-10, -5, -2.5, -1, -.5, -.25, -.1, -0.05, -0.025, -0.01, -0.005,
 				.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10,
 			},
-			Help: "Histogram of Proposer building time, minus block time",
+			Help: "Histogram of Sequencer building time, minus block time",
 		}),
-		ProposerBuildingDiffTotal: factory.NewCounter(prometheus.CounterOpts{
+		SequencerBuildingDiffTotal: factory.NewCounter(prometheus.CounterOpts{
 			Namespace: ns,
-			Name:      "proposer_building_diff_total",
-			Help:      "Number of proposer block building jobs",
+			Name:      "sequencer_building_diff_total",
+			Help:      "Number of sequencer block building jobs",
 		}),
-		ProposerSealingDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
+		SequencerSealingDurationSeconds: factory.NewHistogram(prometheus.HistogramOpts{
 			Namespace: ns,
-			Name:      "proposer_sealing_seconds",
+			Name:      "sequencer_sealing_seconds",
 			Buckets:   []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10},
-			Help:      "Histogram of proposer block sealing time",
+			Help:      "Histogram of Sequencer block sealing time",
 		}),
-		ProposerSealingTotal: factory.NewCounter(prometheus.CounterOpts{
+		SequencerSealingTotal: factory.NewCounter(prometheus.CounterOpts{
 			Namespace: ns,
-			Name:      "proposer_sealing_total",
-			Help:
"Number of proposer block sealing jobs", + Name: "sequencer_sealing_total", + Help: "Number of sequencer block sealing jobs", }), registry: registry, @@ -589,14 +589,14 @@ func (m *Metrics) RecordL1ReorgDepth(d uint64) { m.L1ReorgDepth.Observe(float64(d)) } -func (m *Metrics) RecordProposerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) { - m.ProposerInconsistentL1Origin.RecordEvent() +func (m *Metrics) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) { + m.SequencerInconsistentL1Origin.RecordEvent() m.recordRef("l1_origin", "inconsistent_from", from.Number, 0, from.Hash) m.recordRef("l1_origin", "inconsistent_to", to.Number, 0, to.Hash) } -func (m *Metrics) RecordProposerReset() { - m.ProposerResets.RecordEvent() +func (m *Metrics) RecordSequencerReset() { + m.SequencerResets.RecordEvent() } func (m *Metrics) RecordGossipEvent(evType int32) { @@ -640,19 +640,19 @@ func (m *Metrics) RecordL1RequestTime(method string, duration time.Duration) { m.L1RequestDurationSeconds.WithLabelValues(method).Observe(float64(duration) / float64(time.Second)) } -// RecordProposerBuildingDiffTime tracks the amount of time the proposer was allowed between +// RecordSequencerBuildingDiffTime tracks the amount of time the sequencer was allowed between // start to finish, incl. sealing, minus the block time. -// Ideally this is 0, realistically the proposer scheduler may be busy with other jobs like syncing sometimes. -func (m *Metrics) RecordProposerBuildingDiffTime(duration time.Duration) { - m.ProposerBuildingDiffTotal.Inc() - m.ProposerBuildingDiffDurationSeconds.Observe(float64(duration) / float64(time.Second)) +// Ideally this is 0, realistically the sequencer scheduler may be busy with other jobs like syncing sometimes. +func (m *Metrics) RecordSequencerBuildingDiffTime(duration time.Duration) { + m.SequencerBuildingDiffTotal.Inc() + m.SequencerBuildingDiffDurationSeconds.Observe(float64(duration) / float64(time.Second)) } -// RecordProposerSealingTime tracks the amount of time the proposer took to finish sealing the block. +// RecordSequencerSealingTime tracks the amount of time the sequencer took to finish sealing the block. // Ideally this is 0, realistically it may take some time. -func (m *Metrics) RecordProposerSealingTime(duration time.Duration) { - m.ProposerSealingTotal.Inc() - m.ProposerSealingDurationSeconds.Observe(float64(duration) / float64(time.Second)) +func (m *Metrics) RecordSequencerSealingTime(duration time.Duration) { + m.SequencerSealingTotal.Inc() + m.SequencerSealingDurationSeconds.Observe(float64(duration) / float64(time.Second)) } // Serve starts the metrics server on the given hostname and port. 
@@ -780,10 +780,10 @@ func (n *noopMetricer) CountSequencedTxs(count int) { func (n *noopMetricer) RecordL1ReorgDepth(d uint64) { } -func (n *noopMetricer) RecordProposerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) { +func (n *noopMetricer) RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) { } -func (n *noopMetricer) RecordProposerReset() { +func (n *noopMetricer) RecordSequencerReset() { } func (n *noopMetricer) RecordGossipEvent(evType int32) { @@ -807,10 +807,10 @@ func (n *noopMetricer) DecStreamCount() { func (n *noopMetricer) RecordBandwidth(ctx context.Context, bwc *libp2pmetrics.BandwidthCounter) { } -func (n *noopMetricer) RecordProposerBuildingDiffTime(duration time.Duration) { +func (n *noopMetricer) RecordSequencerBuildingDiffTime(duration time.Duration) { } -func (n *noopMetricer) RecordProposerSealingTime(duration time.Duration) { +func (n *noopMetricer) RecordSequencerSealingTime(duration time.Duration) { } func (n *noopMetricer) Document() []metrics.DocumentedMetric { diff --git a/components/node/node/api.go b/components/node/node/api.go index 0dc5268db..096c2a08d 100644 --- a/components/node/node/api.go +++ b/components/node/node/api.go @@ -29,8 +29,8 @@ type driverClient interface { SyncStatus(ctx context.Context) (*eth.SyncStatus, error) BlockRefsWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, eth.L2BlockRef, *eth.SyncStatus, error) ResetDerivationPipeline(context.Context) error - StartProposer(ctx context.Context, blockHash common.Hash) error - StopProposer(context.Context) (common.Hash, error) + StartSequencer(ctx context.Context, blockHash common.Hash) error + StopSequencer(context.Context) (common.Hash, error) } type rpcMetrics interface { @@ -56,16 +56,16 @@ func (n *adminAPI) ResetDerivationPipeline(ctx context.Context) error { return n.dr.ResetDerivationPipeline(ctx) } -func (n *adminAPI) StartProposer(ctx context.Context, blockHash common.Hash) error { - recordDur := n.m.RecordRPCServerRequest("admin_startProposer") +func (n *adminAPI) StartSequencer(ctx context.Context, blockHash common.Hash) error { + recordDur := n.m.RecordRPCServerRequest("admin_startSequencer") defer recordDur() - return n.dr.StartProposer(ctx, blockHash) + return n.dr.StartSequencer(ctx, blockHash) } -func (n *adminAPI) StopProposer(ctx context.Context) (common.Hash, error) { - recordDur := n.m.RecordRPCServerRequest("admin_stopProposer") +func (n *adminAPI) StopSequencer(ctx context.Context) (common.Hash, error) { + recordDur := n.m.RecordRPCServerRequest("admin_stopSequencer") defer recordDur() - return n.dr.StopProposer(ctx) + return n.dr.StopSequencer(ctx) } type nodeAPI struct { diff --git a/components/node/node/config.go b/components/node/node/config.go index 087d76c4a..c530fd67e 100644 --- a/components/node/node/config.go +++ b/components/node/node/config.go @@ -22,7 +22,7 @@ type Config struct { Rollup rollup.Config // P2PSigner will be used for signing off on published content - // if the node is proposing and if the p2p stack is enabled + // if the node is sequencing and if the p2p stack is enabled P2PSigner p2p.SignerSetup RPC RPCConfig diff --git a/components/node/node/runtime_config.go b/components/node/node/runtime_config.go index 37ddf32ec..a34c5d7eb 100644 --- a/components/node/node/runtime_config.go +++ b/components/node/node/runtime_config.go @@ -13,11 +13,9 @@ import ( "github.com/kroma-network/kroma/components/node/rollup" ) -var ( - // UnsafeBlockSignerAddressSystemConfigStorageSlot is the storage slot identifier of the 
unsafeBlockSigner - // `address` storage value in the SystemConfig L1 contract. Computed as `keccak256("systemconfig.unsafeblocksigner")` - UnsafeBlockSignerAddressSystemConfigStorageSlot = common.HexToHash("0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08") -) +// UnsafeBlockSignerAddressSystemConfigStorageSlot is the storage slot identifier of the unsafeBlockSigner +// `address` storage value in the SystemConfig L1 contract. Computed as `keccak256("systemconfig.unsafeblocksigner")` +var UnsafeBlockSignerAddressSystemConfigStorageSlot = common.HexToHash("0x65a7ed542fb37fe237fdfbdd70b31598523fe5b32879e307bae27a0bd9581c08") type RuntimeCfgL1Source interface { ReadStorageAt(ctx context.Context, address common.Address, storageSlot common.Hash, blockHash common.Hash) (common.Hash, error) @@ -57,7 +55,7 @@ func NewRuntimeConfig(log log.Logger, l1Client RuntimeCfgL1Source, rollupCfg *ro } } -func (r *RuntimeConfig) P2PProposerAddress() common.Address { +func (r *RuntimeConfig) P2PSequencerAddress() common.Address { r.mu.RLock() defer r.mu.RUnlock() return r.p2pBlockSignerAddr @@ -75,6 +73,6 @@ func (r *RuntimeConfig) Load(ctx context.Context, l1Ref eth.L1BlockRef) error { defer r.mu.Unlock() r.l1Ref = l1Ref r.p2pBlockSignerAddr = common.BytesToAddress(val[:]) - r.log.Info("loaded new runtime config values!", "p2p_proposer_address", r.p2pBlockSignerAddr) + r.log.Info("loaded new runtime config values!", "p2p_sequencer_address", r.p2pBlockSignerAddr) return nil } diff --git a/components/node/node/server_test.go b/components/node/node/server_test.go index 8377b5024..f0c3d6f49 100644 --- a/components/node/node/server_test.go +++ b/components/node/node/server_test.go @@ -240,10 +240,10 @@ func (c *mockDriverClient) ResetDerivationPipeline(ctx context.Context) error { return c.Mock.MethodCalled("ResetDerivationPipeline").Get(0).(error) } -func (c *mockDriverClient) StartProposer(ctx context.Context, blockHash common.Hash) error { - return c.Mock.MethodCalled("StartProposer").Get(0).(error) +func (c *mockDriverClient) StartSequencer(ctx context.Context, blockHash common.Hash) error { + return c.Mock.MethodCalled("StartSequencer").Get(0).(error) } -func (c *mockDriverClient) StopProposer(ctx context.Context) (common.Hash, error) { - return c.Mock.MethodCalled("StopProposer").Get(0).(common.Hash), nil +func (c *mockDriverClient) StopSequencer(ctx context.Context) (common.Hash, error) { + return c.Mock.MethodCalled("StopSequencer").Get(0).(common.Hash), nil } diff --git a/components/node/p2p/cli/load_signer.go b/components/node/p2p/cli/load_signer.go index d37c6239e..19e7964fa 100644 --- a/components/node/p2p/cli/load_signer.go +++ b/components/node/p2p/cli/load_signer.go @@ -15,7 +15,7 @@ import ( // LoadSignerSetup loads a configuration for a Signer to be set up later func LoadSignerSetup(ctx *cli.Context) (p2p.SignerSetup, error) { - key := ctx.String(flags.ProposerP2PKeyFlag.Name) + key := ctx.String(flags.SequencerP2PKeyFlag.Name) if key != "" { // Mnemonics are bad because they leak *all* keys when they leak. // Unencrypted keys from file are bad because they are easy to leak (and we are not checking file permissions). 
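A minimal sketch (not part of this diff; the key value is hypothetical) of what the renamed flag feeds into: LoadSignerSetup parses the hex key into a local signer (p2p.NewLocalSigner wrapped in a PreparedSigner), and gossip validation later accepts only blocks whose recovered author matches P2PSequencerAddress():

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Hypothetical value; in practice this comes from --p2p.sequencer.key
	// (or the corresponding SEQUENCER_KEY environment variable).
	keyHex := "4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318"
	priv, err := crypto.HexToECDSA(keyHex)
	if err != nil {
		panic(err)
	}
	// verifyBlockSignature compares the recovered signer against this address.
	fmt.Println("p2p sequencer address:", crypto.PubkeyToAddress(priv.PublicKey))
}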
diff --git a/components/node/p2p/gossip.go b/components/node/p2p/gossip.go index 293787f63..65d4e4d35 100644 --- a/components/node/p2p/gossip.go +++ b/components/node/p2p/gossip.go @@ -58,7 +58,7 @@ type GossipSetupConfigurables interface { } type GossipRuntimeConfig interface { - P2PProposerAddress() common.Address + P2PSequencerAddress() common.Address } //go:generate mockery --name GossipMetricer @@ -275,7 +275,7 @@ func BuildBlocksValidator(log log.Logger, cfg *rollup.Config, runCfg GossipRunti // message starts with compact-encoding secp256k1 encoded signature signatureBytes, payloadBytes := data[:65], data[65:] - // [REJECT] if the signature by the proposer is not valid + // [REJECT] if the signature by the sequencer is not valid result := verifyBlockSignature(log, cfg, runCfg, id, signatureBytes, payloadBytes) if result != pubsub.ValidationAccept { return result @@ -354,8 +354,8 @@ func verifyBlockSignature(log log.Logger, cfg *rollup.Config, runCfg GossipRunti // For now we only have one signer at a time and thus check the address directly. // This means we may drop old payloads upon key rotation, // but this can be recovered from like any other missed unsafe payload. - if expected := runCfg.P2PProposerAddress(); expected == (common.Address{}) { - log.Warn("no configured p2p proposer address, ignoring gossiped block", "peer", id, "addr", addr) + if expected := runCfg.P2PSequencerAddress(); expected == (common.Address{}) { + log.Warn("no configured p2p sequencer address, ignoring gossiped block", "peer", id, "addr", addr) return pubsub.ValidationIgnore } else if addr != expected { log.Warn("unexpected block author", "err", err, "peer", id, "addr", addr, "expected", expected) diff --git a/components/node/p2p/gossip_test.go b/components/node/p2p/gossip_test.go index b8aefb2cc..ac043ebdd 100644 --- a/components/node/p2p/gossip_test.go +++ b/components/node/p2p/gossip_test.go @@ -50,8 +50,8 @@ func TestVerifyBlockSignature(t *testing.T) { msg := []byte("any msg") t.Run("Valid", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PPropAddress: crypto.PubkeyToAddress(secrets.ProposerP2P.PublicKey)} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.ProposerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) @@ -59,8 +59,8 @@ func TestVerifyBlockSignature(t *testing.T) { }) t.Run("WrongSigner", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PPropAddress: common.HexToAddress("0x1234")} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.ProposerP2P)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: common.HexToAddress("0x1234")} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) @@ -68,15 +68,15 @@ func TestVerifyBlockSignature(t *testing.T) { }) t.Run("InvalidSignature", func(t *testing.T) { - runCfg := &testutils.MockRuntimeConfig{P2PPropAddress: crypto.PubkeyToAddress(secrets.ProposerP2P.PublicKey)} + runCfg := &testutils.MockRuntimeConfig{P2PSeqAddress: crypto.PubkeyToAddress(secrets.SequencerP2P.PublicKey)} 
sig := make([]byte, 65) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig, msg) require.Equal(t, pubsub.ValidationReject, result) }) - t.Run("NoProposer", func(t *testing.T) { + t.Run("NoSequencer", func(t *testing.T) { runCfg := &testutils.MockRuntimeConfig{} - signer := &PreparedSigner{Signer: NewLocalSigner(secrets.ProposerP2P)} + signer := &PreparedSigner{Signer: NewLocalSigner(secrets.SequencerP2P)} sig, err := signer.Sign(context.Background(), SigningDomainBlocksV1, cfg.L2ChainID, msg) require.NoError(t, err) result := verifyBlockSignature(logger, cfg, runCfg, peerId, sig[:65], msg) diff --git a/components/node/p2p/host_test.go b/components/node/p2p/host_test.go index dbc409927..527866496 100644 --- a/components/node/p2p/host_test.go +++ b/components/node/p2p/host_test.go @@ -113,8 +113,8 @@ func TestP2PFull(t *testing.T) { confB.Store = sync.MutexWrap(ds.NewMapDatastore()) // TODO: maybe swap the order of sec/mux preferences, to test that negotiation works - runCfgA := &testutils.MockRuntimeConfig{P2PPropAddress: common.Address{0x42}} - runCfgB := &testutils.MockRuntimeConfig{P2PPropAddress: common.Address{0x42}} + runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} + runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} logA := testlog.Logger(t, log.LvlError).New("host", "A") nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics) @@ -261,9 +261,9 @@ func TestDiscovery(t *testing.T) { confB.Store = sync.MutexWrap(ds.NewMapDatastore()) confB.DiscoveryDB = discDBB - runCfgA := &testutils.MockRuntimeConfig{P2PPropAddress: common.Address{0x42}} - runCfgB := &testutils.MockRuntimeConfig{P2PPropAddress: common.Address{0x42}} - runCfgC := &testutils.MockRuntimeConfig{P2PPropAddress: common.Address{0x42}} + runCfgA := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} + runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} + runCfgC := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} resourcesCtx, resourcesCancel := context.WithCancel(context.Background()) defer resourcesCancel() diff --git a/components/node/rollup/derive/attributes.go b/components/node/rollup/derive/attributes.go index 5820725d3..1d61b8ec7 100644 --- a/components/node/rollup/derive/attributes.go +++ b/components/node/rollup/derive/attributes.go @@ -38,8 +38,8 @@ func NewFetchingAttributesBuilder(cfg *rollup.Config, l1 L1ReceiptsFetcher, l2 S } // PreparePayloadAttributes prepares a PayloadAttributes template that is ready to build a L2 block with deposits only, on top of the given l2Parent, with the given epoch as L1 origin. -// The template defaults to NoTxPool=true, and no proposer transactions: the caller has to modify the template to add transactions, -// by setting NoTxPool=false as proposer, or by appending batch transactions as syncer. +// The template defaults to NoTxPool=true, and no sequencer transactions: the caller has to modify the template to add transactions, +// by setting NoTxPool=false as sequencer, or by appending batch transactions as syncer. // The severity of the error is returned; a crit=false error means there was a temporary issue, like a failed RPC or time-out. // A crit=true error means the input arguments are inconsistent or invalid. 
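// Illustrative usage (not part of this diff), matching the two call sites
// renamed elsewhere in this PR:
//
//	attrs, err := ba.PreparePayloadAttributes(ctx, l2Parent, epoch)
//	// as sequencer: open the tx-pool (unless past MaxSequencerDrift)
//	attrs.NoTxPool = false
//	// or, as syncer: keep NoTxPool=true and replay the batch transactions
//	attrs.Transactions = append(attrs.Transactions, batch.Transactions...)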
func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Context, l2Parent eth.L2BlockRef, epoch eth.BlockID) (attrs *eth.PayloadAttributes, err error) { diff --git a/components/node/rollup/derive/attributes_queue.go b/components/node/rollup/derive/attributes_queue.go index 3af12ccee..e15ed4ee0 100644 --- a/components/node/rollup/derive/attributes_queue.go +++ b/components/node/rollup/derive/attributes_queue.go @@ -87,7 +87,7 @@ func (aq *AttributesQueue) createNextAttributes(ctx context.Context, batch *Batc return nil, err } - // we are syncing, not proposing, we've got all transactions and do not pull from the tx-pool + // we are syncing, not sequencing, we've got all transactions and do not pull from the tx-pool // (that would make the block derivation non-deterministic) attrs.NoTxPool = true attrs.Transactions = append(attrs.Transactions, batch.Transactions...) diff --git a/components/node/rollup/derive/batch_queue.go b/components/node/rollup/derive/batch_queue.go index f73f0ed38..7fc40f987 100644 --- a/components/node/rollup/derive/batch_queue.go +++ b/components/node/rollup/derive/batch_queue.go @@ -13,10 +13,10 @@ import ( ) // The batch queue is responsible for ordering unordered batches & generating empty batches -// when the proposer window has passed. This is a very stateful stage. +// when the sequencer window has passed. This is a very stateful stage. // // It receives batches that are tagged with the L1 Inclusion block of the batch. It only considers -// batches that are inside the proposer window of a specific L1 Origin. +// batches that are inside the sequencer window of a specific L1 Origin. // It tries to eagerly pull batches based on the current L2 safe head. // Otherwise it filters/creates an entire epoch's worth of batches at once. // @@ -218,8 +218,8 @@ batchLoop: } // If the current epoch is too old compared to the L1 block we are at, - // i.e. if the proposer window expired, we create empty batches for the current epoch - expiryEpoch := epoch.Number + bq.config.ProposerWindowSize + // i.e. if the sequence window expired, we create empty batches for the current epoch + expiryEpoch := epoch.Number + bq.config.SeqWindowSize forceEmptyBatches := (expiryEpoch == bq.origin.Number && outOfData) || expiryEpoch < bq.origin.Number firstOfEpoch := epoch.Number == l2SafeHead.L1Origin.Number+1 @@ -228,7 +228,7 @@ batchLoop: "epoch_time", epoch.Time, "len_l1_blocks", len(bq.l1Blocks), "firstOfEpoch", firstOfEpoch) if !forceEmptyBatches { - // proposer window did not expire yet, still room to receive batches for the current epoch, + // sequencer window did not expire yet, still room to receive batches for the current epoch, // no need to force-create empty batch(es) towards the next epoch yet. 
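// Worked example with illustrative numbers (not from this change): with
// SeqWindowSize = 3600 and an epoch whose origin is L1 block 100, the epoch
// expires at expiryEpoch = 3700. Until then the queue returns io.EOF here and
// keeps waiting for batches; once bq.origin passes block 3700 (or reaches it
// with no more data), forceEmptyBatches flips and empty batches fill the epoch.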
return nil, io.EOF } diff --git a/components/node/rollup/derive/batch_queue_test.go b/components/node/rollup/derive/batch_queue_test.go index d0f2d2bb3..026ed1f85 100644 --- a/components/node/rollup/derive/batch_queue_test.go +++ b/components/node/rollup/derive/batch_queue_test.go @@ -91,9 +91,9 @@ func TestBatchQueueNewOrigin(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, - MaxProposerDrift: 600, - ProposerWindowSize: 2, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 2, } input := &fakeBatchQueueInput{ @@ -150,9 +150,9 @@ func TestBatchQueueEager(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, - MaxProposerDrift: 600, - ProposerWindowSize: 30, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 30, } batches := []*BatchData{b(12, l1[0]), b(14, l1[0]), b(16, l1[0]), b(18, l1[0]), b(20, l1[0]), b(22, l1[0]), b(24, l1[1]), nil} @@ -200,9 +200,9 @@ func TestBatchQueueInvalidInternalAdvance(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, - MaxProposerDrift: 600, - ProposerWindowSize: 2, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 2, } batches := []*BatchData{b(12, l1[0]), b(14, l1[0]), b(16, l1[0]), b(18, l1[0]), b(20, l1[0]), b(22, l1[0]), nil} @@ -290,14 +290,14 @@ func TestBatchQueueMissing(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 10, }, - BlockTime: 2, - MaxProposerDrift: 600, - ProposerWindowSize: 2, + BlockTime: 2, + MaxSequencerDrift: 600, + SeqWindowSize: 2, } // The batches at 18 and 20 are skipped to stop 22 from being eagerly processed. // This test checks that batch timestamp 12 & 14 are created, 16 is used, and 18 is advancing the epoch. - // Due to the large proposer time drift 16 is perfectly valid to have epoch 0 as origin. + // Due to the large sequencer time drift 16 is perfectly valid to have epoch 0 as origin. batches := []*BatchData{b(16, l1[0]), b(22, l1[1])} errors := []error{nil, nil} diff --git a/components/node/rollup/derive/batches.go b/components/node/rollup/derive/batches.go index 6c75573a9..a3ed31d4c 100644 --- a/components/node/rollup/derive/batches.go +++ b/components/node/rollup/derive/batches.go @@ -62,8 +62,8 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l } // Filter out batches that were included too late. - if uint64(batch.Batch.EpochNum)+cfg.ProposerWindowSize < batch.L1InclusionBlock.Number { - log.Warn("batch was included too late, proposer window expired") + if uint64(batch.Batch.EpochNum)+cfg.SeqWindowSize < batch.L1InclusionBlock.Number { + log.Warn("batch was included too late, sequencer window expired") return BatchDrop } @@ -101,10 +101,10 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l return BatchDrop } - // Check if we ran out of proposer time drift - if max := batchOrigin.Time + cfg.MaxProposerDrift; batch.Batch.Timestamp > max { + // Check if we ran out of sequencer time drift + if max := batchOrigin.Time + cfg.MaxSequencerDrift; batch.Batch.Timestamp > max { if len(batch.Batch.Transactions) == 0 { - // If the proposer is co-operating by producing an empty batch, + // If the sequencer is co-operating by producing an empty batch, // then allow the batch if it was the right thing to do to maintain the L2 time >= L1 time invariant. // We only check batches that do not advance the epoch, to ensure epoch advancement regardless of time drift is allowed. 
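// Worked example with illustrative numbers (not from this change): with
// MaxSequencerDrift = 600 and an origin at time 1000, any batch stamped later
// than max = 1600 reaches this branch. An empty batch on the same epoch may
// still be accepted just below, to preserve the L2 time >= L1 time invariant;
// a non-empty one is dropped in the else-branch.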
if epoch.Number == batchOrigin.Number { @@ -114,16 +114,16 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l } nextOrigin := l1Blocks[1] if batch.Batch.Timestamp >= nextOrigin.Time { // check if the next L1 origin could have been adopted - log.Info("batch exceeded proposer time drift without adopting next origin, and next L1 origin would have been valid") + log.Info("batch exceeded sequencer time drift without adopting next origin, and next L1 origin would have been valid") return BatchDrop } else { log.Info("continuing with empty batch before late L1 block to preserve L2 time invariant") } } } else { - // If the proposer is ignoring the time drift rule, then drop the batch and force an empty batch instead, - // as the proposer is not allowed to include anything past this point without moving to the next epoch. - log.Warn("batch exceeded proposer time drift, proposer must adopt new L1 origin to include transactions again", "max_time", max) + // If the sequencer is ignoring the time drift rule, then drop the batch and force an empty batch instead, + // as the sequencer is not allowed to include anything past this point without moving to the next epoch. + log.Warn("batch exceeded sequencer time drift, sequencer must adopt new L1 origin to include transactions again", "max_time", max) return BatchDrop } } @@ -135,7 +135,7 @@ func CheckBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1BlockRef, l return BatchDrop } if txBytes[0] == types.DepositTxType { - log.Warn("proposers may not embed any deposits into batch data, but found tx that has one", "tx_index", i) + log.Warn("sequencers may not embed any deposits into batch data, but found tx that has one", "tx_index", i) return BatchDrop } } diff --git a/components/node/rollup/derive/batches_test.go b/components/node/rollup/derive/batches_test.go index 239d6d56a..faa6242f1 100644 --- a/components/node/rollup/derive/batches_test.go +++ b/components/node/rollup/derive/batches_test.go @@ -32,9 +32,9 @@ func TestValidBatch(t *testing.T) { Genesis: rollup.Genesis{ L2Time: 31, // a genesis time that itself does not align to make it more interesting }, - BlockTime: 2, - ProposerWindowSize: 4, - MaxProposerDrift: 6, + BlockTime: 2, + SeqWindowSize: 4, + MaxSequencerDrift: 6, // other config fields are ignored and can be left empty. 
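// Illustrative reading of these constants (not part of this change): an epoch
// expires SeqWindowSize = 4 L1 blocks after its origin, and with
// MaxSequencerDrift = 6 and BlockTime = 2 a batch may be stamped at most 6s
// (three L2 blocks) past its origin's time.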
} @@ -146,7 +146,7 @@ func TestValidBatch(t *testing.T) { Hash: testutils.RandomHash(rng), Number: l2X0.Number + 1, ParentHash: l2X0.Hash, - Time: l2X0.Time + conf.BlockTime, // exceeds proposer time drift, forced to be empty block + Time: l2X0.Time + conf.BlockTime, // exceeds sequencer time drift, forced to be empty block L1Origin: l1Y.ID(), SequenceNumber: 0, } @@ -249,7 +249,7 @@ func TestValidBatch(t *testing.T) { Expected: BatchDrop, }, { - Name: "proposer window expired", + Name: "sequencer window expired", L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C, l1D, l1E, l1F}, L2SafeHead: l2A0, Batch: BatchWithL1InclusionBlock{ @@ -329,7 +329,7 @@ func TestValidBatch(t *testing.T) { Expected: BatchDrop, }, { - Name: "proposer time drift on same epoch with non-empty txs", + Name: "sequencer time drift on same epoch with non-empty txs", L1Blocks: []eth.L1BlockRef{l1A, l1B}, L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ @@ -339,13 +339,13 @@ func TestValidBatch(t *testing.T) { EpochNum: rollup.Epoch(l2A4.L1Origin.Number), EpochHash: l2A4.L1Origin.Hash, Timestamp: l2A4.Time, - Transactions: []hexutil.Bytes{[]byte("proposer should not include this tx")}, + Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }}, }, Expected: BatchDrop, }, { - Name: "proposer time drift on changing epoch with non-empty txs", + Name: "sequencer time drift on changing epoch with non-empty txs", L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, L2SafeHead: l2X0, Batch: BatchWithL1InclusionBlock{ @@ -355,13 +355,13 @@ func TestValidBatch(t *testing.T) { EpochNum: rollup.Epoch(l2Y0.L1Origin.Number), EpochHash: l2Y0.L1Origin.Hash, Timestamp: l2Y0.Time, // valid, but more than 6 ahead of l1Y.Time - Transactions: []hexutil.Bytes{[]byte("proposer should not include this tx")}, + Transactions: []hexutil.Bytes{[]byte("sequencer should not include this tx")}, }}, }, Expected: BatchDrop, }, { - Name: "proposer time drift on same epoch with empty txs and late next epoch", + Name: "sequencer time drift on same epoch with empty txs and late next epoch", L1Blocks: []eth.L1BlockRef{l1A, l1BLate}, L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ @@ -377,7 +377,7 @@ func TestValidBatch(t *testing.T) { Expected: BatchAccept, // accepted because empty & preserving L2 time invariant }, { - Name: "proposer time drift on changing epoch with empty txs", + Name: "sequencer time drift on changing epoch with empty txs", L1Blocks: []eth.L1BlockRef{l1X, l1Y, l1Z}, L2SafeHead: l2X0, Batch: BatchWithL1InclusionBlock{ @@ -393,7 +393,7 @@ func TestValidBatch(t *testing.T) { Expected: BatchAccept, // accepted because empty & still advancing epoch }, { - Name: "proposer time drift on same epoch with empty txs and no next epoch in sight yet", + Name: "sequencer time drift on same epoch with empty txs and no next epoch in sight yet", L1Blocks: []eth.L1BlockRef{l1A}, L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ @@ -409,7 +409,7 @@ func TestValidBatch(t *testing.T) { Expected: BatchUndecided, // we have to wait till the next epoch is in sight to check the time }, { - Name: "proposer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", + Name: "sequencer time drift on same epoch with empty txs and but in-sight epoch that invalidates it", L1Blocks: []eth.L1BlockRef{l1A, l1B, l1C}, L2SafeHead: l2A3, Batch: BatchWithL1InclusionBlock{ diff --git a/components/node/rollup/derive/engine_queue.go b/components/node/rollup/derive/engine_queue.go index 1c2ccd132..e7549bdab 100644 --- 
a/components/node/rollup/derive/engine_queue.go +++ b/components/node/rollup/derive/engine_queue.go @@ -483,7 +483,7 @@ func (eq *EngineQueue) tryNextUnsafePayload(ctx context.Context) error { eq.unsafePayloads.Pop() eq.metrics.RecordL2Ref("l2_unsafe", ref) eq.log.Trace("Executed unsafe payload", "hash", ref.Hash, "number", ref.Number, "timestamp", ref.Time, "l1Origin", ref.L1Origin) - eq.logSyncProgress("unsafe payload from proposer") + eq.logSyncProgress("unsafe payload from sequencer") return nil } diff --git a/components/node/rollup/derive/engine_queue_test.go b/components/node/rollup/derive/engine_queue_test.go index 2b437f64b..b3136c500 100644 --- a/components/node/rollup/derive/engine_queue_test.go +++ b/components/node/rollup/derive/engine_queue_test.go @@ -92,8 +92,8 @@ func TestEngineQueue_Finalize(t *testing.T) { L2: refA0.ID(), L2Time: refA0.Time, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -221,7 +221,7 @@ func TestEngineQueue_Finalize(t *testing.T) { eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil) eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil) - // now full proposer window, inclusive + // now full seq window, inclusive l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil) eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil) eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil) @@ -249,7 +249,7 @@ func TestEngineQueue_Finalize(t *testing.T) { eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) - require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to proposer window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") + require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequence window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B") require.Equal(t, refA1, eq.Finalized(), "A1 is recognized as finalized before we run any steps") @@ -327,8 +327,8 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) { L2: refA0.ID(), L2Time: refA0.Time, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -456,7 +456,7 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) { eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil) eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil) - // now full proposer window, inclusive + // now full sequencer window, inclusive l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil) eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil) eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil) @@ -484,7 +484,7 @@ func TestEngineQueue_ResetWhenUnsafeOriginNotCanonical(t *testing.T) { eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) - require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to proposer window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") + require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequencer window ago: blocks with origin E and D are not safe until we reconcile, C is 
extra, and B1 is the end we look for") require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B") require.Equal(t, refA1, eq.Finalized(), "A1 is recognized as finalized before we run any steps") @@ -580,8 +580,8 @@ func TestVerifyNewL1Origin(t *testing.T) { L2: refA0.ID(), L2Time: refA0.Time, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -788,7 +788,7 @@ func TestVerifyNewL1Origin(t *testing.T) { eng.ExpectL2BlockRefByHash(refE1.ParentHash, refE0, nil) eng.ExpectL2BlockRefByHash(refE0.ParentHash, refD1, nil) - // now full proposer window, inclusive + // now full sequencer window, inclusive l1F.ExpectL1BlockRefByHash(refD.Hash, refD, nil) eng.ExpectL2BlockRefByHash(refD1.ParentHash, refD0, nil) eng.ExpectL2BlockRefByHash(refD0.ParentHash, refC1, nil) @@ -815,7 +815,7 @@ func TestVerifyNewL1Origin(t *testing.T) { eq := NewEngineQueue(logger, cfg, eng, metrics, prev, l1F) require.ErrorIs(t, eq.Reset(context.Background(), eth.L1BlockRef{}, eth.SystemConfig{}), io.EOF) - require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to proposer window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") + require.Equal(t, refB1, eq.SafeL2Head(), "L2 reset should go back to sequencer window ago: blocks with origin E and D are not safe until we reconcile, C is extra, and B1 is the end we look for") require.Equal(t, refB, eq.Origin(), "Expecting to be set back derivation L1 progress to B") require.Equal(t, refA1, eq.Finalized(), "A1 is recognized as finalized before we run any steps") @@ -874,8 +874,8 @@ func TestBlockBuildingRace(t *testing.T) { GasLimit: 20_000_000, }, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -1044,8 +1044,8 @@ func TestResetLoop(t *testing.T) { GasLimit: 20_000_000, }, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ Hash: testutils.RandomHash(rng), @@ -1143,8 +1143,8 @@ func TestEngineQueue_StepPopOlderUnsafe(t *testing.T) { GasLimit: 20_000_000, }, }, - BlockTime: 1, - ProposerWindowSize: 2, + BlockTime: 1, + SeqWindowSize: 2, } refA1 := eth.L2BlockRef{ diff --git a/components/node/rollup/driver/config.go b/components/node/rollup/driver/config.go index f2107331b..05b785d31 100644 --- a/components/node/rollup/driver/config.go +++ b/components/node/rollup/driver/config.go @@ -4,20 +4,20 @@ type Config struct { // SyncerConfDepth is the distance to keep from the L1 head when reading L1 data for L2 derivation. SyncerConfDepth uint64 `json:"syncer_conf_depth"` - // ProposerConfDepth is the distance to keep from the L1 head as origin when proposing new L2 blocks. - // If this distance is too large, the proposer may: - // - not adopt a L1 origin within the allowed time (rollup.Config.MaxProposerDrift) - // - not adopt a L1 origin that can be included on L1 within the allowed range (rollup.Config.ProposerWindowSize) + // SequencerConfDepth is the distance to keep from the L1 head as origin when sequencing new L2 blocks. + // If this distance is too large, the sequencer may: + // - not adopt a L1 origin within the allowed time (rollup.Config.MaxSequencerDrift) + // - not adopt a L1 origin that can be included on L1 within the allowed range (rollup.Config.SeqWindowSize) // and thus fail to produce a block with anything more than deposits. 
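// Illustrative (not in this diff): with SequencerConfDepth = 4, the default of
// the sequencer.l1-confs flag, and assuming ~12s L1 blocks, origin selection
// trails the L1 head by roughly 48s, trading origin freshness for protection
// against shallow L1 reorgs.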
- ProposerConfDepth uint64 `json:"proposer_conf_depth"` + SequencerConfDepth uint64 `json:"sequencer_conf_depth"` - // ProposerEnabled is true when the driver should propose new blocks. - ProposerEnabled bool `json:"proposer_enabled"` + // SequencerEnabled is true when the driver should sequence new blocks. + SequencerEnabled bool `json:"sequencer_enabled"` - // ProposerStopped is false when the driver should propose new blocks. - ProposerStopped bool `json:"proposer_stopped"` + // SequencerStopped is false when the driver should sequence new blocks. + SequencerStopped bool `json:"sequencer_stopped"` - // ProposerMaxSafeLag is the maximum number of L2 blocks for restricting the distance between L2 safe and unsafe. + // SequencerMaxSafeLag is the maximum number of L2 blocks for restricting the distance between L2 safe and unsafe. // Disabled if 0. - ProposerMaxSafeLag uint64 `json:"proposer_max_safe_lag"` + SequencerMaxSafeLag uint64 `json:"sequencer_max_safe_lag"` } diff --git a/components/node/rollup/driver/driver.go b/components/node/rollup/driver/driver.go index ba27b0ed4..8cf9a8700 100644 --- a/components/node/rollup/driver/driver.go +++ b/components/node/rollup/driver/driver.go @@ -30,7 +30,7 @@ type Metrics interface { RecordL1ReorgDepth(d uint64) EngineMetrics - ProposerMetrics + SequencerMetrics } type L1Chain interface { @@ -69,11 +69,11 @@ type L1StateIface interface { L1Finalized() eth.L1BlockRef } -type ProposerIface interface { +type SequencerIface interface { StartBuildingBlock(ctx context.Context) error CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPayload, error) - PlanNextProposerAction() time.Duration - RunNextProposerAction(ctx context.Context) (*eth.ExecutionPayload, error) + PlanNextSequencerAction() time.Duration + RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error) BuildingOnto() eth.L2BlockRef } @@ -101,25 +101,25 @@ type AltSync interface { RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error } -// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally proposes new L2 blocks. +// NewDriver composes an events handler that tracks L1 state, triggers L2 derivation, and optionally sequences new L2 blocks. 
func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, altSync AltSync, network Network, log log.Logger, snapshotLog log.Logger, metrics Metrics) *Driver { l1State := NewL1State(log, metrics) - proposerConfDepth := NewConfDepth(driverCfg.ProposerConfDepth, l1State.L1Head, l1) - findL1Origin := NewL1OriginSelector(log, cfg, proposerConfDepth) + sequencerConfDepth := NewConfDepth(driverCfg.SequencerConfDepth, l1State.L1Head, l1) + findL1Origin := NewL1OriginSelector(log, cfg, sequencerConfDepth) syncConfDepth := NewConfDepth(driverCfg.SyncerConfDepth, l1State.L1Head, l1) derivationPipeline := derive.NewDerivationPipeline(log, cfg, syncConfDepth, l2, metrics) attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, l2) engine := derivationPipeline meteredEngine := NewMeteredEngine(cfg, engine, metrics, log) - proposer := NewProposer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics) + sequencer := NewSequencer(log, cfg, meteredEngine, attrBuilder, findL1Origin, metrics) return &Driver{ l1State: l1State, derivation: derivationPipeline, stateReq: make(chan chan struct{}), forceReset: make(chan chan struct{}, 10), - startProposer: make(chan hashAndErrorChannel, 10), - stopProposer: make(chan chan hashAndError, 10), + startSequencer: make(chan hashAndErrorChannel, 10), + stopSequencer: make(chan chan hashAndError, 10), config: cfg, driverConfig: driverCfg, done: make(chan struct{}), @@ -127,7 +127,7 @@ func NewDriver(driverCfg *Config, cfg *rollup.Config, l2 L2Chain, l1 L1Chain, al snapshotLog: snapshotLog, l1: l1, l2: l2, - proposer: proposer, + sequencer: sequencer, network: network, metrics: metrics, l1HeadSig: make(chan eth.L1BlockRef, 10), diff --git a/components/node/rollup/driver/metered_engine.go b/components/node/rollup/driver/metered_engine.go index 1549b4f40..5a1b10783 100644 --- a/components/node/rollup/driver/metered_engine.go +++ b/components/node/rollup/driver/metered_engine.go @@ -15,8 +15,8 @@ type EngineMetrics interface { RecordSequencingError() CountSequencedTxs(count int) - RecordProposerBuildingDiffTime(duration time.Duration) - RecordProposerSealingTime(duration time.Duration) + RecordSequencerBuildingDiffTime(duration time.Duration) + RecordSequencerSealingTime(duration time.Duration) } // MeteredEngine wraps an EngineControl and adds metrics such as block building time diff and sealing time @@ -74,8 +74,8 @@ func (m *MeteredEngine) ConfirmPayload(ctx context.Context) (out *eth.ExecutionP now := time.Now() sealTime := now.Sub(sealingStart) buildTime := now.Sub(m.buildingStartTime) - m.metrics.RecordProposerSealingTime(sealTime) - m.metrics.RecordProposerBuildingDiffTime(buildTime - time.Duration(m.cfg.BlockTime)*time.Second) + m.metrics.RecordSequencerSealingTime(sealTime) + m.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(m.cfg.BlockTime)*time.Second) m.metrics.CountSequencedTxs(len(payload.Transactions)) ref := m.inner.UnsafeL2Head() diff --git a/components/node/rollup/driver/origin_selector.go b/components/node/rollup/driver/origin_selector.go index eb1f45ce5..1d752cdd2 100644 --- a/components/node/rollup/driver/origin_selector.go +++ b/components/node/rollup/driver/origin_selector.go @@ -45,20 +45,20 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc log := los.log.New("current", currentOrigin, "current_time", currentOrigin.Time, "l2_head", l2Head, "l2_head_time", l2Head.Time) - // If we are past the proposer depth, we may want to advance the origin, but need to still + // If we are 
past the sequencer depth, we may want to advance the origin, but need to still // check the time of the next origin. - pastPropDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+los.cfg.MaxProposerDrift - if pastPropDrift { - log.Warn("Next L2 block time is past the proposer drift + current origin time") + pastSeqDrift := l2Head.Time+los.cfg.BlockTime > currentOrigin.Time+los.cfg.MaxSequencerDrift + if pastSeqDrift { + log.Warn("Next L2 block time is past the sequencer drift + current origin time") } // Attempt to find the next L1 origin block, where the next origin is the immediate child of // the current origin block. - // The L1 source can be shimmed to hide new L1 blocks and enforce a proposer confirmation distance. + // The L1 source can be shimmed to hide new L1 blocks and enforce a sequencer confirmation distance. nextOrigin, err := los.l1.L1BlockRefByNumber(ctx, currentOrigin.Number+1) if err != nil { - if pastPropDrift { - return eth.L1BlockRef{}, fmt.Errorf("cannot build next L2 block past current L1 origin %s by more than proposer time drift, and failed to find next L1 origin: %w", currentOrigin, err) + if pastSeqDrift { + return eth.L1BlockRef{}, fmt.Errorf("cannot build next L2 block past current L1 origin %s by more than sequencer time drift, and failed to find next L1 origin: %w", currentOrigin, err) } if errors.Is(err, ethereum.NotFound) { log.Debug("No next L1 block found, repeating current origin") @@ -69,9 +69,9 @@ func (los *L1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bloc } // If the next L2 block time is greater than the next origin block's time, we can choose to - // start building on top of the next origin. Proposer implementation has some leeway here and - // could decide to continue to build on top of the previous origin until the Proposer runs out - // of slack. For simplicity, we implement our Proposer to always start building on the latest + // start building on top of the next origin. Sequencer implementation has some leeway here and + // could decide to continue to build on top of the previous origin until the Sequencer runs out + // of slack. For simplicity, we implement our Sequencer to always start building on the latest // L1 block when we can. if l2Head.Time+los.cfg.BlockTime >= nextOrigin.Time { return nextOrigin, nil diff --git a/components/node/rollup/driver/origin_selector_test.go b/components/node/rollup/driver/origin_selector_test.go index f803dc47b..fee99217c 100644 --- a/components/node/rollup/driver/origin_selector_test.go +++ b/components/node/rollup/driver/origin_selector_test.go @@ -24,8 +24,8 @@ import ( func TestOriginSelectorAdvances(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 500, - BlockTime: 2, + MaxSequencerDrift: 500, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -65,8 +65,8 @@ func TestOriginSelectorAdvances(t *testing.T) { func TestOriginSelectorRespectsOriginTiming(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 500, - BlockTime: 2, + MaxSequencerDrift: 500, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -100,13 +100,13 @@ func TestOriginSelectorRespectsOriginTiming(t *testing.T) { // // There are 2 L1 blocks at time 20 & 25. The L2 Head is at time 27. 
// The next L2 time is 29 which enough to normally select block `b` -// as the origin, however block `b` is the L1 Head & the proposer +// as the origin, however block `b` is the L1 Head & the sequencer // needs to wait until that block is confirmed enough before advancing. func TestOriginSelectorRespectsConfDepth(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 500, - BlockTime: 2, + MaxSequencerDrift: 500, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -135,21 +135,21 @@ func TestOriginSelectorRespectsConfDepth(t *testing.T) { require.Equal(t, a, next) } -// TestOriginSelectorStrictConfDepth ensures that the origin selector will maintain the proposer conf depth, +// TestOriginSelectorStrictConfDepth ensures that the origin selector will maintain the sequencer conf depth, // even while the time delta between the current L1 origin and the next -// L2 block is greater than the proposer drift. +// L2 block is greater than the sequencer drift. // It's more important to maintain safety with an empty block than to maintain liveness with poor conf depth. // // There are 2 L1 blocks at time 20 & 25. The L2 Head is at time 27. -// The next L2 time is 29. The proposer drift is 8 so the L2 head is +// The next L2 time is 29. The sequencer drift is 8 so the L2 head is // valid with origin `a`, but the next L2 block is not valid with origin `b.` -// This is because 29 (next L2 time) > 20 (origin) + 8 (proposer drift) => invalid block. +// This is because 29 (next L2 time) > 20 (origin) + 8 (sequencer drift) => invalid block. // We maintain confirmation distance, even though we would shift to the next origin if we could. func TestOriginSelectorStrictConfDepth(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 8, - BlockTime: 2, + MaxSequencerDrift: 8, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -174,20 +174,20 @@ func TestOriginSelectorStrictConfDepth(t *testing.T) { s := NewL1OriginSelector(log, cfg, confDepthL1) _, err := s.FindL1Origin(context.Background(), l2Head) - require.ErrorContains(t, err, "proposer time drift") + require.ErrorContains(t, err, "sequencer time drift") } -// TestOriginSelectorPropDriftRespectsNextOriginTime +// TestOriginSelectorSeqDriftRespectsNextOriginTime // // There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27. -// The next L2 time is 29. Even though the next L2 time is past the proposer +// The next L2 time is 29. Even though the next L2 time is past the sequencer // drift, the origin should remain on block `a` because the next origin's // time is greater than the next L2 time. -func TestOriginSelectorPropDriftRespectsNextOriginTime(t *testing.T) { +func TestOriginSelectorSeqDriftRespectsNextOriginTime(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 8, - BlockTime: 2, + MaxSequencerDrift: 8, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -220,7 +220,7 @@ func TestOriginSelectorPropDriftRespectsNextOriginTime(t *testing.T) { // but with a conf depth that first prevents it from learning about the need to repeat. // // There are 2 L1 blocks at time 20 & 100. The L2 Head is at time 27. -// The next L2 time is 29. Even though the next L2 time is past the proposer +// The next L2 time is 29. 
Even though the next L2 time is past the sequencer // drift, the origin should remain on block `a` because the next origin's // time is greater than the next L2 time. // Due to a conf depth of 2, block `b` is not immediately visible, @@ -228,8 +228,8 @@ func TestOriginSelectorPropDriftRespectsNextOriginTime(t *testing.T) { func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { log := testlog.Logger(t, log.LvlCrit) cfg := &rollup.Config{ - MaxProposerDrift: 8, - BlockTime: 2, + MaxSequencerDrift: 8, + BlockTime: 2, } l1 := &testutils.MockL1Source{} defer l1.AssertExpectations(t) @@ -272,11 +272,11 @@ func TestOriginSelectorHandlesLateL1Blocks(t *testing.T) { s := NewL1OriginSelector(log, cfg, confDepthL1) _, err := s.FindL1Origin(context.Background(), l2Head) - require.ErrorContains(t, err, "proposer time drift") + require.ErrorContains(t, err, "sequencer time drift") l1Head = c _, err = s.FindL1Origin(context.Background(), l2Head) - require.ErrorContains(t, err, "proposer time drift") + require.ErrorContains(t, err, "sequencer time drift") l1Head = d next, err := s.FindL1Origin(context.Background(), l2Head) diff --git a/components/node/rollup/driver/proposer.go b/components/node/rollup/driver/sequencer.go similarity index 57% rename from components/node/rollup/driver/proposer.go rename to components/node/rollup/driver/sequencer.go index 3e1101522..aaa45bf3b 100644 --- a/components/node/rollup/driver/proposer.go +++ b/components/node/rollup/driver/sequencer.go @@ -24,13 +24,13 @@ type L1OriginSelectorIface interface { FindL1Origin(ctx context.Context, l2Head eth.L2BlockRef) (eth.L1BlockRef, error) } -type ProposerMetrics interface { - RecordProposerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) - RecordProposerReset() +type SequencerMetrics interface { + RecordSequencerInconsistentL1Origin(from eth.BlockID, to eth.BlockID) + RecordSequencerReset() } -// Proposer implements the proposing interface of the driver: it starts and completes block building jobs. -type Proposer struct { +// Sequencer implements the sequencing interface of the driver: it starts and completes block building jobs. +type Sequencer struct { log log.Logger config *rollup.Config @@ -39,16 +39,16 @@ type Proposer struct { attrBuilder derive.AttributesBuilder l1OriginSelector L1OriginSelectorIface - metrics ProposerMetrics + metrics SequencerMetrics - // timeNow enables proposer testing to mock the time + // timeNow enables sequencer testing to mock the time timeNow func() time.Time nextAction time.Time } -func NewProposer(log log.Logger, cfg *rollup.Config, engine derive.ResettableEngineControl, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, metrics ProposerMetrics) *Proposer { - return &Proposer{ +func NewSequencer(log log.Logger, cfg *rollup.Config, engine derive.ResettableEngineControl, attributesBuilder derive.AttributesBuilder, l1OriginSelector L1OriginSelectorIface, metrics SequencerMetrics) *Sequencer { + return &Sequencer{ log: log, config: cfg, engine: engine, @@ -60,43 +60,43 @@ func NewProposer(log log.Logger, cfg *rollup.Config, engine derive.ResettableEng } // StartBuildingBlock initiates a block building job on top of the given L2 head, safe and finalized blocks, and using the provided l1Origin. 
-func (p *Proposer) StartBuildingBlock(ctx context.Context) error { - l2Head := p.engine.UnsafeL2Head() +func (d *Sequencer) StartBuildingBlock(ctx context.Context) error { + l2Head := d.engine.UnsafeL2Head() // Figure out which L1 origin block we're going to be building on top of. - l1Origin, err := p.l1OriginSelector.FindL1Origin(ctx, l2Head) + l1Origin, err := d.l1OriginSelector.FindL1Origin(ctx, l2Head) if err != nil { - p.log.Error("Error finding next L1 Origin", "err", err) + d.log.Error("Error finding next L1 Origin", "err", err) return err } if !(l2Head.L1Origin.Hash == l1Origin.ParentHash || l2Head.L1Origin.Hash == l1Origin.Hash) { - p.metrics.RecordProposerInconsistentL1Origin(l2Head.L1Origin, l1Origin.ID()) + d.metrics.RecordSequencerInconsistentL1Origin(l2Head.L1Origin, l1Origin.ID()) return derive.NewResetError(fmt.Errorf("cannot build new L2 block with L1 origin %s (parent L1 %s) on current L2 head %s with L1 origin %s", l1Origin, l1Origin.ParentHash, l2Head, l2Head.L1Origin)) } - p.log.Info("creating new block", "parent", l2Head, "l1Origin", l1Origin) + d.log.Info("creating new block", "parent", l2Head, "l1Origin", l1Origin) fetchCtx, cancel := context.WithTimeout(ctx, time.Second*20) defer cancel() - attrs, err := p.attrBuilder.PreparePayloadAttributes(fetchCtx, l2Head, l1Origin.ID()) + attrs, err := d.attrBuilder.PreparePayloadAttributes(fetchCtx, l2Head, l1Origin.ID()) if err != nil { return err } - // If our next L2 block timestamp is beyond the Proposer drift threshold, then we must produce + // If our next L2 block timestamp is beyond the Sequencer drift threshold, then we must produce // empty blocks (other than the L1 info deposit and any user deposits). We handle this by - // setting NoTxPool to true, which will cause the Proposer to not include any transactions + // setting NoTxPool to true, which will cause the Sequencer to not include any transactions // from the transaction pool. - attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+p.config.MaxProposerDrift + attrs.NoTxPool = uint64(attrs.Timestamp) > l1Origin.Time+d.config.MaxSequencerDrift - p.log.Debug("prepared attributes for new block", + d.log.Debug("prepared attributes for new block", "num", l2Head.Number+1, "time", uint64(attrs.Timestamp), "origin", l1Origin, "origin_time", l1Origin.Time, "noTxPool", attrs.NoTxPool) // Start a payload building process. - errTyp, err := p.engine.StartPayload(ctx, l2Head, attrs, false) + errTyp, err := d.engine.StartPayload(ctx, l2Head, attrs, false) if err != nil { return fmt.Errorf("failed to start building on top of L2 chain %s, error (%d): %w", l2Head, errTyp, err) } @@ -106,8 +106,8 @@ func (p *Proposer) StartBuildingBlock(ctx context.Context) error { // CompleteBuildingBlock takes the current block that is being built, and asks the engine to complete the building, seal the block, and persist it as canonical. // Warning: the safe and finalized L2 blocks as viewed during the initiation of the block building are reused for completion of the block building. // The Execution engine should not change the safe and finalized blocks between start and completion of block building. 
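The NoTxPool assignment in StartBuildingBlock above is the drift rule at work: once the next L2 timestamp outruns the L1 origin's timestamp by more than MaxSequencerDrift, only otherwise-empty, deposit-carrying blocks may be produced. A minimal sketch of the comparison (hypothetical helper, same arithmetic as the line above):

package main

import "fmt"

// mustBeEmpty mirrors the NoTxPool condition: true means the block may only
// contain the L1 info deposit and any user deposits, no tx-pool transactions.
func mustBeEmpty(nextL2Time, originTime, maxSeqDrift uint64) bool {
	return nextL2Time > originTime+maxSeqDrift
}

func main() {
	fmt.Println(mustBeEmpty(131, 100, 30)) // true: past the drift window, no pool txs
	fmt.Println(mustBeEmpty(130, 100, 30)) // false: still within the drift window
}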
-func (p *Proposer) CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPayload, error) { - payload, errTyp, err := p.engine.ConfirmPayload(ctx) +func (d *Sequencer) CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPayload, error) { + payload, errTyp, err := d.engine.ConfirmPayload(ctx) if err != nil { return nil, fmt.Errorf("failed to complete building block: error (%d): %w", errTyp, err) } @@ -115,35 +115,35 @@ func (p *Proposer) CompleteBuildingBlock(ctx context.Context) (*eth.ExecutionPay } // CancelBuildingBlock cancels the current open block building job. -// This proposer only maintains one block building job at a time. -func (p *Proposer) CancelBuildingBlock(ctx context.Context) { +// This sequencer only maintains one block building job at a time. +func (d *Sequencer) CancelBuildingBlock(ctx context.Context) { // force-cancel, we can always continue block building, and any error is logged by the engine state - _ = p.engine.CancelPayload(ctx, true) + _ = d.engine.CancelPayload(ctx, true) } -// PlanNextProposerAction returns a desired delay till the RunNextProposerAction call. -func (p *Proposer) PlanNextProposerAction() time.Duration { +// PlanNextSequencerAction returns a desired delay till the RunNextSequencerAction call. +func (d *Sequencer) PlanNextSequencerAction() time.Duration { // If the engine is busy building safe blocks (and thus changing the head that we would sync on top of), // then give it time to sync up. - if onto, _, safe := p.engine.BuildingPayload(); safe { - p.log.Warn("delaying proposing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) - // approximates the worst-case time it takes to build a block, to reattempt proposing after. - return time.Second * time.Duration(p.config.BlockTime) + if onto, _, safe := d.engine.BuildingPayload(); safe { + d.log.Warn("delaying sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) + // approximates the worst-case time it takes to build a block, to reattempt sequencing after. + return time.Second * time.Duration(d.config.BlockTime) } - head := p.engine.UnsafeL2Head() - now := p.timeNow() + head := d.engine.UnsafeL2Head() + now := d.timeNow() - buildingOnto, buildingID, _ := p.engine.BuildingPayload() + buildingOnto, buildingID, _ := d.engine.BuildingPayload() - // We may have to wait till the next proposing action, e.g. upon an error. - // If the head changed we need to respond and will not delay the proposing. - if delay := p.nextAction.Sub(now); delay > 0 && buildingOnto.Hash == head.Hash { + // We may have to wait till the next sequencing action, e.g. upon an error. + // If the head changed we need to respond and will not delay the sequencing. + if delay := d.nextAction.Sub(now); delay > 0 && buildingOnto.Hash == head.Hash { return delay } - blockTime := time.Duration(p.config.BlockTime) * time.Second - payloadTime := time.Unix(int64(head.Time+p.config.BlockTime), 0) + blockTime := time.Duration(d.config.BlockTime) * time.Second + payloadTime := time.Unix(int64(head.Time+d.config.BlockTime), 0) remainingTime := payloadTime.Sub(now) // If we started building a block already, and if that work is still consistent, @@ -169,13 +169,13 @@ func (p *Proposer) PlanNextProposerAction() time.Duration { } // BuildingOnto returns the L2 head reference that the latest block is or was being built on top of. 
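PlanNextSequencerAction above works back from a target seal time of head.Time + BlockTime. A simplified, self-contained model of that scheduling math; it deliberately ignores the safe-head and error-backoff branches of the real function:

package main

import (
	"fmt"
	"time"
)

// planDelay: the next payload is due at headTime+blockTime; if a build job is
// already running, wait until that seal time, otherwise start early enough to
// leave a full block time for building. Illustrative only.
func planDelay(now time.Time, headTime, blockTime uint64, building bool) time.Duration {
	payloadTime := time.Unix(int64(headTime+blockTime), 0)
	remaining := payloadTime.Sub(now)
	if building {
		if remaining < 0 {
			return 0 // behind schedule: seal as soon as possible
		}
		return remaining
	}
	if d := remaining - time.Duration(blockTime)*time.Second; d > 0 {
		return d // idle: start building one block time before the seal target
	}
	return 0 // already late: start building immediately
}

func main() {
	now := time.Unix(1000, 0)
	fmt.Println(planDelay(now, 1000, 2, false)) // 0s: start building right away
	fmt.Println(planDelay(now, 1002, 2, true))  // 4s: wait, then seal at t=1004
}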
-func (p *Proposer) BuildingOnto() eth.L2BlockRef { - ref, _, _ := p.engine.BuildingPayload() +func (d *Sequencer) BuildingOnto() eth.L2BlockRef { + ref, _, _ := d.engine.BuildingPayload() return ref } -// RunNextProposerAction starts new block building work, or seals existing work, -// and is best timed by first awaiting the delay returned by PlanNextProposerAction. +// RunNextSequencerAction starts new block building work, or seals existing work, +// and is best timed by first awaiting the delay returned by PlanNextSequencerAction. // If a new block is successfully sealed, it will be returned for publishing, nil otherwise. // // Only critical errors are bubbled up, other errors are handled internally. @@ -187,68 +187,68 @@ func (p *Proposer) BuildingOnto() eth.L2BlockRef { // - If it is any other error, a backoff is applied and building is cancelled. // // Upon L1 reorgs that are deep enough to affect the L1 origin selection, a reset-error may occur, -// to direct the engine to follow the new L1 chain before continuing to propose blocks. +// to direct the engine to follow the new L1 chain before continuing to sequence blocks. // It is up to the EngineControl implementation to handle conflicting build jobs of the derivation -// process (as syncer) and proposing process. +// process (as syncer) and sequencing process. // Generally it is expected that the latest call interrupts any ongoing work, // and the derivation process does not interrupt in the happy case, -// since it can consolidate previously proposed blocks by comparing proposed inputs with derived inputs. -// If the derivation pipeline does force a conflicting block, then an ongoing proposer task might still finish, +// since it can consolidate previously sequenced blocks by comparing sequenced inputs with derived inputs. +// If the derivation pipeline does force a conflicting block, then an ongoing sequencer task might still finish, // but the derivation can continue to reset until the chain is correct. -// If the engine is currently building safe blocks, then that building is not interrupted, and proposing is delayed. -func (p *Proposer) RunNextProposerAction(ctx context.Context) (*eth.ExecutionPayload, error) { - if onto, buildingID, safe := p.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) { +// If the engine is currently building safe blocks, then that building is not interrupted, and sequencing is delayed. +func (d *Sequencer) RunNextSequencerAction(ctx context.Context) (*eth.ExecutionPayload, error) { + if onto, buildingID, safe := d.engine.BuildingPayload(); buildingID != (eth.PayloadID{}) { if safe { - p.log.Warn("avoiding proposing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) - // approximates the worst-case time it takes to build a block, to reattempt proposing after. - p.nextAction = p.timeNow().Add(time.Second * time.Duration(p.config.BlockTime)) + d.log.Warn("avoiding sequencing to not interrupt safe-head changes", "onto", onto, "onto_time", onto.Time) + // approximates the worst-case time it takes to build a block, to reattempt sequencing after. + d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime)) return nil, nil } - payload, err := p.CompleteBuildingBlock(ctx) + payload, err := d.CompleteBuildingBlock(ctx) if err != nil { if errors.Is(err, derive.ErrCritical) { return nil, err // bubble up critical errors. 
} else if errors.Is(err, derive.ErrReset) { - p.log.Error("proposer failed to seal new block, requiring derivation reset", "err", err) - p.metrics.RecordProposerReset() - p.nextAction = p.timeNow().Add(time.Second * time.Duration(p.config.BlockTime)) // hold off from proposing for a full block - p.CancelBuildingBlock(ctx) - p.engine.Reset() + d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.metrics.RecordSequencerReset() + d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime)) // hold off from sequencing for a full block + d.CancelBuildingBlock(ctx) + d.engine.Reset() } else if errors.Is(err, derive.ErrTemporary) { - p.log.Error("proposer failed temporarily to seal new block", "err", err) - p.nextAction = p.timeNow().Add(time.Second) + d.log.Error("sequencer failed temporarily to seal new block", "err", err) + d.nextAction = d.timeNow().Add(time.Second) // We don't explicitly cancel block building jobs upon temporary errors: we may still finish the block. // Any unfinished block building work eventually times out, and will be cleaned up that way. } else { - p.log.Error("proposer failed to seal block with unclassified error", "err", err) - p.nextAction = p.timeNow().Add(time.Second) - p.CancelBuildingBlock(ctx) + d.log.Error("sequencer failed to seal block with unclassified error", "err", err) + d.nextAction = d.timeNow().Add(time.Second) + d.CancelBuildingBlock(ctx) } return nil, nil } else { - p.log.Info("proposer successfully built a new block", "block", payload.ID(), "time", uint64(payload.Timestamp), "txs", len(payload.Transactions)) + d.log.Info("sequencer successfully built a new block", "block", payload.ID(), "time", uint64(payload.Timestamp), "txs", len(payload.Transactions)) return payload, nil } } else { - err := p.StartBuildingBlock(ctx) + err := d.StartBuildingBlock(ctx) if err != nil { if errors.Is(err, derive.ErrCritical) { return nil, err } else if errors.Is(err, derive.ErrReset) { - p.log.Error("proposer failed to seal new block, requiring derivation reset", "err", err) - p.metrics.RecordProposerReset() - p.nextAction = p.timeNow().Add(time.Second * time.Duration(p.config.BlockTime)) // hold off from proposing for a full block - p.engine.Reset() + d.log.Error("sequencer failed to seal new block, requiring derivation reset", "err", err) + d.metrics.RecordSequencerReset() + d.nextAction = d.timeNow().Add(time.Second * time.Duration(d.config.BlockTime)) // hold off from sequencing for a full block + d.engine.Reset() } else if errors.Is(err, derive.ErrTemporary) { - p.log.Error("proposer temporarily failed to start building new block", "err", err) - p.nextAction = p.timeNow().Add(time.Second) + d.log.Error("sequencer temporarily failed to start building new block", "err", err) + d.nextAction = d.timeNow().Add(time.Second) } else { - p.log.Error("proposer failed to start building new block with unclassified error", "err", err) - p.nextAction = p.timeNow().Add(time.Second) + d.log.Error("sequencer failed to start building new block with unclassified error", "err", err) + d.nextAction = d.timeNow().Add(time.Second) } } else { - parent, buildingID, _ := p.engine.BuildingPayload() // we should have a new payload ID now that we're building a block - p.log.Info("proposer started building new block", "payload_id", buildingID, "l2_parent_block", parent, "l2_parent_block_time", parent.Time) + parent, buildingID, _ := d.engine.BuildingPayload() // we should have a new payload ID now that we're building a block + 
d.log.Info("sequencer started building new block", "payload_id", buildingID, "l2_parent_block", parent, "l2_parent_block_time", parent.Time) } return nil, nil } diff --git a/components/node/rollup/driver/proposer_test.go b/components/node/rollup/driver/sequencer_test.go similarity index 95% rename from components/node/rollup/driver/proposer_test.go rename to components/node/rollup/driver/sequencer_test.go index c08af7277..cefc871cf 100644 --- a/components/node/rollup/driver/proposer_test.go +++ b/components/node/rollup/driver/sequencer_test.go @@ -147,10 +147,10 @@ func (fn testOriginSelectorFn) FindL1Origin(ctx context.Context, l2Head eth.L2Bl var _ L1OriginSelectorIface = (testOriginSelectorFn)(nil) -// TestProposerChaosMonkey runs the proposer in a mocked adversarial environment with +// TestSequencerChaosMonkey runs the sequencer in a mocked adversarial environment with // repeated random errors in dependencies and poor clock timing. -// At the end the health of the chain is checked to show that the proposer kept the chain in shape. -func TestProposerChaosMonkey(t *testing.T) { +// At the end the health of the chain is checked to show that the sequencer kept the chain in shape. +func TestSequencerChaosMonkey(t *testing.T) { mockL1Hash := func(num uint64) (out common.Hash) { out[31] = 1 binary.BigEndian.PutUint64(out[:], num) @@ -182,8 +182,8 @@ func TestProposerChaosMonkey(t *testing.T) { L2Time: l1Time + 300, // L2 may start with a relative old L1 origin and will have to catch it up SystemConfig: eth.SystemConfig{}, }, - BlockTime: 2, - MaxProposerDrift: 30, + BlockTime: 2, + MaxSequencerDrift: 30, } // keep track of the L1 timestamps we mock because sometimes we only have the L1 hash/num handy l1Times := map[eth.BlockID]uint64{cfg.Genesis.L1: l1Time} @@ -204,7 +204,7 @@ func TestProposerChaosMonkey(t *testing.T) { cfg: cfg, } - // start wallclock at 5 minutes after the current L2 head. The proposer has some catching up to do! + // start wallclock at 5 minutes after the current L2 head. The sequencer has some catching up to do! clockTime := time.Unix(int64(engControl.unsafe.Time)+5*60, 0) clockFn := func() time.Time { return clockTime @@ -308,13 +308,13 @@ func TestProposerChaosMonkey(t *testing.T) { } }) - proposer := NewProposer(log, cfg, engControl, attrBuilder, originSelector, metrics.NoopMetrics) - proposer.timeNow = clockFn + seq := NewSequencer(log, cfg, engControl, attrBuilder, originSelector, metrics.NoopMetrics) + seq.timeNow = clockFn // try to build 1000 blocks, with 5x as many planning attempts, to handle errors and clock problems desiredBlocks := 1000 for i := 0; i < 5*desiredBlocks && engControl.totalBuiltBlocks < desiredBlocks; i++ { - delta := proposer.PlanNextProposerAction() + delta := seq.PlanNextSequencerAction() x := rng.Float32() if x < 0.01 { // 1%: mess a lot with the clock: simulate a hang of up to 30 seconds @@ -332,13 +332,13 @@ func TestProposerChaosMonkey(t *testing.T) { // reset errors originErr = nil attrsErr = nil - if engControl.err != mockResetErr { // the mockResetErr requires the proposer to Reset() to recover. + if engControl.err != mockResetErr { // the mockResetErr requires the sequencer to Reset() to recover. engControl.err = nil } engControl.errTyp = derive.BlockInsertOK // maybe make something maybe fail, or try a new L1 origin - switch rng.Intn(20) { // 9/20 = 45% chance to fail proposer action (!!!) + switch rng.Intn(20) { // 9/20 = 45% chance to fail sequencer action (!!!) 
case 0, 1: originErr = errors.New("mock origin error") case 2, 3: @@ -354,7 +354,7 @@ func TestProposerChaosMonkey(t *testing.T) { default: // no error } - payload, err := proposer.RunNextProposerAction(context.Background()) + payload, err := seq.RunNextSequencerAction(context.Background()) require.NoError(t, err) if payload != nil { require.Equal(t, engControl.UnsafeL2Head().ID(), payload.ID(), "head must stay in sync with emitted payloads") diff --git a/components/node/rollup/driver/state.go b/components/node/rollup/driver/state.go index 4d4c1e810..4fccf44a4 100644 --- a/components/node/rollup/driver/state.go +++ b/components/node/rollup/driver/state.go @@ -39,18 +39,18 @@ type Driver struct { // It tells the caller that the reset occurred by closing the passed in channel. forceReset chan chan struct{} - // Upon receiving a hash in this channel, the proposer is started at the given hash. - // It tells the caller that the proposer started by closing the passed in channel (or returning an error). - startProposer chan hashAndErrorChannel + // Upon receiving a hash in this channel, the sequencer is started at the given hash. + // It tells the caller that the sequencer started by closing the passed in channel (or returning an error). + startSequencer chan hashAndErrorChannel - // Upon receiving a channel in this channel, the proposer is stopped. - // It tells the caller that the proposer stopped by returning the latest proposed L2 block hash. - stopProposer chan chan hashAndError + // Upon receiving a channel in this channel, the sequencer is stopped. + // It tells the caller that the sequencer stopped by returning the latest sequenced L2 block hash. + stopSequencer chan chan hashAndError // Rollup config: rollup chain configuration config *rollup.Config - // Driver config: syncer and proposer settings + // Driver config: syncer and sequencer settings driverConfig *Config // L1 Signals: @@ -69,10 +69,10 @@ type Driver struct { // L2 Signals: unsafeL2Payloads chan *eth.ExecutionPayload - l1 L1Chain - l2 L2Chain - proposer ProposerIface - network Network // may be nil, network for is optional + l1 L1Chain + l2 L2Chain + sequencer SequencerIface + network Network // may be nil; networking is optional metrics Metrics log log.Logger @@ -84,65 +84,65 @@ type Driver struct { // Start starts up the state loop. // The loop will have been started iff err is nil. -func (d *Driver) Start() error { - d.derivation.Reset() +func (s *Driver) Start() error { + s.derivation.Reset() - d.wg.Add(1) - go d.eventLoop() + s.wg.Add(1) + go s.eventLoop() return nil } -func (d *Driver) Close() error { - d.done <- struct{}{} - d.wg.Wait() +func (s *Driver) Close() error { + s.done <- struct{}{} + s.wg.Wait() return nil } // OnL1Head signals the driver that the L1 chain changed the "unsafe" block, // also known as head of the chain, or "latest". -func (d *Driver) OnL1Head(ctx context.Context, unsafe eth.L1BlockRef) error { +func (s *Driver) OnL1Head(ctx context.Context, unsafe eth.L1BlockRef) error { select { case <-ctx.Done(): return ctx.Err() - case d.l1HeadSig <- unsafe: + case s.l1HeadSig <- unsafe: return nil } } // OnL1Safe signals the driver that the L1 chain changed the "safe" block, // also known as the justified checkpoint (as seen on L1 beacon-chain). 
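The OnL1Head/OnL1Safe/OnL1Finalized methods all use the same idiom: a blocking send into the event loop's channel, raced against context cancellation so a stalled loop cannot hang the caller indefinitely. A runnable distillation of the pattern (toy channel type, illustrative only):

package main

import (
	"context"
	"fmt"
	"time"
)

// signal delivers v to an event loop's channel, giving up when ctx is cancelled.
func signal(ctx context.Context, ch chan<- int, v int) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // loop is stalled or shutting down; don't hang the caller
	case ch <- v:
		return nil
	}
}

func main() {
	ch := make(chan int) // unbuffered, and nothing is receiving
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(signal(ctx, ch, 42)) // context deadline exceeded
}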
-func (d *Driver) OnL1Safe(ctx context.Context, safe eth.L1BlockRef) error { +func (s *Driver) OnL1Safe(ctx context.Context, safe eth.L1BlockRef) error { select { case <-ctx.Done(): return ctx.Err() - case d.l1SafeSig <- safe: + case s.l1SafeSig <- safe: return nil } } -func (d *Driver) OnL1Finalized(ctx context.Context, finalized eth.L1BlockRef) error { +func (s *Driver) OnL1Finalized(ctx context.Context, finalized eth.L1BlockRef) error { select { case <-ctx.Done(): return ctx.Err() - case d.l1FinalizedSig <- finalized: + case s.l1FinalizedSig <- finalized: return nil } } -func (d *Driver) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayload) error { +func (s *Driver) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayload) error { select { case <-ctx.Done(): return ctx.Err() - case d.unsafeL2Payloads <- payload: + case s.unsafeL2Payloads <- payload: return nil } } // the eventLoop responds to L1 changes and internal timers to produce L2 blocks. -func (d *Driver) eventLoop() { - defer d.wg.Done() - d.log.Info("State loop started") +func (s *Driver) eventLoop() { + defer s.wg.Done() + s.log.Info("State loop started") ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -172,10 +172,10 @@ func (d *Driver) eventLoop() { // if this is not the first attempt, we re-schedule with a backoff, *without blocking other events* if delayedStepReq == nil { delay := bOffStrategy.Duration(stepAttempts) - d.log.Debug("scheduling re-attempt with delay", "attempts", stepAttempts, "delay", delay) + s.log.Debug("scheduling re-attempt with delay", "attempts", stepAttempts, "delay", delay) delayedStepReq = time.After(delay) } else { - d.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", stepAttempts) + s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", stepAttempts) } } else { step() @@ -187,168 +187,168 @@ func (d *Driver) eventLoop() { // L1 chain that we need to handle. reqStep() - proposerTimer := time.NewTimer(0) - var proposerCh <-chan time.Time - planProposerAction := func() { - delay := d.proposer.PlanNextProposerAction() - proposerCh = proposerTimer.C - if len(proposerCh) > 0 { // empty if not already drained before resetting - <-proposerCh + sequencerTimer := time.NewTimer(0) + var sequencerCh <-chan time.Time + planSequencerAction := func() { + delay := s.sequencer.PlanNextSequencerAction() + sequencerCh = sequencerTimer.C + if len(sequencerCh) > 0 { // empty if not already drained before resetting + <-sequencerCh } - proposerTimer.Reset(delay) + sequencerTimer.Reset(delay) } // Create a ticker to check if there is a gap in the engine queue. Whenever // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(d.config.BlockTime) * time.Second * 2 + syncCheckInterval := time.Duration(s.config.BlockTime) * time.Second * 2 altSyncTicker := time.NewTicker(syncCheckInterval) defer altSyncTicker.Stop() - lastUnsafeL2 := d.derivation.UnsafeL2Head() + lastUnsafeL2 := s.derivation.UnsafeL2Head() for { - // If we are proposing, and the L1 state is ready, update the trigger for the next proposer action. + // If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action. // This may adjust at any time based on fork-choice changes or previous errors. // And avoid sequencing if the derivation pipeline indicates the engine is not ready. 
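planSequencerAction above drains sequencerTimer.C before calling Reset because a time.Timer that has already fired leaves its tick buffered in the channel; without the drain, the next receive would return immediately instead of after the new delay. A standalone demonstration of the drain-then-reset idiom:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.NewTimer(0)
	time.Sleep(10 * time.Millisecond) // let the timer fire; its tick now sits in t.C

	if len(t.C) > 0 { // drain the stale tick, as planSequencerAction does above
		<-t.C
	}
	t.Reset(50 * time.Millisecond)

	start := time.Now()
	<-t.C // without the drain this would return immediately via the stale tick
	fmt.Println(time.Since(start) >= 50*time.Millisecond) // true
}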
- if d.driverConfig.ProposerEnabled && !d.driverConfig.ProposerStopped && - d.l1State.L1Head() != (eth.L1BlockRef{}) && d.derivation.EngineReady() { - if d.driverConfig.ProposerMaxSafeLag > 0 && d.derivation.SafeL2Head().Number+d.driverConfig.ProposerMaxSafeLag <= d.derivation.UnsafeL2Head().Number { + if s.driverConfig.SequencerEnabled && !s.driverConfig.SequencerStopped && + s.l1State.L1Head() != (eth.L1BlockRef{}) && s.derivation.EngineReady() { + if s.driverConfig.SequencerMaxSafeLag > 0 && s.derivation.SafeL2Head().Number+s.driverConfig.SequencerMaxSafeLag <= s.derivation.UnsafeL2Head().Number { // If the safe head has fallen behind by a significant number of blocks, delay creating new blocks - // until the safe lag is below ProposerMaxSafeLag. - if proposerCh != nil { - d.log.Warn( + // until the safe lag is below SequencerMaxSafeLag. + if sequencerCh != nil { + s.log.Warn( "Delay creating new block since safe lag exceeds limit", - "safe_l2", d.derivation.SafeL2Head(), - "unsafe_l2", d.derivation.UnsafeL2Head(), + "safe_l2", s.derivation.SafeL2Head(), + "unsafe_l2", s.derivation.UnsafeL2Head(), ) - proposerCh = nil + sequencerCh = nil } - } else if d.proposer.BuildingOnto().ID() != d.derivation.UnsafeL2Head().ID() { - // If we are sequencing, and the L1 state is ready, update the trigger for the next proposer action. + } else if s.sequencer.BuildingOnto().ID() != s.derivation.UnsafeL2Head().ID() { + // If we are sequencing, and the L1 state is ready, update the trigger for the next sequencer action. // This may adjust at any time based on fork-choice changes or previous errors. // - // update proposer time if the head changed - planProposerAction() + // update sequencer time if the head changed + planSequencerAction() } } else { - proposerCh = nil + sequencerCh = nil } // If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync: // there is no need to request L2 blocks when we are syncing already. - if head := d.derivation.UnsafeL2Head(); head != lastUnsafeL2 || !d.derivation.EngineReady() { + if head := s.derivation.UnsafeL2Head(); head != lastUnsafeL2 || !s.derivation.EngineReady() { lastUnsafeL2 = head altSyncTicker.Reset(syncCheckInterval) } select { - case <-proposerCh: - payload, err := d.proposer.RunNextProposerAction(ctx) + case <-sequencerCh: + payload, err := s.sequencer.RunNextSequencerAction(ctx) if err != nil { - d.log.Error("Proposer critical error", "err", err) + s.log.Error("Sequencer critical error", "err", err) return } - if d.network != nil && payload != nil { + if s.network != nil && payload != nil { // Publishing of unsafe data via p2p is optional. - // Errors are not severe enough to change/halt proposing but should be logged and metered. - if err := d.network.PublishL2Payload(ctx, payload); err != nil { - d.log.Warn("failed to publish newly created block", "id", payload.ID(), "err", err) - d.metrics.RecordPublishingError() + // Errors are not severe enough to change/halt sequencing but should be logged and metered. + if err := s.network.PublishL2Payload(ctx, payload); err != nil { + s.log.Warn("failed to publish newly created block", "id", payload.ID(), "err", err) + s.metrics.RecordPublishingError() } } - planProposerAction() // schedule the next proposer action to keep the proposing looping + planSequencerAction() // schedule the next sequencer action to keep the sequencing looping case <-altSyncTicker.C: func() { // Check if there is a gap in the current unsafe payload queue. 
ctx, cancel := context.WithTimeout(ctx, time.Second*2) defer cancel() - err := d.checkForGapInUnsafeQueue(ctx) + err := s.checkForGapInUnsafeQueue(ctx) if err != nil { - d.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err) + s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err) } }() - case payload := <-d.unsafeL2Payloads: - d.snapshot("New unsafe payload") - d.log.Info("Optimistically queueing unsafe L2 execution payload", "id", payload.ID()) - d.derivation.AddUnsafePayload(payload) - d.metrics.RecordReceivedUnsafePayload(payload) + case payload := <-s.unsafeL2Payloads: + s.snapshot("New unsafe payload") + s.log.Info("Optimistically queueing unsafe L2 execution payload", "id", payload.ID()) + s.derivation.AddUnsafePayload(payload) + s.metrics.RecordReceivedUnsafePayload(payload) reqStep() - case newL1Head := <-d.l1HeadSig: - d.l1State.HandleNewL1HeadBlock(newL1Head) + case newL1Head := <-s.l1HeadSig: + s.l1State.HandleNewL1HeadBlock(newL1Head) reqStep() // a new L1 head may mean we have the data to not get an EOF again. - case newL1Safe := <-d.l1SafeSig: - d.l1State.HandleNewL1SafeBlock(newL1Safe) + case newL1Safe := <-s.l1SafeSig: + s.l1State.HandleNewL1SafeBlock(newL1Safe) // no step, justified L1 information does not do anything for L2 derivation or status - case newL1Finalized := <-d.l1FinalizedSig: - d.l1State.HandleNewL1FinalizedBlock(newL1Finalized) - d.derivation.Finalize(newL1Finalized) + case newL1Finalized := <-s.l1FinalizedSig: + s.l1State.HandleNewL1FinalizedBlock(newL1Finalized) + s.derivation.Finalize(newL1Finalized) reqStep() // we may be able to mark more L2 data as finalized now case <-delayedStepReq: delayedStepReq = nil step() case <-stepReqCh: - d.metrics.SetDerivationIdle(false) - d.log.Debug("Derivation process step", "onto_origin", d.derivation.Origin(), "attempts", stepAttempts) - err := d.derivation.Step(context.Background()) + s.metrics.SetDerivationIdle(false) + s.log.Debug("Derivation process step", "onto_origin", s.derivation.Origin(), "attempts", stepAttempts) + err := s.derivation.Step(context.Background()) stepAttempts += 1 // count as attempt by default. We reset to 0 if we are making healthy progress. if err == io.EOF { - d.log.Debug("Derivation process went idle", "progress", d.derivation.Origin()) + s.log.Debug("Derivation process went idle", "progress", s.derivation.Origin()) stepAttempts = 0 - d.metrics.SetDerivationIdle(true) + s.metrics.SetDerivationIdle(true) continue } else if err != nil && errors.Is(err, derive.ErrReset) { // If the pipeline corrupts, e.g. 
due to a reorg, simply reset it - d.log.Warn("Derivation pipeline is reset", "err", err) - d.derivation.Reset() - d.metrics.RecordPipelineReset() + s.log.Warn("Derivation pipeline is reset", "err", err) + s.derivation.Reset() + s.metrics.RecordPipelineReset() continue } else if err != nil && errors.Is(err, derive.ErrTemporary) { - d.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err) + s.log.Warn("Derivation process temporary error", "attempts", stepAttempts, "err", err) reqStep() continue } else if err != nil && errors.Is(err, derive.ErrCritical) { - d.log.Error("Derivation process critical error", "err", err) + s.log.Error("Derivation process critical error", "err", err) return } else if err != nil && errors.Is(err, derive.NotEnoughData) { stepAttempts = 0 // don't do a backoff for this error reqStep() continue } else if err != nil { - d.log.Error("Derivation process error", "attempts", stepAttempts, "err", err) + s.log.Error("Derivation process error", "attempts", stepAttempts, "err", err) reqStep() continue } else { stepAttempts = 0 reqStep() // continue with the next step if we can } - case respCh := <-d.stateReq: + case respCh := <-s.stateReq: respCh <- struct{}{} - case respCh := <-d.forceReset: - d.log.Warn("Derivation pipeline is manually reset") - d.derivation.Reset() - d.metrics.RecordPipelineReset() + case respCh := <-s.forceReset: + s.log.Warn("Derivation pipeline is manually reset") + s.derivation.Reset() + s.metrics.RecordPipelineReset() close(respCh) - case resp := <-d.startProposer: - unsafeHead := d.derivation.UnsafeL2Head().Hash - if !d.driverConfig.ProposerStopped { - resp.err <- errors.New("proposer already running") + case resp := <-s.startSequencer: + unsafeHead := s.derivation.UnsafeL2Head().Hash + if !s.driverConfig.SequencerStopped { + resp.err <- errors.New("sequencer already running") } else if !bytes.Equal(unsafeHead[:], resp.hash[:]) { resp.err <- fmt.Errorf("block hash does not match: head %s, received %s", unsafeHead.String(), resp.hash.String()) } else { - d.log.Info("Proposer has been started") - d.driverConfig.ProposerStopped = false + s.log.Info("Sequencer has been started") + s.driverConfig.SequencerStopped = false close(resp.err) - planProposerAction() // resume proposing + planSequencerAction() // resume sequencing } - case respCh := <-d.stopProposer: - if d.driverConfig.ProposerStopped { - respCh <- hashAndError{err: errors.New("proposer not running")} + case respCh := <-s.stopSequencer: + if s.driverConfig.SequencerStopped { + respCh <- hashAndError{err: errors.New("sequencer not running")} } else { - d.log.Warn("Proposer has been stopped") - d.driverConfig.ProposerStopped = true - respCh <- hashAndError{hash: d.derivation.UnsafeL2Head().Hash} + s.log.Warn("Sequencer has been stopped") + s.driverConfig.SequencerStopped = true + respCh <- hashAndError{hash: s.derivation.UnsafeL2Head().Hash} } - case <-d.done: + case <-s.done: return } } @@ -357,12 +357,12 @@ func (d *Driver) eventLoop() { // ResetDerivationPipeline forces a reset of the derivation pipeline. // It waits for the reset to occur. It simply unblocks the caller rather // than fully cancelling the reset request upon a context cancellation. 
-func (d *Driver) ResetDerivationPipeline(ctx context.Context) error { +func (s *Driver) ResetDerivationPipeline(ctx context.Context) error { respCh := make(chan struct{}, 1) select { case <-ctx.Done(): return ctx.Err() - case d.forceReset <- respCh: + case s.forceReset <- respCh: select { case <-ctx.Done(): return ctx.Err() @@ -372,9 +372,9 @@ func (d *Driver) ResetDerivationPipeline(ctx context.Context) error { } } -func (d *Driver) StartProposer(ctx context.Context, blockHash common.Hash) error { - if !d.driverConfig.ProposerEnabled { - return errors.New("proposer is not enabled") +func (s *Driver) StartSequencer(ctx context.Context, blockHash common.Hash) error { + if !s.driverConfig.SequencerEnabled { + return errors.New("sequencer is not enabled") } h := hashAndErrorChannel{ hash: blockHash, @@ -383,7 +383,7 @@ func (d *Driver) StartProposer(ctx context.Context, blockHash common.Hash) error select { case <-ctx.Done(): return ctx.Err() - case d.startProposer <- h: + case s.startSequencer <- h: select { case <-ctx.Done(): return ctx.Err() @@ -393,15 +393,15 @@ func (d *Driver) StartProposer(ctx context.Context, blockHash common.Hash) error } } -func (d *Driver) StopProposer(ctx context.Context) (common.Hash, error) { - if !d.driverConfig.ProposerEnabled { - return common.Hash{}, errors.New("proposer is not enabled") +func (s *Driver) StopSequencer(ctx context.Context) (common.Hash, error) { + if !s.driverConfig.SequencerEnabled { + return common.Hash{}, errors.New("sequencer is not enabled") } respCh := make(chan hashAndError, 1) select { case <-ctx.Done(): return common.Hash{}, ctx.Err() - case d.stopProposer <- respCh: + case s.stopSequencer <- respCh: select { case <-ctx.Done(): return common.Hash{}, ctx.Err() @@ -413,27 +413,27 @@ func (d *Driver) StopProposer(ctx context.Context) (common.Hash, error) { // syncStatus returns the current sync status, and should only be called synchronously with // the driver event loop to avoid retrieval of an inconsistent status. -func (d *Driver) syncStatus() *eth.SyncStatus { +func (s *Driver) syncStatus() *eth.SyncStatus { return ð.SyncStatus{ - CurrentL1: d.derivation.Origin(), - CurrentL1Finalized: d.derivation.FinalizedL1(), - HeadL1: d.l1State.L1Head(), - SafeL1: d.l1State.L1Safe(), - FinalizedL1: d.l1State.L1Finalized(), - UnsafeL2: d.derivation.UnsafeL2Head(), - SafeL2: d.derivation.SafeL2Head(), - FinalizedL2: d.derivation.Finalized(), - UnsafeL2SyncTarget: d.derivation.UnsafeL2SyncTarget(), + CurrentL1: s.derivation.Origin(), + CurrentL1Finalized: s.derivation.FinalizedL1(), + HeadL1: s.l1State.L1Head(), + SafeL1: s.l1State.L1Safe(), + FinalizedL1: s.l1State.L1Finalized(), + UnsafeL2: s.derivation.UnsafeL2Head(), + SafeL2: s.derivation.SafeL2Head(), + FinalizedL2: s.derivation.Finalized(), + UnsafeL2SyncTarget: s.derivation.UnsafeL2SyncTarget(), } } // SyncStatus blocks the driver event loop and captures the syncing status. // If the event loop is too busy and the context expires, a context error is returned. 
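SyncStatus below relies on a small handshake rather than a mutex: the caller hands the event loop an unbuffered channel, the loop parks itself on a send into that channel, the caller reads state while the loop is provably blocked, then receives to release it. A self-contained sketch of that handshake (toy state, hot loop kept deliberately simple):

package main

import "fmt"

func main() {
	stateReq := make(chan chan struct{})
	state := 0

	go func() { // stand-in for the driver's event loop
		for {
			select {
			case respCh := <-stateReq:
				respCh <- struct{}{} // parks here until the caller finishes reading
			default:
				state++ // normal loop work that mutates state (hot loop, demo only)
			}
		}
	}()

	wait := make(chan struct{})
	stateReq <- wait  // the loop is now blocked on `respCh <- struct{}{}`
	snapshot := state // safe read: the loop cannot touch state while parked
	<-wait            // release the loop
	fmt.Println("consistent snapshot taken:", snapshot >= 0)
}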
-func (d *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { +func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { wait := make(chan struct{}) select { - case d.stateReq <- wait: - resp := d.syncStatus() + case s.stateReq <- wait: + resp := s.syncStatus() <-wait return resp, nil case <-ctx.Done(): @@ -444,16 +444,16 @@ func (d *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { // BlockRefsWithStatus blocks the driver event loop and captures the syncing status, // along with L2 block references by number and number plus 1, consistent with that same status. // If the event loop is too busy and the context expires, a context error is returned. -func (d *Driver) BlockRefsWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, eth.L2BlockRef, *eth.SyncStatus, error) { +func (s *Driver) BlockRefsWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, eth.L2BlockRef, *eth.SyncStatus, error) { wait := make(chan struct{}) select { - case d.stateReq <- wait: + case s.stateReq <- wait: nextRef := eth.L2BlockRef{} - resp := d.syncStatus() - ref, err := d.l2.L2BlockRefByNumber(ctx, num) + resp := s.syncStatus() + ref, err := s.l2.L2BlockRefByNumber(ctx, num) if err == nil { - nextRef, err = d.l2.L2BlockRefByNumber(ctx, num+1) + nextRef, err = s.l2.L2BlockRefByNumber(ctx, num+1) } <-wait @@ -473,14 +473,14 @@ func (v deferJSONString) String() string { return string(out) } -func (d *Driver) snapshot(event string) { - d.snapshotLog.Info("Rollup State Snapshot", +func (s *Driver) snapshot(event string) { + s.snapshotLog.Info("Rollup State Snapshot", "event", event, - "l1Head", deferJSONString{d.l1State.L1Head()}, - "l1Current", deferJSONString{d.derivation.Origin()}, - "l2Head", deferJSONString{d.derivation.UnsafeL2Head()}, - "l2Safe", deferJSONString{d.derivation.SafeL2Head()}, - "l2FinalizedHead", deferJSONString{d.derivation.Finalized()}) + "l1Head", deferJSONString{s.l1State.L1Head()}, + "l1Current", deferJSONString{s.derivation.Origin()}, + "l2Head", deferJSONString{s.derivation.UnsafeL2Head()}, + "l2Safe", deferJSONString{s.derivation.SafeL2Head()}, + "l2FinalizedHead", deferJSONString{s.derivation.Finalized()}) } type hashAndError struct { @@ -494,18 +494,18 @@ type hashAndErrorChannel struct { } // checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method. // WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved. // Results are received through OnUnsafeL2Payload. -func (d *Driver) checkForGapInUnsafeQueue(ctx context.Context) error { - start := d.derivation.UnsafeL2Head() - end := d.derivation.UnsafeL2SyncTarget() +func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error { + start := s.derivation.UnsafeL2Head() + end := s.derivation.UnsafeL2SyncTarget() // Check if we have missing blocks between the start and end. Request them if we do. 
if end == (eth.L2BlockRef{}) { - d.log.Debug("requesting sync with open-end range", "start", start) - return d.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{}) + s.log.Debug("requesting sync with open-end range", "start", start) + return s.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{}) } else if end.Number > start.Number+1 { - d.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number) - return d.altSync.RequestL2Range(ctx, start, end) + s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number) + return s.altSync.RequestL2Range(ctx, start, end) } return nil } diff --git a/components/node/rollup/sync/start.go b/components/node/rollup/sync/start.go index bde68dac1..4088d607a 100644 --- a/components/node/rollup/sync/start.go +++ b/components/node/rollup/sync/start.go @@ -53,7 +53,7 @@ var ( ErrReorgTooDeep = errors.New("reorg is too deep") ) -const MaxReorgProposerWindows = 5 +const MaxReorgSeqWindows = 5 type FindHeadsResult struct { Unsafe eth.L2BlockRef @@ -97,7 +97,7 @@ func currentHeads(ctx context.Context, cfg *rollup.Config, l2 L2Chain) (*FindHea // // - The *unsafe L2 block*: This is the highest L2 block whose L1 origin is a *plausible* // extension of the canonical L1 chain (as known to the kroma-node). -// - The *safe L2 block*: This is the highest L2 block whose epoch's proposing window is +// - The *safe L2 block*: This is the highest L2 block whose epoch's sequencing window is // complete within the canonical L1 chain (as known to the kroma-node). // - The *finalized L2 block*: This is the L2 block which is known to be fully derived from // finalized L1 block data. @@ -128,7 +128,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain ready := false // when we found the block after the safe head, and we just need to return the parent block. // Each loop iteration we traverse further from the unsafe head towards the finalized head. - // Once we pass the previous safe head and we have seen enough canonical L1 origins to fill a proposer window worth of data, + // Once we pass the previous safe head and we have seen enough canonical L1 origins to fill a sequencer window worth of data, // then we return the last L2 block of the epoch before that as safe head. // Each loop iteration we traverse a single L2 block, and we check if the L1 origins are consistent. for { @@ -174,7 +174,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain return nil, fmt.Errorf("%w: finalized %s, got: %s", ErrReorgFinalized, result.Finalized, n) } // Check we are not reorging L2 incredibly deep - if n.L1Origin.Number+(MaxReorgProposerWindows*cfg.ProposerWindowSize) < prevUnsafe.L1Origin.Number { + if n.L1Origin.Number+(MaxReorgSeqWindows*cfg.SeqWindowSize) < prevUnsafe.L1Origin.Number { // If the reorg depth is too large, something is fishy. // This can legitimately happen if L1 goes down for a while. But in that case, // restarting the L2 node with a bigger configured MaxReorgDepth is an acceptable @@ -191,7 +191,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain // keep the unsafe head if we can't tell if its L1 origin is canonical or not yet. 
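The reorg-depth guard above rejects rewinds whose L1 origin falls more than MaxReorgSeqWindows sequencing windows behind the previous unsafe head's origin. The arithmetic, as a small sketch with illustrative names:

package main

import "fmt"

// tooDeep models the sanity check in FindL2Heads: if the candidate block's L1
// origin is more than MaxReorgSeqWindows windows behind the previous unsafe
// head's origin, the reorg is treated as suspicious and sync is aborted.
func tooDeep(candidateOrigin, prevUnsafeOrigin, seqWindowSize uint64) bool {
	const maxReorgSeqWindows = 5
	return candidateOrigin+maxReorgSeqWindows*seqWindowSize < prevUnsafeOrigin
}

func main() {
	// With a window of 10 L1 blocks, rewinding past 50 origins trips the guard.
	fmt.Println(tooDeep(100, 151, 10)) // true
	fmt.Println(tooDeep(100, 150, 10)) // false
}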
} else if l1Block.Hash == n.L1Origin.Hash { // if L2 matches canonical chain, even if unsafe, - // then we can start finding a span of L1 blocks to cover the proposer window, + // then we can start finding a span of L1 blocks to cover the sequencer window, // which may help avoid rewinding the existing safe head unnecessarily. if highestL2WithCanonicalL1Origin == (eth.L2BlockRef{}) { highestL2WithCanonicalL1Origin = n @@ -202,8 +202,8 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain highestL2WithCanonicalL1Origin = eth.L2BlockRef{} } - // If the L2 block is at least as old as the previous safe head, and we have seen at least a full proposer window worth of L1 blocks to confirm - if n.Number <= result.Safe.Number && n.L1Origin.Number+cfg.ProposerWindowSize < highestL2WithCanonicalL1Origin.L1Origin.Number && n.SequenceNumber == 0 { + // If the L2 block is at least as old as the previous safe head, and we have seen at least a full sequencer window worth of L1 blocks to confirm + if n.Number <= result.Safe.Number && n.L1Origin.Number+cfg.SeqWindowSize < highestL2WithCanonicalL1Origin.L1Origin.Number && n.SequenceNumber == 0 { ready = true } @@ -243,7 +243,7 @@ func FindL2Heads(ctx context.Context, cfg *rollup.Config, l1 L1Chain, l2 L2Chain n = parent - // once we found the block at seq nr 0 that is more than a full proposer window behind the common chain post-reorg, then use the parent block as safe head. + // once we found the block at seq nr 0 that is more than a full sequencer window behind the common chain post-reorg, then use the parent block as safe head. if ready { result.Safe = n return result, nil diff --git a/components/node/rollup/sync/start_test.go b/components/node/rollup/sync/start_test.go index f8c209ee6..2ba508553 100644 --- a/components/node/rollup/sync/start_test.go +++ b/components/node/rollup/sync/start_test.go @@ -56,10 +56,10 @@ type syncStartTestCase struct { GenesisL1Num uint64 GenesisL2 rune - ProposerWindowSize uint64 - SafeL2Head rune - UnsafeL2Head rune - ExpectedErr error + SeqWindowSize uint64 + SafeL2Head rune + UnsafeL2Head rune + ExpectedErr error } func refToRune(r eth.BlockID) rune { @@ -72,8 +72,8 @@ func (c *syncStartTestCase) Run(t *testing.T) { chain.SetL2Safe(runeToHash(c.PreSafeL2)) cfg := &rollup.Config{ - Genesis: genesis, - ProposerWindowSize: c.ProposerWindowSize, + Genesis: genesis, + SeqWindowSize: c.SeqWindowSize, } lgr := log.New() lgr.SetHandler(log.DiscardHandler()) @@ -95,197 +95,197 @@ func (c *syncStartTestCase) Run(t *testing.T) { func TestFindSyncStart(t *testing.T) { testCases := []syncStartTestCase{ { - Name: "already synced", - GenesisL1Num: 0, - L1: "ab", - L2: "AB", - NewL1: "ab", - PreFinalizedL2: 'A', - PreSafeL2: 'A', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'B', - ProposerWindowSize: 2, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "already synced", + GenesisL1Num: 0, + L1: "ab", + L2: "AB", + NewL1: "ab", + PreFinalizedL2: 'A', + PreSafeL2: 'A', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'B', + SeqWindowSize: 2, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "small reorg long chain", - GenesisL1Num: 0, - L1: "abcdefgh", - L2: "ABCDEFGH", - NewL1: "abcdefgx", - PreFinalizedL2: 'B', - PreSafeL2: 'H', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'G', - ProposerWindowSize: 2, - SafeL2Head: 'C', - ExpectedErr: nil, + Name: "small reorg long chain", + GenesisL1Num: 0, + L1: "abcdefgh", + L2: "ABCDEFGH", + NewL1: "abcdefgx", + PreFinalizedL2: 'B', + PreSafeL2: 'H', + 
GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'G', + SeqWindowSize: 2, + SafeL2Head: 'C', + ExpectedErr: nil, }, { - Name: "L1 Chain ahead", - GenesisL1Num: 0, - L1: "abcdef", - L2: "ABCDE", - NewL1: "abcdef", - PreFinalizedL2: 'A', - PreSafeL2: 'D', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'E', - ProposerWindowSize: 2, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "L1 Chain ahead", + GenesisL1Num: 0, + L1: "abcdef", + L2: "ABCDE", + NewL1: "abcdef", + PreFinalizedL2: 'A', + PreSafeL2: 'D', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'E', + SeqWindowSize: 2, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "L2 Chain ahead after reorg", - GenesisL1Num: 0, - L1: "abcxyz", - L2: "ABCXYZ", - NewL1: "abcx", - PreFinalizedL2: 'B', - PreSafeL2: 'X', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'Z', - ProposerWindowSize: 2, - SafeL2Head: 'B', - ExpectedErr: nil, + Name: "L2 Chain ahead after reorg", + GenesisL1Num: 0, + L1: "abcxyz", + L2: "ABCXYZ", + NewL1: "abcx", + PreFinalizedL2: 'B', + PreSafeL2: 'X', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'Z', + SeqWindowSize: 2, + SafeL2Head: 'B', + ExpectedErr: nil, }, { - Name: "genesis", - GenesisL1Num: 0, - L1: "a", - L2: "A", - NewL1: "a", - PreFinalizedL2: 'A', - PreSafeL2: 'A', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'A', - ProposerWindowSize: 2, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "genesis", + GenesisL1Num: 0, + L1: "a", + L2: "A", + NewL1: "a", + PreFinalizedL2: 'A', + PreSafeL2: 'A', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'A', + SeqWindowSize: 2, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "reorg one step back", - GenesisL1Num: 0, - L1: "abcdefg", - L2: "ABCDEFG", - NewL1: "abcdefx", - PreFinalizedL2: 'A', - PreSafeL2: 'E', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'F', - ProposerWindowSize: 3, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "reorg one step back", + GenesisL1Num: 0, + L1: "abcdefg", + L2: "ABCDEFG", + NewL1: "abcdefx", + PreFinalizedL2: 'A', + PreSafeL2: 'E', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'F', + SeqWindowSize: 3, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "reorg two steps back, clip genesis and finalized", - GenesisL1Num: 0, - L1: "abc", - L2: "ABC", - PreFinalizedL2: 'A', - PreSafeL2: 'B', - NewL1: "axy", - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'A', - ProposerWindowSize: 2, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "reorg two steps back, clip genesis and finalized", + GenesisL1Num: 0, + L1: "abc", + L2: "ABC", + PreFinalizedL2: 'A', + PreSafeL2: 'B', + NewL1: "axy", + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'A', + SeqWindowSize: 2, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "reorg three steps back", - GenesisL1Num: 0, - L1: "abcdefgh", - L2: "ABCDEFGH", - NewL1: "abcdexyz", - PreFinalizedL2: 'A', - PreSafeL2: 'D', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 'E', - ProposerWindowSize: 2, - SafeL2Head: 'A', - ExpectedErr: nil, + Name: "reorg three steps back", + GenesisL1Num: 0, + L1: "abcdefgh", + L2: "ABCDEFGH", + NewL1: "abcdexyz", + PreFinalizedL2: 'A', + PreSafeL2: 'D', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 'E', + SeqWindowSize: 2, + SafeL2Head: 'A', + ExpectedErr: nil, }, { - Name: "unexpected L1 chain", - GenesisL1Num: 0, - L1: "abcdef", - L2: "ABCDEF", - NewL1: "xyzwio", - PreFinalizedL2: 'A', - PreSafeL2: 'B', - GenesisL1: 'a', - GenesisL2: 'A', - UnsafeL2Head: 0, - ProposerWindowSize: 2, - ExpectedErr: ErrWrongChain, + Name: "unexpected 
L1 chain", + GenesisL1Num: 0, + L1: "abcdef", + L2: "ABCDEF", + NewL1: "xyzwio", + PreFinalizedL2: 'A', + PreSafeL2: 'B', + GenesisL1: 'a', + GenesisL2: 'A', + UnsafeL2Head: 0, + SeqWindowSize: 2, + ExpectedErr: ErrWrongChain, }, { - Name: "unexpected L2 chain", - GenesisL1Num: 0, - L1: "abcdef", - L2: "ABCDEF", - NewL1: "xyzwio", - PreFinalizedL2: 'A', - PreSafeL2: 'B', - GenesisL1: 'a', - GenesisL2: 'X', - UnsafeL2Head: 0, - ProposerWindowSize: 2, - ExpectedErr: ErrWrongChain, + Name: "unexpected L2 chain", + GenesisL1Num: 0, + L1: "abcdef", + L2: "ABCDEF", + NewL1: "xyzwio", + PreFinalizedL2: 'A', + PreSafeL2: 'B', + GenesisL1: 'a', + GenesisL2: 'X', + UnsafeL2Head: 0, + SeqWindowSize: 2, + ExpectedErr: ErrWrongChain, }, { - Name: "offset L2 genesis", - GenesisL1Num: 3, - L1: "abcdefghi", - L2: "DEFGHI", - NewL1: "abcdefghi", - PreFinalizedL2: 'E', - PreSafeL2: 'H', - GenesisL1: 'd', - GenesisL2: 'D', - UnsafeL2Head: 'I', - ProposerWindowSize: 2, - SafeL2Head: 'E', - ExpectedErr: nil, + Name: "offset L2 genesis", + GenesisL1Num: 3, + L1: "abcdefghi", + L2: "DEFGHI", + NewL1: "abcdefghi", + PreFinalizedL2: 'E', + PreSafeL2: 'H', + GenesisL1: 'd', + GenesisL2: 'D', + UnsafeL2Head: 'I', + SeqWindowSize: 2, + SafeL2Head: 'E', + ExpectedErr: nil, }, { - Name: "offset L2 genesis reorg", - GenesisL1Num: 3, - L1: "abcdefgh", - L2: "DEFGH", - NewL1: "abcdxyzw", - PreFinalizedL2: 'D', - PreSafeL2: 'D', - GenesisL1: 'd', - GenesisL2: 'D', - UnsafeL2Head: 'D', - ProposerWindowSize: 2, - SafeL2Head: 'D', - ExpectedErr: nil, + Name: "offset L2 genesis reorg", + GenesisL1Num: 3, + L1: "abcdefgh", + L2: "DEFGH", + NewL1: "abcdxyzw", + PreFinalizedL2: 'D', + PreSafeL2: 'D', + GenesisL1: 'd', + GenesisL2: 'D', + UnsafeL2Head: 'D', + SeqWindowSize: 2, + SafeL2Head: 'D', + ExpectedErr: nil, }, { - Name: "reorg past offset genesis", - GenesisL1Num: 3, - L1: "abcdefgh", - L2: "DEFGH", - NewL1: "abxyzwio", - PreFinalizedL2: 'D', - PreSafeL2: 'D', - GenesisL1: 'd', - GenesisL2: 'D', - UnsafeL2Head: 0, - ProposerWindowSize: 2, - SafeL2Head: 'D', - ExpectedErr: ErrWrongChain, + Name: "reorg past offset genesis", + GenesisL1Num: 3, + L1: "abcdefgh", + L2: "DEFGH", + NewL1: "abxyzwio", + PreFinalizedL2: 'D', + PreSafeL2: 'D', + GenesisL1: 'd', + GenesisL2: 'D', + UnsafeL2Head: 0, + SeqWindowSize: 2, + SafeL2Head: 'D', + ExpectedErr: ErrWrongChain, }, } diff --git a/components/node/rollup/types.go b/components/node/rollup/types.go index 6f0dd9022..d807035e8 100644 --- a/components/node/rollup/types.go +++ b/components/node/rollup/types.go @@ -18,7 +18,7 @@ import ( var ( ErrBlockTimeZero = errors.New("block time cannot be 0") ErrMissingChannelTimeout = errors.New("channel timeout must be set, this should cover at least a L1 block time") - ErrInvalidProposerWindowSize = errors.New("proposing window size must at least be 2") + ErrInvalidSeqWindowSize = errors.New("sequencing window size must at least be 2") ErrMissingGenesisL1Hash = errors.New("genesis L1 hash cannot be empty") ErrMissingGenesisL2Hash = errors.New("genesis L2 hash cannot be empty") ErrGenesisHashesSame = errors.New("achievement get! rollup inception: L1 and L2 genesis cannot be the same") @@ -54,14 +54,14 @@ type Config struct { Genesis Genesis `json:"genesis"` // Seconds per L2 block BlockTime uint64 `json:"block_time"` - // Proposer batches may not be more than MaxProposerDrift seconds after - // the L1 timestamp of the proposer window end. 
+ // Sequencer batches may not be more than MaxSequencerDrift seconds after + // the L1 timestamp of the sequencing window end. // // Note: When L1 has many 1 second consecutive blocks, and L2 grows at fixed 2 seconds, // the L2 time may still grow beyond this difference. - MaxProposerDrift uint64 `json:"max_proposer_drift"` - // Number of epochs (L1 blocks) per proposer window, including the epoch L1 origin block itself - ProposerWindowSize uint64 `json:"proposer_window_size"` + MaxSequencerDrift uint64 `json:"max_sequencer_drift"` + // Number of epochs (L1 blocks) per sequencing window, including the epoch L1 origin block itself + SeqWindowSize uint64 `json:"seq_window_size"` // Number of L1 blocks between when a channel can be opened and when it must be closed by. ChannelTimeout uint64 `json:"channel_timeout"` // Required to verify L1 signatures @@ -190,8 +190,8 @@ func (cfg *Config) Check() error { if cfg.ChannelTimeout == 0 { return ErrMissingChannelTimeout } - if cfg.ProposerWindowSize < 2 { - return ErrInvalidProposerWindowSize + if cfg.SeqWindowSize < 2 { + return ErrInvalidSeqWindowSize } if cfg.Genesis.L1.Hash == (common.Hash{}) { return ErrMissingGenesisL1Hash diff --git a/components/node/rollup/types_test.go b/components/node/rollup/types_test.go index 017b9a304..7cef34d49 100644 --- a/components/node/rollup/types_test.go +++ b/components/node/rollup/types_test.go @@ -39,8 +39,8 @@ func randConfig() *Config { }, }, BlockTime: 2, - MaxProposerDrift: 100, - ProposerWindowSize: 2, + MaxSequencerDrift: 100, + SeqWindowSize: 2, ChannelTimeout: 123, L1ChainID: big.NewInt(900), L2ChainID: big.NewInt(901), @@ -253,14 +253,14 @@ func TestConfig_Check(t *testing.T) { expectedErr: ErrMissingChannelTimeout, }, { - name: "ProposerWindowSizeZero", - modifier: func(cfg *Config) { cfg.ProposerWindowSize = 0 }, - expectedErr: ErrInvalidProposerWindowSize, + name: "SeqWindowSizeZero", + modifier: func(cfg *Config) { cfg.SeqWindowSize = 0 }, + expectedErr: ErrInvalidSeqWindowSize, }, { - name: "ProposerWindowSizeOne", - modifier: func(cfg *Config) { cfg.ProposerWindowSize = 1 }, - expectedErr: ErrInvalidProposerWindowSize, + name: "SeqWindowSizeOne", + modifier: func(cfg *Config) { cfg.SeqWindowSize = 1 }, + expectedErr: ErrInvalidSeqWindowSize, }, { name: "NoL1Genesis", diff --git a/components/node/service.go b/components/node/service.go index fd9c5f22d..f4ac79fc7 100644 --- a/components/node/service.go +++ b/components/node/service.go @@ -143,11 +143,11 @@ func NewL2SyncEndpointConfig(ctx *cli.Context) *node.L2SyncEndpointConfig { func NewDriverConfig(ctx *cli.Context) *driver.Config { return &driver.Config{ - SyncerConfDepth: ctx.Uint64(flags.SyncerL1Confs.Name), - ProposerConfDepth: ctx.Uint64(flags.ProposerL1Confs.Name), - ProposerEnabled: ctx.Bool(flags.ProposerEnabledFlag.Name), - ProposerStopped: ctx.Bool(flags.ProposerStoppedFlag.Name), - ProposerMaxSafeLag: ctx.Uint64(flags.ProposerMaxSafeLagFlag.Name), + SyncerConfDepth: ctx.Uint64(flags.SyncerL1Confs.Name), + SequencerConfDepth: ctx.Uint64(flags.SequencerL1Confs.Name), + SequencerEnabled: ctx.Bool(flags.SequencerEnabledFlag.Name), + SequencerStopped: ctx.Bool(flags.SequencerStoppedFlag.Name), + SequencerMaxSafeLag: ctx.Uint64(flags.SequencerMaxSafeLagFlag.Name), } } diff --git a/components/node/sources/eth_client_test.go b/components/node/sources/eth_client_test.go index 3f5c86500..59201a9a0 100644 --- a/components/node/sources/eth_client_test.go +++ b/components/node/sources/eth_client_test.go @@ -132,7 +132,7 @@ func 
diff --git a/components/node/sources/eth_client_test.go b/components/node/sources/eth_client_test.go
index 3f5c86500..59201a9a0 100644
--- a/components/node/sources/eth_client_test.go
+++ b/components/node/sources/eth_client_test.go
@@ -132,7 +132,7 @@ func TestEthClient_InfoByNumber(t *testing.T) {
         "eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
         *args[1].(**rpcHeader) = rhdr
     }).Return([]error{nil})
-    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{ProposerWindowSize: 10}, true, RPCKindBasic))
+    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
     require.NoError(t, err)
     info, err := s.InfoByNumber(ctx, uint64(n))
     require.NoError(t, err)
@@ -151,7 +151,7 @@ func TestEthClient_WrongInfoByNumber(t *testing.T) {
         "eth_getBlockByNumber", []any{n.String(), false}).Run(func(args mock.Arguments) {
         *args[1].(**rpcHeader) = &rhdr2
     }).Return([]error{nil})
-    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{ProposerWindowSize: 10}, true, RPCKindBasic))
+    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
     require.NoError(t, err)
     _, err = s.InfoByNumber(ctx, uint64(n))
     require.Error(t, err, "cannot accept the wrong block")
@@ -170,7 +170,7 @@ func TestEthClient_WrongInfoByHash(t *testing.T) {
         "eth_getBlockByHash", []any{k, false}).Run(func(args mock.Arguments) {
         *args[1].(**rpcHeader) = &rhdr2
     }).Return([]error{nil})
-    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{ProposerWindowSize: 10}, true, RPCKindBasic))
+    s, err := NewL1Client(m, nil, nil, L1ClientDefaultConfig(&rollup.Config{SeqWindowSize: 10}, true, RPCKindBasic))
     require.NoError(t, err)
     _, err = s.InfoByHash(ctx, k)
     require.Error(t, err, "cannot accept the wrong block")
diff --git a/components/node/sources/l1_client.go b/components/node/sources/l1_client.go
index d496ee18f..0e0a83aec 100644
--- a/components/node/sources/l1_client.go
+++ b/components/node/sources/l1_client.go
@@ -23,8 +23,8 @@ type L1ClientConfig struct {
 }
 func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProviderKind) *L1ClientConfig {
-    // Cache 3/2 worth of proposing window of receipts and txs
-    span := int(config.ProposerWindowSize) * 3 / 2
+    // Cache 3/2 worth of sequencing window of receipts and txs
+    span := int(config.SeqWindowSize) * 3 / 2
     fullSpan := span
     if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
         span = 1000
diff --git a/components/node/sources/l2_client.go b/components/node/sources/l2_client.go
index 3269b9c14..e7fda0631 100644
--- a/components/node/sources/l2_client.go
+++ b/components/node/sources/l2_client.go
@@ -27,8 +27,8 @@ type L2ClientConfig struct {
 }
 func L2ClientDefaultConfig(config *rollup.Config, trustRPC bool) *L2ClientConfig {
-    // Cache 3/2 worth of proposing window of payloads, block references, receipts and txs
-    span := int(config.ProposerWindowSize) * 3 / 2
+    // Cache 3/2 worth of sequencing window of payloads, block references, receipts and txs
+    span := int(config.SeqWindowSize) * 3 / 2
     // Estimate number of L2 blocks in this span of L1 blocks
     // (there's always one L2 block per L1 block, L1 is thus the minimum, even if block time is very high)
     if config.BlockTime < 12 && config.BlockTime > 0 {
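For reference, the cache sizing in both clients is the same idea: hold 3/2 of a sequencing window, with a hard cap so a very large window cannot blow up memory. A standalone sketch of the L1-side arithmetic (the function name is illustrative; the L2-side config additionally scales by an L2-blocks-per-L1-block estimate, which the hunk above only begins to show):

package main

import "fmt"

// l1CacheSpan reproduces the sizing arithmetic from L1ClientDefaultConfig
// above: 3/2 of a sequencing window of receipts and txs, capped at 1000.
func l1CacheSpan(seqWindowSize uint64) int {
	span := int(seqWindowSize) * 3 / 2
	if span > 1000 { // sanity cap, as in the diff
		span = 1000
	}
	return span
}

func main() {
	fmt.Println(l1CacheSpan(10))   // 15
	fmt.Println(l1CacheSpan(3600)) // 1000: the cap kicks in
}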
diff --git a/components/node/testutils/runtime_config.go b/components/node/testutils/runtime_config.go
index 0434ff8d3..7c14a9855 100644
--- a/components/node/testutils/runtime_config.go
+++ b/components/node/testutils/runtime_config.go
@@ -3,9 +3,9 @@ package testutils
 import "github.com/ethereum/go-ethereum/common"
 type MockRuntimeConfig struct {
-    P2PPropAddress common.Address
+    P2PSeqAddress common.Address
 }
-func (m *MockRuntimeConfig) P2PProposerAddress() common.Address {
-    return m.P2PPropAddress
+func (m *MockRuntimeConfig) P2PSequencerAddress() common.Address {
+    return m.P2PSeqAddress
 }
diff --git a/e2e/actions/blocktime_test.go b/e2e/actions/blocktime_test.go
index 05e489492..4bf71e1cf 100644
--- a/e2e/actions/blocktime_test.go
+++ b/e2e/actions/blocktime_test.go
@@ -15,20 +15,20 @@ import (
 // TestBatchInLastPossibleBlocks tests that the derivation pipeline
 // accepts a batch that is included in the last possible L1 block
-// where there are also no other batches included in the proposer
+// where there are also no other batches included in the sequencer
 // window.
 // This is a regression test against the bug fixed in PR #4566
 func TestBatchInLastPossibleBlocks(gt *testing.T) {
     t := NewDefaultTesting(gt)
     dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
-    dp.DeployConfig.ProposerWindowSize = 4
+    dp.DeployConfig.SequencerWindowSize = 4
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    sd, _, miner, proposer, proposerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log)
+    sd, _, miner, sequencer, sequencerEngine, _, _, batcher := setupReorgTestActors(t, dp, sd, log)
     signer := types.LatestSigner(sd.L2Cfg.Config)
-    cl := proposerEngine.EthClient()
+    cl := sequencerEngine.EthClient()
     aliceNonce := uint64(0) // manual nonce management to avoid geth pending-tx nonce non-determinism flakiness
     aliceTx := func() {
         tx := types.MustSignNewTx(dp.Secrets.Alice, signer, &types.DynamicFeeTx{
@@ -45,23 +45,23 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
     }
     makeL2BlockWithAliceTx := func() {
         aliceTx()
-        proposer.ActL2StartBlock(t)
-        proposerEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
-        proposer.ActL2EndBlock(t)
+        sequencer.ActL2StartBlock(t)
+        sequencerEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
+        sequencer.ActL2EndBlock(t)
     }
-    verifyChainStateOnProposer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
+    verifyChainStateOnSequencer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
         require.Equal(t, l1Number, miner.l1Chain.CurrentHeader().Number.Uint64())
-        require.Equal(t, unsafeHead, proposer.L2Unsafe().Number)
-        require.Equal(t, unsafeHeadOrigin, proposer.L2Unsafe().L1Origin.Number)
-        require.Equal(t, safeHead, proposer.L2Safe().Number)
-        require.Equal(t, safeHeadOrigin, proposer.L2Safe().L1Origin.Number)
+        require.Equal(t, unsafeHead, sequencer.L2Unsafe().Number)
+        require.Equal(t, unsafeHeadOrigin, sequencer.L2Unsafe().L1Origin.Number)
+        require.Equal(t, safeHead, sequencer.L2Safe().Number)
+        require.Equal(t, safeHeadOrigin, sequencer.L2Safe().L1Origin.Number)
     }
     // Make 8 L1 blocks & 17 L2 blocks.
     miner.ActL1StartBlock(4)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
     makeL2BlockWithAliceTx()
     makeL2BlockWithAliceTx()
     makeL2BlockWithAliceTx()
@@ -71,15 +71,15 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
         miner.ActL1StartBlock(4)(t)
         miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
         miner.ActL1EndBlock(t)
-        proposer.ActL1HeadSignal(t)
-        proposer.ActL2PipelineFull(t)
+        sequencer.ActL1HeadSignal(t)
+        sequencer.ActL2PipelineFull(t)
         makeL2BlockWithAliceTx()
         makeL2BlockWithAliceTx()
     }
     // 8 L1 blocks with 17 L2 blocks is the unsafe state.
     // Because we consistently batch submitted we are one epoch behind the unsafe head with the safe head
-    verifyChainStateOnProposer(8, 17, 8, 15, 7)
+    verifyChainStateOnSequencer(8, 17, 8, 15, 7)
     // Create the batch for L2 blocks 16 & 17
     batcher.ActSubmitAll(t)
@@ -87,38 +87,38 @@ func TestBatchInLastPossibleBlocks(gt *testing.T) {
     // L1 Block 8 contains the batch for L2 blocks 14 & 15
     // Then we create L1 blocks 9, 10, 11
     // The L1 origin of L2 block 16 is L1 block 8
-    // At a proposer window of 4, should be possible to include the batch for L2 block 16 & 17 at L1 block 12
+    // At a sequencer window of 4, should be possible to include the batch for L2 block 16 & 17 at L1 block 12
     // Make 3 more L1 + 6 L2 blocks
     for i := 0; i < 3; i++ {
         miner.ActL1StartBlock(4)(t)
         miner.ActL1EndBlock(t)
-        proposer.ActL1HeadSignal(t)
-        proposer.ActL2PipelineFull(t)
+        sequencer.ActL1HeadSignal(t)
+        sequencer.ActL2PipelineFull(t)
         makeL2BlockWithAliceTx()
         makeL2BlockWithAliceTx()
     }
     // At this point verify that we have not started auto generating blocks
     // by checking that L1 & the unsafe head have advanced as expected, but the safe head is the same.
-    verifyChainStateOnProposer(11, 23, 11, 15, 7)
+    verifyChainStateOnSequencer(11, 23, 11, 15, 7)
-    // Check that the batch can go in on the last block of the proposer window
+    // Check that the batch can go in on the last block of the sequencer window
     miner.ActL1StartBlock(4)(t)
     miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
     // We have one more L1 block, no more unsafe blocks, but advance one
     // epoch on the safe head with the submitted batches
-    verifyChainStateOnProposer(12, 23, 11, 17, 8)
+    verifyChainStateOnSequencer(12, 23, 11, 17, 8)
 }
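The window arithmetic this test leans on: a batch for an L2 block whose L1 origin is block N can still be included up to L1 block N + SequencerWindowSize, so origin 8 with a window of 4 gives L1 block 12. A sketch of that bound (illustrative helper, not the real derivation API):

package main

import "fmt"

// lastInclusionBlock computes the last L1 block at which a batch for an L2
// block with the given L1 origin can still be included, per the comments in
// TestBatchInLastPossibleBlocks above.
func lastInclusionBlock(origin, seqWindowSize uint64) uint64 {
	return origin + seqWindowSize
}

func main() {
	fmt.Println(lastInclusionBlock(8, 4)) // 12, as in the test
}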
// TestLargeL1Gaps tests the case that there is a gap between two L1 blocks which
-// is larger than the proposer drift.
+// is larger than the sequencer drift.
 // This test has the following parameters:
-// L1 Block time: 4s. L2 Block time: 2s. Proposer Drift: 32s
+// L1 Block time: 4s. L2 Block time: 2s. Sequencer Drift: 32s
 //
 // It generates 8 L1 blocks & 16 L2 blocks.
 // Then generates an L1 block that has a time delta of 48s.
@@ -131,15 +131,15 @@ func TestLargeL1Gaps(gt *testing.T) {
     dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
     dp.DeployConfig.L1BlockTime = 4
     dp.DeployConfig.L2BlockTime = 2
-    dp.DeployConfig.ProposerWindowSize = 4
-    dp.DeployConfig.MaxProposerDrift = 32
+    dp.DeployConfig.SequencerWindowSize = 4
+    dp.DeployConfig.MaxSequencerDrift = 32
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    sd, _, miner, proposer, proposerEngine, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log)
+    sd, _, miner, sequencer, sequencerEngine, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log)
     signer := types.LatestSigner(sd.L2Cfg.Config)
-    cl := proposerEngine.EthClient()
+    cl := sequencerEngine.EthClient()
     aliceTx := func() {
         n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
         require.NoError(t, err)
@@ -156,17 +156,17 @@ func TestLargeL1Gaps(gt *testing.T) {
     }
     makeL2BlockWithAliceTx := func() {
         aliceTx()
-        proposer.ActL2StartBlock(t)
-        proposerEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
-        proposer.ActL2EndBlock(t)
+        sequencer.ActL2StartBlock(t)
+        sequencerEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
+        sequencer.ActL2EndBlock(t)
     }
-    verifyChainStateOnProposer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
+    verifyChainStateOnSequencer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
         require.Equal(t, l1Number, miner.l1Chain.CurrentHeader().Number.Uint64())
-        require.Equal(t, unsafeHead, proposer.L2Unsafe().Number)
-        require.Equal(t, unsafeHeadOrigin, proposer.L2Unsafe().L1Origin.Number)
-        require.Equal(t, safeHead, proposer.L2Safe().Number)
-        require.Equal(t, safeHeadOrigin, proposer.L2Safe().L1Origin.Number)
+        require.Equal(t, unsafeHead, sequencer.L2Unsafe().Number)
+        require.Equal(t, unsafeHeadOrigin, sequencer.L2Unsafe().L1Origin.Number)
+        require.Equal(t, safeHead, sequencer.L2Safe().Number)
+        require.Equal(t, safeHeadOrigin, sequencer.L2Safe().L1Origin.Number)
     }
     verifyChainStateOnSyncer := func(l1Number, unsafeHead, unsafeHeadOrigin, safeHead, safeHeadOrigin uint64) {
@@ -180,8 +180,8 @@ func TestLargeL1Gaps(gt *testing.T) {
     // Make 8 L1 blocks & 16 L2 blocks.
     miner.ActL1StartBlock(4)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
     makeL2BlockWithAliceTx()
     makeL2BlockWithAliceTx()
@@ -190,8 +190,8 @@ func TestLargeL1Gaps(gt *testing.T) {
         miner.ActL1StartBlock(4)(t)
         miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
         miner.ActL1EndBlock(t)
-        proposer.ActL1HeadSignal(t)
-        proposer.ActL2PipelineFull(t)
+        sequencer.ActL1HeadSignal(t)
+        sequencer.ActL2PipelineFull(t)
         makeL2BlockWithAliceTx()
         makeL2BlockWithAliceTx()
     }
@@ -200,28 +200,28 @@ func TestLargeL1Gaps(gt *testing.T) {
     require.NoError(t, err)
     require.Equal(t, uint64(16), n) // 16 valid blocks with txns.
-    verifyChainStateOnProposer(8, 16, 8, 14, 7)
+    verifyChainStateOnSequencer(8, 16, 8, 14, 7)
     // Make the really long L1 block. Do include previous batches
     batcher.ActSubmitAll(t)
     miner.ActL1StartBlock(48)(t)
     miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
-    verifyChainStateOnProposer(9, 16, 8, 16, 8)
+    verifyChainStateOnSequencer(9, 16, 8, 16, 8)
     // Make the L2 blocks corresponding to the long L1 block
     for i := 0; i < 24; i++ {
         makeL2BlockWithAliceTx()
     }
-    verifyChainStateOnProposer(9, 40, 9, 16, 8)
+    verifyChainStateOnSequencer(9, 40, 9, 16, 8)
     // Check how many transactions from alice got included on L2
     // We created one transaction for every L2 block. So we should have created 40 transactions.
     // The first 16 L2 block where included without issue.
-    // Then over the long block, 32s proposer drift / 2s block time => 16 blocks with transactions
+    // Then over the long block, 32s sequencer drift / 2s block time => 16 blocks with transactions
     // Then at the last L2 block we reached the next origin, and accept txs again => 17 blocks with transactions
     // That leaves 7 L2 blocks without transactions. So we should have 16+17 = 33 transactions on chain.
     n, err = cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
@@ -232,7 +232,7 @@ func TestLargeL1Gaps(gt *testing.T) {
     require.NoError(t, err)
     require.Equal(t, uint64(33), n)
-    // Make more L1 blocks to get past the proposer window for the large range.
+    // Make more L1 blocks to get past the sequencer window for the large range.
     // Do batch submit the previous L2 blocks.
     batcher.ActSubmitAll(t)
     miner.ActL1StartBlock(4)(t)
@@ -241,7 +241,7 @@ func TestLargeL1Gaps(gt *testing.T) {
     // We are not able to do eager batch derivation for these L2 blocks because
     // we reject batches with a greater timestamp than the drift.
-    verifyChainStateOnProposer(10, 40, 9, 16, 8)
+    verifyChainStateOnSequencer(10, 40, 9, 16, 8)
     for i := 0; i < 2; i++ {
         miner.ActL1StartBlock(4)(t)
@@ -249,18 +249,18 @@ func TestLargeL1Gaps(gt *testing.T) {
     }
     // Run the pipeline against the batches + to be auto-generated batches.
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
-    verifyChainStateOnProposer(12, 40, 9, 40, 9)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
+    verifyChainStateOnSequencer(12, 40, 9, 40, 9)
     // Recheck nonce. Will fail if no batches where submitted
     n, err = cl.NonceAt(t.Ctx(), dp.Addresses.Alice, nil)
     require.NoError(t, err)
-    require.Equal(t, uint64(33), n) // 16 valid blocks with txns. Get proposer drift non-empty (32/2 => 16) & 7 forced empty
+    require.Equal(t, uint64(33), n) // 16 valid blocks with txns. Get sequencer drift non-empty (32/2 => 16) & 7 forced empty
     // Check that the syncer got the same result
     syncer.ActL1HeadSignal(t)
     syncer.ActL2PipelineFull(t)
     verifyChainStateOnSyncer(12, 40, 9, 40, 9)
-    require.Equal(t, syncer.L2Safe(), proposer.L2Safe())
+    require.Equal(t, syncer.L2Safe(), sequencer.L2Safe())
 }
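The expected nonce of 33 follows from the drift arithmetic spelled out in the comments above; a small standalone sketch that re-derives it:

package main

import "fmt"

func main() {
	// Re-derives the expected nonce in TestLargeL1Gaps above.
	const (
		l2BlockTime       = 2  // seconds
		maxSequencerDrift = 32 // seconds
		blocksBeforeGap   = 16 // L2 blocks with txs before the 48s L1 block
		blocksOverGap     = 24 // L2 blocks built to span the 48s L1 block
	)
	// Within the long L1 block, txs are allowed until the drift runs out...
	withTxs := maxSequencerDrift / l2BlockTime // 16
	// ...plus the final block, which reaches the next L1 origin and may
	// include txs again.
	withTxs++                                  // 17
	fmt.Println(blocksOverGap - withTxs)       // 7 forced-empty blocks
	fmt.Println(blocksBeforeGap + withTxs)     // 33 total txs, matching the test
}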
diff --git a/e2e/actions/fork_test.go b/e2e/actions/fork_test.go
index 2382ac9aa..68c6fb82d 100644
--- a/e2e/actions/fork_test.go
+++ b/e2e/actions/fork_test.go
@@ -19,12 +19,12 @@ func TestShapellaL1Fork(gt *testing.T) {
     sd.L1Cfg.Config.ShanghaiTime = &activation
     log := testlog.Logger(t, log.LvlDebug)
-    _, _, miner, proposer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log)
+    _, _, miner, sequencer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log)
     require.False(t, sd.L1Cfg.Config.IsShanghai(miner.l1Chain.CurrentBlock().Time), "not active yet")
     // start nodes
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     syncer.ActL2PipelineFull(t)
     // build empty L1 blocks, crossing the fork boundary
@@ -37,8 +37,8 @@ func TestShapellaL1Fork(gt *testing.T) {
     require.True(t, sd.L1Cfg.Config.IsShanghai(l1Block.Time))
     // build L2 chain up to and including L2 blocks referencing shanghai L1 blocks
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
     miner.ActL1StartBlock(12)(t)
     batcher.ActSubmitAll(t)
     miner.ActL1IncludeTx(batcher.batcherAddr)(t)
@@ -49,5 +49,5 @@ func TestShapellaL1Fork(gt *testing.T) {
     syncer.ActL2PipelineFull(t)
     // verify syncer accepted shanghai L1 inputs
     require.Equal(t, l1Block.Hash(), syncer.SyncStatus().SafeL2.L1Origin.Hash, "syncer synced L1 chain that includes shanghai headers")
-    require.Equal(t, proposer.SyncStatus().UnsafeL2, syncer.SyncStatus().UnsafeL2, "syncer and proposer agree")
+    require.Equal(t, sequencer.SyncStatus().UnsafeL2, syncer.SyncStatus().UnsafeL2, "syncer and sequencer agree")
 }
diff --git a/e2e/actions/l1_replica.go b/e2e/actions/l1_replica.go
index 12347eb56..944e8a921 100644
--- a/e2e/actions/l1_replica.go
+++ b/e2e/actions/l1_replica.go
@@ -47,10 +47,10 @@ type L1Replica struct {
 }
 var defaultRollupTestParams = &e2eutils.TestParams{
-    MaxProposerDrift: 40,
-    ProposerWindowSize: 120,
-    ChannelTimeout: 120,
-    L1BlockTime: 15,
+    MaxSequencerDrift: 40,
+    SequencerWindowSize: 120,
+    ChannelTimeout: 120,
+    L1BlockTime: 15,
 }
 var defaultAlloc = &e2eutils.AllocParams{PrefundTestUsers: true}
diff --git a/e2e/actions/l2_batcher_test.go b/e2e/actions/l2_batcher_test.go
index cfa1e2b34..34478de96 100644
--- a/e2e/actions/l2_batcher_test.go
+++ b/e2e/actions/l2_batcher_test.go
@@ -22,25 +22,25 @@ import (
 func TestBatcher(gt *testing.T) {
     t := NewDefaultTesting(gt)
     p := &e2eutils.TestParams{
-        MaxProposerDrift: 20, // larger than L1 block time we simulate in this test (12)
-        ProposerWindowSize: 24,
-        ChannelTimeout: 20,
+        MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
+        SequencerWindowSize: 24,
+        ChannelTimeout: 20,
     }
     dp := e2eutils.MakeDeployParams(t, p)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    miner, propEngine, proposer := setupProposerTest(t, sd, log)
+    miner, seqEngine, sequencer := setupSequencerTest(t, sd, log)
     syncEngine, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg))
-    rollupPropCl := proposer.RollupClient()
+    rollupSeqCl := sequencer.RollupClient()
     batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
         MinL1TxSize: 0,
         MaxL1TxSize: 128_000,
         BatcherKey: dp.Secrets.Batcher,
-    }, rollupPropCl, miner.EthClient(), propEngine.EthClient())
+    }, rollupSeqCl, miner.EthClient(), seqEngine.EthClient())
     // Alice makes a L2 tx
-    cl := propEngine.EthClient()
+    cl := seqEngine.EthClient()
     n, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
     require.NoError(t, err)
     signer := types.LatestSigner(sd.L2Cfg.Config)
@@ -55,13 +55,13 @@ func TestBatcher(gt *testing.T) {
     })
     require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     syncer.ActL2PipelineFull(t)
     // Make L2 block
-    proposer.ActL2StartBlock(t)
-    propEngine.ActL2IncludeTx(dp.Addresses.Alice)(t)
-    proposer.ActL2EndBlock(t)
+    sequencer.ActL2StartBlock(t)
+    seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t)
+    sequencer.ActL2EndBlock(t)
     // batch submit to L1
     batcher.ActL2BatchBuffer(t)
@@ -80,12 +80,12 @@ func TestBatcher(gt *testing.T) {
     // Now make enough L1 blocks that the syncer will have to derive a L2 block
     // It will also eagerly derive the block from the batcher
-    for i := uint64(0); i < sd.RollupCfg.ProposerWindowSize; i++ {
+    for i := uint64(0); i < sd.RollupCfg.SeqWindowSize; i++ {
         miner.ActL1StartBlock(12)(t)
         miner.ActL1EndBlock(t)
     }
-    // sync syncer from L1 batch in otherwise empty proposer window
+    // sync syncer from L1 batch in otherwise empty sequencer window
     syncer.ActL1HeadSignal(t)
     syncer.ActL2PipelineFull(t)
     require.Equal(t, uint64(1), syncer.SyncStatus().SafeL2.L1Origin.Number)
@@ -103,44 +103,44 @@ func TestL2Finalization(gt *testing.T) {
     dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    miner, engine, proposer := setupProposerTest(t, sd, log)
+    miner, engine, sequencer := setupSequencerTest(t, sd, log)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     // build an empty L1 block (#1), mark it as justified
     miner.ActEmptyBlock(t)
     miner.ActL1SafeNext(t) // #0 -> #1
-    // proposer builds L2 chain, up to and including a block that has the new L1 block as origin
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    // sequencer builds L2 chain, up to and including a block that has the new L1 block as origin
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
-    proposer.ActL2PipelineFull(t)
-    proposer.ActL1SafeSignal(t)
-    require.Equal(t, uint64(1), proposer.SyncStatus().SafeL1.Number)
+    sequencer.ActL2PipelineFull(t)
+    sequencer.ActL1SafeSignal(t)
+    require.Equal(t, uint64(1), sequencer.SyncStatus().SafeL1.Number)
     // build another L1 block (#2), mark it as justified. And mark previous justified as finalized.
     miner.ActEmptyBlock(t)
     miner.ActL1SafeNext(t) // #1 -> #2
     miner.ActL1FinalizeNext(t) // #0 -> #1
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
     // continue to build L2 chain referencing the new L1 blocks
-    proposer.ActL2PipelineFull(t)
-    proposer.ActL1FinalizedSignal(t)
-    proposer.ActL1SafeSignal(t)
-    require.Equal(t, uint64(2), proposer.SyncStatus().SafeL1.Number)
-    require.Equal(t, uint64(1), proposer.SyncStatus().FinalizedL1.Number)
-    require.Equal(t, uint64(0), proposer.SyncStatus().FinalizedL2.Number, "L2 block has to be included on L1 before it can be finalized")
+    sequencer.ActL2PipelineFull(t)
+    sequencer.ActL1FinalizedSignal(t)
+    sequencer.ActL1SafeSignal(t)
+    require.Equal(t, uint64(2), sequencer.SyncStatus().SafeL1.Number)
+    require.Equal(t, uint64(1), sequencer.SyncStatus().FinalizedL1.Number)
+    require.Equal(t, uint64(0), sequencer.SyncStatus().FinalizedL2.Number, "L2 block has to be included on L1 before it can be finalized")
     batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
         MinL1TxSize: 0,
         MaxL1TxSize: 128_000,
         BatcherKey: dp.Secrets.Batcher,
-    }, proposer.RollupClient(), miner.EthClient(), engine.EthClient())
+    }, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
-    heightToSubmit := proposer.SyncStatus().UnsafeL2.Number
+    heightToSubmit := sequencer.SyncStatus().UnsafeL2.Number
     batcher.ActSubmitAll(t)
     // confirm batch on L1, block #3
@@ -149,12 +149,12 @@ func TestL2Finalization(gt *testing.T) {
     miner.ActL1EndBlock(t)
     // read the batch
-    proposer.ActL2PipelineFull(t)
-    require.Equal(t, uint64(0), proposer.SyncStatus().FinalizedL2.Number, "Batch must be included in finalized part of L1 chain for L2 block to finalize")
+    sequencer.ActL2PipelineFull(t)
+    require.Equal(t, uint64(0), sequencer.SyncStatus().FinalizedL2.Number, "Batch must be included in finalized part of L1 chain for L2 block to finalize")
     // build some more L2 blocks, so there is an unsafe part again that hasn't been submitted yet
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
     // submit those blocks too, block #4
     batcher.ActSubmitAll(t)
@@ -167,8 +167,8 @@ func TestL2Finalization(gt *testing.T) {
     miner.ActEmptyBlock(t)
     // and more unsafe L2 blocks
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
     // move safe/finalize markers: finalize the L1 chain block with the first batch, but not the second
     miner.ActL1SafeNext(t) // #2 -> #3
@@ -176,17 +176,17 @@ func TestL2Finalization(gt *testing.T) {
     miner.ActL1FinalizeNext(t) // #1 -> #2
     miner.ActL1FinalizeNext(t) // #2 -> #3
-    proposer.ActL2PipelineFull(t)
-    proposer.ActL1FinalizedSignal(t)
-    proposer.ActL1SafeSignal(t)
-    proposer.ActL1HeadSignal(t)
-    require.Equal(t, uint64(6), proposer.SyncStatus().HeadL1.Number)
-    require.Equal(t, uint64(4), proposer.SyncStatus().SafeL1.Number)
-    require.Equal(t, uint64(3), proposer.SyncStatus().FinalizedL1.Number)
-    require.Equal(t, heightToSubmit, proposer.SyncStatus().FinalizedL2.Number, "finalized L2 blocks in first batch")
+    sequencer.ActL2PipelineFull(t)
+    sequencer.ActL1FinalizedSignal(t)
+    sequencer.ActL1SafeSignal(t)
+    sequencer.ActL1HeadSignal(t)
+    require.Equal(t, uint64(6), sequencer.SyncStatus().HeadL1.Number)
+    require.Equal(t, uint64(4), sequencer.SyncStatus().SafeL1.Number)
+    require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
+    require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "finalized L2 blocks in first batch")
     // need to act with the engine on the signals still
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     engCl := engine.EngineClient(t, sd.RollupCfg)
     engBlock, err := engCl.L2BlockRefByLabel(t.Ctx(), eth.Finalized)
@@ -195,12 +195,12 @@ func TestL2Finalization(gt *testing.T) {
     // Now try to finalize block 4, but with a bad/malicious alternative hash.
     // If we get this false signal, we shouldn't finalize the L2 chain.
-    altBlock4 := proposer.SyncStatus().SafeL1
+    altBlock4 := sequencer.SyncStatus().SafeL1
     altBlock4.Hash = common.HexToHash("0xdead")
-    proposer.derivation.Finalize(altBlock4)
-    proposer.ActL2PipelineFull(t)
-    require.Equal(t, uint64(3), proposer.SyncStatus().FinalizedL1.Number)
-    require.Equal(t, heightToSubmit, proposer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
+    sequencer.derivation.Finalize(altBlock4)
+    sequencer.ActL2PipelineFull(t)
+    require.Equal(t, uint64(3), sequencer.SyncStatus().FinalizedL1.Number)
+    require.Equal(t, heightToSubmit, sequencer.SyncStatus().FinalizedL2.Number, "unknown/bad finalized L1 blocks are ignored")
 }
 // TestL2FinalizationWithSparseL1 tests that safe L2 blocks can be finalized even if we do not regularly get a L1 finalization signal
@@ -209,22 +209,22 @@ func TestL2FinalizationWithSparseL1(gt *testing.T) {
     dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    miner, engine, proposer := setupProposerTest(t, sd, log)
+    miner, engine, sequencer := setupSequencerTest(t, sd, log)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     miner.ActEmptyBlock(t)
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
-    startStatus := proposer.SyncStatus()
-    require.Less(t, startStatus.SafeL2.Number, startStatus.UnsafeL2.Number, "proposer has unsafe L2 block")
+    startStatus := sequencer.SyncStatus()
+    require.Less(t, startStatus.SafeL2.Number, startStatus.UnsafeL2.Number, "sequencer has unsafe L2 block")
     batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{
         MinL1TxSize: 0,
         MaxL1TxSize: 128_000,
         BatcherKey: dp.Secrets.Batcher,
-    }, proposer.RollupClient(), miner.EthClient(), engine.EthClient())
+    }, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
     batcher.ActSubmitAll(t)
     // include in L1
@@ -237,26 +237,26 @@ func TestL2FinalizationWithSparseL1(gt *testing.T) {
     miner.ActEmptyBlock(t)
     // See the L1 head, and traverse the pipeline to it
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
-    updatedStatus := proposer.SyncStatus()
+    updatedStatus := sequencer.SyncStatus()
     require.Equal(t, updatedStatus.SafeL2.Number, updatedStatus.UnsafeL2.Number, "unsafe L2 block is now safe")
     require.Less(t, updatedStatus.FinalizedL2.Number, updatedStatus.UnsafeL2.Number, "submitted block is not yet finalized")
-    // Now skip straight to the head with L1 signals (proposer has traversed the L1 blocks, but they did not have L2 contents)
+    // Now skip straight to the head with L1 signals (sequencer has traversed the L1 blocks, but they did not have L2 contents)
     headL1Num := miner.UnsafeNum()
     miner.ActL1Safe(t, headL1Num)
     miner.ActL1Finalize(t, headL1Num)
-    proposer.ActL1SafeSignal(t)
-    proposer.ActL1FinalizedSignal(t)
+    sequencer.ActL1SafeSignal(t)
+    sequencer.ActL1FinalizedSignal(t)
     // Now see if the signals can be processed
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
-    finalStatus := proposer.SyncStatus()
+    finalStatus := sequencer.SyncStatus()
     // Verify the signal was processed, even though we signalled a later L1 block than the one with the batch.
-    require.Equal(t, finalStatus.FinalizedL2.Number, finalStatus.UnsafeL2.Number, "proposer submitted its L2 block and it finalized")
+    require.Equal(t, finalStatus.FinalizedL2.Number, finalStatus.UnsafeL2.Number, "sequencer submitted its L2 block and it finalized")
 }
 // TestGarbageBatch tests the behavior of an invalid/malformed output channel frame containing
@@ -269,7 +269,7 @@ func TestGarbageBatch(gt *testing.T) {
     for _, garbageKind := range GarbageKinds {
         sd := e2eutils.Setup(t, dp, defaultAlloc)
         log := testlog.Logger(t, log.LvlError)
-        miner, engine, proposer := setupProposerTest(t, sd, log)
+        miner, engine, sequencer := setupSequencerTest(t, sd, log)
         _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg))
@@ -288,22 +288,22 @@ func TestGarbageBatch(gt *testing.T) {
             }
         }
-        batcher := NewL2Batcher(log, sd.RollupCfg, batcherCfg, proposer.RollupClient(), miner.EthClient(), engine.EthClient())
+        batcher := NewL2Batcher(log, sd.RollupCfg, batcherCfg, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
-        proposer.ActL2PipelineFull(t)
+        sequencer.ActL2PipelineFull(t)
        syncer.ActL2PipelineFull(t)
        syncAndBuildL2 := func() {
-            // Send a head signal to the proposer and syncer
-            proposer.ActL1HeadSignal(t)
+            // Send a head signal to the sequencer and syncer
+            sequencer.ActL1HeadSignal(t)
            syncer.ActL1HeadSignal(t)
-            // Run the derivation pipeline on the proposer and syncer
-            proposer.ActL2PipelineFull(t)
+            // Run the derivation pipeline on the sequencer and syncer
+            sequencer.ActL2PipelineFull(t)
            syncer.ActL2PipelineFull(t)
            // Build the L2 chain to the L1 head
-            proposer.ActBuildToL1Head(t)
+            sequencer.ActBuildToL1Head(t)
        }
        // Build an empty block on L1 and run the derivation pipeline + build L2
@@ -313,8 +313,8 @@ func TestGarbageBatch(gt *testing.T) {
        // Ensure that the L2 safe head has an L1 Origin at genesis before any
        // batches are submitted.
-        require.Equal(t, uint64(0), proposer.L2Safe().L1Origin.Number)
-        require.Equal(t, uint64(1), proposer.L2Unsafe().L1Origin.Number)
+        require.Equal(t, uint64(0), sequencer.L2Safe().L1Origin.Number)
+        require.Equal(t, uint64(1), sequencer.L2Unsafe().L1Origin.Number)
        // Submit a batch containing all blocks built on L2 while catching up
        // to the L1 head above. The output channel frame submitted to the batch
@@ -329,29 +329,29 @@ func TestGarbageBatch(gt *testing.T) {
        miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
        miner.ActL1EndBlock(t)
-        // Send a head signal + run the derivation pipeline on the proposer
+        // Send a head signal + run the derivation pipeline on the sequencer
        // and syncer.
        syncAndBuildL2()
        // Verify that the L2 blocks that were batch submitted were *not* marked
        // as safe due to the malformed output channel frame. The safe head should
        // still have an L1 Origin at genesis.
-        require.Equal(t, uint64(0), proposer.L2Safe().L1Origin.Number)
-        require.Equal(t, uint64(2), proposer.L2Unsafe().L1Origin.Number)
+        require.Equal(t, uint64(0), sequencer.L2Safe().L1Origin.Number)
+        require.Equal(t, uint64(2), sequencer.L2Unsafe().L1Origin.Number)
     }
 }
 func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
     t := NewDefaultTesting(gt)
     p := &e2eutils.TestParams{
-        MaxProposerDrift: 20, // larger than L1 block time we simulate in this test (12)
-        ProposerWindowSize: 24,
-        ChannelTimeout: 20,
+        MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
+        SequencerWindowSize: 24,
+        ChannelTimeout: 20,
     }
     dp := e2eutils.MakeDeployParams(t, p)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlError)
-    miner, engine, proposer := setupProposerTest(t, sd, log)
+    miner, engine, sequencer := setupSequencerTest(t, sd, log)
     _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg))
@@ -359,31 +359,31 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
         MinL1TxSize: 0,
         MaxL1TxSize: 128_000,
         BatcherKey: dp.Secrets.Batcher,
-    }, proposer.RollupClient(), miner.EthClient(), engine.EthClient())
+    }, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     syncer.ActL2PipelineFull(t)
     // make a long L1 chain, up to just one block left for L2 blocks to be included.
-    for i := uint64(0); i < p.ProposerWindowSize-1; i++ {
+    for i := uint64(0); i < p.SequencerWindowSize-1; i++ {
         miner.ActEmptyBlock(t)
     }
     // Now build a L2 chain that references all of these L1 blocks
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1Head(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1Head(t)
-    // Now submit all the L2 blocks in the very last L1 block within proposer window range
+    // Now submit all the L2 blocks in the very last L1 block within sequencer window range
     batcher.ActSubmitAll(t)
     miner.ActL1StartBlock(12)(t)
     miner.ActL1IncludeTx(dp.Addresses.Batcher)(t)
     miner.ActL1EndBlock(t)
-    // Now sync the syncer, and see if the L2 chain of the proposer is safe
+    // Now sync the syncer, and see if the L2 chain of the sequencer is safe
     syncer.ActL2PipelineFull(t)
-    require.Equal(t, proposer.L2Unsafe(), syncer.L2Safe(), "all L2 blocks should have been included just in time")
-    proposer.ActL2PipelineFull(t)
-    require.Equal(t, proposer.L2Unsafe(), proposer.L2Safe(), "same for proposer")
+    require.Equal(t, sequencer.L2Unsafe(), syncer.L2Safe(), "all L2 blocks should have been included just in time")
+    sequencer.ActL2PipelineFull(t)
+    require.Equal(t, sequencer.L2Unsafe(), sequencer.L2Safe(), "same for sequencer")
 }
 // TestBigL2Txs tests a high-throughput case with constrained batcher:
@@ -393,7 +393,7 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
 // - Limit the data-tx size to 40 KB, to force data to be split across multiple data-txs
 // - Defer all data-tx inclusion till the end
 // - Fill L1 blocks with data-txs until we have processed them all
-// - Run the syncer, and check if it derives the same L2 chain as was created by the proposer.
+// - Run the syncer, and check if it derives the same L2 chain as was created by the sequencer.
 //
 // The goal of this test is to quickly run through an otherwise very slow process of submitting and including lots of data.
 // This does not test the batcher code, but is really focused at testing the batcher utils
@@ -401,14 +401,14 @@ func TestExtendedTimeWithoutL1Batches(gt *testing.T) {
 func TestBigL2Txs(gt *testing.T) {
     t := NewDefaultTesting(gt)
     p := &e2eutils.TestParams{
-        MaxProposerDrift: 100,
-        ProposerWindowSize: 1000,
-        ChannelTimeout: 200, // give enough space to buffer large amounts of data before submitting it
+        MaxSequencerDrift: 100,
+        SequencerWindowSize: 1000,
+        ChannelTimeout: 200, // give enough space to buffer large amounts of data before submitting it
     }
     dp := e2eutils.MakeDeployParams(t, p)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlInfo)
-    miner, engine, proposer := setupProposerTest(t, sd, log)
+    miner, engine, sequencer := setupSequencerTest(t, sd, log)
     _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg))
@@ -416,9 +416,9 @@ func TestBigL2Txs(gt *testing.T) {
         MinL1TxSize: 0,
         MaxL1TxSize: 40_000, // try a small batch size, to force the data to be split between more frames
         BatcherKey: dp.Secrets.Batcher,
-    }, proposer.RollupClient(), miner.EthClient(), engine.EthClient())
+    }, sequencer.RollupClient(), miner.EthClient(), engine.EthClient())
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     syncer.ActL2PipelineFull(t)
     cl := engine.EthClient()
@@ -433,13 +433,13 @@ func TestBigL2Txs(gt *testing.T) {
     // build many L2 blocks filled to the brim with large txs of random data
     for i := 0; i < 40; i++ {
         aliceNonce, err := cl.PendingNonceAt(t.Ctx(), dp.Addresses.Alice)
-        status := proposer.SyncStatus()
-        // build empty L1 blocks as necessary, so the L2 proposer can continue to include txs while not drifting too far out
+        status := sequencer.SyncStatus()
+        // build empty L1 blocks as necessary, so the L2 sequencer can continue to include txs while not drifting too far out
         if status.UnsafeL2.Time >= status.HeadL1.Time+12 {
             miner.ActEmptyBlock(t)
         }
-        proposer.ActL1HeadSignal(t)
-        proposer.ActL2StartBlock(t)
+        sequencer.ActL1HeadSignal(t)
+        sequencer.ActL2StartBlock(t)
         baseFee := engine.l2Chain.CurrentBlock().BaseFee // this will go quite high, since so many consecutive blocks are filled at capacity.
         // fill the block with large L2 txs from alice
         for n := aliceNonce; ; n++ {
@@ -466,8 +466,8 @@ func TestBigL2Txs(gt *testing.T) {
             require.NoError(gt, cl.SendTransaction(t.Ctx(), tx))
             engine.ActL2IncludeTx(dp.Addresses.Alice)(t)
         }
-        proposer.ActL2EndBlock(t)
-        for batcher.l2BufferedBlock.Number < proposer.SyncStatus().UnsafeL2.Number {
+        sequencer.ActL2EndBlock(t)
+        for batcher.l2BufferedBlock.Number < sequencer.SyncStatus().UnsafeL2.Number {
             // if we run out of space, close the channel and submit all the txs
             if err := batcher.Buffer(t); errors.Is(err, derive.ErrTooManyRLPBytes) {
                 log.Info("flushing filled channel to batch txs", "id", batcher.l2ChannelOut.ID())
@@ -511,5 +511,5 @@ func TestBigL2Txs(gt *testing.T) {
     }
     syncer.ActL1HeadSignal(t)
     syncer.ActL2PipelineFull(t)
-    require.Equal(t, proposer.SyncStatus().UnsafeL2, syncer.SyncStatus().SafeL2, "syncer synced proposer data even though of huge tx in block")
+    require.Equal(t, sequencer.SyncStatus().UnsafeL2, syncer.SyncStatus().SafeL2, "syncer synced sequencer data even though of huge tx in block")
 }
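The buffering loop at the end of TestBigL2Txs follows a buffer-until-full pattern: keep adding blocks to the current channel, and when it reports ErrTooManyRLPBytes, flush the channel as batch txs and retry the same block in a fresh one. A toy sketch of that shape (the channel type, sizes, and error here are illustrative stand-ins, not the real channel-out):

package main

import (
	"errors"
	"fmt"
)

// errTooFull stands in for derive.ErrTooManyRLPBytes in the test above.
var errTooFull = errors.New("too many RLP bytes")

// channel is a toy stand-in for the batcher's channel-out, capped by size.
type channel struct{ used, max int }

func (c *channel) addBlock(size int) error {
	if c.used+size > c.max {
		return errTooFull
	}
	c.used += size
	return nil
}

func main() {
	ch := &channel{max: 40_000}
	blocks := []int{30_000, 25_000, 10_000}
	for _, b := range blocks {
		if err := ch.addBlock(b); errors.Is(err, errTooFull) {
			// the channel is full: flush it as batch txs, open a new one,
			// and retry the block that did not fit
			fmt.Println("flushing filled channel to batch txs")
			ch = &channel{max: 40_000}
			_ = ch.addBlock(b)
		}
	}
	fmt.Println("remaining bytes buffered:", ch.used) // 35000
}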
diff --git a/e2e/actions/l2_engine.go b/e2e/actions/l2_engine.go
index 0440a4692..ff2b407ce 100644
--- a/e2e/actions/l2_engine.go
+++ b/e2e/actions/l2_engine.go
@@ -65,7 +65,7 @@ func NewL2Engine(t Testing, log log.Logger, genesis *core.Genesis, rollupGenesis
         engineApi: engineApi,
     }
     // register the custom engine API, so we can serve engine requests while having more control
-    // over proposing of individual txs.
+    // over sequencing of individual txs.
     n.RegisterAPIs([]rpc.API{
         {
             Namespace: "engine",
diff --git a/e2e/actions/l2_engine_api.go b/e2e/actions/l2_engine_api.go
index cec8ec808..cd753860f 100644
--- a/e2e/actions/l2_engine_api.go
+++ b/e2e/actions/l2_engine_api.go
@@ -53,7 +53,7 @@ type L2EngineAPI struct {
     // L2 block building data
     blockProcessor *BlockProcessor
     pendingIndices map[common.Address]uint64 // per account, how many txs from the pool were already included in the block, since the pool is lagging behind block mining.
-    l2ForceEmpty bool // when no additional txs may be processed (i.e. when proposer drift runs out)
+    l2ForceEmpty bool // when no additional txs may be processed (i.e. when sequencer drift runs out)
     l2TxFailed []*types.Transaction // log of failed transactions which could not be included
     payloadID engine.PayloadID // ID of payload that is currently being built
diff --git a/e2e/actions/l2_runtime.go b/e2e/actions/l2_runtime.go
index 5703eda00..f228cf7e8 100644
--- a/e2e/actions/l2_runtime.go
+++ b/e2e/actions/l2_runtime.go
@@ -26,8 +26,8 @@ type Runtime struct {
     sd *e2eutils.SetupData
     dp *e2eutils.DeployParams
     miner *L1Miner
-    propEngine *L2Engine
-    proposer *L2Proposer
+    seqEngine *L2Engine
+    sequencer *L2Sequencer
     batcher *L2Batcher
     validator *L2Validator
     challenger1 *L2Validator
@@ -58,19 +58,19 @@ func defaultRuntime(gt *testing.T) Runtime {
         sd: sd,
         l: l,
     }
-    rt.miner, rt.propEngine, rt.proposer = setupProposerTest(rt.t, rt.sd, rt.l)
+    rt.miner, rt.seqEngine, rt.sequencer = setupSequencerTest(rt.t, rt.sd, rt.l)
     rt.setupBatcher()
     return rt
 }
 func (rt *Runtime) setupBatcher() {
-    rollupPropCl := rt.proposer.RollupClient()
+    rollupSeqCl := rt.sequencer.RollupClient()
     batcher := NewL2Batcher(rt.l, rt.sd.RollupCfg, &BatcherCfg{
         MinL1TxSize: 0,
         MaxL1TxSize: 128_000,
         BatcherKey: rt.dp.Secrets.Batcher,
-    }, rollupPropCl, rt.miner.EthClient(), rt.propEngine.EthClient())
+    }, rollupSeqCl, rt.miner.EthClient(), rt.seqEngine.EthClient())
     rt.batcher = batcher
 }
@@ -112,7 +112,7 @@ func (rt *Runtime) setupMaliciousGuardian() {
 func (rt *Runtime) honestValidator(pk *ecdsa.PrivateKey) *L2Validator {
     // setup mockup rpc for returning valid output
-    validatorRPC := e2eutils.NewHonestL2RPC(rt.proposer.RPCClient())
+    validatorRPC := e2eutils.NewHonestL2RPC(rt.sequencer.RPCClient())
     validatorRollupClient := sources.NewRollupClient(validatorRPC)
     validator := NewL2Validator(rt.t, rt.l, &ValidatorCfg{
         OutputOracleAddr: rt.sd.DeploymentsL1.L2OutputOracleProxy,
@@ -121,14 +121,14 @@ func (rt *Runtime) honestValidator(pk *ecdsa.PrivateKey) *L2Validator {
         SecurityCouncilAddr: rt.sd.DeploymentsL1.SecurityCouncilProxy,
         ValidatorKey: pk,
         AllowNonFinalized: false,
-    }, rt.miner.EthClient(), rt.propEngine.EthClient(), validatorRollupClient)
+    }, rt.miner.EthClient(), rt.seqEngine.EthClient(), validatorRollupClient)
     validatorRPC.SetTargetBlockNumber(rt.targetInvalidBlockNumber)
     return validator
 }
 func (rt *Runtime) maliciousValidator(pk *ecdsa.PrivateKey) *L2Validator {
     // setup mockup rpc for returning invalid output
-    validatorRPC := e2eutils.NewMaliciousL2RPC(rt.proposer.RPCClient())
+    validatorRPC := e2eutils.NewMaliciousL2RPC(rt.sequencer.RPCClient())
     validatorRollupClient := sources.NewRollupClient(validatorRPC)
     validator := NewL2Validator(rt.t, rt.l, &ValidatorCfg{
         OutputOracleAddr: rt.sd.DeploymentsL1.L2OutputOracleProxy,
@@ -137,7 +137,7 @@ func (rt *Runtime) maliciousValidator(pk *ecdsa.PrivateKey) *L2Validator {
         SecurityCouncilAddr: rt.sd.DeploymentsL1.SecurityCouncilProxy,
         ValidatorKey: pk,
         AllowNonFinalized: false,
-    }, rt.miner.EthClient(), rt.propEngine.EthClient(), validatorRollupClient)
+    }, rt.miner.EthClient(), rt.seqEngine.EthClient(), validatorRollupClient)
     validatorRPC.SetTargetBlockNumber(rt.targetInvalidBlockNumber)
     return validator
 }
@@ -170,9 +170,9 @@ func (rt *Runtime) setupOutputSubmitted() {
     // L1 block
     rt.miner.ActEmptyBlock(rt.t)
     // L2 block
-    rt.proposer.ActL1HeadSignal(rt.t)
-    rt.proposer.ActL2PipelineFull(rt.t)
-    rt.proposer.ActBuildToL1Head(rt.t)
+    rt.sequencer.ActL1HeadSignal(rt.t)
+    rt.sequencer.ActL2PipelineFull(rt.t)
+    rt.sequencer.ActBuildToL1Head(rt.t)
     // submit and include in L1
     rt.batcher.ActSubmitAll(rt.t)
     rt.miner.includeL1Block(rt.t, rt.dp.Addresses.Batcher)
@@ -182,9 +182,9 @@ func (rt *Runtime) setupOutputSubmitted() {
     rt.miner.ActL1FinalizeNext(rt.t)
     rt.miner.ActL1FinalizeNext(rt.t)
     // derive and see the L2 chain fully finalize
-    rt.proposer.ActL2PipelineFull(rt.t)
-    rt.proposer.ActL1SafeSignal(rt.t)
-    rt.proposer.ActL1FinalizedSignal(rt.t)
+    rt.sequencer.ActL2PipelineFull(rt.t)
+    rt.sequencer.ActL1SafeSignal(rt.t)
+    rt.sequencer.ActL1FinalizedSignal(rt.t)
 }
 // deposit bond for validator
@@ -196,7 +196,7 @@ func (rt *Runtime) setupOutputSubmitted() {
     require.NoError(rt.t, err)
     require.Equal(rt.t, new(big.Int).SetUint64(defaultDepositAmount), bal)
-    require.Equal(rt.t, rt.proposer.SyncStatus().UnsafeL2, rt.proposer.SyncStatus().FinalizedL2)
+    require.Equal(rt.t, rt.sequencer.SyncStatus().UnsafeL2, rt.sequencer.SyncStatus().FinalizedL2)
     // create l2 output submission transactions until there is nothing left to submit
     for {
@@ -220,7 +220,7 @@ func (rt *Runtime) setupChallenge(challenger *L2Validator) {
     // check that the output root that L1 stores is different from challenger's output root
     // NOTE(chokobole): Comment these 2 lines because of the reason above.
     // If Proto Dank Sharding is introduced, the below code fix may be restored.
-    // block := proposer.SyncStatus().FinalizedL2
+    // block := sequencer.SyncStatus().FinalizedL2
     // outputOnL1, err := outputOracleContract.GetL2OutputAfter(nil, new(big.Int).SetUint64(block.Number))
     targetBlockNum := big.NewInt(int64(rt.targetInvalidBlockNumber))
     var err error
@@ -228,7 +228,7 @@ func (rt *Runtime) setupChallenge(challenger *L2Validator) {
     require.NoError(rt.t, err)
     rt.outputOnL1, err = rt.outputOracleContract.GetL2OutputAfter(nil, targetBlockNum)
     require.NoError(rt.t, err)
-    block, err := rt.propEngine.EthClient().BlockByNumber(rt.t.Ctx(), targetBlockNum)
+    block, err := rt.seqEngine.EthClient().BlockByNumber(rt.t.Ctx(), targetBlockNum)
     require.NoError(rt.t, err)
     require.Less(rt.t, block.Time(), rt.outputOnL1.Timestamp.Uint64(), "output is registered with L1 timestamp of L2 tx output submission, past L2 block")
     outputComputed := challenger.fetchOutput(rt.t, rt.outputOnL1.L2BlockNumber)
diff --git a/e2e/actions/l2_proposer.go b/e2e/actions/l2_sequencer.go
similarity index 75%
rename from e2e/actions/l2_proposer.go
rename to e2e/actions/l2_sequencer.go
index 52b8a71e0..5e593796e 100644
--- a/e2e/actions/l2_proposer.go
+++ b/e2e/actions/l2_sequencer.go
@@ -16,7 +16,7 @@ import (
     "github.com/kroma-network/kroma/e2e/e2eutils"
 )
-// MockL1OriginSelector is a shim to override the origin as proposer, so we can force it to stay on an older origin.
+// MockL1OriginSelector is a shim to override the origin as sequencer, so we can force it to stay on an older origin.
 type MockL1OriginSelector struct {
     actual *driver.L1OriginSelector
     originOverride eth.L1BlockRef // override which origin gets picked
@@ -29,34 +29,34 @@ func (m *MockL1OriginSelector) FindL1Origin(ctx context.Context, l2Head eth.L2Bl
     return m.actual.FindL1Origin(ctx, l2Head)
 }
-// L2Proposer is an actor that functions like a rollup node,
+// L2Sequencer is an actor that functions like a rollup node,
 // without the full P2P/API/Node stack, but just the derivation state, and simplified driver with sequencing ability.
-type L2Proposer struct {
+type L2Sequencer struct {
     L2Syncer
-    proposer *driver.Proposer
+    sequencer *driver.Sequencer
     failL2GossipUnsafeBlock error // mock error
     mockL1OriginSelector *MockL1OriginSelector
 }
-func NewL2Proposer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, propConfDepth uint64) *L2Proposer {
+func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, eng L2API, cfg *rollup.Config, seqConfDepth uint64) *L2Sequencer {
     syncer := NewL2Syncer(t, log, l1, eng, cfg)
     attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1, eng)
-    propConfDepthL1 := driver.NewConfDepth(propConfDepth, syncer.l1State.L1Head, l1)
+    seqConfDepthL1 := driver.NewConfDepth(seqConfDepth, syncer.l1State.L1Head, l1)
     l1OriginSelector := &MockL1OriginSelector{
-        actual: driver.NewL1OriginSelector(log, cfg, propConfDepthL1),
+        actual: driver.NewL1OriginSelector(log, cfg, seqConfDepthL1),
     }
-    return &L2Proposer{
+    return &L2Sequencer{
         L2Syncer: *syncer,
-        proposer: driver.NewProposer(log, cfg, syncer.derivation, attrBuilder, l1OriginSelector, metrics.NoopMetrics),
+        sequencer: driver.NewSequencer(log, cfg, syncer.derivation, attrBuilder, l1OriginSelector, metrics.NoopMetrics),
         mockL1OriginSelector: l1OriginSelector,
         failL2GossipUnsafeBlock: nil,
     }
 }
-func setupProposerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Proposer) {
+func setupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Miner, *L2Engine, *L2Sequencer) {
     jwtPath := e2eutils.WriteDefaultJWT(t)
     miner := NewL1Miner(t, log, sd.L1Cfg)
@@ -67,16 +67,16 @@ func setupProposerTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1Mi
     l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg))
     require.NoError(t, err)
-    proposer := NewL2Proposer(t, log, l1F, l2Cl, sd.RollupCfg, 0)
-    return miner, engine, proposer
+    sequencer := NewL2Sequencer(t, log, l1F, l2Cl, sd.RollupCfg, 0)
+    return miner, engine, sequencer
 }
 // ActL2StartBlock starts building of a new L2 block on top of the head
-func (p *L2Proposer) ActL2StartBlock(t Testing) {
+func (p *L2Sequencer) ActL2StartBlock(t Testing) {
     p.ActL2StartBlockCheckErr(t, nil)
 }
-func (p *L2Proposer) ActL2StartBlockCheckErr(t Testing, checkErr error) {
+func (p *L2Sequencer) ActL2StartBlockCheckErr(t Testing, checkErr error) {
     if !p.l2PipelineIdle {
         t.InvalidAction("cannot start L2 build when derivation is not idle")
         return
@@ -86,7 +86,7 @@ func (p *L2Proposer) ActL2StartBlockCheckErr(t Testing, checkErr error) {
         return
     }
-    err := p.proposer.StartBuildingBlock(t.Ctx())
+    err := p.sequencer.StartBuildingBlock(t.Ctx())
     if checkErr == nil {
         require.NoError(t, err, "failed to start block building")
     } else {
@@ -103,14 +103,14 @@ func (p *L2Proposer) ActL2StartBlockCheckErr(t Testing, checkErr error) {
 }
 // ActL2EndBlock completes a new L2 block and applies it to the L2 chain as new canonical unsafe head
-func (p *L2Proposer) ActL2EndBlock(t Testing) {
+func (p *L2Sequencer) ActL2EndBlock(t Testing) {
     if !p.l2Building {
         t.InvalidAction("cannot end L2 block building when no block is being built")
         return
     }
     p.l2Building = false
-    _, err := p.proposer.CompleteBuildingBlock(t.Ctx())
+    _, err := p.sequencer.CompleteBuildingBlock(t.Ctx())
     // TODO: there may be legitimate temporary errors here, if we mock engine API RPC-failure.
     // For advanced tests we can catch those and print a warning instead.
     require.NoError(t, err)
@@ -118,8 +118,8 @@ func (p *L2Proposer) ActL2EndBlock(t Testing) {
     // TODO: action-test publishing of payload on p2p
 }
-// ActL2KeepL1Origin makes the proposer use the current L1 origin, even if the next origin is available.
-func (p *L2Proposer) ActL2KeepL1Origin(t Testing) {
+// ActL2KeepL1Origin makes the sequencer use the current L1 origin, even if the next origin is available.
+func (p *L2Sequencer) ActL2KeepL1Origin(t Testing) {
     parent := p.derivation.UnsafeL2Head()
     // force old origin, for testing purposes
     oldOrigin, err := p.l1.L1BlockRefByHash(t.Ctx(), parent.L1Origin.Hash)
@@ -128,7 +128,7 @@ func (p *L2Proposer) ActL2KeepL1Origin(t Testing) {
 }
 // ActBuildToL1Head builds empty blocks until (incl.) the L1 head becomes the L2 origin
-func (p *L2Proposer) ActBuildToL1Head(t Testing) {
+func (p *L2Sequencer) ActBuildToL1Head(t Testing) {
     for p.derivation.UnsafeL2Head().L1Origin.Number < p.l1State.L1Head().Number {
         p.ActL2PipelineFull(t)
         p.ActL2StartBlock(t)
@@ -137,7 +137,7 @@ func (p *L2Proposer) ActBuildToL1Head(t Testing) {
 }
 // ActBuildToL1HeadUnsafe builds empty blocks until (incl.) the L1 head becomes the L1 origin of the L2 head
-func (p *L2Proposer) ActBuildToL1HeadUnsafe(t Testing) {
+func (p *L2Sequencer) ActBuildToL1HeadUnsafe(t Testing) {
     for p.derivation.UnsafeL2Head().L1Origin.Number < p.l1State.L1Head().Number {
         // Note: the derivation pipeline does not run, we are just sequencing a block on top of the existing L2 chain.
         p.ActL2StartBlock(t)
@@ -146,7 +146,7 @@ func (p *L2Proposer) ActBuildToL1HeadUnsafe(t Testing) {
 }
 // ActBuildToL1HeadExcl builds empty blocks until (excl.) the L1 head becomes the L1 origin of the L2 head
-func (p *L2Proposer) ActBuildToL1HeadExcl(t Testing) {
+func (p *L2Sequencer) ActBuildToL1HeadExcl(t Testing) {
     for {
         p.ActL2PipelineFull(t)
         nextOrigin, err := p.mockL1OriginSelector.FindL1Origin(t.Ctx(), p.derivation.UnsafeL2Head())
@@ -160,7 +160,7 @@ func (p *L2Proposer) ActBuildToL1HeadExcl(t Testing) {
 }
 // ActBuildToL1HeadExclUnsafe builds empty blocks until (excl.) the L1 head becomes the L1 origin of the L2 head, without safe-head progression.
-func (p *L2Proposer) ActBuildToL1HeadExclUnsafe(t Testing) {
+func (p *L2Sequencer) ActBuildToL1HeadExclUnsafe(t Testing) {
     for {
         // Note: the derivation pipeline does not run, we are just sequencing a block on top of the existing L2 chain.
         nextOrigin, err := p.mockL1OriginSelector.FindL1Origin(t.Ctx(), p.derivation.UnsafeL2Head())
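setupSequencerTest passes a confirmation depth of 0 to NewL2Sequencer, so the origin selector may adopt the L1 tip immediately; a non-zero depth would hold the selectable origin that many blocks behind the head. A sketch of what the conf-depth wrapper provides (illustrative function, not the real driver.NewConfDepth API):

package main

import "fmt"

// confirmedHead returns the highest L1 block number the sequencer may adopt
// as an origin, given a confirmation depth. With depth 0, as in
// setupSequencerTest above, new L1 blocks become eligible origins at once.
func confirmedHead(l1Head, confDepth uint64) uint64 {
	if confDepth > l1Head {
		return 0
	}
	return l1Head - confDepth
}

func main() {
	fmt.Println(confirmedHead(100, 0)) // 100: adopt the tip directly
	fmt.Println(confirmedHead(100, 4)) // 96: stay 4 blocks behind the tip
}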
diff --git a/e2e/actions/l2_proposer_test.go b/e2e/actions/l2_sequencer_test.go
similarity index 63%
rename from e2e/actions/l2_proposer_test.go
rename to e2e/actions/l2_sequencer_test.go
index 3da9a7e22..2c461042f 100644
--- a/e2e/actions/l2_proposer_test.go
+++ b/e2e/actions/l2_sequencer_test.go
@@ -14,20 +14,20 @@ import (
     "github.com/kroma-network/kroma/e2e/e2eutils"
 )
-func TestL2Proposer_ProposerDrift(gt *testing.T) {
+func TestL2Sequencer_SequencerDrift(gt *testing.T) {
     t := NewDefaultTesting(gt)
     p := &e2eutils.TestParams{
-        MaxProposerDrift: 20, // larger than L1 block time we simulate in this test (12)
-        ProposerWindowSize: 24,
-        ChannelTimeout: 20,
+        MaxSequencerDrift: 20, // larger than L1 block time we simulate in this test (12)
+        SequencerWindowSize: 24,
+        ChannelTimeout: 20,
     }
     dp := e2eutils.MakeDeployParams(t, p)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    miner, engine, proposer := setupProposerTest(t, sd, log)
+    miner, engine, sequencer := setupSequencerTest(t, sd, log)
     miner.ActL1SetFeeRecipient(common.Address{'A'})
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     signer := types.LatestSigner(sd.L2Cfg.Config)
     cl := engine.EthClient()
@@ -47,65 +47,65 @@ func TestL2Sequencer_SequencerDrift(gt *testing.T) {
     }
     makeL2BlockWithAliceTx := func() {
         aliceTx()
-        proposer.ActL2StartBlock(t)
+        sequencer.ActL2StartBlock(t)
         engine.ActL2IncludeTx(dp.Addresses.Alice)(t) // include a test tx from alice
-        proposer.ActL2EndBlock(t)
+        sequencer.ActL2EndBlock(t)
     }
     // L1 makes a block
     miner.ActL1StartBlock(12)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
+    sequencer.ActL1HeadSignal(t)
     origin := miner.l1Chain.CurrentBlock()
     // L2 makes blocks to catch up
-    for proposer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
+    for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime < origin.Time {
         makeL2BlockWithAliceTx()
-        require.Equal(t, uint64(0), proposer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
+        require.Equal(t, uint64(0), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "no L1 origin change before time matches")
     }
     // Check that we adopted the origin as soon as we could (conf depth is 0)
     makeL2BlockWithAliceTx()
-    require.Equal(t, uint64(1), proposer.SyncStatus().UnsafeL2.L1Origin.Number, "L1 origin changes as soon as L2 time equals or exceeds L1 time")
+    require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "L1 origin changes as soon as L2 time equals or exceeds L1 time")
     miner.ActL1StartBlock(12)(t)
     miner.ActL1EndBlock(t)
-    proposer.ActL1HeadSignal(t)
+    sequencer.ActL1HeadSignal(t)
-    // Make blocks up till the proposer drift is about to surpass, but keep the old L1 origin
-    for proposer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxProposerDrift {
-        proposer.ActL2KeepL1Origin(t)
+    // Make blocks up till the sequencer drift is about to surpass, but keep the old L1 origin
+    for sequencer.SyncStatus().UnsafeL2.Time+sd.RollupCfg.BlockTime <= origin.Time+sd.RollupCfg.MaxSequencerDrift {
+        sequencer.ActL2KeepL1Origin(t)
         makeL2BlockWithAliceTx()
-        require.Equal(t, uint64(1), proposer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
+        require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number, "expected to keep old L1 origin")
     }
-    // We passed the proposer drift: we can still keep the old origin, but can't include any txs
-    proposer.ActL2KeepL1Origin(t)
-    proposer.ActL2StartBlock(t)
-    require.True(t, engine.engineApi.ForcedEmpty(), "engine should not be allowed to include anything after proposer drift is surpassed")
+    // We passed the sequencer drift: we can still keep the old origin, but can't include any txs
+    sequencer.ActL2KeepL1Origin(t)
+    sequencer.ActL2StartBlock(t)
+    require.True(t, engine.engineApi.ForcedEmpty(), "engine should not be allowed to include anything after sequencer drift is surpassed")
 }
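The rule this test exercises, as standalone arithmetic (the real check lives in the engine and derivation code): a block may keep the old origin while its timestamp stays within MaxSequencerDrift of that origin's timestamp; past that, it may still be built, but must be empty.

package main

import "fmt"

// mustBeEmpty sketches the forced-empty condition at the end of
// TestL2Sequencer_SequencerDrift above: once the next L2 block's timestamp
// exceeds the old origin's timestamp plus MaxSequencerDrift, no txs may be
// included. Illustrative helper, not the real API.
func mustBeEmpty(nextL2Time, originTime, maxSequencerDrift uint64) bool {
	return nextL2Time > originTime+maxSequencerDrift
}

func main() {
	const origin, drift = 100, 20
	fmt.Println(mustBeEmpty(118, origin, drift)) // false: still within drift
	fmt.Println(mustBeEmpty(122, origin, drift)) // true: txs no longer allowed
}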
-// This tests a chain halt where the proposer would build an unsafe L2 block with a L1 origin
+// This tests a chain halt where the sequencer would build an unsafe L2 block with a L1 origin
 // that then gets reorged out, while the syncer-codepath only ever sees the valid post-reorg L1 chain.
-func TestL2Proposer_ProposerOnlyReorg(gt *testing.T) {
+func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
     t := NewDefaultTesting(gt)
     dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
     log := testlog.Logger(t, log.LvlDebug)
-    miner, _, proposer := setupProposerTest(t, sd, log)
+    miner, _, sequencer := setupSequencerTest(t, sd, log)
-    // Proposer at first only recognizes the genesis as safe.
+    // Sequencer at first only recognizes the genesis as safe.
     // The rest of the L1 chain will be incorporated as L1 origins into unsafe L2 blocks.
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     // build L1 block with coinbase A
     miner.ActL1SetFeeRecipient(common.Address{'A'})
     miner.ActEmptyBlock(t)
-    // Proposer builds L2 blocks, until (incl.) it creates a L2 block with a L1 origin that has A as coinbase address
-    proposer.ActL1HeadSignal(t)
-    proposer.ActBuildToL1HeadUnsafe(t)
+    // Sequencer builds L2 blocks, until (incl.) it creates a L2 block with a L1 origin that has A as coinbase address
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActBuildToL1HeadUnsafe(t)
-    status := proposer.SyncStatus()
+    status := sequencer.SyncStatus()
     require.Zero(t, status.SafeL2.L1Origin.Number, "no safe head progress")
     require.Equal(t, status.HeadL1.Hash, status.UnsafeL2.L1Origin.Hash, "have head L1 origin")
     // reorg out block with coinbase A, and make a block with coinbase B
@@ -117,15 +117,15 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
     // (height is used as heuristic to not flip-flop between chains too frequently)
     miner.ActEmptyBlock(t)
-    // Make the proposer aware of the new head, and try to sync it.
+    // Make the sequencer aware of the new head, and try to sync it.
     // Since the safe chain never incorporated the now reorged L1 block with coinbase A,
     // it will sync the new L1 chain fine.
     // No batches are submitted yet however,
     // so it'll keep the L2 block with the old L1 origin, since no conflict is detected.
-    proposer.ActL1HeadSignal(t)
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL1HeadSignal(t)
+    sequencer.ActL2PipelineFull(t)
     // Syncer should detect the inconsistency of the L1 origin and reset the pipeline to follow the reorg
-    newStatus := proposer.SyncStatus()
+    newStatus := sequencer.SyncStatus()
     require.Zero(t, newStatus.UnsafeL2.L1Origin.Number, "back to genesis block with good L1 origin, drop old unsafe L2 chain with bad L1 origins")
     require.NotEqual(t, status.HeadL1.Hash, newStatus.HeadL1.Hash, "did see the new L1 head change")
     require.Equal(t, newStatus.HeadL1.Hash, newStatus.CurrentL1.Hash, "did sync the new L1 head as syncer")
@@ -135,9 +135,9 @@ func TestL2Sequencer_SequencerOnlyReorg(gt *testing.T) {
     require.NotEqual(t, status.UnsafeL2.L1Origin.Hash, newStatus.HeadL1.ParentHash, "but N+1 cannot fit on N")
     // After hitting a reset error, it resets derivation, and drops the old L1 chain
-    proposer.ActL2PipelineFull(t)
+    sequencer.ActL2PipelineFull(t)
     // Can build new L2 blocks with good L1 origin
-    proposer.ActBuildToL1HeadUnsafe(t)
-    require.Equal(t, newStatus.HeadL1.Hash, proposer.SyncStatus().UnsafeL2.L1Origin.Hash, "build L2 chain with new correct L1 origins")
+    sequencer.ActBuildToL1HeadUnsafe(t)
+    require.Equal(t, newStatus.HeadL1.Hash, sequencer.SyncStatus().UnsafeL2.L1Origin.Hash, "build L2 chain with new correct L1 origins")
 }
diff --git a/e2e/actions/l2_syncer.go b/e2e/actions/l2_syncer.go
index a4b4b5bde..c9a2ffaa3 100644
--- a/e2e/actions/l2_syncer.go
+++ b/e2e/actions/l2_syncer.go
@@ -122,12 +122,12 @@ func (s *l2SyncerBackend) ResetDerivationPipeline(ctx context.Context) error {
     return nil
 }
-func (s *l2SyncerBackend) StartProposer(ctx context.Context, blockHash common.Hash) error {
+func (s *l2SyncerBackend) StartSequencer(ctx context.Context, blockHash common.Hash) error {
     return nil
 }
-func (s *l2SyncerBackend) StopProposer(ctx context.Context) (common.Hash, error) {
-    return common.Hash{}, errors.New("stopping the L2Syncer proposer is not supported")
+func (s *l2SyncerBackend) StopSequencer(ctx context.Context) (common.Hash, error) {
+    return common.Hash{}, errors.New("stopping the L2Syncer sequencer is not supported")
 }
 func (s *L2Syncer) L2Finalized() eth.L2BlockRef {
diff --git a/e2e/actions/l2_syncer_test.go b/e2e/actions/l2_syncer_test.go
index 8e5e1dd20..72210125b 100644
--- a/e2e/actions/l2_syncer_test.go
+++ b/e2e/actions/l2_syncer_test.go
@@ -27,12 +27,12 @@ func setupSyncerOnlyTest(t Testing, sd *e2eutils.SetupData, log log.Logger) (*L1
     return miner, engine, syncer
 }
-func TestL2Syncer_ProposerWindow(gt *testing.T) {
+func TestL2Syncer_SequenceWindow(gt *testing.T) {
     t := NewDefaultTesting(gt)
     p := &e2eutils.TestParams{
-        MaxProposerDrift: 10,
-        ProposerWindowSize: 24,
-        ChannelTimeout: 10,
+        MaxSequencerDrift: 10,
+        SequencerWindowSize: 24,
+        ChannelTimeout: 10,
     }
     dp := e2eutils.MakeDeployParams(t, p)
     sd := e2eutils.Setup(t, dp, defaultAlloc)
@@ -40,8 +40,8 @@ func TestL2Syncer_SequenceWindow(gt *testing.T) {
     miner, engine, syncer := setupSyncerOnlyTest(t, sd, log)
     miner.ActL1SetFeeRecipient(common.Address{'A'})
-    // Make two proposer windows worth of empty L1 blocks. After we pass the first proposer window, the L2 chain should get blocks
-    for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.ProposerWindowSize*2 {
+    // Make two sequencer windows worth of empty L1 blocks. After we pass the first sequencer window, the L2 chain should get blocks
+    for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
         miner.ActL1StartBlock(10)(t)
         miner.ActL1EndBlock(t)
@@ -49,17 +49,17 @@ func TestL2Syncer_SequenceWindow(gt *testing.T) {
         l1Head := miner.l1Chain.CurrentBlock().Number.Uint64()
         expectedL1Origin := uint64(0)
-        // as soon as we complete the proposer window, we force-adopt the L1 origin
-        if l1Head >= sd.RollupCfg.ProposerWindowSize {
-            expectedL1Origin = l1Head - sd.RollupCfg.ProposerWindowSize
+        // as soon as we complete the sequencer window, we force-adopt the L1 origin
+        if l1Head >= sd.RollupCfg.SeqWindowSize {
+            expectedL1Origin = l1Head - sd.RollupCfg.SeqWindowSize
         }
         require.Equal(t, expectedL1Origin, syncer.SyncStatus().SafeL2.L1Origin.Number, "L1 origin is forced in, given enough L1 blocks pass by")
         require.LessOrEqual(t, miner.l1Chain.GetBlockByNumber(expectedL1Origin).Time(), engine.l2Chain.CurrentBlock().Time, "L2 time higher than L1 origin time")
     }
     tip2N := syncer.SyncStatus()
-    // Do a deep L1 reorg as deep as a proposer window, this should affect the safe L2 chain
-    miner.ActL1RewindDepth(sd.RollupCfg.ProposerWindowSize)(t)
+    // Do a deep L1 reorg as deep as a sequencer window, this should affect the safe L2 chain
+    miner.ActL1RewindDepth(sd.RollupCfg.SeqWindowSize)(t)
     // Without new L1 block, the L1 appears to not be synced, and the node shouldn't reorg
     syncer.ActL2PipelineFull(t)
@@ -71,11 +71,11 @@ func TestL2Syncer_SequenceWindow(gt *testing.T) {
     miner.ActL1EndBlock(t)
     reorgL1Block := miner.l1Chain.CurrentBlock()
-    // Still no reorg, we need more L1 blocks first, before the reorged L1 block is forced in by proposer window
+    // Still no reorg, we need more L1 blocks first, before the reorged L1 block is forced in by sequencer window
     syncer.ActL2PipelineFull(t)
     require.Equal(t, tip2N.SafeL2, syncer.SyncStatus().SafeL2)
-    for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.ProposerWindowSize*2 {
+    for miner.l1Chain.CurrentBlock().Number.Uint64() < sd.RollupCfg.SeqWindowSize*2 {
         miner.ActL1StartBlock(10)(t)
         miner.ActL1EndBlock(t)
     }
pass after each submission interval before submitting the output @@ -46,9 +46,9 @@ func TestValidator(gt *testing.T) { // L1 block miner.ActEmptyBlock(t) // L2 block - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) // submit and include in L1 batcher.ActSubmitAll(t) miner.includeL1Block(t, dp.Addresses.Batcher) @@ -58,16 +58,16 @@ func TestValidator(gt *testing.T) { miner.ActL1FinalizeNext(t) miner.ActL1FinalizeNext(t) // derive and see the L2 chain fully finalize - proposer.ActL2PipelineFull(t) - proposer.ActL1SafeSignal(t) - proposer.ActL1FinalizedSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActL1SafeSignal(t) + sequencer.ActL1FinalizedSignal(t) } // deposit bond for validator validator.ActDeposit(t, 1_000) miner.includeL1Block(t, validator.address) - require.Equal(t, proposer.SyncStatus().UnsafeL2, proposer.SyncStatus().FinalizedL2) + require.Equal(t, sequencer.SyncStatus().UnsafeL2, sequencer.SyncStatus().FinalizedL2) // create l2 output submission transactions until there is nothing left to submit for { waitTime := validator.CalculateWaitTime(t) @@ -90,16 +90,16 @@ func TestValidator(gt *testing.T) { require.NoError(t, err) // NOTE(chokobole): Comment these 2 lines because of the reason above. // If Proto Dank Sharding is introduced, the below code fix may be restored. - // block := proposer.SyncStatus().FinalizedL2 + // block := sequencer.SyncStatus().FinalizedL2 // outputOnL1, err := outputOracleContract.GetL2OutputAfter(nil, new(big.Int).SetUint64(block.Number)) blockNum, err := outputOracleContract.LatestBlockNumber(nil) require.NoError(t, err) outputOnL1, err := outputOracleContract.GetL2OutputAfter(nil, blockNum) require.NoError(t, err) - block, err := propEngine.EthClient().BlockByNumber(t.Ctx(), blockNum) + block, err := seqEngine.EthClient().BlockByNumber(t.Ctx(), blockNum) require.NoError(t, err) require.Less(t, block.Time(), outputOnL1.Timestamp.Uint64(), "output is registered with L1 timestamp of L2 tx output submission, past L2 block") - outputComputed, err := proposer.RollupClient().OutputAtBlock(t.Ctx(), blockNum.Uint64()) + outputComputed, err := sequencer.RollupClient().OutputAtBlock(t.Ctx(), blockNum.Uint64()) require.NoError(t, err) require.Equal(t, eth.Bytes32(outputOnL1.OutputRoot), outputComputed.OutputRoot, "output roots must match") } diff --git a/e2e/actions/reorg_test.go b/e2e/actions/reorg_test.go index d8a8187b8..8d4ae59ba 100644 --- a/e2e/actions/reorg_test.go +++ b/e2e/actions/reorg_test.go @@ -20,7 +20,7 @@ import ( "github.com/kroma-network/kroma/e2e/e2eutils" ) -func setupReorgTest(t Testing, config *e2eutils.TestParams) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Proposer, *L2Engine, *L2Syncer, *L2Engine, *L2Batcher) { +func setupReorgTest(t Testing, config *e2eutils.TestParams) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Syncer, *L2Engine, *L2Batcher) { dp := e2eutils.MakeDeployParams(t, config) sd := e2eutils.Setup(t, dp, defaultAlloc) @@ -29,34 +29,34 @@ func setupReorgTest(t Testing, config *e2eutils.TestParams) (*e2eutils.SetupData return setupReorgTestActors(t, dp, sd, log) } -func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.SetupData, log log.Logger) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Proposer, *L2Engine, *L2Syncer, *L2Engine, *L2Batcher) { - miner, propEngine, proposer := 
setupProposerTest(t, sd, log) +func setupReorgTestActors(t Testing, dp *e2eutils.DeployParams, sd *e2eutils.SetupData, log log.Logger) (*e2eutils.SetupData, *e2eutils.DeployParams, *L1Miner, *L2Sequencer, *L2Engine, *L2Syncer, *L2Engine, *L2Batcher) { + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner.ActL1SetFeeRecipient(common.Address{'A'}) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncEngine, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg)) - rollupPropCl := proposer.RollupClient() + rollupSeqCl := sequencer.RollupClient() batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, rollupPropCl, miner.EthClient(), propEngine.EthClient()) - return sd, dp, miner, proposer, propEngine, syncer, syncEngine, batcher + }, rollupSeqCl, miner.EthClient(), seqEngine.EthClient()) + return sd, dp, miner, sequencer, seqEngine, syncer, syncEngine, batcher } func TestReorgOrphanBlock(gt *testing.T) { t := NewDefaultTesting(gt) - sd, _, miner, proposer, _, syncer, syncerEngine, batcher := setupReorgTest(t, defaultRollupTestParams) + sd, _, miner, sequencer, _, syncer, syncerEngine, batcher := setupReorgTest(t, defaultRollupTestParams) syncEngClient := syncerEngine.EngineClient(t, sd.RollupCfg) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) // build empty L1 block miner.ActEmptyBlock(t) // Create L2 blocks, and reference the L1 head as origin - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // submit all new L2 blocks batcher.ActSubmitAll(t) @@ -70,8 +70,8 @@ func TestReorgOrphanBlock(gt *testing.T) { // syncer picks up the L2 chain that was submitted syncer.ActL1HeadSignal(t) syncer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Unsafe(), "syncer syncs from proposer via L1") - require.NotEqual(t, proposer.L2Safe(), proposer.L2Unsafe(), "proposer has not processed L1 yet") + require.Equal(t, syncer.L2Safe(), sequencer.L2Unsafe(), "syncer syncs from sequencer via L1") + require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet") // orphan the L1 block that included the batch tx, and build a new different L1 block miner.ActL1RewindToParent(t) @@ -83,7 +83,7 @@ func TestReorgOrphanBlock(gt *testing.T) { // However, the L2 chain can still be canonical later, since it did not reference the reorged L1 block syncer.ActL1HeadSignal(t) syncer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Safe(), "syncer rewinds safe when L1 reorgs out batch") + require.Equal(t, syncer.L2Safe(), sequencer.L2Safe(), "syncer rewinds safe when L1 reorgs out batch") ref, err := syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe) require.NoError(t, err) require.Equal(t, syncer.L2Safe(), ref, "syncer engine matches rollup client") @@ -101,35 +101,35 @@ func TestReorgOrphanBlock(gt *testing.T) { // sync the syncer again: now it should be safe again syncer.ActL1HeadSignal(t) syncer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Unsafe(), "syncer syncs from proposer via replayed batch on L1") + require.Equal(t, syncer.L2Safe(), sequencer.L2Unsafe(), "syncer syncs from sequencer via replayed batch on L1") ref, err = syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe) require.NoError(t, err) require.Equal(t, syncer.L2Safe(), ref, "syncer engine matches rollup client") - 
proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Safe(), "syncer and proposer see same safe L2 block, while only syncer dealt with the orphan and replay") + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + require.Equal(t, syncer.L2Safe(), sequencer.L2Safe(), "syncer and sequencer see same safe L2 block, while only syncer dealt with the orphan and replay") } func TestReorgFlipFlop(gt *testing.T) { t := NewDefaultTesting(gt) - sd, _, miner, proposer, _, syncer, syncerEng, batcher := setupReorgTest(t, defaultRollupTestParams) + sd, _, miner, sequencer, _, syncer, syncerEng, batcher := setupReorgTest(t, defaultRollupTestParams) minerCl := miner.L1Client(t, sd.RollupCfg) syncEngClient := syncerEng.EngineClient(t, sd.RollupCfg) checkSyncEngine := func() { // TODO: geth preserves L2 chain with origin A1 after flip-flopping to B? - //ref, err := syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Unsafe) - //require.NoError(t, err) - //t.Logf("l2 unsafe head %s with origin %s", ref, ref.L1Origin) - //require.NotEqual(t, syncer.L2Unsafe().Hash, ref.ParentHash, "TODO off by one, engine syncs A0 after reorging back from B, while rollup node only inserts up to A0 (excl.)") - //require.Equal(t, syncer.L2Unsafe(), ref, "syncer safe head of engine matches rollup client") + // ref, err := syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Unsafe) + // require.NoError(t, err) + // t.Logf("l2 unsafe head %s with origin %s", ref, ref.L1Origin) + // require.NotEqual(t, syncer.L2Unsafe().Hash, ref.ParentHash, "TODO off by one, engine syncs A0 after reorging back from B, while rollup node only inserts up to A0 (excl.)") + // require.Equal(t, syncer.L2Unsafe(), ref, "syncer safe head of engine matches rollup client") ref, err := syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe) require.NoError(t, err) require.Equal(t, syncer.L2Safe(), ref, "syncer safe head of engine matches rollup client") } - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) // Start building chain A @@ -139,8 +139,8 @@ func TestReorgFlipFlop(gt *testing.T) { require.NoError(t, err) // Create L2 blocks, and reference the L1 head A0 as origin - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // submit all new L2 blocks batcher.ActSubmitAll(t) @@ -192,11 +192,11 @@ func TestReorgFlipFlop(gt *testing.T) { require.Equal(t, syncer.L2Safe(), syncer.L2Unsafe(), "head is at safe block after L1 reorg") checkSyncEngine() - // and sync the proposer, then build some new L2 blocks, up to and including with L1 origin B2 - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) - require.Equal(t, proposer.L2Unsafe().L1Origin, blockB2.ID(), "B2 is the unsafe L1 origin of proposer now") + // and sync the sequencer, then build some new L2 blocks, up to and including with L1 origin B2 + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) + require.Equal(t, sequencer.L2Unsafe().L1Origin, blockB2.ID(), "B2 is the unsafe L1 origin of sequencer now") // submit all new L2 blocks for chain B, and include in new block B3 batcher.ActSubmitAll(t) @@ -246,13 +246,13 @@ func TestReorgFlipFlop(gt *testing.T) { require.Equal(t, syncer.L2Safe().L1Origin, blockA0.ID(), "B2 is the L1 origin of syncer now") checkSyncEngine() - // sync proposer to the replayed L1 chain A - proposer.ActL1HeadSignal(t) - 
proposer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Safe(), "proposer reorgs to match syncer again") + // sync sequencer to the replayed L1 chain A + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + require.Equal(t, syncer.L2Safe(), sequencer.L2Safe(), "sequencer reorgs to match syncer again") // and adopt the rest of L1 chain A into L2 - proposer.ActBuildToL1Head(t) + sequencer.ActBuildToL1Head(t) // submit the new unsafe A blocks batcher.ActSubmitAll(t) @@ -261,10 +261,10 @@ miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t) miner.ActL1EndBlock(t) - // sync syncer to what ths proposer submitted + // sync syncer to what the sequencer submitted syncer.ActL1HeadSignal(t) syncer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Safe(), proposer.L2Unsafe(), "syncer syncs from proposer") + require.Equal(t, syncer.L2Safe(), sequencer.L2Unsafe(), "syncer syncs from sequencer") require.Equal(t, syncer.L2Safe().L1Origin, blockA4.ID(), "L2 chain origin is A4") checkSyncEngine() } @@ -273,19 +273,19 @@ func TestReorgFlipFlop(gt *testing.T) { // // Steps: // 1. Create an L1 actor -// 2. Ask the L1 actor to build three proposer windows of empty blocks +// 2. Ask the L1 actor to build three sequencer windows of empty blocks // 2.a alice submits a transaction on l2 with an l1 origin of block #35 // 2.b in block #50, include the batch that contains the l2 block with alice's transaction as well // as all other blocks before it. -// 3. Ask the L2 proposer to build a chain that references these L1 blocks +// 3. Ask the L2 sequencer to build a chain that references these L1 blocks // 4. Ask the batch submitter to submit remaining unsafe L2 blocks to L1 // 5. Ask the L1 to include this data // 6. Rewind chain A 21 blocks -// 7. Ask the L1 actor to build one proposer window + 1 empty blocks on chain B +// 7. Ask the L1 actor to build one sequencer window + 1 empty blocks on chain B // 8. Ask the L1 actor to build an empty block in place of the batch submission block on chain A // 9. Ask the L1 actor to create another empty block so that chain B is longer than chain A -// 10. Ask the L2 proposer to send a head signal and run one iteration of the derivation pipeline. -// 11. Ask the L2 proposer build a chain that references chain B's blocks +// 10. Ask the L2 sequencer to send a head signal and run one iteration of the derivation pipeline. +// 11. Ask the L2 sequencer to build a chain that references chain B's blocks // 12. Sync the syncer and assert that the L2 safe head L1 origin has caught up with chain B // 13. Ensure that the parent L2 block of the block that contains Alice's transaction still exists // after the L2 has re-derived from chain B.
@@ -304,10 +304,10 @@ func TestReorgFlipFlop(gt *testing.T) { // - Unsafe head origin is A61 // // Reorg L1 (start: block #61, depth: 22 blocks) -// - Rewind depth: Batch submission block + ProposerWindowSize+1 blocks +// - Rewind depth: Batch submission block + SequencerWindowSize+1 blocks // - Wind back to block #39 // -// Before building L2 to L1 head / syncing syncer & proposer: +// Before building L2 to L1 head / syncing syncer & sequencer: // Syncer // - Unsafe head L1 origin is block #60 // - Safe head L1 origin is at genesis block #60 @@ -327,14 +327,14 @@ func TestDeepReorg(gt *testing.T) { t := NewDefaultTesting(gt) // Create actor and synchronization engine client - sd, dp, miner, proposer, propEngine, syncer, syncerEng, batcher := setupReorgTest(t, &e2eutils.TestParams{ - MaxProposerDrift: 40, - ProposerWindowSize: 20, - ChannelTimeout: 120, - L1BlockTime: 4, + sd, dp, miner, sequencer, seqEngine, syncer, syncerEng, batcher := setupReorgTest(t, &e2eutils.TestParams{ + MaxSequencerDrift: 40, + SequencerWindowSize: 20, + ChannelTimeout: 120, + L1BlockTime: 4, }) minerCl := miner.L1Client(t, sd.RollupCfg) - l2Client := propEngine.EthClient() + l2Client := seqEngine.EthClient() syncEngClient := syncerEng.EngineClient(t, sd.RollupCfg) checkSyncEngine := func() { ref, err := syncEngClient.L2BlockRefByLabel(t.Ctx(), eth.Safe) @@ -349,25 +349,25 @@ func TestDeepReorg(gt *testing.T) { EthCl: l2Client, Signer: types.LatestSigner(sd.L2Cfg.Config), AddressCorpora: addresses, - Bindings: NewL2Bindings(t, l2Client, propEngine.GethClient()), + Bindings: NewL2Bindings(t, l2Client, seqEngine.GethClient()), } alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(0xa57b)), sd.RollupCfg) alice.L2.SetUserEnv(l2UserEnv) // Run one iteration of the L2 derivation pipeline - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) // Start building chain A miner.ActL1SetFeeRecipient(common.Address{0x0A, 0x00}) - // Create a var to store the ref for the second to last block of the second proposer window + // Create a var to store the ref for the second to last block of the second sequencer window var blockA39 eth.L1BlockRef var aliceL2TxBlock types.Block - // Mine enough empty blocks on L1 to reach two proposer windows. - for i := uint64(0); i < sd.RollupCfg.ProposerWindowSize*3; i++ { + // Mine enough empty blocks on L1 to reach three sequencer windows. + for i := uint64(0); i < sd.RollupCfg.SeqWindowSize*3; i++ { // At block #50, send a batch to L1 containing all L2 blocks built up to this point. // This batch contains alice's transaction, and will be reorg'd out of the L1 chain // later in the test.
@@ -381,11 +381,11 @@ func TestDeepReorg(gt *testing.T) { miner.ActEmptyBlock(t) } - // Get the second to last block of the first proposer window + // Get the second to last block of the second sequencer window // This is used later to verify the head of chain B after rewinding - // chain A 1 proposer window + 1 block + Block A1 (batch submission with two - // proposer windows worth of transactions) - if i == sd.RollupCfg.ProposerWindowSize*2-2 { + // chain A 1 sequencer window + 1 block + Block A1 (batch submission with two + // sequencer windows worth of transactions) + if i == sd.RollupCfg.SeqWindowSize*2-2 { var err error blockA39, err = minerCl.L1BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) @@ -399,7 +399,7 @@ func TestDeepReorg(gt *testing.T) { // been re-derived from chain B later on in the test. if i == 35 { // Include alice's transaction on L2 - proposer.ActL2StartBlock(t) + sequencer.ActL2StartBlock(t) // Submit a dummy tx alice.L2.ActResetTxOpts(t) @@ -407,33 +407,33 @@ func TestDeepReorg(gt *testing.T) { alice.L2.ActMakeTx(t) // Include the tx in the block we're making - propEngine.ActL2IncludeTx(alice.Address())(t) + seqEngine.ActL2IncludeTx(alice.Address())(t) // Finalize the L2 block containing alice's transaction - proposer.ActL2EndBlock(t) + sequencer.ActL2EndBlock(t) // Store the ref to the L2 block that the transaction was included in for later. - b0, err := l2Client.BlockByNumber(t.Ctx(), big.NewInt(int64(proposer.L2Unsafe().Number))) + b0, err := l2Client.BlockByNumber(t.Ctx(), big.NewInt(int64(sequencer.L2Unsafe().Number))) require.NoError(t, err, "failed to fetch unsafe head of L2 after submitting alice's transaction") aliceL2TxBlock = *b0 } - // Ask proposer to handle new L1 head and build L2 blocks up to the L1 head - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) + // Ask sequencer to handle new L1 head and build L2 blocks up to the L1 head + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) } // Get the last empty block built in the loop above. - // This will be the last block in the third proposer window. + // This will be the last block in the third sequencer window. blockA60, err := minerCl.L1BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) // Check that the safe head's L1 origin is block A50 before batch submission - require.Equal(t, uint64(50), proposer.L2Safe().L1Origin.Number) + require.Equal(t, uint64(50), sequencer.L2Safe().L1Origin.Number) // Check that the unsafe head's L1 origin is block A60 - require.Equal(t, blockA60.ID(), proposer.L2Unsafe().L1Origin) + require.Equal(t, blockA60.ID(), sequencer.L2Unsafe().L1Origin) // Batch and submit all new L2 blocks that were built above to L1 batcher.ActSubmitAll(t) @@ -445,29 +445,29 @@ func TestDeepReorg(gt *testing.T) { miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t) miner.ActL1EndBlock(t) - // Handle the new head block on both the syncer and the proposer + // Handle the new head block on both the syncer and the sequencer syncer.ActL1HeadSignal(t) - proposer.ActL1HeadSignal(t) + sequencer.ActL1HeadSignal(t) - // Run one iteration of the L2 derivation pipeline on both the syncer and proposer + // Run one iteration of the L2 derivation pipeline on both the syncer and sequencer syncer.ActL2PipelineFull(t) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) // Ensure that the syncer picks up that the L2 blocks were submitted to L1 // and marks them as safe.
// We check that the L2 safe L1 origin is block A240, or the last block - // within the second proposer window. This is the block directly before + // within the second sequencer window. This is the block directly before // the block that included the batch on chain A. require.Equal(t, blockA60.ID(), syncer.L2Safe().L1Origin) checkSyncEngine() - // Perform a deep reorg the size of one proposer window + 2 blocks. + // Perform a deep reorg the size of one sequencer window + 2 blocks. // This will affect the safe L2 chain. - miner.ActL1RewindToParent(t) // Rewind the batch submission - miner.ActL1RewindDepth(sd.RollupCfg.ProposerWindowSize + 1)(t) // Rewind one proposer window + 1 block + miner.ActL1RewindToParent(t) // Rewind the batch submission + miner.ActL1RewindDepth(sd.RollupCfg.SeqWindowSize + 1)(t) // Rewind one sequencer window + 1 block // Ensure that the block we rewinded to on L1 is the second to last block of the first - // proposer window. + // sequencer window. headAfterReorg, err := minerCl.L1BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) @@ -475,31 +475,31 @@ func TestDeepReorg(gt *testing.T) { require.Equal(t, blockA39.ID(), headAfterReorg.ID()) // Ensure that the safe L2 head has not been altered yet- we have not issued - // a head signal to the proposer or syncer post reorg. + // a head signal to the sequencer or syncer post reorg. require.Equal(t, blockA60.ID(), syncer.L2Safe().L1Origin) - require.Equal(t, blockA60.ID(), proposer.L2Safe().L1Origin) + require.Equal(t, blockA60.ID(), sequencer.L2Safe().L1Origin) // Ensure that the L2 unsafe head has not been altered yet- we have not issued - // a head signal to the proposer or syncer post reorg. + // a head signal to the sequencer or syncer post reorg. require.Equal(t, blockA60.ID(), syncer.L2Unsafe().L1Origin) - require.Equal(t, blockA60.ID(), proposer.L2Unsafe().L1Origin) + require.Equal(t, blockA60.ID(), sequencer.L2Unsafe().L1Origin) checkSyncEngine() // --------- [ CHAIN B ] --------- // Start building chain B miner.ActL1SetFeeRecipient(common.Address{0x0B, 0x00}) - // Mine enough empty blocks on L1 to reach three proposer windows or 60 blocks. + // Mine enough empty blocks on L1 to reach three sequencer windows or 60 blocks. // We already have 39 empty blocks on the rewinded L1 that are left over from chain A. 
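+ // 39 leftover blocks + (SeqWindowSize+1) new blocks = block #60, the height chain A reached before its batch submission block.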
- for i := uint64(0); i < sd.RollupCfg.ProposerWindowSize+1; i++ { + for i := uint64(0); i < sd.RollupCfg.SeqWindowSize+1; i++ { miner.ActEmptyBlock(t) - // Ask proposer to handle new L1 head and build L2 blocks up to the L1 head - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) + // Ask sequencer to handle new L1 head and build L2 blocks up to the L1 head + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) } - // Get the last unsafe block on chain B after creating ProposerWindowSize+1 empty blocks + // Get the last unsafe block on chain B after creating SequencerWindowSize+1 empty blocks blockB60, err := minerCl.L1BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) // Ensure blockB60 is #60 on chain B @@ -526,11 +526,11 @@ func TestDeepReorg(gt *testing.T) { require.Equal(t, syncer.L2Safe(), syncer.L2Unsafe(), "L2 safe and unsafe head should be equal") checkSyncEngine() - // Sync the proposer, then build some new L2 blocks, up to and including with L1 origin B62 - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) - require.Equal(t, proposer.L2Unsafe().L1Origin, blockB62.ID()) + // Sync the sequencer, then build some new L2 blocks, up to and including with L1 origin B62 + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) + require.Equal(t, sequencer.L2Unsafe().L1Origin, blockB62.ID()) // Sync the syncer to the L2 chain with origin B62 // Run an iteration of the derivation pipeline and ensure that the L2 safe L1 origin is block B62 @@ -556,7 +556,7 @@ type rpcWrapper struct { client.RPC } -// TestRestartKromaGeth tests that the proposer can restart its execution engine without rollup-node restart, +// TestRestartKromaGeth tests that the sequencer can restart its execution engine without rollup-node restart, // including recovering the finalized/safe state of L2 chain without reorging. 
func TestRestartKromaGeth(gt *testing.T) { t := NewDefaultTesting(gt) @@ -573,34 +573,34 @@ func TestRestartKromaGeth(gt *testing.T) { miner := NewL1Miner(t, log, sd.L1Cfg) l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic)) require.NoError(t, err) - // Proposer - propEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption) - engRpc := &rpcWrapper{propEng.RPCClient()} + // Sequencer + seqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption) + engRpc := &rpcWrapper{seqEng.RPCClient()} l2Cl, err := sources.NewEngineClient(engRpc, log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(t, err) - proposer := NewL2Proposer(t, log, l1F, l2Cl, sd.RollupCfg, 0) + sequencer := NewL2Sequencer(t, log, l1F, l2Cl, sd.RollupCfg, 0) batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, proposer.RollupClient(), miner.EthClient(), propEng.EthClient()) + }, sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient()) // start - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) miner.ActEmptyBlock(t) buildAndSubmit := func() { // build some blocks - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // submit the blocks, confirm on L1 batcher.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(sd.RollupCfg.Genesis.SystemConfig.BatcherAddr)(t) miner.ActL1EndBlock(t) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) } buildAndSubmit() @@ -609,32 +609,32 @@ func TestRestartKromaGeth(gt *testing.T) { miner.ActL1SafeNext(t) miner.ActL1FinalizeNext(t) miner.ActL1FinalizeNext(t) - proposer.ActL1FinalizedSignal(t) - proposer.ActL1SafeSignal(t) + sequencer.ActL1FinalizedSignal(t) + sequencer.ActL1SafeSignal(t) // build and submit more buildAndSubmit() // but only mark the L1 block with this batch as safe miner.ActL1SafeNext(t) - proposer.ActL1SafeSignal(t) + sequencer.ActL1SafeSignal(t) // build some more, these stay unsafe miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) - statusBeforeRestart := proposer.SyncStatus() + statusBeforeRestart := sequencer.SyncStatus() // before restart scenario: we have a distinct finalized, safe, and unsafe part of the L2 chain require.NotZero(t, statusBeforeRestart.FinalizedL2.L1Origin.Number) require.Less(t, statusBeforeRestart.FinalizedL2.L1Origin.Number, statusBeforeRestart.SafeL2.L1Origin.Number) require.Less(t, statusBeforeRestart.SafeL2.L1Origin.Number, statusBeforeRestart.UnsafeL2.L1Origin.Number) - // close the proposer engine - require.NoError(t, propEng.Close()) + // close the sequencer engine + require.NoError(t, seqEng.Close()) // and start a new one with same db path - propEngNew := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption) + seqEngNew := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath, dbOption) // swap in the new rpc. 
This is as close as we can get to reconnecting to a new in-memory rpc connection - engRpc.RPC = propEngNew.RPCClient() + engRpc.RPC = seqEngNew.RPCClient() // note: geth does not persist the safe block label, only the finalized block label safe, err := l2Cl.L2BlockRefByLabel(t.Ctx(), eth.Safe) @@ -644,14 +644,14 @@ func TestRestartKromaGeth(gt *testing.T) { require.Equal(t, statusBeforeRestart.FinalizedL2, safe, "expecting to revert safe head to finalized head upon restart") require.Equal(t, statusBeforeRestart.FinalizedL2, finalized, "expecting to keep same finalized head upon restart") - // proposer runs pipeline, but now attached to the restarted geth node - proposer.ActL2PipelineFull(t) - require.Equal(t, statusBeforeRestart.UnsafeL2, proposer.L2Unsafe(), "expecting to keep same unsafe head upon restart") - require.Equal(t, statusBeforeRestart.SafeL2, proposer.L2Safe(), "expecting the safe block to catch up to what it was before shutdown after syncing from L1, and not be stuck at the finalized block") + // sequencer runs pipeline, but now attached to the restarted geth node + sequencer.ActL2PipelineFull(t) + require.Equal(t, statusBeforeRestart.UnsafeL2, sequencer.L2Unsafe(), "expecting to keep same unsafe head upon restart") + require.Equal(t, statusBeforeRestart.SafeL2, sequencer.L2Safe(), "expecting the safe block to catch up to what it was before shutdown after syncing from L1, and not be stuck at the finalized block") } -// TestConflictingL2Blocks tests that a second copy of the proposer stack cannot introduce an alternative -// L2 block (compared to something already secured by the first proposer): +// TestConflictingL2Blocks tests that a second copy of the sequencer stack cannot introduce an alternative +// L2 block (compared to something already secured by the first sequencer): // the alt block is not synced by the syncer, in unsafe and safe sync modes. 
func TestConflictingL2Blocks(gt *testing.T) { t := NewDefaultTesting(gt) @@ -659,44 +659,44 @@ func TestConflictingL2Blocks(gt *testing.T) { sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - sd, _, miner, proposer, propEng, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) + sd, _, miner, sequencer, seqEng, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) - // Extra setup: a full alternative proposer, proposer engine, and batcher + // Extra setup: a full alternative sequencer, sequencer engine, and batcher jwtPath := e2eutils.WriteDefaultJWT(t) - altPropEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath) - altPropEngCl, err := sources.NewEngineClient(altPropEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) + altSeqEng := NewL2Engine(t, log, sd.L2Cfg, sd.RollupCfg.Genesis.L1, jwtPath) + altSeqEngCl, err := sources.NewEngineClient(altSeqEng.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(t, err) l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic)) require.NoError(t, err) - altProposer := NewL2Proposer(t, log, l1F, altPropEngCl, sd.RollupCfg, 0) + altSequencer := NewL2Sequencer(t, log, l1F, altSeqEngCl, sd.RollupCfg, 0) altBatcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, altProposer.RollupClient(), miner.EthClient(), altPropEng.EthClient()) + }, altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient()) - // And set up user Alice, using the alternative proposer endpoint - l2Cl := altPropEng.EthClient() + // And set up user Alice, using the alternative sequencer endpoint + l2Cl := altSeqEng.EthClient() addresses := e2eutils.CollectAddresses(sd, dp) l2UserEnv := &BasicUserEnv[*L2Bindings]{ EthCl: l2Cl, Signer: types.LatestSigner(sd.L2Cfg.Config), AddressCorpora: addresses, - Bindings: NewL2Bindings(t, l2Cl, altPropEng.GethClient()), + Bindings: NewL2Bindings(t, l2Cl, altSeqEng.GethClient()), } alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)), sd.RollupCfg) alice.L2.SetUserEnv(l2UserEnv) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - altProposer.ActL2PipelineFull(t) + altSequencer.ActL2PipelineFull(t) // build empty L1 block miner.ActEmptyBlock(t) // Create L2 blocks, and reference the L1 head as origin - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // submit all new L2 blocks batcher.ActSubmitAll(t) @@ -710,27 +710,27 @@ func TestConflictingL2Blocks(gt *testing.T) { syncer.ActL1HeadSignal(t) syncer.ActL2PipelineFull(t) syncerHead := syncer.L2Unsafe() - require.Equal(t, syncer.L2Safe(), proposer.L2Unsafe(), "syncer syncs from proposer via L1") + require.Equal(t, syncer.L2Safe(), sequencer.L2Unsafe(), "syncer syncs from sequencer via L1") require.Equal(t, syncer.L2Safe(), syncerHead, "syncer head is the same as that what was derived from L1") - require.NotEqual(t, proposer.L2Safe(), proposer.L2Unsafe(), "proposer has not processed L1 yet") + require.NotEqual(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "sequencer has not processed L1 yet") - require.Less(t, altProposer.L2Unsafe().L1Origin.Number, proposer.L2Unsafe().L1Origin.Number, "alt-proposer is behind") + require.Less(t, altSequencer.L2Unsafe().L1Origin.Number, 
sequencer.L2Unsafe().L1Origin.Number, "alt-sequencer is behind") - // produce a conflicting L2 block with the alt proposer: + // produce a conflicting L2 block with the alt sequencer: // a new unsafe block that should not replace the existing safe block at the same height - altProposer.ActL2StartBlock(t) + altSequencer.ActL2StartBlock(t) // include tx to force the L2 block to really be different than the previous empty block alice.L2.ActResetTxOpts(t) alice.L2.ActSetTxToAddr(&dp.Addresses.Bob)(t) alice.L2.ActMakeTx(t) - altPropEng.ActL2IncludeTx(alice.Address())(t) - altProposer.ActL2EndBlock(t) + altSeqEng.ActL2IncludeTx(alice.Address())(t) + altSequencer.ActL2EndBlock(t) - conflictBlock := propEng.l2Chain.GetBlockByNumber(altProposer.L2Unsafe().Number) - require.NotEqual(t, conflictBlock.Hash(), altProposer.L2Unsafe().Hash, "alt proposer has built a conflicting block") + conflictBlock := seqEng.l2Chain.GetBlockByNumber(altSequencer.L2Unsafe().Number) + require.NotEqual(t, conflictBlock.Hash(), altSequencer.L2Unsafe().Hash, "alt sequencer has built a conflicting block") // give the unsafe block to the syncer, and see if it reorgs because of any unsafe inputs - head, err := altPropEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe) + head, err := altSeqEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) syncer.ActL2UnsafeGossipReceive(head) @@ -754,10 +754,10 @@ func TestConflictingL2Blocks(gt *testing.T) { require.Equal(t, syncer.SyncStatus().CurrentL1.Number, l1Number, "syncer has synced all new L1 blocks") require.Equal(t, syncer.L2Unsafe(), syncerHead, "syncer sticks to first included L2 block") - // Now make the alt proposer aware of the L1 chain and derive the L2 chain like the syncer; + // Now make the alt sequencer aware of the L1 chain and derive the L2 chain like the syncer; // it should reorg out its conflicting blocks to get back in harmony with the syncer. 
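+ // The safe chain derived from the L1 batch data takes precedence over the alt-sequencer's locally-built conflicting unsafe block.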
- altProposer.ActL1HeadSignal(t) - altProposer.ActL2PipelineFull(t) - require.Equal(t, syncer.L2Unsafe(), altProposer.L2Unsafe(), "alt-proposer gets back in harmony with syncer by reorging out its conflicting data") - require.Equal(t, proposer.L2Unsafe(), altProposer.L2Unsafe(), "and gets back in harmony with original proposer") + altSequencer.ActL1HeadSignal(t) + altSequencer.ActL2PipelineFull(t) + require.Equal(t, syncer.L2Unsafe(), altSequencer.L2Unsafe(), "alt-sequencer gets back in harmony with syncer by reorging out its conflicting data") + require.Equal(t, sequencer.L2Unsafe(), altSequencer.L2Unsafe(), "and gets back in harmony with original sequencer") } diff --git a/e2e/actions/sync_test.go b/e2e/actions/sync_test.go index 1749c0bcb..c96eeef66 100644 --- a/e2e/actions/sync_test.go +++ b/e2e/actions/sync_test.go @@ -18,18 +18,18 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create - _, _, miner, proposer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) + _, _, miner, sequencer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) rng := rand.New(rand.NewSource(1234)) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) // build a L1 chain with 20 blocks and matching L2 chain and batches to test some derivation work miner.ActEmptyBlock(t) for i := 0; i < 20; i++ { - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) batcher.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(batcher.batcherAddr)(t) @@ -49,7 +49,7 @@ func TestDerivationWithFlakyL1RPC(gt *testing.T) { // And sync the syncer syncer.ActL2PipelineFull(t) // syncer should be synced, even though it hit lots of temporary L1 RPC errors - require.Equal(t, proposer.L2Unsafe(), syncer.L2Safe(), "syncer is synced") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Safe(), "syncer is synced") } func TestFinalizeWhileSyncing(gt *testing.T) { @@ -57,9 +57,9 @@ func TestFinalizeWhileSyncing(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlError) // mute all the temporary derivation errors that we forcefully create - _, _, miner, proposer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) + _, _, miner, sequencer, _, syncer, _, batcher := setupReorgTestActors(t, dp, sd, log) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) syncerStartStatus := syncer.SyncStatus() @@ -69,9 +69,9 @@ func TestFinalizeWhileSyncing(gt *testing.T) { // to make the syncer finalize while it syncs. 
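+ // The loop below spans derive.FinalityDelay+1 L1 blocks, enough L1 progress for finalization to trigger while the syncer is still deriving.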
miner.ActEmptyBlock(t) for i := 0; i < derive.FinalityDelay+1; i++ { - proposer.ActL1HeadSignal(t) - proposer.ActL2PipelineFull(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActL2PipelineFull(t) + sequencer.ActBuildToL1Head(t) batcher.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(batcher.batcherAddr)(t) diff --git a/e2e/actions/system_config_test.go b/e2e/actions/system_config_test.go index 3c41a5251..b04a4bf7f 100644 --- a/e2e/actions/system_config_test.go +++ b/e2e/actions/system_config_test.go @@ -26,34 +26,34 @@ func TestBatcherKeyRotation(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - miner, propEngine, proposer := setupProposerTest(t, sd, log) + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) miner.ActL1SetFeeRecipient(common.Address{'A'}) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg)) - rollupPropCl := proposer.RollupClient() + rollupSeqCl := sequencer.RollupClient() // the default batcher batcherA := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, rollupPropCl, miner.EthClient(), propEngine.EthClient()) + }, rollupSeqCl, miner.EthClient(), seqEngine.EthClient()) // a batcher with a new key batcherB := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Bob, - }, rollupPropCl, miner.EthClient(), propEngine.EthClient()) + }, rollupSeqCl, miner.EthClient(), seqEngine.EthClient()) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) // build a L1 chain, and then L2 chain, for batcher A to submit miner.ActEmptyBlock(t) miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) batcherA.ActSubmitAll(t) // include the batch data on L1 @@ -62,10 +62,10 @@ func TestBatcherKeyRotation(gt *testing.T) { miner.ActL1EndBlock(t) // sync from L1 - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.Equal(t, uint64(2), proposer.L2Safe().L1Origin.Number, "l2 chain with new L1 origins") - require.Equal(t, proposer.L2Safe(), syncer.L2Safe(), "fully synced syncer") + require.Equal(t, uint64(2), sequencer.L2Safe().L1Origin.Number, "l2 chain with new L1 origins") + require.Equal(t, sequencer.L2Safe(), syncer.L2Safe(), "fully synced syncer") sysCfgContract, err := bindings.NewSystemConfig(sd.RollupCfg.L1SystemConfigAddress, miner.EthClient()) require.NoError(t, err) @@ -83,16 +83,16 @@ func TestBatcherKeyRotation(gt *testing.T) { cfgChangeL1BlockNum := miner.l1Chain.CurrentBlock().Number.Uint64() // sequence L2 blocks, and submit with new batcher - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) batcherB.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(dp.Addresses.Bob)(t) miner.ActL1EndBlock(t) // check that the first L2 payload that adopted the L1 block with the batcher key change indeed changed the batcher key in the system config - engCl := propEngine.EngineClient(t, sd.RollupCfg) - payload, err := engCl.PayloadByNumber(t.Ctx(), proposer.L2Safe().Number+12) // 12 new L2 blocks: 5 with origin before L1 block with batch, 6 with origin of L1 block with 
batch, 1 with new origin that changed the batcher + engCl := seqEngine.EngineClient(t, sd.RollupCfg) + payload, err := engCl.PayloadByNumber(t.Ctx(), sequencer.L2Safe().Number+12) // 12 new L2 blocks: 5 with origin before L1 block with batch, 6 with origin of L1 block with batch, 1 with new origin that changed the batcher require.NoError(t, err) ref, err := derive.PayloadToBlockRef(payload, &sd.RollupCfg.Genesis) require.NoError(t, err) @@ -103,25 +103,25 @@ func TestBatcherKeyRotation(gt *testing.T) { require.Equal(t, dp.Addresses.Bob, sysCfg.BatcherAddr, "bob should be batcher now") // sync from L1 - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.Equal(t, proposer.L2Safe().L1Origin.Number, uint64(4), "safe l2 chain with two new l1 blocks") - require.Equal(t, proposer.L2Safe(), syncer.L2Safe(), "fully synced syncer") + require.Equal(t, sequencer.L2Safe().L1Origin.Number, uint64(4), "safe l2 chain with two new l1 blocks") + require.Equal(t, sequencer.L2Safe(), syncer.L2Safe(), "fully synced syncer") // now try to build a new L1 block, and corresponding L2 blocks, and submit with the old batcher - before := proposer.L2Safe() + before := sequencer.L2Safe() miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) batcherA.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) miner.ActL1EndBlock(t) // check that the data submitted by the old batcher is ignored - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.Equal(t, proposer.L2Safe(), before, "no new safe l1 chain") + require.Equal(t, sequencer.L2Safe(), before, "no new safe l1 chain") require.Equal(t, syncer.L2Safe(), before, "syncer is ignoring old batcher too") // now submit with the new batcher @@ -131,9 +131,9 @@ func TestBatcherKeyRotation(gt *testing.T) { miner.ActL1EndBlock(t) // not ignored now with new batcher - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.NotEqual(t, proposer.L2Safe(), before, "new safe l1 chain") + require.NotEqual(t, sequencer.L2Safe(), before, "new safe l1 chain") require.NotEqual(t, syncer.L2Safe(), before, "syncer is not ignoring new batcher") // twist: reorg L1, including the batcher key change @@ -141,12 +141,12 @@ func TestBatcherKeyRotation(gt *testing.T) { for i := 0; i < 6; i++ { // build some empty blocks so the reorg is picked up miner.ActEmptyBlock(t) } - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.Equal(t, uint64(2), proposer.L2Safe().L1Origin.Number, "l2 safe is first batch submission with original batcher") - require.Equal(t, uint64(3), proposer.L2Unsafe().L1Origin.Number, "l2 unsafe l1 origin is the block that included the first batch") - require.Equal(t, proposer.L2Safe(), syncer.L2Safe(), "syncer safe head check") - require.Equal(t, proposer.L2Unsafe(), syncer.L2Unsafe(), "syncer unsafe head check") + require.Equal(t, uint64(2), sequencer.L2Safe().L1Origin.Number, "l2 safe is first batch submission with original batcher") + require.Equal(t, uint64(3), sequencer.L2Unsafe().L1Origin.Number, "l2 unsafe l1 origin is the block that included the first batch") + require.Equal(t, sequencer.L2Safe(), syncer.L2Safe(), "syncer safe head check") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Unsafe(), "syncer unsafe head check") // without building L2 chain for 
the new L1 blocks yet, just batch-submit the unsafe part batcherA.ActL2BatchBuffer(t) // forces the buffer state to handle the rewind, before we loop with ActSubmitAll @@ -155,23 +155,23 @@ func TestBatcherKeyRotation(gt *testing.T) { miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) miner.ActL1EndBlock(t) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) - require.Equal(t, proposer.L2Safe(), proposer.L2Unsafe(), "all L2 blocks are safe now") - require.Equal(t, proposer.L2Unsafe(), syncer.L2Unsafe(), "syncer synced") + require.Equal(t, sequencer.L2Safe(), sequencer.L2Unsafe(), "all L2 blocks are safe now") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Unsafe(), "syncer synced") // and see if we can go past it, with new L2 blocks - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) batcherA.ActSubmitAll(t) miner.ActL1StartBlock(12)(t) miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) miner.ActL1EndBlock(t) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) syncer.ActL2PipelineFull(t) require.Equal(t, uint64(3+6+1), syncer.L2Safe().L1Origin.Number, "sync new L1 chain, while key change is reorged out") - require.Equal(t, proposer.L2Unsafe(), syncer.L2Unsafe(), "syncer synced") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Unsafe(), "syncer synced") } // TestGPOParamsChange tests that the GPO params can be updated to adjust fees of L2 transactions, @@ -181,33 +181,33 @@ func TestGPOParamsChange(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - miner, propEngine, proposer := setupProposerTest(t, sd, log) + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, proposer.RollupClient(), miner.EthClient(), propEngine.EthClient()) + }, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient()) alice := NewBasicUser[any](log, dp.Secrets.Alice, rand.New(rand.NewSource(1234))) alice.SetUserEnv(&BasicUserEnv[any]{ - EthCl: propEngine.EthClient(), + EthCl: seqEngine.EthClient(), Signer: types.LatestSigner(sd.L2Cfg.Config), }) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) // new L1 block, with new L2 chain miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) basefee := miner.l1Chain.CurrentBlock().BaseFee - // alice makes a L2 tx, proposer includes it + // alice makes a L2 tx, sequencer includes it alice.ActResetTxOpts(t) alice.ActMakeTx(t) - proposer.ActL2StartBlock(t) - propEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) + sequencer.ActL2EndBlock(t) receipt := alice.LastTxReceipt(t) require.Equal(t, basefee, receipt.L1GasPrice, "L1 gas price matches basefee of L1 origin") @@ -241,10 +241,10 @@ func TestGPOParamsChange(gt *testing.T) { basefeeGPOUpdate := miner.l1Chain.CurrentBlock().BaseFee // build empty L2 chain, up to but excluding the L2 block with the L1 origin that processes the GPO change - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1HeadExcl(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1HeadExcl(t) - engCl := propEngine.EngineClient(t, sd.RollupCfg) + engCl := 
seqEngine.EngineClient(t, sd.RollupCfg) payload, err := engCl.PayloadByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) sysCfg, err := derive.PayloadToSystemConfig(payload, sd.RollupCfg) @@ -254,9 +254,9 @@ func TestGPOParamsChange(gt *testing.T) { // Now alice makes another transaction, which gets included in the same block that adopts the L1 origin with GPO change alice.ActResetTxOpts(t) alice.ActMakeTx(t) - proposer.ActL2StartBlock(t) - propEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) + sequencer.ActL2EndBlock(t) payload, err = engCl.PayloadByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) @@ -275,14 +275,14 @@ func TestGPOParamsChange(gt *testing.T) { // build more L2 blocks, with new L1 origin miner.ActEmptyBlock(t) basefee = miner.l1Chain.CurrentBlock().BaseFee - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // and Alice makes a tx again alice.ActResetTxOpts(t) alice.ActMakeTx(t) - proposer.ActL2StartBlock(t) - propEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + seqEngine.ActL2IncludeTx(dp.Addresses.Alice)(t) + sequencer.ActL2EndBlock(t) // and verify the new GPO params are persistent, even though the L1 origin and L2 chain have progressed receipt = alice.LastTxReceipt(t) @@ -301,19 +301,19 @@ func TestGasLimitChange(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - miner, propEngine, proposer := setupProposerTest(t, sd, log) + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, proposer.RollupClient(), miner.EthClient(), propEngine.EthClient()) + }, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient()) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) - oldGasLimit := propEngine.l2Chain.CurrentBlock().GasLimit + oldGasLimit := seqEngine.l2Chain.CurrentBlock().GasLimit require.Equal(t, oldGasLimit, uint64(dp.DeployConfig.L2GenesisBlockGasLimit)) // change gas limit on L1 to triple what it was @@ -332,16 +332,16 @@ func TestGasLimitChange(gt *testing.T) { miner.ActL1EndBlock(t) // build to latest L1, excluding the block that adopts the L1 block with the gaslimit change - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1HeadExcl(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1HeadExcl(t) - require.Equal(t, oldGasLimit, propEngine.l2Chain.CurrentBlock().GasLimit) - require.Equal(t, uint64(1), proposer.SyncStatus().UnsafeL2.L1Origin.Number) + require.Equal(t, oldGasLimit, seqEngine.l2Chain.CurrentBlock().GasLimit) + require.Equal(t, uint64(1), sequencer.SyncStatus().UnsafeL2.L1Origin.Number) // now include the L1 block with the gaslimit change, and see if it changes as expected - proposer.ActBuildToL1Head(t) - require.Equal(t, oldGasLimit*3, propEngine.l2Chain.CurrentBlock().GasLimit) - require.Equal(t, uint64(2), proposer.SyncStatus().UnsafeL2.L1Origin.Number) + sequencer.ActBuildToL1Head(t) + require.Equal(t, oldGasLimit*3, seqEngine.l2Chain.CurrentBlock().GasLimit) + require.Equal(t, uint64(2), 
sequencer.SyncStatus().UnsafeL2.L1Origin.Number) // now submit all this to L1, and see if a syncer can sync and reproduce it batcher.ActSubmitAll(t) @@ -352,7 +352,7 @@ func TestGasLimitChange(gt *testing.T) { _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg)) syncer.ActL2PipelineFull(t) - require.Equal(t, proposer.L2Unsafe(), syncer.L2Safe(), "syncer stays in sync, even with gaslimit changes") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Safe(), "syncer stays in sync, even with gaslimit changes") } func TestValidatorRewardScalarChange(gt *testing.T) { @@ -360,20 +360,20 @@ func TestValidatorRewardScalarChange(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, defaultRollupTestParams) sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - miner, propEngine, proposer := setupProposerTest(t, sd, log) + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, proposer.RollupClient(), miner.EthClient(), propEngine.EthClient()) + }, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient()) - engCl := propEngine.EngineClient(t, sd.RollupCfg) + engCl := seqEngine.EngineClient(t, sd.RollupCfg) - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // change validator reward scalar on L1 sysCfgContract, err := bindings.NewSystemConfig(sd.RollupCfg.L1SystemConfigAddress, miner.EthClient()) @@ -406,10 +406,10 @@ func TestValidatorRewardScalarChange(gt *testing.T) { miner.ActL1EndBlock(t) // build to latest L1, excluding the block that adopts the L1 block with the validator reward scalar change - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1HeadExcl(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1HeadExcl(t) // now include the L1 block with the gaslimit change, and see if it changes as expected - proposer.ActBuildToL1Head(t) + sequencer.ActBuildToL1Head(t) require.NoError(t, err) payload, err = engCl.PayloadByLabel(t.Ctx(), eth.Unsafe) @@ -427,5 +427,5 @@ func TestValidatorRewardScalarChange(gt *testing.T) { _, syncer := setupSyncer(t, sd, log, miner.L1Client(t, sd.RollupCfg)) syncer.ActL2PipelineFull(t) - require.Equal(t, proposer.L2Unsafe(), syncer.L2Safe(), "syncer stays in sync, even with validator reward scalar changes") + require.Equal(t, sequencer.L2Unsafe(), syncer.L2Safe(), "syncer stays in sync, even with validator reward scalar changes") } diff --git a/e2e/actions/user_test.go b/e2e/actions/user_test.go index a55b0331e..99f947729 100644 --- a/e2e/actions/user_test.go +++ b/e2e/actions/user_test.go @@ -26,12 +26,12 @@ func TestCrossLayerUser(gt *testing.T) { sd := e2eutils.Setup(t, dp, defaultAlloc) log := testlog.Logger(t, log.LvlDebug) - miner, propEngine, proposer := setupProposerTest(t, sd, log) + miner, seqEngine, sequencer := setupSequencerTest(t, sd, log) batcher := NewL2Batcher(log, sd.RollupCfg, &BatcherCfg{ MinL1TxSize: 0, MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, - }, proposer.RollupClient(), miner.EthClient(), propEngine.EthClient()) + }, sequencer.RollupClient(), miner.EthClient(), seqEngine.EthClient()) validator := NewL2Validator(t, log, &ValidatorCfg{ OutputOracleAddr: sd.DeploymentsL1.L2OutputOracleProxy, ValidatorPoolAddr: sd.DeploymentsL1.ValidatorPoolProxy, @@ -39,14 +39,14 @@ func 
TestCrossLayerUser(gt *testing.T) { SecurityCouncilAddr: sd.DeploymentsL1.SecurityCouncilProxy, ValidatorKey: dp.Secrets.TrustedValidator, AllowNonFinalized: true, - }, miner.EthClient(), propEngine.EthClient(), proposer.RollupClient()) + }, miner.EthClient(), seqEngine.EthClient(), sequencer.RollupClient()) // need to start derivation before we can make L2 blocks - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) l1Cl := miner.EthClient() - l2Cl := propEngine.EthClient() - l2ProofCl := propEngine.GethClient() + l2Cl := seqEngine.EthClient() + l2ProofCl := seqEngine.GethClient() addresses := e2eutils.CollectAddresses(sd, dp) @@ -68,16 +68,16 @@ func TestCrossLayerUser(gt *testing.T) { alice.L2.SetUserEnv(l2UserEnv) // Build at least one l2 block so we have an unsafe head with a deposit info tx (genesis block doesn't) - proposer.ActL2StartBlock(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + sequencer.ActL2EndBlock(t) // regular L2 tx, in new L2 block alice.L2.ActResetTxOpts(t) alice.L2.ActSetTxToAddr(&dp.Addresses.Bob)(t) alice.L2.ActMakeTx(t) - proposer.ActL2StartBlock(t) - propEngine.ActL2IncludeTx(alice.Address())(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + seqEngine.ActL2IncludeTx(alice.Address())(t) + sequencer.ActL2EndBlock(t) alice.L2.ActCheckReceiptStatusOfLastTx(true)(t) // regular L1 tx, in new L1 block @@ -95,21 +95,21 @@ func TestCrossLayerUser(gt *testing.T) { miner.ActL1IncludeTx(alice.Address())(t) miner.ActL1EndBlock(t) - proposer.ActL1HeadSignal(t) + sequencer.ActL1HeadSignal(t) - // sync proposer build enough blocks to adopt latest L1 origin - for proposer.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() { - proposer.ActL2StartBlock(t) - proposer.ActL2EndBlock(t) + // let the sequencer build enough blocks to adopt the latest L1 origin + for sequencer.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() { + sequencer.ActL2StartBlock(t) + sequencer.ActL2EndBlock(t) } // Now that the L2 chain adopted the latest L1 block, check that we processed the deposit alice.ActCheckDepositStatus(true, true)(t) // regular withdrawal, in new L2 block alice.ActStartWithdrawal(t) - proposer.ActL2StartBlock(t) - propEngine.ActL2IncludeTx(alice.Address())(t) - proposer.ActL2EndBlock(t) + sequencer.ActL2StartBlock(t) + seqEngine.ActL2IncludeTx(alice.Address())(t) + sequencer.ActL2EndBlock(t) alice.ActCheckStartWithdrawal(true)(t) // NOTE(chokobole): It is necessary to wait for one finalized (or safe if AllowNonFinalized @@ -122,8 +122,8 @@ func TestCrossLayerUser(gt *testing.T) { // build a L1 block and more L2 blocks, // to ensure the L2 withdrawal is old enough to be able to get into a checkpoint output on L1 miner.ActEmptyBlock(t) - proposer.ActL1HeadSignal(t) - proposer.ActBuildToL1Head(t) + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1Head(t) // submit everything to L1 batcher.ActSubmitAll(t) @@ -134,7 +134,7 @@ func TestCrossLayerUser(gt *testing.T) { } // derive from L1, blocks will now become safe to submit - proposer.ActL2PipelineFull(t) + sequencer.ActL2PipelineFull(t) validator.ActDeposit(t, 1000) miner.includeL1Block(t, dp.Addresses.TrustedValidator) diff --git a/e2e/bridge_test.go b/e2e/bridge_test.go index 9ace8bc93..21da177c4 100644 --- a/e2e/bridge_test.go +++ b/e2e/bridge_test.go @@ -36,7 +36,7 @@ func TestERC20BridgeDeposits(t *testing.T) { log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time",
sys.RollupConfig.Genesis.L2Time) l1Client := sys.Clients["l1"] - l2Client := sys.Clients["proposer"] + l2Client := sys.Clients["sequencer"] opts, err := bind.NewKeyedTransactorWithChainID(sys.cfg.Secrets.Alice, cfg.L1ChainIDBig()) require.Nil(t, err) diff --git a/e2e/e2eutils/addresses.go b/e2e/e2eutils/addresses.go index 8bf89804a..1ffe3692b 100644 --- a/e2e/e2eutils/addresses.go +++ b/e2e/e2eutils/addresses.go @@ -41,7 +41,7 @@ func CollectAddresses(sd *SetupData, dp *DeployParams) (out []common.Address) { out = append(out, sd.L1Cfg.Coinbase, sd.L2Cfg.Coinbase, - dp.Addresses.ProposerP2P, + dp.Addresses.SequencerP2P, predeploys.ValidatorRewardVaultAddr, sd.RollupCfg.BatchInboxAddress, sd.RollupCfg.Genesis.SystemConfig.BatcherAddr, diff --git a/e2e/e2eutils/addresses_test.go b/e2e/e2eutils/addresses_test.go index c87b4d238..e4a5df73b 100644 --- a/e2e/e2eutils/addresses_test.go +++ b/e2e/e2eutils/addresses_test.go @@ -8,9 +8,9 @@ import ( func TestCollectAddresses(t *testing.T) { tp := &TestParams{ - MaxProposerDrift: 40, - ProposerWindowSize: 120, - ChannelTimeout: 120, + MaxSequencerDrift: 40, + SequencerWindowSize: 120, + ChannelTimeout: 120, } dp := MakeDeployParams(t, tp) alloc := &AllocParams{PrefundTestUsers: true} diff --git a/e2e/e2eutils/secrets.go b/e2e/e2eutils/secrets.go index fbe4f2a24..7953e81e1 100644 --- a/e2e/e2eutils/secrets.go +++ b/e2e/e2eutils/secrets.go @@ -22,7 +22,7 @@ var DefaultMnemonicConfig = &MnemonicConfig{ Challenger1: "m/44'/60'/0'/0/4", Challenger2: "m/44'/60'/0'/0/5", Batcher: "m/44'/60'/0'/0/6", - ProposerP2P: "m/44'/60'/0'/0/7", + SequencerP2P: "m/44'/60'/0'/0/7", Alice: "m/44'/60'/0'/0/8", Bob: "m/44'/60'/0'/0/9", Mallory: "m/44'/60'/0'/0/10", @@ -43,7 +43,7 @@ type MnemonicConfig struct { Challenger1 string Challenger2 string Batcher string - ProposerP2P string + SequencerP2P string // prefunded L1/L2 accounts for testing Alice string @@ -90,7 +90,7 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { if err != nil { return nil, err } - proposerP2P, err := wallet.PrivateKey(account(m.ProposerP2P)) + sequencerP2P, err := wallet.PrivateKey(account(m.SequencerP2P)) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func (m *MnemonicConfig) Secrets() (*Secrets, error) { Challenger1: challenger1, Challenger2: challenger2, Batcher: batcher, - ProposerP2P: proposerP2P, + SequencerP2P: sequencerP2P, Alice: alice, Bob: bob, Mallory: mallory, @@ -134,7 +134,7 @@ type Secrets struct { Challenger1 *ecdsa.PrivateKey Challenger2 *ecdsa.PrivateKey Batcher *ecdsa.PrivateKey - ProposerP2P *ecdsa.PrivateKey + SequencerP2P *ecdsa.PrivateKey // prefunded L1/L2 accounts for testing Alice *ecdsa.PrivateKey @@ -164,7 +164,7 @@ func (s *Secrets) Addresses() *Addresses { Challenger1: crypto.PubkeyToAddress(s.Challenger1.PublicKey), Challenger2: crypto.PubkeyToAddress(s.Challenger2.PublicKey), Batcher: crypto.PubkeyToAddress(s.Batcher.PublicKey), - ProposerP2P: crypto.PubkeyToAddress(s.ProposerP2P.PublicKey), + SequencerP2P: crypto.PubkeyToAddress(s.SequencerP2P.PublicKey), Alice: crypto.PubkeyToAddress(s.Alice.PublicKey), Bob: crypto.PubkeyToAddress(s.Bob.PublicKey), Mallory: crypto.PubkeyToAddress(s.Mallory.PublicKey), @@ -182,7 +182,7 @@ type Addresses struct { Challenger1 common.Address Challenger2 common.Address Batcher common.Address - ProposerP2P common.Address + SequencerP2P common.Address // prefunded L1/L2 accounts for testing Alice common.Address @@ -199,7 +199,7 @@ func (a *Addresses) All() []common.Address { a.Challenger1, a.Challenger2, a.Batcher, - 
a.ProposerP2P, + a.SequencerP2P, a.Alice, a.Bob, a.Mallory, diff --git a/e2e/e2eutils/setup.go b/e2e/e2eutils/setup.go index 66ed9a5a5..960c469bf 100644 --- a/e2e/e2eutils/setup.go +++ b/e2e/e2eutils/setup.go @@ -43,10 +43,10 @@ type DeployParams struct { // TestParams parametrizes the most essential rollup configuration parameters type TestParams struct { - MaxProposerDrift uint64 - ProposerWindowSize uint64 - ChannelTimeout uint64 - L1BlockTime uint64 + MaxSequencerDrift uint64 + SequencerWindowSize uint64 + ChannelTimeout uint64 + L1BlockTime uint64 } func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { @@ -59,12 +59,12 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { L2ChainID: 901, L2BlockTime: 2, - MaxProposerDrift: tp.MaxProposerDrift, - ProposerWindowSize: tp.ProposerWindowSize, - ChannelTimeout: tp.ChannelTimeout, - P2PProposerAddress: addresses.ProposerP2P, - BatchInboxAddress: common.Address{0: 0x42, 19: 0xff}, // tbd - BatchSenderAddress: addresses.Batcher, + MaxSequencerDrift: tp.MaxSequencerDrift, + SequencerWindowSize: tp.SequencerWindowSize, + ChannelTimeout: tp.ChannelTimeout, + P2PSequencerAddress: addresses.SequencerP2P, + BatchInboxAddress: common.Address{0: 0x42, 19: 0xff}, // tbd + BatchSenderAddress: addresses.Batcher, ValidatorPoolTrustedValidator: addresses.TrustedValidator, ValidatorPoolRequiredBondAmount: uint64ToBig(1), @@ -117,7 +117,7 @@ func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { ProxyAdminOwner: addresses.ProxyAdminOwner, ProtocolVaultRecipient: common.Address{19: 2}, - ProposerRewardVaultRecipient: common.Address{19: 3}, + L1FeeVaultRecipient: common.Address{19: 3}, EIP1559Elasticity: 10, EIP1559Denominator: 50, @@ -235,8 +235,8 @@ func Setup(t require.TestingT, deployParams *DeployParams, alloc *AllocParams) * SystemConfig: SystemConfigFromDeployConfig(deployConf), }, BlockTime: deployConf.L2BlockTime, - MaxProposerDrift: deployConf.MaxProposerDrift, - ProposerWindowSize: deployConf.ProposerWindowSize, + MaxSequencerDrift: deployConf.MaxSequencerDrift, + SeqWindowSize: deployConf.SequencerWindowSize, ChannelTimeout: deployConf.ChannelTimeout, L1ChainID: new(big.Int).SetUint64(deployConf.L1ChainID), L2ChainID: new(big.Int).SetUint64(deployConf.L2ChainID), diff --git a/e2e/e2eutils/setup_test.go b/e2e/e2eutils/setup_test.go index 8cf89084f..6c3ce6b0c 100644 --- a/e2e/e2eutils/setup_test.go +++ b/e2e/e2eutils/setup_test.go @@ -19,9 +19,9 @@ func TestWriteDefaultJWT(t *testing.T) { func TestSetup(t *testing.T) { tp := &TestParams{ - MaxProposerDrift: 40, - ProposerWindowSize: 120, - ChannelTimeout: 120, + MaxSequencerDrift: 40, + SequencerWindowSize: 120, + ChannelTimeout: 120, } dp := MakeDeployParams(t, tp) alloc := &AllocParams{PrefundTestUsers: true} diff --git a/e2e/setup.go b/e2e/setup.go index 77507fb5e..7a9e6e15d 100644 --- a/e2e/setup.go +++ b/e2e/setup.go @@ -88,10 +88,10 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { L2BlockTime: 1, FinalizationPeriodSeconds: 60 * 60 * 24, - MaxProposerDrift: 10, - ProposerWindowSize: 30, + MaxSequencerDrift: 10, + SequencerWindowSize: 30, ChannelTimeout: 10, - P2PProposerAddress: addresses.ProposerP2P, + P2PSequencerAddress: addresses.SequencerP2P, BatchInboxAddress: common.Address{0: 0x52, 19: 0xff}, // tbd BatchSenderAddress: addresses.Batcher, @@ -140,7 +140,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { ProxyAdminOwner: addresses.ProxyAdminOwner, ProtocolVaultRecipient: common.Address{19: 2}, - 
ProposerRewardVaultRecipient: common.Address{19: 3}, + L1FeeVaultRecipient: common.Address{19: 3}, DeploymentWaitConfirmations: 1, @@ -175,19 +175,19 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { Nodes: map[string]*rollupNode.Config{ "syncer": { Driver: driver.Config{ - SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: false, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: false, }, L1EpochPollInterval: time.Second * 4, }, - "proposer": { + "sequencer": { Driver: driver.Config{ - SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: true, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: true, }, - // Submitter PrivKey is set in system start for rollup nodes where proposer = true + // Submitter PrivKey is set in system start for rollup nodes where sequencer = true RPC: rollupNode.RPCConfig{ ListenAddr: "127.0.0.1", ListenPort: 0, @@ -198,7 +198,7 @@ func DefaultSystemConfig(t *testing.T) SystemConfig { }, Loggers: map[string]log.Logger{ "syncer": testlog.Logger(t, log.LvlInfo).New("role", "syncer"), - "proposer": testlog.Logger(t, log.LvlInfo).New("role", "proposer"), + "sequencer": testlog.Logger(t, log.LvlInfo).New("role", "sequencer"), "batcher": testlog.Logger(t, log.LvlInfo).New("role", "batcher"), "validator": testlog.Logger(t, log.LvlInfo).New("role", "validator"), "challenger": testlog.Logger(t, log.LvlInfo).New("role", "challenger"), @@ -413,8 +413,8 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { SystemConfig: e2eutils.SystemConfigFromDeployConfig(cfg.DeployConfig), }, BlockTime: cfg.DeployConfig.L2BlockTime, - MaxProposerDrift: cfg.DeployConfig.MaxProposerDrift, - ProposerWindowSize: cfg.DeployConfig.ProposerWindowSize, + MaxSequencerDrift: cfg.DeployConfig.MaxSequencerDrift, + SeqWindowSize: cfg.DeployConfig.SequencerWindowSize, ChannelTimeout: cfg.DeployConfig.ChannelTimeout, L1ChainID: cfg.L1ChainIDBig(), L2ChainID: cfg.L2ChainIDBig(), @@ -549,8 +549,8 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { if p, ok := p2pNodes[name]; ok { c.P2P = p - if c.Driver.ProposerEnabled && c.P2PSigner == nil { - c.P2PSigner = &p2p.PreparedSigner{Signer: p2p.NewLocalSigner(cfg.Secrets.ProposerP2P)} + if c.Driver.SequencerEnabled && c.P2PSigner == nil { + c.P2PSigner = &p2p.PreparedSigner{Signer: p2p.NewLocalSigner(cfg.Secrets.SequencerP2P)} } } @@ -597,8 +597,8 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { // Run validator node (L2 Output Submitter, Asserter) validatorCliCfg := validator.CLIConfig{ L1EthRpc: sys.Nodes["l1"].WSEndpoint(), - L2EthRpc: sys.Nodes["proposer"].HTTPEndpoint(), - RollupRpc: sys.RollupNodes["proposer"].HTTPEndpoint(), + L2EthRpc: sys.Nodes["sequencer"].HTTPEndpoint(), + RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(), L2OOAddress: predeploys.DevL2OutputOracleAddr.String(), ColosseumAddress: predeploys.DevColosseumAddr.String(), ValPoolAddress: predeploys.DevValidatorPoolAddr.String(), @@ -636,7 +636,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { rpcCl := client.NewBaseRPCClient(cl) validatorMaliciousL2RPC := e2eutils.NewMaliciousL2RPC(rpcCl) validatorCfg.RollupClient = sources.NewRollupClient(validatorMaliciousL2RPC) - validatorCfg.L2Client = sys.Clients["proposer"] + validatorCfg.L2Client = sys.Clients["sequencer"] // If malicious validator is turned on, set target block number for submitting invalid output if cfg.EnableMaliciousValidator { @@ -655,8 +655,8 
@@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { // Run validator node (Challenger) challengerCliCfg := validator.CLIConfig{ L1EthRpc: sys.Nodes["l1"].WSEndpoint(), - L2EthRpc: sys.Nodes["proposer"].HTTPEndpoint(), - RollupRpc: sys.RollupNodes["proposer"].HTTPEndpoint(), + L2EthRpc: sys.Nodes["sequencer"].HTTPEndpoint(), + RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(), L2OOAddress: predeploys.DevL2OutputOracleAddr.String(), ColosseumAddress: predeploys.DevColosseumAddr.String(), ValPoolAddress: predeploys.DevValidatorPoolAddr.String(), @@ -686,7 +686,7 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { rpcCl = client.NewBaseRPCClient(cl) challengerHonestL2RPC := e2eutils.NewHonestL2RPC(rpcCl) challengerCfg.RollupClient = sources.NewRollupClient(challengerHonestL2RPC) - challengerCfg.L2Client = sys.Clients["proposer"] + challengerCfg.L2Client = sys.Clients["sequencer"] // If malicious validator is turned on, set target block number for challenge if cfg.EnableMaliciousValidator { @@ -707,8 +707,8 @@ func (cfg SystemConfig) Start(_opts ...SystemConfigOption) (*System, error) { // Batcher (Batch Submitter) batcherCliCfg := batcher.CLIConfig{ L1EthRpc: sys.Nodes["l1"].WSEndpoint(), - L2EthRpc: sys.Nodes["proposer"].WSEndpoint(), - RollupRpc: sys.RollupNodes["proposer"].HTTPEndpoint(), + L2EthRpc: sys.Nodes["sequencer"].WSEndpoint(), + RollupRpc: sys.RollupNodes["sequencer"].HTTPEndpoint(), MaxChannelDuration: 1, MaxL1TxSize: 120_000, TargetL1TxSize: 100_000, @@ -847,10 +847,10 @@ func (cfg SystemConfig) DepositValidatorPool(l1Client *ethclient.Client, priv *e return nil } -func (cfg SystemConfig) SendTransferTx(l2Prop *ethclient.Client, l2Sync *ethclient.Client) (*types.Receipt, error) { +func (cfg SystemConfig) SendTransferTx(l2Seq *ethclient.Client, l2Sync *ethclient.Client) (*types.Receipt, error) { chainId := cfg.L2ChainIDBig() ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - nonce, err := l2Prop.PendingNonceAt(ctx, cfg.Secrets.Addresses().Alice) + nonce, err := l2Seq.PendingNonceAt(ctx, cfg.Secrets.Addresses().Alice) cancel() if err != nil { return nil, fmt.Errorf("failed to get nonce: %w", err) @@ -866,15 +866,15 @@ func (cfg SystemConfig) SendTransferTx(l2Prop *ethclient.Client, l2Sync *ethclie }) ctx, cancel = context.WithTimeout(context.Background(), 2*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) - err = l2Prop.SendTransaction(ctx, tx) + err = l2Seq.SendTransaction(ctx, tx) cancel() if err != nil { - return nil, fmt.Errorf("failed to send L2 tx to proposer: %w", err) + return nil, fmt.Errorf("failed to send L2 tx to sequencer: %w", err) } - _, err = waitForL2Transaction(tx.Hash(), l2Prop, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) + _, err = waitForL2Transaction(tx.Hash(), l2Seq, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) if err != nil { - return nil, fmt.Errorf("failed to wait L2 tx on proposer: %w", err) + return nil, fmt.Errorf("failed to wait L2 tx on sequencer: %w", err) } receipt, err := waitForL2Transaction(tx.Hash(), l2Sync, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) diff --git a/e2e/system_test.go b/e2e/system_test.go index 80da20a35..5f4f535d4 100644 --- a/e2e/system_test.go +++ b/e2e/system_test.go @@ -86,7 +86,7 @@ func TestL2OutputSubmitter(t *testing.T) { l1Client := sys.Clients["l1"] - rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["proposer"].HTTPEndpoint()) + rollupRPCClient, err := 
rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint()) require.Nil(t, err) rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient)) @@ -98,7 +98,7 @@ func TestL2OutputSubmitter(t *testing.T) { require.Nil(t, err) // Wait until the second output submission from L2. The output submitter submits outputs from the - // unsafe portion of the chain which gets reorged on startup. The proposer has an out of date view + // unsafe portion of the chain which gets reorged on startup. The sequencer has an out of date view // when it creates its first block and uses an old L1 Origin. It then does not submit a batch // for that block and subsequently reorgs to match what the syncer derives when running the // reconciliation process. @@ -161,7 +161,7 @@ func TestValidationReward(t *testing.T) { require.NoError(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] validatorVault, err := bindings.NewValidatorRewardVault(predeploys.ValidatorRewardVaultAddr, l2Sync) @@ -172,7 +172,7 @@ func TestValidationReward(t *testing.T) { require.GreaterOrEqual(t, rewardDivider.Uint64(), uint64(1)) // Send a transaction to pay a fee. - _, err = cfg.SendTransferTx(l2Prop, l2Sync) + _, err = cfg.SendTransferTx(l2Seq, l2Sync) require.NoError(t, err) l2RewardedCh := make(chan *bindings.ValidatorRewardVaultRewarded, 1) @@ -215,7 +215,7 @@ func TestSystemE2E(t *testing.T) { log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) l1Client := sys.Clients["l1"] - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account @@ -263,7 +263,7 @@ func TestSystemE2E(t *testing.T) { diff = diff.Sub(endBalance, startBalance) require.Equal(t, mintAmount, diff, "Did not get expected balance change") - // Submit TX to L2 proposer node + // Submit TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx = types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -274,39 +274,39 @@ func TestSystemE2E(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.Nil(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.Nil(t, err, "Sending L2 tx to sequencer") - _, err = waitForL2Transaction(tx.Hash(), l2Prop, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) - require.Nil(t, err, "Waiting for L2 tx on proposer") + _, err = waitForL2Transaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) + require.Nil(t, err, "Waiting for L2 tx on sequencer") receipt, err = waitForL2Transaction(tx.Hash(), l2Sync, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) require.Nil(t, err, "Waiting for L2 tx on syncer") require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status, "TX should have succeeded") - // Verify blocks match after batch submission on syncers and proposers + // Verify blocks match after batch submission on syncers and sequencers syncBlock, err := l2Sync.BlockByNumber(context.Background(), receipt.BlockNumber) require.Nil(t, err) - propBlock, err := l2Prop.BlockByNumber(context.Background(), receipt.BlockNumber) + seqBlock, err := l2Seq.BlockByNumber(context.Background(), receipt.BlockNumber)
require.Nil(t, err) - require.Equal(t, syncBlock.NumberU64(), propBlock.NumberU64(), "Syncer and proposer blocks not the same after including a batch tx") - require.Equal(t, syncBlock.ParentHash(), propBlock.ParentHash(), "Syncer and proposer blocks parent hashes not the same after including a batch tx") - require.Equal(t, syncBlock.Hash(), propBlock.Hash(), "Syncer and proposer blocks not the same after including a batch tx") + require.Equal(t, syncBlock.NumberU64(), seqBlock.NumberU64(), "Syncer and sequencer blocks not the same after including a batch tx") + require.Equal(t, syncBlock.ParentHash(), seqBlock.ParentHash(), "Syncer and sequencer blocks parent hashes not the same after including a batch tx") + require.Equal(t, syncBlock.Hash(), seqBlock.Hash(), "Syncer and sequencer blocks not the same after including a batch tx") - rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["proposer"].HTTPEndpoint()) + rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint()) require.Nil(t, err) rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient)) // basic check that sync status works - propStatus, err := rollupClient.SyncStatus(context.Background()) + seqStatus, err := rollupClient.SyncStatus(context.Background()) require.Nil(t, err) - require.LessOrEqual(t, propBlock.NumberU64(), propStatus.UnsafeL2.Number) + require.LessOrEqual(t, seqBlock.NumberU64(), seqStatus.UnsafeL2.Number) // basic check that version endpoint works - propVersion, err := rollupClient.Version(context.Background()) + seqVersion, err := rollupClient.Version(context.Background()) require.Nil(t, err) - require.NotEqual(t, "", propVersion) + require.NotEqual(t, "", seqVersion) } -// TestConfirmationDepth runs the rollup with both proposer and syncer not immediately processing the tip of the chain. +// TestConfirmationDepth runs the rollup with both sequencer and syncer not immediately processing the tip of the chain. 
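The confirmation-depth property asserted in the test below reduces to a single inequality: the L1 origin embedded in a node's L2 head must trail the current L1 head by at least the configured depth. A minimal, self-contained sketch of that check, where the helper name and the plain uint64 block numbers are illustrative rather than part of this codebase:

package main

import "fmt"

// respectsConfDepth reports whether an L2 head whose L1 origin is originNum
// keeps a confirmation depth of confDepth against the current L1 head. It
// mirrors the require.LessOrEqual assertions in TestConfirmationDepth.
func respectsConfDepth(originNum, confDepth, l1Head uint64) bool {
	return originNum+confDepth <= l1Head
}

func main() {
	// With seqConfDepth = 2 and L1 at block 10, the sequencer's L2 head may
	// reference an origin no newer than block 8.
	fmt.Println(respectsConfDepth(8, 2, 10)) // true
	fmt.Println(respectsConfDepth(9, 2, 10)) // false
}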
func TestConfirmationDepth(t *testing.T) { parallel(t) if !verboseGethNodes { @@ -314,12 +314,12 @@ func TestConfirmationDepth(t *testing.T) { } cfg := DefaultSystemConfig(t) - cfg.DeployConfig.ProposerWindowSize = 4 - cfg.DeployConfig.MaxProposerDrift = 10 * cfg.DeployConfig.L1BlockTime - propConfDepth := uint64(2) + cfg.DeployConfig.SequencerWindowSize = 4 + cfg.DeployConfig.MaxSequencerDrift = 10 * cfg.DeployConfig.L1BlockTime + seqConfDepth := uint64(2) syncConfDepth := uint64(5) - cfg.Nodes["proposer"].Driver.ProposerConfDepth = propConfDepth - cfg.Nodes["proposer"].Driver.SyncerConfDepth = 0 + cfg.Nodes["sequencer"].Driver.SequencerConfDepth = seqConfDepth + cfg.Nodes["sequencer"].Driver.SyncerConfDepth = 0 cfg.Nodes["syncer"].Driver.SyncerConfDepth = syncConfDepth sys, err := cfg.Start() @@ -330,26 +330,26 @@ func TestConfirmationDepth(t *testing.T) { log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) l1Client := sys.Clients["l1"] - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] - // Wait enough time for the proposer to submit a block with distance from L1 head, submit it, - // and for the slower syncer to read a full proposer window and cover confirmation depth for reading and some margin - <-time.After(time.Duration((cfg.DeployConfig.ProposerWindowSize+syncConfDepth+3)*cfg.DeployConfig.L1BlockTime) * time.Second) + // Wait enough time for the sequencer to submit a block with distance from L1 head, submit it, + // and for the slower syncer to read a full sequence window and cover confirmation depth for reading and some margin + <-time.After(time.Duration((cfg.DeployConfig.SequencerWindowSize+syncConfDepth+3)*cfg.DeployConfig.L1BlockTime) * time.Second) - // within a second, get both L1 and L2 syncer and proposer block heads + // within a second, get both L1 and L2 syncer and sequencer block heads ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() l1Head, err := l1Client.BlockByNumber(ctx, nil) require.NoError(t, err) - l2PropHead, err := l2Prop.BlockByNumber(ctx, nil) + l2SeqHead, err := l2Seq.BlockByNumber(ctx, nil) require.NoError(t, err) l2SyncHead, err := l2Sync.BlockByNumber(ctx, nil) require.NoError(t, err) - propInfo, err := derive.L1InfoDepositTxData(l2PropHead.Transactions()[0].Data()) + seqInfo, err := derive.L1InfoDepositTxData(l2SeqHead.Transactions()[0].Data()) require.NoError(t, err) - require.LessOrEqual(t, propInfo.Number+propConfDepth, l1Head.NumberU64(), "the proposer L2 head block should have an origin older than the L1 head block by at least the proposer conf depth") + require.LessOrEqual(t, seqInfo.Number+seqConfDepth, l1Head.NumberU64(), "the seq L2 head block should have an origin older than the L1 head block by at least the sequencer conf depth") syncInfo, err := derive.L1InfoDepositTxData(l2SyncHead.Transactions()[0].Data()) require.NoError(t, err) @@ -357,7 +357,7 @@ func TestConfirmationDepth(t *testing.T) { } // TestPendingGasLimit tests the configuration of the gas limit of the pending block, -// and if it does not conflict with the regular gas limit on the syncer or proposer. +// and if it does not conflict with the regular gas limit on the syncer or sequencer. 
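The pending gas limit exercised below is not a rollup parameter: it is geth's local miner gas ceiling, injected through the harness's GethOption hook shown in the next hunk. A small sketch of such a hook, assuming upstream go-ethereum import paths; the GethOption signature is copied from this diff, while the helper wrapping it is illustrative:

package e2esketch

import (
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/node"
)

// GethOption mirrors the option signature the e2e harness uses (as seen in
// this diff) to tweak a geth instance's config before the node starts.
type GethOption func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error

// withPendingGasCeil caps only the locally built (pending) block. The
// on-chain gas limit still comes from the SystemConfig, which is why the
// test expects 30M for "latest" while "pending" reports 10M or 9M per node.
func withPendingGasCeil(ceil uint64) GethOption {
	return func(ethCfg *ethconfig.Config, _ *node.Config) error {
		ethCfg.Miner.GasCeil = ceil
		return nil
	}
}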
func TestPendingGasLimit(t *testing.T) { parallel(t) if !verboseGethNodes { @@ -368,7 +368,7 @@ func TestPendingGasLimit(t *testing.T) { // configure the L2 gas limit to be high, and the pending gas limits to be lower for resource saving. cfg.DeployConfig.L2GenesisBlockGasLimit = 30_000_000 - cfg.GethOptions["proposer"] = []GethOption{ + cfg.GethOptions["sequencer"] = []GethOption{ func(ethCfg *ethconfig.Config, nodeCfg *node.Config) error { ethCfg.Miner.GasCeil = 10_000_000 return nil @@ -389,7 +389,7 @@ func TestPendingGasLimit(t *testing.T) { log.Info("genesis", "l2", sys.RollupConfig.Genesis.L2, "l1", sys.RollupConfig.Genesis.L1, "l2_time", sys.RollupConfig.Genesis.L2Time) l2Sync := sys.Clients["syncer"] - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] checkGasLimit := func(client *ethclient.Client, number *big.Int, expected uint64) *types.Header { ctx, cancel := context.WithTimeout(context.Background(), time.Second) @@ -401,15 +401,15 @@ func TestPendingGasLimit(t *testing.T) { } // check if the gaslimits are matching the expected values, - // and that the syncer/proposer can use their locally configured gas limit for the pending block. + // and that the syncer/sequencer can use their locally configured gas limit for the pending block. for { - checkGasLimit(l2Prop, big.NewInt(-1), 10_000_000) + checkGasLimit(l2Seq, big.NewInt(-1), 10_000_000) checkGasLimit(l2Sync, big.NewInt(-1), 9_000_000) - checkGasLimit(l2Prop, nil, 30_000_000) + checkGasLimit(l2Seq, nil, 30_000_000) latestSyncHeader := checkGasLimit(l2Sync, nil, 30_000_000) // Stop once the syncer passes genesis: - // this implies we checked a new block from the proposer, on both proposer and syncer nodes. + // this implies we checked a new block from the sequencer, on both sequencer and syncer nodes. 
if latestSyncHeader.Number.Uint64() > 0 { break } @@ -430,7 +430,7 @@ func TestFinalize(t *testing.T) { require.Nil(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] // as configured in the extra geth lifecycle in testing setup const finalizedDistance = 8 @@ -450,7 +450,7 @@ func TestFinalize(t *testing.T) { } // poll until the finalized block number is greater than 0 - l2Finalized, err := waitForL2Block(big.NewInt(int64(rpc.FinalizedBlockNumber)), l2Prop, time.Second) + l2Finalized, err := waitForL2Block(big.NewInt(int64(rpc.FinalizedBlockNumber)), l2Seq, time.Second) require.NoError(t, err) if l2Finalized.NumberU64() > 0 { break @@ -542,8 +542,8 @@ func TestMissingBatchE2E(t *testing.T) { // 'unable to publish transaction role=batcher err="insufficient funds for gas * price + value"' cfg := DefaultSystemConfig(t) - // small proposer window size so the test does not take as long - cfg.DeployConfig.ProposerWindowSize = 4 + // small sequence window size so the test does not take as long + cfg.DeployConfig.SequencerWindowSize = 4 // Specifically set batch submitter balance to stop batches from being included cfg.Premine[cfg.Secrets.Addresses().Batcher] = big.NewInt(0) @@ -552,13 +552,13 @@ func TestMissingBatchE2E(t *testing.T) { require.Nil(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account ethPrivKey := cfg.Secrets.Alice - // Submit TX to L2 proposer node + // Submit TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -569,15 +569,15 @@ func TestMissingBatchE2E(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.Nil(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.Nil(t, err, "Sending L2 tx to sequencer") // Let it show up on the unsafe chain - receipt, err := waitForL2Transaction(tx.Hash(), l2Prop, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) - require.Nil(t, err, "Waiting for L2 tx on proposer") + receipt, err := waitForL2Transaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) + require.Nil(t, err, "Waiting for L2 tx on sequencer") // Wait until the block it was first included in shows up in the safe chain on the syncer - _, err = waitForL2Block(receipt.BlockNumber, l2Sync, time.Duration((sys.RollupConfig.ProposerWindowSize+4)*cfg.DeployConfig.L1BlockTime)*time.Second) + _, err = waitForL2Block(receipt.BlockNumber, l2Sync, time.Duration((sys.RollupConfig.SeqWindowSize+4)*cfg.DeployConfig.L1BlockTime)*time.Second) require.Nil(t, err, "Waiting for block on syncer") // Assert that the transaction is not found on the syncer @@ -586,16 +586,16 @@ func TestMissingBatchE2E(t *testing.T) { _, err = l2Sync.TransactionReceipt(ctx, tx.Hash()) require.Equal(t, ethereum.NotFound, err, "Found transaction in syncer when it should not have been included") - // Wait a short time for the L2 reorg to occur on the proposer as well. - // The proper thing to do is to wait until the proposer marks this block safe. + // Wait a short time for the L2 reorg to occur on the sequencer as well. + // The proper thing to do is to wait until the sequencer marks this block safe. 
<-time.After(2 * time.Second) - // Assert that the reconciliation process did an L2 reorg on the proposer to remove the invalid block + // Assert that the reconciliation process did an L2 reorg on the sequencer to remove the invalid block ctx2, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - block, err := l2Prop.BlockByNumber(ctx2, receipt.BlockNumber) - require.Nil(t, err, "Get block from proposer") - require.NotEqual(t, block.Hash(), receipt.BlockHash, "L2 Proposer did not reorg out transaction on it's safe chain") + block, err := l2Seq.BlockByNumber(ctx2, receipt.BlockNumber) + require.Nil(t, err, "Get block from sequencer") + require.NotEqual(t, block.Hash(), receipt.BlockHash, "L2 Sequencer did not reorg out transaction on its safe chain") } func L1InfoFromState(ctx context.Context, contract *bindings.L1Block, l2Number *big.Int) (derive.L1BlockInfo, error) { @@ -671,36 +671,36 @@ func TestSystemMockP2P(t *testing.T) { // Disable batcher, so we don't sync from L1 cfg.DisableBatcher = true // disable at the start, so we don't miss any gossiped blocks. - cfg.Nodes["proposer"].Driver.ProposerStopped = true + cfg.Nodes["sequencer"].Driver.SequencerStopped = true // connect the nodes cfg.P2PTopology = map[string][]string{ - "syncer": {"proposer"}, + "syncer": {"sequencer"}, } var published, received []common.Hash - propTracer, syncTracer := new(FnTracer), new(FnTracer) - propTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { + seqTracer, syncTracer := new(FnTracer), new(FnTracer) + seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { published = append(published, payload.BlockHash) } syncTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) { received = append(received, payload.BlockHash) } - cfg.Nodes["proposer"].Tracer = propTracer + cfg.Nodes["sequencer"].Tracer = seqTracer cfg.Nodes["syncer"].Tracer = syncTracer sys, err := cfg.Start() require.Nil(t, err, "Error starting up system") defer sys.Close() - // Enable the proposer now that everyone is ready to receive payloads. - rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["proposer"].HTTPEndpoint()) + // Enable the sequencer now that everyone is ready to receive payloads. + rollupRPCClient, err := rpc.DialContext(context.Background(), sys.RollupNodes["sequencer"].HTTPEndpoint()) require.NoError(t, err) syncerPeerID := sys.RollupNodes["syncer"].P2P().Host().ID() check := func() bool { - proposerBlocksTopicPeers := sys.RollupNodes["proposer"].P2P().GossipOut().BlocksTopicPeers() - return slices.Contains[peer.ID](proposerBlocksTopicPeers, syncerPeerID) + sequencerBlocksTopicPeers := sys.RollupNodes["sequencer"].P2P().GossipOut().BlocksTopicPeers() + return slices.Contains[peer.ID](sequencerBlocksTopicPeers, syncerPeerID) } // poll to see if the validator node is connected & meshed on gossip.
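That readiness probe is retried with a backoff loop; the hunk that follows shows its sleep call. A compact sketch of the same retry shape, with the attempt count and the doubling delay as assumptions, since the test's backOffStrategy is defined elsewhere in the harness:

package e2esketch

import (
	"errors"
	"time"
)

// pollWithBackoff retries check until it succeeds, sleeping with a doubling
// delay between attempts, the same shape as the gossip-mesh readiness loop
// in TestSystemMockP2P.
func pollWithBackoff(check func() bool, attempts int, base time.Duration) error {
	delay := base
	for i := 0; i < attempts; i++ {
		if check() {
			return nil
		}
		time.Sleep(delay)
		delay *= 2
	}
	return errors.New("condition not met after all attempts")
}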
@@ -712,17 +712,17 @@ func TestSystemMockP2P(t *testing.T) { } time.Sleep(backOffStrategy.Duration(i)) } - require.True(t, check(), "validator must be meshed with proposer for gossip test to proceed") + require.True(t, check(), "validator must be meshed with sequencer for gossip test to proceed") - require.NoError(t, rollupRPCClient.Call(nil, "admin_startProposer", sys.L2GenesisCfg.ToBlock().Hash())) + require.NoError(t, rollupRPCClient.Call(nil, "admin_startSequencer", sys.L2GenesisCfg.ToBlock().Hash())) - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account ethPrivKey := cfg.Secrets.Alice - // Submit TX to L2 proposer node + // Submit TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -733,18 +733,18 @@ func TestSystemMockP2P(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.Nil(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.Nil(t, err, "Sending L2 tx to sequencer") - // Wait for tx to be mined on the L2 proposer chain - receiptProp, err := waitForL2Transaction(tx.Hash(), l2Prop, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) - require.Nil(t, err, "Waiting for L2 tx on proposer") + // Wait for tx to be mined on the L2 sequencer chain + receiptSeq, err := waitForL2Transaction(tx.Hash(), l2Seq, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) + require.Nil(t, err, "Waiting for L2 tx on sequencer") // Wait until the block it was first included in shows up in the safe chain on the syncer receiptSync, err := waitForL2Transaction(tx.Hash(), l2Sync, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.Nil(t, err, "Waiting for L2 tx on syncer") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) // Verify that everything that was received was published require.GreaterOrEqual(t, len(published), len(received)) @@ -759,10 +759,10 @@ func TestSystemMockP2P(t *testing.T) { // // Test steps: // 1. Spin up the nodes (P2P is disabled on the syncer) -// 2. Send a transaction to the proposer. -// 3. Wait for the TX to be mined on the proposer chain. +// 2. Send a transaction to the sequencer. +// 3. Wait for the TX to be mined on the sequencer chain. // 5. Wait for the syncer to detect a gap in the payload queue vs. the unsafe head -// 6. Wait for the RPC sync method to grab the block from the proposer over RPC and insert it into the syncer's unsafe chain. +// 6. Wait for the RPC sync method to grab the block from the sequencer over RPC and insert it into the syncer's unsafe chain. // 7. Wait for the syncer to sync the unsafe chain into the safe chain. // 8. Verify that the TX is included in the syncer's safe chain. 
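The sync tests that follow all rest on one receipt-equivalence pattern: submit a transaction to the sequencer, wait for its receipt there, then wait for the same hash on the syncer and require the two receipts to be equal. A sketch of the polling helper that pattern relies on, using upstream go-ethereum clients; the 200ms poll interval is an illustrative choice, and the harness's own waitForTransaction helper may differ:

package e2esketch

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// waitReceipt is a sketch of a waitForTransaction-style helper: it polls an
// endpoint until the receipt for hash appears or the timeout elapses.
func waitReceipt(c *ethclient.Client, hash common.Hash, timeout time.Duration) (*types.Receipt, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	for {
		if r, err := c.TransactionReceipt(ctx, hash); err == nil {
			return r, nil
		}
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("no receipt for %s: %w", hash, ctx.Err())
		case <-time.After(200 * time.Millisecond):
		}
	}
}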
func TestSystemRPCAltSync(t *testing.T) { @@ -779,23 +779,23 @@ func TestSystemRPCAltSync(t *testing.T) { cfg.DisableBatcher = true var published, received []string - propTracer, syncTracer := new(FnTracer), new(FnTracer) - // The proposer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P - propTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { + seqTracer, syncTracer := new(FnTracer), new(FnTracer) + // The sequencer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P + seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { published = append(published, payload.ID().String()) } // Blocks are now received via the RPC based alt-sync method syncTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) { received = append(received, payload.ID().String()) } - cfg.Nodes["proposer"].Tracer = propTracer + cfg.Nodes["sequencer"].Tracer = seqTracer cfg.Nodes["syncer"].Tracer = syncTracer sys, err := cfg.Start(SystemConfigOption{ key: "afterRollupNodeStart", - role: "proposer", + role: "sequencer", action: func(sCfg *SystemConfig, system *System) { - rpc, _ := system.Nodes["proposer"].Attach() // never errors + rpc, _ := system.Nodes["sequencer"].Attach() // never errors cfg.Nodes["syncer"].L2Sync = &rollupNode.PreparedL2SyncEndpoint{ Client: client.NewBaseRPCClient(rpc), } @@ -804,13 +804,13 @@ func TestSystemRPCAltSync(t *testing.T) { require.Nil(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account ethPrivKey := cfg.Secrets.Alice - // Submit a TX to L2 proposer node + // Submit a TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -821,18 +821,18 @@ func TestSystemRPCAltSync(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.Nil(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.Nil(t, err, "Sending L2 tx to sequencer") - // Wait for tx to be mined on the L2 proposer chain - receiptProp, err := waitForTransaction(tx.Hash(), l2Prop, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second) - require.Nil(t, err, "Waiting for L2 tx on proposer") + // Wait for tx to be mined on the L2 sequencer chain + receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second) + require.Nil(t, err, "Waiting for L2 tx on sequencer") - // Wait for alt RPC sync to pick up the blocks on the proposer chain + // Wait for alt RPC sync to pick up the blocks on the sequencer chain receiptSync, err := waitForTransaction(tx.Hash(), l2Sync, 12*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.Nil(t, err, "Waiting for L2 tx on syncer") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) // Verify that the tx was received via RPC sync (P2P is disabled) require.Contains(t, received, eth.BlockID{Hash: receiptSync.BlockHash, Number: receiptSync.BlockNumber.Uint64()}.String()) @@ -855,17 +855,17 @@ func TestSystemP2PAltSync(t *testing.T) { // Add more syncer nodes cfg.Nodes["alice"] = &rollupNode.Config{ Driver: driver.Config{ 
- SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: false, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: false, }, L1EpochPollInterval: time.Second * 4, } cfg.Nodes["bob"] = &rollupNode.Config{ Driver: driver.Config{ - SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: false, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: false, }, L1EpochPollInterval: time.Second * 4, } @@ -874,9 +874,9 @@ func TestSystemP2PAltSync(t *testing.T) { // connect the nodes cfg.P2PTopology = map[string][]string{ - "proposer": {"alice", "bob"}, - "alice": {"proposer", "bob"}, - "bob": {"alice", "proposer"}, + "sequencer": {"alice", "bob"}, + "alice": {"sequencer", "bob"}, + "bob": {"alice", "sequencer"}, } // Enable the P2P req-resp based sync cfg.P2PReqRespSync = true @@ -885,24 +885,24 @@ func TestSystemP2PAltSync(t *testing.T) { cfg.DisableBatcher = true var published []string - propTracer := new(FnTracer) - // The proposer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P - propTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { + seqTracer := new(FnTracer) + // The sequencer still publishes the blocks to the tracer, even if they do not reach the network due to disabled P2P + seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { published = append(published, payload.ID().String()) } // Blocks are now received via the RPC based alt-sync method - cfg.Nodes["proposer"].Tracer = propTracer + cfg.Nodes["sequencer"].Tracer = seqTracer sys, err := cfg.Start() require.NoError(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] // Transactor Account ethPrivKey := cfg.Secrets.Alice - // Submit a TX to L2 proposer node + // Submit a TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -913,12 +913,12 @@ func TestSystemP2PAltSync(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.NoError(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.NoError(t, err, "Sending L2 tx to sequencer") - // Wait for tx to be mined on the L2 proposer chain - receiptProp, err := waitForTransaction(tx.Hash(), l2Prop, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second) - require.NoError(t, err, "Waiting for L2 tx on proposer") + // Wait for tx to be mined on the L2 sequencer chain + receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 6*time.Duration(sys.RollupConfig.BlockTime)*time.Second) + require.NoError(t, err, "Waiting for L2 tx on sequencer") // Gossip is able to respond to IWANT messages for the duration of heartbeat_time * message_window = 0.5 * 12 = 6 // Wait till we pass that, and then we'll have missed some blocks that cannot be retrieved in any way from gossip @@ -981,11 +981,11 @@ func TestSystemP2PAltSync(t *testing.T) { require.NoError(t, err) l2Sync := ethclient.NewClient(rpc) - // It may take a while to sync, but eventually we should see the proposed data show up + // It may take a while to sync, but eventually we should see the sequenced data show up receiptSync, err := waitForTransaction(tx.Hash(), l2Sync, 100*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.NoError(t, 
err, "Waiting for L2 tx on syncer") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) // Verify that the tx was received via P2P sync require.Contains(t, syncedPayloads, eth.BlockID{Hash: receiptSync.BlockHash, Number: receiptSync.BlockNumber.Uint64()}.String()) @@ -995,7 +995,7 @@ func TestSystemP2PAltSync(t *testing.T) { require.ElementsMatch(t, syncedPayloads, published[:len(syncedPayloads)]) } -// TestSystemDenseTopology sets up a dense p2p topology with 3 syncer nodes and 1 proposer node. +// TestSystemDenseTopology sets up a dense p2p topology with 3 syncer nodes and 1 sequencer node. func TestSystemDenseTopology(t *testing.T) { t.Skip("Skipping dense topology test to avoid flakiness. @refcell address in p2p scoring pr.") @@ -1005,24 +1005,24 @@ func TestSystemDenseTopology(t *testing.T) { } cfg := DefaultSystemConfig(t) - // slow down L1 blocks so we can see the L2 blocks arrive well before the L1 blocks do. - // Keep the proposer window small so the L2 chain is started quick + // slow down L1 blocks, so we can see the L2 blocks arrive well before the L1 blocks do. + // Keep the seq window small so the L2 chain is started quick cfg.DeployConfig.L1BlockTime = 10 // Append additional nodes to the system to construct a dense p2p network cfg.Nodes["syncer2"] = &rollupNode.Config{ Driver: driver.Config{ - SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: false, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: false, }, L1EpochPollInterval: time.Second * 4, } cfg.Nodes["syncer3"] = &rollupNode.Config{ Driver: driver.Config{ - SyncerConfDepth: 0, - ProposerConfDepth: 0, - ProposerEnabled: false, + SyncerConfDepth: 0, + SequencerConfDepth: 0, + SequencerEnabled: false, }, L1EpochPollInterval: time.Second * 4, } @@ -1031,9 +1031,9 @@ func TestSystemDenseTopology(t *testing.T) { // connect the nodes cfg.P2PTopology = map[string][]string{ - "syncer": {"proposer", "syncer2", "syncer3"}, - "syncer2": {"proposer", "syncer", "syncer3"}, - "syncer3": {"proposer", "syncer", "syncer2"}, + "syncer": {"sequencer", "syncer2", "syncer3"}, + "syncer2": {"sequencer", "syncer", "syncer3"}, + "syncer3": {"sequencer", "syncer", "syncer2"}, } // Set peer scoring for each node, but without banning @@ -1047,8 +1047,8 @@ func TestSystemDenseTopology(t *testing.T) { } var published, received1, received2, received3 []common.Hash - propTracer, syncTracer, syncTracer2, syncTracer3 := new(FnTracer), new(FnTracer), new(FnTracer), new(FnTracer) - propTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { + seqTracer, syncTracer, syncTracer2, syncTracer3 := new(FnTracer), new(FnTracer), new(FnTracer), new(FnTracer) + seqTracer.OnPublishL2PayloadFn = func(ctx context.Context, payload *eth.ExecutionPayload) { published = append(published, payload.BlockHash) } syncTracer.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) { @@ -1060,7 +1060,7 @@ func TestSystemDenseTopology(t *testing.T) { syncTracer3.OnUnsafeL2PayloadFn = func(ctx context.Context, from peer.ID, payload *eth.ExecutionPayload) { received3 = append(received3, payload.BlockHash) } - cfg.Nodes["proposer"].Tracer = propTracer + cfg.Nodes["sequencer"].Tracer = seqTracer cfg.Nodes["syncer"].Tracer = syncTracer cfg.Nodes["syncer2"].Tracer = syncTracer2 cfg.Nodes["syncer3"].Tracer = syncTracer3 @@ -1069,7 +1069,7 @@ func TestSystemDenseTopology(t *testing.T) { require.Nil(t, err, "Error starting up system") defer 
sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] l2Sync2 := sys.Clients["syncer2"] l2Sync3 := sys.Clients["syncer3"] @@ -1077,7 +1077,7 @@ func TestSystemDenseTopology(t *testing.T) { // Transactor Account ethPrivKey := cfg.Secrets.Alice - // Submit TX to L2 proposer node + // Submit TX to L2 sequencer node toAddr := common.Address{0xff, 0xff} tx := types.MustSignNewTx(ethPrivKey, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{ ChainID: cfg.L2ChainIDBig(), @@ -1088,25 +1088,25 @@ func TestSystemDenseTopology(t *testing.T) { GasFeeCap: big.NewInt(200), Gas: 21000, }) - err = l2Prop.SendTransaction(context.Background(), tx) - require.NoError(t, err, "Sending L2 tx to proposer") + err = l2Seq.SendTransaction(context.Background(), tx) + require.NoError(t, err, "Sending L2 tx to sequencer") - // Wait for tx to be mined on the L2 proposer chain - receiptProp, err := waitForTransaction(tx.Hash(), l2Prop, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) - require.NoError(t, err, "Waiting for L2 tx on proposer") + // Wait for tx to be mined on the L2 sequencer chain + receiptSeq, err := waitForTransaction(tx.Hash(), l2Seq, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) + require.NoError(t, err, "Waiting for L2 tx on sequencer") // Wait until the block it was first included in shows up in the safe chain on the syncer receiptSync, err := waitForTransaction(tx.Hash(), l2Sync, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.NoError(t, err, "Waiting for L2 tx on syncer") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) receiptSync, err = waitForTransaction(tx.Hash(), l2Sync2, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.NoError(t, err, "Waiting for L2 tx on syncer2") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) receiptSync, err = waitForTransaction(tx.Hash(), l2Sync3, 10*time.Duration(sys.RollupConfig.BlockTime)*time.Second) require.NoError(t, err, "Waiting for L2 tx on syncer3") - require.Equal(t, receiptProp, receiptSync) + require.Equal(t, receiptSeq, receiptSync) // Verify that everything that was received was published require.GreaterOrEqual(t, len(published), len(received1)) @@ -1135,17 +1135,17 @@ func TestL1InfoContract(t *testing.T) { defer sys.Close() l1Client := sys.Clients["l1"] - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] endSyncBlockNumber := big.NewInt(4) - endPropBlockNumber := big.NewInt(6) + endSeqBlockNumber := big.NewInt(6) endSyncBlock, err := waitForL2Block(endSyncBlockNumber, l2Sync, time.Minute) require.Nil(t, err) - endPropBlock, err := waitForL2Block(endPropBlockNumber, l2Prop, time.Minute) + endSeqBlock, err := waitForL2Block(endSeqBlockNumber, l2Seq, time.Minute) require.Nil(t, err) - propL1Info, err := bindings.NewL1Block(cfg.L1InfoPredeployAddress, l2Prop) + seqL1Info, err := bindings.NewL1Block(cfg.L1InfoPredeployAddress, l2Seq) require.Nil(t, err) syncL1Info, err := bindings.NewL1Block(cfg.L1InfoPredeployAddress, l2Sync) @@ -1174,11 +1174,11 @@ func TestL1InfoContract(t *testing.T) { } } - l1InfosFromProposerTransactions, l1InfosFromProposerState := fillInfoLists(endPropBlock, propL1Info, l2Prop) + l1InfosFromSequencerTransactions, l1InfosFromSequencerState := fillInfoLists(endSeqBlock, seqL1Info, l2Seq) l1InfosFromSyncerTransactions, l1InfosFromSyncerState := 
fillInfoLists(endSyncBlock, syncL1Info, l2Sync) l1blocks := make(map[common.Hash]derive.L1BlockInfo) - maxL1Hash := l1InfosFromProposerTransactions[0].BlockHash + maxL1Hash := l1InfosFromSequencerTransactions[0].BlockHash for h := maxL1Hash; ; { b, err := l1Client.BlockByHash(ctx, h) require.Nil(t, err) @@ -1212,8 +1212,8 @@ func TestL1InfoContract(t *testing.T) { } } - checkInfoList("On proposer with tx", l1InfosFromProposerTransactions) - checkInfoList("On proposer with state", l1InfosFromProposerState) + checkInfoList("On sequencer with tx", l1InfosFromSequencerTransactions) + checkInfoList("On sequencer with state", l1InfosFromSequencerState) checkInfoList("On syncer with tx", l1InfosFromSyncerTransactions) checkInfoList("On syncer with state", l1InfosFromSyncerState) } @@ -1264,7 +1264,7 @@ func TestWithdrawals(t *testing.T) { defer sys.Close() l1Client := sys.Clients["l1"] - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account @@ -1295,7 +1295,7 @@ func TestWithdrawals(t *testing.T) { require.Nil(t, err, "Waiting for deposit tx on L1") // Bind L2 Withdrawer Contract - l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Prop) + l2withdrawer, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Seq) require.Nil(t, err, "binding withdrawer on L2") // Wait for deposit to arrive @@ -1319,7 +1319,7 @@ func TestWithdrawals(t *testing.T) { // Start L2 balance for withdrawal ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - startBalance, err = l2Prop.BalanceAt(ctx, fromAddr, nil) + startBalance, err = l2Seq.BalanceAt(ctx, fromAddr, nil) require.Nil(t, err) // Initiate Withdrawal @@ -1331,7 +1331,7 @@ func TestWithdrawals(t *testing.T) { require.Nil(t, err, "sending initiate withdraw tx") receipt, err = waitForL2Transaction(tx.Hash(), l2Sync, 10*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second) - require.Nil(t, err, "withdrawal initiated on L2 proposer") + require.Nil(t, err, "withdrawal initiated on L2 sequencer") require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed") // Verify L2 balance after withdrawal @@ -1476,7 +1476,7 @@ func TestFees(t *testing.T) { require.Nil(t, err, "Error starting up system") defer sys.Close() - l2Prop := sys.Clients["proposer"] + l2Seq := sys.Clients["sequencer"] l2Sync := sys.Clients["syncer"] // Transactor Account @@ -1484,7 +1484,7 @@ func TestFees(t *testing.T) { fromAddr := crypto.PubkeyToAddress(ethPrivKey.PublicKey) // Find gaspriceoracle contract - gpoContract, err := bindings.NewGasPriceOracle(predeploys.GasPriceOracleAddr, l2Prop) + gpoContract, err := bindings.NewGasPriceOracle(predeploys.GasPriceOracleAddr, l2Seq) require.Nil(t, err) overhead, err := gpoContract.Overhead(&bind.CallOpts{}) @@ -1501,13 +1501,13 @@ func TestFees(t *testing.T) { // Check balances of ProtocolVault ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - protocolVaultStartBalance, err := l2Prop.BalanceAt(ctx, predeploys.ProtocolVaultAddr, nil) + protocolVaultStartBalance, err := l2Seq.BalanceAt(ctx, predeploys.ProtocolVaultAddr, nil) require.Nil(t, err) - // Check balances of ProposerRewardVault + // Check balances of L1FeeVault ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) defer cancel() - proposerRewardVaultStartBalance, err := l2Prop.BalanceAt(ctx, predeploys.ProposerRewardVaultAddr, nil) + 
+	l1FeeVaultStartBalance, err := l2Seq.BalanceAt(ctx, predeploys.L1FeeVaultAddr, nil)
 	require.Nil(t, err)

 	// Simple transfer from signer to random account
@@ -1531,11 +1531,11 @@ func TestFees(t *testing.T) {
 	sender, err := types.LatestSignerForChainID(cfg.L2ChainIDBig()).Sender(tx)
 	require.NoError(t, err)
 	t.Logf("waiting for tx %s from %s to %s", tx.Hash(), sender, tx.To())
-	err = l2Prop.SendTransaction(context.Background(), tx)
-	require.Nil(t, err, "Sending L2 tx to proposer")
+	err = l2Seq.SendTransaction(context.Background(), tx)
+	require.Nil(t, err, "Sending L2 tx to sequencer")

-	_, err = waitForL2Transaction(tx.Hash(), l2Prop, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
-	require.Nil(t, err, "Waiting for L2 tx on proposer")
+	_, err = waitForL2Transaction(tx.Hash(), l2Seq, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
+	require.Nil(t, err, "Waiting for L2 tx on sequencer")

 	receipt, err := waitForL2Transaction(tx.Hash(), l2Sync, 4*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
 	require.Nil(t, err, "Waiting for L2 tx on syncer")
@@ -1543,27 +1543,27 @@ func TestFees(t *testing.T) {
 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	header, err := l2Prop.HeaderByNumber(ctx, receipt.BlockNumber)
+	header, err := l2Seq.HeaderByNumber(ctx, receipt.BlockNumber)
 	require.Nil(t, err)

 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	validatorRewardVaultStartBalance, err := l2Prop.BalanceAt(ctx, predeploys.ValidatorRewardVaultAddr, safeAddBig(header.Number, big.NewInt(-1)))
+	validatorRewardVaultStartBalance, err := l2Seq.BalanceAt(ctx, predeploys.ValidatorRewardVaultAddr, safeAddBig(header.Number, big.NewInt(-1)))
 	require.Nil(t, err)

 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	validatorRewardVaultEndBalance, err := l2Prop.BalanceAt(ctx, predeploys.ValidatorRewardVaultAddr, header.Number)
+	validatorRewardVaultEndBalance, err := l2Seq.BalanceAt(ctx, predeploys.ValidatorRewardVaultAddr, header.Number)
 	require.Nil(t, err)

 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	endBalance, err := l2Prop.BalanceAt(ctx, fromAddr, header.Number)
+	endBalance, err := l2Seq.BalanceAt(ctx, fromAddr, header.Number)
 	require.Nil(t, err)

 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	protocolVaultEndBalance, err := l2Prop.BalanceAt(ctx, predeploys.ProtocolVaultAddr, header.Number)
+	protocolVaultEndBalance, err := l2Seq.BalanceAt(ctx, predeploys.ProtocolVaultAddr, header.Number)
 	require.Nil(t, err)

 	l1Header, err := sys.Clients["l1"].HeaderByNumber(ctx, nil)
@@ -1571,16 +1571,16 @@ func TestFees(t *testing.T) {
 	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
 	defer cancel()
-	proposerRewardVaultEndBalance, err := l2Prop.BalanceAt(ctx, predeploys.ProposerRewardVaultAddr, header.Number)
+	l1FeeVaultEndBalance, err := l2Seq.BalanceAt(ctx, predeploys.L1FeeVaultAddr, header.Number)
 	require.Nil(t, err)

 	// Diff fee recipients balances
 	protocolVaultDiff := new(big.Int).Sub(protocolVaultEndBalance, protocolVaultStartBalance)
-	proposerRewardVaultDiff := new(big.Int).Sub(proposerRewardVaultEndBalance, proposerRewardVaultStartBalance)
+	l1FeeVaultDiff := new(big.Int).Sub(l1FeeVaultEndBalance, l1FeeVaultStartBalance)
 	validatorRewardVaultDiff := new(big.Int).Sub(validatorRewardVaultEndBalance, validatorRewardVaultStartBalance)

 	// get a validator reward scalar from L1Block contract
-	l1BlockContract, err := bindings.NewL1Block(predeploys.L1BlockAddr, l2Prop)
+	l1BlockContract, err := bindings.NewL1Block(predeploys.L1BlockAddr, l2Seq)
 	require.Nil(t, err)

 	validatorRewardScalar, err := l1BlockContract.ValidatorRewardScalar(&bind.CallOpts{})
@@ -1602,7 +1602,7 @@ func TestFees(t *testing.T) {
 	protocolFee := new(big.Int).Sub(fee, reward)
 	require.Equal(t, protocolFee.Cmp(protocolVaultDiff), 0, "protocol fund mismatch")

-	// Tally proposer reward
+	// Tally sequencer reward
 	bytes, err := tx.MarshalBinary()
 	require.Nil(t, err)
 	l1GasUsed := calcL1GasUsed(bytes, overhead)
@@ -1611,22 +1611,22 @@ func TestFees(t *testing.T) {
 	l1Fee = l1Fee.Mul(l1Fee, scalar)
 	l1Fee = l1Fee.Div(l1Fee, divisor)
-	require.Equal(t, l1Fee, proposerRewardVaultDiff, "proposer reward mismatch")
+	require.Equal(t, l1Fee, l1FeeVaultDiff, "sequencer reward mismatch")

-	// Tally Proposer reward against GasPriceOracle
+	// Tally Sequencer reward against GasPriceOracle
 	gpoL1Fee, err := gpoContract.GetL1Fee(&bind.CallOpts{}, bytes)
 	require.Nil(t, err)
-	require.Equal(t, l1Fee, gpoL1Fee, "proposer reward mismatch")
+	require.Equal(t, l1Fee, gpoL1Fee, "sequencer reward mismatch")

 	// Calculate total fee
 	protocolVaultDiff.Add(protocolVaultDiff, validatorRewardVaultDiff)
-	totalFee := new(big.Int).Add(protocolVaultDiff, proposerRewardVaultDiff)
+	totalFee := new(big.Int).Add(protocolVaultDiff, l1FeeVaultDiff)
 	balanceDiff := new(big.Int).Sub(startBalance, endBalance)
 	balanceDiff.Sub(balanceDiff, transferAmount)
 	require.Equal(t, balanceDiff, totalFee, "balances should add up")
 }
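For reference, the sequencer-reward tally in TestFees reproduces the L1 data fee that the L1FeeVault accumulates: the marshalled transaction bytes are priced per byte, the oracle's overhead is added, and the result is scaled by the L1 base fee and a fixed-point scalar. A minimal standalone sketch of that formula; the 4/16 byte costs, the 68-byte signature allowance, and the 10^6 scalar divisor are assumptions based on Bedrock-style gas price oracles, not values taken from this diff:

    package fees

    import "math/big"

    // calcL1Fee mirrors l1GasUsed(data) * l1BaseFee * scalar / divisor as asserted in TestFees.
    func calcL1Fee(data []byte, overhead, l1BaseFee, scalar *big.Int) *big.Int {
        gas := int64(68 * 16) // assumed allowance for the not-yet-present signature bytes
        for _, b := range data {
            if b == 0 {
                gas += 4 // zero calldata byte
            } else {
                gas += 16 // non-zero calldata byte
            }
        }
        l1GasUsed := new(big.Int).Add(big.NewInt(gas), overhead)
        fee := new(big.Int).Mul(l1GasUsed, l1BaseFee)
        fee.Mul(fee, scalar)
        return fee.Div(fee, big.NewInt(1_000_000)) // assumed 6-decimal scalar
    }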
-func TestStopStartProposer(t *testing.T) {
+func TestStopStartSequencer(t *testing.T) {
 	parallel(t)
 	if !verboseGethNodes {
 		log.Root().SetHandler(log.DiscardHandler())
 	}
@@ -1637,37 +1637,37 @@ func TestStopStartProposer(t *testing.T) {
 	require.Nil(t, err, "Error starting up system")
 	defer sys.Close()

-	l2Prop := sys.Clients["proposer"]
-	rollupNode := sys.RollupNodes["proposer"]
+	l2Seq := sys.Clients["sequencer"]
+	rollupNode := sys.RollupNodes["sequencer"]

 	nodeRPC, err := rpc.DialContext(context.Background(), rollupNode.HTTPEndpoint())
 	require.Nil(t, err, "Error dialing node")

-	blockBefore := latestBlock(t, l2Prop)
+	blockBefore := latestBlock(t, l2Seq)
 	time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second)
-	blockAfter := latestBlock(t, l2Prop)
+	blockAfter := latestBlock(t, l2Seq)
 	require.Greaterf(t, blockAfter, blockBefore, "Chain did not advance")

 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 	blockHash := common.Hash{}
-	err = nodeRPC.CallContext(ctx, &blockHash, "admin_stopProposer")
-	require.Nil(t, err, "Error stopping proposer")
+	err = nodeRPC.CallContext(ctx, &blockHash, "admin_stopSequencer")
+	require.Nil(t, err, "Error stopping sequencer")

-	blockBefore = latestBlock(t, l2Prop)
+	blockBefore = latestBlock(t, l2Seq)
 	time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second)
-	blockAfter = latestBlock(t, l2Prop)
-	require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping proposer")
+	blockAfter = latestBlock(t, l2Seq)
+	require.Equal(t, blockAfter, blockBefore, "Chain advanced after stopping sequencer")

 	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
-	err = nodeRPC.CallContext(ctx, nil, "admin_startProposer", blockHash)
-	require.Nil(t, err, "Error starting proposer")
+	err = nodeRPC.CallContext(ctx, nil, "admin_startSequencer", blockHash)
+	require.Nil(t, err, "Error starting sequencer")

-	blockBefore = latestBlock(t, l2Prop)
+	blockBefore = latestBlock(t, l2Seq)
 	time.Sleep(time.Duration(cfg.DeployConfig.L2BlockTime+1) * time.Second)
-	blockAfter = latestBlock(t, l2Prop)
-	require.Greater(t, blockAfter, blockBefore, "Chain did not advance after starting proposer")
+	blockAfter = latestBlock(t, l2Seq)
+	require.Greater(t, blockAfter, blockBefore, "Chain did not advance after starting sequencer")
 }

 func TestStopStartBatcher(t *testing.T) {
@@ -1685,16 +1685,16 @@ func TestStopStartBatcher(t *testing.T) {
 	require.Nil(t, err)
 	rollupClient := sources.NewRollupClient(client.NewBaseRPCClient(rollupRPCClient))

-	l2Prop := sys.Clients["proposer"]
+	l2Seq := sys.Clients["sequencer"]
 	l2Sync := sys.Clients["syncer"]

 	// retrieve the initial sync status
-	propStatus, err := rollupClient.SyncStatus(context.Background())
+	seqStatus, err := rollupClient.SyncStatus(context.Background())
 	require.Nil(t, err)

 	nonce := uint64(0)
 	sendTx := func() *types.Receipt {
-		// Submit TX to L2 proposer node
+		// Submit TX to L2 sequencer node
 		tx := types.MustSignNewTx(cfg.Secrets.Alice, types.LatestSignerForChainID(cfg.L2ChainIDBig()), &types.DynamicFeeTx{
 			ChainID: cfg.L2ChainIDBig(),
 			Nonce:   nonce,
@@ -1705,12 +1705,12 @@ func TestStopStartBatcher(t *testing.T) {
 			Gas:     21000,
 		})
 		nonce++
-		err = l2Prop.SendTransaction(context.Background(), tx)
-		require.Nil(t, err, "Sending L2 tx to proposer")
+		err = l2Seq.SendTransaction(context.Background(), tx)
+		require.Nil(t, err, "Sending L2 tx to sequencer")

 		// Let it show up on the unsafe chain
-		receipt, err := waitForTransaction(tx.Hash(), l2Prop, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
-		require.Nil(t, err, "Waiting for L2 tx on proposer")
+		receipt, err := waitForTransaction(tx.Hash(), l2Seq, 3*time.Duration(cfg.DeployConfig.L1BlockTime)*time.Second)
+		require.Nil(t, err, "Waiting for L2 tx on sequencer")

 		return receipt
 	}
@@ -1725,7 +1725,7 @@ func TestStopStartBatcher(t *testing.T) {
 	// ensure the safe chain advances
 	newSeqStatus, err := rollupClient.SyncStatus(context.Background())
 	require.Nil(t, err)
-	require.Greater(t, newSeqStatus.SafeL2.Number, propStatus.SafeL2.Number, "Safe chain did not advance")
+	require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance")

 	// stop the batch submission
 	err = sys.Batcher.Stop(context.Background())
@@ -1735,7 +1735,7 @@ func TestStopStartBatcher(t *testing.T) {
 	time.Sleep(safeBlockInclusionDuration)

 	// get the initial sync status
-	propStatus, err = rollupClient.SyncStatus(context.Background())
+	seqStatus, err = rollupClient.SyncStatus(context.Background())
 	require.Nil(t, err)

 	// send another tx
@@ -1745,7 +1745,7 @@ func TestStopStartBatcher(t *testing.T) {
 	// ensure that the safe chain does not advance while the batcher is stopped
 	newSeqStatus, err = rollupClient.SyncStatus(context.Background())
 	require.Nil(t, err)
-	require.Equal(t, newSeqStatus.SafeL2.Number, propStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped")
+	require.Equal(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain advanced while batcher was stopped")

 	// start the batch submission
 	err = sys.Batcher.Start()
@@ -1762,7 +1762,7 @@ func TestStopStartBatcher(t *testing.T) {
 	// ensure that the safe chain advances after restarting the batcher
 	newSeqStatus, err = rollupClient.SyncStatus(context.Background())
 	require.Nil(t, err)
-	require.Greater(t, newSeqStatus.SafeL2.Number, propStatus.SafeL2.Number, "Safe chain did not advance after batcher was restarted")
+	require.Greater(t, newSeqStatus.SafeL2.Number, seqStatus.SafeL2.Number, "Safe chain did not advance after batcher was restarted")
 }

 func TestChallenge(t *testing.T) {
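TestStopStartSequencer above doubles as the operational recipe for pausing block production on a live node: admin_stopSequencer returns the hash of the last sequenced block, and admin_startSequencer resumes building on top of a given block hash. A condensed sketch using go-ethereum's rpc client (the endpoint handling is illustrative, not code from this diff):

    package main

    import (
        "context"

        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/rpc"
    )

    // pauseAndResume stops the sequencer, then restarts it on the block it stopped at.
    func pauseAndResume(ctx context.Context, endpoint string) error {
        client, err := rpc.DialContext(ctx, endpoint)
        if err != nil {
            return err
        }
        defer client.Close()

        var head common.Hash
        // admin_stopSequencer returns the hash of the last block the node sequenced.
        if err := client.CallContext(ctx, &head, "admin_stopSequencer"); err != nil {
            return err
        }
        // admin_startSequencer resumes sequencing on top of the given block.
        return client.CallContext(ctx, nil, "admin_startSequencer", head)
    }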
diff --git a/e2e/system_tob_test.go b/e2e/system_tob_test.go
index 7d9b61bcf..ecc24e023 100644
--- a/e2e/system_tob_test.go
+++ b/e2e/system_tob_test.go
@@ -53,16 +53,16 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
 	require.Nil(t, err, "Error starting up system")
 	defer sys.Close()

-	// Obtain our proposer, syncer, and transactor keypair.
+	// Obtain our sequencer, syncer, and transactor keypair.
 	l1Client := sys.Clients["l1"]
-	l2Prop := sys.Clients["proposer"]
+	l2Seq := sys.Clients["sequencer"]
 	// l2Sync := sys.Clients["syncer"]
 	ethPrivKey := cfg.Secrets.ProxyAdminOwner

 	// Bind to the SystemConfig & GasPriceOracle contracts
 	sysconfig, err := bindings.NewSystemConfig(predeploys.DevSystemConfigAddr, l1Client)
 	require.Nil(t, err)
-	gpoContract, err := bindings.NewGasPriceOracleCaller(predeploys.GasPriceOracleAddr, l2Prop)
+	gpoContract, err := bindings.NewGasPriceOracleCaller(predeploys.GasPriceOracleAddr, l2Seq)
 	require.Nil(t, err)

 	// Obtain our signer.
@@ -82,7 +82,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
 	require.Nil(t, err, "waiting for sysconfig set gas config update tx")
 	require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")

-	_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Prop, txTimeoutDuration)
+	_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
 	require.NoError(t, err, "waiting for L2 block to include the sysconfig update")

 	gpoOverhead, err := gpoContract.Overhead(&bind.CallOpts{})
@@ -109,7 +109,7 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
 	require.Nil(t, err, "waiting for sysconfig set gas config update tx")
 	require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")

-	_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Prop, txTimeoutDuration)
+	_, err = waitForL1OriginOnL2(receipt.BlockNumber.Uint64(), l2Seq, txTimeoutDuration)
 	require.NoError(t, err, "waiting for L2 block to include the sysconfig update")

 	gpoOverhead, err = gpoContract.Overhead(&bind.CallOpts{})
@@ -125,9 +125,9 @@ func TestGasPriceOracleFeeUpdates(t *testing.T) {
 	}
 }

-// TestL2ProposerRPCDepositTx checks that the L2 proposer will not accept DepositTx type transactions.
+// TestL2SequencerRPCDepositTx checks that the L2 sequencer will not accept DepositTx type transactions.
 // The acceptance of these transactions would allow for arbitrary minting of ETH in L2.
-func TestL2ProposerRPCDepositTx(t *testing.T) {
+func TestL2SequencerRPCDepositTx(t *testing.T) {
 	parallel(t)
 	// Setup our logger handler
 	if !verboseGethNodes {
@@ -140,8 +140,8 @@ func TestL2ProposerRPCDepositTx(t *testing.T) {
 	require.Nil(t, err, "Error starting up system")
 	defer sys.Close()

-	// Obtain our proposer, syncer, and transactor keypair.
-	l2Prop := sys.Clients["proposer"]
+	// Obtain our sequencer, syncer, and transactor keypair.
+	l2Seq := sys.Clients["sequencer"]
 	l2Sync := sys.Clients["syncer"]
 	txSigningKey := sys.cfg.Secrets.Alice
 	require.Nil(t, err)
@@ -158,9 +158,9 @@ func TestL2ProposerRPCDepositTx(t *testing.T) {
 	})

 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	err = l2Prop.SendTransaction(ctx, tx)
+	err = l2Seq.SendTransaction(ctx, tx)
 	cancel()
-	require.Error(t, err, "a DepositTx was accepted by L2 proposer over RPC when it should not have been.")
+	require.Error(t, err, "a DepositTx was accepted by L2 sequencer over RPC when it should not have been.")

 	ctx, cancel = context.WithTimeout(context.Background(), 5*time.Second)
 	err = l2Sync.SendTransaction(ctx, tx)
@@ -253,9 +253,9 @@ func TestMixedDepositValidity(t *testing.T) {
 	require.Nil(t, err, "Error starting up system")
 	defer sys.Close()

-	// Obtain our proposer, syncer, and transactor keypair.
+	// Obtain our sequencer, syncer, and transactor keypair.
 	l1Client := sys.Clients["l1"]
-	l2Prop := sys.Clients["proposer"]
+	l2Seq := sys.Clients["sequencer"]
 	l2Sync := sys.Clients["syncer"]
 	require.NoError(t, err)
@@ -381,15 +381,15 @@ func TestMixedDepositValidity(t *testing.T) {
 	cancel()
 	require.NoError(t, err)

-	// Obtain the L2 proposer account balance
+	// Obtain the L2 sequencer account balance
 	ctx, cancel = context.WithTimeout(context.Background(), txTimeoutDuration)
-	endL2PropBalance, err := l2Prop.BalanceAt(ctx, transactor.Account.L2Opts.From, nil)
+	endL2SeqBalance, err := l2Seq.BalanceAt(ctx, transactor.Account.L2Opts.From, nil)
 	cancel()
 	require.NoError(t, err)

-	// Obtain the L2 proposer account nonce
+	// Obtain the L2 sequencer account nonce
 	ctx, cancel = context.WithTimeout(context.Background(), txTimeoutDuration)
-	endL2PropNonce, err := l2Prop.NonceAt(ctx, transactor.Account.L2Opts.From, nil)
+	endL2SeqNonce, err := l2Seq.NonceAt(ctx, transactor.Account.L2Opts.From, nil)
 	cancel()
 	require.NoError(t, err)

@@ -406,8 +406,8 @@ func TestMixedDepositValidity(t *testing.T) {
 	require.NoError(t, err)

 	require.Equal(t, transactor.ExpectedL1Nonce, endL1Nonce, "Unexpected L1 nonce for transactor")
-	require.Equal(t, transactor.ExpectedL2Nonce, endL2PropNonce, "Unexpected L2 proposer nonce for transactor")
-	require.Equal(t, transactor.ExpectedL2Balance, endL2PropBalance, "Unexpected L2 proposer balance for transactor")
+	require.Equal(t, transactor.ExpectedL2Nonce, endL2SeqNonce, "Unexpected L2 sequencer nonce for transactor")
+	require.Equal(t, transactor.ExpectedL2Balance, endL2SeqBalance, "Unexpected L2 sequencer balance for transactor")
 	require.Equal(t, transactor.ExpectedL2Nonce, endL2SyncNonce, "Unexpected L2 syncer nonce for transactor")
 	require.Equal(t, transactor.ExpectedL2Balance, endL2SyncBalance, "Unexpected L2 syncer balance for transactor")
 }
@@ -435,9 +435,9 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 	require.NoError(t, err, "error starting up system")
 	defer sys.Close()

-	// Obtain our proposer, syncer, and transactor keypair.
+	// Obtain our sequencer, syncer, and transactor keypair.
 	l1Client := sys.Clients["l1"]
-	l2Prop := sys.Clients["proposer"]
+	l2Seq := sys.Clients["sequencer"]
 	l2Sync := sys.Clients["syncer"]
 	require.NoError(t, err)
@@ -490,7 +490,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 	require.NoError(t, err)

 	// Bind to the L2-L1 message passer
-	l2l1MessagePasser, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Prop)
+	l2l1MessagePasser, err := bindings.NewL2ToL1MessagePasser(predeploys.L2ToL1MessagePasserAddr, l2Seq)
 	require.NoError(t, err, "error binding to message passer on L2")

 	// Create our fuzzer wrapper to generate complex values (despite this not being a fuzz test, this is still a useful
@@ -511,7 +511,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 	// Wait for the transaction to appear in L2 syncer
 	receipt, err := waitForL2Transaction(tx.Hash(), l2Sync, txTimeoutDuration)
-	require.Nil(t, err, "withdrawal initiated on L2 proposer")
+	require.Nil(t, err, "withdrawal initiated on L2 sequencer")
 	require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful, "transaction failed")

 	// Obtain the header for the block containing the transaction (used to calculate gas fees)
@@ -681,7 +681,7 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 	// At the end, assert our account balance/nonce states.

-	// Obtain the L2 proposer account balance
+	// Obtain the L2 sequencer account balance
 	ctx, cancel = context.WithTimeout(context.Background(), txTimeoutDuration)
 	endL1Balance, err := l1Client.BalanceAt(ctx, transactor.Account.L1Opts.From, nil)
 	cancel()
@@ -693,15 +693,15 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 	cancel()
 	require.NoError(t, err)

-	// Obtain the L2 proposer account balance
+	// Obtain the L2 sequencer account balance
 	ctx, cancel = context.WithTimeout(context.Background(), txTimeoutDuration)
-	endL2PropBalance, err := l2Prop.BalanceAt(ctx, transactor.Account.L1Opts.From, nil)
+	endL2SeqBalance, err := l2Seq.BalanceAt(ctx, transactor.Account.L1Opts.From, nil)
 	cancel()
 	require.NoError(t, err)

-	// Obtain the L2 proposer account nonce
+	// Obtain the L2 sequencer account nonce
 	ctx, cancel = context.WithTimeout(context.Background(), txTimeoutDuration)
-	endL2PropNonce, err := l2Prop.NonceAt(ctx, transactor.Account.L1Opts.From, nil)
+	endL2SeqNonce, err := l2Seq.NonceAt(ctx, transactor.Account.L1Opts.From, nil)
 	cancel()
 	require.NoError(t, err)

@@ -722,8 +722,8 @@ func TestMixedWithdrawalValidity(t *testing.T) {
 		_ = endL1Balance
 		// require.Equal(t, transactor.ExpectedL1Balance, endL1Balance, "Unexpected L1 balance for transactor")
 		require.Equal(t, transactor.ExpectedL1Nonce, endL1Nonce, "Unexpected L1 nonce for transactor")
-		require.Equal(t, transactor.ExpectedL2Nonce, endL2PropNonce, "Unexpected L2 proposer nonce for transactor")
-		require.Equal(t, transactor.ExpectedL2Balance, endL2PropBalance, "Unexpected L2 proposer balance for transactor")
+		require.Equal(t, transactor.ExpectedL2Nonce, endL2SeqNonce, "Unexpected L2 sequencer nonce for transactor")
+		require.Equal(t, transactor.ExpectedL2Balance, endL2SeqBalance, "Unexpected L2 sequencer balance for transactor")
 		require.Equal(t, transactor.ExpectedL2Nonce, endL2SyncNonce, "Unexpected L2 syncer nonce for transactor")
 		require.Equal(t, transactor.ExpectedL2Balance, endL2SyncBalance, "Unexpected L2 syncer balance for transactor")
 	})
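TestL2SequencerRPCDepositTx guards the invariant that deposit-type transactions only enter L2 through the KromaPortal on L1: a DepositTx carries a Mint amount that is credited on L2 without any L1 value backing it, so accepting one over RPC would let anyone mint ETH. A sketch of the shape of that check, reusing the test's l2Seq client and context; the DepositTx field names follow the op-geth-style types package and are an assumption of this sketch, as the real test builds the transaction from fuzzed values:

    // Construct a deposit-type transaction and verify the RPC rejects it.
    depositTx := types.NewTx(&types.DepositTx{
        SourceHash: common.Hash{}, // deposits are normally derived from an L1 event
        From:       aliceAddr,
        To:         &bobAddr,
        Mint:       big.NewInt(1_000_000), // ETH credited on L2 with no L1 backing
        Value:      big.NewInt(1_000_000),
        Gas:        21_000,
    })
    err := l2Seq.SendTransaction(ctx, depositTx)
    // must be rejected: deposits may only be derived from L1, never accepted via RPC
    require.Error(t, err)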
diff --git a/ops-devnet/docker-compose.yml b/ops-devnet/docker-compose.yml
index 6d1a1a845..6397e1af1 100644
--- a/ops-devnet/docker-compose.yml
+++ b/ops-devnet/docker-compose.yml
@@ -64,8 +64,8 @@ services:
       NODE_RPC_PORT: 8545
       NODE_RPC_ENABLE_ADMIN: true
       NODE_SNAPSHOT_LOG: /kroma_log/snapshot.log
-      NODE_PROPOSER_ENABLED: true
-      NODE_PROPOSER_L1_CONFS: 0
+      NODE_SEQUENCER_ENABLED: true
+      NODE_SEQUENCER_L1_CONFS: 0
       NODE_SYNCER_L1_CONFS: 0
       NODE_P2P_LISTEN_IP: 0.0.0.0
       NODE_P2P_LISTEN_TCP_PORT: 9003
@@ -73,7 +73,7 @@ services:
       NODE_P2P_PEER_SCORING: light
      NODE_P2P_PEER_BANNING: true
       NODE_P2P_PRIV_PATH: /config/p2p-node-key.txt
-      NODE_P2P_PROPOSER_KEY: 8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba
+      NODE_P2P_SEQUENCER_KEY: 8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba
       NODE_METRICS_ENABLED: true
       NODE_METRICS_ADDR: 0.0.0.0
       NODE_METRICS_PORT: 7300
@@ -84,7 +84,7 @@ services:
       - "7300:7300"
       - "6060:6060"
     volumes:
-      - "${PWD}/p2p-proposer-key.txt:/config/p2p-proposer-key.txt"
+      - "${PWD}/p2p-sequencer-key.txt:/config/p2p-sequencer-key.txt"
       - "${PWD}/p2p-node-key.txt:/config/p2p-node-key.txt"
       - "${PWD}/test-jwt-secret.txt:/config/test-jwt-secret.txt"
       - "${PWD}/../.devnet/rollup.json:/rollup.json"
diff --git a/ops-devnet/p2p-proposer-key.txt b/ops-devnet/p2p-sequencer-key.txt
similarity index 100%
rename from ops-devnet/p2p-proposer-key.txt
rename to ops-devnet/p2p-sequencer-key.txt
diff --git a/packages/contracts/.gas-snapshot b/packages/contracts/.gas-snapshot
index a6d99a64b..435a1766c 100644
--- a/packages/contracts/.gas-snapshot
+++ b/packages/contracts/.gas-snapshot
@@ -62,12 +62,12 @@ FeeVault_Test:test_minWithdrawalAmount_succeeds() (gas: 10737)
 GasBenchMark_KromaPortal:test_depositTransaction_benchmark() (gas: 75076)
 GasBenchMark_KromaPortal:test_depositTransaction_benchmark_1() (gas: 75709)
 GasBenchMark_KromaPortal:test_proveWithdrawalTransaction_benchmark() (gas: 263545)
-GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 342509)
-GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2940760)
-GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 518768)
-GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4030961)
-GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 421561)
-GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3467460)
+GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_0() (gas: 342487)
+GasBenchMark_L1CrossDomainMessenger:test_sendMessage_benchmark_1() (gas: 2940738)
+GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_0() (gas: 518746)
+GasBenchMark_L1StandardBridge_Deposit:test_depositERC20_benchmark_1() (gas: 4030939)
+GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_0() (gas: 421539)
+GasBenchMark_L1StandardBridge_Deposit:test_depositETH_benchmark_1() (gas: 3467438)
 GasBenchMark_L1StandardBridge_Finalize:test_finalizeBridgeETH_benchmark() (gas: 32274)
 GasBenchMark_L2OutputOracle:test_submitL2Output_benchmark() (gas: 155604)
 GasPriceOracle_Test:test_baseFee_succeeds() (gas: 8325)
diff --git a/packages/contracts/.storage-layout b/packages/contracts/.storage-layout
index d149097d3..1cedb25c6 100644
--- a/packages/contracts/.storage-layout
+++ b/packages/contracts/.storage-layout
@@ -190,12 +190,12 @@
 | totalProcessed | uint256 | 0    | 0      | 32    | contracts/L2/ProtocolVault.sol:ProtocolVault |

 =======================
-➡ contracts/L2/ProposerRewardVault.sol:ProposerRewardVault
+➡ contracts/L2/L1FeeVault.sol:L1FeeVault
 =======================

-| Name           | Type    | Slot | Offset | Bytes | Contract                                                 |
-|----------------|---------|------|--------|-------|----------------------------------------------------------|
-| totalProcessed | uint256 | 0    | 0      | 32    | contracts/L2/ProposerRewardVault.sol:ProposerRewardVault |
+| Name           | Type    | Slot | Offset | Bytes | Contract                               |
+|----------------|---------|------|--------|-------|----------------------------------------|
+| totalProcessed | uint256 | 0    | 0      | 32    | contracts/L2/L1FeeVault.sol:L1FeeVault |

 =======================
 ➡ contracts/vendor/WETH9.sol:WETH9
diff --git a/packages/contracts/contracts/L2/ProposerRewardVault.sol b/packages/contracts/contracts/L2/L1FeeVault.sol
similarity index 65%
rename from packages/contracts/contracts/L2/ProposerRewardVault.sol
rename to packages/contracts/contracts/L2/L1FeeVault.sol
index 301309fa0..730afb28b 100644
--- a/packages/contracts/contracts/L2/ProposerRewardVault.sol
+++ b/packages/contracts/contracts/L2/L1FeeVault.sol
@@ -7,14 +7,14 @@ import { Semver } from "../universal/Semver.sol";
 /**
  * @custom:proxied
  * @custom:predeploy 0x4200000000000000000000000000000000000007
- * @title ProposerRewardVault
- * @notice The ProposerRewardVault accumulates the L1 portion of the transaction fees.
+ * @title L1FeeVault
+ * @notice The L1FeeVault accumulates the L1 portion of the transaction fees.
  */
-contract ProposerRewardVault is FeeVault, Semver {
+contract L1FeeVault is FeeVault, Semver {
     /**
-     * @custom:semver 1.0.0
+     * @custom:semver 1.0.1
      *
      * @param _recipient Address that will receive the accumulated fees.
      */
-    constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 0, 0) {}
+    constructor(address _recipient) FeeVault(_recipient, 10 ether) Semver(1, 0, 1) {}
 }
diff --git a/packages/contracts/contracts/libraries/Predeploys.sol b/packages/contracts/contracts/libraries/Predeploys.sol
index 56468709a..44b5afe9f 100644
--- a/packages/contracts/contracts/libraries/Predeploys.sol
+++ b/packages/contracts/contracts/libraries/Predeploys.sol
@@ -39,9 +39,9 @@ library Predeploys {
     address internal constant PROTOCOL_VAULT = 0x4200000000000000000000000000000000000006;

     /**
-     * @notice Address of the ProposerRewardVault predeploy.
+     * @notice Address of the L1FeeVault predeploy.
      */
-    address internal constant PROPOSER_REWARD_VAULT = 0x4200000000000000000000000000000000000007;
+    address internal constant L1_FEE_VAULT = 0x4200000000000000000000000000000000000007;

     /**
      * @notice Address of the ValidatorRewardVault predeploy.
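The vault keeps its predeploy address 0x4200000000000000000000000000000000000007 across the rename; only the name changes. The same address table is duplicated in Predeploys.sol, the TypeScript constants, and the Go predeploys package that the e2e tests import, so the three must stay in lockstep. A hypothetical cross-check usable inside any Go test in this repo (it assumes only the *Addr constants already referenced elsewhere in this diff):

    // Addresses from Predeploys.sol; predeploys.*Addr are the Go-side constants.
    vaults := map[common.Address]common.Address{
        common.HexToAddress("0x4200000000000000000000000000000000000006"): predeploys.ProtocolVaultAddr,
        common.HexToAddress("0x4200000000000000000000000000000000000007"): predeploys.L1FeeVaultAddr,
        common.HexToAddress("0x4200000000000000000000000000000000000008"): predeploys.ValidatorRewardVaultAddr,
    }
    for want, got := range vaults {
        require.Equal(t, want, got, "Solidity and Go predeploy tables out of sync")
    }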
diff --git a/packages/contracts/contracts/test/FeeVault.t.sol b/packages/contracts/contracts/test/FeeVault.t.sol
index 7c773892f..380c5b9cb 100644
--- a/packages/contracts/contracts/test/FeeVault.t.sol
+++ b/packages/contracts/contracts/test/FeeVault.t.sol
@@ -3,33 +3,33 @@ pragma solidity 0.8.15;

 import { Predeploys } from "../libraries/Predeploys.sol";
 import { ProtocolVault } from "../L2/ProtocolVault.sol";
-import { ProposerRewardVault } from "../L2/ProposerRewardVault.sol";
+import { L1FeeVault } from "../L2/L1FeeVault.sol";
 import { StandardBridge } from "../universal/StandardBridge.sol";
 import { Bridge_Initializer } from "./CommonTest.t.sol";

 // Test the implementations of the FeeVault
 contract FeeVault_Test is Bridge_Initializer {
     ProtocolVault protocolVault = ProtocolVault(payable(Predeploys.PROTOCOL_VAULT));
-    ProposerRewardVault proposerRewardVault = ProposerRewardVault(payable(Predeploys.PROPOSER_REWARD_VAULT));
+    L1FeeVault l1FeeVault = L1FeeVault(payable(Predeploys.L1_FEE_VAULT));

     address constant recipient = address(0x10000);

     function setUp() public override {
         super.setUp();
         vm.etch(Predeploys.PROTOCOL_VAULT, address(new ProtocolVault(recipient)).code);
-        vm.etch(Predeploys.PROPOSER_REWARD_VAULT, address(new ProposerRewardVault(recipient)).code);
+        vm.etch(Predeploys.L1_FEE_VAULT, address(new L1FeeVault(recipient)).code);

         vm.label(Predeploys.PROTOCOL_VAULT, "ProtocolVault");
-        vm.label(Predeploys.PROPOSER_REWARD_VAULT, "ProposerRewardVault");
+        vm.label(Predeploys.L1_FEE_VAULT, "L1FeeVault");
     }

     function test_constructor_succeeds() external {
         assertEq(protocolVault.RECIPIENT(), recipient);
-        assertEq(proposerRewardVault.RECIPIENT(), recipient);
+        assertEq(l1FeeVault.RECIPIENT(), recipient);
     }

     function test_minWithdrawalAmount_succeeds() external {
         assertEq(protocolVault.MIN_WITHDRAWAL_AMOUNT(), 10 ether);
-        assertEq(proposerRewardVault.MIN_WITHDRAWAL_AMOUNT(), 10 ether);
+        assertEq(l1FeeVault.MIN_WITHDRAWAL_AMOUNT(), 10 ether);
     }
 }
diff --git a/packages/contracts/contracts/test/invariants/L2OutputOracle.t.sol b/packages/contracts/contracts/test/invariants/L2OutputOracle.t.sol
index b9d37de6f..7cf05f4a9 100644
--- a/packages/contracts/contracts/test/invariants/L2OutputOracle.t.sol
+++ b/packages/contracts/contracts/test/invariants/L2OutputOracle.t.sol
@@ -35,7 +35,7 @@ contract L2OutputOracle_MonotonicBlockNumIncrease_Invariant is L2OutputOracle_In
     function setUp() public override {
         super.setUp();

-        // Create a proposer actor.
+        // Create a validator actor.
         actor = new L2OutputOracle_Validator(oracle, vm);

         // Set the target contract to the validator actor
diff --git a/packages/contracts/deploy-config/devnetL1.json b/packages/contracts/deploy-config/devnetL1.json
index 977423116..bef654a04 100644
--- a/packages/contracts/deploy-config/devnetL1.json
+++ b/packages/contracts/deploy-config/devnetL1.json
@@ -3,10 +3,10 @@
   "l2ChainID": 901,
   "l2BlockTime": 2,
   "l1StartingBlockTag": "earliest",
-  "maxProposerDrift": 300,
-  "proposerWindowSize": 15,
+  "maxSequencerDrift": 300,
+  "sequencerWindowSize": 15,
   "channelTimeout": 40,
-  "p2pProposerAddress": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
+  "p2pSequencerAddress": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
   "batchInboxAddress": "0xff00000000000000000000000000000000000000",
   "batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
   "validatorPoolTrustedValidator": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8",
@@ -19,7 +19,7 @@
   "l1BlockTime": 3,
   "cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
   "protocolVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
-  "proposerRewardVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
+  "l1FeeVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
   "proxyAdminOwner": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
   "controller": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266",
   "finalizationPeriodSeconds": 600,
diff --git a/packages/contracts/deploy-config/mainnet.json b/packages/contracts/deploy-config/mainnet.json
index 80cc986f8..03a6f8805 100644
--- a/packages/contracts/deploy-config/mainnet.json
+++ b/packages/contracts/deploy-config/mainnet.json
@@ -4,10 +4,10 @@
   "l1ChainID": 1,
   "l2ChainID": 255,
   "l2BlockTime": 2,
-  "maxProposerDrift": 600,
-  "proposerWindowSize": 3600,
+  "maxSequencerDrift": 600,
+  "sequencerWindowSize": 3600,
   "channelTimeout": 300,
-  "p2pProposerAddress": "0x994C074BD9503e4f01CB834e2c409FA16b41E587",
+  "p2pSequencerAddress": "0x994C074BD9503e4f01CB834e2c409FA16b41E587",
   "batchInboxAddress": "0xfF00000000000000000000000000000000000255",
   "batchSenderAddress": "0x41b8cd6791de4d8f9e0eaf7861ac506822adce12",
   "validatorPoolTrustedValidator": "0x3aa00bb915A8e78b0523E4c365e3E70A19d329e6",
@@ -15,7 +15,7 @@
   "validatorPoolMaxUnbond": 10,
   "validatorPoolRoundDuration": 1800,
   "protocolVaultRecipient": "0xA03c13C6597a0716D1525b7fDaD2fD95ECb49081",
-  "proposerRewardVaultRecipient": "0xA03c13C6597a0716D1525b7fDaD2fD95ECb49081",
+  "l1FeeVaultRecipient": "0xA03c13C6597a0716D1525b7fDaD2fD95ECb49081",
   "l2OutputOracleSubmissionInterval": 1800,
   "l2OutputOracleStartingTimestamp": -1,
   "finalizationPeriodSeconds": 604800,
diff --git a/packages/contracts/deploy-config/sepolia.json b/packages/contracts/deploy-config/sepolia.json
index ef498200b..389fd4faf 100644
--- a/packages/contracts/deploy-config/sepolia.json
+++ b/packages/contracts/deploy-config/sepolia.json
@@ -4,10 +4,10 @@
   "l1ChainID": 11155111,
   "l2ChainID": 2358,
   "l2BlockTime": 2,
-  "maxProposerDrift": 1200,
-  "proposerWindowSize": 3600,
+  "maxSequencerDrift": 1200,
+  "sequencerWindowSize": 3600,
   "channelTimeout": 120,
-  "p2pProposerAddress": "0x9847c8baa7369b4b1ea0ade43163ab1c58b09e43",
+  "p2pSequencerAddress": "0x9847c8baa7369b4b1ea0ade43163ab1c58b09e43",
   "batchInboxAddress": "0xfa79000000000000000000000000000000000001",
   "batchSenderAddress": "0xf15dc770221b99c98d4aaed568f2ab04b9d16e42",
   "validatorPoolTrustedValidator": "0xc2da150ecfaa2e275577203bb177dd4de4a2536e",
@@ -15,7 +15,7 @@
   "validatorPoolMaxUnbond": 10,
   "validatorPoolRoundDuration": 60,
   "protocolVaultRecipient": "0xb5d7c1921fdbdea614f264a75ca0be416cf63eb5",
-  "proposerRewardVaultRecipient": "0xb5d7c1921fdbdea614f264a75ca0be416cf63eb5",
+  "l1FeeVaultRecipient": "0xb5d7c1921fdbdea614f264a75ca0be416cf63eb5",
   "l2OutputOracleSubmissionInterval": 60,
   "l2OutputOracleStartingTimestamp": -1,
   "finalizationPeriodSeconds": 21600,
diff --git a/packages/contracts/deploy/L1/012-SystemConfig.ts b/packages/contracts/deploy/L1/012-SystemConfig.ts
index 56d4feafa..4311910f7 100644
--- a/packages/contracts/deploy/L1/012-SystemConfig.ts
+++ b/packages/contracts/deploy/L1/012-SystemConfig.ts
@@ -38,7 +38,7 @@ const deployFn: DeployFunction = async (hre) => {
       hre.deployConfig.gasPriceOracleScalar,
       batcherHash,
       hre.deployConfig.l2GenesisBlockGasLimit,
-      hre.deployConfig.p2pProposerAddress,
+      hre.deployConfig.p2pSequencerAddress,
       defaultResourceConfig,
       hre.deployConfig.validatorRewardScalar,
     ],
@@ -49,7 +49,7 @@ const deployFn: DeployFunction = async (hre) => {
       hre.deployConfig.gasPriceOracleScalar,
       batcherHash,
       hre.deployConfig.l2GenesisBlockGasLimit,
-      hre.deployConfig.p2pProposerAddress,
+      hre.deployConfig.p2pSequencerAddress,
       defaultResourceConfig,
       hre.deployConfig.validatorRewardScalar,
     ],
@@ -69,7 +69,7 @@ const deployFn: DeployFunction = async (hre) => {
   await assertContractVariable(
     contract,
     'unsafeBlockSigner',
-    hre.deployConfig.p2pProposerAddress
+    hre.deployConfig.p2pSequencerAddress
   )
   await assertContractVariable(
     contract,
diff --git a/packages/contracts/deploy/L2/008-ProposerRewardVault.ts b/packages/contracts/deploy/L2/008-L1FeeVault.ts
similarity index 58%
rename from packages/contracts/deploy/L2/008-ProposerRewardVault.ts
rename to packages/contracts/deploy/L2/008-L1FeeVault.ts
index 14818d9ec..d94902c6f 100644
--- a/packages/contracts/deploy/L2/008-ProposerRewardVault.ts
+++ b/packages/contracts/deploy/L2/008-L1FeeVault.ts
@@ -9,24 +9,24 @@ const deployFn: DeployFunction = async (hre) => {
   const l1 = hre.network.companionNetworks['l1']
   const deployConfig = hre.getDeployConfig(l1)

-  const proposerRewardVaultRecipient = deployConfig.proposerRewardVaultRecipient
-  if (proposerRewardVaultRecipient === ethers.constants.AddressZero) {
-    throw new Error('ProposerRewardVault RECIPIENT zero address')
+  const l1FeeVaultRecipient = deployConfig.l1FeeVaultRecipient
+  if (l1FeeVaultRecipient === ethers.constants.AddressZero) {
+    throw new Error('L1FeeVault RECIPIENT zero address')
   }

-  await deploy(hre, 'ProposerRewardVault', {
-    args: [proposerRewardVaultRecipient],
+  await deploy(hre, 'L1FeeVault', {
+    args: [l1FeeVaultRecipient],
     isProxyImpl: true,
     postDeployAction: async (contract) => {
       await assertContractVariable(
         contract,
         'RECIPIENT',
-        ethers.utils.getAddress(proposerRewardVaultRecipient)
+        ethers.utils.getAddress(l1FeeVaultRecipient)
       )
     },
   })
 }

-deployFn.tags = ['ProposerRewardVault', 'l2']
+deployFn.tags = ['L1FeeVault', 'l2']

 export default deployFn
diff --git a/packages/contracts/deployments/kroma/ProposerRewardVault.json b/packages/contracts/deployments/kroma/L1FeeVault.json
similarity index 100%
rename from packages/contracts/deployments/kroma/ProposerRewardVault.json
rename to packages/contracts/deployments/kroma/L1FeeVault.json
diff --git a/packages/contracts/deployments/kroma/ProposerRewardVaultProxy.json b/packages/contracts/deployments/kroma/L1FeeVaultProxy.json
similarity index 100%
rename from packages/contracts/deployments/kroma/ProposerRewardVaultProxy.json
rename to packages/contracts/deployments/kroma/L1FeeVaultProxy.json
diff --git a/packages/contracts/deployments/kromaSepolia/ProposerRewardVault.json b/packages/contracts/deployments/kromaSepolia/L1FeeVault.json
similarity index 100%
rename from packages/contracts/deployments/kromaSepolia/ProposerRewardVault.json
rename to packages/contracts/deployments/kromaSepolia/L1FeeVault.json
diff --git a/packages/contracts/deployments/kromaSepolia/ProposerRewardVaultProxy.json b/packages/contracts/deployments/kromaSepolia/L1FeeVaultProxy.json
similarity index 100%
rename from packages/contracts/deployments/kromaSepolia/ProposerRewardVaultProxy.json
rename to packages/contracts/deployments/kromaSepolia/L1FeeVaultProxy.json
diff --git a/packages/contracts/scripts/FeeVaultWithdrawal.s.sol b/packages/contracts/scripts/FeeVaultWithdrawal.s.sol
index bbfa02e21..0cec28832 100644
--- a/packages/contracts/scripts/FeeVaultWithdrawal.s.sol
+++ b/packages/contracts/scripts/FeeVaultWithdrawal.s.sol
@@ -31,7 +31,7 @@ contract FeeVaultWithdrawal is Script {
         address[] memory vaults = new address[](3);
         vaults[0] = Predeploys.VALIDATOR_REWARD_VAULT;
         vaults[1] = Predeploys.PROTOCOL_VAULT;
-        vaults[2] = Predeploys.PROPOSER_REWARD_VAULT;
+        vaults[2] = Predeploys.L1_FEE_VAULT;

         for (uint256 i; i < vaults.length; i++) {
             address vault = vaults[i];
diff --git a/packages/contracts/scripts/rename-deploy-scripts.ts b/packages/contracts/scripts/rename-deploy-scripts.ts
index 4b7ca23b5..7874d1574 100644
--- a/packages/contracts/scripts/rename-deploy-scripts.ts
+++ b/packages/contracts/scripts/rename-deploy-scripts.ts
@@ -35,7 +35,7 @@ const L2_ORDERED_NAMES = [
   'GasPriceOracle',
   'ValidatorRewardVault',
   'ProtocolVault',
-  'ProposerRewardVault',
+  'L1FeeVault',
   'KromaMintableERC20Factory',
   'KromaMintableERC721Factory',
 ]
diff --git a/packages/contracts/scripts/storage-snapshot.sh b/packages/contracts/scripts/storage-snapshot.sh
index e58595d8a..3c6ae73b0 100755
--- a/packages/contracts/scripts/storage-snapshot.sh
+++ b/packages/contracts/scripts/storage-snapshot.sh
@@ -24,7 +24,7 @@ contracts=(
   contracts/L2/L2ToL1MessagePasser.sol:L2ToL1MessagePasser
   contracts/L2/ValidatorRewardVault.sol:ValidatorRewardVault
   contracts/L2/ProtocolVault.sol:ProtocolVault
-  contracts/L2/ProposerRewardVault.sol:ProposerRewardVault
+  contracts/L2/L1FeeVault.sol:L1FeeVault
   contracts/vendor/WETH9.sol:WETH9
   contracts/universal/ProxyAdmin.sol:ProxyAdmin
   contracts/universal/Proxy.sol:Proxy
diff --git a/packages/contracts/src/constants.ts b/packages/contracts/src/constants.ts
index 30354d928..30f71ab1c 100644
--- a/packages/contracts/src/constants.ts
+++ b/packages/contracts/src/constants.ts
@@ -12,7 +12,7 @@ export const predeploys = {
   L2CrossDomainMessenger: '0x4200000000000000000000000000000000000004',
   GasPriceOracle: '0x4200000000000000000000000000000000000005',
   ProtocolVault: '0x4200000000000000000000000000000000000006',
-  ProposerRewardVault: '0x4200000000000000000000000000000000000007',
+  L1FeeVault: '0x4200000000000000000000000000000000000007',
   ValidatorRewardVault: '0x4200000000000000000000000000000000000008',
   L2StandardBridge: '0x4200000000000000000000000000000000000009',
   L2ERC721Bridge: '0x420000000000000000000000000000000000000A',
diff --git a/packages/contracts/src/deploy-config.ts b/packages/contracts/src/deploy-config.ts
index 8e6ede2ad..0141501f4 100644
--- a/packages/contracts/src/deploy-config.ts
+++ b/packages/contracts/src/deploy-config.ts
@@ -40,15 +40,15 @@ interface RequiredDeployConfig {
   l2BlockTime: number

   /**
-   * Proposer batches may not be more than maxProposerDrift seconds after the L1 timestamp of the
-   * end of the proposing window end.
+   * Sequencer batches may not be more than maxSequencerDrift seconds after the L1 timestamp of
+   * the end of the sequencing window.
    */
-  maxProposerDrift: number
+  maxSequencerDrift: number

   /**
-   * Number of L1 blocks per proposing window.
+   * Number of L1 blocks per sequencing window.
    */
-  proposerWindowSize: number
+  sequencerWindowSize: number

   /**
    * Number of L1 blocks that a frame stays valid when included in L1.
@@ -56,9 +56,9 @@ interface RequiredDeployConfig {
   channelTimeout: number

   /**
-   * Address of the key the proposer uses to sign blocks on the P2P layer.
+   * Address of the key the sequencer uses to sign blocks on the P2P layer.
    */
-  p2pProposerAddress: string
+  p2pSequencerAddress: string

   /**
    * L1 address that batches are sent to.
@@ -146,9 +146,9 @@ interface RequiredDeployConfig {
   protocolVaultRecipient: string

   /**
-   * L1 recipient of fees accumulated in the ProposerRewardVault.
+   * L1 recipient of fees accumulated in the L1FeeVault.
    */
-  proposerRewardVaultRecipient: string
+  l1FeeVaultRecipient: string

   /**
    * Timeout seconds of bisection in the Colosseum.
@@ -280,16 +280,16 @@ export const deployConfigSpec: {
   l2BlockTime: {
     type: 'number',
   },
-  maxProposerDrift: {
+  maxSequencerDrift: {
     type: 'number',
   },
-  proposerWindowSize: {
+  sequencerWindowSize: {
     type: 'number',
   },
   channelTimeout: {
     type: 'number',
   },
-  p2pProposerAddress: {
+  p2pSequencerAddress: {
     type: 'address',
   },
   batchInboxAddress: {
@@ -330,7 +330,7 @@ export const deployConfigSpec: {
   protocolVaultRecipient: {
     type: 'address',
   },
-  proposerRewardVaultRecipient: {
+  l1FeeVaultRecipient: {
     type: 'address',
   },
   cliqueSignerAddress: {
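The two renamed parameters documented above define the derivation bounds: every L2 block has an L1 origin, its timestamp may exceed that origin's timestamp by at most maxSequencerDrift seconds, and the batch carrying it must land on L1 within sequencerWindowSize L1 blocks of the origin. An illustrative Go predicate (not code from this diff; the names are invented for the sketch):

    // batchInBounds reports whether a batch for an L2 block with the given L1
    // origin is still acceptable, per the two renamed config knobs.
    func batchInBounds(l2Time, originTime, originNum, inclusionNum,
        maxSequencerDrift, sequencerWindowSize uint64) bool {
        timestampOK := l2Time >= originTime && l2Time <= originTime+maxSequencerDrift
        windowOK := inclusionNum <= originNum+sequencerWindowSize
        return timestampOK && windowOK
    }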
diff --git a/packages/contracts/tasks/check-l2.ts b/packages/contracts/tasks/check-l2.ts
index ef338e672..c97a02726 100644
--- a/packages/contracts/tasks/check-l2.ts
+++ b/packages/contracts/tasks/check-l2.ts
@@ -413,32 +413,32 @@ const check = {
     await checkProxy(hre, 'ProtocolVault', signer.provider)
     await assertProxy(hre, 'ProtocolVault', signer.provider)
   },
-  // ProposerRewardVault
+  // L1FeeVault
   // - check version
   // - check MIN_WITHDRAWAL_AMOUNT
   // - check RECIPIENT
-  ProposerRewardVault: async (
+  L1FeeVault: async (
     hre: HardhatRuntimeEnvironment,
     signer: Signer
   ) => {
-    const ProposerRewardVault = await hre.ethers.getContractAt(
-      'ProposerRewardVault',
-      predeploys.ProposerRewardVault,
+    const L1FeeVault = await hre.ethers.getContractAt(
+      'L1FeeVault',
+      predeploys.L1FeeVault,
       signer
     )

-    await assertSemver(ProposerRewardVault, 'ProposerRewardVault')
+    await assertSemver(L1FeeVault, 'L1FeeVault')

     const MIN_WITHDRAWAL_AMOUNT =
-      await ProposerRewardVault.MIN_WITHDRAWAL_AMOUNT()
+      await L1FeeVault.MIN_WITHDRAWAL_AMOUNT()
     console.log(`  - MIN_WITHDRAWAL_AMOUNT: ${MIN_WITHDRAWAL_AMOUNT}`)

-    const RECIPIENT = await ProposerRewardVault.RECIPIENT()
+    const RECIPIENT = await L1FeeVault.RECIPIENT()
     assert(RECIPIENT !== hre.ethers.constants.AddressZero)
     yell(`  - RECIPIENT: ${RECIPIENT}`)

-    await checkProxy(hre, 'ProposerRewardVault', signer.provider)
-    await assertProxy(hre, 'ProposerRewardVault', signer.provider)
+    await checkProxy(hre, 'L1FeeVault', signer.provider)
+    await assertProxy(hre, 'L1FeeVault', signer.provider)
   },
   // L2ToL1MessagePasser
   // - check version
diff --git a/packages/contracts/tasks/rekey.ts b/packages/contracts/tasks/rekey.ts
index 0a1f7515b..2a363a6a6 100644
--- a/packages/contracts/tasks/rekey.ts
+++ b/packages/contracts/tasks/rekey.ts
@@ -12,7 +12,7 @@ task('rekey', 'Generates a new set of keys for a test network').setAction(
     'proxyAdminOwner',
     'protocolFundRecipient',
     'kromaL1FeeRecipient',
-    'p2pProposerAddress',
+    'p2pSequencerAddress',
     'batchSenderAddress',
   ]
diff --git a/packages/contracts/tasks/watch.ts b/packages/contracts/tasks/watch.ts
index cd24eddc1..1f8a00db8 100644
--- a/packages/contracts/tasks/watch.ts
+++ b/packages/contracts/tasks/watch.ts
@@ -52,12 +52,12 @@ task('watch', 'Watch a Kroma System')
     'layer-two-number': kromaNodeConfig.genesis.l2.number,
     'layer-two-time': kromaNodeConfig.genesis.l2_time,
     'block-time': kromaNodeConfig.block_time,
-    'max-proposer-drift': kromaNodeConfig.max_proposer_drift,
-    'proposer-window-size': kromaNodeConfig.proposer_window_size,
+    'max-sequencer-drift': kromaNodeConfig.max_sequencer_drift,
+    'sequencer-window-size': kromaNodeConfig.sequencer_window_size,
     'channel-timeout': kromaNodeConfig.channel_timeout,
     'l1-chain-id': kromaNodeConfig.l1_chain_id,
     'l2-chain-id': kromaNodeConfig.l2_chain_id,
-    'p2p-proposer-address': kromaNodeConfig.p2p_proposer_address,
+    'p2p-sequencer-address': kromaNodeConfig.p2p_sequencer_address,
     'fee-recipient-address': kromaNodeConfig.fee_recipient_address,
     'batch-inbox-address': kromaNodeConfig.batch_inbox_address,
     'batch-sender-address': kromaNodeConfig.batch_sender_address,
diff --git a/packages/core-utils/src/kroma/kroma-node.ts b/packages/core-utils/src/kroma/kroma-node.ts
index b3651a280..adc38b5a6 100644
--- a/packages/core-utils/src/kroma/kroma-node.ts
+++ b/packages/core-utils/src/kroma/kroma-node.ts
@@ -11,12 +11,12 @@ export interface KromaNodeConfig {
     l2_time: number
   }
   block_time: number
-  max_proposer_drift: number
-  proposer_window_size: number
+  max_sequencer_drift: number
+  sequencer_window_size: number
   channel_timeout: number
   l1_chain_id: number
   l2_chain_id: number
-  p2p_proposer_address: string
+  p2p_sequencer_address: string
   batch_inbox_address: string
   batch_sender_address: string
   deposit_contract_address: string
diff --git a/utils/chain-ops/genesis/check.go b/utils/chain-ops/genesis/check.go
index 5a13631e6..a71091c0d 100644
--- a/utils/chain-ops/genesis/check.go
+++ b/utils/chain-ops/genesis/check.go
@@ -41,8 +41,8 @@ var (
 		AdminSlot:          common.HexToHash("0x0000000000000000000000004200000000000000000000000000000000000018"),
 		ImplementationSlot: common.HexToHash("0x000000000000000000000000c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d3c0d30018"),
 	},
-	predeploys.ProtocolVaultAddr:       eip1967Slots(predeploys.ProtocolVaultAddr),
-	predeploys.ProposerRewardVaultAddr: eip1967Slots(predeploys.ProposerRewardVaultAddr),
+	predeploys.ProtocolVaultAddr: eip1967Slots(predeploys.ProtocolVaultAddr),
+	predeploys.L1FeeVaultAddr:    eip1967Slots(predeploys.L1FeeVaultAddr),
 	}
 )
diff --git a/utils/chain-ops/genesis/config.go b/utils/chain-ops/genesis/config.go
index d8d10a02a..145238056 100644
--- a/utils/chain-ops/genesis/config.go
+++ b/utils/chain-ops/genesis/config.go
@@ -36,10 +36,10 @@ type DeployConfig struct {
 	L2BlockTime               uint64 `json:"l2BlockTime"`
 	FinalizationPeriodSeconds uint64 `json:"finalizationPeriodSeconds"`

-	MaxProposerDrift   uint64         `json:"maxProposerDrift"`
-	ProposerWindowSize uint64         `json:"proposerWindowSize"`
+	MaxSequencerDrift   uint64        `json:"maxSequencerDrift"`
+	SequencerWindowSize uint64        `json:"sequencerWindowSize"`
 	ChannelTimeout     uint64         `json:"channelTimeout"`
-	P2PProposerAddress common.Address `json:"p2pProposerAddress"`
+	P2PSequencerAddress common.Address `json:"p2pSequencerAddress"`
 	BatchInboxAddress  common.Address `json:"batchInboxAddress"`
 	BatchSenderAddress common.Address `json:"batchSenderAddress"`

@@ -105,8 +105,8 @@ type DeployConfig struct {
 	ProxyAdminOwner common.Address `json:"proxyAdminOwner"`
 	// L1 recipient of fees accumulated in the ProtocolVault
 	ProtocolVaultRecipient common.Address `json:"protocolVaultRecipient"`
-	// L1 recipient of fees accumulated in the ProposerRewardVault
-	ProposerRewardVaultRecipient common.Address `json:"proposerRewardVaultRecipient"`
+	// L1 recipient of fees accumulated in the L1FeeVault
+	L1FeeVaultRecipient common.Address `json:"l1FeeVaultRecipient"`
 	// L1StandardBridge proxy address on L1
 	L1StandardBridgeProxy common.Address `json:"l1StandardBridgeProxy"`
 	// L1CrossDomainMessenger proxy address on L1
@@ -151,17 +151,17 @@ func (d *DeployConfig) Check() error {
 	if d.FinalizationPeriodSeconds == 0 {
 		return fmt.Errorf("%w: FinalizationPeriodSeconds cannot be 0", ErrInvalidDeployConfig)
 	}
-	if d.MaxProposerDrift == 0 {
-		return fmt.Errorf("%w: MaxProposerDrift cannot be 0", ErrInvalidDeployConfig)
+	if d.MaxSequencerDrift == 0 {
+		return fmt.Errorf("%w: MaxSequencerDrift cannot be 0", ErrInvalidDeployConfig)
 	}
-	if d.ProposerWindowSize == 0 {
-		return fmt.Errorf("%w: ProposerWindowSize cannot be 0", ErrInvalidDeployConfig)
+	if d.SequencerWindowSize == 0 {
+		return fmt.Errorf("%w: SequencerWindowSize cannot be 0", ErrInvalidDeployConfig)
 	}
 	if d.ChannelTimeout == 0 {
 		return fmt.Errorf("%w: ChannelTimeout cannot be 0", ErrInvalidDeployConfig)
 	}
-	if d.P2PProposerAddress == (common.Address{}) {
-		return fmt.Errorf("%w: P2PProposerAddress cannot be address(0)", ErrInvalidDeployConfig)
+	if d.P2PSequencerAddress == (common.Address{}) {
+		return fmt.Errorf("%w: P2PSequencerAddress cannot be address(0)", ErrInvalidDeployConfig)
 	}
 	if d.BatchInboxAddress == (common.Address{}) {
 		return fmt.Errorf("%w: BatchInboxAddress cannot be address(0)", ErrInvalidDeployConfig)
 	}
@@ -196,8 +196,8 @@ func (d *DeployConfig) Check() error {
 	if d.ProtocolVaultRecipient == (common.Address{}) {
 		return fmt.Errorf("%w: ProtocolVaultRecipient cannot be address(0)", ErrInvalidDeployConfig)
 	}
-	if d.ProposerRewardVaultRecipient == (common.Address{}) {
-		return fmt.Errorf("%w: ProposerRewardVaultRecipient cannot be address(0)", ErrInvalidDeployConfig)
+	if d.L1FeeVaultRecipient == (common.Address{}) {
+		return fmt.Errorf("%w: L1FeeVaultRecipient cannot be address(0)", ErrInvalidDeployConfig)
 	}
 	if d.GasPriceOracleOverhead == 0 {
 		log.Warn("GasPriceOracleOverhead is 0")
@@ -374,8 +374,8 @@ func (d *DeployConfig) RollupConfig(l1StartBlock *types.Block, l2GenesisBlockHas
 			},
 		},
 		BlockTime:         d.L2BlockTime,
-		MaxProposerDrift:   d.MaxProposerDrift,
-		ProposerWindowSize: d.ProposerWindowSize,
+		MaxSequencerDrift: d.MaxSequencerDrift,
+		SeqWindowSize:     d.SequencerWindowSize,
 		ChannelTimeout:    d.ChannelTimeout,
 		L1ChainID:         new(big.Int).SetUint64(d.L1ChainID),
 		L2ChainID:         new(big.Int).SetUint64(d.L2ChainID),
@@ -425,8 +425,8 @@ func NewL2ImmutableConfig(config *DeployConfig, block *types.Block) (immutables.
 	if config.ProtocolVaultRecipient == (common.Address{}) {
 		return immutable, fmt.Errorf("ProtocolVaultRecipient cannot be address(0): %w", ErrInvalidImmutablesConfig)
 	}
-	if config.ProposerRewardVaultRecipient == (common.Address{}) {
-		return immutable, fmt.Errorf("ProposerRewardVaultRecipient cannot be address(0): %w", ErrInvalidImmutablesConfig)
+	if config.L1FeeVaultRecipient == (common.Address{}) {
+		return immutable, fmt.Errorf("L1FeeVaultRecipient cannot be address(0): %w", ErrInvalidImmutablesConfig)
 	}

 	immutable["L2StandardBridge"] = immutables.ImmutableValues{
@@ -448,8 +448,8 @@ func NewL2ImmutableConfig(config *DeployConfig, block *types.Block) (immutables.
 		"validatorPoolAddress": config.ValidatorPoolProxy,
 		"rewardDivider":        new(big.Int).SetUint64(rewardDivider),
 	}
-	immutable["ProposerRewardVault"] = immutables.ImmutableValues{
-		"recipient": config.ProposerRewardVaultRecipient,
+	immutable["L1FeeVault"] = immutables.ImmutableValues{
+		"recipient": config.L1FeeVaultRecipient,
 	}
 	immutable["ProtocolVault"] = immutables.ImmutableValues{
 		"recipient": config.ProtocolVaultRecipient,
diff --git a/utils/chain-ops/genesis/layer_one.go b/utils/chain-ops/genesis/layer_one.go
index fc7fcf213..495cc25b6 100644
--- a/utils/chain-ops/genesis/layer_one.go
+++ b/utils/chain-ops/genesis/layer_one.go
@@ -137,7 +137,7 @@ func BuildL1DeveloperGenesis(config *DeployConfig) (*core.Genesis, error) {
 		uint642Big(config.GasPriceOracleScalar),
 		config.BatchSenderAddress.Hash(),
 		gasLimit,
-		config.P2PProposerAddress,
+		config.P2PSequencerAddress,
 		defaultResourceConfig,
 		uint642Big(config.ValidatorRewardScalar),
 	)
@@ -458,7 +458,7 @@ func deployL1Contracts(config *DeployConfig, backend *backends.SimulatedBackend)
 			uint642Big(config.GasPriceOracleScalar),
 			config.BatchSenderAddress.Hash(), // left-padded 32 bytes value, version is zero anyway
 			gasLimit,
-			config.P2PProposerAddress,
+			config.P2PSequencerAddress,
 			defaultResourceConfig,
 			uint642Big(config.ValidatorRewardScalar),
 		},
diff --git a/utils/chain-ops/genesis/layer_one_test.go b/utils/chain-ops/genesis/layer_one_test.go
index 322b1fac6..b463f6746 100644
--- a/utils/chain-ops/genesis/layer_one_test.go
+++ b/utils/chain-ops/genesis/layer_one_test.go
@@ -123,7 +123,7 @@ func TestBuildL1DeveloperGenesis(t *testing.T) {
 	require.Equal(t, gasLimit, uint64(config.L2GenesisBlockGasLimit))
 	unsafeBlockSigner, err := sysCfg.UnsafeBlockSigner(&bind.CallOpts{})
 	require.NoError(t, err)
-	require.Equal(t, unsafeBlockSigner, config.P2PProposerAddress)
+	require.Equal(t, unsafeBlockSigner, config.P2PSequencerAddress)
 	validatorRewardScalar, err := sysCfg.ValidatorRewardScalar(&bind.CallOpts{})
 	require.NoError(t, err)
 	require.Equal(t, validatorRewardScalar.Uint64(), config.ValidatorRewardScalar)
diff --git a/utils/chain-ops/genesis/testdata/test-deploy-config-devnet-l1.json b/utils/chain-ops/genesis/testdata/test-deploy-config-devnet-l1.json
index e714c1e88..cb230d852 100644
--- a/utils/chain-ops/genesis/testdata/test-deploy-config-devnet-l1.json
+++ b/utils/chain-ops/genesis/testdata/test-deploy-config-devnet-l1.json
@@ -4,10 +4,10 @@
   "l2ChainID": 901,
   "l2BlockTime": 2,
-  "maxProposerDrift": 100,
-  "proposerWindowSize": 4,
+  "maxSequencerDrift": 100,
+  "sequencerWindowSize": 4,
   "channelTimeout": 40,
-  "p2pProposerAddress": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
+  "p2pSequencerAddress": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc",
   "batchInboxAddress": "0xff00000000000000000000000000000000000000",
   "batchSenderAddress": "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC",
@@ -23,7 +23,7 @@
   "cliqueSignerAddress": "0xca062b0fd91172d89bcd4bb084ac4e21972cc467",
   "protocolVaultRecipient": "0xBcd4042DE499D14e55001CcbB24a551F3b954096",
-  "proposerRewardVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
+  "l1FeeVaultRecipient": "0x71bE63f3384f5fb98995898A86B02Fb2426c5788",
   "l1ERC721BridgeProxy": "0xff000000000000000000000000000000000000ff",
   "l1StandardBridgeProxy": "0xff000000000000000000000000000000000000fd",
   "l1CrossDomainMessengerProxy": "0xff000000000000000000000000000000000000dd",
diff --git a/utils/chain-ops/genesis/testdata/test-deploy-config-full.json b/utils/chain-ops/genesis/testdata/test-deploy-config-full.json
index 71660f807..2019be229 100644
--- a/utils/chain-ops/genesis/testdata/test-deploy-config-full.json
+++ b/utils/chain-ops/genesis/testdata/test-deploy-config-full.json
@@ -3,10 +3,10 @@
   "l1ChainID": 900,
   "l2ChainID": 901,
   "l2BlockTime": 2,
-  "maxProposerDrift": 20,
-  "proposerWindowSize": 100,
+  "maxSequencerDrift": 20,
+  "sequencerWindowSize": 100,
   "channelTimeout": 30,
-  "p2pProposerAddress": "0x0000000000000000000000000000000000000000",
+  "p2pSequencerAddress": "0x0000000000000000000000000000000000000000",
   "batchInboxAddress": "0x42000000000000000000000000000000000000ff",
   "batchSenderAddress": "0x0000000000000000000000000000000000000000",
   "validatorPoolTrustedValidator": "0x7770000000000000000000000000000000000001",
@@ -37,7 +37,7 @@
   "l2GenesisBlockParentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
   "l2GenesisBlockBaseFeePerGas": "0x3b9aca00",
   "protocolVaultRecipient": "0x42000000000000000000000000000000000000f5",
-  "proposerRewardVaultRecipient": "0x42000000000000000000000000000000000000f6",
+  "l1FeeVaultRecipient": "0x42000000000000000000000000000000000000f6",
   "l1StandardBridgeProxy": "0x42000000000000000000000000000000000000f8",
   "l1CrossDomainMessengerProxy": "0x42000000000000000000000000000000000000f9",
   "l1ERC721BridgeProxy": "0x4200000000000000000000000000000000000060",
diff --git a/utils/chain-ops/immutables/immutables.go b/utils/chain-ops/immutables/immutables.go
index 3d2d6adb4..2276c2f26 100644
--- a/utils/chain-ops/immutables/immutables.go
+++ b/utils/chain-ops/immutables/immutables.go
@@ -49,8 +49,8 @@ func (i ImmutableConfig) Check() error {
 	if _, ok := i["ValidatorRewardVault"]["validatorPoolAddress"]; !ok {
 		return errors.New("ValidatorRewardVault validatorPoolAddress not set")
 	}
-	if _, ok := i["ProposerRewardVault"]["recipient"]; !ok {
-		return errors.New("ProposerRewardVault recipient not set")
+	if _, ok := i["L1FeeVault"]["recipient"]; !ok {
+		return errors.New("L1FeeVault recipient not set")
 	}
 	if _, ok := i["ProtocolVault"]["recipient"]; !ok {
 		return errors.New("ProtocolVault recipient not set")
@@ -105,9 +105,9 @@ func BuildKroma(immutable ImmutableConfig, zktrie bool) (DeploymentResults, erro
 			},
 		},
 		{
-			Name: "ProposerRewardVault",
+			Name: "L1FeeVault",
 			Args: []interface{}{
-				immutable["ProposerRewardVault"]["recipient"],
+				immutable["L1FeeVault"]["recipient"],
 			},
 		},
 		{
@@ -186,12 +186,12 @@ func l2Deployer(backend *backends.SimulatedBackend, opts *bind.TransactOpts, dep
 			return nil, fmt.Errorf("invalid type for recipient")
 		}
 		_, tx, _, err = bindings.DeployProtocolVault(opts, backend, recipient)
-	case "ProposerRewardVault":
+	case "L1FeeVault":
 		recipient, ok := deployment.Args[0].(common.Address)
 		if !ok {
 			return nil, fmt.Errorf("invalid type for recipient")
 		}
-		_, tx, _, err = bindings.DeployProposerRewardVault(opts, backend, recipient)
+		_, tx, _, err = bindings.DeployL1FeeVault(opts, backend, recipient)
 	case "KromaMintableERC20Factory":
 		_, tx, _, err = bindings.DeployKromaMintableERC20Factory(opts, backend, predeploys.L2StandardBridgeAddr)
 	case "L2ERC721Bridge":
diff --git a/utils/chain-ops/immutables/immutables_test.go b/utils/chain-ops/immutables/immutables_test.go
index 710e87a76..0c11a5008 100644
--- a/utils/chain-ops/immutables/immutables_test.go
+++ b/utils/chain-ops/immutables/immutables_test.go
@@ -30,7 +30,7 @@ func TestBuildKroma(t *testing.T) {
 			"validatorPoolAddress": common.HexToAddress("0x1234567890123456789012345678901234567890"),
 			"rewardDivider":        new(big.Int).SetUint64(24 * 7),
 		},
-		"ProposerRewardVault": {
+		"L1FeeVault": {
 			"recipient": common.HexToAddress("0x1234567890123456789012345678901234567890"),
 		},
 		"ProtocolVault": {
@@ -48,7 +48,7 @@ func TestBuildKroma(t *testing.T) {
 		"L2ToL1MessagePasser":  true,
 		"ValidatorRewardVault": true,
 		"ProtocolVault":        true,
-		"ProposerRewardVault":  true,
+		"L1FeeVault":           true,
 		"KromaMintableERC20Factory":  true,
 		"L2ERC721Bridge":             true,
 		"KromaMintableERC721Factory": true,