diff --git a/.changeset/nervous-icons-retire.md b/.changeset/nervous-icons-retire.md new file mode 100644 index 000000000000..a909a33050c8 --- /dev/null +++ b/.changeset/nervous-icons-retire.md @@ -0,0 +1,5 @@ +--- +'@eth-optimism/fault-detector': minor +--- + +Remove pre-bedrock support from fault detector. diff --git a/packages/fault-detector/README.md b/packages/fault-detector/README.md index 6d8fd29d4b23..cac515ffee83 100644 --- a/packages/fault-detector/README.md +++ b/packages/fault-detector/README.md @@ -17,10 +17,7 @@ yarn build ## Running the service Copy `.env.example` into a new file named `.env`, then set the environment variables listed there. Additional env setting are listed on `--help`. If running the fault detector against -a custom op chain, the necessary contract addresses must also be set associated with the op-chain. - -- Bedrock: `OptimismPortal` -- Legacy: `StateCommitmentChain` +a custom op chain, the `OptimismPortal` contract address associated with the op-chain must also be set. Once your environment variables or flags have been set, run the service via: @@ -40,13 +37,11 @@ yarn start The `fault-detector` detects differences between the transaction results generated by your local Optimism node and the transaction results actually published to Ethereum. Currently, transaction results take the form of [the root of the Optimism state trie](https://medium.com/@eiki1212/ethereum-state-trie-architecture-explained-a30237009d4e). -- Post bedrock upgrade, the state root of the block is published to the [`L2OutputOracle`](https://github.com/ethereum-optimism/optimism/blob/39b7262cc3ffd78cd314341b8512b2683c1d9af7/packages/contracts-bedrock/contracts/L1/L2OutputOracle.sol) contract on Ethereum. +The state root of the block is published to the [`L2OutputOracle`](https://github.com/ethereum-optimism/optimism/blob/39b7262cc3ffd78cd314341b8512b2683c1d9af7/packages/contracts-bedrock/contracts/L1/L2OutputOracle.sol) contract on Ethereum. - ***Note***: The service accepts the `OptimismPortal` as a flag instead of the `L2OutputOracle` for backwards compatibility with early versions of these contracts. The `L2OutputOracle` is inferred from the portal contract. -- For pre-bedrock chains, the state root of the block is published to the [`StateCommitmentChain`](https://github.com/ethereum-optimism/optimism/blob/39b7262cc3ffd78cd314341b8512b2683c1d9af7/packages/contracts/contracts/L1/rollup/StateCommitmentChain.sol) contract on Ethereum. We can therefore detect differences by, for each block, checking the state root of the given block as reported by an Optimism node and the state root as published to Ethereum. -In order for the fault detector to differentiate between bedrock and legacy chains, please make sure to specify `--bedrock`. We export a series of Prometheus metrics that you can use to trigger alerting when issues are detected. Check the list of available metrics via `yarn start --help`: @@ -62,9 +57,7 @@ Options: --l2rpcprovider Provider for interacting with L2 (env: FAULT_DETECTOR__L2_RPC_PROVIDER) --startbatchindex Batch index to start checking from.
Setting it to -1 will cause the fault detector to find the first state batch index that has not yet passed the fault proof window (env: FAULT_DETECTOR__START_BATCH_INDEX, default value: -1) --loopintervalms Loop interval in milliseconds (env: FAULT_DETECTOR__LOOP_INTERVAL_MS) - --bedrock Whether or not the service is running against a Bedrock chain (env: FAULT_DETECTOR__BEDROCK, default value: false) - --optimismportaladdress [Custom Bedrock Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for ouput verification (env: FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS, default 0x0) - --statecommitmentchainaddress [Custom Legacy Chains] Deployed StateCommitmentChain contract address. Used to fetch necessary info for output verification. (env: FAULT_DETECTOR__STATE_COMMITMENT_CHAIN_ADDRESS, default 0x0) + --optimismportaladdress [Custom OP Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for output verification (env: FAULT_DETECTOR__OPTIMISM_PORTAL_ADDRESS, default 0x0) --port Port for the app server (env: FAULT_DETECTOR__PORT) --hostname Hostname for the app server (env: FAULT_DETECTOR__HOSTNAME) diff --git a/packages/fault-detector/package.json b/packages/fault-detector/package.json index d3a73aa24c9d..bc3140254acd 100644 --- a/packages/fault-detector/package.json +++ b/packages/fault-detector/package.json @@ -51,7 +51,7 @@ }, "dependencies": { "@eth-optimism/common-ts": "^0.8.2", - "@eth-optimism/contracts": "^0.6.0", + "@eth-optimism/contracts-bedrock": "^0.14.0", "@eth-optimism/core-utils": "^0.12.1", "@eth-optimism/sdk": "^3.0.0", "@ethersproject/abstract-provider": "^5.7.0" diff --git a/packages/fault-detector/src/helpers.ts b/packages/fault-detector/src/helpers.ts index d13e148de9a9..65181b132c27 100644 --- a/packages/fault-detector/src/helpers.ts +++ b/packages/fault-detector/src/helpers.ts @@ -1,13 +1,6 @@ -import { Contract, BigNumber } from 'ethers' +import { Contract } from 'ethers' import { Logger } from '@eth-optimism/common-ts' -export interface OutputOracle<TSubmissionEventArgs> { - contract: Contract - filter: any - getTotalElements: () => Promise<BigNumber> - getEventIndex: (args: TSubmissionEventArgs) => BigNumber -} - /** * Partial event interface, meant to reduce the size of the event cache to avoid * running out of memory. */ @@ -54,12 +47,12 @@ const getCache = ( * @param contract Contract to update cache for. * @param filter Event filter to use. */ -export const updateOracleCache = async <TSubmissionEventArgs>( - oracle: OutputOracle<TSubmissionEventArgs>, +export const updateOracleCache = async ( + oracle: Contract, logger?: Logger ): Promise<void> => { - const cache = getCache(oracle.contract.address) - const endBlock = await oracle.contract.provider.getBlockNumber() + const cache = getCache(oracle.address) + const endBlock = await oracle.provider.getBlockNumber() logger?.info('visiting uncached oracle events for range', { node: 'l1', cachedUntilBlock: cache.highestBlock, @@ -77,17 +70,15 @@ blockRangeSize: step, }) - const events = await oracle.contract.queryFilter( - oracle.filter, + const events = await oracle.queryFilter( + oracle.filters.OutputProposed(), currentBlock, currentBlock + step ) // Throw the events into the cache.
for (const event of events) { - cache.eventCache[ - oracle.getEventIndex(event.args as TSubmissionEventArgs).toNumber() - ] = { + cache.eventCache[event.args.l2OutputIndex.toNumber()] = { blockNumber: event.blockNumber, transactionHash: event.transactionHash, args: event.args, @@ -135,12 +126,12 @@ * @param index State batch index to search for. * @returns Event corresponding to the batch. */ -export const findEventForStateBatch = async <TSubmissionEventArgs>( - oracle: OutputOracle<TSubmissionEventArgs>, +export const findEventForStateBatch = async ( + oracle: Contract, index: number, logger?: Logger ): Promise<PartialEvent> => { - const cache = getCache(oracle.contract.address) + const cache = getCache(oracle.address) // Try to find the event in cache first. if (cache.eventCache[index]) { @@ -166,13 +157,13 @@ * @param oracle Output oracle contract. * @returns Starting state root batch index. */ -export const findFirstUnfinalizedStateBatchIndex = async <TSubmissionEventArgs>( - oracle: OutputOracle<TSubmissionEventArgs>, +export const findFirstUnfinalizedStateBatchIndex = async ( + oracle: Contract, fpw: number, logger?: Logger ): Promise<number> => { - const latestBlock = await oracle.contract.provider.getBlock('latest') - const totalBatches = (await oracle.getTotalElements()).toNumber() + const latestBlock = await oracle.provider.getBlock('latest') + const totalBatches = (await oracle.nextOutputIndex()).toNumber() // Perform a binary search to find the next batch that will pass the challenge period. let lo = 0 @@ -180,7 +171,7 @@ while (lo !== hi) { const mid = Math.floor((lo + hi) / 2) const event = await findEventForStateBatch(oracle, mid, logger) - const block = await oracle.contract.provider.getBlock(event.blockNumber) + const block = await oracle.provider.getBlock(event.blockNumber) if (block.timestamp + fpw < latestBlock.timestamp) { lo = mid + 1 diff --git a/packages/fault-detector/src/service.ts b/packages/fault-detector/src/service.ts index 0a7fcd322124..ae55e8de874e 100644 --- a/packages/fault-detector/src/service.ts +++ b/packages/fault-detector/src/service.ts @@ -16,7 +16,7 @@ import { OEL1ContractsLike, } from '@eth-optimism/sdk' import { Provider } from '@ethersproject/abstract-provider' -import { ethers, Transaction } from 'ethers' +import { Contract, ethers } from 'ethers' import dateformat from 'dateformat' import { version } from '../package.json' @@ -24,7 +24,6 @@ import { findFirstUnfinalizedStateBatchIndex, findEventForStateBatch, PartialEvent, - OutputOracle, updateOracleCache, } from './helpers' @@ -32,9 +31,7 @@ type Options = { l1RpcProvider: Provider l2RpcProvider: Provider startBatchIndex: number - bedrock: boolean optimismPortalAddress?: string - stateCommitmentChainAddress?: string } type Metrics = { @@ -44,8 +41,8 @@ } type State = { - fpw: number - oo: OutputOracle + faultProofWindow: number + outputOracle: Contract messenger: CrossChainMessenger currentBatchIndex: number diverged: boolean @@ -73,25 +70,13 @@ export class FaultDetector extends BaseServiceV2 { startBatchIndex: { validator: validators.num, default: -1, - desc: 'Batch index to start checking from.
For bedrock chains, this is the L2 height to start from', - public: true, - }, - bedrock: { - validator: validators.bool, - default: true, - desc: 'Whether or not the service is running against a Bedrock chain', + desc: 'The L2 height to start from', public: true, }, optimismPortalAddress: { validator: validators.str, default: ethers.constants.AddressZero, - desc: '[Custom Bedrock Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for ouput verification ', - public: true, - }, - stateCommitmentChainAddress: { - validator: validators.str, - default: ethers.constants.AddressZero, - desc: '[Custom Legacy Chains] Deployed StateCommitmentChain contract address. Used to fetch necessary info for output verification.', + desc: '[Custom OP Chains] Deployed OptimismPortal contract address. Used to retrieve necessary info for output verification', public: true, }, }, @@ -119,10 +104,9 @@ export class FaultDetector extends BaseServiceV2 { * will fallback to the pre-defined set of addresses from options, otherwise aborting if unset. * * Required Contracts - * - Bedrock: OptimismPortal (used to also fetch L2OutputOracle address variable). This is the preferred address + * - OptimismPortal (used to also fetch L2OutputOracle address variable). This is the preferred address * since in early versions of bedrock, OptimismPortal holds the FINALIZATION_WINDOW variable instead of L2OutputOracle. * The retrieved L2OutputOracle address from OptimismPortal is used to query for output roots. - * - Legacy: StateCommitmentChain to query for output roots. * * @param l2ChainId op chain id * @returns OEL1ContractsLike set of L1 contracts with only the required addresses set */ async getOEL1Contracts(l2ChainId: number): Promise<OEL1ContractsLike> { // CrossChainMessenger requires all address to be defined. Default to `AddressZero` to ignore unused contracts let contracts: OEL1ContractsLike = { + OptimismPortal: ethers.constants.AddressZero, + L2OutputOracle: ethers.constants.AddressZero, + // Unused contracts AddressManager: ethers.constants.AddressZero, + BondManager: ethers.constants.AddressZero, + CanonicalTransactionChain: ethers.constants.AddressZero, L1CrossDomainMessenger: ethers.constants.AddressZero, L1StandardBridge: ethers.constants.AddressZero, StateCommitmentChain: ethers.constants.AddressZero, - CanonicalTransactionChain: ethers.constants.AddressZero, - BondManager: ethers.constants.AddressZero, - OptimismPortal: ethers.constants.AddressZero, - L2OutputOracle: ethers.constants.AddressZero, } - const chainType = this.options.bedrock ? 
'bedrock' : 'legacy' - this.logger.info(`Setting contracts for OP chain type: ${chainType}`) - const knownChainId = L2ChainID[l2ChainId] !== undefined if (knownChainId) { this.logger.info(`Recognized L2 chain id ${L2ChainID[l2ChainId]}`) @@ -152,42 +134,29 @@ } this.logger.info('checking contract address options...') - if (this.options.bedrock) { - const address = this.options.optimismPortalAddress - if (!knownChainId && address === ethers.constants.AddressZero) { - this.logger.error('OptimismPortal contract unspecified') - throw new Error( - '--optimismportalcontractaddress needs to set for custom bedrock op chains' - ) - } - - if (address !== ethers.constants.AddressZero) { - this.logger.info('set OptimismPortal contract override') - contracts.OptimismPortal = address - - this.logger.info('fetching L2OutputOracle contract from OptimismPortal') - const opts = { address, signerOrProvider: this.options.l1RpcProvider } - const portalContract = getOEContract('OptimismPortal', l2ChainId, opts) - contracts.L2OutputOracle = await portalContract.L2_ORACLE() - } + const portalAddress = this.options.optimismPortalAddress + if (!knownChainId && portalAddress === ethers.constants.AddressZero) { + this.logger.error('OptimismPortal contract unspecified') + throw new Error( + '--optimismportaladdress needs to be set for custom op chains' + ) + } - // ... for a known chain ids without an override, the L2OutputOracle will already - // be set via the hardcoded default - } else { - const address = this.options.stateCommitmentChainAddress - if (!knownChainId && address === ethers.constants.AddressZero) { - this.logger.error('StateCommitmentChain contract unspecified') - throw new Error( - '--statecommitmentchainaddress needs to set for custom legacy op chains' - ) - } + if (portalAddress !== ethers.constants.AddressZero) { + this.logger.info('set OptimismPortal contract override') + contracts.OptimismPortal = portalAddress - if (address !== ethers.constants.AddressZero) { - this.logger.info('set StateCommitmentChain contract override') - contracts.StateCommitmentChain = address + this.logger.info('fetching L2OutputOracle contract from OptimismPortal') + const opts = { + address: portalAddress, + signerOrProvider: this.options.l1RpcProvider, } + const portalContract = getOEContract('OptimismPortal', l2ChainId, opts) + contracts.L2OutputOracle = await portalContract.L2_ORACLE() } + // ... for known chain ids without an override, the L2OutputOracle will already + // be set via the hardcoded default return contracts } @@ -211,7 +180,7 @@ l2SignerOrProvider: this.options.l2RpcProvider, l1ChainId, l2ChainId, - bedrock: this.options.bedrock, + bedrock: true, contracts: { l1: await this.getOEL1Contracts(l2ChainId) }, }) @@ -219,46 +188,33 @@ this.state.diverged = false // We use this a lot, a bit cleaner to pull out to the top level of the state object. 
- this.state.fpw = await this.state.messenger.getChallengePeriodSeconds() - this.logger.info(`fault proof window is ${this.state.fpw} seconds`) - - if (this.options.bedrock) { - const oo = this.state.messenger.contracts.l1.L2OutputOracle - this.state.oo = { - contract: oo, - filter: oo.filters.OutputProposed(), - getTotalElements: async () => oo.nextOutputIndex(), - getEventIndex: (args) => args.l2OutputIndex, - } - } else { - const oo = this.state.messenger.contracts.l1.StateCommitmentChain - this.state.oo = { - contract: oo, - filter: oo.filters.StateBatchAppended(), - getTotalElements: async () => oo.getTotalBatches(), - getEventIndex: (args) => args._batchIndex, - } - } + this.state.faultProofWindow = + await this.state.messenger.getChallengePeriodSeconds() + this.logger.info( + `fault proof window is ${this.state.faultProofWindow} seconds` + ) + + this.state.outputOracle = this.state.messenger.contracts.l1.L2OutputOracle // Populate the event cache. this.logger.info('warming event cache, this might take a while...') - await updateOracleCache(this.state.oo, this.logger) + await updateOracleCache(this.state.outputOracle, this.logger) // Figure out where to start syncing from. if (this.options.startBatchIndex === -1) { this.logger.info('finding appropriate starting unfinalized batch') const firstUnfinalized = await findFirstUnfinalizedStateBatchIndex( - this.state.oo, - this.state.fpw, + this.state.outputOracle, + this.state.faultProofWindow, this.logger ) // We may not have an unfinalized batches in the case where no batches have been submitted - // for the entire duration of the FPW. We generally do not expect this to happen on mainnet, - // but it happens often on testnets because the FPW is very short. + // for the entire duration of the FAULTPROOFWINDOW. We generally do not expect this to happen on mainnet, + // but it happens often on testnets because the FAULTPROOFWINDOW is very short. if (firstUnfinalized === undefined) { this.logger.info('no unfinalized batches found. 
skipping all batches.') - const totalBatches = await this.state.oo.getTotalElements() + const totalBatches = await this.state.outputOracle.nextOutputIndex() this.state.currentBatchIndex = totalBatches.toNumber() - 1 } else { this.state.currentBatchIndex = firstUnfinalized @@ -288,17 +244,17 @@ export class FaultDetector extends BaseServiceV2 { let latestBatchIndex: number try { - const totalBatches = await this.state.oo.getTotalElements() + const totalBatches = await this.state.outputOracle.nextOutputIndex() latestBatchIndex = totalBatches.toNumber() - 1 } catch (err) { this.logger.error('failed to query total # of batches', { error: err, node: 'l1', - section: 'getTotalElements', + section: 'nextOutputIndex', }) this.metrics.nodeConnectionFailures.inc({ layer: 'l1', - section: 'getTotalElements', + section: 'nextOutputIndex', }) await sleep(15000) return @@ -322,7 +278,7 @@ export class FaultDetector extends BaseServiceV2 { let event: PartialEvent try { event = await findEventForStateBatch( - this.state.oo, + this.state.outputOracle, this.state.currentBatchIndex, this.logger ) @@ -358,179 +314,86 @@ export class FaultDetector extends BaseServiceV2 { return } - if (this.options.bedrock) { - const outputBlockNumber = event.args.l2BlockNumber.toNumber() - if (latestBlock < outputBlockNumber) { - this.logger.info('L2 node is behind, waiting for sync...', { - l2BlockHeight: latestBlock, - outputBlock: outputBlockNumber, - }) - return - } + const outputBlockNumber = event.args.l2BlockNumber.toNumber() + if (latestBlock < outputBlockNumber) { + this.logger.info('L2 node is behind, waiting for sync...', { + l2BlockHeight: latestBlock, + outputBlock: outputBlockNumber, + }) + return + } - let outputBlock: any - try { - outputBlock = await ( - this.options.l2RpcProvider as ethers.providers.JsonRpcProvider - ).send('eth_getBlockByNumber', [ - toRpcHexString(outputBlockNumber), - false, - ]) - } catch (err) { - this.logger.error('failed to fetch output block', { - error: err, - node: 'l2', - section: 'getBlock', - block: outputBlockNumber, - }) - this.metrics.nodeConnectionFailures.inc({ - layer: 'l2', - section: 'getBlock', - }) - await sleep(15000) - return - } + let outputBlock: any + try { + outputBlock = await ( + this.options.l2RpcProvider as ethers.providers.JsonRpcProvider + ).send('eth_getBlockByNumber', [toRpcHexString(outputBlockNumber), false]) + } catch (err) { + this.logger.error('failed to fetch output block', { + error: err, + node: 'l2', + section: 'getBlock', + block: outputBlockNumber, + }) + this.metrics.nodeConnectionFailures.inc({ + layer: 'l2', + section: 'getBlock', + }) + await sleep(15000) + return + } - let messagePasserProofResponse: any - try { - messagePasserProofResponse = await ( - this.options.l2RpcProvider as ethers.providers.JsonRpcProvider - ).send('eth_getProof', [ - this.state.messenger.contracts.l2.BedrockMessagePasser.address, - [], - toRpcHexString(outputBlockNumber), - ]) - } catch (err) { - this.logger.error('failed to fetch message passer proof', { - error: err, - node: 'l2', - section: 'getProof', - block: outputBlockNumber, - }) - this.metrics.nodeConnectionFailures.inc({ - layer: 'l2', - section: 'getProof', - }) - await sleep(15000) - return - } + let messagePasserProofResponse: any + try { + messagePasserProofResponse = await ( + this.options.l2RpcProvider as ethers.providers.JsonRpcProvider + ).send('eth_getProof', [ + this.state.messenger.contracts.l2.BedrockMessagePasser.address, + [], + toRpcHexString(outputBlockNumber), + ]) + } catch (err) { + 
this.logger.error('failed to fetch message passer proof', { + error: err, + node: 'l2', + section: 'getProof', + block: outputBlockNumber, + }) + this.metrics.nodeConnectionFailures.inc({ + layer: 'l2', + section: 'getProof', + }) + await sleep(15000) + return + } - const outputRoot = ethers.utils.solidityKeccak256( - ['uint256', 'bytes32', 'bytes32', 'bytes32'], - [ - 0, - outputBlock.stateRoot, - messagePasserProofResponse.storageHash, - outputBlock.hash, - ] - ) + const outputRoot = ethers.utils.solidityKeccak256( + ['uint256', 'bytes32', 'bytes32', 'bytes32'], + [ + 0, + outputBlock.stateRoot, + messagePasserProofResponse.storageHash, + outputBlock.hash, + ] + ) - if (outputRoot !== event.args.outputRoot) { - this.state.diverged = true - this.metrics.isCurrentlyMismatched.set(1) - this.logger.error('state root mismatch', { - blockNumber: outputBlock.number, - expectedStateRoot: event.args.outputRoot, - actualStateRoot: outputRoot, - finalizationTime: dateformat( - new Date( - (ethers.BigNumber.from(outputBlock.timestamp).toNumber() + - this.state.fpw) * - 1000 - ), - 'mmmm dS, yyyy, h:MM:ss TT' + if (outputRoot !== event.args.outputRoot) { + this.state.diverged = true + this.metrics.isCurrentlyMismatched.set(1) + this.logger.error('state root mismatch', { + blockNumber: outputBlock.number, + expectedStateRoot: event.args.outputRoot, + actualStateRoot: outputRoot, + finalizationTime: dateformat( + new Date( + (ethers.BigNumber.from(outputBlock.timestamp).toNumber() + + this.state.faultProofWindow) * + 1000 ), - }) - return - } - } else { - let batchTransaction: Transaction - try { - batchTransaction = await this.options.l1RpcProvider.getTransaction( - event.transactionHash - ) - } catch (err) { - this.logger.error('failed to acquire batch transaction', { - error: err, - node: 'l1', - section: 'getTransaction', - }) - this.metrics.nodeConnectionFailures.inc({ - layer: 'l1', - section: 'getTransaction', - }) - await sleep(15000) - return - } - - const [stateRoots] = this.state.oo.contract.interface.decodeFunctionData( - 'appendStateBatch', - batchTransaction.data - ) - - const batchStart = event.args._prevTotalElements.toNumber() + 1 - const batchSize = event.args._batchSize.toNumber() - const batchEnd = batchStart + batchSize - - if (latestBlock < batchEnd) { - this.logger.info('L2 node is behind. waiting for sync...', { - batchBlockStart: batchStart, - batchBlockEnd: batchEnd, - l2BlockHeight: latestBlock, - }) - return - } - - // `getBlockRange` has a limit of 1000 blocks, so we have to break this request out into - // multiple requests of maximum 1000 blocks in the case that batchSize > 1000. 
- let blocks: any[] = [] - for (let i = 0; i < batchSize; i += 1000) { - let newBlocks: any[] - try { - newBlocks = await ( - this.options.l2RpcProvider as ethers.providers.JsonRpcProvider - ).send('eth_getBlockRange', [ - toRpcHexString(batchStart + i), - toRpcHexString(batchStart + i + Math.min(batchSize - i, 1000) - 1), - false, - ]) - } catch (err) { - this.logger.error('failed to query for blocks in batch', { - error: err, - node: 'l2', - section: 'getBlockRange', - }) - this.metrics.nodeConnectionFailures.inc({ - layer: 'l2', - section: 'getBlockRange', - }) - await sleep(15000) - return - } - - blocks = blocks.concat(newBlocks) - } - - for (const [i, stateRoot] of stateRoots.entries()) { - if (blocks[i].stateRoot !== stateRoot) { - this.state.diverged = true - this.metrics.isCurrentlyMismatched.set(1) - this.logger.error('state root mismatch', { - blockNumber: blocks[i].number, - expectedStateRoot: blocks[i].stateRoot, - actualStateRoot: stateRoot, - finalizationTime: dateformat( - new Date( - (ethers.BigNumber.from(blocks[i].timestamp).toNumber() + - this.state.fpw) * - 1000 - ), - 'mmmm dS, yyyy, h:MM:ss TT' - ), - }) - return - } - } + 'mmmm dS, yyyy, h:MM:ss TT' + ), + }) + return } const elapsedMs = Date.now() - startMs diff --git a/packages/fault-detector/test/helpers.spec.ts b/packages/fault-detector/test/helpers.spec.ts index 2b8a13f27efc..2ad3ad48cf56 100644 --- a/packages/fault-detector/test/helpers.spec.ts +++ b/packages/fault-detector/test/helpers.spec.ts @@ -1,103 +1,76 @@ import hre from 'hardhat' -import { Contract } from 'ethers' +import { Contract, utils } from 'ethers' import { toRpcHexString } from '@eth-optimism/core-utils' -import { - getContractFactory, - getContractInterface, -} from '@eth-optimism/contracts' +import { getContractFactory } from '@eth-optimism/contracts-bedrock' import { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' -import { smock, FakeContract } from '@defi-wonderland/smock' import { expect } from './setup' import { findEventForStateBatch, findFirstUnfinalizedStateBatchIndex, - OutputOracle, } from '../src' describe('helpers', () => { - // Can be any non-zero value, 1000 is fine. - const challengeWindowSeconds = 1000 + const deployConfig = { + l2OutputOracleSubmissionInterval: 6, + l2BlockTime: 2, + l2OutputOracleStartingBlockNumber: 0, + l2OutputOracleStartingTimestamp: 0, + l2OutputOracleProposer: '0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266', + l2OutputOracleChallenger: '0x6925B8704Ff96DEe942623d6FB5e946EF5884b63', + // Can be any non-zero value, 1000 is fine. 
+ finalizationPeriodSeconds: 1000, + } let signer: SignerWithAddress before(async () => { ;[signer] = await hre.ethers.getSigners() }) - let FakeBondManager: FakeContract - let FakeCanonicalTransactionChain: FakeContract - let AddressManager: Contract - let ChainStorageContainer: Contract - let StateCommitmentChain: Contract - let oracle: OutputOracle + let L2OutputOracle: Contract beforeEach(async () => { - // Set up fakes - FakeBondManager = await smock.fake(getContractInterface('BondManager')) - FakeCanonicalTransactionChain = await smock.fake( - getContractInterface('CanonicalTransactionChain') - ) - - // Set up contracts - AddressManager = await getContractFactory( - 'Lib_AddressManager', - signer - ).deploy() - ChainStorageContainer = await getContractFactory( - 'ChainStorageContainer', - signer - ).deploy(AddressManager.address, 'StateCommitmentChain') - StateCommitmentChain = await getContractFactory( - 'StateCommitmentChain', - signer - ).deploy(AddressManager.address, challengeWindowSeconds, 10000000) - - // Set addresses in manager - await AddressManager.setAddress( - 'ChainStorageContainer-SCC-batches', - ChainStorageContainer.address - ) - await AddressManager.setAddress( - 'StateCommitmentChain', - StateCommitmentChain.address - ) - await AddressManager.setAddress( - 'CanonicalTransactionChain', - FakeCanonicalTransactionChain.address + L2OutputOracle = await getContractFactory('L2OutputOracle', signer).deploy( + deployConfig.l2OutputOracleSubmissionInterval, + deployConfig.l2BlockTime, + deployConfig.l2OutputOracleStartingBlockNumber, + deployConfig.l2OutputOracleStartingTimestamp, + deployConfig.l2OutputOracleProposer, + deployConfig.l2OutputOracleChallenger, + deployConfig.finalizationPeriodSeconds ) - await AddressManager.setAddress('BondManager', FakeBondManager.address) - - // Set up mock returns - FakeCanonicalTransactionChain.getTotalElements.returns(1000000000) // just needs to be large - FakeBondManager.isCollateralized.returns(true) - - oracle = { - contract: StateCommitmentChain, - filter: StateCommitmentChain.filters.StateBatchAppended(), - getTotalElements: async () => StateCommitmentChain.getTotalBatches(), - getEventIndex: (args: any) => args._batchIndex, - } }) describe('findEventForStateBatch', () => { describe('when the event exists once', () => { beforeEach(async () => { - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 0 + const latestBlock = await hre.ethers.provider.getBlock('latest') + const params = { + _outputRoot: utils.formatBytes32String('testhash'), + _l2BlockNumber: + deployConfig.l2OutputOracleStartingBlockNumber + + deployConfig.l2OutputOracleSubmissionInterval, + _l1BlockHash: latestBlock.hash, + _l1BlockNumber: latestBlock.number, + } + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber, + params._l1BlockHash, + params._l1BlockNumber ) }) it('should return the event', async () => { - const event = await findEventForStateBatch(oracle, 0) + const event = await findEventForStateBatch(L2OutputOracle, 0) - expect(event.args._batchIndex).to.equal(0) + expect(event.args.l2OutputIndex).to.equal(0) }) }) describe('when the event does not exist', () => { it('should throw an error', async () => { await expect( - findEventForStateBatch(oracle, 0) + findEventForStateBatch(L2OutputOracle, 0) ).to.eventually.be.rejectedWith('unable to find event for batch') }) }) @@ -106,30 +79,46 @@ describe('helpers', () => { describe('findFirstUnfinalizedIndex', () => { describe('when the chain is more 
then FPW seconds old', () => { beforeEach(async () => { - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 0 + const latestBlock = await hre.ethers.provider.getBlock('latest') + const params = { + _outputRoot: utils.formatBytes32String('testhash'), + _l2BlockNumber: + deployConfig.l2OutputOracleStartingBlockNumber + + deployConfig.l2OutputOracleSubmissionInterval, + _l1BlockHash: latestBlock.hash, + _l1BlockNumber: latestBlock.number, + } + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber, + params._l1BlockHash, + params._l1BlockNumber ) // Simulate FPW passing await hre.ethers.provider.send('evm_increaseTime', [ - toRpcHexString(challengeWindowSeconds * 2), + toRpcHexString(deployConfig.finalizationPeriodSeconds * 2), ]) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 1 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + deployConfig.l2OutputOracleSubmissionInterval, + params._l1BlockHash, + params._l1BlockNumber ) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 2 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + + deployConfig.l2OutputOracleSubmissionInterval * 2, + params._l1BlockHash, + params._l1BlockNumber ) }) it('should find the first batch older than the FPW', async () => { const first = await findFirstUnfinalizedStateBatchIndex( - oracle, - challengeWindowSeconds + L2OutputOracle, + deployConfig.finalizationPeriodSeconds ) expect(first).to.equal(1) @@ -138,24 +127,40 @@ describe('helpers', () => { describe('when the chain is less than FPW seconds old', () => { beforeEach(async () => { - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 0 + const latestBlock = await hre.ethers.provider.getBlock('latest') + const params = { + _outputRoot: utils.formatBytes32String('testhash'), + _l2BlockNumber: + deployConfig.l2OutputOracleStartingBlockNumber + + deployConfig.l2OutputOracleSubmissionInterval, + _l1BlockHash: latestBlock.hash, + _l1BlockNumber: latestBlock.number, + } + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber, + params._l1BlockHash, + params._l1BlockNumber ) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 1 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + deployConfig.l2OutputOracleSubmissionInterval, + params._l1BlockHash, + params._l1BlockNumber ) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 2 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + + deployConfig.l2OutputOracleSubmissionInterval * 2, + params._l1BlockHash, + params._l1BlockNumber ) }) it('should return zero', async () => { const first = await findFirstUnfinalizedStateBatchIndex( - oracle, - challengeWindowSeconds + L2OutputOracle, + deployConfig.finalizationPeriodSeconds ) expect(first).to.equal(0) @@ -164,22 +169,38 @@ describe('helpers', () => { describe('when no batches submitted for the entire FPW', () => { beforeEach(async () => { - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 0 + const latestBlock = await hre.ethers.provider.getBlock('latest') + const params = { + _outputRoot: utils.formatBytes32String('testhash'), + _l2BlockNumber: + deployConfig.l2OutputOracleStartingBlockNumber + + deployConfig.l2OutputOracleSubmissionInterval, + 
_l1BlockHash: latestBlock.hash, + _l1BlockNumber: latestBlock.number, + } + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber, + params._l1BlockHash, + params._l1BlockNumber ) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 1 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + deployConfig.l2OutputOracleSubmissionInterval, + params._l1BlockHash, + params._l1BlockNumber ) - await StateCommitmentChain.appendStateBatch( - [hre.ethers.constants.HashZero], - 2 + await L2OutputOracle.proposeL2Output( + params._outputRoot, + params._l2BlockNumber + + deployConfig.l2OutputOracleSubmissionInterval * 2, + params._l1BlockHash, + params._l1BlockNumber ) // Simulate FPW passing and no new batches await hre.ethers.provider.send('evm_increaseTime', [ - toRpcHexString(challengeWindowSeconds * 2), + toRpcHexString(deployConfig.finalizationPeriodSeconds * 2), ]) // Mine a block to force timestamp to update @@ -188,8 +209,8 @@ describe('helpers', () => { it('should return undefined', async () => { const first = await findFirstUnfinalizedStateBatchIndex( - oracle, - challengeWindowSeconds + L2OutputOracle, + deployConfig.finalizationPeriodSeconds ) expect(first).to.equal(undefined) diff --git a/yarn.lock b/yarn.lock index 9d04d718d832..29473efabc81 100644 --- a/yarn.lock +++ b/yarn.lock @@ -769,7 +769,17 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@eth-optimism/contracts@0.6.0", "@eth-optimism/contracts@^0.6.0": +"@eth-optimism/contracts-bedrock@^0.14.0": + version "0.14.0" + resolved "https://registry.yarnpkg.com/@eth-optimism/contracts-bedrock/-/contracts-bedrock-0.14.0.tgz#f93006416c8b114fb78d2e477dccd525aa651458" + integrity sha512-mvbSE2q2cyHUwg1jtHwR4JOQJcwdCVRAkmBdXCKUP0XsP48NT1J92bYileRdiUM5nLIESgNNmPA8L2J87mr62g== + dependencies: + "@eth-optimism/core-utils" "^0.12.0" + "@openzeppelin/contracts" "4.7.3" + "@openzeppelin/contracts-upgradeable" "4.7.3" + ethers "^5.7.0" + +"@eth-optimism/contracts@0.6.0": version "0.6.0" resolved "https://registry.yarnpkg.com/@eth-optimism/contracts/-/contracts-0.6.0.tgz#15ae76222a9b4d958a550cafb1960923af613a31" integrity sha512-vQ04wfG9kMf1Fwy3FEMqH2QZbgS0gldKhcBeBUPfO8zu68L61VI97UDXmsMQXzTsEAxK8HnokW3/gosl4/NW3w==
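
With pre-bedrock support removed, the only check the service performs is the bedrock output-root comparison carried out in `service.ts` above. For reference, that computation can be sketched as follows; this is a minimal, illustrative example (the `computeOutputRoot` helper and `MESSAGE_PASSER` constant are not part of the package) assuming an ethers `JsonRpcProvider` for the L2 node and the canonical `L2ToL1MessagePasser` predeploy address:

```ts
import { ethers } from 'ethers'
import { toRpcHexString } from '@eth-optimism/core-utils'

// Canonical OP Stack predeploy address of the L2ToL1MessagePasser
// (assumption: the chain uses the standard predeploy layout).
const MESSAGE_PASSER = '0x4200000000000000000000000000000000000016'

// Recompute the bedrock output root for an L2 block: keccak256 over the
// version (0), the block's state root, the message passer's storage root,
// and the block hash, mirroring the logic in service.ts.
export const computeOutputRoot = async (
  l2Provider: ethers.providers.JsonRpcProvider,
  l2BlockNumber: number
): Promise<string> => {
  // Fetch the block header and the message passer storage proof at that height.
  const block = await l2Provider.send('eth_getBlockByNumber', [
    toRpcHexString(l2BlockNumber),
    false,
  ])
  const proof = await l2Provider.send('eth_getProof', [
    MESSAGE_PASSER,
    [],
    toRpcHexString(l2BlockNumber),
  ])
  return ethers.utils.solidityKeccak256(
    ['uint256', 'bytes32', 'bytes32', 'bytes32'],
    [0, block.stateRoot, proof.storageHash, block.hash]
  )
}
```

The service reads the proposed root from the cached `OutputProposed` events of the `L2OutputOracle` and flags a fault when it differs from this recomputed value.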