diff --git a/config/default.go b/config/default.go
index a8971263ca..3f8a12407c 100644
--- a/config/default.go
+++ b/config/default.go
@@ -106,6 +106,14 @@ SyncBlockProtection = "safe" # latest, finalized, safe
 L1SynchronizationMode = "sequential"
 L1SyncCheckL2BlockHash = true
 L1SyncCheckL2BlockNumberhModulus = 30
+[Synchronizer.L1BlockCheck]
+Enable = true
+L1SafeBlockPoint = "finalized"
+L1SafeBlockOffset = 0
+ForceCheckBeforeStart = true
+PreCheckEnable = true
+L1PreSafeBlockPoint = "safe"
+L1PreSafeBlockOffset = 0
 [Synchronizer.L1ParallelSynchronization]
 MaxClients = 10
 MaxPendingNoProcessedBlocks = 25
diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html
index 9122c48735..99a31fd86e 100644
--- a/docs/config-file/node-config-doc.html
+++ b/docs/config-file/node-config-doc.html
@@ -16,7 +16,7 @@
"300ms"
MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second
SequencerNodeURI is used allow Non-Sequencer nodes
to relay transactions to the Sequencer node
MaxCumulativeGasUsed is the max gas allowed per batch
Enabled defines if the WebSocket requests are enabled or disabled
Host defines the network adapter that will be used to serve the WS requests
Port defines the port to serve the endpoints via WS
ReadLimit defines the maximum size of a message read from the client (in bytes)
EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.
BatchRequestsEnabled defines if the Batch requests are enabled or disabled
BatchRequestsLimit defines the limit of requests that can be incorporated into each batch request
L2Coinbase defines which address is going to receive the fees
Must contain a minimum of 20
items
Must contain a maximum of 20
items
MaxLogsCount is a configuration to set the max number of logs that can be returned
in a single call to the state, if zero it means no limit
MaxLogsBlockRange is a configuration to set the max range for block number when querying TXs
logs in a single call to the state, if zero it means no limit
MaxNativeBlockHashBlockRange is a configuration to set the max range for block number when querying
native block hashes in a single call to the state, if zero it means no limit
EnableHttpLog allows the user to enable or disable the logs related to the HTTP
requests to be captured by the server.
SyncInterval is the delay interval between reading new rollup information
"1m"
"300ms"
-
SyncChunkSize is the number of blocks to sync on each chunk
TrustedSequencerURL is the rpc url to connect and sync the trusted state
SyncBlockProtection specify the state to sync (lastest, finalized or safe)
L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)
L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)
L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute
MaxClients Number of clients used to synchronize with L1
MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients
RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized
"1m"
+
SyncChunkSize is the number of blocks to sync on each chunk
TrustedSequencerURL is the rpc url to connect and sync the trusted state
SyncBlockProtection specify the state to sync (lastest, finalized or safe)
L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless)
L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)
Enable if is true then the check l1 Block Hash is active
L1SafeBlockPoint is the point that a block is considered safe enough to be checked
it can be: finalized, safe,pending or latest
L1SafeBlockOffset is the offset to add to L1SafeBlockPoint as a safe point
it can be positive or negative
Example: L1SafeBlockPoint= finalized, L1SafeBlockOffset= -10, then the safe block ten blocks before the finalized block
ForceCheckBeforeStart if is true then the first time the system is started it will force to check all pending blocks
PreCheckEnable if is true then the pre-check is active, will check blocks between L1SafeBlock and L1PreSafeBlock
L1PreSafeBlockPoint is the point that a block is considered safe enough to be checked
it can be: finalized, safe,pending or latest
L1PreSafeBlockOffset is the offset to add to L1PreSafeBlockPoint as a safe point
it can be positive or negative
Example: L1PreSafeBlockPoint= finalized, L1PreSafeBlockOffset= -10, then the safe block ten blocks before the finalized block
L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute
MaxClients Number of clients used to synchronize with L1
MaxPendingNoProcessedBlocks Size of the buffer used to store rollup information from L1, must be >= to NumberOfEthereumClientsToSync
sugested twice of NumberOfParallelOfEthereumClients
RequestLastBlockPeriod is the time to wait to request the
last block to L1 to known if we need to retrieve more data.
This value only apply when the system is synchronized
"1m"
"300ms"
AceptableInacctivityTime is the expected maximum time that the consumer
could wait until new data is produced. If the time is greater it emmit a log to warn about
that. The idea is keep working the consumer as much as possible, so if the producer is not
fast enought then you could increse the number of parallel clients to sync with L1
"1m"
"300ms"
diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md
index 23619596c8..9a94f41ced 100644
--- a/docs/config-file/node-config-doc.md
+++ b/docs/config-file/node-config-doc.md
@@ -1342,6 +1342,7 @@ because depending of this values is going to ask to a trusted node for trusted t
| - [SyncBlockProtection](#Synchronizer_SyncBlockProtection ) | No | string | No | - | SyncBlockProtection specify the state to sync (lastest, finalized or safe) |
| - [L1SyncCheckL2BlockHash](#Synchronizer_L1SyncCheckL2BlockHash ) | No | boolean | No | - | L1SyncCheckL2BlockHash if is true when a batch is closed is force to check L2Block hash against trustedNode (only apply for permissionless) |
| - [L1SyncCheckL2BlockNumberhModulus](#Synchronizer_L1SyncCheckL2BlockNumberhModulus ) | No | integer | No | - | L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check
a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...) |
+| - [L1BlockCheck](#Synchronizer_L1BlockCheck ) | No | object | No | - | - |
| - [L1SynchronizationMode](#Synchronizer_L1SynchronizationMode ) | No | enum (of string) | No | - | L1SynchronizationMode define how to synchronize with L1:
- parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
- sequential: Request data to L1 and execute |
| - [L1ParallelSynchronization](#Synchronizer_L1ParallelSynchronization ) | No | object | No | - | L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel') |
| - [L2Synchronization](#Synchronizer_L2Synchronization ) | No | object | No | - | L2Synchronization Configuration for L2 synchronization |
@@ -1443,7 +1444,135 @@ a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)
L1SyncCheckL2BlockNumberhModulus=30
```
-### 9.7. `Synchronizer.L1SynchronizationMode`
+### 9.7. `[Synchronizer.L1BlockCheck]`
+
+**Type:** : `object`
+
+| Property | Pattern | Type | Deprecated | Definition | Title/Description |
+| ---------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| - [Enable](#Synchronizer_L1BlockCheck_Enable ) | No | boolean | No | - | Enable if is true then the check l1 Block Hash is active |
+| - [L1SafeBlockPoint](#Synchronizer_L1BlockCheck_L1SafeBlockPoint ) | No | enum (of string) | No | - | L1SafeBlockPoint is the point that a block is considered safe enough to be checked
it can be: finalized, safe,pending or latest |
+| - [L1SafeBlockOffset](#Synchronizer_L1BlockCheck_L1SafeBlockOffset ) | No | integer | No | - | L1SafeBlockOffset is the offset to add to L1SafeBlockPoint as a safe point
it can be positive or negative
Example: L1SafeBlockPoint= finalized, L1SafeBlockOffset= -10, then the safe block ten blocks before the finalized block |
+| - [ForceCheckBeforeStart](#Synchronizer_L1BlockCheck_ForceCheckBeforeStart ) | No | boolean | No | - | ForceCheckBeforeStart if is true then the first time the system is started it will force to check all pending blocks |
+| - [PreCheckEnable](#Synchronizer_L1BlockCheck_PreCheckEnable ) | No | boolean | No | - | PreCheckEnable if is true then the pre-check is active, will check blocks between L1SafeBlock and L1PreSafeBlock |
+| - [L1PreSafeBlockPoint](#Synchronizer_L1BlockCheck_L1PreSafeBlockPoint ) | No | enum (of string) | No | - | L1PreSafeBlockPoint is the point that a block is considered safe enough to be checked
it can be: finalized, safe,pending or latest |
+| - [L1PreSafeBlockOffset](#Synchronizer_L1BlockCheck_L1PreSafeBlockOffset ) | No | integer | No | - | L1PreSafeBlockOffset is the offset to add to L1PreSafeBlockPoint as a safe point
it can be positive or negative
Example: L1PreSafeBlockPoint= finalized, L1PreSafeBlockOffset= -10, then the safe block ten blocks before the finalized block |
+
+#### 9.7.1. `Synchronizer.L1BlockCheck.Enable`
+
+**Type:** : `boolean`
+
+**Default:** `true`
+
+**Description:** Enable if is true then the check l1 Block Hash is active
+
+**Example setting the default value** (true):
+```
+[Synchronizer.L1BlockCheck]
+Enable=true
+```
+
+#### 9.7.2. `Synchronizer.L1BlockCheck.L1SafeBlockPoint`
+
+**Type:** : `enum (of string)`
+
+**Default:** `"finalized"`
+
+**Description:** L1SafeBlockPoint is the point that a block is considered safe enough to be checked
+it can be: finalized, safe,pending or latest
+
+**Example setting the default value** ("finalized"):
+```
+[Synchronizer.L1BlockCheck]
+L1SafeBlockPoint="finalized"
+```
+
+Must be one of:
+* "finalized"
+* "safe"
+* "latest"
+
+#### 9.7.3. `Synchronizer.L1BlockCheck.L1SafeBlockOffset`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Description:** L1SafeBlockOffset is the offset to add to L1SafeBlockPoint as a safe point
+it can be positive or negative
+Example: L1SafeBlockPoint= finalized, L1SafeBlockOffset= -10, then the safe block ten blocks before the finalized block
+
+**Example setting the default value** (0):
+```
+[Synchronizer.L1BlockCheck]
+L1SafeBlockOffset=0
+```
+
+#### 9.7.4. `Synchronizer.L1BlockCheck.ForceCheckBeforeStart`
+
+**Type:** : `boolean`
+
+**Default:** `true`
+
+**Description:** ForceCheckBeforeStart if is true then the first time the system is started it will force to check all pending blocks
+
+**Example setting the default value** (true):
+```
+[Synchronizer.L1BlockCheck]
+ForceCheckBeforeStart=true
+```
+
+#### 9.7.5. `Synchronizer.L1BlockCheck.PreCheckEnable`
+
+**Type:** : `boolean`
+
+**Default:** `true`
+
+**Description:** PreCheckEnable if is true then the pre-check is active, will check blocks between L1SafeBlock and L1PreSafeBlock
+
+**Example setting the default value** (true):
+```
+[Synchronizer.L1BlockCheck]
+PreCheckEnable=true
+```
+
+#### 9.7.6. `Synchronizer.L1BlockCheck.L1PreSafeBlockPoint`
+
+**Type:** : `enum (of string)`
+
+**Default:** `"safe"`
+
+**Description:** L1PreSafeBlockPoint is the point that a block is considered safe enough to be checked
+it can be: finalized, safe,pending or latest
+
+**Example setting the default value** ("safe"):
+```
+[Synchronizer.L1BlockCheck]
+L1PreSafeBlockPoint="safe"
+```
+
+Must be one of:
+* "finalized"
+* "safe"
+* "latest"
+
+#### 9.7.7. `Synchronizer.L1BlockCheck.L1PreSafeBlockOffset`
+
+**Type:** : `integer`
+
+**Default:** `0`
+
+**Description:** L1PreSafeBlockOffset is the offset to add to L1PreSafeBlockPoint as a safe point
+it can be positive or negative
+Example: L1PreSafeBlockPoint= finalized, L1PreSafeBlockOffset= -10, then the safe block ten blocks before the finalized block
+
+**Example setting the default value** (0):
+```
+[Synchronizer.L1BlockCheck]
+L1PreSafeBlockOffset=0
+```
+
+### 9.8. `Synchronizer.L1SynchronizationMode`
**Type:** : `enum (of string)`
@@ -1463,7 +1592,7 @@ Must be one of:
* "sequential"
* "parallel"
-### 9.8. `[Synchronizer.L1ParallelSynchronization]`
+### 9.9. `[Synchronizer.L1ParallelSynchronization]`
**Type:** : `object`
**Description:** L1ParallelSynchronization Configuration for parallel mode (if L1SynchronizationMode equal to 'parallel')
@@ -1481,7 +1610,7 @@ Must be one of:
| - [RollupInfoRetriesSpacing](#Synchronizer_L1ParallelSynchronization_RollupInfoRetriesSpacing ) | No | string | No | - | Duration |
| - [FallbackToSequentialModeOnSynchronized](#Synchronizer_L1ParallelSynchronization_FallbackToSequentialModeOnSynchronized ) | No | boolean | No | - | FallbackToSequentialModeOnSynchronized if true switch to sequential mode if the system is synchronized |
-#### 9.8.1. `Synchronizer.L1ParallelSynchronization.MaxClients`
+#### 9.9.1. `Synchronizer.L1ParallelSynchronization.MaxClients`
**Type:** : `integer`
@@ -1495,7 +1624,7 @@ Must be one of:
MaxClients=10
```
-#### 9.8.2. `Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks`
+#### 9.9.2. `Synchronizer.L1ParallelSynchronization.MaxPendingNoProcessedBlocks`
**Type:** : `integer`
@@ -1510,7 +1639,7 @@ sugested twice of NumberOfParallelOfEthereumClients
MaxPendingNoProcessedBlocks=25
```
-#### 9.8.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod`
+#### 9.9.3. `Synchronizer.L1ParallelSynchronization.RequestLastBlockPeriod`
**Title:** Duration
@@ -1538,7 +1667,7 @@ This value only apply when the system is synchronized
RequestLastBlockPeriod="5s"
```
-#### 9.8.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]`
+#### 9.9.4. `[Synchronizer.L1ParallelSynchronization.PerformanceWarning]`
**Type:** : `object`
**Description:** Consumer Configuration for the consumer of rollup information from L1
@@ -1548,7 +1677,7 @@ RequestLastBlockPeriod="5s"
| - [AceptableInacctivityTime](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_AceptableInacctivityTime ) | No | string | No | - | Duration |
| - [ApplyAfterNumRollupReceived](#Synchronizer_L1ParallelSynchronization_PerformanceWarning_ApplyAfterNumRollupReceived ) | No | integer | No | - | ApplyAfterNumRollupReceived is the number of iterations to
start checking the time waiting for new rollup info data |
-##### 9.8.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime`
+##### 9.9.4.1. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.AceptableInacctivityTime`
**Title:** Duration
@@ -1577,7 +1706,7 @@ fast enought then you could increse the number of parallel clients to sync with
AceptableInacctivityTime="5s"
```
-##### 9.8.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived`
+##### 9.9.4.2. `Synchronizer.L1ParallelSynchronization.PerformanceWarning.ApplyAfterNumRollupReceived`
**Type:** : `integer`
@@ -1592,7 +1721,7 @@ start checking the time waiting for new rollup info data
ApplyAfterNumRollupReceived=10
```
-#### 9.8.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout`
+#### 9.9.5. `Synchronizer.L1ParallelSynchronization.RequestLastBlockTimeout`
**Title:** Duration
@@ -1618,7 +1747,7 @@ ApplyAfterNumRollupReceived=10
RequestLastBlockTimeout="5s"
```
-#### 9.8.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries`
+#### 9.9.6. `Synchronizer.L1ParallelSynchronization.RequestLastBlockMaxRetries`
**Type:** : `integer`
@@ -1632,7 +1761,7 @@ RequestLastBlockTimeout="5s"
RequestLastBlockMaxRetries=3
```
-#### 9.8.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod`
+#### 9.9.7. `Synchronizer.L1ParallelSynchronization.StatisticsPeriod`
**Title:** Duration
@@ -1658,7 +1787,7 @@ RequestLastBlockMaxRetries=3
StatisticsPeriod="5m0s"
```
-#### 9.8.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop`
+#### 9.9.8. `Synchronizer.L1ParallelSynchronization.TimeOutMainLoop`
**Title:** Duration
@@ -1684,7 +1813,7 @@ StatisticsPeriod="5m0s"
TimeOutMainLoop="5m0s"
```
-#### 9.8.9. `Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing`
+#### 9.9.9. `Synchronizer.L1ParallelSynchronization.RollupInfoRetriesSpacing`
**Title:** Duration
@@ -1710,7 +1839,7 @@ TimeOutMainLoop="5m0s"
RollupInfoRetriesSpacing="5s"
```
-#### 9.8.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized`
+#### 9.9.10. `Synchronizer.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized`
**Type:** : `boolean`
@@ -1724,7 +1853,7 @@ RollupInfoRetriesSpacing="5s"
FallbackToSequentialModeOnSynchronized=false
```
-### 9.9. `[Synchronizer.L2Synchronization]`
+### 9.10. `[Synchronizer.L2Synchronization]`
**Type:** : `object`
**Description:** L2Synchronization Configuration for L2 synchronization
@@ -1735,7 +1864,7 @@ FallbackToSequentialModeOnSynchronized=false
| - [ReprocessFullBatchOnClose](#Synchronizer_L2Synchronization_ReprocessFullBatchOnClose ) | No | boolean | No | - | ReprocessFullBatchOnClose if is true when a batch is closed is force to reprocess again |
| - [CheckLastL2BlockHashOnCloseBatch](#Synchronizer_L2Synchronization_CheckLastL2BlockHashOnCloseBatch ) | No | boolean | No | - | CheckLastL2BlockHashOnCloseBatch if is true when a batch is closed is force to check the last L2Block hash |
-#### 9.9.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches`
+#### 9.10.1. `Synchronizer.L2Synchronization.AcceptEmptyClosedBatches`
**Type:** : `boolean`
@@ -1750,7 +1879,7 @@ if true, the synchronizer will accept empty batches and process them.
AcceptEmptyClosedBatches=false
```
-#### 9.9.2. `Synchronizer.L2Synchronization.ReprocessFullBatchOnClose`
+#### 9.10.2. `Synchronizer.L2Synchronization.ReprocessFullBatchOnClose`
**Type:** : `boolean`
@@ -1764,7 +1893,7 @@ AcceptEmptyClosedBatches=false
ReprocessFullBatchOnClose=false
```
-#### 9.9.3. `Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch`
+#### 9.10.3. `Synchronizer.L2Synchronization.CheckLastL2BlockHashOnCloseBatch`
**Type:** : `boolean`
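The offset semantics documented above ("L1SafeBlockOffset= -10, then the safe block ten blocks before the finalized block") are easy to misread, so here is a minimal Go sketch, not part of this PR, of how a block point plus a signed offset could resolve to a concrete L1 block number. `resolveSafeBlock` is a hypothetical helper, and it assumes a go-ethereum version whose `ethclient` understands the `finalized`/`safe` tags. Note also that the schema enum only admits `finalized`, `safe` and `latest`, even though the descriptions mention `pending` as well.

```go
package sketch

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

// resolveSafeBlock resolves a configured block point ("finalized", "safe" or "latest")
// on L1 and applies the signed offset, which may be negative (e.g. "finalized" with
// offset -10 targets ten blocks before the finalized block).
func resolveSafeBlock(ctx context.Context, client *ethclient.Client, point string, offset int64) (uint64, error) {
	var tag *big.Int
	switch point {
	case "finalized":
		tag = big.NewInt(int64(rpc.FinalizedBlockNumber))
	case "safe":
		tag = big.NewInt(int64(rpc.SafeBlockNumber))
	case "latest":
		tag = nil // nil asks ethclient for the latest header
	default:
		return 0, fmt.Errorf("unsupported block point %q", point)
	}
	header, err := client.HeaderByNumber(ctx, tag)
	if err != nil {
		return 0, err
	}
	target := header.Number.Int64() + offset // offset may be negative
	if target < 0 {
		target = 0
	}
	return uint64(target), nil
}
```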
diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json
index 1c93ea4b3c..504c330f35 100644
--- a/docs/config-file/node-config-schema.json
+++ b/docs/config-file/node-config-schema.json
@@ -532,6 +532,57 @@
"description": "L1SyncCheckL2BlockNumberhModulus is the modulus used to choose the l2block to check\na modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)",
"default": 30
},
+ "L1BlockCheck": {
+ "properties": {
+ "Enable": {
+ "type": "boolean",
+ "description": "Enable if is true then the check l1 Block Hash is active",
+ "default": true
+ },
+ "L1SafeBlockPoint": {
+ "type": "string",
+ "enum": [
+ "finalized",
+ "safe",
+ "latest"
+ ],
+ "description": "L1SafeBlockPoint is the point that a block is considered safe enough to be checked\nit can be: finalized, safe,pending or latest",
+ "default": "finalized"
+ },
+ "L1SafeBlockOffset": {
+ "type": "integer",
+ "description": "L1SafeBlockOffset is the offset to add to L1SafeBlockPoint as a safe point\nit can be positive or negative\nExample: L1SafeBlockPoint= finalized, L1SafeBlockOffset= -10, then the safe block ten blocks before the finalized block",
+ "default": 0
+ },
+ "ForceCheckBeforeStart": {
+ "type": "boolean",
+ "description": "ForceCheckBeforeStart if is true then the first time the system is started it will force to check all pending blocks",
+ "default": true
+ },
+ "PreCheckEnable": {
+ "type": "boolean",
+ "description": "PreCheckEnable if is true then the pre-check is active, will check blocks between L1SafeBlock and L1PreSafeBlock",
+ "default": true
+ },
+ "L1PreSafeBlockPoint": {
+ "type": "string",
+ "enum": [
+ "finalized",
+ "safe",
+ "latest"
+ ],
+ "description": "L1PreSafeBlockPoint is the point that a block is considered safe enough to be checked\nit can be: finalized, safe,pending or latest",
+ "default": "safe"
+ },
+ "L1PreSafeBlockOffset": {
+ "type": "integer",
+ "description": "L1PreSafeBlockOffset is the offset to add to L1PreSafeBlockPoint as a safe point\nit can be positive or negative\nExample: L1PreSafeBlockPoint= finalized, L1PreSafeBlockOffset= -10, then the safe block ten blocks before the finalized block",
+ "default": 0
+ }
+ },
+ "additionalProperties": false,
+ "type": "object"
+ },
"L1SynchronizationMode": {
"type": "string",
"enum": [
diff --git a/state/interfaces.go b/state/interfaces.go
index dfde07d8ce..cc1f0127a9 100644
--- a/state/interfaces.go
+++ b/state/interfaces.go
@@ -25,6 +25,7 @@ type storage interface {
GetLastBlock(ctx context.Context, dbTx pgx.Tx) (*Block, error)
GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*Block, error)
GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*Block, error)
+ GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*Block, error)
UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error
AddGlobalExitRoot(ctx context.Context, exitRoot *GlobalExitRoot, dbTx pgx.Tx) error
GetLatestGlobalExitRoot(ctx context.Context, maxBlockNumber uint64, dbTx pgx.Tx) (GlobalExitRoot, time.Time, error)
@@ -161,4 +162,5 @@ type storage interface {
UpdateBatchAsChecked(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error
GetNotCheckedBatches(ctx context.Context, dbTx pgx.Tx) ([]*Batch, error)
GetLastL2BlockByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*L2Block, error)
+ GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*Block, error)
}
diff --git a/state/mocks/mock_storage.go b/state/mocks/mock_storage.go
index 2b03479dee..27964e0247 100644
--- a/state/mocks/mock_storage.go
+++ b/state/mocks/mock_storage.go
@@ -5549,6 +5549,66 @@ func (_c *StorageMock_GetPreviousBlock_Call) RunAndReturn(run func(context.Conte
return _c
}
+// GetPreviousBlockToBlockNumber provides a mock function with given fields: ctx, blockNumber, dbTx
+func (_m *StorageMock) GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ ret := _m.Called(ctx, blockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetPreviousBlockToBlockNumber")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok {
+ return rf(ctx, blockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok {
+ r0 = rf(ctx, blockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, blockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StorageMock_GetPreviousBlockToBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPreviousBlockToBlockNumber'
+type StorageMock_GetPreviousBlockToBlockNumber_Call struct {
+ *mock.Call
+}
+
+// GetPreviousBlockToBlockNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StorageMock_Expecter) GetPreviousBlockToBlockNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StorageMock_GetPreviousBlockToBlockNumber_Call {
+ return &StorageMock_GetPreviousBlockToBlockNumber_Call{Call: _e.mock.On("GetPreviousBlockToBlockNumber", ctx, blockNumber, dbTx)}
+}
+
+func (_c *StorageMock_GetPreviousBlockToBlockNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StorageMock_GetPreviousBlockToBlockNumber_Call) Return(_a0 *state.Block, _a1 error) *StorageMock_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StorageMock_GetPreviousBlockToBlockNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StorageMock_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetProcessingContext provides a mock function with given fields: ctx, batchNumber, dbTx
func (_m *StorageMock) GetProcessingContext(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.ProcessingContext, error) {
ret := _m.Called(ctx, batchNumber, dbTx)
@@ -6832,6 +6892,67 @@ func (_c *StorageMock_GetTxsOlderThanNL1BlocksUntilTxHash_Call) RunAndReturn(run
return _c
}
+// GetUncheckedBlocks provides a mock function with given fields: ctx, fromBlockNumber, toBlockNumber, dbTx
+func (_m *StorageMock) GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error) {
+ ret := _m.Called(ctx, fromBlockNumber, toBlockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetUncheckedBlocks")
+ }
+
+ var r0 []*state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)); ok {
+ return rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.Block); ok {
+ r0 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StorageMock_GetUncheckedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUncheckedBlocks'
+type StorageMock_GetUncheckedBlocks_Call struct {
+ *mock.Call
+}
+
+// GetUncheckedBlocks is a helper method to define mock.On call
+// - ctx context.Context
+// - fromBlockNumber uint64
+// - toBlockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StorageMock_Expecter) GetUncheckedBlocks(ctx interface{}, fromBlockNumber interface{}, toBlockNumber interface{}, dbTx interface{}) *StorageMock_GetUncheckedBlocks_Call {
+ return &StorageMock_GetUncheckedBlocks_Call{Call: _e.mock.On("GetUncheckedBlocks", ctx, fromBlockNumber, toBlockNumber, dbTx)}
+}
+
+func (_c *StorageMock_GetUncheckedBlocks_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx)) *StorageMock_GetUncheckedBlocks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StorageMock_GetUncheckedBlocks_Call) Return(_a0 []*state.Block, _a1 error) *StorageMock_GetUncheckedBlocks_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StorageMock_GetUncheckedBlocks_Call) RunAndReturn(run func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)) *StorageMock_GetUncheckedBlocks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetVerifiedBatch provides a mock function with given fields: ctx, batchNumber, dbTx
func (_m *StorageMock) GetVerifiedBatch(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (*state.VerifiedBatch, error) {
ret := _m.Called(ctx, batchNumber, dbTx)
diff --git a/state/pgstatestorage/block.go b/state/pgstatestorage/block.go
index 768b384df1..7c657a6e3b 100644
--- a/state/pgstatestorage/block.go
+++ b/state/pgstatestorage/block.go
@@ -63,6 +63,35 @@ func (p *PostgresStorage) GetFirstUncheckedBlock(ctx context.Context, fromBlockN
return &block, err
}
+func (p *PostgresStorage) GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error) {
+ const getUncheckedBlocksSQL = "SELECT block_num, block_hash, parent_hash, received_at, checked FROM state.block WHERE block_num>=$1 AND block_num<=$2 AND checked=false ORDER BY block_num"
+
+ q := p.getExecQuerier(dbTx)
+
+ rows, err := q.Query(ctx, getUncheckedBlocksSQL, fromBlockNumber, toBlockNumber)
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ var blocks []*state.Block
+ for rows.Next() {
+ var (
+ blockHash string
+ parentHash string
+ block state.Block
+ )
+ err := rows.Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked)
+ if err != nil {
+ return nil, err
+ }
+ block.BlockHash = common.HexToHash(blockHash)
+ block.ParentHash = common.HexToHash(parentHash)
+ blocks = append(blocks, &block)
+ }
+ return blocks, nil
+}
+
// GetPreviousBlock gets the offset previous L1 block respect to latest.
func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error) {
var (
@@ -83,6 +112,26 @@ func (p *PostgresStorage) GetPreviousBlock(ctx context.Context, offset uint64, d
return &block, err
}
+// GetPreviousBlockToBlockNumber gets the previous L1 block respect blockNumber.
+func (p *PostgresStorage) GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ var (
+ blockHash string
+ parentHash string
+ block state.Block
+ )
+ const getPreviousBlockSQL = "SELECT block_num, block_hash, parent_hash, received_at,checked FROM state.block WHERE block_num < $1 ORDER BY block_num DESC LIMIT 1 "
+
+ q := p.getExecQuerier(dbTx)
+
+ err := q.QueryRow(ctx, getPreviousBlockSQL, blockNumber).Scan(&block.BlockNumber, &blockHash, &parentHash, &block.ReceivedAt, &block.Checked)
+ if errors.Is(err, pgx.ErrNoRows) {
+ return nil, state.ErrNotFound
+ }
+ block.BlockHash = common.HexToHash(blockHash)
+ block.ParentHash = common.HexToHash(parentHash)
+ return &block, err
+}
+
// GetBlockByNumber returns the L1 block with the given number.
func (p *PostgresStorage) GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
var (
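For orientation (not part of the diff), a hedged sketch of how the two new storage queries above could be used together: enumerate the unchecked L1 blocks in a range and fetch the block just before the range as a rollback anchor. `listBlocksToCheck` is a hypothetical caller; it only assumes the `state` and `pgstatestorage` packages as they appear in this diff.

```go
package sketch

import (
	"context"
	"errors"
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/0xPolygonHermez/zkevm-node/state/pgstatestorage"
)

// listBlocksToCheck lists the blocks in [from, to] still flagged checked=false and the
// block immediately before the range, which is the natural anchor for a reorg rollback.
func listBlocksToCheck(ctx context.Context, storage *pgstatestorage.PostgresStorage, from, to uint64) error {
	blocks, err := storage.GetUncheckedBlocks(ctx, from, to, nil) // nil dbTx: no explicit transaction
	if err != nil {
		return err
	}
	for _, b := range blocks {
		fmt.Printf("unchecked L1 block %d hash=%s\n", b.BlockNumber, b.BlockHash.Hex())
	}

	prev, err := storage.GetPreviousBlockToBlockNumber(ctx, from, nil)
	if errors.Is(err, state.ErrNotFound) {
		fmt.Printf("no block stored before %d\n", from)
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Printf("rollback anchor candidate: block %d\n", prev.BlockNumber)
	return nil
}
```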
diff --git a/state/pgstatestorage/pgstatestorage_test.go b/state/pgstatestorage/pgstatestorage_test.go
index 416b21b47b..9c8a178dd0 100644
--- a/state/pgstatestorage/pgstatestorage_test.go
+++ b/state/pgstatestorage/pgstatestorage_test.go
@@ -1687,3 +1687,24 @@ func TestUpdateCheckedBlockByNumber(t *testing.T) {
require.NoError(t, err)
require.False(t, b1.Checked)
}
+
+func TestGetUncheckedBlocks(t *testing.T) {
+ var err error
+ blockNumber := uint64(61001)
+ err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber, Checked: true}, nil)
+ require.NoError(t, err)
+ err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 1, Checked: false}, nil)
+ require.NoError(t, err)
+ err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 2, Checked: true}, nil)
+ require.NoError(t, err)
+ err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 3, Checked: false}, nil)
+ require.NoError(t, err)
+ err = testState.AddBlock(context.Background(), &state.Block{BlockNumber: blockNumber + 4, Checked: false}, nil)
+ require.NoError(t, err)
+
+ blocks, err := testState.GetUncheckedBlocks(context.Background(), blockNumber, blockNumber+3, nil)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(blocks))
+ require.Equal(t, uint64(blockNumber+1), blocks[0].BlockNumber)
+ require.Equal(t, uint64(blockNumber+3), blocks[1].BlockNumber)
+}
diff --git a/synchronizer/common/reorg_error.go b/synchronizer/common/reorg_error.go
new file mode 100644
index 0000000000..e60dcfb22c
--- /dev/null
+++ b/synchronizer/common/reorg_error.go
@@ -0,0 +1,44 @@
+package common
+
+import "fmt"
+
+// ReorgError is an error that is raised when a reorg is detected
+type ReorgError struct {
+ // BlockNumber is the block number that caused the reorg
+ BlockNumber uint64
+ Err error
+}
+
+// NewReorgError creates a new ReorgError
+func NewReorgError(blockNumber uint64, err error) *ReorgError {
+ return &ReorgError{
+ BlockNumber: blockNumber,
+ Err: err,
+ }
+}
+
+func (e *ReorgError) Error() string {
+ return fmt.Sprintf("%s blockNumber: %d", e.Err.Error(), e.BlockNumber)
+}
+
+// IsReorgError checks if an error is a ReorgError
+func IsReorgError(err error) bool {
+ _, ok := err.(*ReorgError)
+ return ok
+}
+
+// GetReorgErrorBlockNumber returns the block number that caused the reorg
+func GetReorgErrorBlockNumber(err error) uint64 {
+ if reorgErr, ok := err.(*ReorgError); ok {
+ return reorgErr.BlockNumber
+ }
+ return 0
+}
+
+// GetReorgError returns the error that caused the reorg
+func GetReorgError(err error) error {
+ if reorgErr, ok := err.(*ReorgError); ok {
+ return reorgErr.Err
+ }
+ return nil
+}
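A small usage example of the new helpers (the surrounding `main` is illustrative only; the error message is made up):

```go
package main

import (
	"errors"
	"fmt"

	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
)

func main() {
	// Wrap a hash-mismatch error with the L1 block number that triggered it.
	base := errors.New("block hash mismatch against L1")
	err := syncCommon.NewReorgError(1234, base)

	// Callers check the error kind before deciding to roll the state back.
	if syncCommon.IsReorgError(err) {
		fmt.Println("reorg detected at block", syncCommon.GetReorgErrorBlockNumber(err))
		fmt.Println("cause:", syncCommon.GetReorgError(err))
	}
}
```

One design note on the code as written: the helpers use a direct type assertion on `*ReorgError`, so an error wrapped further with `fmt.Errorf("...: %w", err)` would no longer be recognized by `IsReorgError`; callers should pass the `ReorgError` through unwrapped (or the helpers would need `errors.As`).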
diff --git a/synchronizer/common/syncinterfaces/async_l1_block_checker.go b/synchronizer/common/syncinterfaces/async_l1_block_checker.go
new file mode 100644
index 0000000000..b95903901a
--- /dev/null
+++ b/synchronizer/common/syncinterfaces/async_l1_block_checker.go
@@ -0,0 +1,40 @@
+package syncinterfaces
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/0xPolygonHermez/zkevm-node/state"
+)
+
+type IterationResult struct {
+ Err error
+ ReorgDetected bool
+ BlockNumber uint64
+ ReorgMessage string
+}
+
+func (ir *IterationResult) String() string {
+ if ir.Err == nil {
+ if ir.ReorgDetected {
+ return fmt.Sprintf("IterationResult{ReorgDetected: %v, BlockNumber: %d ReorgMessage:%s}", ir.ReorgDetected, ir.BlockNumber, ir.ReorgMessage)
+ } else {
+ return "IterationResult{None}"
+ }
+ } else {
+ return fmt.Sprintf("IterationResult{Err: %s, ReorgDetected: %v, BlockNumber: %d ReorgMessage:%s}", ir.Err.Error(), ir.ReorgDetected, ir.BlockNumber, ir.ReorgMessage)
+ }
+}
+
+type AsyncL1BlockChecker interface {
+ Run(ctx context.Context, onFinish func())
+ RunSynchronous(ctx context.Context) IterationResult
+ Stop()
+ GetResult() *IterationResult
+}
+
+type L1BlockCheckerIntegrator interface {
+ OnStart(ctx context.Context) error
+ OnResetState(ctx context.Context)
+ CheckReorgWrapper(ctx context.Context, reorgFirstBlockOk *state.Block, errReportedByReorgFunc error) (*state.Block, error)
+}
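A hedged sketch of how a caller might consume `AsyncL1BlockChecker`, not taken from this PR: `handleCheckerResult` and `runChecker` are hypothetical names, and the translation into a `ReorgError` is one plausible integration, built only on the types defined above.

```go
package sketch

import (
	"context"
	"fmt"

	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
)

// handleCheckerResult turns a finished iteration that detected a reorg into the
// ReorgError introduced earlier, carrying the offending L1 block number.
func handleCheckerResult(checker syncinterfaces.AsyncL1BlockChecker) error {
	res := checker.GetResult() // nil while no iteration has finished yet
	if res == nil || !res.ReorgDetected {
		return nil
	}
	return syncCommon.NewReorgError(res.BlockNumber,
		fmt.Errorf("async L1 block checker: %s", res.ReorgMessage))
}

// runChecker fires an asynchronous check; onFinish could wake up the main sync loop.
func runChecker(ctx context.Context, checker syncinterfaces.AsyncL1BlockChecker) {
	checker.Run(ctx, func() {
		if err := handleCheckerResult(checker); err != nil {
			fmt.Println("stopping sync loop:", err)
		}
	})
}
```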
diff --git a/synchronizer/common/syncinterfaces/etherman.go b/synchronizer/common/syncinterfaces/etherman.go
index 3d5959ade2..fdbdd669f8 100644
--- a/synchronizer/common/syncinterfaces/etherman.go
+++ b/synchronizer/common/syncinterfaces/etherman.go
@@ -14,10 +14,11 @@ type EthermanFullInterface interface {
HeaderByNumber(ctx context.Context, number *big.Int) (*ethTypes.Header, error)
GetRollupInfoByBlockRange(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)
EthBlockByNumber(ctx context.Context, blockNumber uint64) (*ethTypes.Block, error)
- GetLatestBatchNumber() (uint64, error)
GetTrustedSequencerURL() (string, error)
VerifyGenBlockNumber(ctx context.Context, genBlockNumber uint64) (bool, error)
GetLatestVerifiedBatchNum() (uint64, error)
+
+ EthermanGetLatestBatchNumber
GetFinalizedBlockNumber(ctx context.Context) (uint64, error)
}
diff --git a/synchronizer/common/syncinterfaces/mocks/async_l1_block_checker.go b/synchronizer/common/syncinterfaces/mocks/async_l1_block_checker.go
new file mode 100644
index 0000000000..67b38de348
--- /dev/null
+++ b/synchronizer/common/syncinterfaces/mocks/async_l1_block_checker.go
@@ -0,0 +1,196 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_syncinterfaces
+
+import (
+ context "context"
+
+ syncinterfaces "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// AsyncL1BlockChecker is an autogenerated mock type for the AsyncL1BlockChecker type
+type AsyncL1BlockChecker struct {
+ mock.Mock
+}
+
+type AsyncL1BlockChecker_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *AsyncL1BlockChecker) EXPECT() *AsyncL1BlockChecker_Expecter {
+ return &AsyncL1BlockChecker_Expecter{mock: &_m.Mock}
+}
+
+// GetResult provides a mock function with given fields:
+func (_m *AsyncL1BlockChecker) GetResult() *syncinterfaces.IterationResult {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetResult")
+ }
+
+ var r0 *syncinterfaces.IterationResult
+ if rf, ok := ret.Get(0).(func() *syncinterfaces.IterationResult); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*syncinterfaces.IterationResult)
+ }
+ }
+
+ return r0
+}
+
+// AsyncL1BlockChecker_GetResult_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetResult'
+type AsyncL1BlockChecker_GetResult_Call struct {
+ *mock.Call
+}
+
+// GetResult is a helper method to define mock.On call
+func (_e *AsyncL1BlockChecker_Expecter) GetResult() *AsyncL1BlockChecker_GetResult_Call {
+ return &AsyncL1BlockChecker_GetResult_Call{Call: _e.mock.On("GetResult")}
+}
+
+func (_c *AsyncL1BlockChecker_GetResult_Call) Run(run func()) *AsyncL1BlockChecker_GetResult_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_GetResult_Call) Return(_a0 *syncinterfaces.IterationResult) *AsyncL1BlockChecker_GetResult_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_GetResult_Call) RunAndReturn(run func() *syncinterfaces.IterationResult) *AsyncL1BlockChecker_GetResult_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Run provides a mock function with given fields: ctx, onFinish
+func (_m *AsyncL1BlockChecker) Run(ctx context.Context, onFinish func()) {
+ _m.Called(ctx, onFinish)
+}
+
+// AsyncL1BlockChecker_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'
+type AsyncL1BlockChecker_Run_Call struct {
+ *mock.Call
+}
+
+// Run is a helper method to define mock.On call
+// - ctx context.Context
+// - onFinish func()
+func (_e *AsyncL1BlockChecker_Expecter) Run(ctx interface{}, onFinish interface{}) *AsyncL1BlockChecker_Run_Call {
+ return &AsyncL1BlockChecker_Run_Call{Call: _e.mock.On("Run", ctx, onFinish)}
+}
+
+func (_c *AsyncL1BlockChecker_Run_Call) Run(run func(ctx context.Context, onFinish func())) *AsyncL1BlockChecker_Run_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(func()))
+ })
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_Run_Call) Return() *AsyncL1BlockChecker_Run_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_Run_Call) RunAndReturn(run func(context.Context, func())) *AsyncL1BlockChecker_Run_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// RunSynchronous provides a mock function with given fields: ctx
+func (_m *AsyncL1BlockChecker) RunSynchronous(ctx context.Context) syncinterfaces.IterationResult {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for RunSynchronous")
+ }
+
+ var r0 syncinterfaces.IterationResult
+ if rf, ok := ret.Get(0).(func(context.Context) syncinterfaces.IterationResult); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Get(0).(syncinterfaces.IterationResult)
+ }
+
+ return r0
+}
+
+// AsyncL1BlockChecker_RunSynchronous_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunSynchronous'
+type AsyncL1BlockChecker_RunSynchronous_Call struct {
+ *mock.Call
+}
+
+// RunSynchronous is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *AsyncL1BlockChecker_Expecter) RunSynchronous(ctx interface{}) *AsyncL1BlockChecker_RunSynchronous_Call {
+ return &AsyncL1BlockChecker_RunSynchronous_Call{Call: _e.mock.On("RunSynchronous", ctx)}
+}
+
+func (_c *AsyncL1BlockChecker_RunSynchronous_Call) Run(run func(ctx context.Context)) *AsyncL1BlockChecker_RunSynchronous_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_RunSynchronous_Call) Return(_a0 syncinterfaces.IterationResult) *AsyncL1BlockChecker_RunSynchronous_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_RunSynchronous_Call) RunAndReturn(run func(context.Context) syncinterfaces.IterationResult) *AsyncL1BlockChecker_RunSynchronous_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// Stop provides a mock function with given fields:
+func (_m *AsyncL1BlockChecker) Stop() {
+ _m.Called()
+}
+
+// AsyncL1BlockChecker_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'
+type AsyncL1BlockChecker_Stop_Call struct {
+ *mock.Call
+}
+
+// Stop is a helper method to define mock.On call
+func (_e *AsyncL1BlockChecker_Expecter) Stop() *AsyncL1BlockChecker_Stop_Call {
+ return &AsyncL1BlockChecker_Stop_Call{Call: _e.mock.On("Stop")}
+}
+
+func (_c *AsyncL1BlockChecker_Stop_Call) Run(run func()) *AsyncL1BlockChecker_Stop_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_Stop_Call) Return() *AsyncL1BlockChecker_Stop_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *AsyncL1BlockChecker_Stop_Call) RunAndReturn(run func()) *AsyncL1BlockChecker_Stop_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewAsyncL1BlockChecker creates a new instance of AsyncL1BlockChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewAsyncL1BlockChecker(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *AsyncL1BlockChecker {
+ mock := &AsyncL1BlockChecker{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/common/syncinterfaces/mocks/l1_block_checker_integrator.go b/synchronizer/common/syncinterfaces/mocks/l1_block_checker_integrator.go
new file mode 100644
index 0000000000..0248874f26
--- /dev/null
+++ b/synchronizer/common/syncinterfaces/mocks/l1_block_checker_integrator.go
@@ -0,0 +1,176 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_syncinterfaces
+
+import (
+ context "context"
+
+ state "github.com/0xPolygonHermez/zkevm-node/state"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// L1BlockCheckerIntegrator is an autogenerated mock type for the L1BlockCheckerIntegrator type
+type L1BlockCheckerIntegrator struct {
+ mock.Mock
+}
+
+type L1BlockCheckerIntegrator_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *L1BlockCheckerIntegrator) EXPECT() *L1BlockCheckerIntegrator_Expecter {
+ return &L1BlockCheckerIntegrator_Expecter{mock: &_m.Mock}
+}
+
+// CheckReorgWrapper provides a mock function with given fields: ctx, reorgFirstBlockOk, errReportedByReorgFunc
+func (_m *L1BlockCheckerIntegrator) CheckReorgWrapper(ctx context.Context, reorgFirstBlockOk *state.Block, errReportedByReorgFunc error) (*state.Block, error) {
+ ret := _m.Called(ctx, reorgFirstBlockOk, errReportedByReorgFunc)
+
+ if len(ret) == 0 {
+ panic("no return value specified for CheckReorgWrapper")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *state.Block, error) (*state.Block, error)); ok {
+ return rf(ctx, reorgFirstBlockOk, errReportedByReorgFunc)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *state.Block, error) *state.Block); ok {
+ r0 = rf(ctx, reorgFirstBlockOk, errReportedByReorgFunc)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *state.Block, error) error); ok {
+ r1 = rf(ctx, reorgFirstBlockOk, errReportedByReorgFunc)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// L1BlockCheckerIntegrator_CheckReorgWrapper_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckReorgWrapper'
+type L1BlockCheckerIntegrator_CheckReorgWrapper_Call struct {
+ *mock.Call
+}
+
+// CheckReorgWrapper is a helper method to define mock.On call
+// - ctx context.Context
+// - reorgFirstBlockOk *state.Block
+// - errReportedByReorgFunc error
+func (_e *L1BlockCheckerIntegrator_Expecter) CheckReorgWrapper(ctx interface{}, reorgFirstBlockOk interface{}, errReportedByReorgFunc interface{}) *L1BlockCheckerIntegrator_CheckReorgWrapper_Call {
+ return &L1BlockCheckerIntegrator_CheckReorgWrapper_Call{Call: _e.mock.On("CheckReorgWrapper", ctx, reorgFirstBlockOk, errReportedByReorgFunc)}
+}
+
+func (_c *L1BlockCheckerIntegrator_CheckReorgWrapper_Call) Run(run func(ctx context.Context, reorgFirstBlockOk *state.Block, errReportedByReorgFunc error)) *L1BlockCheckerIntegrator_CheckReorgWrapper_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*state.Block), args[2].(error))
+ })
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_CheckReorgWrapper_Call) Return(_a0 *state.Block, _a1 error) *L1BlockCheckerIntegrator_CheckReorgWrapper_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_CheckReorgWrapper_Call) RunAndReturn(run func(context.Context, *state.Block, error) (*state.Block, error)) *L1BlockCheckerIntegrator_CheckReorgWrapper_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OnResetState provides a mock function with given fields: ctx
+func (_m *L1BlockCheckerIntegrator) OnResetState(ctx context.Context) {
+ _m.Called(ctx)
+}
+
+// L1BlockCheckerIntegrator_OnResetState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnResetState'
+type L1BlockCheckerIntegrator_OnResetState_Call struct {
+ *mock.Call
+}
+
+// OnResetState is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *L1BlockCheckerIntegrator_Expecter) OnResetState(ctx interface{}) *L1BlockCheckerIntegrator_OnResetState_Call {
+ return &L1BlockCheckerIntegrator_OnResetState_Call{Call: _e.mock.On("OnResetState", ctx)}
+}
+
+func (_c *L1BlockCheckerIntegrator_OnResetState_Call) Run(run func(ctx context.Context)) *L1BlockCheckerIntegrator_OnResetState_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_OnResetState_Call) Return() *L1BlockCheckerIntegrator_OnResetState_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_OnResetState_Call) RunAndReturn(run func(context.Context)) *L1BlockCheckerIntegrator_OnResetState_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OnStart provides a mock function with given fields: ctx
+func (_m *L1BlockCheckerIntegrator) OnStart(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for OnStart")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// L1BlockCheckerIntegrator_OnStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnStart'
+type L1BlockCheckerIntegrator_OnStart_Call struct {
+ *mock.Call
+}
+
+// OnStart is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *L1BlockCheckerIntegrator_Expecter) OnStart(ctx interface{}) *L1BlockCheckerIntegrator_OnStart_Call {
+ return &L1BlockCheckerIntegrator_OnStart_Call{Call: _e.mock.On("OnStart", ctx)}
+}
+
+func (_c *L1BlockCheckerIntegrator_OnStart_Call) Run(run func(ctx context.Context)) *L1BlockCheckerIntegrator_OnStart_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_OnStart_Call) Return(_a0 error) *L1BlockCheckerIntegrator_OnStart_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *L1BlockCheckerIntegrator_OnStart_Call) RunAndReturn(run func(context.Context) error) *L1BlockCheckerIntegrator_OnStart_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewL1BlockCheckerIntegrator creates a new instance of L1BlockCheckerIntegrator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewL1BlockCheckerIntegrator(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *L1BlockCheckerIntegrator {
+ mock := &L1BlockCheckerIntegrator{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
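The interface mocked above, `L1BlockCheckerIntegrator` (defined in async_l1_block_checker.go), suggests where the checker plugs into the sync loop. The following is a hypothetical sketch of that wiring, not code from this PR: `oneSyncIteration` and `detectReorg` are placeholder names, and `OnStart` (not shown) would run before the loop to force-check pending blocks when configured.

```go
package sketch

import (
	"context"

	"github.com/0xPolygonHermez/zkevm-node/state"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
)

// oneSyncIteration lets the L1 block checker confirm, override or extend whatever the
// regular reorg detection reported, and notifies it after the state has been reset.
func oneSyncIteration(ctx context.Context, integrator syncinterfaces.L1BlockCheckerIntegrator) error {
	firstReorgedBlock, err := detectReorg(ctx) // stand-in for the synchronizer's own checkReorg
	firstReorgedBlock, err = integrator.CheckReorgWrapper(ctx, firstReorgedBlock, err)
	if err != nil {
		return err
	}
	if firstReorgedBlock != nil {
		// ... reset state back to firstReorgedBlock.BlockNumber, then tell the checker to restart.
		integrator.OnResetState(ctx)
	}
	return nil
}

// detectReorg is a placeholder for the existing reorg-detection logic.
func detectReorg(ctx context.Context) (*state.Block, error) { return nil, nil }
```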
diff --git a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
index f4790bc695..fa570dbe7f 100644
--- a/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
+++ b/synchronizer/common/syncinterfaces/mocks/state_full_interface.go
@@ -821,6 +821,66 @@ func (_c *StateFullInterface_GetBatchByNumber_Call) RunAndReturn(run func(contex
return _c
}
+// GetBlockByNumber provides a mock function with given fields: ctx, blockNumber, dbTx
+func (_m *StateFullInterface) GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ ret := _m.Called(ctx, blockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetBlockByNumber")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok {
+ return rf(ctx, blockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok {
+ r0 = rf(ctx, blockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, blockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StateFullInterface_GetBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByNumber'
+type StateFullInterface_GetBlockByNumber_Call struct {
+ *mock.Call
+}
+
+// GetBlockByNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StateFullInterface_Expecter) GetBlockByNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StateFullInterface_GetBlockByNumber_Call {
+ return &StateFullInterface_GetBlockByNumber_Call{Call: _e.mock.On("GetBlockByNumber", ctx, blockNumber, dbTx)}
+}
+
+func (_c *StateFullInterface_GetBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StateFullInterface_GetBlockByNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateFullInterface_GetBlockByNumber_Call) Return(_a0 *state.Block, _a1 error) *StateFullInterface_GetBlockByNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StateFullInterface_GetBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateFullInterface_GetBlockByNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetExitRootByGlobalExitRoot provides a mock function with given fields: ctx, ger, dbTx
func (_m *StateFullInterface) GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*state.GlobalExitRoot, error) {
ret := _m.Called(ctx, ger, dbTx)
@@ -1805,6 +1865,66 @@ func (_c *StateFullInterface_GetPreviousBlock_Call) RunAndReturn(run func(contex
return _c
}
+// GetPreviousBlockToBlockNumber provides a mock function with given fields: ctx, blockNumber, dbTx
+func (_m *StateFullInterface) GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ ret := _m.Called(ctx, blockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetPreviousBlockToBlockNumber")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok {
+ return rf(ctx, blockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok {
+ r0 = rf(ctx, blockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, blockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StateFullInterface_GetPreviousBlockToBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPreviousBlockToBlockNumber'
+type StateFullInterface_GetPreviousBlockToBlockNumber_Call struct {
+ *mock.Call
+}
+
+// GetPreviousBlockToBlockNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StateFullInterface_Expecter) GetPreviousBlockToBlockNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StateFullInterface_GetPreviousBlockToBlockNumber_Call {
+ return &StateFullInterface_GetPreviousBlockToBlockNumber_Call{Call: _e.mock.On("GetPreviousBlockToBlockNumber", ctx, blockNumber, dbTx)}
+}
+
+func (_c *StateFullInterface_GetPreviousBlockToBlockNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StateFullInterface_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateFullInterface_GetPreviousBlockToBlockNumber_Call) Return(_a0 *state.Block, _a1 error) *StateFullInterface_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StateFullInterface_GetPreviousBlockToBlockNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateFullInterface_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// GetReorgedTransactions provides a mock function with given fields: ctx, batchNumber, dbTx
func (_m *StateFullInterface) GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*types.Transaction, error) {
ret := _m.Called(ctx, batchNumber, dbTx)
@@ -1988,6 +2108,67 @@ func (_c *StateFullInterface_GetStoredFlushID_Call) RunAndReturn(run func(contex
return _c
}
+// GetUncheckedBlocks provides a mock function with given fields: ctx, fromBlockNumber, toBlockNumber, dbTx
+func (_m *StateFullInterface) GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error) {
+ ret := _m.Called(ctx, fromBlockNumber, toBlockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetUncheckedBlocks")
+ }
+
+ var r0 []*state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)); ok {
+ return rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.Block); ok {
+ r0 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StateFullInterface_GetUncheckedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUncheckedBlocks'
+type StateFullInterface_GetUncheckedBlocks_Call struct {
+ *mock.Call
+}
+
+// GetUncheckedBlocks is a helper method to define mock.On call
+// - ctx context.Context
+// - fromBlockNumber uint64
+// - toBlockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StateFullInterface_Expecter) GetUncheckedBlocks(ctx interface{}, fromBlockNumber interface{}, toBlockNumber interface{}, dbTx interface{}) *StateFullInterface_GetUncheckedBlocks_Call {
+ return &StateFullInterface_GetUncheckedBlocks_Call{Call: _e.mock.On("GetUncheckedBlocks", ctx, fromBlockNumber, toBlockNumber, dbTx)}
+}
+
+func (_c *StateFullInterface_GetUncheckedBlocks_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx)) *StateFullInterface_GetUncheckedBlocks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateFullInterface_GetUncheckedBlocks_Call) Return(_a0 []*state.Block, _a1 error) *StateFullInterface_GetUncheckedBlocks_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StateFullInterface_GetUncheckedBlocks_Call) RunAndReturn(run func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)) *StateFullInterface_GetUncheckedBlocks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// OpenBatch provides a mock function with given fields: ctx, processingContext, dbTx
func (_m *StateFullInterface) OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error {
ret := _m.Called(ctx, processingContext, dbTx)
diff --git a/synchronizer/common/syncinterfaces/state.go b/synchronizer/common/syncinterfaces/state.go
index 0aff583319..cafae4104e 100644
--- a/synchronizer/common/syncinterfaces/state.go
+++ b/synchronizer/common/syncinterfaces/state.go
@@ -28,6 +28,7 @@ type StateFullInterface interface {
AddForcedBatch(ctx context.Context, forcedBatch *state.ForcedBatch, dbTx pgx.Tx) error
AddBlock(ctx context.Context, block *state.Block, dbTx pgx.Tx) error
Reset(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) error
+ GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error)
GetPreviousBlock(ctx context.Context, offset uint64, dbTx pgx.Tx) (*state.Block, error)
GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error)
UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error
@@ -75,4 +76,6 @@ type StateFullInterface interface {
UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error
GetLastL2BlockNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error)
GetL2BlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.L2Block, error)
+ GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error)
+ GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error)
}
diff --git a/synchronizer/config.go b/synchronizer/config.go
index 0f7d822a60..ef51d41308 100644
--- a/synchronizer/config.go
+++ b/synchronizer/config.go
@@ -1,6 +1,8 @@
package synchronizer
import (
+ "fmt"
+
"github.com/0xPolygonHermez/zkevm-node/config/types"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/l2_sync"
)
@@ -22,6 +24,7 @@ type Config struct {
// a modules 5, for instance, means check all l2block multiples of 5 (10,15,20,...)
L1SyncCheckL2BlockNumberhModulus uint64 `mapstructure:"L1SyncCheckL2BlockNumberhModulus"`
+ L1BlockCheck L1BlockCheckConfig `mapstructure:"L1BlockCheck"`
// L1SynchronizationMode define how to synchronize with L1:
// - parallel: Request data to L1 in parallel, and process sequentially. The advantage is that executor is not blocked waiting for L1 data
// - sequential: Request data to L1 and execute
@@ -32,6 +35,35 @@ type Config struct {
L2Synchronization l2_sync.Config `mapstructure:"L2Synchronization"`
}
+// L1BlockCheckConfig is the configuration for the L1 Block Checker
+type L1BlockCheckConfig struct {
+	// Enable if true enables the L1 block hash check
+	Enable bool `mapstructure:"Enable"`
+	// L1SafeBlockPoint is the point at which a block is considered safe enough to be checked
+	// it can be: finalized, safe, pending or latest
+	L1SafeBlockPoint string `mapstructure:"L1SafeBlockPoint" jsonschema:"enum=finalized,enum=safe,enum=pending,enum=latest"`
+	// L1SafeBlockOffset is the offset added to L1SafeBlockPoint to obtain the safe point
+	// it can be positive or negative
+	// Example: L1SafeBlockPoint=finalized, L1SafeBlockOffset=-10 means the safe block is ten blocks before the finalized block
+	L1SafeBlockOffset int `mapstructure:"L1SafeBlockOffset"`
+	// ForceCheckBeforeStart if true forces a check of all pending blocks before the synchronizer starts
+	ForceCheckBeforeStart bool `mapstructure:"ForceCheckBeforeStart"`
+
+	// PreCheckEnable if true enables the pre-check, which checks the blocks between L1SafeBlock and L1PreSafeBlock
+	PreCheckEnable bool `mapstructure:"PreCheckEnable"`
+	// L1PreSafeBlockPoint is the point at which a block is considered safe enough to be pre-checked
+	// it can be: finalized, safe, pending or latest
+	L1PreSafeBlockPoint string `mapstructure:"L1PreSafeBlockPoint" jsonschema:"enum=finalized,enum=safe,enum=pending,enum=latest"`
+	// L1PreSafeBlockOffset is the offset added to L1PreSafeBlockPoint to obtain the pre-safe point
+	// it can be positive or negative
+	// Example: L1PreSafeBlockPoint=finalized, L1PreSafeBlockOffset=-10 means the pre-safe block is ten blocks before the finalized block
+	L1PreSafeBlockOffset int `mapstructure:"L1PreSafeBlockOffset"`
+}
+
+func (c *L1BlockCheckConfig) String() string {
+	return fmt.Sprintf("Enable: %v, L1SafeBlockPoint: %s, L1SafeBlockOffset: %d, ForceCheckBeforeStart: %v, PreCheckEnable: %v, L1PreSafeBlockPoint: %s, L1PreSafeBlockOffset: %d",
+		c.Enable, c.L1SafeBlockPoint, c.L1SafeBlockOffset, c.ForceCheckBeforeStart, c.PreCheckEnable, c.L1PreSafeBlockPoint, c.L1PreSafeBlockOffset)
+}
+
// L1ParallelSynchronizationConfig Configuration for parallel mode (if UL1SynchronizationMode equal to 'parallel')
type L1ParallelSynchronizationConfig struct {
// MaxClients Number of clients used to synchronize with L1
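For clarity, a minimal sketch (not part of this patch) of how the point/offset pair described above is intended to be combined; the real logic lives behind the SafeL1BlockNumberFetcher interface added in synchronizer/l1_check_block, and the helper below is hypothetical:

// resolveSafeBlockNumber is a hypothetical helper, shown only to illustrate the
// semantics of L1SafeBlockPoint/L1SafeBlockOffset (and the pre-safe equivalents):
// take the block number that L1 reports for the configured point and apply the
// signed offset, clamping at zero.
func resolveSafeBlockNumber(pointBlockNumber uint64, offset int) uint64 {
	adjusted := int64(pointBlockNumber) + int64(offset)
	if adjusted < 0 {
		return 0
	}
	return uint64(adjusted)
}

// Example: the "finalized" point resolves to block 1000 and the offset is -10,
// so blocks up to 990 are considered safe to check.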
diff --git a/synchronizer/l1_check_block/async.go b/synchronizer/l1_check_block/async.go
new file mode 100644
index 0000000000..4a2a45d924
--- /dev/null
+++ b/synchronizer/l1_check_block/async.go
@@ -0,0 +1,183 @@
+package l1_check_block
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
+)
+
+// L1BlockChecker is an interface that defines the method to check L1 blocks
+type L1BlockChecker interface {
+ Step(ctx context.Context) error
+}
+
+const (
+ defaultPeriodTime = time.Second
+)
+
+// AsyncCheck wraps an L1BlockChecker so that it can run asynchronously
+type AsyncCheck struct {
+ checker L1BlockChecker
+ mutex sync.Mutex
+ lastResult *syncinterfaces.IterationResult
+ onFinishCall func()
+ periodTime time.Duration
+ // Wg is a wait group to wait for the result
+ Wg sync.WaitGroup
+ ctx context.Context
+ cancelCtx context.CancelFunc
+ isRunning bool
+}
+
+// NewAsyncCheck creates a new AsyncCheck
+func NewAsyncCheck(checker L1BlockChecker) *AsyncCheck {
+ return &AsyncCheck{
+ checker: checker,
+ periodTime: defaultPeriodTime,
+ }
+}
+
+// SetPeriodTime sets the wait time between consecutive checker.Step calls
+func (a *AsyncCheck) SetPeriodTime(periodTime time.Duration) {
+ a.periodTime = periodTime
+}
+
+// Run is a method that starts the async check
+func (a *AsyncCheck) Run(ctx context.Context, onFinish func()) {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ a.onFinishCall = onFinish
+ if a.isRunning {
+ log.Infof("%s L1BlockChecker: already running, changing onFinish call", logPrefix)
+ return
+ }
+ a.lastResult = nil
+ a.ctx, a.cancelCtx = context.WithCancel(ctx)
+ a.launchChecker(a.ctx)
+}
+
+// Stop is a method that stops the async check
+func (a *AsyncCheck) Stop() {
+ a.cancelCtx()
+ a.Wg.Wait()
+}
+
+// RunSynchronous executes a single check iteration synchronously (typically used to force a check before starting the async one)
+func (a *AsyncCheck) RunSynchronous(ctx context.Context) syncinterfaces.IterationResult {
+ return a.executeIteration(ctx)
+}
+
+// GetResult returns the last result of the check:
+// - nil -> still running
+// - not nil -> finished; this is the result. You must call Run again to start a new check
+func (a *AsyncCheck) GetResult() *syncinterfaces.IterationResult {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ return a.lastResult
+}
+
+// https://stackoverflow.com/questions/32840687/timeout-for-waitgroup-wait
+// waitTimeout waits for the waitgroup for the specified max timeout.
+// Returns true if waiting timed out.
+func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
+ c := make(chan struct{})
+ go func() {
+ defer close(c)
+ wg.Wait()
+ }()
+ select {
+ case <-c:
+ return false // completed normally
+ case <-time.After(timeout):
+ return true // timed out
+ }
+}
+
+// GetResultBlockingUntilAvailable waits up to the given timeout for a result; if the timeout is
+// reached it returns the current result (which may still be nil).
+// If timeout is 0, it waits indefinitely.
+func (a *AsyncCheck) GetResultBlockingUntilAvailable(timeout time.Duration) *syncinterfaces.IterationResult {
+ if timeout == 0 {
+ a.Wg.Wait()
+ } else {
+ waitTimeout(&a.Wg, timeout)
+ }
+ return a.GetResult()
+}
+
+func (a *AsyncCheck) setResult(result syncinterfaces.IterationResult) {
+ a.mutex.Lock()
+ defer a.mutex.Unlock()
+ a.lastResult = &result
+}
+
+func (a *AsyncCheck) launchChecker(ctx context.Context) {
+ // add waitGroup to wait for a result
+ a.Wg.Add(1)
+ a.isRunning = true
+ go func() {
+ log.Infof("%s L1BlockChecker: starting background process", logPrefix)
+ for {
+ result := a.step(ctx)
+ if result != nil {
+ a.setResult(*result)
+				// A result is available: store it and exit the loop so the wait group is released
+ break
+ }
+ }
+ log.Infof("%s L1BlockChecker: finished background process", logPrefix)
+ a.Wg.Done()
+ a.mutex.Lock()
+ onFinishCall := a.onFinishCall
+ a.isRunning = false
+ a.mutex.Unlock()
+ // call onFinish function with no mutex
+ if onFinishCall != nil {
+ onFinishCall()
+ }
+ }()
+}
+
+// step executes a single iteration of the checker. It returns nil to keep looping,
+// or a non-nil result when the context is done or a reorg has been detected.
+func (a *AsyncCheck) step(ctx context.Context) *syncinterfaces.IterationResult {
+ select {
+ case <-ctx.Done():
+ log.Debugf("%s L1BlockChecker: context done", logPrefix)
+ return &syncinterfaces.IterationResult{Err: ctx.Err()}
+ default:
+ result := a.executeIteration(ctx)
+ if result.ReorgDetected {
+ return &result
+ }
+		log.Debugf("%s L1BlockChecker: returned %s, waiting %s to relaunch", logPrefix, result.String(), a.periodTime)
+ time.Sleep(a.periodTime)
+ }
+ return nil
+}
+
+// executeIteration executes a single iteration of the checker
+func (a *AsyncCheck) executeIteration(ctx context.Context) syncinterfaces.IterationResult {
+ res := syncinterfaces.IterationResult{}
+ log.Debugf("%s calling checker.Step(...)", logPrefix)
+ res.Err = a.checker.Step(ctx)
+	log.Debugf("%s checker.Step(...) returned: %v", logPrefix, res.Err)
+ if res.Err != nil {
+		log.Errorf("%s failed to check L1 blocks: %v", logPrefix, res.Err)
+		if common.IsReorgError(res.Err) {
+			// A reorg error is not propagated as an error: it is translated into the iteration result
+			blockNumber := common.GetReorgErrorBlockNumber(res.Err)
+			log.Infof("%s Reorg detected at block %d", logPrefix, blockNumber)
+			res.BlockNumber = blockNumber
+			res.ReorgDetected = true
+			res.ReorgMessage = res.Err.Error()
+			res.Err = nil
+ }
+ }
+ return res
+}
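A minimal usage sketch of the AsyncCheck API defined above (the checker argument can be any L1BlockChecker implementation, e.g. the CheckL1BlockHash type added later in this patch; the function name, package name and period are illustrative):

package example // illustrative only

import (
	"context"
	"time"

	"github.com/0xPolygonHermez/zkevm-node/log"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
)

// runCheckerOnce launches the checker in the background and blocks until it
// reports a result (the background loop only stops on context done or reorg).
func runCheckerOnce(ctx context.Context, checker l1_check_block.L1BlockChecker) {
	async := l1_check_block.NewAsyncCheck(checker)
	async.SetPeriodTime(10 * time.Second) // wait 10s between checker.Step calls
	async.Run(ctx, func() { log.Info("l1 block checker finished") })

	// A zero timeout means wait indefinitely for the background result.
	result := async.GetResultBlockingUntilAvailable(0)
	if result != nil && result.ReorgDetected {
		log.Warnf("reorg detected at L1 block %d: %s", result.BlockNumber, result.ReorgMessage)
	}
}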
diff --git a/synchronizer/l1_check_block/async_test.go b/synchronizer/l1_check_block/async_test.go
new file mode 100644
index 0000000000..21358b1c8f
--- /dev/null
+++ b/synchronizer/l1_check_block/async_test.go
@@ -0,0 +1,138 @@
+package l1_check_block_test
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ errGenericToTestAsync = fmt.Errorf("error_async")
+ errReorgToTestAsync = common.NewReorgError(uint64(1234), fmt.Errorf("fake reorg to test"))
+ timeoutContextForAsyncTests = time.Second
+)
+
+type mockChecker struct {
+ Wg *sync.WaitGroup
+ ErrorsToReturn []error
+}
+
+func (m *mockChecker) Step(ctx context.Context) error {
+	defer m.Wg.Done()
+	if len(m.ErrorsToReturn) == 0 {
+		return nil
+	}
+	err := m.ErrorsToReturn[0]
+	m.ErrorsToReturn = m.ErrorsToReturn[1:]
+	return err
+}
+
+// If checker.Step() returns OK, the async object relaunches the call until a reorg is detected
+func TestAsyncRelaunchCheckerUntilReorgDetected(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{nil, nil, errGenericToTestAsync, errReorgToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ mockChecker.Wg.Add(4)
+
+ sut.Run(ctx, nil)
+
+ mockChecker.Wg.Wait()
+ result := sut.GetResultBlockingUntilAvailable(0)
+ require.NotNil(t, result)
+ require.Equal(t, uint64(1234), result.BlockNumber)
+ require.Equal(t, true, result.ReorgDetected)
+ require.Equal(t, nil, result.Err)
+}
+
+func TestAsyncGetResultIsNilUntilStops(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{nil, nil, errGenericToTestAsync, errReorgToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ mockChecker.Wg.Add(4)
+ require.Nil(t, sut.GetResult(), "before start result is Nil")
+
+ sut.Run(ctx, nil)
+
+ require.Nil(t, sut.GetResult(), "after start result is Nil")
+ mockChecker.Wg.Wait()
+ result := sut.GetResultBlockingUntilAvailable(0)
+ require.NotNil(t, result)
+}
+
+// RunSynchronous returns the first result, regardless of whether it is a reorg or not
+func TestAsyncGRunSynchronousReturnTheFirstResult(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{errGenericToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ mockChecker.Wg.Add(1)
+
+ result := sut.RunSynchronous(ctx)
+
+ require.NotNil(t, result)
+ require.Equal(t, uint64(0), result.BlockNumber)
+ require.Equal(t, false, result.ReorgDetected)
+ require.Equal(t, errGenericToTestAsync, result.Err)
+}
+
+func TestAsyncGRunSynchronousDontAffectGetResult(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{errGenericToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ mockChecker.Wg.Add(1)
+
+ result := sut.RunSynchronous(ctx)
+
+ require.NotNil(t, result)
+ require.Nil(t, sut.GetResult())
+}
+
+func TestAsyncStop(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{nil, nil, errGenericToTestAsync, errReorgToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ require.Nil(t, sut.GetResult(), "before start result is Nil")
+ mockChecker.Wg.Add(4)
+ sut.Run(ctx, nil)
+ sut.Stop()
+ sut.Stop()
+
+ result := sut.GetResultBlockingUntilAvailable(0)
+ require.NotNil(t, result)
+ mockChecker.Wg = &sync.WaitGroup{}
+ mockChecker.Wg.Add(4)
+ mockChecker.ErrorsToReturn = []error{nil, nil, errGenericToTestAsync, errReorgToTestAsync}
+ sut.Run(ctx, nil)
+ mockChecker.Wg.Wait()
+ result = sut.GetResultBlockingUntilAvailable(0)
+ require.NotNil(t, result)
+}
+
+func TestAsyncMultipleRun(t *testing.T) {
+ mockChecker := &mockChecker{ErrorsToReturn: []error{nil, nil, errGenericToTestAsync, errReorgToTestAsync}, Wg: &sync.WaitGroup{}}
+ sut := l1_check_block.NewAsyncCheck(mockChecker)
+ sut.SetPeriodTime(0)
+ ctx, cancel := context.WithTimeout(context.Background(), timeoutContextForAsyncTests)
+ defer cancel()
+ require.Nil(t, sut.GetResult(), "before start result is Nil")
+ mockChecker.Wg.Add(4)
+ sut.Run(ctx, nil)
+ sut.Run(ctx, nil)
+ sut.Run(ctx, nil)
+ result := sut.GetResultBlockingUntilAvailable(0)
+ require.NotNil(t, result)
+}
diff --git a/synchronizer/l1_check_block/check_l1block.go b/synchronizer/l1_check_block/check_l1block.go
new file mode 100644
index 0000000000..cd1204c5b3
--- /dev/null
+++ b/synchronizer/l1_check_block/check_l1block.go
@@ -0,0 +1,146 @@
+package l1_check_block
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/jackc/pgx/v4"
+)
+
+// This object checks old L1 blocks to double-check that the stored L1 block hash is correct:
+// - Get the first unchecked block
+// - Get the safe block point on L1 (safe / finalized / a block point minus an offset)
+
+// L1Requester is an interface for GETH client
+type L1Requester interface {
+ HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
+}
+
+// StateInterfacer is an interface for the state
+type StateInterfacer interface {
+ GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error)
+ UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error
+}
+
+// SafeL1BlockNumberFetcher is an interface for fetching the L1 block number reference point (safe, finalized,...)
+type SafeL1BlockNumberFetcher interface {
+ GetSafeBlockNumber(ctx context.Context, l1Client L1Requester) (uint64, error)
+ Description() string
+}
+
+// CheckL1BlockHash is a struct that implements a checker of L1Block hash
+type CheckL1BlockHash struct {
+ L1Client L1Requester
+ State StateInterfacer
+ SafeBlockNumberFetcher SafeL1BlockNumberFetcher
+}
+
+// NewCheckL1BlockHash creates a new CheckL1BlockHash
+func NewCheckL1BlockHash(l1Client L1Requester, state StateInterfacer, safeBlockNumberFetcher SafeL1BlockNumberFetcher) *CheckL1BlockHash {
+ return &CheckL1BlockHash{
+ L1Client: l1Client,
+ State: state,
+ SafeBlockNumberFetcher: safeBlockNumberFetcher,
+ }
+}
+
+// Name is a method that returns the name of the checker
+func (p *CheckL1BlockHash) Name() string {
+ return logPrefix + " main_checker: "
+}
+
+// Step checks the L1 block hashes, running until all pending blocks up to the safe point are checked, and then returns
+func (p *CheckL1BlockHash) Step(ctx context.Context) error {
+ stateBlock, err := p.State.GetFirstUncheckedBlock(ctx, uint64(0), nil)
+ if errors.Is(err, state.ErrNotFound) {
+ log.Debugf("%s: No unchecked blocks to check", p.Name())
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+ if stateBlock == nil {
+		log.Warnf("%s: GetFirstUncheckedBlock returned a nil block without an error", p.Name())
+ return nil
+ }
+ safeBlockNumber, err := p.SafeBlockNumberFetcher.GetSafeBlockNumber(ctx, p.L1Client)
+ if err != nil {
+ return err
+ }
+ log.Debugf("%s: checking from block (%s) %d first block to check: %d....", p.Name(), p.SafeBlockNumberFetcher.Description(), safeBlockNumber, stateBlock.BlockNumber)
+ return p.doAllBlocks(ctx, *stateBlock, safeBlockNumber)
+}
+
+func (p *CheckL1BlockHash) doAllBlocks(ctx context.Context, firstStateBlock state.Block, safeBlockNumber uint64) error {
+ var err error
+ startTime := time.Now()
+ stateBlock := &firstStateBlock
+ numBlocksChecked := 0
+ for {
+ lastStateBlockNumber := stateBlock.BlockNumber
+ if stateBlock.BlockNumber > safeBlockNumber {
+			log.Debugf("%s: block %d to check is not yet safe enough (%s) %d", p.Name(), stateBlock.BlockNumber, p.SafeBlockNumberFetcher.Description(), safeBlockNumber)
+ return nil
+ }
+ err = p.doBlock(ctx, stateBlock)
+ if err != nil {
+ return err
+ }
+ numBlocksChecked++
+ stateBlock, err = p.State.GetFirstUncheckedBlock(ctx, lastStateBlockNumber, nil)
+ if errors.Is(err, state.ErrNotFound) {
+ diff := time.Since(startTime)
+ log.Infof("%s: checked all blocks (%d) (using as safe Block Point(%s): %d) time:%s", p.Name(), numBlocksChecked, p.SafeBlockNumberFetcher.Description(), safeBlockNumber, diff)
+ return nil
+ }
+ }
+}
+
+func (p *CheckL1BlockHash) doBlock(ctx context.Context, stateBlock *state.Block) error {
+ err := CheckBlockHash(ctx, stateBlock, p.L1Client, p.Name())
+ if err != nil {
+ return err
+ }
+	log.Infof("%s: L1Block: %d hash: %s is correct, marking it as checked", p.Name(), stateBlock.BlockNumber,
+		stateBlock.BlockHash.String())
+ err = p.State.UpdateCheckedBlockByNumber(ctx, stateBlock.BlockNumber, true, nil)
+ if err != nil {
+ log.Errorf("%s: Error updating block %d as checked. err: %s", p.Name(), stateBlock.BlockNumber, err.Error())
+ return err
+ }
+ return nil
+}
+
+// CheckBlockHash is a method that checks the L1 block hash
+func CheckBlockHash(ctx context.Context, stateBlock *state.Block, L1Client L1Requester, checkerName string) error {
+ if stateBlock == nil {
+		log.Warnf("%s CheckBlockHash received a nil block pointer", checkerName)
+ return nil
+ }
+ l1Block, err := L1Client.HeaderByNumber(ctx, big.NewInt(int64(stateBlock.BlockNumber)))
+ if err != nil {
+ return err
+ }
+ if l1Block == nil {
+		err = fmt.Errorf("%s the request of block %d to L1 returned a nil header", checkerName, stateBlock.BlockNumber)
+ log.Error(err.Error())
+ return err
+ }
+ if l1Block.Hash() != stateBlock.BlockHash {
+		msg := fmt.Sprintf("%s Reorg detected at block %d l1Block.Hash=%s != stateBlock.Hash=%s.", checkerName, stateBlock.BlockNumber,
+			l1Block.Hash().String(), stateBlock.BlockHash.String())
+		if l1Block.ParentHash != stateBlock.ParentHash {
+			msg += fmt.Sprintf(" The parent hashes also differ: l1Block.ParentHash=%s != stateBlock.ParentHash=%s", l1Block.ParentHash.String(), stateBlock.ParentHash.String())
+		}
+		log.Error(msg)
+		return common.NewReorgError(stateBlock.BlockNumber, errors.New(msg))
+ }
+ return nil
+}
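Since CheckBlockHash is exported, it can also be used on its own. A rough sketch under stated assumptions: the RPC URL is a placeholder, the stored block comes from the state DB, and ethclient.Client satisfies L1Requester because it already implements HeaderByNumber with this signature.

package example // illustrative only

import (
	"context"

	"github.com/0xPolygonHermez/zkevm-node/log"
	"github.com/0xPolygonHermez/zkevm-node/state"
	synccommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
	"github.com/ethereum/go-ethereum/ethclient"
)

// checkOneBlock verifies a single stored block hash against a live L1 endpoint.
func checkOneBlock(ctx context.Context, rpcURL string, stored *state.Block) error {
	client, err := ethclient.Dial(rpcURL) // rpcURL is a placeholder
	if err != nil {
		return err
	}
	defer client.Close()
	err = l1_check_block.CheckBlockHash(ctx, stored, client, "manual-check:")
	if err != nil && synccommon.IsReorgError(err) {
		log.Warnf("reorg detected at L1 block %d", synccommon.GetReorgErrorBlockNumber(err))
	}
	return err
}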
diff --git a/synchronizer/l1_check_block/check_l1block_test.go b/synchronizer/l1_check_block/check_l1block_test.go
new file mode 100644
index 0000000000..e5090140a3
--- /dev/null
+++ b/synchronizer/l1_check_block/check_l1block_test.go
@@ -0,0 +1,128 @@
+package l1_check_block_test
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+ "testing"
+
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ commonsync "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ mock_l1_check_block "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block/mocks"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+type testData struct {
+ mockL1Client *mock_l1_check_block.L1Requester
+ mockState *mock_l1_check_block.StateInterfacer
+ mockBlockNumberFetch *mock_l1_check_block.SafeL1BlockNumberFetcher
+ sut *l1_check_block.CheckL1BlockHash
+ ctx context.Context
+ stateBlock *state.Block
+}
+
+func newTestData(t *testing.T) *testData {
+ mockL1Client := mock_l1_check_block.NewL1Requester(t)
+ mockState := mock_l1_check_block.NewStateInterfacer(t)
+ mockBlockNumberFetch := mock_l1_check_block.NewSafeL1BlockNumberFetcher(t)
+ mockBlockNumberFetch.EXPECT().Description().Return("mock").Maybe()
+ sut := l1_check_block.NewCheckL1BlockHash(mockL1Client, mockState, mockBlockNumberFetch)
+ require.NotNil(t, sut)
+ ctx := context.Background()
+ return &testData{
+ mockL1Client: mockL1Client,
+ mockState: mockState,
+ mockBlockNumberFetch: mockBlockNumberFetch,
+ sut: sut,
+ ctx: ctx,
+ stateBlock: &state.Block{
+ BlockNumber: 1234,
+ BlockHash: common.HexToHash("0xb07e1289b32edefd8f3c702d016fb73c81d5950b2ebc790ad9d2cb8219066b4c"),
+ },
+ }
+}
+
+func TestCheckL1BlockHashNoBlocksOnDB(t *testing.T) {
+ data := newTestData(t)
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(nil, state.ErrNotFound)
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+func TestCheckL1BlockHashErrorGettingFirstUncheckedBlockFromDB(t *testing.T) {
+ data := newTestData(t)
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(nil, fmt.Errorf("error"))
+ res := data.sut.Step(data.ctx)
+ require.Error(t, res)
+}
+
+func TestCheckL1BlockHashErrorGettingGetSafeBlockNumber(t *testing.T) {
+ data := newTestData(t)
+
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(data.stateBlock, nil)
+ data.mockBlockNumberFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(0), fmt.Errorf("error"))
+ res := data.sut.Step(data.ctx)
+ require.Error(t, res)
+}
+
+// The first block to check is beyond the safe point, so there is nothing to do
+func TestCheckL1BlockHashSafePointIsInFuture(t *testing.T) {
+ data := newTestData(t)
+
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(data.stateBlock, nil)
+ data.mockBlockNumberFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(data.stateBlock.BlockNumber-1, nil)
+
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+func TestCheckL1BlockHashL1ClientReturnsANil(t *testing.T) {
+ data := newTestData(t)
+
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(data.stateBlock, nil)
+ data.mockBlockNumberFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(data.stateBlock.BlockNumber+10, nil)
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlock.BlockNumber))).Return(nil, nil)
+ res := data.sut.Step(data.ctx)
+ require.Error(t, res)
+}
+
+// Check a block that is OK
+func TestCheckL1BlockHashMatchHashUpdateCheckMarkOnDB(t *testing.T) {
+ data := newTestData(t)
+
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(data.stateBlock, nil)
+ data.mockBlockNumberFetch.EXPECT().Description().Return("mock")
+ data.mockBlockNumberFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(data.stateBlock.BlockNumber, nil)
+ l1Block := &types.Header{
+ Number: big.NewInt(100),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlock.BlockNumber))).Return(l1Block, nil)
+ data.mockState.EXPECT().UpdateCheckedBlockByNumber(data.ctx, data.stateBlock.BlockNumber, true, nil).Return(nil)
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, mock.Anything, nil).Return(nil, state.ErrNotFound)
+
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+// The first block to check is equal to the safe point, so it must be processed; its stored hash does not match L1, so a reorg error is expected
+func TestCheckL1BlockHashMismatch(t *testing.T) {
+ data := newTestData(t)
+
+ data.mockState.EXPECT().GetFirstUncheckedBlock(data.ctx, uint64(0), nil).Return(data.stateBlock, nil)
+ data.stateBlock.BlockHash = common.HexToHash("0x1234") // Wrong hash to trigger a mismatch
+ data.mockBlockNumberFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(data.stateBlock.BlockNumber, nil)
+ l1Block := &types.Header{
+ Number: big.NewInt(100),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlock.BlockNumber))).Return(l1Block, nil)
+
+ res := data.sut.Step(data.ctx)
+ require.Error(t, res)
+ resErr, ok := res.(*commonsync.ReorgError)
+ require.True(t, ok)
+ require.Equal(t, data.stateBlock.BlockNumber, resErr.BlockNumber)
+}
diff --git a/synchronizer/l1_check_block/common.go b/synchronizer/l1_check_block/common.go
new file mode 100644
index 0000000000..a473c220a3
--- /dev/null
+++ b/synchronizer/l1_check_block/common.go
@@ -0,0 +1,5 @@
+package l1_check_block
+
+const (
+ logPrefix = "checkL1block:"
+)
diff --git a/synchronizer/l1_check_block/integration.go b/synchronizer/l1_check_block/integration.go
new file mode 100644
index 0000000000..82a962eb3f
--- /dev/null
+++ b/synchronizer/l1_check_block/integration.go
@@ -0,0 +1,205 @@
+package l1_check_block
+
+import (
+ "context"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
+ "github.com/jackc/pgx/v4"
+)
+
+// StateForL1BlockCheckerIntegration is an interface for the state
+type StateForL1BlockCheckerIntegration interface {
+ GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error)
+}
+
+// L1BlockCheckerIntegration is a struct that integrates the L1BlockChecker with the synchronizer
+type L1BlockCheckerIntegration struct {
+ forceCheckOnStart bool
+ checker syncinterfaces.AsyncL1BlockChecker
+ preChecker syncinterfaces.AsyncL1BlockChecker
+ state StateForL1BlockCheckerIntegration
+ sync SyncCheckReorger
+ timeBetweenRetries time.Duration
+}
+
+// SyncCheckReorger is an interface that defines the methods required from the synchronizer object
+type SyncCheckReorger interface {
+ ExecuteReorgFromMismatchBlock(blockNumber uint64, reason string) error
+ OnDetectedMismatchL1BlockReorg()
+}
+
+// NewL1BlockCheckerIntegration creates a new L1BlockCheckerIntegration
+func NewL1BlockCheckerIntegration(checker syncinterfaces.AsyncL1BlockChecker, preChecker syncinterfaces.AsyncL1BlockChecker, state StateForL1BlockCheckerIntegration, sync SyncCheckReorger, forceCheckOnStart bool, timeBetweenRetries time.Duration) *L1BlockCheckerIntegration {
+ return &L1BlockCheckerIntegration{
+ forceCheckOnStart: forceCheckOnStart,
+ checker: checker,
+ preChecker: preChecker,
+ state: state,
+ sync: sync,
+ timeBetweenRetries: timeBetweenRetries,
+ }
+}
+
+// OnStart is a method that is called before starting the synchronizer
+func (v *L1BlockCheckerIntegration) OnStart(ctx context.Context) error {
+ if v.forceCheckOnStart {
+ log.Infof("%s Forcing L1BlockChecker check before start", logPrefix)
+ result := v.runCheckerSync(ctx, v.checker)
+ if result.ReorgDetected {
+ v.executeResult(ctx, result)
+ } else {
+ log.Infof("%s Forcing L1BlockChecker check:OK ", logPrefix)
+ if v.preChecker != nil {
+ log.Infof("%s Forcing L1BlockChecker preCheck before start", logPrefix)
+ result = v.runCheckerSync(ctx, v.preChecker)
+ if result.ReorgDetected {
+ v.executeResult(ctx, result)
+ } else {
+ log.Infof("%s Forcing L1BlockChecker preCheck:OK", logPrefix)
+ }
+ }
+ }
+ }
+ v.launch(ctx)
+ return nil
+}
+
+func (v *L1BlockCheckerIntegration) runCheckerSync(ctx context.Context, checker syncinterfaces.AsyncL1BlockChecker) syncinterfaces.IterationResult {
+ for {
+ result := checker.RunSynchronous(ctx)
+ if result.Err == nil {
+ return result
+ } else {
+ time.Sleep(v.timeBetweenRetries)
+ }
+ }
+}
+
+// OnStartL1Sync is a method that is called before starting the L1 sync
+func (v *L1BlockCheckerIntegration) OnStartL1Sync(ctx context.Context) bool {
+ return v.checkBackgroundResult(ctx, "before start L1 sync")
+}
+
+// OnStartL2Sync is a method that is called before starting the L2 sync
+func (v *L1BlockCheckerIntegration) OnStartL2Sync(ctx context.Context) bool {
+	return v.checkBackgroundResult(ctx, "before start L2 sync")
+}
+
+// OnResetState is a method that is called after a resetState
+func (v *L1BlockCheckerIntegration) OnResetState(ctx context.Context) {
+ log.Infof("%s L1BlockChecker: after a resetState relaunch background process", logPrefix)
+ v.launch(ctx)
+}
+
+// CheckReorgWrapper is a wrapper over the reorg function of the synchronizer.
+// It checks the result of that function and the result of the background process and decides which one to return
+func (v *L1BlockCheckerIntegration) CheckReorgWrapper(ctx context.Context, reorgFirstBlockOk *state.Block, errReportedByReorgFunc error) (*state.Block, error) {
+ resultBackground := v.getMergedResults()
+ if resultBackground != nil && resultBackground.ReorgDetected {
+		// Background process detected a reorg, decide which one to return
+ firstOkBlockBackgroundCheck, err := v.state.GetPreviousBlockToBlockNumber(ctx, resultBackground.BlockNumber, nil)
+ if err != nil {
+			log.Warnf("%s Error getting the previous block to block number %d where a reorg has been detected: %s. Returning the reorg function values", logPrefix, resultBackground.BlockNumber, err)
+ return reorgFirstBlockOk, errReportedByReorgFunc
+ }
+ if reorgFirstBlockOk == nil || errReportedByReorgFunc != nil {
+			log.Infof("%s Background checker detected a bad block at block %d (first block OK %d) while the regular reorg function did not. Returning the background result", logPrefix,
+				resultBackground.BlockNumber, firstOkBlockBackgroundCheck.BlockNumber)
+ return firstOkBlockBackgroundCheck, nil
+ }
+		if firstOkBlockBackgroundCheck.BlockNumber < reorgFirstBlockOk.BlockNumber {
+			// The background process detected a reorg at an older block
+			log.Warnf("%s Background checker detected a bad block at block %d (first block OK %d) and the regular reorg function returned first block OK: %d. Returning from %d",
+				logPrefix, resultBackground.BlockNumber, firstOkBlockBackgroundCheck.BlockNumber, reorgFirstBlockOk.BlockNumber, firstOkBlockBackgroundCheck.BlockNumber)
+			return firstOkBlockBackgroundCheck, nil
+		} else {
+			// The regular reorg function detected a reorg at an older (or the same) block
+			log.Warnf("%s Background checker detected a bad block at block %d (first block OK %d) and the regular reorg function returned first block OK: %d. Executing from %d",
+				logPrefix, resultBackground.BlockNumber, firstOkBlockBackgroundCheck.BlockNumber, reorgFirstBlockOk.BlockNumber, reorgFirstBlockOk.BlockNumber)
+			return reorgFirstBlockOk, errReportedByReorgFunc
+		}
+ }
+ if resultBackground != nil && !resultBackground.ReorgDetected {
+		// Relaunch the checker; if a reorg had been detected it would be relaunched later, after the reset (OnResetState)
+ v.launch(ctx)
+ }
+	// Background process doesn't have anything, so we return the regular reorg function result
+ return reorgFirstBlockOk, errReportedByReorgFunc
+}
+
+func (v *L1BlockCheckerIntegration) checkBackgroundResult(ctx context.Context, positionMessage string) bool {
+ log.Debugf("%s Checking L1BlockChecker %s", logPrefix, positionMessage)
+ result := v.getMergedResults()
+ if result != nil {
+ if result.ReorgDetected {
+ log.Warnf("%s Checking L1BlockChecker %s: reorg detected %s", logPrefix, positionMessage, result.String())
+ v.executeResult(ctx, *result)
+ }
+ v.launch(ctx)
+ return result.ReorgDetected
+ }
+ return false
+}
+
+func (v *L1BlockCheckerIntegration) getMergedResults() *syncinterfaces.IterationResult {
+ result := v.checker.GetResult()
+	var preResult *syncinterfaces.IterationResult
+ if v.preChecker != nil {
+ preResult = v.preChecker.GetResult()
+ }
+ if preResult == nil {
+ return result
+ }
+ if result == nil {
+ return preResult
+ }
+ // result and preResult have values
+ if result.ReorgDetected && preResult.ReorgDetected {
+		// That is the common case: the main checker should detect older blocks than the preChecker
+ if result.BlockNumber < preResult.BlockNumber {
+ return result
+ }
+ return preResult
+ }
+ if preResult.ReorgDetected {
+ return preResult
+ }
+ return result
+}
+
+func (v *L1BlockCheckerIntegration) onFinishChecker() {
+	log.Infof("%s L1BlockChecker: finished background process, notifying the synchronizer", logPrefix)
+ // Stop both processes
+ v.checker.Stop()
+ if v.preChecker != nil {
+ v.preChecker.Stop()
+ }
+ v.sync.OnDetectedMismatchL1BlockReorg()
+}
+
+func (v *L1BlockCheckerIntegration) launch(ctx context.Context) {
+ log.Infof("%s L1BlockChecker: starting background process...", logPrefix)
+ v.checker.Run(ctx, v.onFinishChecker)
+ if v.preChecker != nil {
+ log.Infof("%s L1BlockChecker: starting background precheck process...", logPrefix)
+ v.preChecker.Run(ctx, v.onFinishChecker)
+ }
+}
+
+func (v *L1BlockCheckerIntegration) executeResult(ctx context.Context, result syncinterfaces.IterationResult) bool {
+ if result.ReorgDetected {
+ for {
+ err := v.sync.ExecuteReorgFromMismatchBlock(result.BlockNumber, result.ReorgMessage)
+ if err == nil {
+ return true
+ }
+ log.Errorf("%s Error executing reorg: %s", logPrefix, err)
+ time.Sleep(v.timeBetweenRetries)
+ }
+ }
+ return false
+}
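For context, a hedged sketch of how these pieces could be wired together. The actual hook-up lives in the synchronizer constructor, which is outside this excerpt; it is assumed here that *AsyncCheck satisfies syncinterfaces.AsyncL1BlockChecker and that the synchronizer object implements SyncCheckReorger.

package example // illustrative only

import (
	"time"

	"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
	"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
)

// newL1BlockCheckIntegration builds the integration with only the main checker;
// passing nil as preChecker disables the pre-check path.
func newL1BlockCheckIntegration(
	l1Client l1_check_block.L1Requester,
	st syncinterfaces.StateFullInterface, // superset of the state interfaces needed here
	safeFetcher l1_check_block.SafeL1BlockNumberFetcher,
	syncObj l1_check_block.SyncCheckReorger,
	forceCheckOnStart bool,
) *l1_check_block.L1BlockCheckerIntegration {
	mainChecker := l1_check_block.NewCheckL1BlockHash(l1Client, st, safeFetcher)
	asyncMain := l1_check_block.NewAsyncCheck(mainChecker)
	return l1_check_block.NewL1BlockCheckerIntegration(
		asyncMain, nil, st, syncObj,
		forceCheckOnStart, time.Second, // retry period is illustrative
	)
}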
diff --git a/synchronizer/l1_check_block/integration_test.go b/synchronizer/l1_check_block/integration_test.go
new file mode 100644
index 0000000000..de79c71351
--- /dev/null
+++ b/synchronizer/l1_check_block/integration_test.go
@@ -0,0 +1,298 @@
+package l1_check_block_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
+ mock_syncinterfaces "github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces/mocks"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ mock_l1_check_block "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block/mocks"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ genericErrorToTest = fmt.Errorf("error")
+)
+
+type testDataIntegration struct {
+ mockChecker *mock_syncinterfaces.AsyncL1BlockChecker
+ mockPreChecker *mock_syncinterfaces.AsyncL1BlockChecker
+ mockState *mock_l1_check_block.StateForL1BlockCheckerIntegration
+ mockSync *mock_l1_check_block.SyncCheckReorger
+ sut *l1_check_block.L1BlockCheckerIntegration
+ ctx context.Context
+ resultOk syncinterfaces.IterationResult
+ resultError syncinterfaces.IterationResult
+ resultReorg syncinterfaces.IterationResult
+}
+
+func newDataIntegration(t *testing.T, forceCheckOnStart bool) *testDataIntegration {
+ return newDataIntegrationOnlyMainChecker(t, forceCheckOnStart)
+}
+
+func newDataIntegrationWithPreChecker(t *testing.T, forceCheckOnStart bool) *testDataIntegration {
+ res := newDataIntegrationOnlyMainChecker(t, forceCheckOnStart)
+ res.mockPreChecker = mock_syncinterfaces.NewAsyncL1BlockChecker(t)
+ res.sut = l1_check_block.NewL1BlockCheckerIntegration(res.mockChecker, res.mockPreChecker, res.mockState, res.mockSync, forceCheckOnStart, time.Millisecond)
+ return res
+}
+
+func newDataIntegrationOnlyMainChecker(t *testing.T, forceCheckOnStart bool) *testDataIntegration {
+ mockChecker := mock_syncinterfaces.NewAsyncL1BlockChecker(t)
+ mockSync := mock_l1_check_block.NewSyncCheckReorger(t)
+ mockState := mock_l1_check_block.NewStateForL1BlockCheckerIntegration(t)
+ sut := l1_check_block.NewL1BlockCheckerIntegration(mockChecker, nil, mockState, mockSync, forceCheckOnStart, time.Millisecond)
+ return &testDataIntegration{
+ mockChecker: mockChecker,
+ mockPreChecker: nil,
+ mockSync: mockSync,
+ mockState: mockState,
+ sut: sut,
+ ctx: context.Background(),
+ resultReorg: syncinterfaces.IterationResult{
+ ReorgDetected: true,
+ BlockNumber: 1234,
+ },
+ resultOk: syncinterfaces.IterationResult{
+ ReorgDetected: false,
+ },
+ resultError: syncinterfaces.IterationResult{
+ Err: genericErrorToTest,
+ ReorgDetected: false,
+ },
+ }
+}
+
+func TestIntegrationIfNoForceCheckOnlyLaunchBackgroundChecker(t *testing.T) {
+ data := newDataIntegration(t, false)
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+func TestIntegrationIfForceCheckRunsSynchronousOneTimeAndAfterLaunchBackgroundChecker(t *testing.T) {
+ data := newDataIntegration(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultOk)
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+func TestIntegrationIfSyncCheckReturnsReorgExecuteIt(t *testing.T) {
+ data := newDataIntegration(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultReorg)
+ data.mockSync.EXPECT().ExecuteReorgFromMismatchBlock(uint64(1234), "").Return(nil)
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+func TestIntegrationIfSyncCheckReturnErrorRetry(t *testing.T) {
+ data := newDataIntegration(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultError).Once()
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultOk).Once()
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+func TestIntegrationIfSyncCheckReturnsReorgExecuteItAndFailsRetry(t *testing.T) {
+ data := newDataIntegration(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultReorg)
+ data.mockSync.EXPECT().ExecuteReorgFromMismatchBlock(uint64(1234), mock.Anything).Return(genericErrorToTest).Once()
+ data.mockSync.EXPECT().ExecuteReorgFromMismatchBlock(uint64(1234), mock.Anything).Return(nil).Once()
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+// OnStart with forced check executes both the check and the preCheck, and launches both in the background
+func TestIntegrationCheckAndPreCheckOnStartForceCheck(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultOk)
+ data.mockPreChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultOk)
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ data.mockPreChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+// OnStart: if the main checker returns a reorg, the preCheck does not need to run
+func TestIntegrationCheckAndPreCheckOnStartMainCheckerReturnReorg(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultReorg)
+ data.mockSync.EXPECT().ExecuteReorgFromMismatchBlock(uint64(1234), mock.Anything).Return(nil).Once()
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ data.mockPreChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+// If the main check is OK but the preCheck returns a reorg, the reorg should be executed
+func TestIntegrationCheckAndPreCheckOnStartPreCheckerReturnReorg(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultOk)
+ data.mockPreChecker.EXPECT().RunSynchronous(data.ctx).Return(data.resultReorg)
+ data.mockSync.EXPECT().ExecuteReorgFromMismatchBlock(uint64(1234), mock.Anything).Return(nil).Once()
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ data.mockPreChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ err := data.sut.OnStart(data.ctx)
+ require.NoError(t, err)
+}
+
+// The process is running in the background, no results yet
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgRunningOnBackground(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(nil)
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.Nil(t, block)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgOneProcessHaveResultOK(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(&data.resultOk)
+ data.mockPreChecker.EXPECT().GetResult().Return(nil)
+	// One has been stopped, so both must be relaunched
+ data.mockChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ data.mockPreChecker.EXPECT().Run(data.ctx, mock.Anything).Return()
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.Nil(t, block)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgMainCheckerReorg(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(&data.resultReorg)
+ data.mockPreChecker.EXPECT().GetResult().Return(nil)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1234), nil).Return(&state.Block{
+ BlockNumber: data.resultReorg.BlockNumber - 1,
+ }, nil)
+	// One has been stopped, but it is going to be relaunched by the OnResetState call after the reset
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.NotNil(t, block)
+ require.Equal(t, data.resultReorg.BlockNumber-1, block.BlockNumber)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgPreCheckerReorg(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(&data.resultReorg)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1234), nil).Return(&state.Block{
+ BlockNumber: data.resultReorg.BlockNumber - 1,
+ }, nil)
+	// One has been stopped, but it is going to be relaunched by the OnResetState call after the reset
+
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.NotNil(t, block)
+ require.Equal(t, data.resultReorg.BlockNumber-1, block.BlockNumber)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgBothReorgWinOldest1(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ reorgMain := data.resultReorg
+ reorgMain.BlockNumber = 1235
+ data.mockChecker.EXPECT().GetResult().Return(&reorgMain)
+ reorgPre := data.resultReorg
+ reorgPre.BlockNumber = 1236
+ data.mockPreChecker.EXPECT().GetResult().Return(&reorgPre)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1235), nil).Return(&state.Block{
+ BlockNumber: 1234,
+ }, nil)
+
+	// Both have been stopped, but they are going to be relaunched by the OnResetState call after the reset
+
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.NotNil(t, block)
+ require.Equal(t, uint64(1234), block.BlockNumber)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckAndPreCheckOnOnCheckReorgBothReorgWinOldest2(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ reorgMain := data.resultReorg
+ reorgMain.BlockNumber = 1236
+ data.mockChecker.EXPECT().GetResult().Return(&reorgMain)
+ reorgPre := data.resultReorg
+ reorgPre.BlockNumber = 1235
+ data.mockPreChecker.EXPECT().GetResult().Return(&reorgPre)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1235), nil).Return(&state.Block{
+ BlockNumber: 1234,
+ }, nil)
+	// Both have been stopped, but they are going to be relaunched by the OnResetState call after the reset
+
+ block, err := data.sut.CheckReorgWrapper(data.ctx, nil, nil)
+ require.NotNil(t, block)
+ require.Equal(t, uint64(1234), block.BlockNumber)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckReorgWrapperBypassReorgFuncIfNoBackgroundData(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(nil)
+ reorgFuncBlock := &state.Block{
+ BlockNumber: 1234,
+ }
+ reorgFuncErr := fmt.Errorf("error")
+ block, err := data.sut.CheckReorgWrapper(data.ctx, reorgFuncBlock, reorgFuncErr)
+ require.Equal(t, reorgFuncBlock, block)
+ require.Equal(t, reorgFuncErr, err)
+}
+
+func TestIntegrationCheckReorgWrapperChooseOldestReorgFunc(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(&data.resultReorg)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1234), nil).Return(&state.Block{
+ BlockNumber: 1233,
+ }, nil)
+
+ reorgFuncBlock := &state.Block{
+ BlockNumber: 1230,
+ }
+ block, err := data.sut.CheckReorgWrapper(data.ctx, reorgFuncBlock, nil)
+ require.Equal(t, reorgFuncBlock, block)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckReorgWrapperChooseOldestBackgroundCheck(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(&data.resultReorg)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1234), nil).Return(&state.Block{
+ BlockNumber: 1233,
+ }, nil)
+
+ reorgFuncBlock := &state.Block{
+ BlockNumber: 1240,
+ }
+ block, err := data.sut.CheckReorgWrapper(data.ctx, reorgFuncBlock, nil)
+ require.Equal(t, uint64(1233), block.BlockNumber)
+ require.NoError(t, err)
+}
+
+func TestIntegrationCheckReorgWrapperIgnoreReorgFuncIfError(t *testing.T) {
+ data := newDataIntegrationWithPreChecker(t, true)
+ data.mockChecker.EXPECT().GetResult().Return(nil)
+ data.mockPreChecker.EXPECT().GetResult().Return(&data.resultReorg)
+ data.mockState.EXPECT().GetPreviousBlockToBlockNumber(data.ctx, uint64(1234), nil).Return(&state.Block{
+ BlockNumber: 1233,
+ }, nil)
+
+ reorgFuncBlock := &state.Block{
+ BlockNumber: 1230,
+ }
+ reorgFuncErr := fmt.Errorf("error")
+ block, err := data.sut.CheckReorgWrapper(data.ctx, reorgFuncBlock, reorgFuncErr)
+ require.Equal(t, uint64(1233), block.BlockNumber)
+ require.NoError(t, err)
+}
diff --git a/synchronizer/l1_check_block/mocks/l1_block_checker.go b/synchronizer/l1_check_block/mocks/l1_block_checker.go
new file mode 100644
index 0000000000..6f0eab9acb
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/l1_block_checker.go
@@ -0,0 +1,82 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+)
+
+// L1BlockChecker is an autogenerated mock type for the L1BlockChecker type
+type L1BlockChecker struct {
+ mock.Mock
+}
+
+type L1BlockChecker_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *L1BlockChecker) EXPECT() *L1BlockChecker_Expecter {
+ return &L1BlockChecker_Expecter{mock: &_m.Mock}
+}
+
+// Step provides a mock function with given fields: ctx
+func (_m *L1BlockChecker) Step(ctx context.Context) error {
+ ret := _m.Called(ctx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for Step")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+ r0 = rf(ctx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// L1BlockChecker_Step_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Step'
+type L1BlockChecker_Step_Call struct {
+ *mock.Call
+}
+
+// Step is a helper method to define mock.On call
+// - ctx context.Context
+func (_e *L1BlockChecker_Expecter) Step(ctx interface{}) *L1BlockChecker_Step_Call {
+ return &L1BlockChecker_Step_Call{Call: _e.mock.On("Step", ctx)}
+}
+
+func (_c *L1BlockChecker_Step_Call) Run(run func(ctx context.Context)) *L1BlockChecker_Step_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context))
+ })
+ return _c
+}
+
+func (_c *L1BlockChecker_Step_Call) Return(_a0 error) *L1BlockChecker_Step_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *L1BlockChecker_Step_Call) RunAndReturn(run func(context.Context) error) *L1BlockChecker_Step_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewL1BlockChecker creates a new instance of L1BlockChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewL1BlockChecker(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *L1BlockChecker {
+ mock := &L1BlockChecker{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/l1_requester.go b/synchronizer/l1_check_block/mocks/l1_requester.go
new file mode 100644
index 0000000000..713cc4a5ef
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/l1_requester.go
@@ -0,0 +1,98 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+ big "math/big"
+
+ mock "github.com/stretchr/testify/mock"
+
+ types "github.com/ethereum/go-ethereum/core/types"
+)
+
+// L1Requester is an autogenerated mock type for the L1Requester type
+type L1Requester struct {
+ mock.Mock
+}
+
+type L1Requester_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *L1Requester) EXPECT() *L1Requester_Expecter {
+ return &L1Requester_Expecter{mock: &_m.Mock}
+}
+
+// HeaderByNumber provides a mock function with given fields: ctx, number
+func (_m *L1Requester) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {
+ ret := _m.Called(ctx, number)
+
+ if len(ret) == 0 {
+ panic("no return value specified for HeaderByNumber")
+ }
+
+ var r0 *types.Header
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok {
+ return rf(ctx, number)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok {
+ r0 = rf(ctx, number)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*types.Header)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok {
+ r1 = rf(ctx, number)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// L1Requester_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber'
+type L1Requester_HeaderByNumber_Call struct {
+ *mock.Call
+}
+
+// HeaderByNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - number *big.Int
+func (_e *L1Requester_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *L1Requester_HeaderByNumber_Call {
+ return &L1Requester_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)}
+}
+
+func (_c *L1Requester_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *L1Requester_HeaderByNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(*big.Int))
+ })
+ return _c
+}
+
+func (_c *L1Requester_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *L1Requester_HeaderByNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *L1Requester_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *L1Requester_HeaderByNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewL1Requester creates a new instance of L1Requester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewL1Requester(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *L1Requester {
+ mock := &L1Requester{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/safe_l1_block_number_fetcher.go b/synchronizer/l1_check_block/mocks/safe_l1_block_number_fetcher.go
new file mode 100644
index 0000000000..abb043afb4
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/safe_l1_block_number_fetcher.go
@@ -0,0 +1,139 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+
+ l1_check_block "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ mock "github.com/stretchr/testify/mock"
+)
+
+// SafeL1BlockNumberFetcher is an autogenerated mock type for the SafeL1BlockNumberFetcher type
+type SafeL1BlockNumberFetcher struct {
+ mock.Mock
+}
+
+type SafeL1BlockNumberFetcher_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *SafeL1BlockNumberFetcher) EXPECT() *SafeL1BlockNumberFetcher_Expecter {
+ return &SafeL1BlockNumberFetcher_Expecter{mock: &_m.Mock}
+}
+
+// Description provides a mock function with given fields:
+func (_m *SafeL1BlockNumberFetcher) Description() string {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Description")
+ }
+
+ var r0 string
+ if rf, ok := ret.Get(0).(func() string); ok {
+ r0 = rf()
+ } else {
+ r0 = ret.Get(0).(string)
+ }
+
+ return r0
+}
+
+// SafeL1BlockNumberFetcher_Description_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Description'
+type SafeL1BlockNumberFetcher_Description_Call struct {
+ *mock.Call
+}
+
+// Description is a helper method to define mock.On call
+func (_e *SafeL1BlockNumberFetcher_Expecter) Description() *SafeL1BlockNumberFetcher_Description_Call {
+ return &SafeL1BlockNumberFetcher_Description_Call{Call: _e.mock.On("Description")}
+}
+
+func (_c *SafeL1BlockNumberFetcher_Description_Call) Run(run func()) *SafeL1BlockNumberFetcher_Description_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *SafeL1BlockNumberFetcher_Description_Call) Return(_a0 string) *SafeL1BlockNumberFetcher_Description_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *SafeL1BlockNumberFetcher_Description_Call) RunAndReturn(run func() string) *SafeL1BlockNumberFetcher_Description_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// GetSafeBlockNumber provides a mock function with given fields: ctx, l1Client
+func (_m *SafeL1BlockNumberFetcher) GetSafeBlockNumber(ctx context.Context, l1Client l1_check_block.L1Requester) (uint64, error) {
+ ret := _m.Called(ctx, l1Client)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetSafeBlockNumber")
+ }
+
+ var r0 uint64
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, l1_check_block.L1Requester) (uint64, error)); ok {
+ return rf(ctx, l1Client)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, l1_check_block.L1Requester) uint64); ok {
+ r0 = rf(ctx, l1Client)
+ } else {
+ r0 = ret.Get(0).(uint64)
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, l1_check_block.L1Requester) error); ok {
+ r1 = rf(ctx, l1Client)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSafeBlockNumber'
+type SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call struct {
+ *mock.Call
+}
+
+// GetSafeBlockNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - l1Client l1_check_block.L1Requester
+func (_e *SafeL1BlockNumberFetcher_Expecter) GetSafeBlockNumber(ctx interface{}, l1Client interface{}) *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call {
+ return &SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call{Call: _e.mock.On("GetSafeBlockNumber", ctx, l1Client)}
+}
+
+func (_c *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call) Run(run func(ctx context.Context, l1Client l1_check_block.L1Requester)) *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(l1_check_block.L1Requester))
+ })
+ return _c
+}
+
+func (_c *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call) Return(_a0 uint64, _a1 error) *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call) RunAndReturn(run func(context.Context, l1_check_block.L1Requester) (uint64, error)) *SafeL1BlockNumberFetcher_GetSafeBlockNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewSafeL1BlockNumberFetcher creates a new instance of SafeL1BlockNumberFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewSafeL1BlockNumberFetcher(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *SafeL1BlockNumberFetcher {
+ mock := &SafeL1BlockNumberFetcher{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/state_for_l1_block_checker_integration.go b/synchronizer/l1_check_block/mocks/state_for_l1_block_checker_integration.go
new file mode 100644
index 0000000000..32fbb30b86
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/state_for_l1_block_checker_integration.go
@@ -0,0 +1,100 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ pgx "github.com/jackc/pgx/v4"
+
+ state "github.com/0xPolygonHermez/zkevm-node/state"
+)
+
+// StateForL1BlockCheckerIntegration is an autogenerated mock type for the StateForL1BlockCheckerIntegration type
+type StateForL1BlockCheckerIntegration struct {
+ mock.Mock
+}
+
+type StateForL1BlockCheckerIntegration_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *StateForL1BlockCheckerIntegration) EXPECT() *StateForL1BlockCheckerIntegration_Expecter {
+ return &StateForL1BlockCheckerIntegration_Expecter{mock: &_m.Mock}
+}
+
+// GetPreviousBlockToBlockNumber provides a mock function with given fields: ctx, blockNumber, dbTx
+func (_m *StateForL1BlockCheckerIntegration) GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ ret := _m.Called(ctx, blockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetPreviousBlockToBlockNumber")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok {
+ return rf(ctx, blockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok {
+ r0 = rf(ctx, blockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, blockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPreviousBlockToBlockNumber'
+type StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call struct {
+ *mock.Call
+}
+
+// GetPreviousBlockToBlockNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StateForL1BlockCheckerIntegration_Expecter) GetPreviousBlockToBlockNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call {
+ return &StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call{Call: _e.mock.On("GetPreviousBlockToBlockNumber", ctx, blockNumber, dbTx)}
+}
+
+func (_c *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call) Return(_a0 *state.Block, _a1 error) *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateForL1BlockCheckerIntegration_GetPreviousBlockToBlockNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewStateForL1BlockCheckerIntegration creates a new instance of StateForL1BlockCheckerIntegration. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewStateForL1BlockCheckerIntegration(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *StateForL1BlockCheckerIntegration {
+ mock := &StateForL1BlockCheckerIntegration{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/state_interfacer.go b/synchronizer/l1_check_block/mocks/state_interfacer.go
new file mode 100644
index 0000000000..4855ba5eb1
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/state_interfacer.go
@@ -0,0 +1,149 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ pgx "github.com/jackc/pgx/v4"
+
+ state "github.com/0xPolygonHermez/zkevm-node/state"
+)
+
+// StateInterfacer is an autogenerated mock type for the StateInterfacer type
+type StateInterfacer struct {
+ mock.Mock
+}
+
+type StateInterfacer_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *StateInterfacer) EXPECT() *StateInterfacer_Expecter {
+ return &StateInterfacer_Expecter{mock: &_m.Mock}
+}
+
+// GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx
+func (_m *StateInterfacer) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) {
+ ret := _m.Called(ctx, fromBlockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetFirstUncheckedBlock")
+ }
+
+ var r0 *state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok {
+ return rf(ctx, fromBlockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok {
+ r0 = rf(ctx, fromBlockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, fromBlockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StateInterfacer_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstUncheckedBlock'
+type StateInterfacer_GetFirstUncheckedBlock_Call struct {
+ *mock.Call
+}
+
+// GetFirstUncheckedBlock is a helper method to define mock.On call
+// - ctx context.Context
+// - fromBlockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StateInterfacer_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StateInterfacer_GetFirstUncheckedBlock_Call {
+ return &StateInterfacer_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)}
+}
+
+func (_c *StateInterfacer_GetFirstUncheckedBlock_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StateInterfacer_GetFirstUncheckedBlock_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateInterfacer_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StateInterfacer_GetFirstUncheckedBlock_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StateInterfacer_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateInterfacer_GetFirstUncheckedBlock_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx
+func (_m *StateInterfacer) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error {
+ ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateCheckedBlockByNumber")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok {
+ r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// StateInterfacer_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber'
+type StateInterfacer_UpdateCheckedBlockByNumber_Call struct {
+ *mock.Call
+}
+
+// UpdateCheckedBlockByNumber is a helper method to define mock.On call
+// - ctx context.Context
+// - blockNumber uint64
+// - newCheckedStatus bool
+// - dbTx pgx.Tx
+func (_e *StateInterfacer_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StateInterfacer_UpdateCheckedBlockByNumber_Call {
+ return &StateInterfacer_UpdateCheckedBlockByNumber_Call{Call: _e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)}
+}
+
+func (_c *StateInterfacer_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx)) *StateInterfacer_UpdateCheckedBlockByNumber_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StateInterfacer_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StateInterfacer_UpdateCheckedBlockByNumber_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *StateInterfacer_UpdateCheckedBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StateInterfacer_UpdateCheckedBlockByNumber_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewStateInterfacer creates a new instance of StateInterfacer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewStateInterfacer(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *StateInterfacer {
+ mock := &StateInterfacer{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/state_pre_check_interfacer.go b/synchronizer/l1_check_block/mocks/state_pre_check_interfacer.go
new file mode 100644
index 0000000000..2bf5522f60
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/state_pre_check_interfacer.go
@@ -0,0 +1,101 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import (
+ context "context"
+
+ mock "github.com/stretchr/testify/mock"
+
+ pgx "github.com/jackc/pgx/v4"
+
+ state "github.com/0xPolygonHermez/zkevm-node/state"
+)
+
+// StatePreCheckInterfacer is an autogenerated mock type for the StatePreCheckInterfacer type
+type StatePreCheckInterfacer struct {
+ mock.Mock
+}
+
+type StatePreCheckInterfacer_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *StatePreCheckInterfacer) EXPECT() *StatePreCheckInterfacer_Expecter {
+ return &StatePreCheckInterfacer_Expecter{mock: &_m.Mock}
+}
+
+// GetUncheckedBlocks provides a mock function with given fields: ctx, fromBlockNumber, toBlockNumber, dbTx
+func (_m *StatePreCheckInterfacer) GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error) {
+ ret := _m.Called(ctx, fromBlockNumber, toBlockNumber, dbTx)
+
+ if len(ret) == 0 {
+ panic("no return value specified for GetUncheckedBlocks")
+ }
+
+ var r0 []*state.Block
+ var r1 error
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)); ok {
+ return rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ }
+ if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.Block); ok {
+ r0 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).([]*state.Block)
+ }
+ }
+
+ if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok {
+ r1 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// StatePreCheckInterfacer_GetUncheckedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUncheckedBlocks'
+type StatePreCheckInterfacer_GetUncheckedBlocks_Call struct {
+ *mock.Call
+}
+
+// GetUncheckedBlocks is a helper method to define mock.On call
+// - ctx context.Context
+// - fromBlockNumber uint64
+// - toBlockNumber uint64
+// - dbTx pgx.Tx
+func (_e *StatePreCheckInterfacer_Expecter) GetUncheckedBlocks(ctx interface{}, fromBlockNumber interface{}, toBlockNumber interface{}, dbTx interface{}) *StatePreCheckInterfacer_GetUncheckedBlocks_Call {
+ return &StatePreCheckInterfacer_GetUncheckedBlocks_Call{Call: _e.mock.On("GetUncheckedBlocks", ctx, fromBlockNumber, toBlockNumber, dbTx)}
+}
+
+func (_c *StatePreCheckInterfacer_GetUncheckedBlocks_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx)) *StatePreCheckInterfacer_GetUncheckedBlocks_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(pgx.Tx))
+ })
+ return _c
+}
+
+func (_c *StatePreCheckInterfacer_GetUncheckedBlocks_Call) Return(_a0 []*state.Block, _a1 error) *StatePreCheckInterfacer_GetUncheckedBlocks_Call {
+ _c.Call.Return(_a0, _a1)
+ return _c
+}
+
+func (_c *StatePreCheckInterfacer_GetUncheckedBlocks_Call) RunAndReturn(run func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)) *StatePreCheckInterfacer_GetUncheckedBlocks_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewStatePreCheckInterfacer creates a new instance of StatePreCheckInterfacer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewStatePreCheckInterfacer(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *StatePreCheckInterfacer {
+ mock := &StatePreCheckInterfacer{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/mocks/sync_check_reorger.go b/synchronizer/l1_check_block/mocks/sync_check_reorger.go
new file mode 100644
index 0000000000..bffd02cb87
--- /dev/null
+++ b/synchronizer/l1_check_block/mocks/sync_check_reorger.go
@@ -0,0 +1,111 @@
+// Code generated by mockery. DO NOT EDIT.
+
+package mock_l1_check_block
+
+import mock "github.com/stretchr/testify/mock"
+
+// SyncCheckReorger is an autogenerated mock type for the SyncCheckReorger type
+type SyncCheckReorger struct {
+ mock.Mock
+}
+
+type SyncCheckReorger_Expecter struct {
+ mock *mock.Mock
+}
+
+func (_m *SyncCheckReorger) EXPECT() *SyncCheckReorger_Expecter {
+ return &SyncCheckReorger_Expecter{mock: &_m.Mock}
+}
+
+// ExecuteReorgFromMismatchBlock provides a mock function with given fields: blockNumber, reason
+func (_m *SyncCheckReorger) ExecuteReorgFromMismatchBlock(blockNumber uint64, reason string) error {
+ ret := _m.Called(blockNumber, reason)
+
+ if len(ret) == 0 {
+ panic("no return value specified for ExecuteReorgFromMismatchBlock")
+ }
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(uint64, string) error); ok {
+ r0 = rf(blockNumber, reason)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
+
+// SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteReorgFromMismatchBlock'
+type SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call struct {
+ *mock.Call
+}
+
+// ExecuteReorgFromMismatchBlock is a helper method to define mock.On call
+// - blockNumber uint64
+// - reason string
+func (_e *SyncCheckReorger_Expecter) ExecuteReorgFromMismatchBlock(blockNumber interface{}, reason interface{}) *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call {
+ return &SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call{Call: _e.mock.On("ExecuteReorgFromMismatchBlock", blockNumber, reason)}
+}
+
+func (_c *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call) Run(run func(blockNumber uint64, reason string)) *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(uint64), args[1].(string))
+ })
+ return _c
+}
+
+func (_c *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call) Return(_a0 error) *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call) RunAndReturn(run func(uint64, string) error) *SyncCheckReorger_ExecuteReorgFromMismatchBlock_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// OnDetectedMismatchL1BlockReorg provides a mock function with given fields:
+func (_m *SyncCheckReorger) OnDetectedMismatchL1BlockReorg() {
+ _m.Called()
+}
+
+// SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnDetectedMismatchL1BlockReorg'
+type SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call struct {
+ *mock.Call
+}
+
+// OnDetectedMismatchL1BlockReorg is a helper method to define mock.On call
+func (_e *SyncCheckReorger_Expecter) OnDetectedMismatchL1BlockReorg() *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call {
+ return &SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call{Call: _e.mock.On("OnDetectedMismatchL1BlockReorg")}
+}
+
+func (_c *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call) Run(run func()) *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call) Return() *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call) RunAndReturn(run func()) *SyncCheckReorger_OnDetectedMismatchL1BlockReorg_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
+// NewSyncCheckReorger creates a new instance of SyncCheckReorger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewSyncCheckReorger(t interface {
+ mock.TestingT
+ Cleanup(func())
+}) *SyncCheckReorger {
+ mock := &SyncCheckReorger{}
+ mock.Mock.Test(t)
+
+ t.Cleanup(func() { mock.AssertExpectations(t) })
+
+ return mock
+}
diff --git a/synchronizer/l1_check_block/pre_check_l1block.go b/synchronizer/l1_check_block/pre_check_l1block.go
new file mode 100644
index 0000000000..431777f705
--- /dev/null
+++ b/synchronizer/l1_check_block/pre_check_l1block.go
@@ -0,0 +1,139 @@
+package l1_check_block
+
+// This file implements a pre-check of blocks, but it doesn't mark them as checked.
+// It checks the blocks inside a segment, for example:
+//   real check point: SAFE
+//   pre-check segment: (SAFE+1) -> (LATEST-32)
+// It gets all the pending (unchecked) blocks in the segment and checks them one by one.
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/jackc/pgx/v4"
+)
+
+var (
+ // ErrDeSync indicates that the state has changed between the start and the end of the verification
+ ErrDeSync = errors.New("DeSync: a block hash is different from the state block hash")
+)
+
+// StatePreCheckInterfacer is an interface for the state
+type StatePreCheckInterfacer interface {
+ GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error)
+}
+
+// PreCheckL1BlockHash is a struct that implements a checker of L1Block hash
+type PreCheckL1BlockHash struct {
+ L1Client L1Requester
+ State StatePreCheckInterfacer
+ InitialSegmentBlockNumber SafeL1BlockNumberFetcher
+ EndSegmentBlockNumber SafeL1BlockNumberFetcher
+}
+
+// NewPreCheckL1BlockHash creates a new PreCheckL1BlockHash
+func NewPreCheckL1BlockHash(l1Client L1Requester, state StatePreCheckInterfacer,
+ initial, end SafeL1BlockNumberFetcher) *PreCheckL1BlockHash {
+ return &PreCheckL1BlockHash{
+ L1Client: l1Client,
+ State: state,
+ InitialSegmentBlockNumber: initial,
+ EndSegmentBlockNumber: end,
+ }
+}
+
+// Name is a method that returns the name of the checker
+func (p *PreCheckL1BlockHash) Name() string {
+ return logPrefix + ":memory_check: "
+}
+
+// Step checks the L1 block hashes; it runs until all pending blocks in the segment are checked and then returns
+func (p *PreCheckL1BlockHash) Step(ctx context.Context) error {
+ from, err := p.InitialSegmentBlockNumber.GetSafeBlockNumber(ctx, p.L1Client)
+ if err != nil {
+ return err
+ }
+ to, err := p.EndSegmentBlockNumber.GetSafeBlockNumber(ctx, p.L1Client)
+ if err != nil {
+ return err
+ }
+ if from > to {
+ log.Warnf("%s: fromBlockNumber(%s) %d is greater than toBlockNumber(%s) %d, Check configuration", p.Name(), p.InitialSegmentBlockNumber.Description(), from, p.EndSegmentBlockNumber.Description(), to)
+ return nil
+ }
+
+ blocksToCheck, err := p.State.GetUncheckedBlocks(ctx, from, to, nil)
+ if err != nil {
+ log.Warnf("%s can't get unchecked blocks, so it discard the reorg error", p.Name())
+ return err
+ }
+ msg := fmt.Sprintf("%s: Checking blocks from (%s) %d to (%s) %d -> len(blocks)=%d", p.Name(), p.InitialSegmentBlockNumber.Description(), from, p.EndSegmentBlockNumber.Description(), to, len(blocksToCheck))
+ if len(blocksToCheck) == 0 {
+ log.Debugf(msg)
+ return nil
+ }
+ log.Infof(msg)
+ startTime := time.Now()
+ for _, block := range blocksToCheck {
+ // check block
+ err = CheckBlockHash(ctx, block, p.L1Client, p.Name())
+ if common.IsReorgError(err) {
+ // Double-check that the block in the state is still the same
+ log.Debugf("%s: Reorg detected at blockNumber: %d, checking that the block on State hasn't changed", p.Name(), block.BlockNumber)
+ isTheSame, errBlockIsTheSame := p.checkThatStateBlockIsTheSame(ctx, block)
+ if errBlockIsTheSame != nil {
+ log.Warnf("%s can't double-check that blockNumber %d haven't changed, so it discard the reorg error", p.Name(), block.BlockNumber)
+ return err
+ }
+ if !isTheSame {
+ log.Infof("%s: DeSync detected, blockNumber: %d is different now that when we started the check", p.Name(), block.BlockNumber)
+ return ErrDeSync
+ }
+ log.Infof("%s: Reorg detected and verified the state block, blockNumber: %d", p.Name(), block.BlockNumber)
+ return err
+ }
+ if err != nil {
+ return err
+ }
+ }
+ elapsed := time.Since(startTime)
+ log.Infof("%s: Checked blocks from (%s) %d to (%s) %d -> len(blocks):%d elapsed: %s", p.Name(), p.InitialSegmentBlockNumber.Description(), from, p.EndSegmentBlockNumber.Description(), to, len(blocksToCheck), elapsed.String())
+
+ return nil
+}
+
+// checkThatStateBlockIsTheSame checks that the given block is still present and unchanged in the state.
+// It returns true if the block is the same.
+func (p *PreCheckL1BlockHash) checkThatStateBlockIsTheSame(ctx context.Context, block *state.Block) (bool, error) {
+ blocks, err := p.State.GetUncheckedBlocks(ctx, block.BlockNumber, block.BlockNumber, nil)
+ if err != nil {
+ log.Warnf("%s: Fails to get blockNumber %d in state .Err:%s", p.Name(), block.BlockNumber, err.Error())
+ return false, err
+ }
+ if len(blocks) == 0 {
+ // The block has been checked or deleted in the meantime, so it is not the same
+ log.Debugf("%s: The blockNumber %d is no longer unchecked in the state (it was checked or deleted)", p.Name(), block.BlockNumber)
+ return false, nil
+ }
+ stateBlock := blocks[0]
+ if stateBlock.BlockNumber != block.BlockNumber {
+ msg := fmt.Sprintf("%s: The blockNumber returned by state %d is different from the state blockNumber %d",
+ p.Name(), block.BlockNumber, stateBlock.BlockNumber)
+ log.Warn(msg)
+ return false, fmt.Errorf(msg)
+ }
+ if stateBlock.BlockHash != block.BlockHash {
+ msg := fmt.Sprintf("%s: The blockNumber %d differs the hash checked %s from current in state %s",
+ p.Name(), block.BlockNumber, block.BlockHash.String(), stateBlock.BlockHash.String())
+ log.Warn(msg)
+ return false, nil
+ }
+ // The block is the same
+ return true, nil
+}
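For readers of this PR, a minimal usage sketch of the pre-checker above (not part of the diff): it wires the two segment boundaries described in the file header, SAFE+1 to LATEST-32, and runs a single Step. The l1URL parameter and the stateImpl argument are illustrative assumptions; stateImpl stands for any implementation of StatePreCheckInterfacer, such as the node's state.

package l1precheck_example

import (
	"context"

	"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
	"github.com/ethereum/go-ethereum/ethclient"
)

// runPreCheck is a sketch only: it builds the segment (SAFE+1) -> (LATEST-32)
// and executes one pre-check pass. A returned reorg error (or ErrDeSync) means
// a mismatch between the state blocks and L1 was confirmed.
func runPreCheck(ctx context.Context, l1URL string, stateImpl l1_check_block.StatePreCheckInterfacer) error {
	l1Client, err := ethclient.Dial(l1URL) // *ethclient.Client satisfies L1Requester (HeaderByNumber)
	if err != nil {
		return err
	}
	preChecker := l1_check_block.NewPreCheckL1BlockHash(l1Client, stateImpl,
		l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint("safe"), 1),
		l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint("latest"), -32),
	)
	return preChecker.Step(ctx)
}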
diff --git a/synchronizer/l1_check_block/pre_check_l1block_test.go b/synchronizer/l1_check_block/pre_check_l1block_test.go
new file mode 100644
index 0000000000..39c359a513
--- /dev/null
+++ b/synchronizer/l1_check_block/pre_check_l1block_test.go
@@ -0,0 +1,144 @@
+package l1_check_block_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/0xPolygonHermez/zkevm-node/state"
+ commonsync "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ mock_l1_check_block "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block/mocks"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/stretchr/testify/require"
+)
+
+type testPreCheckData struct {
+ sut *l1_check_block.PreCheckL1BlockHash
+ mockL1Client *mock_l1_check_block.L1Requester
+ mockState *mock_l1_check_block.StatePreCheckInterfacer
+ mockInitialFetch *mock_l1_check_block.SafeL1BlockNumberFetcher
+ mockEndFetch *mock_l1_check_block.SafeL1BlockNumberFetcher
+ ctx context.Context
+ stateBlocks []*state.Block
+}
+
+func newPreCheckData(t *testing.T) *testPreCheckData {
+ mockL1Client := mock_l1_check_block.NewL1Requester(t)
+ mockState := mock_l1_check_block.NewStatePreCheckInterfacer(t)
+ mockInitialFetch := mock_l1_check_block.NewSafeL1BlockNumberFetcher(t)
+ mockEndFetch := mock_l1_check_block.NewSafeL1BlockNumberFetcher(t)
+ sut := l1_check_block.NewPreCheckL1BlockHash(mockL1Client, mockState, mockInitialFetch, mockEndFetch)
+ return &testPreCheckData{
+ sut: sut,
+ mockL1Client: mockL1Client,
+ mockState: mockState,
+ mockInitialFetch: mockInitialFetch,
+ mockEndFetch: mockEndFetch,
+ ctx: context.Background(),
+ stateBlocks: []*state.Block{
+ {
+ BlockNumber: 1234,
+ BlockHash: common.HexToHash("0xd77dd3a9ee6f9202ca5a75024b7d9cbd3d7436b2910d450f88c261c0089c0cd9"),
+ },
+ {
+ BlockNumber: 1237,
+ BlockHash: common.HexToHash("0x8faffac37f561c18917c33ff3540262ecfbe11a367b4e1c48181326cd8ba347f"),
+ },
+ },
+ }
+}
+
+// If from > to, the step is ignored because there are no blocks to check
+func TestPreCheckL1BlockFromGreaterThanTo(t *testing.T) {
+ data := newPreCheckData(t)
+ data.mockInitialFetch.EXPECT().Description().Return("initial")
+ data.mockEndFetch.EXPECT().Description().Return("end")
+ data.mockInitialFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1234), nil)
+ data.mockEndFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1230), nil)
+
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+// No blocks on state -> nothing to do
+func TestPreCheckL1BlockNoBlocksOnState(t *testing.T) {
+ data := newPreCheckData(t)
+ data.mockInitialFetch.EXPECT().Description().Return("initial")
+ data.mockEndFetch.EXPECT().Description().Return("end")
+ data.mockInitialFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1234), nil)
+ data.mockEndFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1250), nil)
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1234), uint64(1250), nil).Return(nil, nil)
+
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+func TestPreCheckL1BlockBlocksMatch(t *testing.T) {
+ data := newPreCheckData(t)
+ data.mockInitialFetch.EXPECT().Description().Return("initial")
+ data.mockEndFetch.EXPECT().Description().Return("end")
+ data.mockInitialFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1234), nil)
+ data.mockEndFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1250), nil)
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1234), uint64(1250), nil).Return(data.stateBlocks, nil)
+ l1Block1 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[0].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[0].BlockNumber))).Return(l1Block1, nil)
+ l1Block2 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[1].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[1].BlockNumber))).Return(l1Block2, nil)
+ //data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1237), uint64(1237), nil).Return(data.stateBlocks[0:1], nil)
+
+ res := data.sut.Step(data.ctx)
+ require.NoError(t, res)
+}
+
+func TestPreCheckL1BlockBlocksMismatch(t *testing.T) {
+ data := newPreCheckData(t)
+ data.mockInitialFetch.EXPECT().Description().Return("initial")
+ data.mockEndFetch.EXPECT().Description().Return("end")
+ data.mockInitialFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1234), nil)
+ data.mockEndFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1250), nil)
+ data.stateBlocks[1].BlockHash = common.HexToHash("0x12345678901234567890123456789012345678901234567890")
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1234), uint64(1250), nil).Return(data.stateBlocks, nil)
+ l1Block1 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[0].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[0].BlockNumber))).Return(l1Block1, nil)
+ l1Block2 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[1].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[1].BlockNumber))).Return(l1Block2, nil)
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1237), uint64(1237), nil).Return(data.stateBlocks[1:2], nil)
+
+ res := data.sut.Step(data.ctx)
+ require.Error(t, res)
+ resErr, ok := res.(*commonsync.ReorgError)
+ require.True(t, ok, "The error must be ReorgError")
+ require.Equal(t, uint64(1237), resErr.BlockNumber)
+}
+
+func TestPreCheckL1BlockBlocksMismatchButIsNoLongerInState(t *testing.T) {
+ data := newPreCheckData(t)
+ data.mockInitialFetch.EXPECT().Description().Return("initial")
+ data.mockEndFetch.EXPECT().Description().Return("end")
+ data.mockInitialFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1234), nil)
+ data.mockEndFetch.EXPECT().GetSafeBlockNumber(data.ctx, data.mockL1Client).Return(uint64(1250), nil)
+ data.stateBlocks[1].BlockHash = common.HexToHash("0x12345678901234567890123456789012345678901234567890")
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1234), uint64(1250), nil).Return(data.stateBlocks, nil)
+ l1Block1 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[0].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[0].BlockNumber))).Return(l1Block1, nil)
+ l1Block2 := &types.Header{
+ Number: big.NewInt(int64(data.stateBlocks[1].BlockNumber)),
+ }
+ data.mockL1Client.EXPECT().HeaderByNumber(data.ctx, big.NewInt(int64(data.stateBlocks[1].BlockNumber))).Return(l1Block2, nil)
+ data.mockState.EXPECT().GetUncheckedBlocks(data.ctx, uint64(1237), uint64(1237), nil).Return(nil, nil)
+
+ res := data.sut.Step(data.ctx)
+ require.ErrorIs(t, res, l1_check_block.ErrDeSync)
+}
diff --git a/synchronizer/l1_check_block/safe_l1_block.go b/synchronizer/l1_check_block/safe_l1_block.go
new file mode 100644
index 0000000000..7b767b4900
--- /dev/null
+++ b/synchronizer/l1_check_block/safe_l1_block.go
@@ -0,0 +1,120 @@
+package l1_check_block
+
+import (
+ "context"
+ "fmt"
+ "math/big"
+
+ "github.com/0xPolygonHermez/zkevm-node/log"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// L1BlockPoint is an enum that represents the L1 block reference point used for the check
+type L1BlockPoint int
+
+const (
+ // FinalizedBlockNumber is the finalized block number
+ FinalizedBlockNumber L1BlockPoint = 3
+ // SafeBlockNumber is the safe block number
+ SafeBlockNumber L1BlockPoint = 2
+ // PendingBlockNumber is the pending block number
+ PendingBlockNumber L1BlockPoint = 1
+ // LastBlockNumber is the last block number
+ LastBlockNumber L1BlockPoint = 0
+)
+
+// ToString converts a L1BlockPoint to a string
+func (v L1BlockPoint) ToString() string {
+ switch v {
+ case FinalizedBlockNumber:
+ return "finalized"
+ case SafeBlockNumber:
+ return "safe"
+ case PendingBlockNumber:
+ return "pending"
+ case LastBlockNumber:
+ return "latest"
+ }
+ return "Unknown"
+}
+
+// StringToL1BlockPoint converts a string to a L1BlockPoint
+func StringToL1BlockPoint(s string) L1BlockPoint {
+ switch s {
+ case "finalized":
+ return FinalizedBlockNumber
+ case "safe":
+ return SafeBlockNumber
+ case "pending":
+ return PendingBlockNumber
+ case "latest":
+ return LastBlockNumber
+ default:
+ return FinalizedBlockNumber
+ }
+}
+
+// ToGethRequest converts a L1BlockPoint to the *big.Int used to request that block from geth
+func (v L1BlockPoint) ToGethRequest() *big.Int {
+ switch v {
+ case FinalizedBlockNumber:
+ return big.NewInt(int64(rpc.FinalizedBlockNumber))
+ case PendingBlockNumber:
+ return big.NewInt(int64(rpc.PendingBlockNumber))
+ case SafeBlockNumber:
+ return big.NewInt(int64(rpc.SafeBlockNumber))
+ case LastBlockNumber:
+ return nil
+ }
+ return big.NewInt(int64(v))
+}
+
+// SafeL1BlockNumberFetch is a struct that implements a safe L1 block number fetch
+type SafeL1BlockNumberFetch struct {
+ // SafeBlockPoint is the L1 block reference point (finalized, safe, pending or latest)
+ SafeBlockPoint L1BlockPoint
+ // Offset is a value added to the L1 block number
+ Offset int
+}
+
+// NewSafeL1BlockNumberFetch creates a new SafeL1BlockNumberFetch
+func NewSafeL1BlockNumberFetch(safeBlockPoint L1BlockPoint, offset int) *SafeL1BlockNumberFetch {
+ return &SafeL1BlockNumberFetch{
+ SafeBlockPoint: safeBlockPoint,
+ Offset: offset,
+ }
+}
+
+// Description returns a string representation of SafeL1BlockNumberFetch
+func (p *SafeL1BlockNumberFetch) Description() string {
+ return fmt.Sprintf("%s/%d", p.SafeBlockPoint.ToString(), p.Offset)
+}
+
+// GetSafeBlockNumber gets the safe block number from L1
+func (p *SafeL1BlockNumberFetch) GetSafeBlockNumber(ctx context.Context, requester L1Requester) (uint64, error) {
+ l1SafePointBlock, err := requester.HeaderByNumber(ctx, p.SafeBlockPoint.ToGethRequest())
+ if err != nil {
+ log.Errorf("%s: Error getting L1 block %d. err: %s", logPrefix, p.String(), err.Error())
+ return uint64(0), err
+ }
+ result := l1SafePointBlock.Number.Uint64()
+ if p.Offset < 0 {
+ if result < uint64(-p.Offset) {
+ result = 0
+ } else {
+ result -= uint64(-p.Offset)
+ }
+ } else {
+ result = l1SafePointBlock.Number.Uint64() + uint64(p.Offset)
+ }
+ if p.SafeBlockPoint == LastBlockNumber {
+ result = min(result, l1SafePointBlock.Number.Uint64())
+ }
+
+ return result, nil
+}
+
+// String returns a string representation of SafeL1BlockNumberFetch
+func (p *SafeL1BlockNumberFetch) String() string {
+ return fmt.Sprintf("SafeBlockPoint: %s, Offset: %d", p.SafeBlockPoint.ToString(), p.Offset)
+}
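To make the offset handling in GetSafeBlockNumber easier to follow, here is a small illustrative sketch (not part of the diff) that mirrors its clamping rules: positive offsets are added, negative offsets are subtracted but never go below block 0, and for the "latest" reference point a positive offset cannot move past the latest block.

package l1checkblock_example

// effectiveBlockNumber mirrors the clamping rules of GetSafeBlockNumber for
// illustration only; l1Block is the block number returned by HeaderByNumber.
func effectiveBlockNumber(l1Block uint64, offset int, isLatest bool) uint64 {
	result := l1Block
	if offset < 0 {
		if result < uint64(-offset) {
			result = 0 // clamp: never go below block 0
		} else {
			result -= uint64(-offset)
		}
	} else {
		result += uint64(offset)
	}
	if isLatest {
		result = min(result, l1Block) // a positive offset cannot point past latest
	}
	return result
}

// Examples (matching the test cases below):
//   effectiveBlockNumber(100, 10, false)     == 110 (safe/finalized/pending + 10)
//   effectiveBlockNumber(100, 10, true)      == 100 (latest: positive offset ignored)
//   effectiveBlockNumber(100, -1000, false)  == 0   (clamped at 0)
//   effectiveBlockNumber(1001, -1000, false) == 1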
diff --git a/synchronizer/l1_check_block/safe_l1_block_test.go b/synchronizer/l1_check_block/safe_l1_block_test.go
new file mode 100644
index 0000000000..4d3167adcd
--- /dev/null
+++ b/synchronizer/l1_check_block/safe_l1_block_test.go
@@ -0,0 +1,113 @@
+package l1_check_block_test
+
+import (
+ "context"
+ "math/big"
+ "testing"
+
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
+ mock_l1_check_block "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block/mocks"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+)
+
+func TestGetSafeBlockNumber(t *testing.T) {
+ ctx := context.Background()
+ mockRequester := mock_l1_check_block.NewL1Requester(t)
+ //safeBlockPoint := big.NewInt(50)
+ offset := 10
+ safeL1Block := l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint("safe"), offset)
+
+ mockRequester.EXPECT().HeaderByNumber(ctx, mock.Anything).Return(&types.Header{
+ Number: big.NewInt(100),
+ }, nil)
+ blockNumber, err := safeL1Block.GetSafeBlockNumber(ctx, mockRequester)
+ assert.NoError(t, err)
+ expectedBlockNumber := uint64(100 + offset)
+ assert.Equal(t, expectedBlockNumber, blockNumber)
+}
+
+func TestGetSafeBlockNumberMultipleCases(t *testing.T) {
+ tests := []struct {
+ name string
+ blockPoint string
+ offset int
+ l1ReturnBlockNumber uint64
+ expectedCallToGeth *big.Int
+ expectedBlockNumber uint64
+ }{
+ {
+ name: "SafeBlockNumber+10",
+ blockPoint: "safe",
+ offset: 10,
+ l1ReturnBlockNumber: 100,
+ expectedCallToGeth: big.NewInt(int64(rpc.SafeBlockNumber)),
+ expectedBlockNumber: 110,
+ },
+ {
+ name: "FinalizedBlockNumber+10",
+ blockPoint: "finalized",
+ offset: 10,
+ l1ReturnBlockNumber: 100,
+ expectedCallToGeth: big.NewInt(int64(rpc.FinalizedBlockNumber)),
+ expectedBlockNumber: 110,
+ },
+ {
+ name: "PendingBlockNumber+10",
+ blockPoint: "pending",
+ offset: 10,
+ l1ReturnBlockNumber: 100,
+ expectedCallToGeth: big.NewInt(int64(rpc.PendingBlockNumber)),
+ expectedBlockNumber: 110,
+ },
+ {
+ name: "LastBlockNumber+10, can't add 10 to latest block number. So must return latest block number and ignore positive offset",
+ blockPoint: "latest",
+ offset: 10,
+ l1ReturnBlockNumber: 100,
+ expectedCallToGeth: nil,
+ expectedBlockNumber: 100,
+ },
+ {
+ name: "FinalizedBlockNumber-1000. negative blockNumbers are not welcome. So must return 0",
+ blockPoint: "finalized",
+ offset: -1000,
+ l1ReturnBlockNumber: 100,
+ expectedCallToGeth: big.NewInt(int64(rpc.FinalizedBlockNumber)),
+ expectedBlockNumber: 0,
+ },
+ {
+ name: "FinalizedBlockNumber(1000)-1000. is 0 ",
+ blockPoint: "finalized",
+ offset: -1000,
+ l1ReturnBlockNumber: 1000,
+ expectedCallToGeth: big.NewInt(int64(rpc.FinalizedBlockNumber)),
+ expectedBlockNumber: 0,
+ },
+ {
+ name: "FinalizedBlockNumber(1001)-1000. is 1 ",
+ blockPoint: "finalized",
+ offset: -1000,
+ l1ReturnBlockNumber: 1001,
+ expectedCallToGeth: big.NewInt(int64(rpc.FinalizedBlockNumber)),
+ expectedBlockNumber: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ mockRequester := mock_l1_check_block.NewL1Requester(t)
+ safeL1Block := l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint(tt.blockPoint), tt.offset)
+
+ mockRequester.EXPECT().HeaderByNumber(ctx, tt.expectedCallToGeth).Return(&types.Header{
+ Number: big.NewInt(int64(tt.l1ReturnBlockNumber)),
+ }, nil)
+ blockNumber, err := safeL1Block.GetSafeBlockNumber(ctx, mockRequester)
+ assert.NoError(t, err)
+ assert.Equal(t, tt.expectedBlockNumber, blockNumber)
+ })
+ }
+}
diff --git a/synchronizer/l1_parallel_sync/l1_rollup_info_consumer.go b/synchronizer/l1_parallel_sync/l1_rollup_info_consumer.go
index 5a457acbf0..4dd78632fd 100644
--- a/synchronizer/l1_parallel_sync/l1_rollup_info_consumer.go
+++ b/synchronizer/l1_parallel_sync/l1_rollup_info_consumer.go
@@ -3,12 +3,14 @@ package l1_parallel_sync
import (
"context"
"errors"
+ "fmt"
"sync"
"time"
"github.com/0xPolygonHermez/zkevm-node/etherman"
"github.com/0xPolygonHermez/zkevm-node/log"
"github.com/0xPolygonHermez/zkevm-node/state"
+ syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
"github.com/ethereum/go-ethereum/common"
types "github.com/ethereum/go-ethereum/core/types"
)
@@ -22,7 +24,6 @@ var (
errContextCanceled = errors.New("consumer:context canceled")
errConsumerStopped = errors.New("consumer:stopped by request")
errConsumerStoppedBecauseIsSynchronized = errors.New("consumer:stopped because is synchronized")
- errL1Reorg = errors.New("consumer: L1 reorg detected")
errConsumerAndProducerDesynchronized = errors.New("consumer: consumer and producer are desynchronized")
)
@@ -155,13 +156,12 @@ func checkPreviousBlocks(rollupInfo rollupInfoByBlockRangeResult, cachedBlock *s
}
if cachedBlock.BlockNumber == rollupInfo.previousBlockOfRange.NumberU64() {
if cachedBlock.BlockHash != rollupInfo.previousBlockOfRange.Hash() {
- log.Errorf("consumer: Previous block %d hash is not the same", cachedBlock.BlockNumber)
- return errL1Reorg
- }
- if cachedBlock.ParentHash != rollupInfo.previousBlockOfRange.ParentHash() {
- log.Errorf("consumer: Previous block %d parentHash is not the same", cachedBlock.BlockNumber)
- return errL1Reorg
+ err := fmt.Errorf("consumer: Previous block %d hash is not the same. state.Hash:%s != l1.Hash:%s",
+ cachedBlock.BlockNumber, cachedBlock.BlockHash, rollupInfo.previousBlockOfRange.Hash())
+ log.Errorf(err.Error())
+ return syncCommon.NewReorgError(cachedBlock.BlockNumber, err)
}
+
log.Infof("consumer: Verified previous block %d not the same: OK", cachedBlock.BlockNumber)
}
return nil
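The consumer change above replaces the package-local errL1Reorg sentinel with the shared reorg error from synchronizer/common, so callers can recover the mismatching block number. A hedged sketch of how such an error can be produced and inspected follows; the helper signatures (NewReorgError, IsReorgError, GetReorgErrorBlockNumber) are inferred from their usage in this diff, not copied from the package.

package reorg_example

import (
	"fmt"

	"github.com/0xPolygonHermez/zkevm-node/log"
	syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
)

// reportMismatch is an illustrative sketch only.
func reportMismatch(blockNumber uint64, stateHash, l1Hash string) error {
	// Wrap the mismatch so callers can detect it and extract the bad block number.
	return syncCommon.NewReorgError(blockNumber,
		fmt.Errorf("previous block %d hash mismatch. state:%s != l1:%s", blockNumber, stateHash, l1Hash))
}

// handleSyncError shows how the synchronizer loop reacts to a reorg error.
func handleSyncError(err error) {
	if syncCommon.IsReorgError(err) {
		badBlock := syncCommon.GetReorgErrorBlockNumber(err)
		// The synchronizer resets the state to badBlock-1 and restarts the loop.
		log.Warnf("reorg detected at L1 block %d", badBlock)
	}
}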
diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go
index 33cf49a7ee..73ad10eddb 100644
--- a/synchronizer/synchronizer.go
+++ b/synchronizer/synchronizer.go
@@ -16,6 +16,7 @@ import (
"github.com/0xPolygonHermez/zkevm-node/synchronizer/actions/processor_manager"
syncCommon "github.com/0xPolygonHermez/zkevm-node/synchronizer/common"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/common/syncinterfaces"
+ "github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_check_block"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1_parallel_sync"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/l1event_orders"
"github.com/0xPolygonHermez/zkevm-node/synchronizer/l2_sync/l2_shared"
@@ -77,6 +78,7 @@ type ClientSynchronizer struct {
l1EventProcessors *processor_manager.L1EventProcessors
syncTrustedStateExecutor syncinterfaces.SyncTrustedStateExecutor
halter syncinterfaces.CriticalErrorHandler
+ asyncL1BlockChecker syncinterfaces.L1BlockCheckerIntegrator
}
// NewSynchronizer creates and initializes an instance of Synchronizer
@@ -123,6 +125,31 @@ func NewSynchronizer(
syncBlockProtection: syncBlockProtection,
halter: syncCommon.NewCriticalErrorHalt(eventLog, 5*time.Second), //nolint:gomnd
}
+ if cfg.L1BlockCheck.Enable {
+ log.Infof("L1BlockChecker enabled: %s", cfg.L1BlockCheck.String())
+ l1BlockChecker := l1_check_block.NewCheckL1BlockHash(ethMan, res.state,
+ l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint(cfg.L1BlockCheck.L1SafeBlockPoint), cfg.L1BlockCheck.L1SafeBlockOffset))
+
+ var preCheckAsync syncinterfaces.AsyncL1BlockChecker
+ if cfg.L1BlockCheck.PreCheckEnable {
+ log.Infof("L1BlockChecker enabled precheck from: %s/%d to: %s/%d",
+ cfg.L1BlockCheck.L1SafeBlockPoint, cfg.L1BlockCheck.L1SafeBlockOffset,
+ cfg.L1BlockCheck.L1PreSafeBlockPoint, cfg.L1BlockCheck.L1PreSafeBlockOffset)
+ l1BlockPreChecker := l1_check_block.NewPreCheckL1BlockHash(ethMan, res.state,
+ l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint(cfg.L1BlockCheck.L1SafeBlockPoint), cfg.L1BlockCheck.L1SafeBlockOffset),
+ l1_check_block.NewSafeL1BlockNumberFetch(l1_check_block.StringToL1BlockPoint(cfg.L1BlockCheck.L1PreSafeBlockPoint), cfg.L1BlockCheck.L1PreSafeBlockOffset),
+ )
+ preCheckAsync = l1_check_block.NewAsyncCheck(l1BlockPreChecker)
+ }
+
+ res.asyncL1BlockChecker = l1_check_block.NewL1BlockCheckerIntegration(
+ l1_check_block.NewAsyncCheck(l1BlockChecker),
+ preCheckAsync,
+ res.state,
+ res,
+ cfg.L1BlockCheck.ForceCheckBeforeStart,
+ time.Second)
+ }
if !isTrustedSequencer {
log.Info("Permissionless: creating and Initializing L2 synchronization components")
@@ -165,7 +192,7 @@ func NewSynchronizer(
res.l1EventProcessors = defaultsL1EventProcessors(res, l1checkerL2Blocks)
switch cfg.L1SynchronizationMode {
case ParallelMode:
- log.Fatal("L1SynchronizationMode is parallel. Not yet suported, please use sequential mode to sync")
+ log.Info("L1SynchronizationMode is parallel")
res.l1SyncOrchestration = newL1SyncParallel(ctx, cfg, etherManForL1, res, runInDevelopmentMode)
case SequentialMode:
log.Info("L1SynchronizationMode is sequential")
@@ -251,6 +278,10 @@ func (s *ClientSynchronizer) Sync() error {
// If there is no lastEthereumBlock means that sync from the beginning is necessary. If not, it continues from the retrieved ethereum block
// Get the latest synced block. If there is no block on db, use genesis block
log.Info("Sync started")
+ if s.asyncL1BlockChecker != nil {
+ _ = s.asyncL1BlockChecker.OnStart(s.ctx)
+ }
+
dbTx, err := s.state.BeginStateTransaction(s.ctx)
if err != nil {
log.Errorf("error creating db transaction to get latest block. Error: %v", err)
@@ -372,6 +403,7 @@ func (s *ClientSynchronizer) Sync() error {
continue
}
log.Infof("latestSequencedBatchNumber: %d, latestSyncedBatch: %d, lastVerifiedBatchNumber: %d", latestSequencedBatchNumber, latestSyncedBatch, lastVerifiedBatchNumber)
+ resetDone := false
// Sync trusted state
// latestSyncedBatch -> Last batch on DB
// latestSequencedBatchNumber -> last batch on SMC
@@ -379,6 +411,13 @@ func (s *ClientSynchronizer) Sync() error {
startTrusted := time.Now()
if s.syncTrustedStateExecutor != nil && !s.isTrustedSequencer {
log.Info("Syncing trusted state (permissionless)")
+ //Sync Trusted State
+ log.Debug("Doing reorg check before L2 sync")
+ resetDone, lastEthBlockSynced, err = s.checkReorgAndExecuteReset(lastEthBlockSynced)
+ if resetDone || err != nil {
+ log.Infof("Reset done before L2 sync")
+ continue
+ }
err = s.syncTrustedState(latestSyncedBatch)
metrics.FullTrustedSyncTime(time.Since(startTrusted))
if err != nil {
@@ -387,10 +426,14 @@ func (s *ClientSynchronizer) Sync() error {
if errors.Is(err, syncinterfaces.ErrFatalDesyncFromL1) {
l1BlockNumber := err.(*l2_shared.DeSyncPermissionlessAndTrustedNodeError).L1BlockNumber
log.Error("Trusted and permissionless desync! reseting to last common point: L1Block (%d-1)", l1BlockNumber)
- err = s.resetState(l1BlockNumber - 1)
- if err != nil {
- log.Errorf("error resetting the state to a discrepancy block. Retrying... Err: %v", err)
- continue
+ for {
+ resetDone, lastEthBlockSynced, err = s.detectedReorgBadBlockExecuteReset(lastEthBlockSynced, syncCommon.GetReorgErrorBlockNumber(err))
+ if resetDone {
+ break
+ } else {
+ log.Error("reorg isn't done, retrying...")
+ time.Sleep(time.Second)
+ }
}
} else if errors.Is(err, syncinterfaces.ErrMissingSyncFromL1) {
log.Info("Syncing from trusted node need data from L1")
@@ -407,6 +450,11 @@ func (s *ClientSynchronizer) Sync() error {
waitDuration = s.cfg.SyncInterval.Duration
}
//Sync L1Blocks
+ resetDone, lastEthBlockSynced, err = s.checkReorgAndExecuteReset(lastEthBlockSynced)
+ if resetDone || err != nil {
+ continue
+ }
+
startL1 := time.Now()
if s.l1SyncOrchestration != nil && (latestSyncedBatch < latestSequencedBatchNumber || !s.cfg.L1ParallelSynchronization.FallbackToSequentialModeOnSynchronized) {
log.Infof("Syncing L1 blocks in parallel lastEthBlockSynced=%d", lastEthBlockSynced.BlockNumber)
@@ -421,6 +469,19 @@ func (s *ClientSynchronizer) Sync() error {
lastEthBlockSynced, err = s.syncBlocksSequential(lastEthBlockSynced)
}
metrics.FullL1SyncTime(time.Since(startL1))
+ if syncCommon.IsReorgError(err) {
+ log.Warnf("error syncing blocks: %s", err.Error())
+ for {
+ resetDone, lastEthBlockSynced, err = s.detectedReorgBadBlockExecuteReset(lastEthBlockSynced, syncCommon.GetReorgErrorBlockNumber(err))
+ if resetDone {
+ break
+ } else {
+ log.Error("reorg isn't done, retrying...")
+ time.Sleep(time.Second)
+ }
+ }
+ continue
+ }
if err != nil {
log.Warn("error syncing blocks: ", err)
s.CleanTrustedState()
@@ -491,22 +552,6 @@ func sanityCheckForGenesisBlockRollupInfo(blocks []etherman.Block, order map[com
// This function syncs the node from a specific block to the latest
// lastEthBlockSynced -> last block synced in the db
func (s *ClientSynchronizer) syncBlocksParallel(lastEthBlockSynced *state.Block) (*state.Block, error) {
- // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok.
- block, err := s.newCheckReorg(lastEthBlockSynced, nil)
- if err != nil {
- log.Errorf("error checking reorgs. Retrying... Err: %v", err)
- return lastEthBlockSynced, fmt.Errorf("error checking reorgs")
- }
- if block != nil {
- log.Infof("reorg detected. Resetting the state from block %v to block %v", lastEthBlockSynced.BlockNumber, block.BlockNumber)
- err = s.resetState(block.BlockNumber)
- if err != nil {
- log.Errorf("error resetting the state to a previous block. Retrying... Err: %v", err)
- s.l1SyncOrchestration.Reset(lastEthBlockSynced.BlockNumber)
- return lastEthBlockSynced, fmt.Errorf("error resetting the state to a previous block")
- }
- return block, nil
- }
log.Infof("Starting L1 sync orchestrator in parallel block: %d", lastEthBlockSynced.BlockNumber)
return s.l1SyncOrchestration.Start(lastEthBlockSynced)
}
@@ -521,21 +566,6 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc
}
lastKnownBlock := header.Number
- // This function will read events fromBlockNum to latestEthBlock. Check reorg to be sure that everything is ok.
- block, err := s.newCheckReorg(lastEthBlockSynced, nil)
- if err != nil {
- log.Errorf("error checking reorgs. Retrying... Err: %v", err)
- return lastEthBlockSynced, fmt.Errorf("error checking reorgs")
- }
- if block != nil {
- err = s.resetState(block.BlockNumber)
- if err != nil {
- log.Errorf("error resetting the state to a previous block. Retrying... Err: %v", err)
- return lastEthBlockSynced, fmt.Errorf("error resetting the state to a previous block")
- }
- return block, nil
- }
-
var fromBlock uint64
if lastEthBlockSynced.BlockNumber > 0 {
fromBlock = lastEthBlockSynced.BlockNumber
@@ -576,7 +606,7 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc
log.Error("error getting previousBlock from db. Error: ", err)
return lastEthBlockSynced, err
}
- blockReorged, err := s.newCheckReorg(prevBlock, nil)
+ blockReorged, err := s.checkReorg(prevBlock, nil)
if err != nil {
log.Error("error checking reorgs in previous blocks. Error: ", err)
return lastEthBlockSynced, err
@@ -592,7 +622,7 @@ func (s *ClientSynchronizer) syncBlocksSequential(lastEthBlockSynced *state.Bloc
return blockReorged, nil
}
// Check reorg again to be sure that the chain has not changed between the previous checkReorg and the call GetRollupInfoByBlockRange
- block, err := s.newCheckReorg(lastEthBlockSynced, initBlockReceived)
+ block, err := s.checkReorg(lastEthBlockSynced, initBlockReceived)
if err != nil {
log.Errorf("error checking reorgs. Retrying... Err: %v", err)
return lastEthBlockSynced, fmt.Errorf("error checking reorgs")
@@ -773,12 +803,118 @@ func (s *ClientSynchronizer) resetState(blockNumber uint64) error {
log.Error("error committing the resetted state. Error: ", err)
return err
}
+ if s.asyncL1BlockChecker != nil {
+ s.asyncL1BlockChecker.OnResetState(s.ctx)
+ }
if s.l1SyncOrchestration != nil {
- s.l1SyncOrchestration.Reset(blockNumber)
+ lastBlock, err := s.state.GetLastBlock(s.ctx, nil)
+ if err != nil {
+ log.Errorf("error getting last block synced from db. Error: %v", err)
+ s.l1SyncOrchestration.Reset(blockNumber)
+ } else {
+ s.l1SyncOrchestration.Reset(lastBlock.BlockNumber)
+ }
}
return nil
}
+// OnDetectedMismatchL1BlockReorg function will be called when a reorg is detected (asynchronous call)
+func (s *ClientSynchronizer) OnDetectedMismatchL1BlockReorg() {
+ log.Infof("Detected Reorg in background at block (mismatch)")
+ if s.l1SyncOrchestration != nil && s.l1SyncOrchestration.IsProducerRunning() {
+ log.Errorf("Stop synchronizer: because L1 sync parallel aborting background process")
+ s.l1SyncOrchestration.Abort()
+ }
+}
+
+// ExecuteReorgFromMismatchBlock function will reset the state to the block before the bad block
+func (s *ClientSynchronizer) ExecuteReorgFromMismatchBlock(blockNumber uint64, reason string) error {
+ log.Info("Detected reorg at block (mismatch): ", blockNumber, " reason: ", reason, " resetting the state to block:", blockNumber-1)
+ s.CleanTrustedState()
+ return s.resetState(blockNumber - 1)
+}
+func (s *ClientSynchronizer) detectedReorgBadBlockExecuteReset(lastEthBlockSynced *state.Block, badBlockNumber uint64) (bool, *state.Block, error) {
+ firstBlockOK, err := s.checkReorg(lastEthBlockSynced, nil)
+ if err != nil {
+ log.Warnf("error checking reorgs. using badBlock detected: %d Err: %v", badBlockNumber, err)
+ firstBlockOK = nil
+ }
+ if firstBlockOK != nil && firstBlockOK.BlockNumber >= badBlockNumber {
+ log.Warnf("Reorg detected firstBlockOk: %d. But oldest bad block detected: %d", firstBlockOK.BlockNumber, badBlockNumber)
+ firstBlockOK = nil
+ }
+ // We already know a bad block, reset from the block before it
+ if firstBlockOK == nil {
+ firstBlockOK, err = s.state.GetPreviousBlockToBlockNumber(s.ctx, badBlockNumber, nil)
+ if err != nil {
+ log.Errorf("error getting previous block %d from db. Can't execute REORG. Error: %v", badBlockNumber, err)
+ return false, lastEthBlockSynced, err
+ }
+ }
+ newFirstBlock, err := s.executeReorgFromFirstValidBlock(lastEthBlockSynced, firstBlockOK)
+ if err != nil {
+ log.Errorf("error executing reorg. Retrying... Err: %v", err)
+ return false, lastEthBlockSynced, fmt.Errorf("error executing reorg. Err: %w", err)
+ }
+ return true, newFirstBlock, nil
+}
+
+// checkReorgAndExecuteReset checks if there is a reorg and, if so, executes the reset.
+// It returns true if the reset has been done.
+func (s *ClientSynchronizer) checkReorgAndExecuteReset(lastEthBlockSynced *state.Block) (bool, *state.Block, error) {
+ var err error
+
+ block, err := s.checkReorg(lastEthBlockSynced, nil)
+ if err != nil {
+ log.Errorf("error checking reorgs. Retrying... Err: %v", err)
+ return false, lastEthBlockSynced, fmt.Errorf("error checking reorgs")
+ }
+ if block != nil {
+ newFirstBlock, err := s.executeReorgFromFirstValidBlock(lastEthBlockSynced, block)
+ if err != nil {
+ log.Errorf("error executing reorg. Retrying... Err: %v", err)
+ return false, lastEthBlockSynced, fmt.Errorf("error executing reorg. Err: %w", err)
+ }
+ return true, newFirstBlock, nil
+ }
+
+ return false, lastEthBlockSynced, nil
+}
+
+func (s *ClientSynchronizer) executeReorgFromFirstValidBlock(lastEthBlockSynced *state.Block, firstValidBlock *state.Block) (*state.Block, error) {
+ log.Infof("reorg detected. Resetting the state from block %v to block %v", lastEthBlockSynced.BlockNumber, firstValidBlock.BlockNumber)
+ s.CleanTrustedState()
+ err := s.resetState(firstValidBlock.BlockNumber)
+ if err != nil {
+ log.Errorf("error resetting the state to a previous block. Retrying... Err: %s", err.Error())
+ return nil, fmt.Errorf("error resetting the state to a previous block. Err: %w", err)
+ }
+ newLastBlock, err := s.state.GetLastBlock(s.ctx, nil)
+ if err != nil {
+ log.Warnf("error getting last block synced from db, returning expected block %d. Error: %v", firstValidBlock.BlockNumber, err)
+ return firstValidBlock, nil
+ }
+ if newLastBlock.BlockNumber != firstValidBlock.BlockNumber {
+ log.Warnf("Doesnt match LastBlock on State and expecting one after a resetState. The block in state is %d and the expected block is %d", newLastBlock.BlockNumber,
+ firstValidBlock.BlockNumber)
+ return firstValidBlock, nil
+ }
+ return newLastBlock, nil
+}
+
+func (s *ClientSynchronizer) checkReorg(latestBlock *state.Block, syncedBlock *etherman.Block) (*state.Block, error) {
+ if latestBlock == nil {
+ err := fmt.Errorf("lastEthBlockSynced is nil calling checkReorgAndExecuteReset")
+ log.Errorf("%s, it never have to happens", err.Error())
+ return nil, err
+ }
+ block, errReturnedReorgFunction := s.newCheckReorg(latestBlock, syncedBlock)
+ if s.asyncL1BlockChecker != nil {
+ return s.asyncL1BlockChecker.CheckReorgWrapper(s.ctx, block, errReturnedReorgFunction)
+ }
+ return block, errReturnedReorgFunction
+}
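
What CheckReorgWrapper does with the two inputs is not shown in this hunk; one plausible policy (an assumption, not the documented behavior) is to keep whichever source reports the oldest reorg point, so the state is rewound far enough for both. Sketched below with a trimmed Block type.

package sketch

// Block is a trimmed stand-in for state.Block, for illustration only.
type Block struct{ BlockNumber uint64 }

// combineReorgPoints shows the assumed policy: prefer the lowest-numbered
// (oldest) reorg point reported by either the synchronous check or the
// asynchronous L1 block checker; nil means "no reorg from this source".
func combineReorgPoints(fromSync, fromAsyncChecker *Block) *Block {
	if fromSync == nil {
		return fromAsyncChecker
	}
	if fromAsyncChecker == nil {
		return fromSync
	}
	if fromAsyncChecker.BlockNumber < fromSync.BlockNumber {
		return fromAsyncChecker
	}
	return fromSync
}
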
+
/*
This function will check if there is a reorg.
As input param needs the last ethereum block synced. Retrieve the block info from the blockchain
@@ -787,76 +923,6 @@ If hash or hash parent don't match, reorg detected and the function will return
must be reverted. Then, check the previous ethereum block synced, get block info from the blockchain and check
hash and has parent. This operation has to be done until a match is found.
*/
-// TODO This function will be deprecated
-func (s *ClientSynchronizer) oldCheckReorg(latestBlock *state.Block) (*state.Block, error) { //nolint:unused
- // This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info.
- latestEthBlockSynced := *latestBlock
- reorgedBlock := *latestBlock
- var depth uint64
- for {
- block, err := s.etherMan.EthBlockByNumber(s.ctx, reorgedBlock.BlockNumber)
- if err != nil {
- log.Errorf("error getting latest block synced from blockchain. Block: %d, error: %v", reorgedBlock.BlockNumber, err)
- return nil, err
- }
- log.Infof("[checkReorg function] BlockNumber: %d BlockHash got from L1 provider: %s", block.Number().Uint64(), block.Hash().String())
- log.Infof("[checkReorg function] latestBlockNumber: %d latestBlockHash already synced: %s", latestBlock.BlockNumber, latestBlock.BlockHash.String())
- if block.NumberU64() != reorgedBlock.BlockNumber {
- err = fmt.Errorf("wrong ethereum block retrieved from blockchain. Block numbers don't match. BlockNumber stored: %d. BlockNumber retrieved: %d",
- reorgedBlock.BlockNumber, block.NumberU64())
- log.Error("error: ", err)
- return nil, err
- }
- // Compare hashes
- if (block.Hash() != reorgedBlock.BlockHash || block.ParentHash() != reorgedBlock.ParentHash) && reorgedBlock.BlockNumber > s.genesis.BlockNumber {
- log.Infof("checkReorg: Bad block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash)
- log.Debug("[checkReorg function] => latestBlockNumber: ", reorgedBlock.BlockNumber)
- log.Debug("[checkReorg function] => latestBlockHash: ", reorgedBlock.BlockHash)
- log.Debug("[checkReorg function] => latestBlockHashParent: ", reorgedBlock.ParentHash)
- log.Debug("[checkReorg function] => BlockNumber: ", reorgedBlock.BlockNumber, block.NumberU64())
- log.Debug("[checkReorg function] => BlockHash: ", block.Hash())
- log.Debug("[checkReorg function] => BlockHashParent: ", block.ParentHash())
- depth++
- log.Debug("REORG: Looking for the latest correct ethereum block. Depth: ", depth)
- // Reorg detected. Getting previous block
- dbTx, err := s.state.BeginStateTransaction(s.ctx)
- if err != nil {
- log.Errorf("error creating db transaction to get prevoius blocks")
- return nil, err
- }
- lb, err := s.state.GetPreviousBlock(s.ctx, depth, dbTx)
- errC := dbTx.Commit(s.ctx)
- if errC != nil {
- log.Errorf("error committing dbTx, err: %v", errC)
- rollbackErr := dbTx.Rollback(s.ctx)
- if rollbackErr != nil {
- log.Errorf("error rolling back state. RollbackErr: %v", rollbackErr)
- return nil, rollbackErr
- }
- log.Errorf("error committing dbTx, err: %v", errC)
- return nil, errC
- }
- if errors.Is(err, state.ErrNotFound) {
- log.Warn("error checking reorg: previous block not found in db: ", err)
- return &state.Block{}, nil
- } else if err != nil {
- log.Error("error getting previousBlock from db. Error: ", err)
- return nil, err
- }
- reorgedBlock = *lb
- } else {
- log.Debugf("checkReorg: Block %d hashOk %t parentHashOk %t", reorgedBlock.BlockNumber, block.Hash() == reorgedBlock.BlockHash, block.ParentHash() == reorgedBlock.ParentHash)
- break
- }
- }
- if latestEthBlockSynced.BlockHash != reorgedBlock.BlockHash {
- latestBlock = &reorgedBlock
- log.Info("Reorg detected in block: ", latestEthBlockSynced.BlockNumber, " last block OK: ", latestBlock.BlockNumber)
- return latestBlock, nil
- }
- log.Debugf("No reorg detected in block: %d. BlockHash: %s", latestEthBlockSynced.BlockNumber, latestEthBlockSynced.BlockHash.String())
- return nil, nil
-}
func (s *ClientSynchronizer) newCheckReorg(latestStoredBlock *state.Block, syncedBlock *etherman.Block) (*state.Block, error) {
// This function only needs to worry about reorgs if some of the reorganized blocks contained rollup info.
diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go
index 98ab184de6..00138eb2bd 100644
--- a/synchronizer/synchronizer_test.go
+++ b/synchronizer/synchronizer_test.go
@@ -129,6 +129,9 @@ func TestForcedBatchEtrog(t *testing.T) {
SyncChunkSize: 10,
L1SynchronizationMode: SequentialMode,
SyncBlockProtection: "latest",
+ L1BlockCheck: L1BlockCheckConfig{
+ Enable: false,
+ },
}
m := mocks{
@@ -206,7 +209,7 @@ func TestForcedBatchEtrog(t *testing.T) {
m.Etherman.
On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
Return(ethBlock0, nil).
- Once()
+ Times(2)
n := big.NewInt(rpc.LatestBlockNumber.Int64())
m.Etherman.
@@ -925,6 +928,9 @@ func TestReorg(t *testing.T) {
SyncChunkSize: 3,
L1SynchronizationMode: SequentialMode,
SyncBlockProtection: "latest",
+ L1BlockCheck: L1BlockCheckConfig{
+ Enable: false,
+ },
}
m := mocks{
@@ -1012,6 +1018,16 @@ func TestReorg(t *testing.T) {
On("SetLastBatchInfoSeenOnEthereum", ctx, uint64(10), uint64(10), nilDbTx).
Return(nil)
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock1.BlockNumber).
+ Return(ethBlock1, nil).
+ Once()
+
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
n := big.NewInt(rpc.LatestBlockNumber.Int64())
m.Etherman.
On("HeaderByNumber", mock.Anything, n).
@@ -1097,6 +1113,16 @@ func TestReorg(t *testing.T) {
Return(nil).
Once()
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+ Return(ethBlock0, nil).
+ Once()
+
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
m.Etherman.
On("HeaderByNumber", mock.Anything, n).
Return(ethHeader3bis, nil).
@@ -1197,17 +1223,13 @@ func TestReorg(t *testing.T) {
Return(nil).
Once()
- m.ZKEVMClient.
- On("BatchNumber", ctx).
- Return(uint64(1), nil)
-
m.DbTx.
On("Commit", ctx).
+ Return(nil).
Run(func(args mock.Arguments) {
sync.Stop()
ctx.Done()
}).
- Return(nil).
Once()
}).
Return(m.DbTx, nil).
@@ -1226,6 +1248,9 @@ func TestLatestSyncedBlockEmpty(t *testing.T) {
SyncChunkSize: 3,
L1SynchronizationMode: SequentialMode,
SyncBlockProtection: "latest",
+ L1BlockCheck: L1BlockCheckConfig{
+ Enable: false,
+ },
}
m := mocks{
@@ -1307,6 +1332,16 @@ func TestLatestSyncedBlockEmpty(t *testing.T) {
On("SetLastBatchInfoSeenOnEthereum", ctx, uint64(10), uint64(10), nilDbTx).
Return(nil)
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock1.BlockNumber).
+ Return(ethBlock1, nil).
+ Once()
+
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
n := big.NewInt(rpc.LatestBlockNumber.Int64())
m.Etherman.
On("HeaderByNumber", mock.Anything, n).
@@ -1369,6 +1404,11 @@ func TestLatestSyncedBlockEmpty(t *testing.T) {
Return(nil).
Once()
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+ Return(ethBlock0, nil).
+ Once()
+
m.ZKEVMClient.
On("BatchNumber", ctx).
Return(uint64(1), nil).
@@ -1400,15 +1440,10 @@ func TestLatestSyncedBlockEmpty(t *testing.T) {
m.Etherman.
On("GetFinalizedBlockNumber", ctx).
Return(ethBlock3.NumberU64(), nil).
- Once()
-
- m.ZKEVMClient.
- On("BatchNumber", ctx).
Run(func(args mock.Arguments) {
sync.Stop()
ctx.Done()
}).
- Return(uint64(1), nil).
Once()
}).
Return(m.DbTx, nil).
@@ -1427,6 +1462,9 @@ func TestRegularReorg(t *testing.T) {
SyncChunkSize: 3,
L1SynchronizationMode: SequentialMode,
SyncBlockProtection: "latest",
+ L1BlockCheck: L1BlockCheckConfig{
+ Enable: false,
+ },
}
m := mocks{
@@ -1469,10 +1507,6 @@ func TestRegularReorg(t *testing.T) {
lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()}
lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()}
- m.ZKEVMClient.
- On("BatchNumber", ctx).
- Return(uint64(1), nil)
-
m.State.
On("GetForkIDByBatchNumber", mock.Anything).
Return(uint64(9), nil).
@@ -1482,6 +1516,12 @@ func TestRegularReorg(t *testing.T) {
Return(lastBlock1, nil).
Once()
+ // After a resetState, GetLastBlock must return block 0
+ m.State.
+ On("GetLastBlock", ctx, nil).
+ Return(lastBlock0, nil).
+ Once()
+
m.State.
On("GetLastBatchNumber", ctx, m.DbTx).
Return(uint64(10), nil).
@@ -1514,12 +1554,18 @@ func TestRegularReorg(t *testing.T) {
On("SetLastBatchInfoSeenOnEthereum", ctx, uint64(10), uint64(10), nilDbTx).
Return(nil)
- n := big.NewInt(rpc.LatestBlockNumber.Int64())
m.Etherman.
- On("HeaderByNumber", mock.Anything, n).
- Return(ethHeader2bis, nil).
+ On("EthBlockByNumber", ctx, lastBlock1.BlockNumber).
+ Return(ethBlock1, nil).
Once()
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
+ n := big.NewInt(rpc.LatestBlockNumber.Int64())
+
m.Etherman.
On("EthBlockByNumber", ctx, lastBlock1.BlockNumber).
Return(ethBlock1bis, nil).
@@ -1573,6 +1619,16 @@ func TestRegularReorg(t *testing.T) {
Return(nil).
Once()
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+ Return(ethBlock0, nil).
+ Once()
+
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
m.Etherman.
On("HeaderByNumber", mock.Anything, n).
Return(ethHeader2bis, nil).
@@ -1688,6 +1744,9 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) {
SyncChunkSize: 3,
L1SynchronizationMode: SequentialMode,
SyncBlockProtection: "latest",
+ L1BlockCheck: L1BlockCheckConfig{
+ Enable: false,
+ },
}
m := mocks{
@@ -1772,6 +1831,16 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) {
On("SetLastBatchInfoSeenOnEthereum", ctx, uint64(10), uint64(10), nilDbTx).
Return(nil)
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock2.BlockNumber).
+ Return(ethBlock2, nil).
+ Once()
+
+ m.ZKEVMClient.
+ On("BatchNumber", ctx).
+ Return(uint64(1), nil).
+ Once()
+
n := big.NewInt(rpc.LatestBlockNumber.Int64())
m.Etherman.
On("HeaderByNumber", mock.Anything, n).
@@ -1860,6 +1929,11 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) {
Return(nil).
Once()
+ m.Etherman.
+ On("EthBlockByNumber", ctx, lastBlock0.BlockNumber).
+ Return(ethBlock0, nil).
+ Once()
+
m.ZKEVMClient.
On("BatchNumber", ctx).
Return(uint64(1), nil).
@@ -1924,15 +1998,10 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) {
m.DbTx.
On("Commit", ctx).
Return(nil).
- Once()
-
- m.ZKEVMClient.
- On("BatchNumber", ctx).
Run(func(args mock.Arguments) {
sync.Stop()
ctx.Done()
}).
- Return(uint64(1), nil).
Once()
}).
Return(m.DbTx, nil).
diff --git a/test/Makefile b/test/Makefile
index 306cb71c98..adcb59c204 100644
--- a/test/Makefile
+++ b/test/Makefile
@@ -707,6 +707,9 @@ generate-mocks-synchronizer: ## Generates mocks for synchronizer , using mockery
rm -Rf ../synchronizer/actions/elderberry/mocks
export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../synchronizer/actions/elderberry --output ../synchronizer/actions/elderberry/mocks --outpkg mock_elderberry ${COMMON_MOCKERY_PARAMS}
+ rm -Rf ../synchronizer/l1_check_block/mocks
+ export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../synchronizer/l1_check_block --output ../synchronizer/l1_check_block/mocks --outpkg mock_l1_check_block ${COMMON_MOCKERY_PARAMS}
+
export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Tx --srcpkg=github.com/jackc/pgx/v4 --output=../synchronizer/mocks --structname=DbTxMock --filename=mock_dbtx.go ${COMMON_MOCKERY_PARAMS}
diff --git a/test/e2e/jsonrpc2_test.go b/test/e2e/jsonrpc2_test.go
index fcd883a956..f8a0113814 100644
--- a/test/e2e/jsonrpc2_test.go
+++ b/test/e2e/jsonrpc2_test.go
@@ -780,7 +780,10 @@ func TestEstimateGas(t *testing.T) {
msg.GasPrice = gasPrice
}
- _, err = ethereumClient.EstimateGas(ctx, msg)
+ gas, err := ethereumClient.EstimateGas(ctx, msg)
+ t.Log("testCase: ", testCase.name)
+ t.Log("err: ", err)
+ t.Log("gas: ", gas)
if testCase.expectedError != nil {
rpcErr := err.(rpc.Error)
errMsg := fmt.Sprintf("[%v] expected: %v %v found: %v %v", network.Name, testCase.expectedError.ErrorCode(), testCase.expectedError.Error(), rpcErr.ErrorCode(), rpcErr.Error())
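
The extra t.Log lines above surface the value returned by eth_estimateGas for each test case; for reference, a standalone sketch of the same call through go-ethereum's client (the endpoint and addresses are placeholders, not values from this repository):

package main

import (
	"context"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint: point this at the node under test.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	to := common.HexToAddress("0x0000000000000000000000000000000000000000")
	msg := ethereum.CallMsg{
		From:  common.HexToAddress("0x0000000000000000000000000000000000000001"),
		To:    &to,
		Value: big.NewInt(0),
	}
	// EstimateGas returns the gas amount the node thinks the call needs,
	// or an error (e.g. when the call would revert).
	gas, err := client.EstimateGas(context.Background(), msg)
	log.Printf("estimated gas: %d, err: %v", gas, err)
}
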