feat: updating L2 Block encoding and Rollup.process function (Aztec…
benesjan authored Jan 17, 2024
1 parent 526d9c1 commit a9a0904
Showing 35 changed files with 795 additions and 554 deletions.
15 changes: 0 additions & 15 deletions docs/docs/dev_docs/getting_started/aztecjs-getting-started.md
@@ -140,9 +140,6 @@ A successful run should show something like this:
    },
    contractDeploymentEmitterAddress: EthAddress {
      buffer: <Buffer 5f c8 d3 26 90 cc 91 d4 c3 9d 9d 3a bc bd 16 98 9f 87 57 07>
    },
-   decoderHelperAddress: EthAddress {
-     buffer: <Buffer 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00>
-   }
  }
} +0ms
@@ -189,9 +186,6 @@ Now that we have our accounts loaded, let's move on to deploy our pre-compiled t
    },
    contractDeploymentEmitterAddress: EthAddress {
      buffer: <Buffer 5f c8 d3 26 90 cc 91 d4 c3 9d 9d 3a bc bd 16 98 9f 87 57 07>
    },
-   decoderHelperAddress: EthAddress {
-     buffer: <Buffer 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00>
-   }
  }
} +0ms
@@ -244,9 +238,6 @@ Running now should yield output:
    },
    contractDeploymentEmitterAddress: EthAddress {
      buffer: <Buffer 5f c8 d3 26 90 cc 91 d4 c3 9d 9d 3a bc bd 16 98 9f 87 57 07>
    },
-   decoderHelperAddress: EthAddress {
-     buffer: <Buffer 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00>
-   }
  }
} +0ms
@@ -309,9 +300,6 @@ Our output should now look like this:
    },
    contractDeploymentEmitterAddress: EthAddress {
      buffer: <Buffer 5f c8 d3 26 90 cc 91 d4 c3 9d 9d 3a bc bd 16 98 9f 87 57 07>
    },
-   decoderHelperAddress: EthAddress {
-     buffer: <Buffer 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00>
-   }
  }
} +0ms
@@ -369,9 +357,6 @@ Our complete output should now be something like:
    },
    contractDeploymentEmitterAddress: EthAddress {
      buffer: <Buffer 5f c8 d3 26 90 cc 91 d4 c3 9d 9d 3a bc bd 16 98 9f 87 57 07>
    },
-   decoderHelperAddress: EthAddress {
-     buffer: <Buffer 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00>
-   }
  }
} +0ms
75 changes: 23 additions & 52 deletions l1-contracts/src/core/Rollup.sol
@@ -9,7 +9,7 @@ import {IOutbox} from "./interfaces/messagebridge/IOutbox.sol";
import {IRegistry} from "./interfaces/messagebridge/IRegistry.sol";

// Libraries
-import {HeaderDecoder} from "./libraries/decoders/HeaderDecoder.sol";
+import {HeaderLib} from "./libraries/HeaderLib.sol";
import {MessagesDecoder} from "./libraries/decoders/MessagesDecoder.sol";
import {Hash} from "./libraries/Hash.sol";
import {Errors} from "./libraries/Errors.sol";
@@ -21,7 +21,7 @@ import {AvailabilityOracle} from "./availability_oracle/AvailabilityOracle.sol";
/**
* @title Rollup
* @author Aztec Labs
- * @notice Rollup contract that are concerned about readability and velocity of development
+ * @notice Rollup contract that is concerned about readability and velocity of development
* not giving a damn about gas costs.
*/
contract Rollup is IRollup {
@@ -30,7 +30,7 @@ contract Rollup is IRollup {
uint256 public immutable VERSION;
AvailabilityOracle public immutable AVAILABILITY_ORACLE;

-  bytes32 public rollupStateHash;
+  bytes32 public archive; // Root of the archive tree
uint256 public lastBlockTs;
// Tracks the last time time was warped on L2 ("warp" is the testing cheatcode).
// See https://github.com/AztecProtocol/aztec-packages/issues/1614
@@ -44,23 +44,28 @@ contract Rollup is IRollup {
}

/**
-   * @notice Process an incoming L2Block and progress the state
+   * @notice Process an incoming L2 block and progress the state
+   * @param _header - The L2 block header
+   * @param _archive - A root of the archive tree after the L2 block is applied
+   * @param _body - The L2 block body
   * @param _proof - The proof of correct execution
-   * @param _l2Block - The L2Block data, formatted as outlined in `Decoder.sol`
*/
-  function process(bytes memory _proof, bytes calldata _l2Block) external override(IRollup) {
-    _constrainGlobals(_l2Block);
-
-    // Decode the header
-    (uint256 l2BlockNumber, bytes32 oldStateHash, bytes32 newStateHash) =
-      HeaderDecoder.decode(_l2Block[:HeaderDecoder.BLOCK_HEADER_SIZE]);
+  function process(
+    bytes calldata _header,
+    bytes32 _archive,
+    bytes calldata _body, // TODO(#3944): this will be replaced with _txsHash once the separation is finished.
+    bytes memory _proof
+  ) external override(IRollup) {
+    // Decode and validate header
+    HeaderLib.Header memory header = HeaderLib.decode(_header);
+    HeaderLib.validate(header, VERSION, lastBlockTs, archive);

// Check if the data is available using availability oracle (change availability oracle if you want a different DA layer)
bytes32 txsHash;
{
// @todo @LHerskind Hack such that the node is unchanged for now.
// should be removed when we have a proper block publication.
-      txsHash = AVAILABILITY_ORACLE.publish(_l2Block[HeaderDecoder.BLOCK_HEADER_SIZE:]);
+      txsHash = AVAILABILITY_ORACLE.publish(_body);
}

if (!AVAILABILITY_ORACLE.isAvailable(txsHash)) {
@@ -70,25 +75,18 @@

// Decode the cross-chain messages
(bytes32 inHash,, bytes32[] memory l1ToL2Msgs, bytes32[] memory l2ToL1Msgs) =
-      MessagesDecoder.decode(_l2Block[HeaderDecoder.BLOCK_HEADER_SIZE:]);
-
-    bytes32 publicInputHash =
-      _computePublicInputHash(_l2Block[:HeaderDecoder.BLOCK_HEADER_SIZE], txsHash, inHash);
-
-    // @todo @LHerskind Proper genesis state. If the state is empty, we allow anything for now.
-    // TODO(#3936): Temporarily disabling this because L2Block encoding has not yet been updated.
-    // if (rollupStateHash != bytes32(0) && rollupStateHash != oldStateHash) {
-    //   revert Errors.Rollup__InvalidStateHash(rollupStateHash, oldStateHash);
-    // }
+      MessagesDecoder.decode(_body);

bytes32[] memory publicInputs = new bytes32[](1);
-    publicInputs[0] = publicInputHash;
+    publicInputs[0] = _computePublicInputHash(_header, txsHash, inHash);

+    // @todo @benesjan We will need `nextAvailableLeafIndex` of archive to verify the proof. This value is equal to
+    // current block number which is stored in the header (header.globalVariables.blockNumber).
if (!VERIFIER.verify(_proof, publicInputs)) {
revert Errors.Rollup__InvalidProof();
}

-    rollupStateHash = newStateHash;
+    archive = _archive;
lastBlockTs = block.timestamp;

// @todo (issue #605) handle fee collector
@@ -98,34 +96,7 @@
IOutbox outbox = REGISTRY.getOutbox();
outbox.sendL1Messages(l2ToL1Msgs);

-    emit L2BlockProcessed(l2BlockNumber);
-  }
-
-  function _constrainGlobals(bytes calldata _header) internal view {
-    uint256 chainId = uint256(bytes32(_header[:0x20]));
-    uint256 version = uint256(bytes32(_header[0x20:0x40]));
-    uint256 ts = uint256(bytes32(_header[0x60:0x80]));
-    // block number already constrained by start state hash
-
-    if (block.chainid != chainId) {
-      revert Errors.Rollup__InvalidChainId(chainId, block.chainid);
-    }
-
-    if (version != VERSION) {
-      revert Errors.Rollup__InvalidVersion(version, VERSION);
-    }
-
-    if (ts > block.timestamp) {
-      revert Errors.Rollup__TimestampInFuture();
-    }
-
-    // @todo @LHerskind consider if this is too strict
-    // This will make multiple l2 blocks in the same l1 block impractical.
-    // e.g., the first block will update timestamp which will make the second fail.
-    // Could possibly allow multiple blocks if in same l1 block
-    if (ts < lastBlockTs) {
-      revert Errors.Rollup__TimestampTooOld();
-    }
+    emit L2BlockProcessed(header.globalVariables.blockNumber);
}

function _computePublicInputHash(bytes calldata _header, bytes32 _txsHash, bytes32 _inHash)
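For orientation, a minimal sketch of how a caller would invoke the reworked entry point; the import path, contract name and placeholder block data are illustrative, not code from this commit:

// Illustrative only: shows the new call shape of Rollup.process after this change.
pragma solidity >=0.8.18;

import {IRollup} from "./interfaces/IRollup.sol";

contract RollupCaller {
  IRollup internal immutable ROLLUP;

  constructor(IRollup _rollup) {
    ROLLUP = _rollup;
  }

  function publishBlock(
    bytes calldata _header, // 376-byte serialized header (see HeaderLib below)
    bytes32 _archive, // root of the archive tree after the block is applied
    bytes calldata _body, // block body; to be replaced by _txsHash per TODO(#3944)
    bytes memory _proof
  ) external {
    // Previously a single call: process(_proof, _l2Block). The block is now
    // passed as header + archive root + body, with the proof as the last argument.
    ROLLUP.process(_header, _archive, _body, _proof);
  }
}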
9 changes: 7 additions & 2 deletions l1-contracts/src/core/interfaces/IRollup.sol
@@ -3,7 +3,12 @@
pragma solidity >=0.8.18;

interface IRollup {
-  event L2BlockProcessed(uint256 indexed blockNum);
+  event L2BlockProcessed(uint256 indexed blockNumber);

-  function process(bytes memory _proof, bytes calldata _l2Block) external;
+  function process(
+    bytes calldata _header,
+    bytes32 _archive,
+    bytes calldata _body,
+    bytes memory _proof
+  ) external;
}
2 changes: 1 addition & 1 deletion l1-contracts/src/core/libraries/Errors.sol
@@ -48,7 +48,7 @@ library Errors {
); // 0x5e789f34

// Rollup
-  error Rollup__InvalidStateHash(bytes32 expected, bytes32 actual); // 0xa3cfaab3
+  error Rollup__InvalidArchive(bytes32 expected, bytes32 actual); // 0xb682a40e
error Rollup__InvalidProof(); // 0xa5b2ba17
error Rollup__InvalidChainId(uint256 expected, uint256 actual); // 0x37b5bc12
error Rollup__InvalidVersion(uint256 expected, uint256 actual); // 0x9ef30794
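A side note on the selector comments above: a Solidity custom error's 4-byte selector is derived the same way as a function selector, so renaming Rollup__InvalidStateHash to Rollup__InvalidArchive changes the annotated value. A small sketch of how such an annotation could be recomputed (the library below is illustrative, not part of this commit):

// Illustrative only: custom error selectors are the first 4 bytes of keccak256 of the signature.
pragma solidity >=0.8.18;

library ErrorSelectorExample {
  error Rollup__InvalidArchive(bytes32 expected, bytes32 actual);

  function selector() internal pure returns (bytes4) {
    // The diff above annotates this error as 0xb682a40e.
    return bytes4(keccak256("Rollup__InvalidArchive(bytes32,bytes32)"));
  }

  function matchesBuiltin() internal pure returns (bool) {
    // Solidity >=0.8.4 also exposes the selector directly on the error.
    return Rollup__InvalidArchive.selector == selector();
  }
}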
154 changes: 154 additions & 0 deletions l1-contracts/src/core/libraries/HeaderLib.sol
@@ -0,0 +1,154 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 Aztec Labs.
pragma solidity >=0.8.18;

// Libraries
import {Errors} from "./Errors.sol";
import {Constants} from "./ConstantsGen.sol";
import {Hash} from "./Hash.sol";

/**
* @title Header Library
* @author Aztec Labs
* @notice Decoding and validating an L2 block header
* Concerned with readability and velocity of development not giving a damn about gas costs.
*
* -------------------
* You can use https://gist.github.com/LHerskind/724a7e362c97e8ac2902c6b961d36830 to generate the below outline.
* -------------------
* L2 Block Header specification
* -------------------
*
* | byte start | num bytes | name
* | --- | --- | ---
* | | | Header {
* | | | GlobalVariables {
* | 0x0000 | 0x20 | chainId
* | 0x0020 | 0x20 | version
* | 0x0040 | 0x20 | blockNumber
* | 0x0060 | 0x20 | timestamp
* | | | }
* | | | StateReference {
* | 0x0080 | 0x20 | l1ToL2MessageTree.root
* | 0x00a0 | 0x04 | l1ToL2MessageTree.nextAvailableLeafIndex
* | | | PartialStateReference {
* | 0x00a4 | 0x20 | noteHashTree.root
* | 0x00c4 | 0x04 | noteHashTree.nextAvailableLeafIndex
* | 0x00c8 | 0x20 | nullifierTree.root
* | 0x00e8 | 0x04 | nullifierTree.nextAvailableLeafIndex
* | 0x00ec | 0x20 | contractTree.root
* | 0x010c | 0x04 | contractTree.nextAvailableLeafIndex
* | 0x0110 | 0x20 | publicDataTree.root
* | 0x0130 | 0x04 | publicDataTree.nextAvailableLeafIndex
* | | | }
* | | | }
* | 0x0134 | 0x20 | lastArchive.root
* | 0x0154 | 0x04 | lastArchive.nextAvailableLeafIndex
* | 0x0158 | 0x20 | bodyHash
* | | | }
* | --- | --- | ---
*/
library HeaderLib {
struct AppendOnlyTreeSnapshot {
bytes32 root;
uint32 nextAvailableLeafIndex;
}

struct GlobalVariables {
uint256 chainId;
uint256 version;
uint256 blockNumber;
uint256 timestamp;
}

struct PartialStateReference {
AppendOnlyTreeSnapshot noteHashTree;
AppendOnlyTreeSnapshot nullifierTree;
AppendOnlyTreeSnapshot contractTree;
AppendOnlyTreeSnapshot publicDataTree;
}

struct StateReference {
AppendOnlyTreeSnapshot l1ToL2MessageTree;
// Note: Can't use "partial" name here as in yellow paper because it is a reserved solidity keyword
PartialStateReference partialStateReference;
}

struct Header {
GlobalVariables globalVariables;
StateReference stateReference;
AppendOnlyTreeSnapshot lastArchive;
bytes32 bodyHash;
}

/**
* @notice Decodes the header
* @param _header - The header calldata
* @return The decoded header
*/
function decode(bytes calldata _header) internal pure returns (Header memory) {
require(_header.length == 376, "Invalid header length");

Header memory header;

header.globalVariables.chainId = uint256(bytes32(_header[:0x20]));
header.globalVariables.version = uint256(bytes32(_header[0x20:0x40]));
header.globalVariables.blockNumber = uint256(bytes32(_header[0x40:0x60]));
header.globalVariables.timestamp = uint256(bytes32(_header[0x60:0x80]));
header.stateReference.l1ToL2MessageTree =
AppendOnlyTreeSnapshot(bytes32(_header[0x80:0xa0]), uint32(bytes4(_header[0xa0:0xa4])));
header.stateReference.partialStateReference.noteHashTree =
AppendOnlyTreeSnapshot(bytes32(_header[0xa4:0xc4]), uint32(bytes4(_header[0xc4:0xc8])));
header.stateReference.partialStateReference.nullifierTree =
AppendOnlyTreeSnapshot(bytes32(_header[0xc8:0xe8]), uint32(bytes4(_header[0xe8:0xec])));
header.stateReference.partialStateReference.contractTree =
AppendOnlyTreeSnapshot(bytes32(_header[0xec:0x10c]), uint32(bytes4(_header[0x10c:0x110])));
header.stateReference.partialStateReference.publicDataTree =
AppendOnlyTreeSnapshot(bytes32(_header[0x110:0x130]), uint32(bytes4(_header[0x130:0x134])));
header.lastArchive =
AppendOnlyTreeSnapshot(bytes32(_header[0x134:0x154]), uint32(bytes4(_header[0x154:0x158])));

header.bodyHash = bytes32(_header[0x158:0x178]);

return header;
}

/**
* @notice Validates the header
* @param _header - The decoded header
* @param _version - The expected version
* @param _lastBlockTs - The timestamp of the last block
* @param _archive - The expected archive root
*/
function validate(Header memory _header, uint256 _version, uint256 _lastBlockTs, bytes32 _archive)
internal
view
{
if (block.chainid != _header.globalVariables.chainId) {
revert Errors.Rollup__InvalidChainId(_header.globalVariables.chainId, block.chainid);
}

if (_header.globalVariables.version != _version) {
revert Errors.Rollup__InvalidVersion(_header.globalVariables.version, _version);
}

// block number already constrained by archive root check

if (_header.globalVariables.timestamp > block.timestamp) {
revert Errors.Rollup__TimestampInFuture();
}

// @todo @LHerskind consider if this is too strict
// This will make multiple l2 blocks in the same l1 block impractical.
// e.g., the first block will update timestamp which will make the second fail.
// Could possibly allow multiple blocks if in same l1 block
if (_header.globalVariables.timestamp < _lastBlockTs) {
revert Errors.Rollup__TimestampTooOld();
}

// @todo @LHerskind Proper genesis state. If the state is empty, we allow anything for now.
if (_archive != bytes32(0) && _archive != _header.lastArchive.root) {
revert Errors.Rollup__InvalidArchive(_archive, _header.lastArchive.root);
}
}
}
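As a quick sanity check on the layout table and the require(_header.length == 376, "Invalid header length") guard in decode, the field sizes add up as follows (the constants are illustrative, not part of the library):

// Illustrative only: the serialized header length implied by the layout above.
// 4 global variables of 32 bytes, 6 append-only tree snapshots of 32 + 4 bytes
// (l1ToL2MessageTree, the 4 partial-state trees, lastArchive), and a 32-byte bodyHash.
pragma solidity >=0.8.18;

uint256 constant GLOBAL_VARIABLES_SIZE = 4 * 0x20; // 128 bytes, ends at offset 0x80
uint256 constant TREE_SNAPSHOT_SIZE = 0x20 + 0x04; // 36 bytes per snapshot
uint256 constant HEADER_SIZE = GLOBAL_VARIABLES_SIZE + 6 * TREE_SNAPSHOT_SIZE + 0x20; // 128 + 216 + 32 = 376 = 0x178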