Improve BlobReader interface #794

Merged 12 commits on Sep 5, 2023
2 changes: 1 addition & 1 deletion adapters/celestia/Cargo.toml
@@ -48,6 +48,6 @@ prost-build = { version = "0.11" }

 [features]
 default = []
-native = ["dep:tokio", "dep:jsonrpsee", "dep:serde_json", "tendermint/default"]
+native = ["dep:tokio", "dep:jsonrpsee", "dep:serde_json", "tendermint/default", "sov-rollup-interface/native"]
 bench = ["zk-cycle-macros/bench", "risc0-zkvm", "risc0-zkvm-platform"]
 verifier = []
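
Note: the only change above is forwarding the adapter's `native` feature to `sov-rollup-interface/native`. That is what brings the feature-gated blob-reading methods used later in this PR (`advance`, `full_data`) into scope. A minimal sketch of the forwarding pattern, with illustrative names:

    // In the interface crate: API that exists only in native builds.
    #[cfg(feature = "native")]
    pub fn native_only() -> &'static str {
        "compiled only when the `native` feature is enabled"
    }

    // A downstream crate opts in by forwarding the feature in its Cargo.toml:
    //
    //     [features]
    //     native = ["sov-rollup-interface/native"]
    //
    // In a zk build the feature stays off, so an accidental use of the
    // native-only API is a compile error rather than unsound proof logic.
    fn main() {
        #[cfg(feature = "native")]
        println!("{}", native_only());
    }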
41 changes: 21 additions & 20 deletions adapters/celestia/src/verifier/mod.rs
@@ -2,7 +2,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
 use nmt_rs::NamespaceId;
 use serde::{Deserialize, Serialize};
 use sov_rollup_interface::da::{
-    self, BlobReaderTrait, BlockHashTrait as BlockHash, BlockHeaderTrait, CountedBufReader, DaSpec,
+    self, BlobReaderTrait, BlockHashTrait as BlockHash, BlockHeaderTrait, DaSpec,
 };
 use sov_rollup_interface::digest::Digest;
 use sov_rollup_interface::zk::ValidityCondition;
@@ -18,7 +18,7 @@ use zk_cycle_macros::cycle_tracker;

 use self::address::CelestiaAddress;
 use crate::share_commit::recreate_commitment;
-use crate::shares::{read_varint, BlobIterator, NamespaceGroup, Share};
+use crate::shares::{read_varint, NamespaceGroup, Share};
 use crate::types::ValidationError;
 use crate::{pfb_from_iter, BlobWithSender, CelestiaHeader, DataAvailabilityHeader};

@@ -30,25 +30,28 @@ pub const PFB_NAMESPACE: NamespaceId = NamespaceId(hex_literal::hex!("0000000000
 pub const PARITY_SHARES_NAMESPACE: NamespaceId = NamespaceId(hex_literal::hex!("ffffffffffffffff"));

 impl BlobReaderTrait for BlobWithSender {
-    type Data = BlobIterator;
     type Address = CelestiaAddress;

     fn sender(&self) -> CelestiaAddress {
         self.sender.clone()
     }

-    // Creates a new BufWithCounter structure to read the data
-    fn data_mut(&mut self) -> &mut CountedBufReader<Self::Data> {
-        &mut self.blob
+    fn hash(&self) -> [u8; 32] {
+        self.hash
     }

-    // Creates a new BufWithCounter structure to read the data
-    fn data(&self) -> &CountedBufReader<Self::Data> {
-        &self.blob
+    fn verified_data(&self) -> &[u8] {
+        self.blob.accumulator()
     }

-    fn hash(&self) -> [u8; 32] {
-        self.hash
+    #[cfg(feature = "native")]
+    fn advance(&mut self, num_bytes: usize) -> &[u8] {
+        self.blob.advance(num_bytes);
+        self.verified_data()
     }
+
+    fn total_len(&self) -> usize {
+        self.blob.total_len()
+    }
 }
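
Note: the revised trait definition itself is not part of this diff. Inferred from the impl above and the call sites below, it plausibly has this shape (a sketch; the real definition in sov-rollup-interface may differ in bounds, docs, and provided methods):

    pub trait BlobReaderTrait: Send + Sync {
        type Address;

        fn sender(&self) -> Self::Address;
        fn hash(&self) -> [u8; 32];

        /// The portion of the blob that has been verified so far.
        fn verified_data(&self) -> &[u8];

        /// Length of the whole blob, known up front.
        fn total_len(&self) -> usize;

        /// Reveal `num_bytes` more bytes; only possible natively, where the
        /// full blob is on hand.
        #[cfg(feature = "native")]
        fn advance(&mut self, num_bytes: usize) -> &[u8];

        /// Convenience wrapper: reveal everything, then return it.
        #[cfg(feature = "native")]
        fn full_data(&mut self) -> &[u8] {
            self.advance(self.total_len());
            self.verified_data()
        }
    }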

@@ -255,16 +258,14 @@ impl da::DaVerifier for CelestiaVerifier {
             let mut blob_data = vec![0; blob_iter.remaining()];
             blob_iter.copy_to_slice(blob_data.as_mut_slice());

-            let tx_data = tx.data().accumulator();
+            let tx_data = tx.verified_data();

-            match tx_data {
-                da::Accumulator::Completed(tx_data) => {
-                    assert_eq!(blob_data, *tx_data);
-                }
-                // For now we bail and return, maybe want to change that behaviour in the future
-                da::Accumulator::InProgress(_) => {
-                    return Err(ValidationError::IncompleteData);
-                }
+            assert!(
+                tx_data.len() <= blob_data.len(),
+                "claimed data must not be larger than blob data"
+            );
+            for (l, r) in tx_data.iter().zip(blob_data.iter()) {
+                assert_eq!(l, r, "claimed data must match observed data");
             }

             // Link blob commitment to e-tx commitment
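
Note: `BlobWithSender::blob` is still backed by a `CountedBufReader`; only the trait surface changed. From the calls in this file (`accumulator`, `advance`, `total_len`), the accumulator behaves roughly like this sketch (field names are illustrative):

    use bytes::Buf;

    pub struct CountedBufReader<B: Buf> {
        inner: B,             // unread remainder of the blob
        accumulator: Vec<u8>, // bytes revealed (verified) so far
    }

    impl<B: Buf> CountedBufReader<B> {
        /// Everything revealed so far, as one contiguous slice.
        pub fn accumulator(&self) -> &[u8] {
            &self.accumulator
        }

        /// Full blob length: revealed bytes plus the unread remainder.
        pub fn total_len(&self) -> usize {
            self.accumulator.len() + self.inner.remaining()
        }

        /// Move up to `num_bytes` from the inner buffer into the accumulator.
        pub fn advance(&mut self, num_bytes: usize) {
            let n = num_bytes.min(self.inner.remaining());
            let start = self.accumulator.len();
            self.accumulator.resize(start + n, 0);
            self.inner.copy_to_slice(&mut self.accumulator[start..]);
        }
    }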
2 changes: 1 addition & 1 deletion examples/demo-simple-stf/Cargo.toml
@@ -14,7 +14,7 @@ serde = { workspace = true }
 sha2 = { workspace = true }
 hex = { workspace = true }

-sov-rollup-interface = { path = "../../rollup-interface" }
+sov-rollup-interface = { path = "../../rollup-interface", features = ["native"] }

 [dev-dependencies]
 sov-rollup-interface = { path = "../../rollup-interface", features = ["mocks"] }
15 changes: 2 additions & 13 deletions examples/demo-simple-stf/src/lib.rs
@@ -1,6 +1,5 @@
 #![deny(missing_docs)]
 #![doc = include_str!("../README.md")]
-use std::io::Read;
 use std::marker::PhantomData;

 use sha2::Digest;
@@ -69,20 +68,10 @@ impl<Vm: Zkvm, Cond: ValidityCondition, B: BlobReaderTrait> StateTransitionFunction
 {
         let mut receipts = vec![];
         for blob in blobs {
-            let blob_data = blob.data_mut();
-
-            // Read the data from the blob as a byte vec.
-            let mut data = Vec::new();
-
-            // Panicking within the `StateTransitionFunction` is generally not recommended.
-            // But here, if we encounter an error while reading the bytes,
-            // it suggests a serious issue with the DA layer or our setup.
-            blob_data
-                .read_to_end(&mut data)
-                .unwrap_or_else(|e| panic!("Unable to read blob data {}", e));
+            let data = blob.full_data();

             // Check if the sender submitted the preimage of the hash.
-            let hash = sha2::Sha256::digest(&data).into();
+            let hash = sha2::Sha256::digest(data).into();
             let desired_hash = [
                 102, 104, 122, 173, 248, 98, 189, 119, 108, 143, 193, 139, 142, 159, 142, 32, 8,
                 151, 20, 133, 110, 226, 51, 179, 144, 42, 89, 29, 13, 95, 41, 37,
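
Note: this is also why the crate's Cargo.toml above gains `features = ["native"]`: `full_data` is gated on the `native` feature, and the demo STF runs natively. The rewrite replaces a fallible streaming read with a plain borrow, so the panic path disappears. Assuming the trait shape sketched earlier:

    use sha2::Digest;
    use sov_rollup_interface::da::BlobReaderTrait;

    // Hashing a blob natively is now allocation-free and infallible:
    // no intermediate Vec and no io::Error to unwrap.
    fn blob_digest<B: BlobReaderTrait>(blob: &mut B) -> [u8; 32] {
        sha2::Sha256::digest(blob.full_data()).into()
    }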
18 changes: 4 additions & 14 deletions full-node/sov-sequencer/src/lib.rs
@@ -134,8 +134,6 @@ pub enum SubmitTransactionResponse {
 #[cfg(test)]
 mod tests {

-    use std::io::Read;
-
     use sov_rollup_interface::da::BlobReaderTrait;
     use sov_rollup_interface::mocks::{MockAddress, MockDaService};

@@ -204,17 +202,13 @@ mod tests {
         let arg: &[u8] = &[];
         let _: String = rpc.call("sequencer_publishBatch", arg).await.unwrap();

-        let mut block = vec![];
         let mut submitted_block = da_service.get_block_at(0).await.unwrap();
-        let _ = submitted_block.blobs[0]
-            .data_mut()
-            .read_to_end(&mut block)
-            .unwrap();
+        let block_data = submitted_block.blobs[0].full_data();

         // First bytes of each tx, flattened
         let blob: Vec<Vec<u8>> = vec![vec![tx1[0]], vec![tx2[0]]];
         let expected: Vec<u8> = borsh::to_vec(&blob).unwrap();
-        assert_eq!(expected, block);
+        assert_eq!(expected, block_data);
     }

     #[tokio::test]
@@ -233,17 +227,13 @@ mod tests {
         let arg: &[u8] = &[];
         let _: String = rpc.call("sequencer_publishBatch", arg).await.unwrap();

-        let mut block = vec![];
         let mut submitted_block = da_service.get_block_at(0).await.unwrap();
-        let _ = submitted_block.blobs[0]
-            .data_mut()
-            .read_to_end(&mut block)
-            .unwrap();
+        let block_data = submitted_block.blobs[0].full_data();

         // First bytes of each tx, flattened
         let blob: Vec<Vec<u8>> = vec![vec![tx[0]]];
         let expected: Vec<u8> = borsh::to_vec(&blob).unwrap();
-        assert_eq!(expected, block);
+        assert_eq!(expected, block_data);
     }

     #[tokio::test]
@@ -1,5 +1,3 @@
-use std::io::Read;
-
 use sov_bank::TokenConfig;
 use sov_blob_storage::BlobStorage;
 use sov_chain_state::{ChainState, ChainStateConfig};
@@ -603,16 +601,9 @@ fn blobs_are_equal<B: BlobReaderTrait>(
         slot_hint
     );

-    let mut read_actual = vec![];
-    actual_inner
-        .data_mut()
-        .read_to_end(&mut read_actual)
-        .unwrap();
-
-    let mut read_expected = vec![];
-    expected.data_mut().read_to_end(&mut read_expected).unwrap();
     assert_eq!(
-        read_expected, read_actual,
+        actual_inner.full_data(),
+        expected.full_data(),
         "incorrect data read in {}",
         slot_hint
     );
23 changes: 17 additions & 6 deletions module-system/sov-modules-stf-template/src/app_template.rs
@@ -2,9 +2,7 @@ use std::marker::PhantomData;

 use borsh::BorshDeserialize;
 use sov_modules_api::{BasicAddress, BlobReaderTrait, Context, DaSpec, DispatchCall};
-use sov_rollup_interface::da::CountedBufReader;
 use sov_rollup_interface::stf::{BatchReceipt, TransactionReceipt};
-use sov_rollup_interface::Buf;
 use sov_state::StateCheckpoint;
 use tracing::{debug, error};

@@ -116,7 +114,7 @@
             // TODO: don't ignore these events: https://github.com/Sovereign-Labs/sovereign/issues/350
             let _ = batch_workspace.take_events();

-            let (txs, messages) = match self.pre_process_batch(blob.data_mut()) {
+            let (txs, messages) = match self.pre_process_batch(blob) {
                 Ok((txs, messages)) => (txs, messages),
                 Err(reason) => {
                     // Explicitly revert on slashing, even though nothing has changed in pre_process.
@@ -242,7 +240,7 @@
     // Do all stateless checks and data formatting that can result in sequencer slashing
     fn pre_process_batch(
         &self,
-        blob_data: &mut CountedBufReader<impl Buf>,
+        blob_data: &mut impl BlobReaderTrait,
     ) -> Result<
         (
             Vec<TransactionAndRawHash<C>>,
@@ -264,11 +262,14 @@
     // Attempt to deserialize batch, error results in sequencer slashing.
     fn deserialize_batch(
         &self,
-        blob_data: &mut CountedBufReader<impl Buf>,
+        blob_data: &mut impl BlobReaderTrait,
     ) -> Result<Batch, SlashingReason> {
-        match Batch::deserialize_reader(blob_data) {
+        match Batch::try_from_slice(data_for_deserialization(blob_data)) {
             Ok(batch) => Ok(batch),
             Err(e) => {
+                // If the deserialization fails, we need to make sure it's not because the prover was malicious and left
+                // out some relevant data! Make that check here. If the data is missing, panic.
+                assert_eq!(blob_data.verified_data().len(), blob_data.total_len(), "Batch deserialization failed and some data was not provided. The prover might be malicious");
                 error!(
                     "Unable to deserialize batch provided by the sequencer {}",
                     e
@@ -312,3 +313,13 @@
         Ok(decoded_messages)
     }
 }
+
+#[cfg(feature = "native")]
+fn data_for_deserialization(blob: &mut impl BlobReaderTrait) -> &[u8] {
+    blob.full_data()
+}
+
+#[cfg(not(feature = "native"))]
+fn data_for_deserialization(blob: &mut impl BlobReaderTrait) -> &[u8] {
+    blob.verified_data()
+}
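
Note: the two `data_for_deserialization` variants encode the central design point of this PR. Natively, the node always holds the whole blob; in a zk run, the circuit sees only the bytes the prover revealed. When deserialization fails, the `assert_eq!` above separates "the sequencer posted garbage" (every byte was available, so slash) from "the prover withheld data" (panic and reject the proof). A hypothetical walk-through; `MockBlob::partially_revealed` is an illustrative stand-in, not an API from this PR:

    // Hypothetical zk-mode scenario: 64 bytes posted on the DA layer,
    // but the proof revealed only the first 32.
    fn withheld_data_is_the_provers_fault(batch_bytes: Vec<u8>) {
        let blob = MockBlob::partially_revealed(batch_bytes, 32);
        assert_eq!(blob.total_len(), 64);
        assert_eq!(blob.verified_data().len(), 32);

        // In a zk build, data_for_deserialization returns the 32-byte
        // prefix, Batch::try_from_slice fails on the truncated input, and
        // deserialize_batch's assert_eq!(32, 64) panics: the proof is
        // rejected instead of the sequencer being slashed.
    }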