diff --git a/.vscode/settings.json b/.vscode/settings.json index eddc5902d..33cd79152 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,11 +1,11 @@ { - "files.trimTrailingWhitespace": true, - "rust-analyzer.rustfmt.extraArgs": ["+nightly"], - "rust-analyzer.checkOnSave": false, - "coverage-gutters.coverageBaseDir": "coverage", - "coverage-gutters.coverageFileNames": [ - "pallet-storage-provider/lcov.info", - "pallet-market/lcov.info", - "mater/lcov.info" - ] + "files.trimTrailingWhitespace": true, + "rust-analyzer.rustfmt.extraArgs": ["+nightly"], + "rust-analyzer.checkOnSave": false, + "coverage-gutters.coverageBaseDir": "coverage", + "coverage-gutters.coverageFileNames": [ + "pallet-storage-provider/lcov.info", + "pallet-market/lcov.info", + "mater/lcov.info" + ] } diff --git a/Cargo.lock b/Cargo.lock index f46d4db28..153a7e116 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8223,6 +8223,7 @@ dependencies = [ "pallet-balances", "pallet-storage-provider", "parity-scale-codec", + "primitives-commitment", "primitives-proofs", "scale-info", "sp-arithmetic 26.0.0", @@ -8698,6 +8699,7 @@ dependencies = [ "pallet-balances", "pallet-market", "parity-scale-codec", + "primitives-commitment", "primitives-proofs", "rstest", "scale-info", @@ -9404,11 +9406,11 @@ dependencies = [ "fr32", "futures", "hex", - "ipld-core", "jsonrpsee 0.23.2", "mater", "parity-scale-codec", "polka-storage-proofs", + "primitives-commitment", "primitives-proofs", "rand", "rocksdb", @@ -10860,6 +10862,16 @@ dependencies = [ "uint", ] +[[package]] +name = "primitives-commitment" +version = "0.1.0" +dependencies = [ + "cid 0.11.1", + "primitives-proofs", + "rand", + "sha2 0.10.8", +] + [[package]] name = "primitives-proofs" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index ff2cc50c0..e0be573f1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ members = [ "pallets/market", "pallets/proofs", "pallets/storage-provider", + "primitives/commitment", "primitives/proofs", "runtime", "storage/mater", @@ -126,6 +127,7 @@ pallet-proofs = { path = "pallets/proofs", default-features = false } pallet-storage-provider = { path = "pallets/storage-provider", default-features = false } polka-storage-proofs = { path = "lib/polka-storage-proofs", default-features = false } polka-storage-runtime = { path = "runtime" } +primitives-commitment = { path = "primitives/commitment" } primitives-proofs = { path = "primitives/proofs", default-features = false } storagext = { path = "cli/polka-storage/storagext" } diff --git a/cli/artifacts/metadata.scale b/cli/artifacts/metadata.scale index 88a8c0484..5194aa60f 100644 Binary files a/cli/artifacts/metadata.scale and b/cli/artifacts/metadata.scale differ diff --git a/cli/polka-storage-provider/Cargo.toml b/cli/polka-storage-provider/Cargo.toml index f7e8bd66a..2b77e8c77 100644 --- a/cli/polka-storage-provider/Cargo.toml +++ b/cli/polka-storage-provider/Cargo.toml @@ -23,9 +23,9 @@ filecoin-hashers.workspace = true fr32.workspace = true futures = { workspace = true } hex = { workspace = true } -ipld-core.workspace = true jsonrpsee = { workspace = true, features = ["http-client", "macros", "server", "ws-client"] } polka-storage-proofs = { workspace = true, features = ["std", "substrate"] } +primitives-commitment = { workspace = true } primitives-proofs = { workspace = true, features = ["std"] } rand = { workspace = true } rocksdb = { workspace = true } diff --git a/cli/polka-storage-provider/src/commands/utils.rs b/cli/polka-storage-provider/src/commands/utils.rs index 
52a2a4041..5270b4486 100644 --- a/cli/polka-storage-provider/src/commands/utils.rs +++ b/cli/polka-storage-provider/src/commands/utils.rs @@ -1,14 +1,20 @@ -use std::{fs::File, io::Write, path::PathBuf, str::FromStr}; +use std::{ + fs::File, + io::{BufReader, Write}, + path::PathBuf, + str::FromStr, +}; use mater::CarV2Reader; use polka_storage_proofs::{ porep::{self, sealer::Sealer}, types::PieceInfo, }; +use primitives_commitment::piece::PaddedPieceSize; use primitives_proofs::RegisteredSealProof; use crate::{ - commp::{calculate_piece_commitment, piece_commitment_cid, CommPError}, + commp::{calculate_piece_commitment, CommPError, ZeroPaddingReader}, CliError, }; @@ -67,18 +73,24 @@ impl UtilsCommand { .map_err(|e| UtilsCommandError::InvalidCARv2(input_path.clone(), e))?; // Calculate the piece commitment. - let mut source_file = File::open(&input_path)?; + let source_file = File::open(&input_path)?; let file_size = source_file.metadata()?.len(); + let buffered = BufReader::new(source_file); + let padded_piece_size = PaddedPieceSize::from_arbitrary_size(file_size as u64); + let mut zero_padding_reader = ZeroPaddingReader::new(buffered, *padded_piece_size); + // The calculate_piece_commitment blocks the thread. We could // use tokio::task::spawn_blocking to avoid this, but in this // case it doesn't matter because this is the only thing we are // working on. - let commitment = calculate_piece_commitment(&mut source_file, file_size) - .map_err(|err| UtilsCommandError::CommPError(err))?; - let cid = piece_commitment_cid(commitment); + let commitment = + calculate_piece_commitment(&mut zero_padding_reader, padded_piece_size) + .map_err(|err| UtilsCommandError::CommPError(err))?; + let cid = commitment.cid(); - println!("{cid}"); + println!("Piece commitment CID: {cid}"); + println!("Padded size: {padded_piece_size}"); } UtilsCommand::GeneratePoRepParams { seal_proof, diff --git a/cli/polka-storage-provider/src/commp.rs b/cli/polka-storage-provider/src/commp.rs index 8320b1733..696998073 100644 --- a/cli/polka-storage-provider/src/commp.rs +++ b/cli/polka-storage-provider/src/commp.rs @@ -1,36 +1,24 @@ -use std::io::{BufReader, Read}; +use std::io::Read; use filecoin_hashers::{ sha256::{Sha256Domain, Sha256Hasher}, Domain, }; -use fr32::{to_padded_bytes, Fr32Reader}; -use ipld_core::cid::multihash::Multihash; -use mater::Cid; -use storage_proofs_core::{merkle::BinaryMerkleTree, util::NODE_SIZE}; +use fr32::Fr32Reader; +use primitives_commitment::{piece::PaddedPieceSize, Commitment, CommitmentKind, NODE_SIZE}; +use storage_proofs_core::merkle::BinaryMerkleTree; use thiserror::Error; -/// SHA2-256 with the two most significant bits from the last byte zeroed (as -/// via a mask with 0b00111111) - used for proving trees as in Filecoin. -/// -/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L153 -pub const SHA2_256_TRUNC254_PADDED: u64 = 0x1012; - -/// Filecoin piece or sector data commitment merkle node/root (CommP & CommD) -/// -/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L554 -pub const FIL_COMMITMENT_UNSEALED: u64 = 0xf101; - /// Reader that returns zeros if the inner reader is empty. pub struct ZeroPaddingReader { /// The inner reader to read from. inner: R, /// The number of bytes this 0-padding reader has left to produce. 
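// Illustrative sketch (not part of the patch): how the pieces touched in this
// diff fit together when computing a piece commitment for an arbitrary file.
// `ZeroPaddingReader` and `calculate_piece_commitment` are the items changed
// below in `commp.rs`; the input path is a placeholder.
use std::{fs::File, io::BufReader};

use primitives_commitment::piece::PaddedPieceSize;

fn print_piece_commitment() -> Result<(), Box<dyn std::error::Error>> {
    let file = File::open("piece.car")?; // placeholder input
    let file_size = file.metadata()?.len();

    // Pad the raw size up to the next valid padded piece size (a power of two,
    // accounting for Fr32 expansion), then zero-fill the reader up to it.
    let padded_piece_size = PaddedPieceSize::from_arbitrary_size(file_size);
    let mut reader = ZeroPaddingReader::new(BufReader::new(file), *padded_piece_size);

    let commitment = calculate_piece_commitment(&mut reader, padded_piece_size)?;
    println!("Piece commitment CID: {}", commitment.cid());
    println!("Padded size: {padded_piece_size}");
    Ok(())
}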
- remaining: usize, + remaining: u64, } impl ZeroPaddingReader { - pub fn new(inner: R, total_size: usize) -> Self { + pub fn new(inner: R, total_size: u64) -> Self { Self { inner, remaining: total_size, @@ -45,7 +33,7 @@ impl Read for ZeroPaddingReader { } // Number of bytes that the reader will produce in this execution - let to_read = buf.len().min(self.remaining); + let to_read = buf.len().min(self.remaining as usize); // Number of bytes that we read from the inner reader let read = self.inner.read(&mut buf[..to_read])?; @@ -56,60 +44,32 @@ impl Read for ZeroPaddingReader { } // Decrease the number of bytes this 0-padding reader has left to produce. - self.remaining -= to_read; + self.remaining -= to_read as u64; // Return the number of bytes that we wrote to the buffer. Ok(to_read) } } -// Ensure that the padded piece size is valid. -pub fn ensure_piece_size(padded_piece_size: usize) -> Result<(), CommPError> { - if padded_piece_size < NODE_SIZE { - return Err(CommPError::PieceTooSmall); - } - - if padded_piece_size % NODE_SIZE != 0 { - return Err(CommPError::InvalidPieceSize(format!( - "padded_piece_size is not multiple of {NODE_SIZE}" - ))); - } - - Ok(()) -} - /// Calculate the piece commitment for a given data source. /// /// Reference — pub fn calculate_piece_commitment( source: R, - unpadded_piece_size: u64, -) -> Result<[u8; 32], CommPError> { - // Wrap the source in a BufReader for efficient reading. - let source = BufReader::new(source); + piece_size: PaddedPieceSize, +) -> Result { // This reader adds two zero bits to each 254 bits of data read from the source. - let fr32_reader = Fr32Reader::new(source); - // This is the padded piece size after we add 2 zero bits to each 254 bits of data. - let padded_piece_size = to_padded_bytes(unpadded_piece_size as usize); - // Final padded piece size should be 2^n where n is a positive integer. That - // is because we are using MerkleTree to calculate the piece commitment. - let padded_piece_size = padded_piece_size.next_power_of_two(); - - // Ensure that the piece size is valid, before generating a MerkeTree. - ensure_piece_size(padded_piece_size)?; - - // The reader that pads the source with zeros - let mut zero_padding_reader = ZeroPaddingReader::new(fr32_reader, padded_piece_size); + let mut fr32_reader = Fr32Reader::new(source); // Buffer used for reading data used for leafs. let mut buffer = [0; NODE_SIZE]; // Number of leafs - let num_leafs = padded_piece_size.div_ceil(NODE_SIZE) as usize; + let num_leafs = piece_size.div_ceil(NODE_SIZE as u64) as usize; // Elements iterator used by the MerkleTree. 
The elements returned by the // iterator represent leafs of the tree let elements_iterator = (0..num_leafs).map(|_| { - zero_padding_reader.read_exact(&mut buffer)?; + fr32_reader.read_exact(&mut buffer)?; let hash = Sha256Domain::try_from_bytes(&buffer)?; Ok(hash) }); @@ -122,20 +82,13 @@ pub fn calculate_piece_commitment( .write_bytes(&mut commitment) .expect("destination buffer large enough"); - Ok(commitment) -} + let commitment = Commitment::new(commitment, CommitmentKind::Piece); -/// Generate Cid from the piece commitment -pub fn piece_commitment_cid(piece_commitment: [u8; 32]) -> Cid { - let hash = Multihash::wrap(SHA2_256_TRUNC254_PADDED, &piece_commitment) - .expect("piece commitment not more than 64 bytes"); - Cid::new_v1(FIL_COMMITMENT_UNSEALED, hash) + Ok(commitment) } #[derive(Debug, Error)] pub enum CommPError { - #[error("Piece is too small")] - PieceTooSmall, #[error("Piece is not valid size: {0}")] InvalidPieceSize(String), #[error("Tree build error: {0}")] @@ -146,9 +99,12 @@ pub enum CommPError { #[cfg(test)] mod tests { - use std::io::Read; + use std::io::{Cursor, Read}; - use super::{calculate_piece_commitment, CommPError, ZeroPaddingReader}; + use primitives_commitment::piece::PaddedPieceSize; + use primitives_proofs::SectorSize; + + use super::{calculate_piece_commitment, ZeroPaddingReader}; #[test] fn test_zero_padding_reader() { @@ -177,25 +133,40 @@ mod tests { #[test] fn test_calculate_piece_commitment() { - use std::io::Cursor; - - let data = vec![2u8; 200]; + let data_size: u64 = 200; + let data = vec![2u8; data_size as usize]; let cursor = Cursor::new(data.clone()); + let padded_piece_size = PaddedPieceSize::from_arbitrary_size(data_size); + let zero_padding_reader = ZeroPaddingReader::new(cursor, *padded_piece_size); - let commitment = calculate_piece_commitment(cursor, data.len() as u64).unwrap(); + let commitment = + calculate_piece_commitment(zero_padding_reader, padded_piece_size).unwrap(); assert_eq!( - commitment, + commitment.raw(), [ 152, 58, 157, 235, 187, 58, 81, 61, 113, 252, 178, 149, 158, 13, 242, 24, 54, 98, 148, 15, 250, 217, 3, 24, 152, 110, 93, 173, 117, 209, 251, 37, ] ); + } - // Test with zero-length data - let empty_data = Vec::new(); - let empty_cursor = Cursor::new(empty_data); + #[test] + fn test_zero_piece_commitment() { + let size = SectorSize::_2KiB; + let padded_piece_size = PaddedPieceSize::new(size.bytes()).unwrap(); + let cursor = Cursor::new(vec![]); + let zero_padding_reader = ZeroPaddingReader::new(cursor, *padded_piece_size); + + let commitment = + calculate_piece_commitment(zero_padding_reader, padded_piece_size).unwrap(); + dbg!(commitment.raw()); - let empty_commitment = calculate_piece_commitment(empty_cursor, 0); - assert!(matches!(empty_commitment, Err(CommPError::PieceTooSmall))); + assert_eq!( + commitment.raw(), + [ + 252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79, + 36, 185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51 + ] + ); } } diff --git a/cli/polka-storage-provider/src/storage.rs b/cli/polka-storage-provider/src/storage.rs index 89bdb9558..f317beb2e 100644 --- a/cli/polka-storage-provider/src/storage.rs +++ b/cli/polka-storage-provider/src/storage.rs @@ -10,6 +10,7 @@ use axum::{ }; use futures::{TryFutureExt, TryStreamExt}; use mater::Cid; +use primitives_commitment::piece::PaddedPieceSize; use tokio::{ fs::{self, File}, io::{AsyncRead, BufWriter}, @@ -22,7 +23,7 @@ use tower_http::trace::TraceLayer; use uuid::Uuid; use crate::{ - 
commp::{calculate_piece_commitment, piece_commitment_cid, CommPError}, + commp::{calculate_piece_commitment, CommPError}, db::DealDB, }; @@ -206,8 +207,9 @@ async fn upload( let piece_commitment_cid = tokio::task::spawn_blocking(move || -> Result<_, CommPError> { let file = std::fs::File::open(&piece_path).map_err(CommPError::Io)?; let file_size = file.metadata().map_err(CommPError::Io)?.len(); + let file_size = PaddedPieceSize::new(file_size).map_err(|err| CommPError::InvalidPieceSize(err.to_string()))?; let piece_commitment = calculate_piece_commitment(file, file_size)?; - let piece_commitment_cid = piece_commitment_cid(piece_commitment); + let piece_commitment_cid = piece_commitment.cid(); tracing::debug!(path = %piece_path.display(), commp = %piece_commitment_cid, "calculated piece commitment"); Ok(piece_commitment_cid) }) diff --git a/maat/tests/real_world.rs b/maat/tests/real_world.rs index 85bfcd1ec..f0275bcf1 100644 --- a/maat/tests/real_world.rs +++ b/maat/tests/real_world.rs @@ -141,10 +141,10 @@ async fn publish_storage_deals( // Publish a storage deal let husky_storage_deal = DealProposal { piece_cid: cid::Cid::try_from( - "bafybeihxgc67fwhdoxo2klvmsetswdmwwz3brpwwl76qizbsl6ypro6vxq", + "baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i", ) .expect("valid CID"), - piece_size: 1278, + piece_size: 2048, client: alice.account_id().clone(), provider: charlie.account_id().clone(), label: "My lovely Husky (husky.jpg)".to_owned(), @@ -177,18 +177,24 @@ async fn pre_commit_sectors(client: &storagext::Client, charlie: &Keypa where Keypair: subxt::tx::Signer, { - // Pre commit sectors + // Unsealed sector commitment + let unsealed_cid = + cid::Cid::try_from("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") + .expect("valid CID"); + + // Sealed sector commitment. + // Currently a placeholder value. 
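// Illustrative note (not part of the patch): `piece_size` above changes from
// 1278 to 2048 because deals now carry the padded piece size. Assuming 1278
// was the raw payload size, `PaddedPieceSize::from_arbitrary_size` (added in
// primitives/commitment below) derives the new value like this:
//
//   1278 + 1278 / 127 = 1288    (Fr32 padding adds roughly one byte per 127)
//   1288.next_power_of_two() = 2048
//
// which also matches the 2 KiB sector size used by StackedDRG2KiBV1P1.
// Minimal sketch, assuming the primitives-commitment crate is available here:
use primitives_commitment::piece::PaddedPieceSize;

fn husky_padded_piece_size() {
    assert_eq!(*PaddedPieceSize::from_arbitrary_size(1278), 2048);
}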
let placeholder_cid = cid::Cid::try_from("bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu") - .unwrap(); + .expect("valid CID"); let sectors_pre_commit_info = vec![SectorPreCommitInfo { seal_proof: primitives_proofs::RegisteredSealProof::StackedDRG2KiBV1P1, sector_number: 1, - sealed_cid: placeholder_cid.clone(), + sealed_cid: placeholder_cid, deal_ids: vec![0], expiration: 165, - unsealed_cid: placeholder_cid, + unsealed_cid, }]; let result = client diff --git a/pallets/market/Cargo.toml b/pallets/market/Cargo.toml index f16a7bc15..59abe0ecd 100644 --- a/pallets/market/Cargo.toml +++ b/pallets/market/Cargo.toml @@ -21,6 +21,7 @@ codec = { workspace = true, default-features = false, features = ["derive"] } hex = { workspace = true, default-features = false, features = ["alloc"] } log = { workspace = true } multihash-codetable = { workspace = true, features = ["blake2b"] } +primitives-commitment = { workspace = true } primitives-proofs = { workspace = true, default-features = false } scale-info = { workspace = true, default-features = false, features = ["derive"] } diff --git a/pallets/market/src/lib.rs b/pallets/market/src/lib.rs index 66f1d812e..784266a48 100644 --- a/pallets/market/src/lib.rs +++ b/pallets/market/src/lib.rs @@ -36,7 +36,11 @@ pub mod pallet { PalletId, }; use frame_system::{pallet_prelude::*, Config as SystemConfig, Pallet as System}; - use multihash_codetable::{Code, MultihashDigest}; + use primitives_commitment::{ + commd::compute_unsealed_sector_commitment, + piece::{PaddedPieceSize, PieceInfo}, + Commitment, CommitmentKind, + }; use primitives_proofs::{ ActiveDeal, ActiveSector, DealId, Market, RegisteredSealProof, SectorDeal, SectorId, SectorNumber, SectorSize, StorageProviderValidation, MAX_DEALS_FOR_ALL_SECTORS, @@ -46,7 +50,6 @@ pub mod pallet { use sp_arithmetic::traits::BaseArithmetic; use sp_std::vec::Vec; - pub const CID_CODEC: u64 = 0x55; pub const LOG_TARGET: &'static str = "runtime::market"; /// Allows to extract Balance of an account via the Config::Currency associated type. @@ -213,6 +216,9 @@ pub mod pallet { // It maybe doable using newtype pattern, however not sure how the UI on the frontend side would handle that anyways. // There is Encode/Decode implementation though, through the feature flag: `scale-codec`. pub piece_cid: BoundedVec>, + /// The value represents the size of the data piece after padding to the + /// nearest power of two. Padding ensures that all pieces can be + /// efficiently arranged in a binary tree structure for Merkle proofs. pub piece_size: u64, /// Storage Client's Account Id pub client: Address, @@ -244,8 +250,10 @@ pub mod pallet { pub state: DealState, } - impl - DealProposal + impl DealProposal + where + Balance: BaseArithmetic + Copy, + BlockNumber: BaseArithmetic + Copy, { fn duration(&self) -> BlockNumber { self.end_block - self.start_block @@ -451,6 +459,8 @@ pub mod pallet { TooManyDealsPerBlock, /// Try to call an operation as a storage provider but the account is not registered as a storage provider. StorageProviderNotRegistered, + /// CommD related error + CommD, } pub enum DealActivationError { @@ -515,6 +525,10 @@ pub mod pallet { DealDurationOutOfBounds, /// Deal's piece_cid is invalid. InvalidPieceCid(cid::Error), + /// Deal's piece_size is invalid. 
+ InvalidPieceSize(&'static str), + /// CommD related error + CommD(&'static str), } impl core::fmt::Debug for ProposalError { @@ -541,6 +555,12 @@ pub mod pallet { ProposalError::InvalidPieceCid(_err) => { write!(f, "ProposalError::InvalidPieceCid") } + ProposalError::InvalidPieceSize(err) => { + write!(f, "ProposalError::InvalidPieceSize: {}", err) + } + ProposalError::CommD(err) => { + write!(f, "ProposalError::CommD: {}", err) + } } } } @@ -949,17 +969,34 @@ pub mod pallet { /// fn compute_commd<'a>( - _proposals: impl IntoIterator>, - _sector_type: RegisteredSealProof, + proposals: impl Iterator>, + sector_type: RegisteredSealProof, ) -> Result { - // TODO(@th7nder,#92,21/06/2024): - // https://github.com/filecoin-project/rust-fil-proofs/blob/daec42b64ae6bf9a537545d5f116d57b9a29cc11/filecoin-proofs/src/pieces.rs#L85 - let cid = Cid::new_v1( - CID_CODEC, - Code::Blake2b256.digest(b"placeholder-to-be-done"), - ); + let pieces = proposals + .map(|p| { + let cid = p.cid()?; + let commitment = Commitment::from_cid(&cid, CommitmentKind::Piece) + .map_err(|err| ProposalError::CommD(err))?; + let size = PaddedPieceSize::new(p.piece_size) + .map_err(|err| ProposalError::InvalidPieceSize(err))?; + + Ok(PieceInfo { size, commitment }) + }) + .collect::, ProposalError>>(); + + let pieces = pieces.map_err(|err| { + log::error!("error occurred while processing pieces: {:?}", err); + Error::::CommD + })?; - Ok(cid) + let sector_size = sector_type.sector_size(); + let comm_d = + compute_unsealed_sector_commitment(sector_size, &pieces).map_err(|err| { + log::error!("error occurred while computing commd: {:?}", err); + Error::::CommD + })?; + + Ok(comm_d.cid()) } /// diff --git a/pallets/market/src/mock.rs b/pallets/market/src/mock.rs index 7338b6bb8..f5dce80d4 100644 --- a/pallets/market/src/mock.rs +++ b/pallets/market/src/mock.rs @@ -15,7 +15,7 @@ use sp_runtime::{ AccountId32, BuildStorage, MultiSignature, MultiSigner, }; -use crate::{self as pallet_market, BalanceOf, ClientDealProposal, DealProposal, CID_CODEC}; +use crate::{self as pallet_market, BalanceOf, ClientDealProposal, DealProposal}; type Block = frame_system::mocking::MockBlock; type BlockNumber = u64; @@ -115,8 +115,12 @@ pub fn sign(pair: &sp_core::sr25519::Pair, bytes: &[u8]) -> MultiSignature { MultiSignature::Sr25519(pair.sign(bytes)) } +// TODO(#442,@cernicc,09/10/2024): Remove this function. The codec and hashing +// is not correct. This is still here because I don't want to make the PR even +// bigger by changing parts of the implementations that are relying on this. 
pub fn cid_of(data: &str) -> cid::Cid { - Cid::new_v1(CID_CODEC, Code::Blake2b256.digest(data.as_bytes())) + let cid_codec = 0x55; + Cid::new_v1(cid_codec, Code::Blake2b256.digest(data.as_bytes())) } pub(crate) type DealProposalOf = diff --git a/pallets/market/src/test.rs b/pallets/market/src/test.rs index 92e550eee..8b842fac9 100644 --- a/pallets/market/src/test.rs +++ b/pallets/market/src/test.rs @@ -8,6 +8,7 @@ use frame_support::{ traits::Currency, BoundedVec, }; +use primitives_commitment::{Commitment, CommitmentKind}; use primitives_proofs::{ ActiveDeal, ActiveSector, DealId, Market as MarketTrait, RegisteredSealProof, SectorDeal, MAX_DEALS_PER_SECTOR, @@ -625,8 +626,10 @@ fn verify_deals_for_activation() { assert_eq!( Ok(bounded_vec![ Some( - Cid::from_str("bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu") - .unwrap() + Cid::from_str( + "baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i" + ) + .unwrap() ), None, ]), @@ -809,11 +812,12 @@ fn activate_deals() { .build() ]; + // Piece cid and commd cid are the same if only one piece is in a deal. let piece_cid = - Cid::from_str("bafk2bzacecg3xxc4f2ql2hreiuy767u6r72ekdz54k7luieknboaakhft5rgk") + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") .unwrap(); - let placeholder_commd_cid = - Cid::from_str("bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu") + let commd_cid = + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") .unwrap(); assert_eq!( Ok(bounded_vec![ @@ -821,9 +825,9 @@ fn activate_deals() { active_deals: bounded_vec![ActiveDeal { client: account::(ALICE), piece_cid: piece_cid, - piece_size: 18 + piece_size: 128 }], - unsealed_cid: Some(placeholder_commd_cid), + unsealed_cid: Some(commd_cid), }, ActiveSector { active_deals: bounded_vec![], @@ -863,10 +867,10 @@ fn activate_deals_fails_for_1_sector_but_succeeds_for_others() { ]; let piece_cid = - Cid::from_str("bafk2bzacecg3xxc4f2ql2hreiuy767u6r72ekdz54k7luieknboaakhft5rgk") + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") .unwrap(); let placeholder_commd_cid = - Cid::from_str("bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu") + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") .unwrap(); assert_eq!( Ok(bounded_vec![ @@ -874,7 +878,7 @@ fn activate_deals_fails_for_1_sector_but_succeeds_for_others() { active_deals: bounded_vec![ActiveDeal { client: account::(ALICE), piece_cid: piece_cid, - piece_size: 18 + piece_size: 128 }], unsealed_cid: Some(placeholder_commd_cid), }, @@ -1769,12 +1773,16 @@ pub struct DealProposalBuilder { impl> Default for DealProposalBuilder { fn default() -> Self { + let piece_commitment = + Commitment::new(*b"dummydummydummydummydummydummydu", CommitmentKind::Piece); + Self { - piece_cid: cid_of("polka-storage-data") + piece_cid: piece_commitment + .cid() .to_bytes() .try_into() .expect("hash is always 32 bytes"), - piece_size: 18, + piece_size: 128, client: account::(ALICE), provider: account::(PROVIDER), label: bounded_vec![0xb, 0xe, 0xe, 0xf], diff --git a/pallets/storage-provider/Cargo.toml b/pallets/storage-provider/Cargo.toml index b4ca14634..e75fd3815 100644 --- a/pallets/storage-provider/Cargo.toml +++ b/pallets/storage-provider/Cargo.toml @@ -18,6 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] cid = { workspace = true, features = ["alloc"] } codec = { workspace = true, default-features = false, features = ["derive"] } log = { workspace = true, features = 
["kv"] } +primitives-commitment = { workspace = true } primitives-proofs = { workspace = true, default-features = false } scale-info = { workspace = true, default-features = false, features = ["derive"] } sp-arithmetic = { workspace = true, default-features = false } diff --git a/pallets/storage-provider/src/lib.rs b/pallets/storage-provider/src/lib.rs index d0caeeab1..115f40487 100644 --- a/pallets/storage-provider/src/lib.rs +++ b/pallets/storage-provider/src/lib.rs @@ -30,9 +30,6 @@ mod storage_provider; #[frame_support::pallet(dev_mode)] pub mod pallet { - pub const CID_CODEC: u64 = 0x55; - /// Sourced from multihash code table - pub const BLAKE2B_MULTIHASH_CODE: u64 = 0xB220; pub(crate) const DECLARATIONS_MAX: u32 = 3000; const LOG_TARGET: &'static str = "runtime::storage_provider"; @@ -41,7 +38,7 @@ pub mod pallet { use alloc::{vec, vec::Vec}; use core::fmt::Debug; - use cid::{Cid, Version}; + use cid::Cid; use codec::{Decode, Encode}; use frame_support::{ dispatch::DispatchResult, @@ -58,6 +55,7 @@ pub mod pallet { pallet_prelude::{BlockNumberFor, *}, Config as SystemConfig, }; + use primitives_commitment::{Commitment, CommitmentKind}; use primitives_proofs::{ Market, RegisteredPoStProof, RegisteredSealProof, SectorNumber, StorageProviderValidation, MAX_SECTORS_PER_CALL, @@ -427,7 +425,7 @@ pub mod pallet { sector.expiration, )?; - let unsealed_cid = validate_cid::(§or.unsealed_cid[..])?; + let unsealed_cid = validate_data_commitment_cid::(§or.unsealed_cid[..])?; let deposit = calculate_pre_commit_deposit::(); let sector_on_chain = SectorPreCommitOnChainInfo::new(sector.clone(), deposit, current_block); @@ -1249,22 +1247,16 @@ pub mod pallet { } // Adapted from filecoin reference here: https://github.com/filecoin-project/builtin-actors/blob/54236ae89880bf4aa89b0dba6d9060c3fd2aacee/actors/miner/src/commd.rs#L51-L56 - fn validate_cid(bytes: &[u8]) -> Result> { - let c = Cid::try_from(bytes).map_err(|e| { + fn validate_data_commitment_cid(bytes: &[u8]) -> Result> { + let cid = Cid::try_from(bytes).map_err(|e| { log::error!(target: LOG_TARGET, e:?; "failed to validate cid"); Error::::InvalidCid })?; - // these values should be consistent with the cid's created by the SP. - // They could change in the future when we make a definitive decision on what hashing algorithm to use and such - ensure!( - c.version() == Version::V1 - && c.codec() == CID_CODEC // The codec should align with our CID_CODEC value. 
- && c.hash().code() == BLAKE2B_MULTIHASH_CODE // The CID should be hashed using blake2b - && c.hash().size() == 32, - Error::::InvalidCid - ); - Ok(c) + // This checks if the cid represents correct commitment + Commitment::from_cid(&cid, CommitmentKind::Data).map_err(|_| Error::::InvalidCid)?; + + Ok(cid) } /// Calculate the required pre commit deposit amount diff --git a/pallets/storage-provider/src/tests/mod.rs b/pallets/storage-provider/src/tests/mod.rs index 1f31b54b4..9dc3a810f 100644 --- a/pallets/storage-provider/src/tests/mod.rs +++ b/pallets/storage-provider/src/tests/mod.rs @@ -1,5 +1,6 @@ extern crate alloc; use alloc::collections::BTreeSet; +use core::str::FromStr; use cid::Cid; use codec::Encode; @@ -8,8 +9,8 @@ use frame_support::{ traits::Hooks, PalletId, }; use frame_system::pallet_prelude::BlockNumberFor; -use multihash_codetable::{Code, MultihashDigest}; use pallet_market::{BalanceOf, ClientDealProposal, DealProposal, DealState}; +use primitives_commitment::{Commitment, CommitmentKind}; use primitives_proofs::{ DealId, RegisteredPoStProof, RegisteredSealProof, SectorId, SectorNumber, MAX_DEALS_PER_SECTOR, MAX_TERMINATIONS_PER_CALL, @@ -25,7 +26,7 @@ use crate::{ fault::{ DeclareFaultsParams, DeclareFaultsRecoveredParams, FaultDeclaration, RecoveryDeclaration, }, - pallet::{CID_CODEC, DECLARATIONS_MAX}, + pallet::DECLARATIONS_MAX, partition::{PartitionNumber, MAX_PARTITIONS_PER_DEADLINE}, proofs::{PoStProof, SubmitWindowedPoStParams}, sector::SectorPreCommitInfo, @@ -178,10 +179,6 @@ fn events() -> Vec { evt } -fn cid_of(data: &str) -> cid::Cid { - Cid::new_v1(CID_CODEC, Code::Blake2b256.digest(data.as_bytes())) -} - fn sign(pair: &sp_core::sr25519::Pair, bytes: &[u8]) -> MultiSignature { MultiSignature::Sr25519(pair.sign(bytes)) } @@ -293,20 +290,28 @@ struct SectorPreCommitInfoBuilder { impl Default for SectorPreCommitInfoBuilder { fn default() -> Self { + let unsealed_cid = + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") + .unwrap() + .to_bytes() + .try_into() + .expect("hash is always 32 bytes"); + + // TODO: This cid is not correct and it's only used as a place holder for the correct one + let sealed_cid = + Cid::from_str("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") + .unwrap() + .to_bytes() + .try_into() + .expect("hash is always 32 bytes"); + Self { seal_proof: RegisteredSealProof::StackedDRG2KiBV1P1, sector_number: 1, - sealed_cid: cid_of("sealed_cid") - .to_bytes() - .try_into() - .expect("hash is always 32 bytes"), + sealed_cid, deal_ids: bounded_vec![0, 1], expiration: 120 * MINUTES, - // TODO(@th7nder,#92,19/07/2024): compute_commd not yet implemented. 
- unsealed_cid: cid_of("placeholder-to-be-done") - .to_bytes() - .try_into() - .expect("hash is always 32 bytes"), + unsealed_cid, } } } @@ -327,8 +332,9 @@ impl SectorPreCommitInfoBuilder { self } - pub fn unsealed_cid(mut self, unsealed_cid: SectorId) -> Self { - self.unsealed_cid = unsealed_cid; + pub fn unsealed_cid(mut self, unsealed_cid: &str) -> Self { + let cid = Cid::from_str(unsealed_cid).expect("valid unsealed_cid"); + self.unsealed_cid = BoundedVec::try_from(cid.to_bytes()).unwrap(); self } @@ -361,12 +367,16 @@ struct DealProposalBuilder { impl Default for DealProposalBuilder { fn default() -> Self { + let piece_commitment = + Commitment::new(*b"dummydummydummydummydummydummydu", CommitmentKind::Piece); + Self { - piece_cid: cid_of("polka-storage-data") + piece_cid: piece_commitment + .cid() .to_bytes() .try_into() .expect("hash is always 32 bytes"), - piece_size: 18, + piece_size: 128, // Smallest piece size available for sector client: account(BOB), provider: account(ALICE), label: bounded_vec![0xb, 0xe, 0xe, 0xf], diff --git a/pallets/storage-provider/src/tests/pre_commit_sectors.rs b/pallets/storage-provider/src/tests/pre_commit_sectors.rs index 9b6abec3e..aefc679a1 100644 --- a/pallets/storage-provider/src/tests/pre_commit_sectors.rs +++ b/pallets/storage-provider/src/tests/pre_commit_sectors.rs @@ -9,7 +9,7 @@ use crate::{ pallet::{Error, Event, StorageProviders}, sector::{SectorPreCommitInfo, MAX_SECTORS}, tests::{ - account, cid_of, events, publish_deals, register_storage_provider, run_to_block, Balances, + account, events, publish_deals, register_storage_provider, run_to_block, Balances, MaxProveCommitDuration, MaxSectorExpiration, RuntimeEvent, RuntimeOrigin, SectorPreCommitInfoBuilder, StorageProvider, Test, ALICE, CHARLIE, INITIAL_FUNDS, }, @@ -25,7 +25,9 @@ fn successfully_precommited() { publish_deals(storage_provider); // Sector to be pre-committed. 
- let sector = SectorPreCommitInfoBuilder::default().build(); + let sector = SectorPreCommitInfoBuilder::default() + .unsealed_cid("baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq") + .build(); // Check starting balance assert_eq!( @@ -78,12 +80,6 @@ fn successfully_precommited_no_deals() { let sector = SectorPreCommitInfoBuilder::default() // No sectors -> No CommD verification .deals(bounded_vec![]) - .unsealed_cid( - cid_of("cc-unsealed-cid") - .to_bytes() - .try_into() - .expect("hash is always 32 bytes"), - ) .build(); // Run pre commit extrinsic @@ -141,6 +137,9 @@ fn successfully_precommited_batch() { .try_push( SectorPreCommitInfoBuilder::default() .sector_number(sector_number) + .unsealed_cid( + "baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq", + ) .build(), ) .expect("BoundedVec should fit all 6 elements"); @@ -223,7 +222,9 @@ fn fails_sector_number_already_used() { publish_deals(storage_provider); // Sector to be pre-committed - let sector = SectorPreCommitInfoBuilder::default().build(); + let sector = SectorPreCommitInfoBuilder::default() + .unsealed_cid("baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq") + .build(); // Run pre commit extrinsic assert_ok!(StorageProvider::pre_commit_sectors( @@ -251,12 +252,8 @@ fn fails_declared_commd_not_matching() { // Sector to be pre-committed let sector = SectorPreCommitInfoBuilder::default() - .unsealed_cid( - cid_of("different-unsealed-cid") - .to_bytes() - .try_into() - .expect("hash is always 32 bytes"), - ) + // wrong cid for for the sector + .unsealed_cid("baga6ea4seaqgi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5lnnv4wi5i") .build(); assert_noop!( diff --git a/pallets/storage-provider/src/tests/prove_commit_sectors.rs b/pallets/storage-provider/src/tests/prove_commit_sectors.rs index 2fafb9d44..026e13e13 100644 --- a/pallets/storage-provider/src/tests/prove_commit_sectors.rs +++ b/pallets/storage-provider/src/tests/prove_commit_sectors.rs @@ -32,6 +32,7 @@ fn successfully_prove_sector() { // Sector data let sector = SectorPreCommitInfoBuilder::default() .sector_number(sector_number) + .unsealed_cid("baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq") .build(); // Run pre commit extrinsic @@ -116,6 +117,9 @@ fn successfully_prove_multiple_sectors() { .try_push( SectorPreCommitInfoBuilder::default() .sector_number(sector_number) + .unsealed_cid( + "baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq", + ) .build(), ) .expect("BoundedVec should fit all 6 elements"); @@ -268,6 +272,7 @@ fn fails_prove_commit_after_deadline() { // Sector data let sector = SectorPreCommitInfoBuilder::default() .sector_number(sector_number) + .unsealed_cid("baga6ea4seaqeqgpphr6lmjhddjprb2etcfiml4sgr2kpju7kscfdj7227itm4hq") .build(); // Run pre commit extrinsic diff --git a/primitives/commitment/Cargo.toml b/primitives/commitment/Cargo.toml new file mode 100644 index 000000000..619ee961e --- /dev/null +++ b/primitives/commitment/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license-file.workspace = true +name = "primitives-commitment" +repository.workspace = true +version = "0.1.0" + +[dependencies] +cid.workspace = true +primitives-proofs.workspace = true +sha2.workspace = true + +[dev-dependencies] +rand = { workspace = true, features = ["std", "std_rng"] } + +[lints] +workspace = true diff --git a/primitives/commitment/src/commd.rs b/primitives/commitment/src/commd.rs new file mode 100644 index 
000000000..bd1044019 --- /dev/null +++ b/primitives/commitment/src/commd.rs @@ -0,0 +1,325 @@ +extern crate alloc; +use alloc::vec::Vec; +use core::ops::Deref; + +use primitives_proofs::SectorSize; +use sha2::{Digest, Sha256}; + +use crate::{ + piece::{PaddedPieceSize, PieceInfo, UnpaddedPieceSize}, + zero_piece_commitment, Commitment, CommitmentKind, NODE_SIZE, +}; + +// Ensure that the pieces are correct sizes +fn ensure_piece_sizes( + sector_size: SectorSize, + piece_infos: &[PieceInfo], +) -> Result<(), CommDError> { + // Sector should be able to hold all pieces + let size_sum = piece_infos.iter().map(|piece| *piece.size).sum::(); + if size_sum > sector_size.bytes() { + return Err(CommDError::PieceSizeTooLarge); + } + + // Check if there are too many pieces for a sector of this size + let sector_size = PaddedPieceSize::new(sector_size.bytes()).unwrap(); + let num_of_pieces = piece_infos.len() as u64; + let max_pieces = *sector_size.unpadded() / *UnpaddedPieceSize::MIN; + if num_of_pieces > max_pieces { + return Err(CommDError::TooManyPieces); + } + + Ok(()) +} + +/// Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. +pub fn compute_unsealed_sector_commitment( + sector_size: SectorSize, + piece_infos: &[PieceInfo], +) -> Result { + let padded_sector_size = PaddedPieceSize::new(sector_size.bytes()).unwrap(); + + // In case of no pieces, return the piece zero commitment for the whole + // sector size. + if piece_infos.is_empty() { + return Ok(zero_piece_commitment(padded_sector_size)); + } + + // Check if pieces are correct sizes. + ensure_piece_sizes(sector_size, piece_infos)?; + + // Reduce the pieces to the 1-piece commitment + let mut reduction = CommDPieceReduction::new(); + reduction.add_pieces(piece_infos.iter().copied()); + let commitment = reduction.finish().expect("at least one piece was added"); + + Ok(commitment) +} + +/// Reduces pieces passed to their data commitment. The process of the reduction +/// is following: +/// +/// 1. Pieces are added to the stack one by one. +/// 2. After each piece is added, the stack is reduced by combining pieces of +/// the same size. +/// 3. If a piece to be added is larger than the last piece on the stack, +/// padding pieces are added until the last piece on the stack is at least as +/// large as the piece to be added. +/// 4. The process continues until all pieces have been added and reduced. +/// 5. At the end, if there is more than one piece on the stack, padding pieces +/// are added until the stack can be reduced to a single piece. +/// 6. The final single piece represents the data commitment for all the input +/// pieces. +struct CommDPieceReduction { + /// Pieces stack + pieces: Vec, +} + +impl CommDPieceReduction { + fn new() -> Self { + CommDPieceReduction { pieces: Vec::new() } + } + + // Add many pieces + fn add_pieces
<P>
(&mut self, pieces: P) + where + P: Iterator, + { + pieces.for_each(|p| self.add_piece(p)); + } + + // Add a single piece + fn add_piece(&mut self, piece: PieceInfo) { + // Handle first piece + if self.pieces.is_empty() { + self.pieces.push(piece); + return; + } + + // Add padding pieces to the stack until we reduce the current pieces to + // the size that is equal to the new piece. With this we achieve that + // the new piece will be reduced to a single piece after adding it to + // the stack. Will always iterate at least once since if it was empty + // the first condition would have triggered and returned. + while let Some(last_piece) = self.pieces.last() { + let last_added_piece_size = last_piece.size; + // We can stop stop adding padding pieces if the last added padding + // piece is the same size as the actual piece. + if last_added_piece_size.deref() >= piece.size.deref() { + break; + } + + let padding_piece = padding_piece(last_added_piece_size); + self.pieces.push(padding_piece); + + // We need to reduce the pieces before the next iteration. Because + // we are always adding the padding to the last piece. And the last + // piece changes based on the result of reduction. + self.reduce(); + } + + // Add the new piece to the stack + self.pieces.push(piece); + + // Reduce the pieces + self.reduce(); + } + + /// Combine pieces until there are any on the stack available to combine + fn reduce(&mut self) { + loop { + // If there is only a single piece on the stack we break the loop + let pieces_len = self.pieces.len(); + if pieces_len < 2 { + break; + } + + // If the two pieces on top of the stack are not the same size, we + // can't reduce them + let last_piece_size = self.pieces[pieces_len - 1].size; + let second_last_piece_size = self.pieces[pieces_len - 2].size; + if last_piece_size != second_last_piece_size { + break; + } + + // Pop and join the two pieces on top of the stack. Push the + // combined piece back to the stack + let last_piece = self + .pieces + .pop() + .expect("we know there are at least two pieces"); + let second_last_piece = self + .pieces + .pop() + .expect("we know there are at least two pieces"); + let joined = + join_piece_infos(second_last_piece, last_piece).expect("pieces are the same size"); + self.pieces.push(joined); + } + } + + /// Finish the reduction of all pieces. Result is a data commitment for the + /// pieces added. + fn finish(mut self) -> Option { + // Check if we still have more then one piece on the stack. If we do, + // that means that we should add some additional padding pieces at the + // end until we can reduce them to a single piece + while self.pieces.len() > 1 { + let last_added_piece_size = self.pieces.last().expect("at least one piece exists").size; + self.pieces.push(padding_piece(last_added_piece_size)); + self.reduce(); + } + + // Finally a single piece with the commitment that represents all + // reduced pieces + Some(self.pieces.pop()?.commitment) + } +} + +/// Create a piece of specific size used as a padding. +fn padding_piece(piece_size: PaddedPieceSize) -> PieceInfo { + PieceInfo { + commitment: zero_piece_commitment(piece_size), + size: piece_size, + } +} + +/// Join two equally sized `PieceInfo`s together, by hashing them and adding +/// their sizes. 
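// Illustrative sketch (not part of the patch) of how the reduction above
// behaves, using only items already in scope in this file. Two properties
// follow directly from the code: an empty piece list yields the zero
// commitment for the whole sector, and a single piece is returned as-is
// (no padding up to the sector size happens in `finish`).
fn reduction_examples() {
    // No pieces: CommD is the zero piece commitment for the full 2 KiB sector.
    let empty = compute_unsealed_sector_commitment(SectorSize::_2KiB, &[]).unwrap();
    assert_eq!(
        empty,
        zero_piece_commitment(PaddedPieceSize::new(2048).unwrap())
    );

    // One piece: the stack never needs joining, so CommD equals that piece's
    // own commitment (this is what the pallet tests rely on for single-deal
    // sectors).
    let size = PaddedPieceSize::new(1024).unwrap();
    let piece = PieceInfo {
        commitment: zero_piece_commitment(size),
        size,
    };
    let single = compute_unsealed_sector_commitment(SectorSize::_2KiB, &[piece]).unwrap();
    assert_eq!(single, piece.commitment);

    // Two equal-sized pieces would be combined via `join_piece_infos`, which
    // hashes the pair with `piece_hash` below.
}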
+fn join_piece_infos(left: PieceInfo, right: PieceInfo) -> Result { + // The pieces passed should be same size + if left.size != right.size { + return Err(CommDError::InvalidPieceSize); + } + + let hash = piece_hash(&left.commitment.raw(), &right.commitment.raw()); + let mut comm = [0; 32]; + comm.copy_from_slice(&hash); + + let size = left.size + right.size; + + Ok(PieceInfo { + commitment: Commitment::new(comm, CommitmentKind::Piece), + size, + }) +} + +/// Calculate Hash of two raw piece commitments +pub fn piece_hash(a: &[u8], b: &[u8]) -> [u8; 32] { + let mut buf = [0u8; NODE_SIZE * 2]; + buf[..NODE_SIZE].copy_from_slice(a); + buf[NODE_SIZE..].copy_from_slice(b); + + let mut hashed = Sha256::digest(buf); + + // strip last two bits, to ensure result is in Fr. + hashed[31] &= 0b0011_1111; + + hashed.into() +} + +#[derive(Debug)] +pub enum CommDError { + InvalidPieceSize, + PieceSizeTooLarge, + TooManyPieces, +} + +#[cfg(test)] +mod tests { + use alloc::string::ToString; + use core::str::FromStr; + + use cid::Cid; + use primitives_proofs::SectorSize; + + use super::*; + + #[test] + fn test_compute_comm_d_empty() { + let comm_d = compute_unsealed_sector_commitment(SectorSize::_2KiB, &[]) + .expect("failed to verify pieces, empty piece infos"); + assert_eq!( + comm_d.raw(), + [ + 252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79, + 36, 185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51 + ] + ); + } + + /// Reference: + #[test] + fn compute_unsealed_sector_cid() { + let pieces = [ + ( + Some("baga6ea4seaqknzm22isnhsxt2s4dnw45kfywmhenngqq3nc7jvecakoca6ksyhy"), + 256 << 20, + ), + ( + Some("baga6ea4seaqnq6o5wuewdpviyoafno4rdpqnokz6ghvg2iyeyfbqxgcwdlj2egi"), + 1024 << 20, + ), + ( + Some("baga6ea4seaqpixk4ifbkzato3huzycj6ty6gllqwanhdpsvxikawyl5bg2h44mq"), + 512 << 20, + ), + ( + Some("baga6ea4seaqaxwe5dy6nt3ko5tngtmzvpqxqikw5mdwfjqgaxfwtzenc6bgzajq"), + 512 << 20, + ), + ( + Some("baga6ea4seaqpy33nbesa4d6ot2ygeuy43y4t7amc4izt52mlotqenwcmn2kyaai"), + 1024 << 20, + ), + ( + Some("baga6ea4seaqphvv4x2s2v7ykgc3ugs2kkltbdeg7icxstklkrgqvv72m2v3i2aa"), + 256 << 20, + ), + ( + Some("baga6ea4seaqf5u55znk6jwhdsrhe37emzhmehiyvjxpsww274f6fiy3h4yctady"), + 512 << 20, + ), + ( + Some("baga6ea4seaqa3qbabsbmvk5er6rhsjzt74beplzgulthamm22jue4zgqcuszofi"), + 1024 << 20, + ), + ( + Some("baga6ea4seaqiekvf623muj6jpxg6vsqaikyw3r4ob5u7363z7zcaixqvfqsc2ji"), + 256 << 20, + ), + ( + Some("baga6ea4seaqhsewv65z2d4m5o4vo65vl5o6z4bcegdvgnusvlt7rao44gro36pi"), + 512 << 20, + ), + // The sector has to be filled entirely, before we can calculate the + // commitment, so we add two more empty pieces here. 
+ (None, 8 << 30), + (None, 16 << 30), + ]; + + let pieces = pieces + .into_iter() + .map(|(cid, size)| { + let size = PaddedPieceSize::new(size).unwrap(); + let commitment = match cid { + Some(cid) => { + let cid = Cid::from_str(cid).unwrap(); + Commitment::from_cid(&cid, CommitmentKind::Piece).unwrap() + } + None => zero_piece_commitment(size), + }; + + PieceInfo { commitment, size } + }) + .collect::>(); + + let comm_d = compute_unsealed_sector_commitment(SectorSize::_32GiB, &pieces).unwrap(); + let cid = comm_d.cid(); + + assert_eq!( + cid.to_string(), + "baga6ea4seaqiw3gbmstmexb7sqwkc5r23o3i7zcyx5kr76pfobpykes3af62kca" + ); + } +} diff --git a/primitives/commitment/src/lib.rs b/primitives/commitment/src/lib.rs new file mode 100644 index 000000000..b033579fb --- /dev/null +++ b/primitives/commitment/src/lib.rs @@ -0,0 +1,240 @@ +#![no_std] + +pub mod commd; +pub mod piece; +mod zero; + +use cid::{multihash::Multihash, Cid}; + +use crate::piece::PaddedPieceSize; + +/// Merkle tree node size in bytes. +pub const NODE_SIZE: usize = 32; + +/// Filecoin piece or sector data commitment merkle node/root (CommP & CommD) +/// +/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L554 +pub const FIL_COMMITMENT_UNSEALED: u64 = 0xf101; + +/// Filecoin sector data commitment merkle node/root - sealed and replicated +/// (CommR) +/// +/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L555 +pub const FIL_COMMITMENT_SEALED: u64 = 0xf102; + +/// SHA2-256 with the two most significant bits from the last byte zeroed (as +/// via a mask with 0b00111111) - used for proving trees as in Filecoin. +/// +/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L153 +pub const SHA2_256_TRUNC254_PADDED: u64 = 0x1012; + +/// Poseidon using BLS12-381 and arity of 2 with Filecoin parameters +/// +/// https://github.com/multiformats/multicodec/blob/badcfe56bb7e0bbb06b60d57565186cd6be1f932/table.csv#L537 +pub const POSEIDON_BLS12_381_A1_FC1: u64 = 0xb401; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum CommitmentKind { + // CommP - Piece commitment + Piece, + // CommD - Data commitment + Data, + // CommR - Replica commitment + Replica, +} + +impl CommitmentKind { + /// Returns the [Multicodec](https://github.com/multiformats/multicodec/blob/master/table.csv) code for the commitment kind. + fn multicodec(&self) -> u64 { + match self { + CommitmentKind::Piece | CommitmentKind::Data => FIL_COMMITMENT_UNSEALED, + CommitmentKind::Replica => FIL_COMMITMENT_SEALED, + } + } + + /// Returns the [Multihash](https://github.com/multiformats/multicodec/blob/master/table.csv) code for the commitment kind. 
+ fn multihash(&self) -> u64 { + match self { + CommitmentKind::Piece | CommitmentKind::Data => SHA2_256_TRUNC254_PADDED, + CommitmentKind::Replica => POSEIDON_BLS12_381_A1_FC1, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Commitment { + commitment: [u8; 32], + kind: CommitmentKind, +} + +impl Commitment { + pub fn new(commitment: [u8; 32], kind: CommitmentKind) -> Self { + Self { commitment, kind } + } + + pub fn from_cid(cid: &Cid, kind: CommitmentKind) -> Result { + let mut commitment = [0; 32]; + commitment.copy_from_slice(cid.hash().digest()); + + let multicodec = cid.codec(); + let multihash = cid.hash().code(); + + match kind { + CommitmentKind::Piece | CommitmentKind::Data => { + if multicodec != FIL_COMMITMENT_UNSEALED { + return Err("invalid multicodec for commitment"); + } + + if multihash != SHA2_256_TRUNC254_PADDED { + return Err("invalid multihash for commitment"); + } + } + CommitmentKind::Replica => { + if multicodec != FIL_COMMITMENT_SEALED { + return Err("invalid multicodec for commitment"); + } + + if multihash != POSEIDON_BLS12_381_A1_FC1 { + return Err("invalid multihash for commitment"); + } + } + } + + Ok(Self { commitment, kind }) + } + + /// Returns the raw commitment bytes. + pub fn raw(&self) -> [u8; 32] { + self.commitment + } + + /// Converts the commitment to a CID. + pub fn cid(&self) -> Cid { + let multihash = self.kind.multihash(); + let multicodec = self.kind.multicodec(); + let hash = Multihash::wrap(multihash, &self.commitment) + .expect("multihash is large enough so it can wrap the commitment"); + Cid::new_v1(multicodec, hash) + } +} + +/// Returns a zero-piece commitment for a given piece size. +pub fn zero_piece_commitment(size: PaddedPieceSize) -> Commitment { + Commitment { + commitment: zero::zero_piece_commitment(size), + kind: CommitmentKind::Piece, + } +} + +#[cfg(test)] +mod tests { + use cid::{multihash::Multihash, Cid}; + + use crate::{ + Commitment, CommitmentKind, FIL_COMMITMENT_SEALED, FIL_COMMITMENT_UNSEALED, + POSEIDON_BLS12_381_A1_FC1, SHA2_256_TRUNC254_PADDED, + }; + + fn rand_comm() -> [u8; 32] { + rand::random::<[u8; 32]>() + } + + #[test] + fn comm_d_to_cid() { + let comm = rand_comm(); + + let cid = Commitment::new(comm, CommitmentKind::Data).cid(); + assert_eq!(cid.codec(), FIL_COMMITMENT_UNSEALED); + assert_eq!(cid.hash().code(), SHA2_256_TRUNC254_PADDED); + assert_eq!(cid.hash().digest(), comm); + } + + #[test] + fn cid_to_comm_d() { + let comm = rand_comm(); + + // Correct hash format + let mh = Multihash::wrap(SHA2_256_TRUNC254_PADDED, &comm).unwrap(); + let c = Cid::new_v1(FIL_COMMITMENT_UNSEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Data).unwrap(); + assert_eq!(commitment.raw(), comm); + + // Should fail with incorrect codec + let c = Cid::new_v1(FIL_COMMITMENT_SEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Data); + assert!(commitment.is_err()); + + // Incorrect hash format + let mh = Multihash::wrap(0x9999, &comm).unwrap(); + let c = Cid::new_v1(FIL_COMMITMENT_UNSEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Data); + assert!(commitment.is_err()); + } + + #[test] + fn comm_r_to_cid() { + let comm = rand_comm(); + let cid = Commitment::new(comm, CommitmentKind::Replica).cid(); + + assert_eq!(cid.codec(), FIL_COMMITMENT_SEALED); + assert_eq!(cid.hash().code(), POSEIDON_BLS12_381_A1_FC1); + assert_eq!(cid.hash().digest(), comm); + } + + #[test] + fn cid_to_comm_r() { + let comm = rand_comm(); + + // Correct hash format + 
let mh = Multihash::wrap(POSEIDON_BLS12_381_A1_FC1, &comm).unwrap(); + let c = Cid::new_v1(FIL_COMMITMENT_SEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Replica).unwrap(); + assert_eq!(commitment.raw(), comm); + + // Should fail with incorrect codec + let c = Cid::new_v1(FIL_COMMITMENT_UNSEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Replica); + assert!(commitment.is_err()); + + // Incorrect hash format + let mh = Multihash::wrap(0x9999, &comm).unwrap(); + let c = Cid::new_v1(FIL_COMMITMENT_SEALED, mh); + let commitment = Commitment::from_cid(&c, CommitmentKind::Replica); + assert!(commitment.is_err()); + } + + #[test] + fn symmetric_conversion() { + let comm = rand_comm(); + + // piece + let cid = Commitment::new(comm, CommitmentKind::Piece).cid(); + assert_eq!( + Commitment::from_cid(&cid, CommitmentKind::Piece).unwrap(), + Commitment { + commitment: comm, + kind: CommitmentKind::Piece + } + ); + + // data + let cid = Commitment::new(comm, CommitmentKind::Data).cid(); + assert_eq!( + Commitment::from_cid(&cid, CommitmentKind::Data).unwrap(), + Commitment { + commitment: comm, + kind: CommitmentKind::Data + } + ); + + // replica + let cid = Commitment::new(comm, CommitmentKind::Replica).cid(); + assert_eq!( + Commitment::from_cid(&cid, CommitmentKind::Replica).unwrap(), + Commitment { + commitment: comm, + kind: CommitmentKind::Replica + } + ); + } +} diff --git a/primitives/commitment/src/piece.rs b/primitives/commitment/src/piece.rs new file mode 100644 index 000000000..4e6a02fb0 --- /dev/null +++ b/primitives/commitment/src/piece.rs @@ -0,0 +1,181 @@ +use core::ops::{Add, AddAssign, Deref}; + +use crate::{Commitment, NODE_SIZE}; + +/// Piece info contains piece commitment and piece size. +#[derive(Debug, Clone, Copy)] +pub struct PieceInfo { + /// Piece commitment + pub commitment: Commitment, + /// Piece size + pub size: PaddedPieceSize, +} + +/// Size of a piece in bytes. Unpadded piece size should be power of two +/// multiple of 127. +#[derive(PartialEq, Debug, Eq, Clone, Copy)] +pub struct UnpaddedPieceSize(u64); + +impl UnpaddedPieceSize { + /// The minimum pice size + pub const MIN: UnpaddedPieceSize = UnpaddedPieceSize(127); + + /// Initialize new unpadded piece size. Error is returned if the size is + /// invalid. + pub fn new(size: u64) -> Result { + if size < 127 { + return Err("minimum piece size is 127 bytes"); + } + + // is 127 * 2^n + if size >> size.trailing_zeros() != 127 { + return Err("unpadded piece size must be a power of 2 multiple of 127"); + } + + Ok(Self(size)) + } + + /// Converts unpadded piece size into padded piece size. + pub fn padded(self) -> PaddedPieceSize { + let padded_bytes = self.0 + (self.0 / 127); + PaddedPieceSize(padded_bytes) + } +} + +impl core::fmt::Display for UnpaddedPieceSize { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Deref for UnpaddedPieceSize { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Add for UnpaddedPieceSize { + type Output = Self; + + fn add(self, other: Self) -> Self::Output { + UnpaddedPieceSize(self.0 + other.0) + } +} + +/// Size of a piece in bytes with padding. The size is always a power of two +/// number. +#[derive(PartialEq, Debug, Eq, Clone, Copy)] +pub struct PaddedPieceSize(u64); + +impl PaddedPieceSize { + /// The minimum pice size + pub const MIN: PaddedPieceSize = PaddedPieceSize(128); + + /// Initialize new padded piece size. 
Error is returned if the size is + /// invalid. + pub fn new(size: u64) -> Result { + if size < 128 { + return Err("minimum piece size is 128 bytes"); + } + + if size.count_ones() != 1 { + return Err("padded piece size must be a power of 2"); + } + + if size % NODE_SIZE as u64 != 0 { + return Err("padded_piece_size is not multiple of NODE_SIZE"); + } + + Ok(Self(size)) + } + + /// Converts padded piece size into an unpadded piece size. + pub fn unpadded(self) -> UnpaddedPieceSize { + let unpadded_bytes = self.0 - (self.0 / 128); + UnpaddedPieceSize(unpadded_bytes) + } + + /// The function accepts arbitrary size and transforms it to the + /// PaddedPieceSize: + /// + /// 1. We first add as many bytes as we get when we add "0" byte after each + /// 127 bytes. That is because we are padding the sector content with + /// "Fr32 padding". + /// 2. We "round" the padded size to the first power of two number. That is + /// needed because we use Binary Merkle Tree for the CommD/CommP + /// computation. + pub fn from_arbitrary_size(size: u64) -> Self { + let padded_bytes = size + (size / 127); + let padded_bytes = padded_bytes.next_power_of_two(); + Self::new(padded_bytes as u64).expect("the padded piece size is correct") + } +} + +impl core::fmt::Display for PaddedPieceSize { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl Deref for PaddedPieceSize { + type Target = u64; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Add for PaddedPieceSize { + type Output = Self; + + fn add(self, other: Self) -> Self::Output { + PaddedPieceSize(self.0 + other.0) + } +} + +impl AddAssign for PaddedPieceSize { + fn add_assign(&mut self, other: Self) { + self.0 += other.0; + } +} + +impl core::iter::Sum for PaddedPieceSize { + fn sum>(iter: I) -> Self { + iter.fold(PaddedPieceSize(0), |acc, x| acc + x) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn round_trip_piece_size() { + let p_piece = PaddedPieceSize::new(0b10000000).unwrap(); + let up_piece = p_piece.unpadded(); + assert_eq!(&up_piece, &UnpaddedPieceSize(127)); + assert_eq!(&p_piece, &up_piece.padded()); + } + #[test] + fn invalid_piece_checks() { + assert_eq!( + PaddedPieceSize::new(127), + Err("minimum piece size is 128 bytes") + ); + assert_eq!( + UnpaddedPieceSize::new(126), + Err("minimum piece size is 127 bytes") + ); + assert_eq!( + PaddedPieceSize::new(0b10000001), + Err("padded piece size must be a power of 2") + ); + assert_eq!( + UnpaddedPieceSize::new(0b1110111000), + Err("unpadded piece size must be a power of 2 multiple of 127") + ); + assert!(UnpaddedPieceSize::new(0b1111111000).is_ok()); + } +} diff --git a/primitives/commitment/src/zero.rs b/primitives/commitment/src/zero.rs new file mode 100644 index 000000000..f145c06ec --- /dev/null +++ b/primitives/commitment/src/zero.rs @@ -0,0 +1,251 @@ +use crate::piece::PaddedPieceSize; + +/// Can't generate for: 1, 2, 4, 8, 16, 32, 64 bytes +const SKIP: u32 = 7; + +/// Returns a zero piece commitment for a specified piece size. Zero piece +/// commitment is a calculated piece commitment for a 0-filled piece. This +/// commitment is usually used when adding pieces to a sector as a padding. +pub fn zero_piece_commitment(size: PaddedPieceSize) -> [u8; 32] { + let level = size.trailing_zeros() - SKIP; + PIECE_COMMS[level as usize] +} + +/// Zero piece commitments. This is statically defined to be able to pad +/// remaining space with. 
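// Illustrative note (not part of the patch): the table below is indexed by
// `size.trailing_zeros() - SKIP`, so level 0 is the 128-byte zero piece,
// level 1 is 256 bytes, and so on. For example, a 2048-byte zero piece sits
// at level 11 - 7 = 4, and its commitment is the same value asserted for an
// empty 2 KiB sector in `test_compute_comm_d_empty`.
fn zero_commitment_lookup_example() {
    let size = PaddedPieceSize::new(2048).unwrap();
    let level = size.trailing_zeros() - SKIP; // 11 - 7 = 4
    assert_eq!(level, 4);
    assert_eq!(zero_piece_commitment(size), PIECE_COMMS[level as usize]);
}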
+const PIECE_COMMS: [[u8; 32]; 35] = [ + // 128 bytes + [ + 0x37, 0x31, 0xbb, 0x99, 0xac, 0x68, 0x9f, 0x66, 0xee, 0xf5, 0x97, 0x3e, 0x4a, 0x94, 0xda, + 0x18, 0x8f, 0x4d, 0xdc, 0xae, 0x58, 0x7, 0x24, 0xfc, 0x6f, 0x3f, 0xd6, 0xd, 0xfd, 0x48, + 0x83, 0x33, + ], + // 256 bytes + [ + 0x64, 0x2a, 0x60, 0x7e, 0xf8, 0x86, 0xb0, 0x4, 0xbf, 0x2c, 0x19, 0x78, 0x46, 0x3a, 0xe1, + 0xd4, 0x69, 0x3a, 0xc0, 0xf4, 0x10, 0xeb, 0x2d, 0x1b, 0x7a, 0x47, 0xfe, 0x20, 0x5e, 0x5e, + 0x75, 0xf, + ], + // 512 bytes + [ + 0x57, 0xa2, 0x38, 0x1a, 0x28, 0x65, 0x2b, 0xf4, 0x7f, 0x6b, 0xef, 0x7a, 0xca, 0x67, 0x9b, + 0xe4, 0xae, 0xde, 0x58, 0x71, 0xab, 0x5c, 0xf3, 0xeb, 0x2c, 0x8, 0x11, 0x44, 0x88, 0xcb, + 0x85, 0x26, + ], + // 1024 bytes + [ + 0x1f, 0x7a, 0xc9, 0x59, 0x55, 0x10, 0xe0, 0x9e, 0xa4, 0x1c, 0x46, 0xb, 0x17, 0x64, 0x30, + 0xbb, 0x32, 0x2c, 0xd6, 0xfb, 0x41, 0x2e, 0xc5, 0x7c, 0xb1, 0x7d, 0x98, 0x9a, 0x43, 0x10, + 0x37, 0x2f, + ], + // 2048 bytes + [ + 0xfc, 0x7e, 0x92, 0x82, 0x96, 0xe5, 0x16, 0xfa, 0xad, 0xe9, 0x86, 0xb2, 0x8f, 0x92, 0xd4, + 0x4a, 0x4f, 0x24, 0xb9, 0x35, 0x48, 0x52, 0x23, 0x37, 0x6a, 0x79, 0x90, 0x27, 0xbc, 0x18, + 0xf8, 0x33, + ], + // 4096 bytes + [ + 0x8, 0xc4, 0x7b, 0x38, 0xee, 0x13, 0xbc, 0x43, 0xf4, 0x1b, 0x91, 0x5c, 0xe, 0xed, 0x99, + 0x11, 0xa2, 0x60, 0x86, 0xb3, 0xed, 0x62, 0x40, 0x1b, 0xf9, 0xd5, 0x8b, 0x8d, 0x19, 0xdf, + 0xf6, 0x24, + ], + // 8192 bytes + [ + 0xb2, 0xe4, 0x7b, 0xfb, 0x11, 0xfa, 0xcd, 0x94, 0x1f, 0x62, 0xaf, 0x5c, 0x75, 0xf, 0x3e, + 0xa5, 0xcc, 0x4d, 0xf5, 0x17, 0xd5, 0xc4, 0xf1, 0x6d, 0xb2, 0xb4, 0xd7, 0x7b, 0xae, 0xc1, + 0xa3, 0x2f, + ], + // 16384 bytes = 16 KiB + [ + 0xf9, 0x22, 0x61, 0x60, 0xc8, 0xf9, 0x27, 0xbf, 0xdc, 0xc4, 0x18, 0xcd, 0xf2, 0x3, 0x49, + 0x31, 0x46, 0x0, 0x8e, 0xae, 0xfb, 0x7d, 0x2, 0x19, 0x4d, 0x5e, 0x54, 0x81, 0x89, 0x0, + 0x51, 0x8, + ], + // 32768 bytes = 32 KiB + [ + 0x2c, 0x1a, 0x96, 0x4b, 0xb9, 0xb, 0x59, 0xeb, 0xfe, 0xf, 0x6d, 0xa2, 0x9a, 0xd6, 0x5a, + 0xe3, 0xe4, 0x17, 0x72, 0x4a, 0x8f, 0x7c, 0x11, 0x74, 0x5a, 0x40, 0xca, 0xc1, 0xe5, 0xe7, + 0x40, 0x11, + ], + // 65536 bytes = 64 KiB + [ + 0xfe, 0xe3, 0x78, 0xce, 0xf1, 0x64, 0x4, 0xb1, 0x99, 0xed, 0xe0, 0xb1, 0x3e, 0x11, 0xb6, + 0x24, 0xff, 0x9d, 0x78, 0x4f, 0xbb, 0xed, 0x87, 0x8d, 0x83, 0x29, 0x7e, 0x79, 0x5e, 0x2, + 0x4f, 0x2, + ], + // 131072 bytes = 128 KiB + [ + 0x8e, 0x9e, 0x24, 0x3, 0xfa, 0x88, 0x4c, 0xf6, 0x23, 0x7f, 0x60, 0xdf, 0x25, 0xf8, 0x3e, + 0xe4, 0xd, 0xca, 0x9e, 0xd8, 0x79, 0xeb, 0x6f, 0x63, 0x52, 0xd1, 0x50, 0x84, 0xf5, 0xad, + 0xd, 0x3f, + ], + // 262144 bytes = 256 KiB + [ + 0x75, 0x2d, 0x96, 0x93, 0xfa, 0x16, 0x75, 0x24, 0x39, 0x54, 0x76, 0xe3, 0x17, 0xa9, 0x85, + 0x80, 0xf0, 0x9, 0x47, 0xaf, 0xb7, 0xa3, 0x5, 0x40, 0xd6, 0x25, 0xa9, 0x29, 0x1c, 0xc1, + 0x2a, 0x7, + ], + // 524288 bytes = 512 KiB + [ + 0x70, 0x22, 0xf6, 0xf, 0x7e, 0xf6, 0xad, 0xfa, 0x17, 0x11, 0x7a, 0x52, 0x61, 0x9e, 0x30, + 0xce, 0xa8, 0x2c, 0x68, 0x7, 0x5a, 0xdf, 0x1c, 0x66, 0x77, 0x86, 0xec, 0x50, 0x6e, 0xef, + 0x2d, 0x19, + ], + // 1048576 bytes = 1 MiB + [ + 0xd9, 0x98, 0x87, 0xb9, 0x73, 0x57, 0x3a, 0x96, 0xe1, 0x13, 0x93, 0x64, 0x52, 0x36, 0xc1, + 0x7b, 0x1f, 0x4c, 0x70, 0x34, 0xd7, 0x23, 0xc7, 0xa9, 0x9f, 0x70, 0x9b, 0xb4, 0xda, 0x61, + 0x16, 0x2b, + ], + // 2097152 bytes = 2 MiB + [ + 0xd0, 0xb5, 0x30, 0xdb, 0xb0, 0xb4, 0xf2, 0x5c, 0x5d, 0x2f, 0x2a, 0x28, 0xdf, 0xee, 0x80, + 0x8b, 0x53, 0x41, 0x2a, 0x2, 0x93, 0x1f, 0x18, 0xc4, 0x99, 0xf5, 0xa2, 0x54, 0x8, 0x6b, + 0x13, 0x26, + ], + // 4194304 bytes = 4 MiB + [ + 0x84, 0xc0, 0x42, 0x1b, 0xa0, 0x68, 0x5a, 0x1, 0xbf, 0x79, 0x5a, 0x23, 0x44, 0x6, 
0x4f, + 0xe4, 0x24, 0xbd, 0x52, 0xa9, 0xd2, 0x43, 0x77, 0xb3, 0x94, 0xff, 0x4c, 0x4b, 0x45, 0x68, + 0xe8, 0x11, + ], + // 8388608 bytes = 8 MiB + [ + 0x65, 0xf2, 0x9e, 0x5d, 0x98, 0xd2, 0x46, 0xc3, 0x8b, 0x38, 0x8c, 0xfc, 0x6, 0xdb, 0x1f, + 0x6b, 0x2, 0x13, 0x3, 0xc5, 0xa2, 0x89, 0x0, 0xb, 0xdc, 0xe8, 0x32, 0xa9, 0xc3, 0xec, 0x42, + 0x1c, + ], + // 16777216 bytes = 16 MiB + [ + 0xa2, 0x24, 0x75, 0x8, 0x28, 0x58, 0x50, 0x96, 0x5b, 0x7e, 0x33, 0x4b, 0x31, 0x27, 0xb0, + 0xc0, 0x42, 0xb1, 0xd0, 0x46, 0xdc, 0x54, 0x40, 0x21, 0x37, 0x62, 0x7c, 0xd8, 0x79, 0x9c, + 0xe1, 0x3a, + ], + // 33554432 bytes = 32 MiB + [ + 0xda, 0xfd, 0xab, 0x6d, 0xa9, 0x36, 0x44, 0x53, 0xc2, 0x6d, 0x33, 0x72, 0x6b, 0x9f, 0xef, + 0xe3, 0x43, 0xbe, 0x8f, 0x81, 0x64, 0x9e, 0xc0, 0x9, 0xaa, 0xd3, 0xfa, 0xff, 0x50, 0x61, + 0x75, 0x8, + ], + // 67108864 bytes = 64 MiB + [ + 0xd9, 0x41, 0xd5, 0xe0, 0xd6, 0x31, 0x4a, 0x99, 0x5c, 0x33, 0xff, 0xbd, 0x4f, 0xbe, 0x69, + 0x11, 0x8d, 0x73, 0xd4, 0xe5, 0xfd, 0x2c, 0xd3, 0x1f, 0xf, 0x7c, 0x86, 0xeb, 0xdd, 0x14, + 0xe7, 0x6, + ], + // 134217728 bytes = 128 MiB + [ + 0x51, 0x4c, 0x43, 0x5c, 0x3d, 0x4, 0xd3, 0x49, 0xa5, 0x36, 0x5f, 0xbd, 0x59, 0xff, 0xc7, + 0x13, 0x62, 0x91, 0x11, 0x78, 0x59, 0x91, 0xc1, 0xa3, 0xc5, 0x3a, 0xf2, 0x20, 0x79, 0x74, + 0x1a, 0x2f, + ], + // 268435456 bytes = 256 MiB + [ + 0xad, 0x6, 0x85, 0x39, 0x69, 0xd3, 0x7d, 0x34, 0xff, 0x8, 0xe0, 0x9f, 0x56, 0x93, 0xa, + 0x4a, 0xd1, 0x9a, 0x89, 0xde, 0xf6, 0xc, 0xbf, 0xee, 0x7e, 0x1d, 0x33, 0x81, 0xc1, 0xe7, + 0x1c, 0x37, + ], + // 536870912 bytes = 512 MiB + [ + 0x39, 0x56, 0xe, 0x7b, 0x13, 0xa9, 0x3b, 0x7, 0xa2, 0x43, 0xfd, 0x27, 0x20, 0xff, 0xa7, + 0xcb, 0x3e, 0x1d, 0x2e, 0x50, 0x5a, 0xb3, 0x62, 0x9e, 0x79, 0xf4, 0x63, 0x13, 0x51, 0x2c, + 0xda, 0x6, + ], + // 1073741824 bytes = 1024 MiB = 1 GiB + [ + 0xcc, 0xc3, 0xc0, 0x12, 0xf5, 0xb0, 0x5e, 0x81, 0x1a, 0x2b, 0xbf, 0xdd, 0xf, 0x68, 0x33, + 0xb8, 0x42, 0x75, 0xb4, 0x7b, 0xf2, 0x29, 0xc0, 0x5, 0x2a, 0x82, 0x48, 0x4f, 0x3c, 0x1a, + 0x5b, 0x3d, + ], + // 2147483648 bytes = 2048 MiB = 2 GiB + [ + 0x7d, 0xf2, 0x9b, 0x69, 0x77, 0x31, 0x99, 0xe8, 0xf2, 0xb4, 0xb, 0x77, 0x91, 0x9d, 0x4, + 0x85, 0x9, 0xee, 0xd7, 0x68, 0xe2, 0xc7, 0x29, 0x7b, 0x1f, 0x14, 0x37, 0x3, 0x4f, 0xc3, + 0xc6, 0x2c, + ], + // 4294967296 bytes = 4096 MiB = 4 GiB + [ + 0x66, 0xce, 0x5, 0xa3, 0x66, 0x75, 0x52, 0xcf, 0x45, 0xc0, 0x2b, 0xcc, 0x4e, 0x83, 0x92, + 0x91, 0x9b, 0xde, 0xac, 0x35, 0xde, 0x2f, 0xf5, 0x62, 0x71, 0x84, 0x8e, 0x9f, 0x7b, 0x67, + 0x51, 0x7, + ], + // 8589934592 bytes = 8192 MiB = 8 GiB + [ + 0xd8, 0x61, 0x2, 0x18, 0x42, 0x5a, 0xb5, 0xe9, 0x5b, 0x1c, 0xa6, 0x23, 0x9d, 0x29, 0xa2, + 0xe4, 0x20, 0xd7, 0x6, 0xa9, 0x6f, 0x37, 0x3e, 0x2f, 0x9c, 0x9a, 0x91, 0xd7, 0x59, 0xd1, + 0x9b, 0x1, + ], + // 17179869184 bytes = 16384 MiB = 16 GiB + [ + 0x6d, 0x36, 0x4b, 0x1e, 0xf8, 0x46, 0x44, 0x1a, 0x5a, 0x4a, 0x68, 0x86, 0x23, 0x14, 0xac, + 0xc0, 0xa4, 0x6f, 0x1, 0x67, 0x17, 0xe5, 0x34, 0x43, 0xe8, 0x39, 0xee, 0xdf, 0x83, 0xc2, + 0x85, 0x3c, + ], + // 34359738368 bytes = 32768 MiB = 32 GiB + [ + 0x7, 0x7e, 0x5f, 0xde, 0x35, 0xc5, 0xa, 0x93, 0x3, 0xa5, 0x50, 0x9, 0xe3, 0x49, 0x8a, 0x4e, + 0xbe, 0xdf, 0xf3, 0x9c, 0x42, 0xb7, 0x10, 0xb7, 0x30, 0xd8, 0xec, 0x7a, 0xc7, 0xaf, 0xa6, + 0x3e, + ], + // 68719476736 bytes = 65536 MiB = 64 GiB + [ + 0xe6, 0x40, 0x5, 0xa6, 0xbf, 0xe3, 0x77, 0x79, 0x53, 0xb8, 0xad, 0x6e, 0xf9, 0x3f, 0xf, + 0xca, 0x10, 0x49, 0xb2, 0x4, 0x16, 0x54, 0xf2, 0xa4, 0x11, 0xf7, 0x70, 0x27, 0x99, 0xce, + 0xce, 0x2, + ], + // 137438953472 bytes = 131072 MiB = 128 GiB + [ + 0x25, 0x9d, 0x3d, 
0x6b, 0x1f, 0x4d, 0x87, 0x6d, 0x11, 0x85, 0xe1, 0x12, 0x3a, 0xf6, 0xf5, + 0x50, 0x1a, 0xf0, 0xf6, 0x7c, 0xf1, 0x5b, 0x52, 0x16, 0x25, 0x5b, 0x7b, 0x17, 0x8d, 0x12, + 0x5, 0x1d, + ], + // 274877906944 bytes = 262144 MiB = 256 GiB + [ + 0x3f, 0x9a, 0x4d, 0x41, 0x1d, 0xa4, 0xef, 0x1b, 0x36, 0xf3, 0x5f, 0xf0, 0xa1, 0x95, 0xae, + 0x39, 0x2a, 0xb2, 0x3f, 0xee, 0x79, 0x67, 0xb7, 0xc4, 0x1b, 0x3, 0xd1, 0x61, 0x3f, 0xc2, + 0x92, 0x39, + ], + // 549755813888 bytes = 524288 MiB = 512 GiB + [ + 0xfe, 0x4e, 0xf3, 0x28, 0xc6, 0x1a, 0xa3, 0x9c, 0xfd, 0xb2, 0x48, 0x4e, 0xaa, 0x32, 0xa1, + 0x51, 0xb1, 0xfe, 0x3d, 0xfd, 0x1f, 0x96, 0xdd, 0x8c, 0x97, 0x11, 0xfd, 0x86, 0xd6, 0xc5, + 0x81, 0x13, + ], + // 1099511627776 bytes = 1048576 MiB = 1024 GiB + [ + 0xf5, 0x5d, 0x68, 0x90, 0xe, 0x2d, 0x83, 0x81, 0xec, 0xcb, 0x81, 0x64, 0xcb, 0x99, 0x76, + 0xf2, 0x4b, 0x2d, 0xe0, 0xdd, 0x61, 0xa3, 0x1b, 0x97, 0xce, 0x6e, 0xb2, 0x38, 0x50, 0xd5, + 0xe8, 0x19, + ], + // 2199023255552 bytes = 2097152 MiB = 2048 GiB + [ + 0xaa, 0xaa, 0x8c, 0x4c, 0xb4, 0xa, 0xac, 0xee, 0x1e, 0x2, 0xdc, 0x65, 0x42, 0x4b, 0x2a, + 0x6c, 0x8e, 0x99, 0xf8, 0x3, 0xb7, 0x2f, 0x79, 0x29, 0xc4, 0x10, 0x1d, 0x7f, 0xae, 0x6b, + 0xff, 0x32, + ], +]; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_zero_piece_commitment() { + assert_eq!( + zero_piece_commitment(PaddedPieceSize::new(2048).unwrap()), + [ + 252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79, + 36, 185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51 + ] + ); + + assert_eq!( + zero_piece_commitment(PaddedPieceSize::new(128).unwrap()), + [ + 55, 49, 187, 153, 172, 104, 159, 102, 238, 245, 151, 62, 74, 148, 218, 24, 143, 77, + 220, 174, 88, 7, 36, 252, 111, 63, 214, 13, 253, 72, 131, 51 + ] + ); + } +} diff --git a/primitives/proofs/src/types.rs b/primitives/proofs/src/types.rs index 4a0fcf20c..ccaf62bd9 100644 --- a/primitives/proofs/src/types.rs +++ b/primitives/proofs/src/types.rs @@ -20,6 +20,10 @@ pub type SectorNumber = u64; #[encode_as_type(crate_path = "::scale_encode")] pub enum SectorSize { _2KiB, + _8MiB, + _512MiB, + _32GiB, + _64GiB, } impl SectorSize { @@ -28,6 +32,10 @@ impl SectorSize { pub fn bytes(&self) -> u64 { match self { SectorSize::_2KiB => 2 << 10, + SectorSize::_8MiB => 8 << 20, + SectorSize::_512MiB => 512 << 20, + SectorSize::_32GiB => 32 << 30, + SectorSize::_64GiB => 2 * (32 << 30), } } }
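As a quick sanity check of the piece-size arithmetic introduced above, here is a minimal sketch of how `from_arbitrary_size`, `unpadded` and `zero_piece_commitment` fit together. Module paths are assumed from the new file layout (`primitives_commitment::piece` and `primitives_commitment::zero`); whether `zero` is re-exported from the crate root is not shown in this diff.

// Sketch only: assumes `primitives_commitment` exposes public `piece` and
// `zero` modules, matching the new files in this change.
use primitives_commitment::{piece::PaddedPieceSize, zero::zero_piece_commitment};

fn main() {
    // An arbitrary 1000-byte payload.
    let raw_size: u64 = 1000;

    // Step 1 (Fr32 padding): 1000 + 1000 / 127 = 1007 bytes.
    // Step 2 (round up to a power of two): 1007 -> 1024 bytes.
    let padded = PaddedPieceSize::from_arbitrary_size(raw_size);
    assert_eq!(*padded, 1024);

    // Removing the Fr32 overhead again: 1024 - 1024 / 128 = 1016 bytes.
    assert_eq!(*padded.unpadded(), 1016);

    // Zero-commitment lookup: 1024 = 2^10, so level = 10 - SKIP (7) = 3,
    // i.e. the statically defined commitment for a 1024-byte zero piece.
    let comm: [u8; 32] = zero_piece_commitment(padded);
    assert_eq!(comm.len(), 32);
}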