diff --git a/crates/bin/src/cfg.rs b/crates/bin/src/cfg.rs index ed789166..f4f283d0 100644 --- a/crates/bin/src/cfg.rs +++ b/crates/bin/src/cfg.rs @@ -41,9 +41,9 @@ pub struct CommandLineArgs { #[arg(long)] snark_namespace_id: Option, - /// Celestia Operation Namespace ID + /// Celestia Transaction Namespace ID #[arg(long)] - operation_namespace_id: Option, + transaction_namespace_id: Option, // Height to start searching the DA layer for SNARKs on #[arg(short = 's', long)] @@ -115,9 +115,8 @@ pub fn load_config(args: CommandLineArgs) -> Result { .context("Failed to build config")?; let default_config = Config::default(); - let loaded_config: Config = config_source - .try_deserialize() - .context("Failed to deserialize config file")?; + let loaded_config: Config = + config_source.try_deserialize().context("Failed to deserialize config file")?; let merged_config = merge_configs(loaded_config, default_config); let final_config = apply_command_line_args(merged_config, args); @@ -213,12 +212,12 @@ fn apply_command_line_args(config: Config, args: CommandLineArgs) -> Config { .map(|c| c.snark_namespace_id.clone()) .unwrap_or_else(|| CelestiaConfig::default().snark_namespace_id) }), - operation_namespace_id: Some(args.operation_namespace_id.unwrap_or_else(|| { + transaction_namespace_id: Some(args.transaction_namespace_id.unwrap_or_else(|| { config .celestia_config .as_ref() - .map(|c| c.operation_namespace_id.clone()) - .unwrap_or_else(|| CelestiaConfig::default().operation_namespace_id) + .map(|c| c.transaction_namespace_id.clone()) + .unwrap_or_else(|| CelestiaConfig::default().transaction_namespace_id) .unwrap() })), }), @@ -234,10 +233,8 @@ pub async fn initialize_da_layer( match da_layer { DALayerOption::Celestia => { - let celestia_conf = config - .celestia_config - .clone() - .context("Celestia configuration not found")?; + let celestia_conf = + config.celestia_config.clone().context("Celestia configuration not found")?; for attempt in 1..=DA_RETRY_COUNT { match 
CelestiaConnection::new(&celestia_conf, None).await { diff --git a/crates/common/src/digest.rs b/crates/common/src/digest.rs index f8014237..28dee9fb 100644 --- a/crates/common/src/digest.rs +++ b/crates/common/src/digest.rs @@ -15,6 +15,14 @@ impl Digest { Self(hasher.finalize()) } + pub fn hash_items(items: &[impl AsRef<[u8]>]) -> Self { + let mut hasher = Hasher::new(); + for item in items { + hasher.update(item.as_ref()); + } + Self(hasher.finalize()) + } + pub const fn zero() -> Self { Self([0u8; 32]) } diff --git a/crates/common/src/hashchain.rs b/crates/common/src/hashchain.rs index 8f2f7386..d9bd9adc 100644 --- a/crates/common/src/hashchain.rs +++ b/crates/common/src/hashchain.rs @@ -1,16 +1,18 @@ -use anyhow::{anyhow, bail, Result}; -use jmt::KeyHash; +use anyhow::{anyhow, bail, ensure, Result}; use serde::{Deserialize, Serialize}; -use std::{ - collections::HashSet, - ops::{Deref, DerefMut}, +use std::ops::{Deref, DerefMut}; + +use crate::{ + digest::Digest, + keys::{Signature, SigningKey, VerifyingKey}, + operation::{ + HashchainSignatureBundle, Operation, ServiceChallenge, ServiceChallengeInput, + SignatureBundle, + }, }; -use crate::{digest::Digest, hasher::Hasher, keys::VerifyingKey, operation::Operation}; - #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Hashchain { - pub id: String, pub entries: Vec, } @@ -56,56 +58,40 @@ impl DerefMut for Hashchain { } impl Hashchain { - pub fn from_operation(operation: Operation) -> Result { - let mut hc = Hashchain::empty(operation.id()); - hc.perform_operation(operation)?; - Ok(hc) - } - - pub fn empty(id: String) -> Self { + pub fn empty() -> Self { Self { - id, entries: Vec::new(), } } + pub fn from_entry(entry: HashchainEntry) -> Result { + let mut hc = Hashchain::empty(); + hc.add_entry(entry)?; + Ok(hc) + } + pub fn get_key_at_index(&self, idx: usize) -> Result<&VerifyingKey> { self.entries .get(idx) .and_then(|entry| entry.operation.get_public_key()) - .ok_or_else(|| anyhow!("No valid 
public key found at index {}", idx)) + .ok_or_else(|| anyhow!("No public key found at index {}", idx)) } - pub fn get_valid_keys(&self) -> HashSet { - let mut valid_keys: HashSet = HashSet::new(); - - for entry in self.entries.clone() { - match &entry.operation { - Operation::RegisterService(_) | Operation::AddData(_) => {} - Operation::CreateAccount(args) => { - valid_keys.insert(args.value.clone()); - } - Operation::AddKey(args) => { - valid_keys.insert(args.value.clone()); - } - Operation::RevokeKey(args) => { - valid_keys.remove(&args.value.clone()); + pub fn is_key_invalid(&self, key: &VerifyingKey) -> bool { + for entry in self.iter().rev() { + if let Some(entry_key) = entry.operation.get_public_key() { + if key.eq(entry_key) { + match entry.operation { + Operation::RevokeKey { .. } => return true, + Operation::AddKey { .. } + | Operation::CreateAccount { .. } + | Operation::RegisterService { .. } => return false, + _ => {} + } } } } - valid_keys - } - - pub fn is_key_invalid(&self, key: VerifyingKey) -> bool { - self.iter() - .rev() - .find_map(|entry| match entry.operation.clone() { - Operation::RevokeKey(args) if args.value == key => Some(true), - Operation::AddKey(args) if args.value == key => Some(false), - Operation::CreateAccount(args) if args.value == key => Some(false), - _ => None, - }) - .unwrap_or(true) + true } pub fn get(&self, idx: usize) -> &HashchainEntry { @@ -116,103 +102,126 @@ impl Hashchain { self.last().map_or(Digest::zero(), |entry| entry.hash) } - fn push(&mut self, operation: Operation) -> Result { - if operation.id() != self.id { - bail!("Operation ID does not match Hashchain ID"); - } + pub fn add_entry(&mut self, entry: HashchainEntry) -> Result<()> { + self.validate_new_entry(&entry)?; + self.entries.push(entry); + Ok(()) + } - let previous_hash = self.last_hash(); + pub fn register_service( + &mut self, + id: String, + creation_gate: ServiceChallenge, + key: VerifyingKey, + signing_key: &SigningKey, + ) -> Result { + let entry = 
HashchainEntry::new_register_service(id, creation_gate, key, signing_key); + self.add_entry(entry.clone())?; + Ok(entry) + } - let entry = HashchainEntry::new(operation, previous_hash); - self.entries.push(entry.clone()); + pub fn create_account( + &mut self, + id: String, + service_id: String, + challenge: ServiceChallengeInput, + key: VerifyingKey, + signing_key: &SigningKey, + ) -> Result { + let entry = HashchainEntry::new_create_account(id, service_id, challenge, key, signing_key); + self.add_entry(entry.clone())?; + Ok(entry) + } + pub fn add_key( + &mut self, + key: VerifyingKey, + signing_key: &SigningKey, + key_idx: usize, + ) -> Result { + let entry = HashchainEntry::new_add_key(key, self.last_hash(), signing_key, key_idx); + self.add_entry(entry.clone())?; Ok(entry) } - pub fn perform_operation(&mut self, operation: Operation) -> Result { - self.validate_new_operation(&operation)?; - self.push(operation) + pub fn revoke_key( + &mut self, + key: VerifyingKey, + signing_key: &SigningKey, + key_idx: usize, + ) -> Result { + let entry = HashchainEntry::new_revoke_key(key, self.last_hash(), signing_key, key_idx); + self.add_entry(entry.clone())?; + Ok(entry) } - /// Verifies the structure and signature of a new operation - fn validate_new_operation(&self, operation: &Operation) -> Result<()> { - match operation { - Operation::RegisterService(args) => { - if !self.entries.is_empty() { - bail!("RegisterService operation must be the first entry"); - } + pub fn add_data( + &mut self, + data: Vec, + data_signature: Option, + signing_key: &SigningKey, + key_idx: usize, + ) -> Result { + let entry = HashchainEntry::new_add_data( + data, + data_signature, + self.last_hash(), + signing_key, + key_idx, + ); + self.add_entry(entry.clone())?; + Ok(entry) + } - if args.prev_hash != Digest::zero() { - bail!( - "Previous hash for initial operation must be zero, but was {}", - args.prev_hash - ) - } + fn validate_new_entry(&self, entry: &HashchainEntry) -> Result<()> { + 
entry.validate_operation()?; - Ok(()) - } - Operation::AddKey(args) | Operation::RevokeKey(args) => { - let last_hash = self.last_hash(); - if args.prev_hash != last_hash { - bail!( - "Previous hash for key operation must be the last hash - prev: {}, last: {}", - args.prev_hash, - last_hash - ) - } + let last_hash = self.last_hash(); + if entry.previous_hash != last_hash { + bail!( + "Previous hash for new entry must be the last hash - prev: {}, last: {}", + entry.previous_hash, + last_hash + ) + } - let key_idx = args.signature.key_idx; - let verifying_key = self.get_key_at_index(key_idx)?; + let verifying_key = self.verifying_key_for_entry(entry)?; - if self.is_key_invalid(verifying_key.clone()) { - bail!( - "The key at index {}, intended to verify this operation, is invalid", - key_idx - ); + match entry.operation { + Operation::CreateAccount { .. } | Operation::RegisterService { .. } => { + if !self.entries.is_empty() { + bail!("CreateAccount/RegisterService must be the first entry"); } - - operation.verify_user_signature(verifying_key) } - Operation::AddData(args) => { - let last_hash = self.last_hash(); - if args.prev_hash != last_hash { - bail!( - "Previous hash for add-data operation is not equal to the last hash - prev: {}, last: {}", - args.prev_hash, - last_hash - ) + Operation::AddData { .. } | Operation::AddKey { .. } | Operation::RevokeKey { .. 
} => { + if self.entries.is_empty() { + bail!("CreateAccount/RegisterService must be the first entry"); } - let key_idx = args.op_signature.key_idx; - let verifying_key = self.get_key_at_index(key_idx)?; - - if self.is_key_invalid(verifying_key.clone()) { - bail!( - "The key at index {}, intended to verify this operation, is invalid", - key_idx - ); + if self.is_key_invalid(verifying_key) { + bail!("Invalid key at index {}", &entry.signature_bundle.key_idx); } - - operation.verify_user_signature(verifying_key) } - Operation::CreateAccount(args) => { - if !self.entries.is_empty() { - bail!("CreateAccount operation must be the first entry"); - } + } - if args.prev_hash != Digest::zero() { - bail!("Previous hash for initial operation must be zero") - } + entry.validate_hash()?; + entry.validate_signature(verifying_key) + } - operation.verify_user_signature(&args.value) + fn verifying_key_for_entry<'a>( + &'a self, + entry: &'a HashchainEntry, + ) -> Result<&'a VerifyingKey> { + match &entry.operation { + Operation::CreateAccount { key, .. } | Operation::RegisterService { key, .. } => { + Ok(key) + } + Operation::AddData { .. } | Operation::AddKey { .. } | Operation::RevokeKey { .. 
} => { + self.get_key_at_index(entry.signature_bundle.key_idx) } } } - pub fn get_keyhash(&self) -> KeyHash { - KeyHash::with::(Digest::hash(self.id.clone())) - } - pub fn is_empty(&self) -> bool { self.entries.is_empty() } @@ -229,19 +238,133 @@ pub struct HashchainEntry { pub hash: Digest, pub previous_hash: Digest, pub operation: Operation, + pub signature_bundle: HashchainSignatureBundle, } impl HashchainEntry { - pub fn new(operation: Operation, previous_hash: Digest) -> Self { - let mut data = Vec::new(); - data.extend_from_slice(operation.to_string().as_bytes()); - data.extend_from_slice(previous_hash.as_ref()); - let hash = Digest::hash(data); + pub fn new( + operation: Operation, + previous_hash: Digest, + signing_key: &SigningKey, + key_idx: usize, + ) -> Self { + let serialized_operation = + bincode::serialize(&operation).expect("Serializing operation should work"); + let hash = + Digest::hash_items(&[serialized_operation.as_slice(), &previous_hash.to_bytes()]); + + let signature_bundle = HashchainSignatureBundle { + signature: signing_key.sign(hash.as_ref()), + key_idx, + }; Self { hash, previous_hash, operation, + signature_bundle, + } + } + + pub fn new_genesis(operation: Operation, signing_key: &SigningKey) -> Self { + Self::new(operation, Digest::zero(), signing_key, 0) + } + + pub fn new_register_service( + id: String, + creation_gate: ServiceChallenge, + key: VerifyingKey, + signing_key: &SigningKey, + ) -> Self { + let operation = Operation::RegisterService { + id, + creation_gate, + key, + }; + Self::new_genesis(operation, signing_key) + } + + pub fn new_create_account( + id: String, + service_id: String, + challenge: ServiceChallengeInput, + key: VerifyingKey, + signing_key: &SigningKey, + ) -> Self { + let operation = Operation::CreateAccount { + id, + service_id, + challenge, + key, + }; + Self::new_genesis(operation, signing_key) + } + + pub fn new_add_key( + key: VerifyingKey, + prev_hash: Digest, + signing_key: &SigningKey, + key_idx: 
usize, + ) -> Self { + let operation = Operation::AddKey { key }; + Self::new(operation, prev_hash, signing_key, key_idx) + } + + pub fn new_revoke_key( + key: VerifyingKey, + prev_hash: Digest, + signing_key: &SigningKey, + key_idx: usize, + ) -> Self { + let operation = Operation::RevokeKey { key }; + Self::new(operation, prev_hash, signing_key, key_idx) + } + + pub fn new_add_data( + data: Vec, + data_signature: Option, + prev_hash: Digest, + signing_key: &SigningKey, + key_idx: usize, + ) -> Self { + let operation = Operation::AddData { + data, + data_signature, + }; + Self::new(operation, prev_hash, signing_key, key_idx) + } + + pub fn validate_hash(&self) -> Result<()> { + let pristine_entry = self.without_signature(); + + let serialized_operation = bincode::serialize(&pristine_entry.operation)?; + let pristine_entry_hash = Digest::hash_items(&[ + serialized_operation.as_slice(), + &pristine_entry.previous_hash.to_bytes(), + ]); + + ensure!( + self.hash == pristine_entry_hash, + "Hashchain entry has incorrect hash" + ); + Ok(()) + } + + pub fn validate_signature(&self, verifying_key: &VerifyingKey) -> Result<()> { + verifying_key.verify_signature(self.hash.as_ref(), &self.signature_bundle.signature) + } + + pub fn validate_operation(&self) -> Result<()> { + self.operation.validate_basic() + } + + pub fn without_signature(&self) -> Self { + Self { + signature_bundle: HashchainSignatureBundle { + key_idx: self.signature_bundle.key_idx, + signature: Signature::Placeholder, + }, + ..self.clone() } } } diff --git a/crates/common/src/keys.rs b/crates/common/src/keys.rs index 460935da..f281d112 100644 --- a/crates/common/src/keys.rs +++ b/crates/common/src/keys.rs @@ -1,4 +1,4 @@ -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, bail, Result}; use base64::{engine::general_purpose::STANDARD as engine, Engine as _}; use ed25519_consensus::{ Signature as Ed25519Signature, SigningKey as Ed25519SigningKey, @@ -13,6 +13,14 @@ use std::{self}; use crate::digest::Digest; 
+#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Default)] +pub enum Signature { + Secp256k1(Secp256k1Signature), + Ed25519(Ed25519Signature), + #[default] + Placeholder, +} + #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq, Hash)] /// Represents a public key supported by the system. pub enum VerifyingKey { @@ -31,21 +39,23 @@ impl VerifyingKey { } } - pub fn verify_signature(&self, message: &[u8], signature: &[u8]) -> Result<()> { - if signature.len() != 64 { - return Err(anyhow!("Invalid signature length")); - } + pub fn verify_signature(&self, message: &[u8], signature: &Signature) -> Result<()> { match self { VerifyingKey::Ed25519(vk) => { - let signature = Ed25519Signature::try_from(signature).map_err(|e| anyhow!(e))?; - vk.verify(&signature, message).map_err(|e| anyhow!(e)) + let Signature::Ed25519(signature) = signature else { + bail!("Invalid signature type"); + }; + + vk.verify(signature, message) + .map_err(|e| anyhow!("Failed to verify signature: {}", e)) } VerifyingKey::Secp256k1(vk) => { + let Signature::Secp256k1(signature) = signature else { + bail!("Invalid signature type"); + }; let hashed_message = Digest::hash(message).to_bytes(); let message = Secp256k1Message::from_digest(hashed_message); - let signature = Secp256k1Signature::from_compact(signature)?; - - vk.verify(SECP256K1, &message, &signature) + vk.verify(SECP256K1, &message, signature) .map_err(|e| anyhow!("Failed to verify signature: {}", e)) } } @@ -121,14 +131,14 @@ pub enum SigningKey { } impl SigningKey { - pub fn sign(&self, message: &[u8]) -> Vec { + pub fn sign(&self, message: &[u8]) -> Signature { match self { - SigningKey::Ed25519(sk) => sk.sign(message).to_bytes().to_vec(), + SigningKey::Ed25519(sk) => Signature::Ed25519(sk.sign(message)), SigningKey::Secp256k1(sk) => { let hashed_message = Digest::hash(message).to_bytes(); let message = Secp256k1Message::from_digest(hashed_message); let signature = SECP256K1.sign_ecdsa(&message, sk); - 
signature.serialize_compact().to_vec() + Signature::Secp256k1(signature) } } } diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index 18b0a2be..97835a93 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -3,12 +3,13 @@ pub mod hashchain; pub mod hasher; pub mod keys; pub mod operation; +pub mod transaction; pub mod tree; #[macro_use] extern crate log; -#[cfg(feature = "test_utils")] -pub mod test_ops; #[cfg(feature = "test_utils")] pub mod test_utils; +#[cfg(feature = "test_utils")] +pub mod transaction_builder; diff --git a/crates/common/src/operation.rs b/crates/common/src/operation.rs index 7279cbd8..1ed4f870 100644 --- a/crates/common/src/operation.rs +++ b/crates/common/src/operation.rs @@ -1,29 +1,36 @@ -use anyhow::{Context, Result}; -use bincode; -use celestia_types::Blob; -use prism_errors::GeneralError; +use anyhow::{bail, ensure, Result}; + use serde::{Deserialize, Serialize}; use std::{self, fmt::Display}; -use crate::{ - digest::Digest, - keys::{SigningKey, VerifyingKey}, -}; +use crate::keys::{Signature, SigningKey, VerifyingKey}; #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] /// An [`Operation`] represents a state transition in the system. /// In a blockchain analogy, this would be the full set of our transaction types. pub enum Operation { - /// Creates a new account with the given id and value. - CreateAccount(CreateAccountArgs), - /// Adds a key to an existing account. - AddKey(KeyOperationArgs), + /// Creates a new account with the given id and key. + CreateAccount { + id: String, + service_id: String, + challenge: ServiceChallengeInput, + key: VerifyingKey, + }, + /// Registers a new service with the given id. + RegisterService { + id: String, + creation_gate: ServiceChallenge, + key: VerifyingKey, + }, /// Adds arbitrary signed data to an existing account. - AddData(AddDataArgs), + AddData { + data: Vec, + data_signature: Option, + }, + /// Adds a key to an existing account. 
+ AddKey { key: VerifyingKey }, /// Revokes a key from an existing account. - RevokeKey(KeyOperationArgs), - /// Registers a new service with the given id. - RegisterService(RegisterServiceArgs), + RevokeKey { key: VerifyingKey }, } #[derive(Clone, Serialize, Deserialize, Default, Debug, PartialEq)] @@ -33,14 +40,14 @@ pub struct HashchainSignatureBundle { /// Index of the key in the hashchain pub key_idx: usize, /// The actual signature - pub signature: Vec, + pub signature: Signature, } impl HashchainSignatureBundle { pub fn empty_with_idx(idx: usize) -> Self { HashchainSignatureBundle { key_idx: idx, - signature: vec![], + signature: Signature::Placeholder, } } } @@ -51,42 +58,14 @@ pub struct SignatureBundle { /// The key that can be used to verify the signature pub verifying_key: VerifyingKey, /// The actual signature - pub signature: Vec, + pub signature: Signature, } #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] /// Input required to complete a challenge for account creation. pub enum ServiceChallengeInput { /// Signature bytes - Signed(Vec), -} - -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] -/// Arguments for creating an account with a service. -pub struct CreateAccountArgs { - /// Account ID - pub id: String, - /// Public key being added - pub value: VerifyingKey, - /// Associated service ID - pub service_id: String, - /// Challenge input for verification - pub challenge: ServiceChallengeInput, - /// The hash of the previous operation - pub prev_hash: Digest, - /// The signature that signed the operation - pub signature: Vec, -} - -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] -/// Arguments for registering a new service. 
-pub struct RegisterServiceArgs { - /// Service ID - pub id: String, - /// Challenge gate for access control - pub creation_gate: ServiceChallenge, - /// The hash of the previous operation - pub prev_hash: Digest, + Signed(Signature), } #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] @@ -100,329 +79,44 @@ impl From for ServiceChallenge { } } -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] -/// Structure for adding data. -pub struct AddDataArgs { - /// Account ID - pub id: String, - /// Data to be added - pub value: Vec, - /// Optional external signature used to sign the data to be added - pub value_signature: Option, - /// The hash of the previous operation - pub prev_hash: Digest, - /// Signature to authorize the action - pub op_signature: HashchainSignatureBundle, -} - -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] -/// Common structure for operations involving keys (adding or revoking). -pub struct KeyOperationArgs { - /// Account ID - pub id: String, - /// Public key being added or revoked - pub value: VerifyingKey, - /// The hash of the previous operation - pub prev_hash: Digest, - /// Signature to authorize the action - pub signature: HashchainSignatureBundle, -} - impl Operation { - pub fn new_create_account( - id: String, - signing_key: &SigningKey, - service_id: String, - service_signer: &SigningKey, - ) -> Result { - let mut op = Operation::CreateAccount(CreateAccountArgs { - id: id.to_string(), - value: signing_key.clone().verifying_key(), - service_id, - challenge: ServiceChallengeInput::Signed(Vec::new()), - prev_hash: Digest::zero(), - signature: Vec::new(), - }); - - op.insert_signature(signing_key) - .expect("Inserting signature into operation should succeed"); - - let msg = bincode::serialize(&op).unwrap(); - let service_challenge = service_signer.sign(&msg); - - match op { - Operation::CreateAccount(ref mut args) => { - args.challenge = ServiceChallengeInput::Signed(service_challenge); - } - _ => 
panic!("Operation should be CreateAccount"), - }; - Ok(op) - } - - pub fn new_register_service(id: String, creation_gate: ServiceChallenge) -> Self { - Operation::RegisterService(RegisterServiceArgs { - id, - creation_gate, - prev_hash: Digest::zero(), - }) - } - - pub fn new_add_key( - id: String, - value: VerifyingKey, - prev_hash: Digest, - signing_key: &SigningKey, - key_idx: usize, - ) -> Result { - let op_to_sign = Operation::AddKey(KeyOperationArgs { - id: id.clone(), - value: value.clone(), - prev_hash, - signature: HashchainSignatureBundle::empty_with_idx(key_idx), - }); - - let message = bincode::serialize(&op_to_sign)?; - let signature = HashchainSignatureBundle { - key_idx, - signature: signing_key.sign(&message).to_vec(), - }; - - Ok(Operation::AddKey(KeyOperationArgs { - id, - value, - prev_hash, - signature, - })) - } - - pub fn new_revoke_key( - id: String, - value: VerifyingKey, - prev_hash: Digest, - signing_key: &SigningKey, - key_idx: usize, - ) -> Result { - let op_to_sign = Operation::RevokeKey(KeyOperationArgs { - id: id.clone(), - value: value.clone(), - prev_hash, - signature: HashchainSignatureBundle::empty_with_idx(key_idx), - }); - - let message = bincode::serialize(&op_to_sign)?; - let signature = HashchainSignatureBundle { - key_idx, - signature: signing_key.sign(&message).to_vec(), - }; - - Ok(Operation::RevokeKey(KeyOperationArgs { - id, - value, - prev_hash, - signature, - })) - } - - pub fn new_add_signed_data( - id: String, - value: Vec, - value_signature: Option, - prev_hash: Digest, - signing_key: &SigningKey, - key_idx: usize, - ) -> Result { - let op_to_sign = Operation::AddData(AddDataArgs { - id: id.clone(), - value: value.clone(), - value_signature: value_signature.clone(), - prev_hash, - op_signature: HashchainSignatureBundle::empty_with_idx(key_idx), - }); - - let message = { bincode::serialize(&op_to_sign)? 
}; - let op_signature = HashchainSignatureBundle { - key_idx, - signature: signing_key.sign(&message).to_vec(), - }; - - Ok(Operation::AddData(AddDataArgs { - id, - value, - value_signature, - prev_hash, - op_signature, - })) - } - - pub fn id(&self) -> String { - match self { - Operation::CreateAccount(args) => args.id.clone(), - Operation::AddKey(args) | Operation::RevokeKey(args) => args.id.clone(), - Operation::AddData(args) => args.id.clone(), - Operation::RegisterService(args) => args.id.clone(), - } - } - pub fn get_public_key(&self) -> Option<&VerifyingKey> { match self { - Operation::RevokeKey(args) | Operation::AddKey(args) => Some(&args.value), - Operation::CreateAccount(args) => Some(&args.value), - Operation::RegisterService(_) | Operation::AddData(_) => None, - } - } - - pub fn insert_signature(&mut self, signing_key: &SigningKey) -> Result<()> { - let serialized = bincode::serialize(self).context("Failed to serialize operation")?; - let signature = signing_key.sign(&serialized); - - match self { - Operation::CreateAccount(args) => args.signature = signature, - Operation::AddKey(args) | Operation::RevokeKey(args) => { - args.signature.signature = signature - } - _ => unimplemented!("RegisterService prover gating not yet implemented"), - } - Ok(()) - } - - pub fn without_challenge(&self) -> Self { - match self { - Operation::CreateAccount(args) => Operation::CreateAccount(CreateAccountArgs { - id: args.id.clone(), - value: args.value.clone(), - service_id: args.service_id.clone(), - challenge: ServiceChallengeInput::Signed(Vec::new()), - prev_hash: args.prev_hash, - signature: args.signature.clone(), - }), - _ => self.clone(), - } - } - - pub fn without_signature(&self) -> Self { - match self { - Operation::AddKey(args) => Operation::AddKey(KeyOperationArgs { - id: args.id.clone(), - value: args.value.clone(), - prev_hash: args.prev_hash, - signature: HashchainSignatureBundle { - key_idx: args.signature.key_idx, - signature: Vec::new(), - }, - }), - 
Operation::RevokeKey(args) => Operation::RevokeKey(KeyOperationArgs { - id: args.id.clone(), - value: args.value.clone(), - prev_hash: args.prev_hash, - signature: HashchainSignatureBundle { - key_idx: args.signature.key_idx, - signature: Vec::new(), - }, - }), - Operation::AddData(args) => Operation::AddData(AddDataArgs { - id: args.id.clone(), - value: args.value.clone(), - prev_hash: args.prev_hash, - value_signature: args.value_signature.clone(), - op_signature: HashchainSignatureBundle { - key_idx: args.op_signature.key_idx, - signature: Vec::new(), - }, - }), - Operation::CreateAccount(args) => Operation::CreateAccount(CreateAccountArgs { - id: args.id.clone(), - value: args.value.clone(), - service_id: args.service_id.clone(), - challenge: args.challenge.clone(), - prev_hash: args.prev_hash, - signature: Vec::new(), - }), - Operation::RegisterService(args) => Operation::RegisterService(RegisterServiceArgs { - id: args.id.clone(), - creation_gate: args.creation_gate.clone(), - prev_hash: args.prev_hash, - }), - } - } - - pub fn verify_user_signature(&self, pubkey: &VerifyingKey) -> Result<()> { - match self { - Operation::RegisterService(_) => Ok(()), - Operation::CreateAccount(args) => { - let message = bincode::serialize(&self.without_signature().without_challenge()) - .context("User signature failed")?; - args.value.verify_signature(&message, &args.signature) - } - Operation::AddKey(args) | Operation::RevokeKey(args) => { - let message = bincode::serialize(&self.without_signature()) - .context("User signature failed")?; - pubkey.verify_signature(&message, &args.signature.signature) - } - Operation::AddData(args) => { - let message = bincode::serialize(&self.without_signature()) - .context("Serializing operation failed")?; - pubkey - .verify_signature(&message, &args.op_signature.signature) - .context("Verifying operation signature failed")?; - - let Some(value_signature) = &args.value_signature else { - return Ok(()); - }; - - // If data to be added is 
signed, also validate its signature - value_signature - .verifying_key - .verify_signature(&args.value, &value_signature.signature) - .context("Verifying value signature failed") - } + Operation::RevokeKey { key } + | Operation::AddKey { key } + | Operation::CreateAccount { key, .. } + | Operation::RegisterService { key, .. } => Some(key), + Operation::AddData { .. } => None, } } - pub fn validate(&self) -> Result<()> { + pub fn validate_basic(&self) -> Result<()> { match &self { - Operation::AddKey(KeyOperationArgs { id, signature, .. }) - | Operation::RevokeKey(KeyOperationArgs { id, signature, .. }) - | Operation::AddData(AddDataArgs { - id, - op_signature: signature, - .. - }) => { + Operation::RegisterService { id, .. } => { if id.is_empty() { - return Err( - GeneralError::MissingArgumentError("id is empty".to_string()).into(), - ); - } - - if signature.signature.is_empty() { - return Err(GeneralError::MissingArgumentError( - "signature is empty".to_string(), - ) - .into()); + bail!("id must not be empty when registering service"); } Ok(()) } - Operation::CreateAccount(CreateAccountArgs { id, challenge, .. }) => { + Operation::CreateAccount { id, service_id, .. } => { if id.is_empty() { - return Err( - GeneralError::MissingArgumentError("id is empty".to_string()).into(), - ); + bail!("id must not be empty when creating account service"); } - match challenge { - ServiceChallengeInput::Signed(signature) => { - if signature.is_empty() { - return Err(GeneralError::MissingArgumentError( - "challenge data is empty".to_string(), - ) - .into()); - } - } + if service_id.is_empty() { + bail!("service_id must not be empty when creating account service"); } Ok(()) } - Operation::RegisterService(_) => Ok(()), + Operation::AddKey { .. } | Operation::RevokeKey { .. } => Ok(()), + Operation::AddData { data, .. 
} => { + let data_len = data.len(); + // TODO determine proper max data size here + ensure!(data_len < usize::MAX, "Incoming data size is {}", data_len); + Ok(()) + } } } } @@ -433,12 +127,3 @@ impl Display for Operation { write!(f, "{:?}", self) } } - -impl TryFrom<&Blob> for Operation { - type Error = anyhow::Error; - - fn try_from(value: &Blob) -> Result { - bincode::deserialize(&value.data) - .context(format!("Failed to decode blob into Operation: {value:?}")) - } -} diff --git a/crates/common/src/test_utils.rs b/crates/common/src/test_utils.rs index aa279b82..61a916ab 100644 --- a/crates/common/src/test_utils.rs +++ b/crates/common/src/test_utils.rs @@ -3,7 +3,8 @@ use crate::{ hashchain::Hashchain, hasher::Hasher, keys::{SigningKey, VerifyingKey}, - operation::{Operation, ServiceChallenge, SignatureBundle}, + operation::{ServiceChallenge, ServiceChallengeInput, SignatureBundle}, + transaction::Transaction, tree::{ HashchainResponse::*, InsertProof, KeyDirectoryTree, Proof, SnarkableTree, UpdateProof, }, @@ -26,12 +27,13 @@ use std::{ pub struct TestTreeState { pub tree: KeyDirectoryTree, pub signing_keys: HashMap, - inserted_keys: HashSet, + inserted_keys: HashSet, pub services: HashMap, } #[derive(Clone)] pub struct TestAccount { + pub id: String, pub key_hash: KeyHash, pub hashchain: Hashchain, } @@ -50,58 +52,97 @@ impl TestTreeState { } pub fn register_service(&mut self, service_id: String) -> Service { - let service_key = create_mock_signing_key(); + let service_challenge_key = create_mock_signing_key(); + let service_signing_key = create_mock_signing_key(); - let hashchain = Hashchain::from_operation(Operation::new_register_service( - service_id.clone(), - ServiceChallenge::from(service_key.clone()), - )) - .unwrap(); + let mut hashchain = Hashchain::empty(); - let key_hash = hashchain.get_keyhash(); + hashchain + .register_service( + service_id.clone(), + ServiceChallenge::from(service_challenge_key.clone()), + service_signing_key.verifying_key(), + 
&service_signing_key, + ) + .unwrap(); + + let hashed_id = Digest::hash(&service_id); + let key_hash = KeyHash::with::(hashed_id); Service { - id: service_id, - sk: service_key.clone(), - vk: service_key.verifying_key(), + id: service_id.clone(), + sk: service_challenge_key.clone(), + vk: service_challenge_key.verifying_key(), registration: TestAccount { + id: service_id, key_hash, hashchain, }, } } - pub fn create_account(&mut self, key: String, service: Service) -> TestAccount { + pub fn create_account(&mut self, id: String, service: Service) -> TestAccount { let signing_key = create_mock_signing_key(); - self.signing_keys.insert(key.clone(), signing_key.clone()); - let hashchain = create_new_hashchain(key.as_str(), &signing_key, service); - let key_hash = hashchain.get_keyhash(); + self.signing_keys.insert(id.clone(), signing_key.clone()); + + // Simulate some external service signing account creation credentials + let hash = Digest::hash_items(&[ + id.as_bytes(), + service.id.as_bytes(), + &signing_key.verifying_key().as_bytes(), + ]); + let signature = service.sk.sign(&hash.to_bytes()); + + let mut hashchain = Hashchain::empty(); + hashchain + .create_account( + id.clone(), + service.id.clone(), + ServiceChallengeInput::Signed(signature), + signing_key.verifying_key(), + &signing_key, + ) + .unwrap(); + + let hashed_id = Digest::hash(&id); + let key_hash = KeyHash::with::(hashed_id); TestAccount { + id, key_hash, hashchain, } } pub fn insert_account(&mut self, account: TestAccount) -> Result { - if self.inserted_keys.contains(&account.key_hash) { - return Err(anyhow!("{:?} already contained in tree", account.key_hash)); + if self.inserted_keys.contains(&account.id) { + return Err(anyhow!("{:?} already contained in tree", account.id)); } - let proof = self.tree.process_operation(&account.hashchain.last().unwrap().operation)?; + let transaction = Transaction { + id: account.id.clone(), + entry: account.hashchain.last().unwrap().clone(), + }; + + let proof = 
self.tree.process_transaction(transaction)?; if let Proof::Insert(insert_proof) = proof { - self.inserted_keys.insert(account.key_hash); + self.inserted_keys.insert(account.id); return Ok(*insert_proof); } Err(anyhow!("Insert proof not returned")) } pub fn update_account(&mut self, account: TestAccount) -> Result { - if !self.inserted_keys.contains(&account.key_hash) { - return Err(anyhow!("{:?} not found in tree", account.key_hash)); + if !self.inserted_keys.contains(&account.id) { + return Err(anyhow!("{:?} not found in tree", account.id)); } - let proof = self.tree.process_operation(&account.hashchain.last().unwrap().operation)?; + let transaction = Transaction { + id: account.id.clone(), + entry: account.hashchain.last().unwrap().clone(), + }; + + let proof = self.tree.process_transaction(transaction)?; if let Proof::Update(update_proof) = proof { return Ok(*update_proof); } @@ -111,15 +152,11 @@ impl TestTreeState { pub fn add_key_to_account(&mut self, account: &mut TestAccount) -> Result<(), anyhow::Error> { let signing_key_to_add = create_mock_signing_key(); let key_to_add = signing_key_to_add.verifying_key(); - let op = Operation::new_add_key( - account.hashchain.id.clone(), - key_to_add.clone(), - account.hashchain.last_hash(), - self.signing_keys.get(&account.hashchain.id).unwrap(), - 0, - )?; - - account.hashchain.perform_operation(op).unwrap(); + + account + .hashchain + .add_key(key_to_add, self.signing_keys.get(&account.id).unwrap(), 0) + .unwrap(); Ok(()) } @@ -151,20 +188,9 @@ impl TestTreeState { signature: sk.sign(data), }); - let op_signing_key = self.signing_keys.get(&account.hashchain.id).unwrap(); + let signing_key = self.signing_keys.get(&account.id).unwrap(); - let prev_hash = account.hashchain.last_hash(); - - let op = Operation::new_add_signed_data( - account.hashchain.id.clone(), - data.to_vec(), - signature_bundle, - prev_hash, - op_signing_key, - 0, - )?; - - account.hashchain.perform_operation(op).unwrap(); + 
account.hashchain.add_data(data.to_vec(), signature_bundle, signing_key, 0)?; Ok(()) } } @@ -191,20 +217,30 @@ pub fn create_random_insert(state: &mut TestTreeState, rng: &mut StdRng) -> Inse let (_, service) = state.services.iter().nth(rng.gen_range(0..state.services.len())).unwrap(); - let operation = Operation::new_create_account( - random_string.clone(), - &sk, - service.id.clone(), - &service.sk, - ) - .expect("Creating account operation should succeed"); + // Simulate some external service signing account creation credentials + let hash = Digest::hash_items(&[ + random_string.as_bytes(), + service.id.as_bytes(), + &sk.verifying_key().as_bytes(), + ]); + let signature = service.sk.sign(&hash.to_bytes()); let hashed_id = Digest::hash(&random_string); let key_hash = KeyHash::with::(hashed_id); - if !state.inserted_keys.contains(&key_hash) { - let proof = state.tree.insert(key_hash, operation).expect("Insert should succeed"); - state.inserted_keys.insert(key_hash); + let entry = Hashchain::empty() + .create_account( + random_string.clone(), + service.id.clone(), + ServiceChallengeInput::Signed(signature), + sk.verifying_key(), + &sk, + ) + .unwrap(); + + if !state.inserted_keys.contains(&random_string) { + let proof = state.tree.insert(key_hash, entry).expect("Insert should succeed"); + state.inserted_keys.insert(random_string.clone()); state.signing_keys.insert(random_string, sk); return proof; } @@ -216,9 +252,12 @@ pub fn create_random_update(state: &mut TestTreeState, rng: &mut StdRng) -> Upda panic!("No keys have been inserted yet. 
Cannot perform update."); } - let key = *state.inserted_keys.iter().nth(rng.gen_range(0..state.inserted_keys.len())).unwrap(); + let key = state.inserted_keys.iter().nth(rng.gen_range(0..state.inserted_keys.len())).unwrap(); - let Found(hc, _) = state.tree.get(key).unwrap() else { + let hashed_id = Digest::hash(key); + let key_hash = KeyHash::with::(hashed_id); + + let Found(mut hc, _) = state.tree.get(key_hash).unwrap() else { panic!("No response found for key. Cannot perform update."); }; @@ -227,21 +266,18 @@ pub fn create_random_update(state: &mut TestTreeState, rng: &mut StdRng) -> Upda let signer = state .signing_keys - .get(&hc.id) + .get(key) .ok_or_else(|| anyhow::anyhow!("Signing key not found for hashchain")) .unwrap(); - let operation = Operation::new_add_key( - hc.id.clone(), - verifying_key.clone(), - hc.last_hash(), - signer, - 0, - ) - .unwrap(); + let entry = hc.add_key(verifying_key, signer, 0).unwrap(); + let transaction = Transaction { + id: key.clone(), + entry, + }; let Proof::Update(update_proof) = - state.tree.process_operation(&operation).expect("Processing operation should succeed") + state.tree.process_transaction(transaction).expect("Processing transaction should succeed") else { panic!("No update proof returned."); }; @@ -258,9 +294,3 @@ pub fn create_mock_signing_key() -> SigningKey { pub fn create_mock_signing_key() -> SigningKey { SigningKey::Secp256k1(Secp256k1SigningKey::new(&mut OsRng)) } - -pub fn create_new_hashchain(id: &str, signing_key: &SigningKey, service: Service) -> Hashchain { - let op = Operation::new_create_account(id.to_string(), signing_key, service.id, &service.sk) - .unwrap(); - Hashchain::from_operation(op.clone()).unwrap() -} diff --git a/crates/common/src/transaction.rs b/crates/common/src/transaction.rs new file mode 100644 index 00000000..1990d4e6 --- /dev/null +++ b/crates/common/src/transaction.rs @@ -0,0 +1,20 @@ +use anyhow::anyhow; +use celestia_types::Blob; +use serde::{Deserialize, Serialize}; + +use 
crate::hashchain::HashchainEntry; + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] +pub struct Transaction { + pub id: String, + pub entry: HashchainEntry, +} + +impl TryFrom<&Blob> for Transaction { + type Error = anyhow::Error; + + fn try_from(value: &Blob) -> Result { + bincode::deserialize(&value.data) + .map_err(|e| anyhow!("Failed to decode blob into Transaction: error: {}", e)) + } +} diff --git a/crates/common/src/test_ops.rs b/crates/common/src/transaction_builder.rs similarity index 64% rename from crates/common/src/test_ops.rs rename to crates/common/src/transaction_builder.rs index 2fe94f27..7360af6d 100644 --- a/crates/common/src/test_ops.rs +++ b/crates/common/src/transaction_builder.rs @@ -4,31 +4,34 @@ use jmt::{mock::MockTreeStore, KeyHash}; use crate::{ digest::Digest, + hashchain::HashchainEntry, hasher::Hasher, keys::{SigningKey, VerifyingKey}, - operation::{Operation, SignatureBundle}, + operation::{ServiceChallenge, ServiceChallengeInput, SignatureBundle}, test_utils::create_mock_signing_key, + transaction::Transaction, tree::{HashchainResponse::*, KeyDirectoryTree, SnarkableTree}, }; - enum PostCommitAction { UpdateStorageOnly, RememberServiceKey(String, SigningKey), RememberAccountKey(String, SigningKey), } -pub struct UncommittedOperation<'a> { - operation: Operation, - builder: &'a mut OpsBuilder, +pub struct UncommittedTransaction<'a> { + transaction: Transaction, + builder: &'a mut TransactionBuilder, post_commit_action: PostCommitAction, } -impl UncommittedOperation<'_> { - pub fn ex(self) -> Operation { +impl UncommittedTransaction<'_> { + /// Commits and returns a transaction, updating the builder. Subsequent transactions + /// built with the same builder will have the correct previous hash. 
+ pub fn commit(self) -> Transaction { self.builder .tree - .process_operation(&self.operation) - .expect("Processing operation should work"); + .process_transaction(self.transaction.clone()) + .expect("Processing transaction should work"); match self.post_commit_action { PostCommitAction::UpdateStorageOnly => (), @@ -40,16 +43,18 @@ impl UncommittedOperation<'_> { } } - self.operation + self.transaction } - pub fn op(self) -> Operation { - self.operation + /// Returns a transaction without updating the builder. + /// Can be used to create invalid transactions. + pub fn build(self) -> Transaction { + self.transaction } } -pub struct OpsBuilder { - /// Simulated hashchain storage that is mutated when operations are applied +pub struct TransactionBuilder { + /// Simulated hashchain storage that is mutated when transactions are applied tree: Box, /// Remembers private keys of services to simulate account creation via an external service service_keys: HashMap, @@ -57,7 +62,7 @@ pub struct OpsBuilder { account_keys: HashMap, } -impl Default for OpsBuilder { +impl Default for TransactionBuilder { fn default() -> Self { let store = Arc::new(MockTreeStore::default()); let tree = Box::new(KeyDirectoryTree::new(store)); @@ -72,31 +77,37 @@ impl Default for OpsBuilder { } } -impl OpsBuilder { +impl TransactionBuilder { pub fn new() -> Self { Self::default() } - pub fn register_service_with_random_key(&mut self, id: &str) -> UncommittedOperation { - let random_service_key = create_mock_signing_key(); - self.register_service(id, random_service_key) + pub fn register_service_with_random_keys(&mut self, id: &str) -> UncommittedTransaction { + let random_service_challenge_key = create_mock_signing_key(); + let random_service_signing_key = create_mock_signing_key(); + self.register_service(id, random_service_challenge_key, random_service_signing_key) } pub fn register_service( &mut self, id: &str, - service_signing_key: SigningKey, - ) -> UncommittedOperation { - let op = - 
Operation::new_register_service(id.to_string(), service_signing_key.clone().into()); + challenge_key: SigningKey, + signing_key: SigningKey, + ) -> UncommittedTransaction { + let entry = HashchainEntry::new_register_service( + id.to_string(), + ServiceChallenge::from(challenge_key.clone()), + signing_key.verifying_key(), + &signing_key, + ); - UncommittedOperation { - operation: op, + UncommittedTransaction { + transaction: Transaction { + id: id.to_string(), + entry, + }, builder: self, - post_commit_action: PostCommitAction::RememberServiceKey( - id.to_string(), - service_signing_key, - ), + post_commit_action: PostCommitAction::RememberServiceKey(id.to_string(), challenge_key), } } @@ -104,7 +115,7 @@ impl OpsBuilder { &mut self, id: &str, service_id: &str, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let random_signing_key = create_mock_signing_key(); self.create_account(id, service_id, random_signing_key) } @@ -114,27 +125,38 @@ impl OpsBuilder { id: &str, service_id: &str, signing_key: SigningKey, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let Some(service_signing_key) = self.service_keys.get(service_id) else { panic!("No existing service found for {}", service_id) }; - let op = Operation::new_create_account( + // Simulate some external service signing account creation credentials + let hash = Digest::hash_items(&[ + id.as_bytes(), + service_id.as_bytes(), + &signing_key.verifying_key().as_bytes(), + ]); + let signature = service_signing_key.sign(&hash.to_bytes()); + + let entry = HashchainEntry::new_create_account( id.to_string(), - &signing_key, service_id.to_string(), - service_signing_key, - ) - .expect("Creating account operation should work"); + ServiceChallengeInput::Signed(signature), + signing_key.verifying_key(), + &signing_key, + ); - UncommittedOperation { - operation: op, + UncommittedTransaction { + transaction: Transaction { + id: id.to_string(), + entry, + }, builder: self, post_commit_action: 
PostCommitAction::RememberAccountKey(id.to_string(), signing_key), } } - pub fn add_random_key_verified_with_root(&mut self, id: &str) -> UncommittedOperation { + pub fn add_random_key_verified_with_root(&mut self, id: &str) -> UncommittedTransaction { let Some(account_signing_key) = self.account_keys.get(id).cloned() else { panic!("No existing account key for {}", id) }; @@ -147,7 +169,7 @@ impl OpsBuilder { id: &str, signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let random_key = create_mock_signing_key().verifying_key(); self.add_key(id, random_key, signing_key, key_idx) } @@ -156,7 +178,7 @@ impl OpsBuilder { &mut self, id: &str, key: VerifyingKey, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let Some(account_signing_key) = self.account_keys.get(id).cloned() else { panic!("No existing account key for {}", id) }; @@ -170,7 +192,7 @@ impl OpsBuilder { key: VerifyingKey, signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let hashed_id = Digest::hash(id); let key_hash = KeyHash::with::(hashed_id); @@ -178,11 +200,13 @@ impl OpsBuilder { panic!("No existing hashchain found for {}", id) }; - let op = Operation::new_add_key(id.to_string(), key, hc.last_hash(), signing_key, key_idx) - .expect("Creating add-key operation should work"); + let entry = HashchainEntry::new_add_key(key, hc.last_hash(), signing_key, key_idx); - UncommittedOperation { - operation: op, + UncommittedTransaction { + transaction: Transaction { + id: id.to_string(), + entry, + }, builder: self, post_commit_action: PostCommitAction::UpdateStorageOnly, } @@ -192,7 +216,7 @@ impl OpsBuilder { &mut self, id: &str, key: VerifyingKey, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let Some(account_signing_key) = self.account_keys.get(id).cloned() else { panic!("No existing account key for {}", id) }; @@ -206,7 +230,7 @@ impl OpsBuilder { key: VerifyingKey, 
signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let hashed_id = Digest::hash(id); let key_hash = KeyHash::with::(hashed_id); @@ -214,12 +238,13 @@ impl OpsBuilder { panic!("No existing hashchain found for {}", id) }; - let op = - Operation::new_revoke_key(id.to_string(), key, hc.last_hash(), signing_key, key_idx) - .expect("Creating account operation should work"); + let entry = HashchainEntry::new_revoke_key(key, hc.last_hash(), signing_key, key_idx); - UncommittedOperation { - operation: op, + UncommittedTransaction { + transaction: Transaction { + id: id.to_string(), + entry, + }, builder: self, post_commit_action: PostCommitAction::UpdateStorageOnly, } @@ -232,7 +257,7 @@ impl OpsBuilder { value_signature: SignatureBundle, signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { self.add_data(id, value, Some(value_signature), signing_key, key_idx) } @@ -241,7 +266,7 @@ impl OpsBuilder { id: &str, value: Vec, value_signature: SignatureBundle, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { self.add_data_verified_with_root(id, value, Some(value_signature)) } @@ -251,7 +276,7 @@ impl OpsBuilder { value: Vec, signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { self.add_data(id, value, None, signing_key, key_idx) } @@ -259,7 +284,7 @@ impl OpsBuilder { &mut self, id: &str, value: Vec, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { self.add_data_verified_with_root(id, value, None) } @@ -268,7 +293,7 @@ impl OpsBuilder { id: &str, value: Vec, value_signature: Option, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let Some(account_signing_key) = self.account_keys.get(id).cloned() else { panic!("No existing account key for {}", id) }; @@ -279,11 +304,11 @@ impl OpsBuilder { fn add_data( &mut self, id: &str, - value: Vec, - value_signature: Option, + data: Vec, + 
data_signature: Option, signing_key: &SigningKey, key_idx: usize, - ) -> UncommittedOperation { + ) -> UncommittedTransaction { let hashed_id = Digest::hash(id); let key_hash = KeyHash::with::(hashed_id); @@ -291,18 +316,19 @@ impl OpsBuilder { panic!("No existing hashchain found for {}", id) }; - let op = Operation::new_add_signed_data( - id.to_string(), - value, - value_signature, + let entry = HashchainEntry::new_add_data( + data, + data_signature, hc.last_hash(), signing_key, key_idx, - ) - .expect("Creating add-data operation should work"); + ); - UncommittedOperation { - operation: op, + UncommittedTransaction { + transaction: Transaction { + id: id.to_string(), + entry, + }, builder: self, post_commit_action: PostCommitAction::UpdateStorageOnly, } diff --git a/crates/common/src/tree.rs b/crates/common/src/tree.rs index 939b864d..524de06e 100644 --- a/crates/common/src/tree.rs +++ b/crates/common/src/tree.rs @@ -1,4 +1,4 @@ -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{anyhow, bail, ensure, Context, Result}; use bincode; use jmt::{ proof::{SparseMerkleProof, UpdateMerkleProof}, @@ -11,12 +11,10 @@ use std::{convert::Into, sync::Arc}; use crate::{ digest::Digest, - hashchain::Hashchain, + hashchain::{Hashchain, HashchainEntry}, hasher::Hasher, - operation::{ - AddDataArgs, CreateAccountArgs, KeyOperationArgs, Operation, RegisterServiceArgs, - ServiceChallenge, ServiceChallengeInput, - }, + operation::{Operation, ServiceChallenge, ServiceChallengeInput}, + transaction::Transaction, }; use HashchainResponse::*; @@ -72,14 +70,14 @@ pub struct InsertProof { pub new_root: Digest, pub membership_proof: SparseMerkleProof, - pub insertion_op: Operation, + pub new_entry: HashchainEntry, } impl InsertProof { pub fn verify(&self) -> Result<()> { self.non_membership_proof.verify().context("Invalid NonMembershipProof")?; - let hashchain = Hashchain::from_operation(self.insertion_op.clone())?; + let hashchain = Hashchain::from_entry(self.new_entry.clone())?; 
let serialized_hashchain = bincode::serialize(&hashchain)?; self.membership_proof.clone().verify_existence( @@ -99,7 +97,7 @@ pub struct UpdateProof { pub key: KeyHash, pub old_hashchain: Hashchain, - pub update_op: Operation, + pub new_entry: HashchainEntry, /// Inclusion proof of [`old_value`] pub inclusion_proof: SparseMerkleProof, @@ -116,7 +114,7 @@ impl UpdateProof { let mut hashchain_after_update = self.old_hashchain.clone(); // Append the new entry and verify it's validity - hashchain_after_update.perform_operation(self.update_op.clone())?; + hashchain_after_update.add_entry(self.new_entry.clone())?; // Ensure the update proof corresponds to the new hashchain value let new_serialized_hashchain = bincode::serialize(&hashchain_after_update)?; @@ -140,10 +138,10 @@ pub enum HashchainResponse { NotFound(NonMembershipProof), } -pub trait SnarkableTree { - fn process_operation(&mut self, operation: &Operation) -> Result; - fn insert(&mut self, key: KeyHash, initial_op: Operation) -> Result; - fn update(&mut self, key: KeyHash, update_op: Operation) -> Result; +pub trait SnarkableTree: Send + Sync { + fn process_transaction(&mut self, transaction: Transaction) -> Result; + fn insert(&mut self, key: KeyHash, entry: HashchainEntry) -> Result; + fn update(&mut self, key: KeyHash, entry: HashchainEntry) -> Result; fn get(&self, key: KeyHash) -> Result; } @@ -224,27 +222,30 @@ where impl SnarkableTree for KeyDirectoryTree where - S: TreeReader + TreeWriter, + S: Send + Sync + TreeReader + TreeWriter, { - fn process_operation(&mut self, operation: &Operation) -> Result { - match operation { - Operation::AddKey(KeyOperationArgs { id, .. }) - | Operation::RevokeKey(KeyOperationArgs { id, .. }) - | Operation::AddData(AddDataArgs { id, .. }) => { - let hashed_id = Digest::hash(id); + fn process_transaction(&mut self, transaction: Transaction) -> Result { + match &transaction.entry.operation { + Operation::AddKey { .. } | Operation::RevokeKey { .. } | Operation::AddData { .. 
} => { + let hashed_id = Digest::hash(&transaction.id); let key_hash = KeyHash::with::(hashed_id); - debug!("updating hashchain for user id {}", id.clone()); + let proof = self.update(key_hash, operation.clone())?; + debug!("updating hashchain for user id {}", transaction.id); + let proof = self.update(key_hash, transaction.entry)?; Ok(Proof::Update(Box::new(proof))) } - Operation::CreateAccount(CreateAccountArgs { + Operation::CreateAccount { id, service_id, challenge, - .. - }) => { + key, + } => { + ensure!( + transaction.id == id.as_str(), + "Id of transaction needs to be equal to operation id" + ); + let hashed_id = Digest::hash(id); let account_key_hash = KeyHash::with::(hashed_id); @@ -267,38 +268,44 @@ where }; let creation_gate = match &service_last_entry.operation { - Operation::RegisterService(args) => &args.creation_gate, + Operation::RegisterService { creation_gate, .. } => creation_gate, _ => { bail!("Service hashchain's last entry was not a RegisterService operation") } }; - let ServiceChallenge::Signed(service_pubkey) = creation_gate; + // Recompute the credential hash and verify the external service's signature over it + let hash = + Digest::hash_items(&[id.as_bytes(), service_id.as_bytes(), &key.as_bytes()]); + let ServiceChallenge::Signed(service_pubkey) = creation_gate; let ServiceChallengeInput::Signed(challenge_signature) = &challenge; - service_pubkey.verify_signature( - &bincode::serialize(&operation.without_challenge())?, - challenge_signature, - )?; + + service_pubkey.verify_signature(&hash.to_bytes(), challenge_signature)?; debug!("creating new hashchain for user ID {}", id); - let insert_proof = self.insert(account_key_hash, operation.clone())?; + let insert_proof = self.insert(account_key_hash, transaction.entry)?; Ok(Proof::Insert(Box::new(insert_proof))) } - Operation::RegisterService(RegisterServiceArgs { id, .. }) => { + Operation::RegisterService { id, .. 
} => { + ensure!( + transaction.id == id.as_str(), + "Id of transaction needs to be equal to operation id" + ); + let hashed_id = Digest::hash(id); let key_hash = KeyHash::with::(hashed_id); debug!("creating new hashchain for service id {}", id); - let insert_proof = self.insert(key_hash, operation.clone())?; + let insert_proof = self.insert(key_hash, transaction.entry)?; Ok(Proof::Insert(Box::new(insert_proof))) } } } - fn insert(&mut self, key: KeyHash, insertion_op: Operation) -> Result { + fn insert(&mut self, key: KeyHash, entry: HashchainEntry) -> Result { let old_root = self.get_current_root()?; let (None, non_membership_merkle_proof) = self.jmt.get_with_proof(key, self.epoch)? else { bail!("Key already exists"); @@ -310,7 +317,7 @@ where key, }; - let hashchain = Hashchain::from_operation(insertion_op.clone())?; + let hashchain = Hashchain::from_entry(entry.clone())?; let serialized_hashchain = Self::serialize_value(&hashchain)?; // the update proof just contains another nm proof @@ -324,13 +331,13 @@ where Ok(InsertProof { new_root: new_root.into(), - insertion_op, + new_entry: entry, non_membership_proof, membership_proof, }) } - fn update(&mut self, key: KeyHash, update_op: Operation) -> Result { + fn update(&mut self, key: KeyHash, entry: HashchainEntry) -> Result { let old_root = self.get_current_root()?; let (Some(old_serialized_hashchain), inclusion_proof) = self.jmt.get_with_proof(key, self.epoch)? 
@@ -341,7 +348,7 @@ where let old_hashchain: Hashchain = bincode::deserialize(old_serialized_hashchain.as_slice())?; let mut new_hashchain = old_hashchain.clone(); - new_hashchain.perform_operation(update_op.clone())?; + new_hashchain.add_entry(entry.clone())?; let serialized_value = Self::serialize_value(&new_hashchain)?; @@ -359,7 +366,7 @@ where old_hashchain, key, update_proof, - update_op, + new_entry: entry, }) } diff --git a/crates/da/src/celestia.rs b/crates/da/src/celestia.rs index 3e814693..ff279880 100644 --- a/crates/da/src/celestia.rs +++ b/crates/da/src/celestia.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use celestia_rpc::{BlobClient, Client, HeaderClient}; use celestia_types::{nmt::Namespace, Blob, TxConfig}; use log::{debug, error, trace, warn}; -use prism_common::operation::Operation; +use prism_common::transaction::Transaction; use prism_errors::{DataAvailabilityError, GeneralError}; use serde::{Deserialize, Serialize}; use std::{ @@ -33,7 +33,7 @@ pub struct CelestiaConfig { pub connection_string: String, pub start_height: u64, pub snark_namespace_id: String, - pub operation_namespace_id: Option, + pub transaction_namespace_id: Option, } impl Default for CelestiaConfig { @@ -42,7 +42,7 @@ impl Default for CelestiaConfig { connection_string: "ws://localhost:26658".to_string(), start_height: 0, snark_namespace_id: "00000000000000de1008".to_string(), - operation_namespace_id: Some("00000000000000de1009".to_string()), + transaction_namespace_id: Some("00000000000000de1009".to_string()), } } } @@ -50,7 +50,7 @@ impl Default for CelestiaConfig { pub struct CelestiaConnection { pub client: celestia_rpc::Client, pub snark_namespace: Namespace, - pub operation_namespace: Namespace, + pub transaction_namespace: Namespace, height_update_tx: broadcast::Sender, sync_target: Arc, @@ -68,9 +68,9 @@ impl CelestiaConnection { &config.snark_namespace_id ))?; - let operation_namespace = match &config.operation_namespace_id { + let transaction_namespace = 
match &config.transaction_namespace_id { Some(id) => create_namespace(id).context(format!( - "Failed to create operation namespace from: '{}'", + "Failed to create transaction namespace from: '{}'", id ))?, None => snark_namespace, @@ -81,7 +81,7 @@ impl CelestiaConnection { Ok(CelestiaConnection { client, snark_namespace, - operation_namespace, + transaction_namespace, height_update_tx, sync_target: Arc::new(AtomicU64::new(0)), }) @@ -168,15 +168,18 @@ impl DataAvailabilityLayer for CelestiaConnection { .map_err(|e| anyhow!(DataAvailabilityError::SubmissionError(e.to_string()))) } - async fn get_operations(&self, height: u64) -> Result> { - trace!("searching for operations on da layer at height {}", height); + async fn get_transactions(&self, height: u64) -> Result> { + trace!( + "searching for transactions on da layer at height {}", + height + ); let maybe_blobs = - BlobClient::blob_get_all(&self.client, height, &[self.operation_namespace]) + BlobClient::blob_get_all(&self.client, height, &[self.transaction_namespace]) .await .map_err(|e| { anyhow!(DataAvailabilityError::DataRetrievalError( height, - format!("getting operations from da layer: {}", e) + format!("getting transactions from da layer: {}", e) )) })?; @@ -185,13 +188,13 @@ impl DataAvailabilityLayer for CelestiaConnection { None => return Ok(vec![]), }; - let operations = blobs + let transactions = blobs .iter() - .filter_map(|blob| match Operation::try_from(blob) { - Ok(operation) => Some(operation), + .filter_map(|blob| match Transaction::try_from(blob) { + Ok(transaction) => Some(transaction), Err(e) => { warn!( - "Failed to parse blob from height {} to operation: {:?}", + "Failed to parse blob from height {} to transaction: {:?}", height, e ); None @@ -199,24 +202,27 @@ impl DataAvailabilityLayer for CelestiaConnection { }) .collect(); - Ok(operations) + Ok(transactions) } - async fn submit_operations(&self, operations: Vec) -> Result { - debug!("posting {} operations to DA layer", 
operations.len()); - let blobs: Result, _> = operations + async fn submit_transactions(&self, transactions: Vec) -> Result { + debug!("posting {} transactions to DA layer", transactions.len()); + let blobs: Result, _> = transactions .iter() - .map(|operation| { - let data = bincode::serialize(operation) - .context(format!("Failed to serialize operation {}", operation)) + .map(|transaction| { + let data = bincode::serialize(transaction) + .context(format!("Failed to serialize transaction {:?}", transaction)) .map_err(|e| { DataAvailabilityError::GeneralError(GeneralError::ParsingError( e.to_string(), )) })?; - Blob::new(self.operation_namespace, data) - .context(format!("Failed to create blob for operation {}", operation)) + Blob::new(self.transaction_namespace, data) + .context(format!( + "Failed to create blob for transaction {:?}", + transaction + )) .map_err(|e| { DataAvailabilityError::GeneralError(GeneralError::BlobCreationError( e.to_string(), diff --git a/crates/da/src/lib.rs b/crates/da/src/lib.rs index ce0e6206..8b4ea30a 100644 --- a/crates/da/src/lib.rs +++ b/crates/da/src/lib.rs @@ -1,7 +1,7 @@ use anyhow::Result; use async_trait::async_trait; use ed25519_consensus::{Signature, SigningKey, VerificationKey as VerifyingKey}; -use prism_common::{digest::Digest, operation::Operation}; +use prism_common::{digest::Digest, transaction::Transaction}; use serde::{Deserialize, Serialize}; use sp1_sdk::SP1ProofWithPublicValues; use tokio::sync::broadcast; @@ -66,8 +66,8 @@ pub trait DataAvailabilityLayer: Send + Sync { async fn initialize_sync_target(&self) -> Result; async fn get_finalized_epoch(&self, height: u64) -> Result>; async fn submit_finalized_epoch(&self, epoch: FinalizedEpoch) -> Result; - async fn get_operations(&self, height: u64) -> Result>; - async fn submit_operations(&self, operations: Vec) -> Result; + async fn get_transactions(&self, height: u64) -> Result>; + async fn submit_transactions(&self, transactions: Vec) -> Result; async fn 
start(&self) -> Result<()>; fn subscribe_to_heights(&self) -> broadcast::Receiver; } diff --git a/crates/da/src/memory.rs b/crates/da/src/memory.rs index 27772ce2..cdcd52ab 100644 --- a/crates/da/src/memory.rs +++ b/crates/da/src/memory.rs @@ -2,7 +2,7 @@ use crate::{DataAvailabilityLayer, FinalizedEpoch}; use anyhow::Result; use async_trait::async_trait; use log::debug; -use prism_common::operation::Operation; +use prism_common::transaction::Transaction; use std::{collections::VecDeque, sync::Arc}; use tokio::{ sync::{broadcast, RwLock}, @@ -12,14 +12,14 @@ use tokio::{ #[derive(Clone, Debug)] pub struct Block { pub height: u64, - pub operations: Vec, + pub transactions: Vec, pub epoch: Option, } #[derive(Clone)] pub struct InMemoryDataAvailabilityLayer { blocks: Arc>>, - pending_operations: Arc>>, + pending_transactions: Arc>>, pending_epochs: Arc>>, latest_height: Arc>, height_update_tx: broadcast::Sender, @@ -34,7 +34,7 @@ impl InMemoryDataAvailabilityLayer { ( Self { blocks: Arc::new(RwLock::new(Vec::new())), - pending_operations: Arc::new(RwLock::new(Vec::new())), + pending_transactions: Arc::new(RwLock::new(Vec::new())), pending_epochs: Arc::new(RwLock::new(VecDeque::new())), latest_height: Arc::new(RwLock::new(0)), height_update_tx: height_tx, @@ -51,20 +51,20 @@ impl InMemoryDataAvailabilityLayer { loop { interval.tick().await; let mut blocks = self.blocks.write().await; - let mut pending_operations = self.pending_operations.write().await; + let mut pending_transactions = self.pending_transactions.write().await; let mut pending_epochs = self.pending_epochs.write().await; let mut latest_height = self.latest_height.write().await; *latest_height += 1; let new_block = Block { height: *latest_height, - operations: std::mem::take(&mut *pending_operations), + transactions: std::mem::take(&mut *pending_transactions), epoch: pending_epochs.pop_front(), }; debug!( - "new block produced at height {} with {} operations", + "new block produced at height {} with {} 
transactions", new_block.height, - new_block.operations.len(), + new_block.transactions.len(), ); blocks.push(new_block.clone()); @@ -108,18 +108,18 @@ impl DataAvailabilityLayer for InMemoryDataAvailabilityLayer { self.get_latest_height().await } - async fn get_operations(&self, height: u64) -> Result> { + async fn get_transactions(&self, height: u64) -> Result> { let blocks = self.blocks.read().await; Ok(blocks .iter() .find(|block| block.height == height) - .map(|block| block.operations.clone()) + .map(|block| block.transactions.clone()) .unwrap_or_default()) } - async fn submit_operations(&self, operations: Vec) -> Result { - let mut pending_operations = self.pending_operations.write().await; - pending_operations.extend(operations); + async fn submit_transactions(&self, transactions: Vec) -> Result { + let mut pending_transactions = self.pending_transactions.write().await; + pending_transactions.extend(transactions); self.get_latest_height().await } diff --git a/crates/node_types/prover/src/prover/mod.rs b/crates/node_types/prover/src/prover/mod.rs index a0ffcae7..64ab4d29 100644 --- a/crates/node_types/prover/src/prover/mod.rs +++ b/crates/node_types/prover/src/prover/mod.rs @@ -4,7 +4,9 @@ use jmt::KeyHash; use keystore_rs::create_signing_key; use prism_common::{ digest::Digest, + hashchain::Hashchain, hasher::Hasher, + transaction::Transaction, tree::{ Batch, HashchainResponse::{self, *}, @@ -33,8 +35,8 @@ pub struct Config { /// incoming FinalizedEpochs. pub prover: bool, - /// Enables accepting incoming operations from the webserver and posting batches to the DA layer. - /// When deactivated, the node will reject incoming operations. + /// Enables accepting incoming transactions from the webserver and posting batches to the DA layer. + /// When deactivated, the node will reject incoming transactions. pub batcher: bool, /// Configuration for the webserver. @@ -43,7 +45,7 @@ pub struct Config { /// Key used to sign new FinalizedEpochs. 
pub key: SigningKey, - /// DA layer height the prover should start syncing operations from. + /// DA layer height the prover should start syncing transactions from. pub start_height: u64, } @@ -66,9 +68,9 @@ pub struct Prover { pub cfg: Config, - /// [`pending_operations`] is a buffer for operations that have not yet been + /// [`pending_transactions`] is a buffer for transactions that have not yet been /// posted to the DA layer. - pub pending_operations: Arc>>, + pub pending_transactions: Arc>>, /// [`tree`] is the representation of the JMT, prism's state tree. It is accessed via the [`db`]. tree: Arc>>>, @@ -111,7 +113,7 @@ impl Prover { verifying_key: vk, prover_client: Arc::new(RwLock::new(prover_client)), tree, - pending_operations: Arc::new(RwLock::new(Vec::new())), + pending_transactions: Arc::new(RwLock::new(Vec::new())), }) } @@ -175,11 +177,11 @@ impl Prover { } // TODO: Should be persisted in database for crash recovery - let mut buffered_operations: VecDeque = VecDeque::new(); + let mut buffered_transactions: VecDeque = VecDeque::new(); let mut current_height = start_height; while current_height <= end_height { - self.process_da_height(current_height, &mut buffered_operations, false).await?; + self.process_da_height(current_height, &mut buffered_transactions, false).await?; // TODO: Race between set_epoch and set_last_synced_height self.db.set_last_synced_height(¤t_height)?; current_height += 1; @@ -199,9 +201,9 @@ impl Prover { height )); } - self.process_da_height(height, &mut buffered_operations, true).await?; + self.process_da_height(height, &mut buffered_transactions, true).await?; current_height += 1; - // TODO: Race between set_epoch and set_last_synced_height - updating these should be a single atomic operation + // TODO: Race between set_epoch and set_last_synced_height - updating these should be a single atomic transaction self.db.set_last_synced_height(¤t_height)?; } } @@ -209,12 +211,12 @@ impl Prover { async fn process_da_height( &self, 
height: u64, - buffered_operations: &mut VecDeque, + buffered_transactions: &mut VecDeque, is_real_time: bool, ) -> Result<()> { let current_epoch = self.db.get_epoch()?; - let operations = self.da.get_operations(height).await?; + let transactions = self.da.get_transactions(height).await?; let epoch_result = self.da.get_finalized_epoch(height).await?; debug!( @@ -225,21 +227,21 @@ impl Prover { ); if let Some(epoch) = epoch_result { - // run all buffered operations from the last celestia blocks and increment current_epoch - self.process_epoch(epoch, buffered_operations).await?; + // run all buffered transactions from the last celestia blocks and increment current_epoch + self.process_epoch(epoch, buffered_transactions).await?; } else { - debug!("No operations to process at height {}", height); + debug!("No transactions to process at height {}", height); } - if is_real_time && !buffered_operations.is_empty() && self.cfg.prover { - let all_ops: Vec = buffered_operations.drain(..).collect(); - self.finalize_new_epoch(current_epoch, all_ops).await?; + if is_real_time && !buffered_transactions.is_empty() && self.cfg.prover { + let all_transactions: Vec = buffered_transactions.drain(..).collect(); + self.finalize_new_epoch(current_epoch, all_transactions).await?; } - // If there are new operations at this height, add them to the queue to + // If there are new transactions at this height, add them to the queue to // be included in the next finalized epoch. 
- if !operations.is_empty() { - buffered_operations.extend(operations); + if !transactions.is_empty() { + buffered_transactions.extend(transactions); } Ok(()) @@ -248,12 +250,12 @@ impl Prover { async fn process_epoch( &self, epoch: FinalizedEpoch, - buffered_operations: &mut VecDeque, + buffered_transactions: &mut VecDeque, ) -> Result<()> { let mut current_epoch = self.db.get_epoch()?; // If prover is enabled and is actively producing new epochs, it has - // likely already ran all of the operations in the found epoch, so no + // likely already ran all of the transactions in the found epoch, so no // further processing is needed if epoch.height < current_epoch { debug!("epoch {} already processed internally", current_epoch); @@ -277,9 +279,9 @@ impl Prover { )); } - let all_ops: Vec = buffered_operations.drain(..).collect(); - if !all_ops.is_empty() { - self.execute_block(all_ops).await?; + let all_transactions: Vec = buffered_transactions.drain(..).collect(); + if !all_transactions.is_empty() { + self.execute_block(all_transactions).await?; } let new_commitment = self.get_commitment().await?; @@ -302,17 +304,20 @@ impl Prover { Ok(()) } - async fn execute_block(&self, operations: Vec) -> Result> { - debug!("executing block with {} operations", operations.len()); + async fn execute_block(&self, transactions: Vec) -> Result> { + debug!("executing block with {} transactions", transactions.len()); let mut proofs = Vec::new(); - for operation in operations { - match self.process_operation(&operation).await { + for transaction in transactions { + match self.process_transaction(transaction.clone()).await { Ok(proof) => proofs.push(proof), Err(e) => { - // Log the error and continue with the next operation - warn!("Failed to process operation: {:?}. Error: {}", operation, e); + // Log the error and continue with the next transaction + warn!( + "Failed to process transaction: {:?}. 
Error: {}", + transaction, e + ); } } } @@ -323,11 +328,11 @@ impl Prover { async fn finalize_new_epoch( &self, epoch_height: u64, - operations: Vec, + transactions: Vec, ) -> Result<()> { let prev_commitment = self.get_commitment().await?; - let proofs = self.execute_block(operations).await?; + let proofs = self.execute_block(transactions).await?; let new_commitment = self.get_commitment().await?; @@ -392,30 +397,30 @@ impl Prover { let height = height_rx.recv().await?; trace!("received height {}", height); - // Get pending operations - let pending_operations = { - let mut ops = self.pending_operations.write().await; + // Get pending transactions + let pending_transactions = { + let mut ops = self.pending_transactions.write().await; std::mem::take(&mut *ops) }; - let op_count = pending_operations.len(); + let tx_count = pending_transactions.len(); - // If there are pending operations, submit them - if !pending_operations.clone().is_empty() { - match self.da.submit_operations(pending_operations).await { + // If there are pending transactions, submit them + if !pending_transactions.clone().is_empty() { + match self.da.submit_transactions(pending_transactions).await { Ok(submitted_height) => { info!( - "post_batch_loop: submitted {} operations at height {}", - op_count, submitted_height + "post_batch_loop: submitted {} transactions at height {}", + tx_count, submitted_height ); } Err(e) => { - error!("post_batch_loop: Failed to submit operations: {}", e); + error!("post_batch_loop: Failed to submit transactions: {}", e); } } } else { debug!( - "post_batch_loop: No pending operations to submit at height {}", + "post_batch_loop: No pending transactions to submit at height {}", height ); } @@ -435,41 +440,39 @@ impl Prover { tree.get(key_hash) } - /// Updates the state from an already verified pending operation. - async fn process_operation(&self, operation: &Operation) -> Result { + /// Updates the state from an already verified pending transaction. 
+ async fn process_transaction(&self, transaction: Transaction) -> Result { let mut tree = self.tree.write().await; - tree.process_operation(operation) + tree.process_transaction(transaction) } - /// Adds an operation to be posted to the DA layer and applied in the next epoch. + /// Adds a transaction to be posted to the DA layer and applied in the next epoch. pub async fn validate_and_queue_update( self: Arc, - incoming_operation: &Operation, + transaction: Transaction, ) -> Result<()> { if !self.cfg.batcher { - bail!("Batcher is disabled, cannot queue operations"); + bail!("Batcher is disabled, cannot queue transactions"); } - // basic validation, does not include signature checks - incoming_operation.validate()?; - - // validate operation against existing hashchain if necessary, including signature checks - match incoming_operation { - Operation::RegisterService(_) => (), - Operation::CreateAccount(_) => (), - Operation::AddKey(_) | Operation::RevokeKey(_) | Operation::AddData(_) => { - let hc_response = self.get_hashchain(&incoming_operation.id()).await?; + // validate against existing hashchain if necessary, including signature checks + match transaction.entry.operation { + Operation::RegisterService { .. } | Operation::CreateAccount { .. } => { + Hashchain::empty().add_entry(transaction.entry.clone())? + } + Operation::AddKey { .. } | Operation::RevokeKey { .. } | Operation::AddData { ..
} => { + let hc_response = self.get_hashchain(&transaction.id).await?; let Found(mut hc, _) = hc_response else { - bail!("Hashchain not found for id: {}", incoming_operation.id()) + bail!("Hashchain not found for id: {}", transaction.id) }; - hc.perform_operation(incoming_operation.clone())?; + hc.add_entry(transaction.entry.clone())?; } }; - let mut pending = self.pending_operations.write().await; - pending.push(incoming_operation.clone()); + let mut pending = self.pending_transactions.write().await; + pending.push(transaction); Ok(()) } } diff --git a/crates/node_types/prover/src/prover/tests.rs b/crates/node_types/prover/src/prover/tests.rs index b0cc56a7..e81e8a29 100644 --- a/crates/node_types/prover/src/prover/tests.rs +++ b/crates/node_types/prover/src/prover/tests.rs @@ -1,9 +1,9 @@ use super::*; -use prism_common::{test_ops::OpsBuilder, tree::Proof}; +use prism_common::{transaction_builder::TransactionBuilder, tree::Proof}; use std::{self, sync::Arc, time::Duration}; use tokio::spawn; -use prism_common::{operation::Operation, test_utils::create_mock_signing_key}; +use prism_common::test_utils::create_mock_signing_key; use prism_da::memory::InMemoryDataAvailabilityLayer; use prism_storage::{inmemory::InMemoryDatabase, Database}; @@ -16,14 +16,18 @@ async fn create_test_prover() -> Arc { Arc::new(Prover::new(db.clone(), da_layer, &cfg).unwrap()) } -fn create_mock_operations(service_id: String) -> Vec { - let mut ops_builder = OpsBuilder::new(); +fn create_mock_transactions(service_id: String) -> Vec { + let mut transaction_builder = TransactionBuilder::new(); vec![ - ops_builder.register_service_with_random_key(&service_id).ex(), - ops_builder.create_account_with_random_key("user1@example.com", &service_id).ex(), - ops_builder.create_account_with_random_key("user2@example.com", &service_id).ex(), - ops_builder.add_random_key_verified_with_root("user1@example.com").ex(), + transaction_builder.register_service_with_random_keys(&service_id).commit(), + 
transaction_builder + .create_account_with_random_key("user1@example.com", &service_id) + .commit(), + transaction_builder + .create_account_with_random_key("user2@example.com", &service_id) + .commit(), + transaction_builder.add_random_key_verified_with_root("user1@example.com").commit(), ] } @@ -31,50 +35,53 @@ fn create_mock_operations(service_id: String) -> Vec { async fn test_validate_and_queue_update() { let prover = create_test_prover().await; - let mut ops_builder = OpsBuilder::new(); - let op = ops_builder.register_service_with_random_key("test_service").ex(); + let mut transaction_builder = TransactionBuilder::new(); + let transaction = + transaction_builder.register_service_with_random_keys("test_service").commit(); - prover.clone().validate_and_queue_update(&op).await.unwrap(); + prover.clone().validate_and_queue_update(transaction.clone()).await.unwrap(); - prover.clone().validate_and_queue_update(&op).await.unwrap(); + prover.clone().validate_and_queue_update(transaction.clone()).await.unwrap(); - let pending_ops = prover.pending_operations.read().await; - assert_eq!(pending_ops.len(), 2); + let pending_transactions = prover.pending_transactions.read().await; + assert_eq!(pending_transactions.len(), 2); } #[tokio::test] -async fn test_process_operation() { +async fn test_process_transactions() { let prover = create_test_prover().await; - let mut ops_builder = OpsBuilder::new(); - let register_service_op = ops_builder.register_service_with_random_key("test_service").ex(); - let create_account_op = - ops_builder.create_account_with_random_key("test_account", "test_service").ex(); + let mut transaction_builder = TransactionBuilder::new(); + let register_service_transaction = + transaction_builder.register_service_with_random_keys("test_service").commit(); + let create_account_transaction = + transaction_builder.create_account_with_random_key("test_account", "test_service").commit(); - let proof = 
prover.process_operation(®ister_service_op).await.unwrap(); + let proof = prover.process_transaction(register_service_transaction).await.unwrap(); assert!(matches!(proof, Proof::Insert(_))); - let proof = prover.process_operation(&create_account_op).await.unwrap(); + let proof = prover.process_transaction(create_account_transaction.clone()).await.unwrap(); assert!(matches!(proof, Proof::Insert(_))); let new_key = create_mock_signing_key(); - let add_key_op = - ops_builder.add_key_verified_with_root("test_account", new_key.verifying_key()).ex(); + let add_key_transaction = transaction_builder + .add_key_verified_with_root("test_account", new_key.verifying_key()) + .commit(); - let proof = prover.process_operation(&add_key_op).await.unwrap(); + let proof = prover.process_transaction(add_key_transaction).await.unwrap(); assert!(matches!(proof, Proof::Update(_))); // Revoke original key - let revoke_op = ops_builder + let revoke_transaction = transaction_builder .revoke_key( "test_account", - create_account_op.get_public_key().cloned().unwrap(), + create_account_transaction.entry.operation.get_public_key().cloned().unwrap(), &new_key, 1, ) - .ex(); - let proof = prover.process_operation(&revoke_op).await.unwrap(); + .commit(); + let proof = prover.process_transaction(revoke_transaction).await.unwrap(); assert!(matches!(proof, Proof::Update(_))); } @@ -82,23 +89,23 @@ async fn test_process_operation() { async fn test_execute_block_with_invalid_tx() { let prover = create_test_prover().await; - let mut ops_builder = OpsBuilder::new(); + let mut tx_builder = TransactionBuilder::new(); let new_key_1 = create_mock_signing_key(); - let operations = vec![ - ops_builder.register_service_with_random_key("service_id").ex(), - ops_builder.create_account_with_random_key("account_id", "service_id").ex(), + let transactions = vec![ + tx_builder.register_service_with_random_keys("service_id").commit(), + tx_builder.create_account_with_random_key("account_id", "service_id").commit(), 
// add new key, so it will be index = 1 - ops_builder.add_key_verified_with_root("account_id", new_key_1.verifying_key()).ex(), + tx_builder.add_key_verified_with_root("account_id", new_key_1.verifying_key()).commit(), // revoke new key again - ops_builder.revoke_key_verified_with_root("account_id", new_key_1.verifying_key()).ex(), + tx_builder.revoke_key_verified_with_root("account_id", new_key_1.verifying_key()).commit(), // and adding in same block. - // both of these operations are valid individually, but when processed together it will fail. - ops_builder.add_random_key("account_id", &new_key_1, 1).op(), + // both of these transactions are valid individually, but when processed together it will fail. + tx_builder.add_random_key("account_id", &new_key_1, 1).build(), ]; - let proofs = prover.execute_block(operations).await.unwrap(); + let proofs = prover.execute_block(transactions).await.unwrap(); assert_eq!(proofs.len(), 4); } @@ -106,19 +113,19 @@ async fn test_execute_block_with_invalid_tx() { async fn test_execute_block() { let prover = create_test_prover().await; - let operations = create_mock_operations("test_service".to_string()); + let transactions = create_mock_transactions("test_service".to_string()); - let proofs = prover.execute_block(operations).await.unwrap(); + let proofs = prover.execute_block(transactions).await.unwrap(); assert_eq!(proofs.len(), 4); } #[tokio::test] async fn test_finalize_new_epoch() { let prover = create_test_prover().await; - let operations = create_mock_operations("test_service".to_string()); + let transactions = create_mock_transactions("test_service".to_string()); let prev_commitment = prover.get_commitment().await.unwrap(); - prover.finalize_new_epoch(0, operations).await.unwrap(); + prover.finalize_new_epoch(0, transactions).await.unwrap(); let new_commitment = prover.get_commitment().await.unwrap(); assert_ne!(prev_commitment, new_commitment); @@ -138,10 +145,10 @@ async fn test_restart_sync_from_scratch() { 
runner.run().await.unwrap(); }); - let operations = create_mock_operations("test_service".to_string()); + let transactions = create_mock_transactions("test_service".to_string()); - for op in operations { - prover.clone().validate_and_queue_update(&op).await.unwrap(); + for transaction in transactions { + prover.clone().validate_and_queue_update(transaction).await.unwrap(); while let Ok(new_block) = brx.recv().await { if new_block.epoch.is_some() { break; @@ -181,10 +188,10 @@ async fn test_load_persisted_state() { runner.run().await.unwrap(); }); - let operations = create_mock_operations("test_service".to_string()); + let transactions = create_mock_transactions("test_service".to_string()); - for op in operations { - prover.clone().validate_and_queue_update(&op).await.unwrap(); + for transaction in transactions { + prover.clone().validate_and_queue_update(transaction).await.unwrap(); while let Ok(new_block) = brx.recv().await { if new_block.epoch.is_some() { break; diff --git a/crates/node_types/prover/src/webserver.rs b/crates/node_types/prover/src/webserver.rs index d51893b3..fef29a1b 100644 --- a/crates/node_types/prover/src/webserver.rs +++ b/crates/node_types/prover/src/webserver.rs @@ -13,7 +13,10 @@ use indexed_merkle_tree::{ }; use jmt::proof::SparseMerkleProof; use prism_common::{ - hashchain::Hashchain, hasher::Hasher, operation::Operation, tree::HashchainResponse, + hashchain::{Hashchain, HashchainEntry}, + hasher::Hasher, + transaction::Transaction, + tree::HashchainResponse, }; use serde::{Deserialize, Serialize}; use std::{self, sync::Arc}; @@ -52,8 +55,9 @@ pub struct EpochData { } #[derive(Deserialize, Debug, ToSchema)] -pub struct OperationInput { - pub operation: Operation, +pub struct TransactionRequest { + pub id: String, + pub entry: HashchainEntry, } #[derive(Serialize, Deserialize, ToSchema)] @@ -75,9 +79,9 @@ pub struct UserKeyResponse { #[derive(OpenApi)] #[openapi( - paths(update_entry, get_hashchain, get_commitment), + 
paths(post_transaction, get_hashchain, get_commitment), components(schemas( - OperationInput, + TransactionRequest, EpochData, UpdateProofResponse, Hash, @@ -99,7 +103,7 @@ impl WebServer { info!("starting webserver on {}:{}", self.cfg.host, self.cfg.port); let app = Router::new() - .route("/update-entry", post(update_entry)) + .route("/transaction", post(post_transaction)) .route("/get-hashchain", post(get_hashchain)) .route("/get-current-commitment", get(get_commitment)) .merge(SwaggerUi::new("/swagger-ui").url("/api-docs/openapi.json", ApiDoc::openapi())) @@ -116,23 +120,27 @@ impl WebServer { } } -/// Updates or inserts an entry in the transparency dictionary, pending inclusion in the next epoch. +/// Updates or inserts a transaction in the transparency dictionary, pending inclusion in the next epoch. /// #[utoipa::path( post, - path = "/update-entry", - request_body = UpdateEntryJson, + path = "/transaction", + request_body = TransactionRequest, responses( (status = 200, description = "Entry update queued for insertion into next epoch"), (status = 400, description = "Bad request"), (status = 500, description = "Internal server error") ) )] -async fn update_entry( +async fn post_transaction( State(session): State>, - Json(operation_input): Json, + Json(update_input): Json, ) -> impl IntoResponse { - match session.validate_and_queue_update(&operation_input.operation).await { + let transaction = Transaction { + id: update_input.id.clone(), + entry: update_input.entry.clone(), + }; + match session.validate_and_queue_update(transaction).await { Ok(_) => ( StatusCode::OK, "Entry update queued for insertion into next epoch", diff --git a/crates/storage/src/redis.rs b/crates/storage/src/redis.rs index 23beab16..b2fd9c0b 100644 --- a/crates/storage/src/redis.rs +++ b/crates/storage/src/redis.rs @@ -208,6 +208,6 @@ impl Database for RedisConnection { let mut conn = self.lock_connection()?; redis::cmd("FLUSHALL") .query::<()>(&mut conn) - .map_err(|_| 
anyhow!(DatabaseError::DeleteError("all entries".to_string()))) + .map_err(|_| anyhow!(DatabaseError::DeleteError("all transactions".to_string()))) } } diff --git a/crates/tests/src/lib.rs b/crates/tests/src/lib.rs index 7cf13f76..c41d4252 100644 --- a/crates/tests/src/lib.rs +++ b/crates/tests/src/lib.rs @@ -3,16 +3,9 @@ #[macro_use] extern crate log; -use anyhow::{bail, Result}; -use jmt::KeyHash; +use anyhow::Result; use keystore_rs::create_signing_key; -use prism_common::{ - digest::Digest, - hasher::Hasher, - operation::{Operation, ServiceChallenge}, - test_utils::create_mock_signing_key, - tree::{HashchainResponse::*, SnarkableTree}, -}; +use prism_common::transaction_builder::TransactionBuilder; use prism_da::{ celestia::{CelestiaConfig, CelestiaConnection}, DataAvailabilityLayer, @@ -24,38 +17,7 @@ use rand::{rngs::StdRng, Rng, SeedableRng}; use std::sync::Arc; use tokio::{spawn, time::Duration}; -use prism_common::test_utils::{Service, TestTreeState}; - -fn create_random_user(id: &str, state: &mut TestTreeState, service: &Service) -> Operation { - let account = state.create_account(id.to_string(), service.clone()); - account.hashchain.last().unwrap().operation.clone() -} - -fn add_key(id: &str, state: &mut TestTreeState) -> Result { - let hashed_id = Digest::hash(id); - let key_hash = KeyHash::with::(hashed_id); - - let Found(hc, _) = state.tree.get(key_hash)? 
else { - bail!("Hashchain not found for account {}", id); - }; - - let Some(signing_key) = state.signing_keys.get(id) else { - bail!("Signing key not found for account {}", id); - }; - - let new_key = create_mock_signing_key(); - let new_public_key = new_key.verifying_key(); - - let op = Operation::new_add_key( - id.to_string(), - new_public_key, - hc.last_hash(), - signing_key, - 0, // Assuming this is the key index, you might need to adjust this - )?; - - Ok(op) -} +use prism_common::test_utils::TestTreeState; fn setup_db() -> Arc> { Arc::new(Box::new(InMemoryDatabase::new()) as Box) @@ -113,55 +75,47 @@ async fn test_light_client_prover_talking() -> Result<()> { spawn(async move { let mut rng = StdRng::from_entropy(); - let mut test_state = TestTreeState::new(); - let service = test_state.register_service("test_service".to_string()); - let op = Operation::new_register_service( - service.clone().id, - ServiceChallenge::Signed(service.clone().vk), - ); + + let mut transaction_builder = TransactionBuilder::new(); + let register_service_req = + transaction_builder.register_service_with_random_keys("test_service").commit(); + let mut i = 0; - prover.clone().validate_and_queue_update(&op).await.unwrap(); + prover.clone().validate_and_queue_update(register_service_req).await.unwrap(); + let mut added_account_ids: Vec = Vec::new(); loop { // Create 1 to 3 new accounts let num_new_accounts = rng.gen_range(1..=3); for _ in 0..num_new_accounts { - let new_acc = create_random_user( - format!("{}@gmail.com", i).as_str(), - &mut test_state, - &service, - ); - match prover.clone().validate_and_queue_update(&new_acc).await { - Ok(_) => i += 1, + let random_user_id = format!("{}@gmail.com", i); + let new_acc = transaction_builder + .create_account_with_random_key(random_user_id.as_str(), "test_service") + .commit(); + match prover.clone().validate_and_queue_update(new_acc).await { + Ok(_) => { + i += 1; + added_account_ids.push(random_user_id); + } Err(e) => eprintln!("Failed to 
create account: {}", e), } } // Update 5 random existing accounts (if we have at least 5) - if test_state.signing_keys.len() >= 5 { + if added_account_ids.len() >= 5 { for _ in 0..5 { - let account_id = match test_state - .signing_keys - .keys() - .nth(rng.gen_range(0..test_state.signing_keys.len())) - { - Some(id) => id.clone(), - None => { - eprintln!("Failed to get random account id"); - continue; - } + let acc_id = added_account_ids + .get(rng.gen_range(0..added_account_ids.len())) + .map_or("Could not find random account id", |id| id.as_str()); + + let update_acc = + transaction_builder.add_random_key_verified_with_root(acc_id).commit(); + + match prover.clone().validate_and_queue_update(update_acc).await { + Ok(_) => (), + Err(e) => eprintln!("Failed to validate and queue update: {}", e), }; - match add_key(&account_id, &mut test_state) { - Ok(update_op) => { - if let Err(e) = - prover.clone().validate_and_queue_update(&update_op).await - { - eprintln!("Failed to validate and queue update: {}", e); - } - } - Err(e) => eprintln!("Failed to add key: {}", e), - } } } diff --git a/elf/riscv32im-succinct-zkvm-elf b/elf/riscv32im-succinct-zkvm-elf index fcef340f..eb447764 100755 Binary files a/elf/riscv32im-succinct-zkvm-elf and b/elf/riscv32im-succinct-zkvm-elf differ