implement signing
itegulov committed Nov 22, 2023
1 parent ccb9bf4 commit 0423757
Showing 24 changed files with 1,159 additions and 253 deletions.
330 changes: 204 additions & 126 deletions Cargo.lock

Large diffs are not rendered by default.

42 changes: 25 additions & 17 deletions contract/src/lib.rs
@@ -1,7 +1,7 @@
use near_sdk::borsh::{self, BorshDeserialize, BorshSerialize};
use near_sdk::serde::{Deserialize, Serialize};
use near_sdk::{env, near_bindgen, AccountId, PanicOnDefault, PublicKey};
use std::collections::{HashMap, HashSet};
use std::collections::{BTreeMap, HashSet};

type ParticipantId = u32;

@@ -26,28 +26,28 @@ pub struct ParticipantInfo {

#[derive(BorshDeserialize, BorshSerialize, Serialize, Deserialize, Debug)]
pub struct InitializingContractState {
pub participants: HashMap<AccountId, ParticipantInfo>,
pub participants: BTreeMap<AccountId, ParticipantInfo>,
pub threshold: usize,
pub pk_votes: HashMap<PublicKey, HashSet<ParticipantId>>,
pub pk_votes: BTreeMap<PublicKey, HashSet<ParticipantId>>,
}

#[derive(BorshDeserialize, BorshSerialize, Serialize, Deserialize, Debug)]
pub struct RunningContractState {
pub epoch: u64,
pub participants: HashMap<AccountId, ParticipantInfo>,
pub participants: BTreeMap<AccountId, ParticipantInfo>,
pub threshold: usize,
pub public_key: PublicKey,
pub candidates: HashMap<ParticipantId, ParticipantInfo>,
pub join_votes: HashMap<ParticipantId, HashSet<ParticipantId>>,
pub leave_votes: HashMap<ParticipantId, HashSet<ParticipantId>>,
pub candidates: BTreeMap<ParticipantId, ParticipantInfo>,
pub join_votes: BTreeMap<ParticipantId, HashSet<ParticipantId>>,
pub leave_votes: BTreeMap<ParticipantId, HashSet<ParticipantId>>,
}

#[derive(BorshDeserialize, BorshSerialize, Serialize, Deserialize, Debug)]
pub struct ResharingContractState {
pub old_epoch: u64,
pub old_participants: HashMap<AccountId, ParticipantInfo>,
pub old_participants: BTreeMap<AccountId, ParticipantInfo>,
// TODO: only store diff to save on storage
pub new_participants: HashMap<AccountId, ParticipantInfo>,
pub new_participants: BTreeMap<AccountId, ParticipantInfo>,
pub threshold: usize,
pub public_key: PublicKey,
pub finished_votes: HashSet<ParticipantId>,
@@ -69,12 +69,12 @@ pub struct MpcContract {
#[near_bindgen]
impl MpcContract {
#[init]
pub fn init(threshold: usize, participants: HashMap<AccountId, ParticipantInfo>) -> Self {
pub fn init(threshold: usize, participants: BTreeMap<AccountId, ParticipantInfo>) -> Self {
MpcContract {
protocol_state: ProtocolContractState::Initializing(InitializingContractState {
participants,
threshold,
pk_votes: HashMap::new(),
pk_votes: BTreeMap::new(),
}),
}
}
@@ -211,9 +211,9 @@ impl MpcContract {
participants: participants.clone(),
threshold: *threshold,
public_key,
candidates: HashMap::new(),
join_votes: HashMap::new(),
leave_votes: HashMap::new(),
candidates: BTreeMap::new(),
join_votes: BTreeMap::new(),
leave_votes: BTreeMap::new(),
});
true
} else {
@@ -251,9 +251,9 @@ impl MpcContract {
participants: new_participants.clone(),
threshold: *threshold,
public_key: public_key.clone(),
candidates: HashMap::new(),
join_votes: HashMap::new(),
leave_votes: HashMap::new(),
candidates: BTreeMap::new(),
join_votes: BTreeMap::new(),
leave_votes: BTreeMap::new(),
});
true
} else {
@@ -270,4 +270,12 @@ impl MpcContract {
_ => env::panic_str("protocol is not resharing right now"),
}
}

#[allow(unused_variables)]
pub fn sign(&mut self, payload: [u8; 32]) -> [u8; 32] {
near_sdk::env::random_seed_array()
}

#[allow(unused_variables)]
pub fn respond(&mut self, receipt_id: [u8; 32], big_r: String, s: String) {}
}
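
A note on the changes above: replacing HashMap with BTreeMap makes the iteration order of contract state deterministic (BTreeMap keeps keys sorted, while HashMap's order is unspecified and can vary between runs), which in turn makes Borsh/JSON serialization of the state reproducible. The new `sign` and `respond` methods are placeholders at this stage: `sign` returns the block's random seed instead of a real signature, and `respond` is an empty endpoint whose calls the new indexer below picks up. A minimal standalone sketch (not part of this commit) of the ordering difference:

// Sketch: BTreeMap iterates in sorted key order, so state built from it
// serializes reproducibly; HashMap's iteration order may differ per run.
use std::collections::{BTreeMap, HashMap};

fn main() {
    let entries = [("bob.near", 2u32), ("alice.near", 1), ("carol.near", 3)];
    let hash: HashMap<_, _> = entries.into_iter().collect();
    let btree: BTreeMap<_, _> = entries.into_iter().collect();

    // Order here is unspecified and may change between processes:
    println!("{:?}", hash.keys().collect::<Vec<_>>());
    // Always prints ["alice.near", "bob.near", "carol.near"]:
    println!("{:?}", btree.keys().collect::<Vec<_>>());
}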
27 changes: 19 additions & 8 deletions integration-tests/Cargo.toml
@@ -8,23 +8,22 @@ publish = false
aes-gcm = "0.10"
anyhow = { version = "1.0", features = ["backtrace"] }
async-process = "1"
aws-config = "0.54.0"
aws-sdk-s3 = "0.24.0"
aws-types = "0.54.0"
bollard = "0.13"
cait-sith = { git = "https://github.com/LIT-Protocol/cait-sith.git", features = [
"k256",
] }
clap = { version = "4.2", features = ["derive", "env"] }
curv = { package = "curv-kzen", version = "0.9", default-features = false }
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
futures = "0.3"
hex = "0.4.3"
hyper = { version = "0.14", features = ["full"] }
mpc-contract = { path = "../contract" }
mpc-recovery = { path = "../mpc-recovery" }
mpc-recovery-node = { path = "../node" }
k256 = { version = "0.13.1", features = ["sha256", "ecdsa", "serde"] }
multi-party-eddsa = { git = "https://github.com/DavidM-D/multi-party-eddsa.git", rev = "25ae4fdc5ff7819ae70e73ab4afacf1c24fc4da1" }
tracing = "0.1"
near-crypto = "0.17"
near-fetch = "0.0.12"
near-jsonrpc-client = "0.6"
near-primitives = "0.17"
near-units = "0.2.0"
nix = { version = "0.27", features = ["signal"] }
once_cell = "1"
rand = "0.7"
@@ -36,6 +35,18 @@ tracing-subscriber = { version = "0.3", features = ["env-filter"] }
near-workspaces = "0.8.0"
toml = "0.8.1"

near-crypto = "0.17"
near-fetch = "0.0.12"
near-jsonrpc-client = "0.6"
near-primitives = "0.17"
near-lake-framework = { git = "https://github.com/near/near-lake-framework-rs.git", branch = "daniyar/reproduce" }
near-lake-primitives = { git = "https://github.com/near/near-lake-framework-rs.git", branch = "daniyar/reproduce" }
near-units = "0.2.0"

mpc-contract = { path = "../contract" }
mpc-recovery = { path = "../mpc-recovery" }
mpc-recovery-node = { path = "../node" }

[dev-dependencies]
backon = "0.4"
rand = "0.7"
Expand Down
7 changes: 3 additions & 4 deletions integration-tests/src/env/containers.rs
@@ -517,8 +517,7 @@ impl<'a> LocalStack<'a> {
s3_region: String,
) -> anyhow::Result<LocalStack<'a>> {
tracing::info!("running LocalStack container...");
let image = GenericImage::new("localstack/localstack", "latest")
.with_exposed_port(Self::S3_CONTAINER_PORT)
let image = GenericImage::new("localstack/localstack", "3.0.0")
.with_wait_for(WaitFor::message_on_stdout("Running on"));
let image: RunnableImage<GenericImage> = image.into();
let image = image.with_network(network);
@@ -554,8 +553,8 @@ impl<'a> LocalStack<'a> {
.await?;

let s3_address = format!("http://{}:{}", address, Self::S3_CONTAINER_PORT);
let s3_host_port = container.get_host_port_ipv4(Self::S3_CONTAINER_PORT);
let s3_host_address = format!("http://127.0.0.1:{s3_host_port}");
let s3_host_port = container.get_host_port_ipv6(Self::S3_CONTAINER_PORT);
let s3_host_address = format!("http://[::1]:{s3_host_port}");

tracing::info!(
s3_address,
100 changes: 100 additions & 0 deletions integration-tests/src/indexer.rs
@@ -0,0 +1,100 @@
use k256::{AffinePoint, Scalar};
use near_lake_framework::{LakeBuilder, LakeContext};
use near_lake_primitives::actions::ActionMetaDataExt;
use near_lake_primitives::{receipts::ExecutionStatus, AccountId};
use near_primitives::hash::CryptoHash;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Debug, Serialize, Deserialize)]
struct RespondPayload {
receipt_id: [u8; 32],
big_r: AffinePoint,
s: Scalar,
}

pub struct FullSignature {
pub big_r: AffinePoint,
pub s: Scalar,
}

#[derive(LakeContext)]
struct Context {
mpc_contract_id: AccountId,
responses: Arc<RwLock<HashMap<CryptoHash, FullSignature>>>,
}

async fn handle_block(
mut block: near_lake_primitives::block::Block,
ctx: &Context,
) -> anyhow::Result<()> {
for action in block.actions().cloned().collect::<Vec<_>>() {
if action.receiver_id() == ctx.mpc_contract_id {
let receipt = block.receipt_by_id(&action.receipt_id()).unwrap();
if let Some(function_call) = action.as_function_call() {
if function_call.method_name() == "respond" {
let ExecutionStatus::SuccessValue(_) = receipt.status() else {
tracing::error!("indexed a failed `respond` function call");
continue;
};
if let Ok(respond_payload) =
serde_json::from_slice::<'_, RespondPayload>(function_call.args())
{
let receipt_id = CryptoHash(respond_payload.receipt_id);
tracing::info!(
receipt_id = %receipt_id,
caller_id = receipt.predecessor_id().to_string(),
big_r = ?respond_payload.big_r,
s = ?respond_payload.s,
"indexed new `respond` function call"
);
let mut responses = ctx.responses.write().await;
responses.insert(
receipt_id,
FullSignature {
big_r: respond_payload.big_r,
s: respond_payload.s,
},
);
drop(responses);
}
}
}
}
}
Ok(())
}

pub fn run(
s3_bucket: &str,
s3_region: &str,
start_block_height: u64,
s3_url: &str,
mpc_contract_id: AccountId,
responses: Arc<RwLock<HashMap<CryptoHash, FullSignature>>>,
) -> anyhow::Result<()> {
let mut lake_builder = LakeBuilder::default()
.s3_bucket_name(s3_bucket)
.s3_region_name(s3_region)
.start_block_height(start_block_height);
let lake = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
let aws_config = aws_config::from_env().load().await;
let s3_config = aws_sdk_s3::config::Builder::from(&aws_config)
.endpoint_url(s3_url)
.build();
lake_builder = lake_builder.s3_config(s3_config);
lake_builder.build()
})?;
let context = Context {
mpc_contract_id,
responses,
};
lake.run_with_context(handle_block, &context)?;
Ok(())
}
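
The new indexer module tails blocks from NEAR Lake, filters for successful `respond` function calls addressed to the MPC contract, and stores the parsed signatures in a shared map keyed by receipt id. Note that `run` is synchronous and builds its own Tokio runtime, so callers are expected to spawn it on a dedicated OS thread (as the integration tests below do). A minimal consumer sketch, assuming the same shared map (the polling loop is purely illustrative; the tests below use backon retries instead):

use std::{collections::HashMap, sync::Arc, time::Duration};

use mpc_recovery_integration_tests::indexer::FullSignature;
use near_primitives::hash::CryptoHash;
use tokio::sync::RwLock;

// Poll the map that the indexer fills until the signature for `receipt_id`
// appears, removing it so each response is consumed exactly once.
async fn wait_for_signature(
    responses: Arc<RwLock<HashMap<CryptoHash, FullSignature>>>,
    receipt_id: CryptoHash,
) -> FullSignature {
    loop {
        if let Some(signature) = responses.write().await.remove(&receipt_id) {
            return signature;
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
}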
1 change: 1 addition & 0 deletions integration-tests/src/lib.rs
@@ -12,6 +12,7 @@ use crate::env::containers::{self, LocalStack};
use testcontainers::{Container, GenericImage};

pub mod env;
pub mod indexer;
pub mod mpc;
pub mod multichain;
pub mod sandbox;
3 changes: 2 additions & 1 deletion integration-tests/src/multichain/containers.rs
@@ -41,8 +41,9 @@ impl<'a> Node<'a> {
account_sk: account_sk.to_string().parse()?,
web_port: Self::CONTAINER_PORT,
indexer_options: mpc_recovery_node::indexer::Options {
s3_bucket: ctx.localstack.s3_host_address.clone(),
s3_bucket: ctx.localstack.s3_bucket.clone(),
s3_region: ctx.localstack.s3_region.clone(),
s3_url: Some(ctx.localstack.s3_host_address.clone()),
start_block_height: 0,
},
}
3 changes: 2 additions & 1 deletion integration-tests/src/multichain/local.rs
@@ -30,8 +30,9 @@ impl Node {
account_sk: account_sk.to_string().parse()?,
web_port,
indexer_options: mpc_recovery_node::indexer::Options {
s3_bucket: ctx.localstack.s3_host_address.clone(),
s3_bucket: ctx.localstack.s3_bucket.clone(),
s3_region: ctx.localstack.s3_region.clone(),
s3_url: Some(ctx.localstack.s3_host_address.clone()),
start_block_height: 0,
},
};
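
The two config changes above (here and in containers.rs) fix a conflation: the LocalStack host address was previously being passed as the S3 bucket name. Bucket name and custom endpoint URL are now separate fields. Roughly, the resulting options look like the sketch below (field types assumed to be String/Option<String>; the concrete values are illustrative only):

let indexer_options = mpc_recovery_node::indexer::Options {
    s3_bucket: "near-lake-data".to_string(),       // bucket name, not a URL
    s3_region: "us-east-1".to_string(),
    s3_url: Some("http://[::1]:4566".to_string()), // LocalStack S3 endpoint
    start_block_height: 0,
};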
47 changes: 46 additions & 1 deletion integration-tests/tests/lib.rs
@@ -10,9 +10,15 @@ use mpc_recovery::{
ClaimOidcResponse, MpcPkResponse, NewAccountResponse, SignResponse, UserCredentialsResponse,
},
};
use mpc_recovery_integration_tests::env;
use mpc_recovery_integration_tests::env::containers::DockerClient;
use mpc_recovery_integration_tests::indexer::FullSignature;
use mpc_recovery_integration_tests::{env, indexer};
use near_primitives::hash::CryptoHash;
use near_workspaces::{network::Sandbox, Worker};
use std::collections::HashMap;
use std::sync::Arc;
use std::thread;
use tokio::sync::RwLock;

pub struct TestContext {
env: String,
@@ -63,6 +69,7 @@ pub struct MultichainTestContext<'a> {
nodes: mpc_recovery_integration_tests::multichain::Nodes<'a>,
rpc_client: near_fetch::Client,
http_client: reqwest::Client,
responses: Arc<RwLock<HashMap<CryptoHash, FullSignature>>>,
}

async fn with_multichain_nodes<F>(nodes: usize, f: F) -> anyhow::Result<()>
@@ -72,11 +79,30 @@
let docker_client = DockerClient::default();
let nodes = mpc_recovery_integration_tests::multichain::run(nodes, &docker_client).await?;

let s3_bucket = nodes.ctx().localstack.s3_bucket.clone();
let s3_region = nodes.ctx().localstack.s3_region.clone();
let s3_url = nodes.ctx().localstack.s3_host_address.clone();
let mpc_contract_id = nodes.ctx().mpc_contract.id().clone();
let responses = Arc::new(RwLock::new(HashMap::new()));
let responses_clone = responses.clone();
thread::spawn(move || {
indexer::run(
&s3_bucket,
&s3_region,
0,
&s3_url,
mpc_contract_id,
responses_clone,
)
.unwrap();
});

let rpc_client = near_fetch::Client::new(&nodes.ctx().lake_indexer.rpc_host_address);
f(MultichainTestContext {
nodes,
rpc_client,
http_client: reqwest::Client::default(),
responses,
})
.await?;

@@ -190,7 +216,9 @@ mod wait_for {
use backon::Retryable;
use mpc_contract::ProtocolContractState;
use mpc_contract::RunningContractState;
use mpc_recovery_integration_tests::indexer::FullSignature;
use mpc_recovery_node::web::StateView;
use near_primitives::hash::CryptoHash;

pub async fn running_mpc<'a>(
ctx: &MultichainTestContext<'a>,
@@ -272,6 +300,23 @@
.retry(&ExponentialBuilder::default().with_max_times(6))
.await
}

pub async fn has_response<'a>(
ctx: &MultichainTestContext<'a>,
receipt_id: CryptoHash,
) -> anyhow::Result<FullSignature> {
let has_signature = || async {
let mut responses = ctx.responses.write().await;
if let Some(signature) = responses.remove(&receipt_id) {
return Ok(signature);
}
drop(responses);
anyhow::bail!("mpc has not responded yet")
};
has_signature
.retry(&ExponentialBuilder::default().with_max_times(8))
.await
}
}

trait MpcCheck {
(The remaining file diffs are not rendered.)