From 584adb0ce5b13430e9f206327f5aadf606838d55 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Mon, 15 Jan 2024 19:19:11 -0800 Subject: [PATCH 01/32] playing around with transaction caching Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache.rs | 43 +++++++++++++++++++++++++++++++ libindy_vdr/src/pool/helpers.rs | 31 +++++++++++++++++++--- libindy_vdr/src/pool/mod.rs | 2 ++ libindy_vdr/src/resolver/utils.rs | 15 ++++++++--- 4 files changed, 83 insertions(+), 8 deletions(-) create mode 100644 libindy_vdr/src/pool/cache.rs diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs new file mode 100644 index 00000000..53cfb862 --- /dev/null +++ b/libindy_vdr/src/pool/cache.rs @@ -0,0 +1,43 @@ +use std::{collections::HashMap, hash::Hash}; + +use futures_util::{future, Future}; + +use super::RequestResultMeta; + +pub trait Cacheable { + fn get_cached_request(&self, key: K) -> impl Future>; + + fn cache_request( + &mut self, + key: K, + result: V, + meta: RequestResultMeta, + ) -> impl Future>; +} + +pub struct MemCache { + cache: HashMap, +} + +impl MemCache { + pub fn new() -> Self { + Self { + cache: HashMap::new(), + } + } +} + +impl Cacheable for MemCache { + fn get_cached_request(&self, key: K) -> impl Future> { + future::ready(self.cache.get(&key).and_then(|(v, m, _)| Some((v.clone(), m.clone())))) + } + + fn cache_request( + &mut self, + key: K, + result: V, + meta: RequestResultMeta, + ) -> impl Future> { + future::ready(self.cache.insert(key, (result, meta, 0)).and_then(|(v, m, _)| Some((v, m)))) + } +} diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index f426eadd..b24b2f3f 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -3,6 +3,7 @@ use std::string::ToString; use serde_json; +use super::cache::Cacheable; use super::genesis::PoolTransactions; use super::handlers::{ build_pool_catchup_request, build_pool_status_request, handle_catchup_request, @@ -19,12 +20,13 @@ use crate::utils::base58; /// Perform a pool ledger status request to see if catchup is required pub async fn perform_pool_status_request( pool: &T, + cache: Option<&mut impl Cacheable>, ) -> VdrResult<(RequestResult>, RequestResultMeta)> { let (mt_root, mt_size) = pool.get_merkle_tree_info(); if pool.get_refreshed() { trace!("Performing fast status check"); - match perform_get_txn(pool, LedgerType::POOL.to_id(), 1).await { + match perform_get_txn(pool, LedgerType::POOL.to_id(), 1, cache).await { Ok((RequestResult::Reply(reply), res_meta)) => { if let Ok(body) = serde_json::from_str::(&reply) { if let (Some(status_root_hash), Some(status_txn_count)) = ( @@ -91,8 +93,9 @@ pub async fn perform_pool_catchup_request( /// Perform a pool ledger status request followed by a catchup request if necessary pub async fn perform_refresh( pool: &T, + cache: Option<&mut impl Cacheable>, ) -> VdrResult<(Option, RequestResultMeta)> { - let (result, meta) = perform_pool_status_request(pool).await?; + let (result, meta) = perform_pool_status_request(pool, cache).await?; trace!("Got status result: {:?}", &result); match result { RequestResult::Reply(target) => match target { @@ -166,10 +169,11 @@ pub async fn perform_get_txn( pool: &T, ledger_type: i32, seq_no: i32, + cache: Option<&mut impl Cacheable>, ) -> VdrResult<(RequestResult, RequestResultMeta)> { let builder = pool.get_request_builder(); let prepared = builder.build_get_txn_request(None, ledger_type, seq_no)?; - perform_ledger_request(pool, &prepared).await + perform_ledger_request(pool, &prepared, 
cache).await
 }
 
 /// Dispatch a request to a specific set of nodes and collect the results
@@ -184,10 +188,13 @@ pub async fn perform_ledger_action(
     handle_full_request(&mut request, node_aliases, timeout).await
 }
 
+// Do the caching here, once we know whether the request is read-only
+
 /// Dispatch a prepared ledger request to the appropriate handler
 pub async fn perform_ledger_request(
     pool: &T,
     prepared: &PreparedRequest,
+    cache_opt: Option<&mut impl Cacheable>,
 ) -> VdrResult<(RequestResult, RequestResultMeta)> {
     let mut request = pool
         .create_request(prepared.req_id.clone(), prepared.req_json.to_string())
@@ -214,7 +221,23 @@ pub async fn perform_ledger_request(
         RequestMethod::Consensus => (None, (None, None), false, None),
     };
 
-    handle_consensus_request(&mut request, sp_key, sp_timestamps, is_read_req, sp_parser).await
+    if is_read_req {
+        if let Some(ref cache) = cache_opt {
+            if let Some((response, meta)) = cache.get_cached_request(prepared.req_id.clone()).await {
+                return Ok((RequestResult::Reply(response), meta));
+            }
+        }
+    }
+    let result =
+        handle_consensus_request(&mut request, sp_key, sp_timestamps, is_read_req, sp_parser).await;
+    if is_read_req && result.is_ok() {
+        if let (RequestResult::Reply(response), meta) = result.as_ref().unwrap() {
+            if let Some(cache) = cache_opt {
+                cache.cache_request(prepared.req_id.clone(), response.to_string(), meta.clone());
+            }
+        }
+    }
+    return result;
 }
 
 /// Format a collection of node replies in the expected response format
diff --git a/libindy_vdr/src/pool/mod.rs b/libindy_vdr/src/pool/mod.rs
index 942f1a1d..c2250e7f 100644
--- a/libindy_vdr/src/pool/mod.rs
+++ b/libindy_vdr/src/pool/mod.rs
@@ -4,6 +4,8 @@ mod genesis;
 pub(crate) mod handlers;
 /// Methods for performing requests against the verifier pool
 pub mod helpers;
+/// A trait for managing a transaction cache
+pub mod cache;
 /// General verifier pool management
 mod manager;
 /// Pool networker traits and implementations
diff --git a/libindy_vdr/src/resolver/utils.rs b/libindy_vdr/src/resolver/utils.rs
index 4438824d..2c0ad93e 100644
--- a/libindy_vdr/src/resolver/utils.rs
+++ b/libindy_vdr/src/resolver/utils.rs
@@ -11,6 +11,7 @@ use crate::ledger::constants;
 use crate::ledger::identifiers::{CredentialDefinitionId, RevocationRegistryId, SchemaId};
 use crate::ledger::responses::{Endpoint, GetNymResultV1};
 use crate::ledger::RequestBuilder;
+use crate::pool::cache::Cacheable;
 use crate::pool::helpers::perform_ledger_request;
 use crate::pool::{Pool, PreparedRequest, RequestResult, RequestResultMeta};
 use crate::utils::did::DidValue;
@@ -251,8 +252,12 @@ pub fn parse_or_now(datetime: Option<&String>) -> VdrResult {
     }
 }
 
-pub async fn handle_request(pool: &T, request: &PreparedRequest) -> VdrResult {
-    let (result, _meta) = request_transaction(pool, request).await?;
+pub async fn handle_request(
+    pool: &T,
+    request: &PreparedRequest,
+    cache: Option<&mut impl Cacheable>,
+) -> VdrResult {
+    let (result, _meta) = request_transaction(pool, request, cache).await?;
     match result {
         RequestResult::Reply(data) => Ok(data),
         RequestResult::Failed(error) => Err(error),
@@ -262,8 +267,9 @@ pub async fn handle_request(pool: &T, request: &PreparedRequest) -> Vdr
 pub async fn request_transaction(
     pool: &T,
     request: &PreparedRequest,
+    cache: Option<&mut impl Cacheable>,
 ) -> VdrResult<(RequestResult, RequestResultMeta)> {
-    perform_ledger_request(pool, request).await
+    perform_ledger_request(pool, request, cache).await
 }
 
 /// Fetch legacy service endpoint using ATTRIB tx
@@ -272,6 +278,7 @@ pub async fn 
fetch_legacy_endpoint( did: &DidValue, seq_no: Option, timestamp: Option, + cache: Option<&mut impl Cacheable>, ) -> VdrResult { let builder = pool.get_request_builder(); let request = builder.build_get_attrib_request( @@ -287,7 +294,7 @@ pub async fn fetch_legacy_endpoint( "Fetching legacy endpoint for {} with request {:#?}", did, request ); - let ledger_data = handle_request(pool, &request).await?; + let ledger_data = handle_request(pool, &request, cache).await?; let (_, _, endpoint_data) = parse_ledger_data(&ledger_data)?; let endpoint_data: Endpoint = serde_json::from_str(endpoint_data.as_str().unwrap()) .map_err(|_| err_msg(VdrErrorKind::Resolver, "Could not parse endpoint data"))?; From b752781f8c8e2de70a01c4f841ceaa1bf39509ee Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 13:38:42 -0800 Subject: [PATCH 02/32] implemented caching Signed-off-by: wadeking98 --- indy-vdr-proxy/src/app.rs | 8 ++ indy-vdr-proxy/src/handlers.rs | 72 +++++++------ indy-vdr-proxy/src/main.rs | 40 +++++-- libindy_vdr/Cargo.toml | 1 + libindy_vdr/src/pool/builder.rs | 5 + libindy_vdr/src/pool/cache.rs | 166 ++++++++++++++++++++++++++---- libindy_vdr/src/pool/helpers.rs | 16 +-- libindy_vdr/src/pool/runner.rs | 27 +++-- libindy_vdr/src/resolver/pool.rs | 15 +-- libindy_vdr/src/resolver/utils.rs | 8 +- libindy_vdr/tests/utils/pool.rs | 2 +- 11 files changed, 268 insertions(+), 92 deletions(-) diff --git a/indy-vdr-proxy/src/app.rs b/indy-vdr-proxy/src/app.rs index 5788362b..9077fa57 100644 --- a/indy-vdr-proxy/src/app.rs +++ b/indy-vdr-proxy/src/app.rs @@ -13,6 +13,7 @@ pub struct Config { pub is_multiple: bool, pub tls_cert_path: Option, pub tls_key_path: Option, + pub cache: bool, } pub fn load_config() -> Result { @@ -81,6 +82,11 @@ pub fn load_config() -> Result { .long("tls-key") .value_name("KEY") .help("Path to the TLS private key file") + ).arg( + Arg::new("use-cache") + .long("use-cache") + .value_name("CACHE") + .help("Whether to use cache or not") ); #[cfg(unix)] @@ -139,6 +145,7 @@ pub fn load_config() -> Result { let tls_cert_path = matches.get_one::("tls-cert").cloned(); let tls_key_path = matches.get_one::("tls-key").cloned(); + let cache = matches.get_one::("use-cache").cloned().unwrap_or(false); Ok(Config { genesis, @@ -152,5 +159,6 @@ pub fn load_config() -> Result { is_multiple, tls_cert_path, tls_key_path, + cache }) } diff --git a/indy-vdr-proxy/src/handlers.rs b/indy-vdr-proxy/src/handlers.rs index f8dabb6c..82431004 100644 --- a/indy-vdr-proxy/src/handlers.rs +++ b/indy-vdr-proxy/src/handlers.rs @@ -6,6 +6,7 @@ use std::rc::Rc; use std::time::UNIX_EPOCH; use hyper::{Body, Method, Request, Response, StatusCode}; +use indy_vdr::pool::cache::Cache; use percent_encoding::percent_decode_str; use regex::Regex; @@ -300,6 +301,7 @@ async fn get_attrib( raw: &str, seq_no: Option, timestamp: Option, + cache: Option> ) -> VdrResult { let dest = DidValue::from_str(dest)?; let request = pool.get_request_builder().build_get_attrib_request( @@ -311,7 +313,7 @@ async fn get_attrib( seq_no, timestamp, )?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } @@ -320,59 +322,60 @@ async fn get_nym( nym: &str, seq_no: Option, timestamp: Option, + cache: Option> ) -> VdrResult { let nym = DidValue::from_str(nym)?; let request = pool .get_request_builder() .build_get_nym_request(None, &nym, seq_no, timestamp)?; - let result = perform_ledger_request(pool, &request).await?; + let result = 
perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_schema(pool: &T, schema_id: &str) -> VdrResult { +async fn get_schema(pool: &T, schema_id: &str, cache: Option>) -> VdrResult { let schema_id = SchemaId::from_str(schema_id)?; let request = pool .get_request_builder() .build_get_schema_request(None, &schema_id)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_cred_def(pool: &T, cred_def_id: &str) -> VdrResult { +async fn get_cred_def(pool: &T, cred_def_id: &str, cache: Option>) -> VdrResult { let cred_def_id = CredentialDefinitionId::from_str(cred_def_id)?; let request = pool .get_request_builder() .build_get_cred_def_request(None, &cred_def_id)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_revoc_reg_def(pool: &T, revoc_reg_def_id: &str) -> VdrResult { +async fn get_revoc_reg_def(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool .get_request_builder() .build_get_revoc_reg_def_request(None, &revoc_reg_def_id)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_revoc_reg(pool: &T, revoc_reg_def_id: &str) -> VdrResult { +async fn get_revoc_reg(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool.get_request_builder().build_get_revoc_reg_request( None, &revoc_reg_def_id, timestamp_now(), )?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_revoc_reg_delta(pool: &T, revoc_reg_def_id: &str) -> VdrResult { +async fn get_revoc_reg_delta(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool .get_request_builder() .build_get_revoc_reg_delta_request(None, &revoc_reg_def_id, None, timestamp_now())?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } @@ -383,19 +386,19 @@ async fn test_get_validator_info(pool: &T, pretty: bool) -> VdrResult(pool: &T) -> VdrResult { +async fn get_taa(pool: &T, cache: Option>) -> VdrResult { let request = pool .get_request_builder() .build_get_txn_author_agreement_request(None, None)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_aml(pool: &T) -> VdrResult { +async fn get_aml(pool: &T, cache: Option>) -> VdrResult { let request = pool .get_request_builder() .build_get_acceptance_mechanisms_request(None, None, None)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } @@ -404,6 +407,7 @@ async fn get_auth_rule( auth_type: Option, auth_action: Option, field: Option, + cache: Option> ) -> VdrResult { let request = pool.get_request_builder().build_get_auth_rule_request( None, @@ -413,24 +417,25 @@ async fn get_auth_rule( None, None, )?; - let result = 
perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, cache).await?; Ok(result.into()) } -async fn get_txn(pool: &T, ledger: LedgerType, seq_no: i32) -> VdrResult { - let result = perform_get_txn(pool, ledger.to_id(), seq_no).await?; +async fn get_txn(pool: &T, ledger: LedgerType, seq_no: i32, cache: Option>) -> VdrResult { + let result = perform_get_txn(pool, ledger.to_id(), seq_no, cache).await?; Ok(result.into()) } async fn submit_request(pool: &T, message: Vec) -> VdrResult { let request = PreparedRequest::from_request_json(message)?; - let result = perform_ledger_request(pool, &request).await?; + let result = perform_ledger_request(pool, &request, None).await?; Ok(result.into()) } pub async fn handle_request( req: Request, state: Rc>, + cache: Option> ) -> Result, hyper::Error> { let mut parts = req .uri() @@ -532,12 +537,12 @@ pub async fn handle_request( let resolver = Resolver::new(pool); // is DID Url if did.find('/').is_some() { - match resolver.dereference(did).await { + match resolver.dereference(did, cache.clone()).await { Ok(result) => Ok(ResponseType::Resolver(result)), Err(err) => http_status_msg(StatusCode::BAD_REQUEST, err.to_string()), } } else { - match resolver.resolve(did).await { + match resolver.resolve(did, cache).await { Ok(result) => Ok(ResponseType::Resolver(result)), Err(err) => http_status_msg(StatusCode::BAD_REQUEST, err.to_string()), } @@ -558,8 +563,8 @@ pub async fn handle_request( } } (&Method::GET, "genesis") => get_pool_genesis(&pool).await, - (&Method::GET, "taa") => get_taa(&pool).await, - (&Method::GET, "aml") => get_aml(&pool).await, + (&Method::GET, "taa") => get_taa(&pool, cache.clone()).await, + (&Method::GET, "aml") => get_aml(&pool, cache.clone()).await, (&Method::GET, "attrib") => { if let (Some(dest), Some(attrib)) = (parts.next(), parts.next()) { // NOTE: 'endpoint' is currently the only supported attribute @@ -569,7 +574,7 @@ pub async fn handle_request( let timestamp: Option = query_params .get("timestamp") .and_then(|ts| ts.as_str().parse().ok()); - get_attrib(&pool, &dest, &attrib, seq_no, timestamp).await + get_attrib(&pool, &dest, &attrib, seq_no, timestamp, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } @@ -582,18 +587,19 @@ pub async fn handle_request( Some(auth_type.to_owned()), Some(auth_action.to_owned()), Some("*".to_owned()), + cache.clone() ) .await } else { http_status(StatusCode::NOT_FOUND) } } else { - get_auth_rule(&pool, None, None, None).await // get all + get_auth_rule(&pool, None, None, None, cache.clone()).await // get all } } (&Method::GET, "cred_def") => { if let Some(cred_def_id) = parts.next() { - get_cred_def(&pool, &cred_def_id).await + get_cred_def(&pool, &cred_def_id, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } @@ -606,35 +612,35 @@ pub async fn handle_request( let timestamp: Option = query_params .get("timestamp") .and_then(|ts| ts.as_str().parse().ok()); - get_nym(&pool, &nym, seq_no, timestamp).await + get_nym(&pool, &nym, seq_no, timestamp, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } } (&Method::GET, "rev_reg_def") => { if let Some(rev_reg_def_id) = parts.next() { - get_revoc_reg_def(&pool, &rev_reg_def_id).await + get_revoc_reg_def(&pool, &rev_reg_def_id, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } } (&Method::GET, "rev_reg") => { if let Some(rev_reg_def_id) = parts.next() { - get_revoc_reg(&pool, &rev_reg_def_id).await + get_revoc_reg(&pool, &rev_reg_def_id, 
cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } } (&Method::GET, "rev_reg_delta") => { if let Some(rev_reg_def_id) = parts.next() { - get_revoc_reg_delta(&pool, &rev_reg_def_id).await + get_revoc_reg_delta(&pool, &rev_reg_def_id, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } } (&Method::GET, "schema") => { if let Some(schema_id) = parts.next() { - get_schema(&pool, &schema_id).await + get_schema(&pool, &schema_id, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } @@ -644,7 +650,7 @@ pub async fn handle_request( if let (Ok(ledger), Ok(txn)) = (LedgerType::try_from(ledger.as_str()), txn.parse::()) { - get_txn(&pool, ledger, txn).await + get_txn(&pool, ledger, txn, cache.clone()).await } else { http_status(StatusCode::NOT_FOUND) } diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index ea16501b..b8d2084b 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -36,6 +36,8 @@ use hyper_tls::HttpsConnector; #[cfg(unix)] use hyper_unix_connector::UnixConnector; +use indy_vdr::pool::RequestResultMeta; +use indy_vdr::pool::cache::{Cache, MemCacheStorage}; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] @@ -181,11 +183,16 @@ async fn init_app_state( Ok(state) } -async fn run_pools(state: Rc>, init_refresh: bool, interval_refresh: u32) { +async fn run_pools( + state: Rc>, + init_refresh: bool, + interval_refresh: u32, + cache: Option>, +) { let mut pool_states = HashMap::new(); for (namespace, pool_state) in &state.clone().borrow().pool_states { - let pool_state = match create_pool(state.clone(), namespace.as_str(), init_refresh).await { + let pool_state = match create_pool(state.clone(), namespace.as_str(), init_refresh, cache.clone()).await { Ok(pool) => { let pool = Some(pool.clone()); PoolState { @@ -213,7 +220,7 @@ async fn run_pools(state: Rc>, init_refresh: bool, interval_re if interval_refresh > 0 { loop { select! { - refresh_result = refresh_pools(state.clone(), interval_refresh) => { + refresh_result = refresh_pools(state.clone(), interval_refresh, cache.clone()) => { match refresh_result { Ok(upd_pool_states) => { state.borrow_mut().pool_states = upd_pool_states; @@ -258,13 +265,14 @@ async fn create_pool( state: Rc>, namespace: &str, refresh: bool, + cache: Option>, ) -> VdrResult { let pool_states = &state.borrow().pool_states; let pool_state = pool_states.get(namespace).unwrap(); let pool = PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone()).into_local()?; let refresh_pool = if refresh { - refresh_pool(state.clone(), &pool, 0).await? + refresh_pool(state.clone(), &pool, 0, cache).await? 
} else { None }; @@ -275,12 +283,13 @@ async fn refresh_pools( state: Rc>, // pool_states: HashMap, delay_mins: u32, + cache: Option>, ) -> VdrResult> { let mut upd_pool_states = HashMap::new(); let pool_states = &state.borrow().pool_states; for (namespace, pool_state) in pool_states { if let Some(pool) = &pool_state.pool { - let upd_pool = match refresh_pool(state.clone(), pool, delay_mins).await { + let upd_pool = match refresh_pool(state.clone(), pool, delay_mins, cache.clone()).await { Ok(p) => p, Err(err) => { eprintln!( @@ -307,13 +316,14 @@ async fn refresh_pool( state: Rc>, pool: &LocalPool, delay_mins: u32, + cache: Option>, ) -> VdrResult> { let n_pools = state.borrow().pool_states.len() as u32; if delay_mins > 0 { tokio::time::sleep(Duration::from_secs((delay_mins * 60 / n_pools) as u64)).await } - let (txns, _meta) = perform_refresh(pool).await?; + let (txns, _meta) = perform_refresh(pool, cache).await?; if let Some(txns) = txns { let pool = PoolBuilder::new(PoolConfig::default(), txns) .refreshed(true) @@ -426,13 +436,27 @@ where I::Conn: tokio::io::AsyncRead + tokio::io::AsyncWrite + Send + Unpin, I::Error: Into>, { - let until_done = run_pools(state.clone(), config.init_refresh, config.interval_refresh); + let cache = if config.cache { + let mem_storage = MemCacheStorage::new(); + let mem_cache = Cache::new(mem_storage, 86400); + Some(mem_cache) + } else { + None + }; + + let until_done = run_pools( + state.clone(), + config.init_refresh, + config.interval_refresh, + cache.clone(), + ); let svc = make_service_fn(move |_| { let state = state.clone(); + let cache = cache.clone(); async move { let state = state.clone(); Ok::<_, hyper::Error>(service_fn(move |req| { - handlers::handle_request(req, state.to_owned()) + handlers::handle_request(req, state.to_owned(), cache.clone()) })) } }); diff --git a/libindy_vdr/Cargo.toml b/libindy_vdr/Cargo.toml index 734bc87a..82f3570c 100644 --- a/libindy_vdr/Cargo.toml +++ b/libindy_vdr/Cargo.toml @@ -60,6 +60,7 @@ thiserror = "1.0" time = { version = "=0.3.20", features = ["parsing"] } url = "2.2.2" zmq = "0.9" +tokio = { version = "1.35.1", features = ["full"] } [dev-dependencies] rstest = "0.18" diff --git a/libindy_vdr/src/pool/builder.rs b/libindy_vdr/src/pool/builder.rs index d85fdf22..54120187 100644 --- a/libindy_vdr/src/pool/builder.rs +++ b/libindy_vdr/src/pool/builder.rs @@ -3,6 +3,8 @@ use std::collections::HashMap; use crate::common::error::prelude::*; use crate::config::PoolConfig; +use super::RequestResultMeta; +use super::cache::Cache; use super::genesis::PoolTransactions; use super::manager::{LocalPool, SharedPool}; use super::networker::{MakeLocal, MakeShared, ZMQNetworkerFactory}; @@ -15,6 +17,7 @@ pub struct PoolBuilder { transactions: PoolTransactions, node_weights: Option>, refreshed: bool, + cache: Option> } impl PoolBuilder { @@ -25,6 +28,7 @@ impl PoolBuilder { transactions, node_weights: None, refreshed: false, + cache: None } } @@ -75,6 +79,7 @@ impl PoolBuilder { MakeLocal(ZMQNetworkerFactory {}), self.node_weights, self.refreshed, + self.cache )) } } diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 53cfb862..dc3b2121 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -1,25 +1,78 @@ -use std::{collections::HashMap, hash::Hash}; +use std::{ + collections::HashMap, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; -use futures_util::{future, Future}; +use tokio::sync::RwLock; use super::RequestResultMeta; -pub trait Cacheable { - fn 
get_cached_request(&self, key: K) -> impl Future>; +// cant use async traits yet because not object safe +pub trait CacheStorage: Send + Sync + 'static { + fn get(&self, key: &K) -> Option<(V, u64)>; - fn cache_request( - &mut self, - key: K, - result: V, - meta: RequestResultMeta, - ) -> impl Future>; + fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)>; +} + +pub struct Cache { + storage: Arc>>, + expiration_offset: u64, +} + +impl Cache { + pub fn new(storage: impl CacheStorage, expiration_offset: u64) -> Self { + Self { + storage: Arc::new(RwLock::new(storage)), + expiration_offset, + } + } + pub async fn get(&self, key: &K) -> Option { + match self.storage.read().await.get(key) { + Some((item, expiry)) => { + if expiry > 0 + && expiry + < SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + { + None + } else { + Some(item) + } + } + None => None, + } + } + pub async fn insert(&mut self, key: K, value: V) -> Option { + let exp_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() + + self.expiration_offset; + match self.storage.write().await.insert(key, value, exp_timestamp) { + Some(item) => Some(item.0), + None => None, + } + } } -pub struct MemCache { - cache: HashMap, +// need to implement Clone manually because RwLock doesn't implement Clone +impl Clone for Cache { + fn clone(&self) -> Self { + Self { + storage: self.storage.clone(), + expiration_offset: self.expiration_offset, + } + } } -impl MemCache { +pub struct MemCacheStorage { + cache: HashMap, +} + +impl MemCacheStorage { pub fn new() -> Self { Self { cache: HashMap::new(), @@ -27,17 +80,86 @@ impl MemCache { } } -impl Cacheable for MemCache { - fn get_cached_request(&self, key: K) -> impl Future> { - future::ready(self.cache.get(&key).and_then(|(v, m, _)| Some((v.clone(), m.clone())))) +impl CacheStorage for MemCacheStorage { + fn get(&self, key: &String) -> Option<((String, RequestResultMeta), u64)> { + self.cache + .get(key) + .map(|(v, m, e)| ((v.clone(), m.clone()), *e)) } - fn cache_request( + fn insert( &mut self, - key: K, - result: V, - meta: RequestResultMeta, - ) -> impl Future> { - future::ready(self.cache.insert(key, (result, meta, 0)).and_then(|(v, m, _)| Some((v, m)))) + key: String, + value: (String, RequestResultMeta), + expiration: u64, + ) -> Option<((String, RequestResultMeta), u64)> { + self.cache + .insert(key, (value.0, value.1, expiration)) + .map(|(v, m, e)| ((v.clone(), m.clone()), e)) } } + +// pub trait Cacheable: Clone + Send + Sync + 'static { +// fn get_cached_request(&self, key: K) -> impl Future>; + +// fn cache_request( +// &mut self, +// key: K, +// result: V, +// meta: RequestResultMeta, +// ) -> impl Future>; +// } + +// pub struct MemCache { +// cache: Arc>>, +// } + +// impl MemCache { +// pub fn new() -> Self { +// Self { +// cache: Arc::new(RwLock::new(HashMap::new())), +// } +// } +// } + +// need to implement Clone manually because RwLock doesn't implement Clone +// impl Clone for MemCache { +// fn clone(&self) -> Self { +// Self { +// cache: self.cache.clone(), +// } +// } +// } + +// impl Cacheable +// for MemCache +// { +// fn get_cached_request(&self, key: K) -> impl Future> { +// future::ready(match self.cache.read() { +// Ok(cache) => cache +// .get(&key) +// .and_then(|(v, m, _)| Some((v.clone(), m.clone()))), +// Err(err) => { +// warn!("Error reading cache: {}", err); +// None +// } +// }) +// } + +// fn cache_request( +// &mut self, +// key: K, +// result: V, +// meta: RequestResultMeta, +// ) 
-> impl Future> { +// future::ready(match self.cache.write() { +// Ok(mut cache) => cache +// .insert(key, (result, meta, 0)) +// .and_then(|(v, m, _)| Some((v, m))), +// Err(err) => { +// warn!("Error writing cache: {}", err); +// None +// } +// }) +// } +// } diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index b24b2f3f..14acf124 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -3,7 +3,7 @@ use std::string::ToString; use serde_json; -use super::cache::Cacheable; +use super::cache::Cache; use super::genesis::PoolTransactions; use super::handlers::{ build_pool_catchup_request, build_pool_status_request, handle_catchup_request, @@ -20,7 +20,7 @@ use crate::utils::base58; /// Perform a pool ledger status request to see if catchup is required pub async fn perform_pool_status_request( pool: &T, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult<(RequestResult>, RequestResultMeta)> { let (mt_root, mt_size) = pool.get_merkle_tree_info(); @@ -93,7 +93,7 @@ pub async fn perform_pool_catchup_request( /// Perform a pool ledger status request followed by a catchup request if necessary pub async fn perform_refresh( pool: &T, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult<(Option, RequestResultMeta)> { let (result, meta) = perform_pool_status_request(pool, cache).await?; trace!("Got status result: {:?}", &result); @@ -169,7 +169,7 @@ pub async fn perform_get_txn( pool: &T, ledger_type: i32, seq_no: i32, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult<(RequestResult, RequestResultMeta)> { let builder = pool.get_request_builder(); let prepared = builder.build_get_txn_request(None, ledger_type, seq_no)?; @@ -194,7 +194,7 @@ pub async fn perform_ledger_action( pub async fn perform_ledger_request( pool: &T, prepared: &PreparedRequest, - cache_opt: Option<&mut impl Cacheable>, + cache_opt: Option>, ) -> VdrResult<(RequestResult, RequestResultMeta)> { let mut request = pool .create_request(prepared.req_id.clone(), prepared.req_json.to_string()) @@ -223,7 +223,7 @@ pub async fn perform_ledger_request( if is_read_req { if let Some(ref cache) = cache_opt { - if let Some((response, meta)) = cache.get_cached_request(prepared.req_id.clone()).await { + if let Some((response, meta)) = cache.get(&prepared.req_id).await { return Ok((RequestResult::Reply(response), meta)); } } @@ -232,8 +232,8 @@ pub async fn perform_ledger_request( handle_consensus_request(&mut request, sp_key, sp_timestamps, is_read_req, sp_parser).await; if is_read_req && result.is_ok() { if let (RequestResult::Reply(response), meta) = result.as_ref().unwrap() { - if let Some(cache) = cache_opt { - cache.cache_request(prepared.req_id.clone(), response.to_string(), meta.clone()); + if let Some(mut cache) = cache_opt { + cache.insert(prepared.req_id.clone(), (response.to_string(), meta.clone())).await; } } } diff --git a/libindy_vdr/src/pool/runner.rs b/libindy_vdr/src/pool/runner.rs index 1f6dcf0b..b4b68996 100644 --- a/libindy_vdr/src/pool/runner.rs +++ b/libindy_vdr/src/pool/runner.rs @@ -7,6 +7,7 @@ use futures_executor::block_on; use futures_util::stream::{FuturesUnordered, StreamExt}; use futures_util::{select, FutureExt}; +use super::cache::Cache; use super::helpers::{perform_ledger_request, perform_refresh}; use super::networker::{Networker, NetworkerFactory}; use super::requests::PreparedRequest; @@ -34,6 +35,7 @@ impl PoolRunner { networker_factory: F, node_weights: Option>, refreshed: bool, + cache: Option>, 
) -> Self where F: NetworkerFactory> + Send + 'static, @@ -50,7 +52,7 @@ impl PoolRunner { ) .unwrap(); let mut thread = PoolThread::new(pool, receiver); - thread.run(); + thread.run(cache); debug!("Pool thread ended") }); Self { @@ -166,14 +168,16 @@ impl PoolThread { Self { pool, receiver } } - fn run(&mut self) { - block_on(self.run_loop()) + fn run(&mut self, cache: Option>) { + block_on(self.run_loop(cache)) } - async fn run_loop(&mut self) { + async fn run_loop(&mut self, cache: Option>) { let mut futures = FuturesUnordered::new(); let receiver = &mut self.receiver; loop { + let cache_ledger_request = cache.clone(); + let cache_pool_refresh = cache.clone(); select! { recv_evt = receiver.next() => { match recv_evt { @@ -195,11 +199,11 @@ impl PoolThread { callback(vers); } Some(PoolEvent::Refresh(callback)) => { - let fut = _perform_refresh(&self.pool, callback); + let fut = _perform_refresh(&self.pool, callback, cache_pool_refresh); futures.push(fut.boxed_local()); } Some(PoolEvent::SendRequest(request, callback)) => { - let fut = _perform_ledger_request(&self.pool, request, callback); + let fut = _perform_ledger_request(&self.pool, request, callback, cache_ledger_request); futures.push(fut.boxed_local()); } None => { trace!("Pool runner sender dropped") } @@ -217,8 +221,12 @@ impl PoolThread { } } -async fn _perform_refresh(pool: &LocalPool, callback: Callback) { - let result = perform_refresh(pool).await; +async fn _perform_refresh( + pool: &LocalPool, + callback: Callback, + cache: Option>, +) { + let result = perform_refresh(pool, cache).await; callback(result); } @@ -226,7 +234,8 @@ async fn _perform_ledger_request( pool: &LocalPool, request: PreparedRequest, callback: Callback, + cache: Option>, ) { - let result = perform_ledger_request(pool, &request).await; + let result = perform_ledger_request(pool, &request, cache).await; callback(result); } diff --git a/libindy_vdr/src/resolver/pool.rs b/libindy_vdr/src/resolver/pool.rs index bea59542..594b4e76 100644 --- a/libindy_vdr/src/resolver/pool.rs +++ b/libindy_vdr/src/resolver/pool.rs @@ -2,6 +2,7 @@ use super::did::DidUrl; use crate::common::error::prelude::*; use crate::ledger::RequestBuilder; +use crate::pool::cache::Cache; use crate::pool::{Pool, PoolRunner, RequestResult, RequestResultMeta}; use super::types::*; @@ -22,10 +23,10 @@ impl PoolResolver { } /// Dereference a DID Url and return a serialized `DereferencingResult` - pub async fn dereference(&self, did_url: &str) -> VdrResult { + pub async fn dereference(&self, did_url: &str, cache: Option>) -> VdrResult { debug!("PoolResolver: Dereference DID Url {}", did_url); let did_url = DidUrl::parse(did_url)?; - let (data, metadata) = self._resolve(&did_url).await?; + let (data, metadata) = self._resolve(&did_url, cache).await?; let content = match data { Result::Content(c) => Some(c), @@ -48,10 +49,10 @@ impl PoolResolver { } /// Resolve a DID and return a serialized `ResolutionResult` - pub async fn resolve(&self, did: &str) -> VdrResult { + pub async fn resolve(&self, did: &str, cache: Option>) -> VdrResult { debug!("PoolResolver: Resolve DID {}", did); let did = DidUrl::parse(did)?; - let (data, metadata) = self._resolve(&did).await?; + let (data, metadata) = self._resolve(&did, cache.clone()).await?; let md = if let Metadata::DidDocumentMetadata(md) = metadata { Some(md) @@ -85,7 +86,7 @@ impl PoolResolver { } else { (None, None) }; - doc.endpoint = fetch_legacy_endpoint(&self.pool, &did.id, seq_no, timestamp) + doc.endpoint = fetch_legacy_endpoint(&self.pool, &did.id, 
seq_no, timestamp, cache) .await .ok(); } @@ -104,14 +105,14 @@ impl PoolResolver { } // Internal method to resolve and dereference - async fn _resolve(&self, did_url: &DidUrl) -> VdrResult<(Result, Metadata)> { + async fn _resolve(&self, did_url: &DidUrl, cache: Option>) -> VdrResult<(Result, Metadata)> { let builder = self.pool.get_request_builder(); let request = build_request(did_url, &builder)?; debug!( "PoolResolver: Prepared Request for DID {}: {:#?}", did_url.id, request ); - let ledger_data = handle_request(&self.pool, &request).await?; + let ledger_data = handle_request(&self.pool, &request, cache).await?; let namespace = did_url.namespace.clone(); let result = handle_internal_resolution_result(namespace.as_str(), &ledger_data)?; diff --git a/libindy_vdr/src/resolver/utils.rs b/libindy_vdr/src/resolver/utils.rs index 2c0ad93e..a99d7deb 100644 --- a/libindy_vdr/src/resolver/utils.rs +++ b/libindy_vdr/src/resolver/utils.rs @@ -11,7 +11,7 @@ use crate::ledger::constants; use crate::ledger::identifiers::{CredentialDefinitionId, RevocationRegistryId, SchemaId}; use crate::ledger::responses::{Endpoint, GetNymResultV1}; use crate::ledger::RequestBuilder; -use crate::pool::cache::Cacheable; +use crate::pool::cache::Cache; use crate::pool::helpers::perform_ledger_request; use crate::pool::{Pool, PreparedRequest, RequestResult, RequestResultMeta}; use crate::utils::did::DidValue; @@ -255,7 +255,7 @@ pub fn parse_or_now(datetime: Option<&String>) -> VdrResult { pub async fn handle_request( pool: &T, request: &PreparedRequest, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult { let (result, _meta) = request_transaction(pool, request, cache).await?; match result { @@ -267,7 +267,7 @@ pub async fn handle_request( pub async fn request_transaction( pool: &T, request: &PreparedRequest, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult<(RequestResult, RequestResultMeta)> { perform_ledger_request(pool, request, cache).await } @@ -278,7 +278,7 @@ pub async fn fetch_legacy_endpoint( did: &DidValue, seq_no: Option, timestamp: Option, - cache: Option<&mut impl Cacheable>, + cache: Option>, ) -> VdrResult { let builder = pool.get_request_builder(); let request = builder.build_get_attrib_request( diff --git a/libindy_vdr/tests/utils/pool.rs b/libindy_vdr/tests/utils/pool.rs index 7ced5294..76459b61 100644 --- a/libindy_vdr/tests/utils/pool.rs +++ b/libindy_vdr/tests/utils/pool.rs @@ -59,7 +59,7 @@ impl TestPool { pub fn send_request(&self, prepared_request: &PreparedRequest) -> Result { block_on(async { - let (request_result, _meta) = perform_ledger_request(&self.pool, prepared_request) + let (request_result, _meta) = perform_ledger_request(&self.pool, prepared_request, None) .await .unwrap(); From 4478b2f60b725d72582ed2991b082ff690d63f60 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 14:15:28 -0800 Subject: [PATCH 03/32] finished writing tests Signed-off-by: wadeking98 --- libindy_vdr/src/lib.rs | 2 +- libindy_vdr/src/pool/cache.rs | 118 ++++++++++------------------------ 2 files changed, 36 insertions(+), 84 deletions(-) diff --git a/libindy_vdr/src/lib.rs b/libindy_vdr/src/lib.rs index c872312e..16c5c0c1 100644 --- a/libindy_vdr/src/lib.rs +++ b/libindy_vdr/src/lib.rs @@ -33,7 +33,7 @@ //! // Create a new GET_TXN request and dispatch it //! let ledger_type = 1; // 1 identifies the Domain ledger, see pool::LedgerType //! let seq_no = 1; // Transaction sequence number -//! 
let (result, _meta) = block_on(perform_get_txn(&pool, ledger_type, seq_no)).unwrap(); +//! let (result, _meta) = block_on(perform_get_txn(&pool, ledger_type, seq_no, None)).unwrap(); #![cfg_attr(feature = "fatal_warnings", deny(warnings))] #![recursion_limit = "1024"] // for select! macro usage diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index dc3b2121..1b1f3a24 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -1,13 +1,12 @@ use std::{ collections::HashMap, + hash::Hash, sync::Arc, time::{SystemTime, UNIX_EPOCH}, }; use tokio::sync::RwLock; -use super::RequestResultMeta; - // cant use async traits yet because not object safe pub trait CacheStorage: Send + Sync + 'static { fn get(&self, key: &K) -> Option<(V, u64)>; @@ -20,7 +19,7 @@ pub struct Cache { expiration_offset: u64, } -impl Cache { +impl Cache { pub fn new(storage: impl CacheStorage, expiration_offset: u64) -> Self { Self { storage: Arc::new(RwLock::new(storage)), @@ -68,11 +67,11 @@ impl Clone for Cache { } } -pub struct MemCacheStorage { - cache: HashMap, +pub struct MemCacheStorage { + cache: HashMap, } -impl MemCacheStorage { +impl MemCacheStorage { pub fn new() -> Self { Self { cache: HashMap::new(), @@ -80,86 +79,39 @@ impl MemCacheStorage { } } -impl CacheStorage for MemCacheStorage { - fn get(&self, key: &String) -> Option<((String, RequestResultMeta), u64)> { - self.cache - .get(key) - .map(|(v, m, e)| ((v.clone(), m.clone()), *e)) +impl CacheStorage + for MemCacheStorage +{ + fn get(&self, key: &K) -> Option<(V, u64)> { + self.cache.get(key).map(|(v, e)| (v.clone(), *e)) } - fn insert( - &mut self, - key: String, - value: (String, RequestResultMeta), - expiration: u64, - ) -> Option<((String, RequestResultMeta), u64)> { + fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)> { self.cache - .insert(key, (value.0, value.1, expiration)) - .map(|(v, m, e)| ((v.clone(), m.clone()), e)) + .insert(key, (value, expiration)) + .map(|(v, e)| (v.clone(), e)) } } -// pub trait Cacheable: Clone + Send + Sync + 'static { -// fn get_cached_request(&self, key: K) -> impl Future>; - -// fn cache_request( -// &mut self, -// key: K, -// result: V, -// meta: RequestResultMeta, -// ) -> impl Future>; -// } - -// pub struct MemCache { -// cache: Arc>>, -// } - -// impl MemCache { -// pub fn new() -> Self { -// Self { -// cache: Arc::new(RwLock::new(HashMap::new())), -// } -// } -// } - -// need to implement Clone manually because RwLock doesn't implement Clone -// impl Clone for MemCache { -// fn clone(&self) -> Self { -// Self { -// cache: self.cache.clone(), -// } -// } -// } - -// impl Cacheable -// for MemCache -// { -// fn get_cached_request(&self, key: K) -> impl Future> { -// future::ready(match self.cache.read() { -// Ok(cache) => cache -// .get(&key) -// .and_then(|(v, m, _)| Some((v.clone(), m.clone()))), -// Err(err) => { -// warn!("Error reading cache: {}", err); -// None -// } -// }) -// } - -// fn cache_request( -// &mut self, -// key: K, -// result: V, -// meta: RequestResultMeta, -// ) -> impl Future> { -// future::ready(match self.cache.write() { -// Ok(mut cache) => cache -// .insert(key, (result, meta, 0)) -// .and_then(|(v, m, _)| Some((v, m))), -// Err(err) => { -// warn!("Error writing cache: {}", err); -// None -// } -// }) -// } -// } +#[cfg(test)] +mod tests { + + use futures_executor::block_on; + + #[rstest] + fn test_cache() { + use super::*; + use std::{time::Duration, thread}; + + let mut cache = 
Cache::new(MemCacheStorage::new(), 1); + block_on(async { + cache.insert("key".to_string(), "value".to_string()).await; + assert_eq!( + cache.get(&"key".to_string()).await, + Some("value".to_string()) + ); + thread::sleep(Duration::from_secs(2)); + assert_eq!(cache.get(&"key".to_string()).await, None); + }); + } +} From aa57d69d230b5ce4053c83830af5a96920598bbc Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 14:26:31 -0800 Subject: [PATCH 04/32] fixed cache storage to be async Signed-off-by: wadeking98 --- indy-vdr-proxy/src/app.rs | 4 ++-- libindy_vdr/Cargo.toml | 1 + libindy_vdr/src/pool/cache.rs | 24 ++++++++++++++++-------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/indy-vdr-proxy/src/app.rs b/indy-vdr-proxy/src/app.rs index 9077fa57..ab905907 100644 --- a/indy-vdr-proxy/src/app.rs +++ b/indy-vdr-proxy/src/app.rs @@ -84,7 +84,7 @@ pub fn load_config() -> Result { .help("Path to the TLS private key file") ).arg( Arg::new("use-cache") - .long("use-cache") + .long("use-cache").action(ArgAction::SetTrue) .value_name("CACHE") .help("Whether to use cache or not") ); @@ -145,7 +145,7 @@ pub fn load_config() -> Result { let tls_cert_path = matches.get_one::("tls-cert").cloned(); let tls_key_path = matches.get_one::("tls-key").cloned(); - let cache = matches.get_one::("use-cache").cloned().unwrap_or(false); + let cache = matches.get_flag("use-cache"); Ok(Config { genesis, diff --git a/libindy_vdr/Cargo.toml b/libindy_vdr/Cargo.toml index 82f3570c..0eaea5dd 100644 --- a/libindy_vdr/Cargo.toml +++ b/libindy_vdr/Cargo.toml @@ -61,6 +61,7 @@ time = { version = "=0.3.20", features = ["parsing"] } url = "2.2.2" zmq = "0.9" tokio = { version = "1.35.1", features = ["full"] } +async-trait = "0.1.77" [dev-dependencies] rstest = "0.18" diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 1b1f3a24..0a6936bb 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use std::{ collections::HashMap, hash::Hash, @@ -8,10 +9,11 @@ use std::{ use tokio::sync::RwLock; // cant use async traits yet because not object safe +#[async_trait] pub trait CacheStorage: Send + Sync + 'static { - fn get(&self, key: &K) -> Option<(V, u64)>; + async fn get(&self, key: &K) -> Option<(V, u64)>; - fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)>; + async fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)>; } pub struct Cache { @@ -27,7 +29,7 @@ impl Cache { } } pub async fn get(&self, key: &K) -> Option { - match self.storage.read().await.get(key) { + match self.storage.read().await.get(key).await { Some((item, expiry)) => { if expiry > 0 && expiry @@ -50,7 +52,13 @@ impl Cache { .unwrap() .as_secs() + self.expiration_offset; - match self.storage.write().await.insert(key, value, exp_timestamp) { + match self + .storage + .write() + .await + .insert(key, value, exp_timestamp) + .await + { Some(item) => Some(item.0), None => None, } @@ -78,15 +86,15 @@ impl MemCacheStorage { } } } - +#[async_trait] impl CacheStorage for MemCacheStorage { - fn get(&self, key: &K) -> Option<(V, u64)> { + async fn get(&self, key: &K) -> Option<(V, u64)> { self.cache.get(key).map(|(v, e)| (v.clone(), *e)) } - fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)> { + async fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)> { self.cache .insert(key, (value, expiration)) .map(|(v, e)| (v.clone(), e)) @@ -101,7 +109,7 @@ mod 
tests { #[rstest] fn test_cache() { use super::*; - use std::{time::Duration, thread}; + use std::{thread, time::Duration}; let mut cache = Cache::new(MemCacheStorage::new(), 1); block_on(async { From 0b2ddd0e51a77818a22a5ad8f68cb52c091e0c7e Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 16:24:31 -0800 Subject: [PATCH 05/32] fixed request cache key Signed-off-by: wadeking98 --- libindy_vdr/src/pool/helpers.rs | 11 +++++++++-- libindy_vdr/src/pool/requests/prepared_request.rs | 8 ++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 14acf124..8d9858c7 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -221,9 +221,11 @@ pub async fn perform_ledger_request( RequestMethod::Consensus => (None, (None, None), false, None), }; + let cache_key = prepared.get_cache_key()?; + if is_read_req { if let Some(ref cache) = cache_opt { - if let Some((response, meta)) = cache.get(&prepared.req_id).await { + if let Some((response, meta)) = cache.get(&cache_key).await { return Ok((RequestResult::Reply(response), meta)); } } @@ -233,7 +235,12 @@ pub async fn perform_ledger_request( if is_read_req && result.is_ok() { if let (RequestResult::Reply(response), meta) = result.as_ref().unwrap() { if let Some(mut cache) = cache_opt { - cache.insert(prepared.req_id.clone(), (response.to_string(), meta.clone())).await; + cache + .insert( + cache_key, + (response.to_string(), meta.clone()), + ) + .await; } } } diff --git a/libindy_vdr/src/pool/requests/prepared_request.rs b/libindy_vdr/src/pool/requests/prepared_request.rs index b0a51bc4..902cc5ff 100644 --- a/libindy_vdr/src/pool/requests/prepared_request.rs +++ b/libindy_vdr/src/pool/requests/prepared_request.rs @@ -80,6 +80,14 @@ impl PreparedRequest { } } + pub fn get_cache_key(&self) -> VdrResult{ + let mut req_json = self.req_json.clone(); + let req_map = req_json.as_object_mut().ok_or_else(|| input_err("Invalid request JSON"))?; + req_map.remove("reqId"); + req_map.remove("signature"); + serde_json::to_string(&req_json).with_input_err("Invalid request JSON") + } + /// Generate the normalized representation of a transaction for signing the request pub fn get_signature_input(&self) -> VdrResult { Ok(serialize_signature(&self.req_json)?) 
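
[Note between patches: a standalone sketch of what the get_cache_key() normalization in patch 05 buys. Two clients issuing the same read request differ only in the volatile reqId (and signature, once signed), so stripping those fields makes logically identical requests collide on one cache entry. Only serde_json is assumed; the example requests are invented for illustration, not taken from the patches.]

use serde_json::{json, Value};

// Mirrors the normalization in PreparedRequest::get_cache_key() above: drop
// the volatile fields, then use the remaining canonical JSON as the cache key.
fn cache_key(req: &Value) -> String {
    let mut req = req.clone();
    if let Some(map) = req.as_object_mut() {
        map.remove("reqId");
        map.remove("signature");
    }
    req.to_string()
}

fn main() {
    // The same GET_NYM query (operation type "105") sent twice with
    // different request ids.
    let a = json!({"reqId": 1705551111_u64, "operation": {"type": "105", "dest": "V4SGRU86Z58d6TV7PBUe6f"}});
    let b = json!({"reqId": 1705559999_u64, "operation": {"type": "105", "dest": "V4SGRU86Z58d6TV7PBUe6f"}});
    // Both requests map to the same cache entry.
    assert_eq!(cache_key(&a), cache_key(&b));
}
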
From 2b99028abc67e2e05168b2f0a7a8a902dee06f59 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 17:53:20 -0800 Subject: [PATCH 06/32] updated formatting Signed-off-by: wadeking98 --- indy-vdr-proxy/src/app.rs | 2 +- indy-vdr-proxy/src/handlers.rs | 59 ++++++++++++++----- indy-vdr-proxy/src/main.rs | 16 +++-- libindy_vdr/src/pool/builder.rs | 8 +-- libindy_vdr/src/pool/cache.rs | 16 ++++- libindy_vdr/src/pool/helpers.rs | 5 +- libindy_vdr/src/pool/mod.rs | 4 +- .../src/pool/requests/prepared_request.rs | 6 +- libindy_vdr/src/resolver/pool.rs | 25 ++++++-- libindy_vdr/src/state_proof/mod.rs | 4 +- libindy_vdr/tests/utils/pool.rs | 7 ++- 11 files changed, 110 insertions(+), 42 deletions(-) diff --git a/indy-vdr-proxy/src/app.rs b/indy-vdr-proxy/src/app.rs index ab905907..a334981a 100644 --- a/indy-vdr-proxy/src/app.rs +++ b/indy-vdr-proxy/src/app.rs @@ -159,6 +159,6 @@ pub fn load_config() -> Result { is_multiple, tls_cert_path, tls_key_path, - cache + cache, }) } diff --git a/indy-vdr-proxy/src/handlers.rs b/indy-vdr-proxy/src/handlers.rs index 82431004..dc2f33ec 100644 --- a/indy-vdr-proxy/src/handlers.rs +++ b/indy-vdr-proxy/src/handlers.rs @@ -301,7 +301,7 @@ async fn get_attrib( raw: &str, seq_no: Option, timestamp: Option, - cache: Option> + cache: Option>, ) -> VdrResult { let dest = DidValue::from_str(dest)?; let request = pool.get_request_builder().build_get_attrib_request( @@ -322,7 +322,7 @@ async fn get_nym( nym: &str, seq_no: Option, timestamp: Option, - cache: Option> + cache: Option>, ) -> VdrResult { let nym = DidValue::from_str(nym)?; let request = pool @@ -332,7 +332,11 @@ async fn get_nym( Ok(result.into()) } -async fn get_schema(pool: &T, schema_id: &str, cache: Option>) -> VdrResult { +async fn get_schema( + pool: &T, + schema_id: &str, + cache: Option>, +) -> VdrResult { let schema_id = SchemaId::from_str(schema_id)?; let request = pool .get_request_builder() @@ -341,7 +345,11 @@ async fn get_schema(pool: &T, schema_id: &str, cache: Option(pool: &T, cred_def_id: &str, cache: Option>) -> VdrResult { +async fn get_cred_def( + pool: &T, + cred_def_id: &str, + cache: Option>, +) -> VdrResult { let cred_def_id = CredentialDefinitionId::from_str(cred_def_id)?; let request = pool .get_request_builder() @@ -350,7 +358,11 @@ async fn get_cred_def(pool: &T, cred_def_id: &str, cache: Option(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { +async fn get_revoc_reg_def( + pool: &T, + revoc_reg_def_id: &str, + cache: Option>, +) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool .get_request_builder() @@ -359,7 +371,11 @@ async fn get_revoc_reg_def(pool: &T, revoc_reg_def_id: &str, cache: Opt Ok(result.into()) } -async fn get_revoc_reg(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { +async fn get_revoc_reg( + pool: &T, + revoc_reg_def_id: &str, + cache: Option>, +) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool.get_request_builder().build_get_revoc_reg_request( None, @@ -370,7 +386,11 @@ async fn get_revoc_reg(pool: &T, revoc_reg_def_id: &str, cache: Option< Ok(result.into()) } -async fn get_revoc_reg_delta(pool: &T, revoc_reg_def_id: &str, cache: Option>) -> VdrResult { +async fn get_revoc_reg_delta( + pool: &T, + revoc_reg_def_id: &str, + cache: Option>, +) -> VdrResult { let revoc_reg_def_id = RevocationRegistryId::from_str(revoc_reg_def_id)?; let request = pool .get_request_builder() @@ -386,7 +406,10 @@ async fn 
test_get_validator_info(pool: &T, pretty: bool) -> VdrResult(pool: &T, cache: Option>) -> VdrResult { +async fn get_taa( + pool: &T, + cache: Option>, +) -> VdrResult { let request = pool .get_request_builder() .build_get_txn_author_agreement_request(None, None)?; @@ -394,7 +417,10 @@ async fn get_taa(pool: &T, cache: Option(pool: &T, cache: Option>) -> VdrResult { +async fn get_aml( + pool: &T, + cache: Option>, +) -> VdrResult { let request = pool .get_request_builder() .build_get_acceptance_mechanisms_request(None, None, None)?; @@ -407,7 +433,7 @@ async fn get_auth_rule( auth_type: Option, auth_action: Option, field: Option, - cache: Option> + cache: Option>, ) -> VdrResult { let request = pool.get_request_builder().build_get_auth_rule_request( None, @@ -421,8 +447,13 @@ async fn get_auth_rule( Ok(result.into()) } -async fn get_txn(pool: &T, ledger: LedgerType, seq_no: i32, cache: Option>) -> VdrResult { - let result = perform_get_txn(pool, ledger.to_id(), seq_no, cache).await?; +async fn get_txn( + pool: &T, + ledger: LedgerType, + seq_no: i32, + cache: Option>, +) -> VdrResult { + let result = perform_get_txn(pool, ledger.to_id(), seq_no, cache).await?; Ok(result.into()) } @@ -435,7 +466,7 @@ async fn submit_request(pool: &T, message: Vec) -> VdrResult, state: Rc>, - cache: Option> + cache: Option>, ) -> Result, hyper::Error> { let mut parts = req .uri() @@ -587,7 +618,7 @@ pub async fn handle_request( Some(auth_type.to_owned()), Some(auth_action.to_owned()), Some("*".to_owned()), - cache.clone() + cache.clone(), ) .await } else { diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index b8d2084b..508d1e4d 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -36,8 +36,8 @@ use hyper_tls::HttpsConnector; #[cfg(unix)] use hyper_unix_connector::UnixConnector; -use indy_vdr::pool::RequestResultMeta; use indy_vdr::pool::cache::{Cache, MemCacheStorage}; +use indy_vdr::pool::RequestResultMeta; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] @@ -192,7 +192,14 @@ async fn run_pools( let mut pool_states = HashMap::new(); for (namespace, pool_state) in &state.clone().borrow().pool_states { - let pool_state = match create_pool(state.clone(), namespace.as_str(), init_refresh, cache.clone()).await { + let pool_state = match create_pool( + state.clone(), + namespace.as_str(), + init_refresh, + cache.clone(), + ) + .await + { Ok(pool) => { let pool = Some(pool.clone()); PoolState { @@ -289,7 +296,8 @@ async fn refresh_pools( let pool_states = &state.borrow().pool_states; for (namespace, pool_state) in pool_states { if let Some(pool) = &pool_state.pool { - let upd_pool = match refresh_pool(state.clone(), pool, delay_mins, cache.clone()).await { + let upd_pool = match refresh_pool(state.clone(), pool, delay_mins, cache.clone()).await + { Ok(p) => p, Err(err) => { eprintln!( @@ -443,7 +451,7 @@ where } else { None }; - + let until_done = run_pools( state.clone(), config.init_refresh, diff --git a/libindy_vdr/src/pool/builder.rs b/libindy_vdr/src/pool/builder.rs index 54120187..c406f2d6 100644 --- a/libindy_vdr/src/pool/builder.rs +++ b/libindy_vdr/src/pool/builder.rs @@ -3,12 +3,12 @@ use std::collections::HashMap; use crate::common::error::prelude::*; use crate::config::PoolConfig; -use super::RequestResultMeta; use super::cache::Cache; use super::genesis::PoolTransactions; use super::manager::{LocalPool, SharedPool}; use super::networker::{MakeLocal, MakeShared, ZMQNetworkerFactory}; use 
super::runner::PoolRunner; +use super::RequestResultMeta; /// A utility class for building a new pool instance or runner. #[derive(Clone)] @@ -17,7 +17,7 @@ pub struct PoolBuilder { transactions: PoolTransactions, node_weights: Option>, refreshed: bool, - cache: Option> + cache: Option>, } impl PoolBuilder { @@ -28,7 +28,7 @@ impl PoolBuilder { transactions, node_weights: None, refreshed: false, - cache: None + cache: None, } } @@ -79,7 +79,7 @@ impl PoolBuilder { MakeLocal(ZMQNetworkerFactory {}), self.node_weights, self.refreshed, - self.cache + self.cache, )) } } diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 0a6936bb..dc4e1b60 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -13,6 +13,8 @@ use tokio::sync::RwLock; pub trait CacheStorage: Send + Sync + 'static { async fn get(&self, key: &K) -> Option<(V, u64)>; + async fn remove(&mut self, key: &K) -> Option<(V, u64)>; + async fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)>; } @@ -46,6 +48,12 @@ impl Cache { None => None, } } + pub async fn remove(&mut self, key: &K) -> Option { + match self.storage.write().await.remove(key).await { + Some(item) => Some(item.0), + None => None, + } + } pub async fn insert(&mut self, key: K, value: V) -> Option { let exp_timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -93,7 +101,9 @@ impl Cac async fn get(&self, key: &K) -> Option<(V, u64)> { self.cache.get(key).map(|(v, e)| (v.clone(), *e)) } - + async fn remove(&mut self, key: &K) -> Option<(V, u64)> { + self.cache.remove(key) + } async fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)> { self.cache .insert(key, (value, expiration)) @@ -120,6 +130,10 @@ mod tests { ); thread::sleep(Duration::from_secs(2)); assert_eq!(cache.get(&"key".to_string()).await, None); + assert_eq!( + cache.remove(&"key".to_string()).await, + Some("value".to_string()) + ); }); } } diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 8d9858c7..427fe58a 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -236,10 +236,7 @@ pub async fn perform_ledger_request( if let (RequestResult::Reply(response), meta) = result.as_ref().unwrap() { if let Some(mut cache) = cache_opt { cache - .insert( - cache_key, - (response.to_string(), meta.clone()), - ) + .insert(cache_key, (response.to_string(), meta.clone())) .await; } } diff --git a/libindy_vdr/src/pool/mod.rs b/libindy_vdr/src/pool/mod.rs index c2250e7f..f9debfea 100644 --- a/libindy_vdr/src/pool/mod.rs +++ b/libindy_vdr/src/pool/mod.rs @@ -1,11 +1,11 @@ mod builder; +/// A trait for managing a transaction cache +pub mod cache; mod genesis; /// Transaction request handlers pub(crate) mod handlers; /// Methods for performing requests against the verifier pool pub mod helpers; -/// A trait for managing a transaction cache -pub mod cache; /// General verifier pool management mod manager; /// Pool networker traits and implementations diff --git a/libindy_vdr/src/pool/requests/prepared_request.rs b/libindy_vdr/src/pool/requests/prepared_request.rs index 902cc5ff..8bcede37 100644 --- a/libindy_vdr/src/pool/requests/prepared_request.rs +++ b/libindy_vdr/src/pool/requests/prepared_request.rs @@ -80,9 +80,11 @@ impl PreparedRequest { } } - pub fn get_cache_key(&self) -> VdrResult{ + pub fn get_cache_key(&self) -> VdrResult { let mut req_json = self.req_json.clone(); - let req_map = req_json.as_object_mut().ok_or_else(|| input_err("Invalid request 
JSON"))?; + let req_map = req_json + .as_object_mut() + .ok_or_else(|| input_err("Invalid request JSON"))?; req_map.remove("reqId"); req_map.remove("signature"); serde_json::to_string(&req_json).with_input_err("Invalid request JSON") diff --git a/libindy_vdr/src/resolver/pool.rs b/libindy_vdr/src/resolver/pool.rs index 594b4e76..e3e9cca0 100644 --- a/libindy_vdr/src/resolver/pool.rs +++ b/libindy_vdr/src/resolver/pool.rs @@ -23,7 +23,11 @@ impl PoolResolver { } /// Dereference a DID Url and return a serialized `DereferencingResult` - pub async fn dereference(&self, did_url: &str, cache: Option>) -> VdrResult { + pub async fn dereference( + &self, + did_url: &str, + cache: Option>, + ) -> VdrResult { debug!("PoolResolver: Dereference DID Url {}", did_url); let did_url = DidUrl::parse(did_url)?; let (data, metadata) = self._resolve(&did_url, cache).await?; @@ -49,7 +53,11 @@ impl PoolResolver { } /// Resolve a DID and return a serialized `ResolutionResult` - pub async fn resolve(&self, did: &str, cache: Option>) -> VdrResult { + pub async fn resolve( + &self, + did: &str, + cache: Option>, + ) -> VdrResult { debug!("PoolResolver: Resolve DID {}", did); let did = DidUrl::parse(did)?; let (data, metadata) = self._resolve(&did, cache.clone()).await?; @@ -86,9 +94,10 @@ impl PoolResolver { } else { (None, None) }; - doc.endpoint = fetch_legacy_endpoint(&self.pool, &did.id, seq_no, timestamp, cache) - .await - .ok(); + doc.endpoint = + fetch_legacy_endpoint(&self.pool, &did.id, seq_no, timestamp, cache) + .await + .ok(); } Some(doc.to_value()?) } @@ -105,7 +114,11 @@ impl PoolResolver { } // Internal method to resolve and dereference - async fn _resolve(&self, did_url: &DidUrl, cache: Option>) -> VdrResult<(Result, Metadata)> { + async fn _resolve( + &self, + did_url: &DidUrl, + cache: Option>, + ) -> VdrResult<(Result, Metadata)> { let builder = self.pool.get_request_builder(); let request = build_request(did_url, &builder)?; debug!( diff --git a/libindy_vdr/src/state_proof/mod.rs b/libindy_vdr/src/state_proof/mod.rs index b632ef67..a3ac8144 100644 --- a/libindy_vdr/src/state_proof/mod.rs +++ b/libindy_vdr/src/state_proof/mod.rs @@ -363,7 +363,9 @@ pub(crate) fn verify_parsed_sp( } if let Some(multi_sig) = multi_sig.as_ref() { - let Some((signature, participants, value)) = _parse_reply_for_proof_signature_checking(multi_sig) else { + let Some((signature, participants, value)) = + _parse_reply_for_proof_signature_checking(multi_sig) + else { return Err("State proof parsing of reply failed".into()); }; let verify_err = match _verify_proof_signature( diff --git a/libindy_vdr/tests/utils/pool.rs b/libindy_vdr/tests/utils/pool.rs index 76459b61..93ac29db 100644 --- a/libindy_vdr/tests/utils/pool.rs +++ b/libindy_vdr/tests/utils/pool.rs @@ -59,9 +59,10 @@ impl TestPool { pub fn send_request(&self, prepared_request: &PreparedRequest) -> Result { block_on(async { - let (request_result, _meta) = perform_ledger_request(&self.pool, prepared_request, None) - .await - .unwrap(); + let (request_result, _meta) = + perform_ledger_request(&self.pool, prepared_request, None) + .await + .unwrap(); match request_result { RequestResult::Reply(message) => Ok(message), From ea01382014c6271c313acd9f849eb1c731d3fb0f Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 17 Jan 2024 18:10:08 -0800 Subject: [PATCH 07/32] fixed tests Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache.rs | 1 - libindy_vdr/tests/resolver.rs | 4 ++-- libindy_vdr/tests/utils/pool.rs | 1 - 3 files changed, 2 insertions(+), 4 deletions(-) 
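The resolver test updates below make the new optional cache argument explicit at every call site: passing None opts out of caching, while passing Some(cache) shares one cache across calls. As orientation, a minimal test-style sketch of both conventions against the API as it stands at this point in the series (a MemCacheStorage with no capacity bound, plus an expiry offset in seconds handed to Cache::new); the import paths, helper name, and setup are assumptions for illustration, not part of the patch:

use futures_executor::block_on;
use indy_vdr::pool::cache::{Cache, MemCacheStorage};
use indy_vdr::pool::Pool; // path assumed
use indy_vdr::resolver::PoolResolver; // path assumed

// Hypothetical helper; resolver and qualified_did are prepared the same way
// as in the send_resolver tests below.
fn resolve_both_ways<T: Pool>(resolver: &PoolResolver<T>, qualified_did: &str) {
    // Tests opt out of caching entirely:
    let _uncached = block_on(resolver.resolve(qualified_did, None)).unwrap();

    // Callers that want caching build one and pass it in; Cache implements
    // Clone by sharing the underlying storage, so it is cheap to hand around.
    let cache = Cache::new(MemCacheStorage::new(), 86_400);
    let _cached = block_on(resolver.resolve(qualified_did, Some(cache))).unwrap();
}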
diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index dc4e1b60..3699391f 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -8,7 +8,6 @@ use std::{ use tokio::sync::RwLock; -// cant use async traits yet because not object safe #[async_trait] pub trait CacheStorage: Send + Sync + 'static { async fn get(&self, key: &K) -> Option<(V, u64)>; diff --git a/libindy_vdr/tests/resolver.rs b/libindy_vdr/tests/resolver.rs index aa4a7d46..7d06fe04 100644 --- a/libindy_vdr/tests/resolver.rs +++ b/libindy_vdr/tests/resolver.rs @@ -56,7 +56,7 @@ mod send_resolver { // Resolve DID let resolver = Resolver::new(pool.pool); let qualified_did = format!("did:indy:test:{}", &identity.did); - let result = block_on(resolver.resolve(&qualified_did)).unwrap(); + let result = block_on(resolver.resolve(&qualified_did, None)).unwrap(); let v: serde_json::Value = serde_json::from_str(result.as_str()).unwrap(); @@ -107,7 +107,7 @@ mod send_resolver { let resolver = Resolver::new(pool.pool); let qualified_did = format!("did:indy:test:{}", &identity.did); let did_url = format!("{}?versionId={}", qualified_did, seq_no); - let result = block_on(resolver.resolve(&did_url)).unwrap(); + let result = block_on(resolver.resolve(&did_url, None)).unwrap(); let v: serde_json::Value = serde_json::from_str(result.as_str()).unwrap(); diff --git a/libindy_vdr/tests/utils/pool.rs b/libindy_vdr/tests/utils/pool.rs index 93ac29db..283bf874 100644 --- a/libindy_vdr/tests/utils/pool.rs +++ b/libindy_vdr/tests/utils/pool.rs @@ -63,7 +63,6 @@ impl TestPool { perform_ledger_request(&self.pool, prepared_request, None) .await .unwrap(); - match request_result { RequestResult::Reply(message) => Ok(message), RequestResult::Failed(err) => Err(err.extra().unwrap_or_default()), From 43623a9065c3c413a2809e27fde3e05d8848e60f Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 18 Jan 2024 13:42:32 -0800 Subject: [PATCH 08/32] Added support for LRU memCache Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 40 ++------- libindy_vdr/Cargo.toml | 2 +- libindy_vdr/src/pool/cache.rs | 147 +++++++++++++++++--------------- libindy_vdr/src/pool/helpers.rs | 5 +- libindy_vdr/src/pool/runner.rs | 36 ++++---- 5 files changed, 112 insertions(+), 118 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 508d1e4d..9dc4441e 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -37,7 +37,6 @@ use hyper_tls::HttpsConnector; use hyper_unix_connector::UnixConnector; use indy_vdr::pool::cache::{Cache, MemCacheStorage}; -use indy_vdr::pool::RequestResultMeta; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] @@ -183,23 +182,11 @@ async fn init_app_state( Ok(state) } -async fn run_pools( - state: Rc>, - init_refresh: bool, - interval_refresh: u32, - cache: Option>, -) { +async fn run_pools(state: Rc>, init_refresh: bool, interval_refresh: u32) { let mut pool_states = HashMap::new(); for (namespace, pool_state) in &state.clone().borrow().pool_states { - let pool_state = match create_pool( - state.clone(), - namespace.as_str(), - init_refresh, - cache.clone(), - ) - .await - { + let pool_state = match create_pool(state.clone(), namespace.as_str(), init_refresh).await { Ok(pool) => { let pool = Some(pool.clone()); PoolState { @@ -227,7 +214,7 @@ async fn run_pools( if interval_refresh > 0 { loop { select! 
{ - refresh_result = refresh_pools(state.clone(), interval_refresh, cache.clone()) => { + refresh_result = refresh_pools(state.clone(), interval_refresh) => { match refresh_result { Ok(upd_pool_states) => { state.borrow_mut().pool_states = upd_pool_states; @@ -272,14 +259,13 @@ async fn create_pool( state: Rc>, namespace: &str, refresh: bool, - cache: Option>, ) -> VdrResult { let pool_states = &state.borrow().pool_states; let pool_state = pool_states.get(namespace).unwrap(); let pool = PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone()).into_local()?; let refresh_pool = if refresh { - refresh_pool(state.clone(), &pool, 0, cache).await? + refresh_pool(state.clone(), &pool, 0).await? } else { None }; @@ -290,14 +276,12 @@ async fn refresh_pools( state: Rc>, // pool_states: HashMap, delay_mins: u32, - cache: Option>, ) -> VdrResult> { let mut upd_pool_states = HashMap::new(); let pool_states = &state.borrow().pool_states; for (namespace, pool_state) in pool_states { if let Some(pool) = &pool_state.pool { - let upd_pool = match refresh_pool(state.clone(), pool, delay_mins, cache.clone()).await - { + let upd_pool = match refresh_pool(state.clone(), pool, delay_mins).await { Ok(p) => p, Err(err) => { eprintln!( @@ -324,14 +308,13 @@ async fn refresh_pool( state: Rc>, pool: &LocalPool, delay_mins: u32, - cache: Option>, ) -> VdrResult> { let n_pools = state.borrow().pool_states.len() as u32; if delay_mins > 0 { tokio::time::sleep(Duration::from_secs((delay_mins * 60 / n_pools) as u64)).await } - let (txns, _meta) = perform_refresh(pool, cache).await?; + let (txns, _meta) = perform_refresh(pool).await?; if let Some(txns) = txns { let pool = PoolBuilder::new(PoolConfig::default(), txns) .refreshed(true) @@ -445,19 +428,14 @@ where I::Error: Into>, { let cache = if config.cache { - let mem_storage = MemCacheStorage::new(); - let mem_cache = Cache::new(mem_storage, 86400); + let mem_storage = MemCacheStorage::new(1024); + let mem_cache = Cache::new(mem_storage); Some(mem_cache) } else { None }; - let until_done = run_pools( - state.clone(), - config.init_refresh, - config.interval_refresh, - cache.clone(), - ); + let until_done = run_pools(state.clone(), config.init_refresh, config.interval_refresh); let svc = make_service_fn(move |_| { let state = state.clone(); let cache = cache.clone(); diff --git a/libindy_vdr/Cargo.toml b/libindy_vdr/Cargo.toml index 0eaea5dd..16daf6cb 100644 --- a/libindy_vdr/Cargo.toml +++ b/libindy_vdr/Cargo.toml @@ -60,8 +60,8 @@ thiserror = "1.0" time = { version = "=0.3.20", features = ["parsing"] } url = "2.2.2" zmq = "0.9" -tokio = { version = "1.35.1", features = ["full"] } async-trait = "0.1.77" +async-lock = "3.3.0" [dev-dependencies] rstest = "0.18" diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 3699391f..9eeb6969 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -1,112 +1,120 @@ use async_trait::async_trait; use std::{ - collections::HashMap, + collections::{BTreeMap, HashMap}, hash::Hash, sync::Arc, - time::{SystemTime, UNIX_EPOCH}, }; -use tokio::sync::RwLock; +use async_lock::Mutex; #[async_trait] pub trait CacheStorage: Send + Sync + 'static { - async fn get(&self, key: &K) -> Option<(V, u64)>; + // Needs to be mutable bc some implementations may need to update the the LRU index of the cache + async fn get(&mut self, key: &K) -> Option; - async fn remove(&mut self, key: &K) -> Option<(V, u64)>; + async fn remove(&mut self, key: &K) -> Option; - async fn insert(&mut self, key: 
K, value: V, expiration: u64) -> Option<(V, u64)>; + async fn insert(&mut self, key: K, value: V) -> Option; } pub struct Cache { - storage: Arc>>, - expiration_offset: u64, + storage: Arc>>, } impl Cache { - pub fn new(storage: impl CacheStorage, expiration_offset: u64) -> Self { + pub fn new(storage: impl CacheStorage) -> Self { Self { - storage: Arc::new(RwLock::new(storage)), - expiration_offset, + storage: Arc::new(Mutex::new(storage)), } } - pub async fn get(&self, key: &K) -> Option { - match self.storage.read().await.get(key).await { - Some((item, expiry)) => { - if expiry > 0 - && expiry - < SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() - { - None - } else { - Some(item) - } - } - None => None, - } + pub async fn get(&mut self, key: &K) -> Option { + self.storage.lock().await.get(key).await } pub async fn remove(&mut self, key: &K) -> Option { - match self.storage.write().await.remove(key).await { - Some(item) => Some(item.0), - None => None, - } + self.storage.lock().await.remove(key).await } pub async fn insert(&mut self, key: K, value: V) -> Option { - let exp_timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() - + self.expiration_offset; - match self - .storage - .write() - .await - .insert(key, value, exp_timestamp) - .await - { - Some(item) => Some(item.0), - None => None, - } + self.storage.lock().await.insert(key, value).await } } -// need to implement Clone manually because RwLock doesn't implement Clone +// need to implement Clone manually because Mutex doesn't implement Clone impl Clone for Cache { fn clone(&self) -> Self { Self { storage: self.storage.clone(), - expiration_offset: self.expiration_offset, } } } +/// A simple in-memory LRU cache +/// Uses a hashmap for lookups and a BTreeMap for ordering by least recently used pub struct MemCacheStorage { - cache: HashMap, + cache_lookup: HashMap, + cache_order: BTreeMap, + capacity: usize, } impl MemCacheStorage { - pub fn new() -> Self { + pub fn new(capacity: usize) -> Self { Self { - cache: HashMap::new(), + cache_lookup: HashMap::new(), + cache_order: BTreeMap::new(), + capacity, } } } #[async_trait] -impl CacheStorage - for MemCacheStorage +impl + CacheStorage for MemCacheStorage { - async fn get(&self, key: &K) -> Option<(V, u64)> { - self.cache.get(key).map(|(v, e)| (v.clone(), *e)) + async fn get(&mut self, key: &K) -> Option { + // move the key to the end of the LRU index + // this is O(log(n)) in the worst case, but in the average case it's close to O(1) + match self.cache_lookup.get(key) { + Some((v, ts)) => { + self.cache_order.remove(ts).unwrap(); + self.cache_order.entry(ts + 1).or_insert(key.clone()); + Some(v.clone()) + } + None => None, + } } - async fn remove(&mut self, key: &K) -> Option<(V, u64)> { - self.cache.remove(key) + async fn remove(&mut self, key: &K) -> Option { + let lru_val = self.cache_lookup.remove(key); + match lru_val { + Some((v, ts)) => { + self.cache_order.remove(&ts); + Some(v) + } + None => None, + } } - async fn insert(&mut self, key: K, value: V, expiration: u64) -> Option<(V, u64)> { - self.cache - .insert(key, (value, expiration)) - .map(|(v, e)| (v.clone(), e)) + async fn insert(&mut self, key: K, value: V) -> Option { + // this will be O(log(n)) in all cases when cache is at capacity since we need to fetch the first and last element from the btree + let highest_lru = self + .cache_order + .last_key_value() + .map(|(ts, _)| ts + 1) + .unwrap_or(0); + if self.cache_lookup.len() >= self.capacity { + // remove the LRU 
item + let (lru_ts, lru_key) = match self.cache_order.first_key_value() { + Some((ts, key)) => (*ts, key.clone()), + None => return None, + }; + self.cache_lookup.remove(&lru_key); + self.cache_order.remove(&lru_ts); + }; + + self.cache_order.insert(highest_lru, key.clone()); + match self + .cache_lookup + .insert(key.clone(), (value.clone(), highest_lru)) + { + Some((v, _)) => Some(v), + None => None, + } } } @@ -118,21 +126,26 @@ mod tests { #[rstest] fn test_cache() { use super::*; - use std::{thread, time::Duration}; - let mut cache = Cache::new(MemCacheStorage::new(), 1); + let mut cache = Cache::new(MemCacheStorage::new(2)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; assert_eq!( cache.get(&"key".to_string()).await, Some("value".to_string()) ); - thread::sleep(Duration::from_secs(2)); + cache.insert("key1".to_string(), "value1".to_string()).await; + cache.insert("key2".to_string(), "value2".to_string()).await; assert_eq!(cache.get(&"key".to_string()).await, None); + cache.insert("key3".to_string(), "value3".to_string()).await; + cache.get(&"key2".to_string()).await; // move key2 to the end of the LRU index + cache.insert("key4".to_string(), "value4".to_string()).await; + // key3 should be evicted assert_eq!( - cache.remove(&"key".to_string()).await, - Some("value".to_string()) + cache.remove(&"key2".to_string()).await, + Some("value2".to_string()) ); + assert_eq!(cache.remove(&"key3".to_string()).await, None); }); } } diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 427fe58a..9f5a2618 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -93,9 +93,8 @@ pub async fn perform_pool_catchup_request( /// Perform a pool ledger status request followed by a catchup request if necessary pub async fn perform_refresh( pool: &T, - cache: Option>, ) -> VdrResult<(Option, RequestResultMeta)> { - let (result, meta) = perform_pool_status_request(pool, cache).await?; + let (result, meta) = perform_pool_status_request(pool, None).await?; trace!("Got status result: {:?}", &result); match result { RequestResult::Reply(target) => match target { @@ -224,7 +223,7 @@ pub async fn perform_ledger_request( let cache_key = prepared.get_cache_key()?; if is_read_req { - if let Some(ref cache) = cache_opt { + if let Some(mut cache) = cache_opt.clone() { if let Some((response, meta)) = cache.get(&cache_key).await { return Ok((RequestResult::Reply(response), meta)); } diff --git a/libindy_vdr/src/pool/runner.rs b/libindy_vdr/src/pool/runner.rs index b4b68996..2d4f892a 100644 --- a/libindy_vdr/src/pool/runner.rs +++ b/libindy_vdr/src/pool/runner.rs @@ -51,8 +51,8 @@ impl PoolRunner { refreshed, ) .unwrap(); - let mut thread = PoolThread::new(pool, receiver); - thread.run(cache); + let mut thread = PoolThread::new(pool, receiver, cache); + thread.run(); debug!("Pool thread ended") }); Self { @@ -161,23 +161,31 @@ impl PoolRunnerStatus { struct PoolThread { pool: LocalPool, receiver: UnboundedReceiver, + cache: Option>, } impl PoolThread { - fn new(pool: LocalPool, receiver: UnboundedReceiver) -> Self { - Self { pool, receiver } + fn new( + pool: LocalPool, + receiver: UnboundedReceiver, + cache: Option>, + ) -> Self { + Self { + pool, + receiver, + cache, + } } - fn run(&mut self, cache: Option>) { - block_on(self.run_loop(cache)) + fn run(&mut self) { + block_on(self.run_loop()) } - async fn run_loop(&mut self, cache: Option>) { + async fn run_loop(&mut self) { let mut futures = FuturesUnordered::new(); let receiver = 
&mut self.receiver; loop { - let cache_ledger_request = cache.clone(); - let cache_pool_refresh = cache.clone(); + let cache_ledger_request = self.cache.clone(); select! { recv_evt = receiver.next() => { match recv_evt { @@ -199,7 +207,7 @@ impl PoolThread { callback(vers); } Some(PoolEvent::Refresh(callback)) => { - let fut = _perform_refresh(&self.pool, callback, cache_pool_refresh); + let fut = _perform_refresh(&self.pool, callback); futures.push(fut.boxed_local()); } Some(PoolEvent::SendRequest(request, callback)) => { @@ -221,12 +229,8 @@ impl PoolThread { } } -async fn _perform_refresh( - pool: &LocalPool, - callback: Callback, - cache: Option>, -) { - let result = perform_refresh(pool, cache).await; +async fn _perform_refresh(pool: &LocalPool, callback: Callback) { + let result = perform_refresh(pool).await; callback(result); } From 8e5f2dc5e48b0f82062c0849134df3b4c842427f Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 18 Jan 2024 18:07:14 -0800 Subject: [PATCH 09/32] Added a TTL cache and improved LRU cache Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 4 +- libindy_vdr/src/pool/cache.rs | 192 ++++++++++++++++++++++++++++------ 2 files changed, 164 insertions(+), 32 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 9dc4441e..d4cea332 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -36,7 +36,7 @@ use hyper_tls::HttpsConnector; #[cfg(unix)] use hyper_unix_connector::UnixConnector; -use indy_vdr::pool::cache::{Cache, MemCacheStorage}; +use indy_vdr::pool::cache::{Cache, MemCacheStorageTTL}; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] @@ -428,7 +428,7 @@ where I::Error: Into>, { let cache = if config.cache { - let mem_storage = MemCacheStorage::new(1024); + let mem_storage = MemCacheStorageTTL::new(1024); let mem_cache = Cache::new(mem_storage); Some(mem_cache) } else { diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 9eeb6969..5de42d58 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -2,15 +2,16 @@ use async_trait::async_trait; use std::{ collections::{BTreeMap, HashMap}, hash::Hash, + ops::DerefMut, sync::Arc, + time::SystemTime, }; use async_lock::Mutex; #[async_trait] pub trait CacheStorage: Send + Sync + 'static { - // Needs to be mutable bc some implementations may need to update the the LRU index of the cache - async fn get(&mut self, key: &K) -> Option; + async fn get(&self, key: &K) -> Option; async fn remove(&mut self, key: &K) -> Option; @@ -47,44 +48,140 @@ impl Clone for Cache { } } -/// A simple in-memory LRU cache +/// A simple in-memory cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. 
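One thing worth making explicit about the storage introduced here: at this point in the series it only orders entries by insertion time (milliseconds since startup_time) and evicts oldest-first once capacity is reached; get() does not yet expire anything, and true time-based expiry lands in a later commit below. A minimal test-style sketch of that eviction contract, with illustrative keys and the one-argument constructor from this patch:

use futures_executor::block_on;
use indy_vdr::pool::cache::{Cache, MemCacheStorageTTL};

fn oldest_first_eviction_sketch() {
    let mut cache = Cache::new(MemCacheStorageTTL::new(2));
    block_on(async {
        cache.insert("a".to_string(), "1".to_string()).await;
        cache.insert("b".to_string(), "2".to_string()).await;
        // Capacity reached: inserting a new key evicts the oldest entry, "a".
        cache.insert("c".to_string(), "3".to_string()).await;
        assert_eq!(cache.get(&"a".to_string()).await, None);
        assert_eq!(cache.get(&"c".to_string()).await, Some("3".to_string()));
    });
}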
+/// Uses a hashmap for lookups and a BTreeMap for ordering by age +pub struct MemCacheStorageTTL { + store: (HashMap, BTreeMap>), + capacity: usize, + startup_time: SystemTime, +} + +impl MemCacheStorageTTL { + pub fn new(capacity: usize) -> Self { + Self { + store: (HashMap::new(), BTreeMap::new()), + capacity, + startup_time: SystemTime::now(), + } + } +} + +#[async_trait] +impl + CacheStorage for MemCacheStorageTTL +{ + async fn get(&self, key: &K) -> Option { + let (cache_lookup, _) = &self.store; + match cache_lookup.get(key) { + Some((v, _)) => Some(v.clone()), + None => None, + } + } + async fn remove(&mut self, key: &K) -> Option { + let (cache_lookup, cache_order) = &mut self.store; + let ttl_val = cache_lookup.remove(key); + match ttl_val { + Some((v, ts)) => { + let val = cache_order.get_mut(&ts).unwrap(); + if val.len() <= 1 { + cache_order.remove(&ts); + } else { + val.retain(|k| k != key); + } + Some(v) + } + None => None, + } + } + async fn insert(&mut self, key: K, value: V) -> Option { + let (cache_lookup, cache_order) = &mut self.store; + let ts = SystemTime::now() + .duration_since(self.startup_time) + .unwrap() + .as_millis(); + // only remove the oldest item if the cache is full and the key is not already in the cache + if cache_lookup.len() >= self.capacity && cache_lookup.get(&key).is_none() { + // remove the oldest item + let (oldest_ts_ref, _) = cache_order.first_key_value().unwrap(); + let oldest_ts = *oldest_ts_ref; + let oldest_keys = cache_order.get_mut(&oldest_ts).unwrap(); + let removal_key = oldest_keys.first().and_then(|k| Some(k.clone())); + if oldest_keys.len() <= 1 { + // remove the whole array since it's the last entry + cache_order.remove(&oldest_ts); + } else { + oldest_keys.swap_remove(0); + } + cache_lookup.remove(&key); + if let Some(removal_key) = removal_key { + cache_lookup.remove(&removal_key); + } + }; + + // if value is overwritten when inserting a new key, we need to remove the old key from the order index + cache_order.entry(ts).or_insert(vec![]).push(key.clone()); + match cache_lookup.insert(key.clone(), (value.clone(), ts)) { + Some((v, ts)) => { + if let Some(ord_keys) = cache_order.get_mut(&ts) { + if ord_keys.len() <= 1 { + cache_order.remove(&ts); + } else { + ord_keys.retain(|k| k != &key); + } + } + Some(v) + } + None => None, + } + } +} + +/// A simple in-memory LRU cache. Once the cache fills up, the least recently used entry is evicted. 
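The recency rule here is easy to get backwards, so to pin it down: a get() re-orders an entry to most recently used, which means that at capacity the untouched key is the one evicted. A minimal test-style sketch with illustrative keys, mirroring the updated test at the end of this patch:

use futures_executor::block_on;
use indy_vdr::pool::cache::{Cache, MemCacheStorageLRU};

fn lru_recency_sketch() {
    let mut cache = Cache::new(MemCacheStorageLRU::new(2));
    block_on(async {
        cache.insert("a".to_string(), "1".to_string()).await;
        cache.insert("b".to_string(), "2".to_string()).await;
        // Touch "a" so it becomes the most recently used entry.
        cache.get(&"a".to_string()).await;
        // At capacity, inserting "c" evicts the least recently used key, "b".
        cache.insert("c".to_string(), "3".to_string()).await;
        assert_eq!(cache.get(&"b".to_string()).await, None);
        assert_eq!(cache.get(&"a".to_string()).await, Some("1".to_string()));
    });
}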
/// Uses a hashmap for lookups and a BTreeMap for ordering by least recently used -pub struct MemCacheStorage { - cache_lookup: HashMap, - cache_order: BTreeMap, +pub struct MemCacheStorageLRU { + // The store is wrapped in an arc and a mutex so that get() can be immutable + store: Arc, BTreeMap)>>, capacity: usize, } -impl MemCacheStorage { +impl MemCacheStorageLRU { pub fn new(capacity: usize) -> Self { Self { - cache_lookup: HashMap::new(), - cache_order: BTreeMap::new(), + store: Arc::new(Mutex::new((HashMap::new(), BTreeMap::new()))), capacity, } } } #[async_trait] impl - CacheStorage for MemCacheStorage + CacheStorage for MemCacheStorageLRU { - async fn get(&mut self, key: &K) -> Option { + async fn get(&self, key: &K) -> Option { // move the key to the end of the LRU index // this is O(log(n)) in the worst case, but in the average case it's close to O(1) - match self.cache_lookup.get(key) { + let mut store_lock = self.store.lock().await; + let (cache_lookup, cache_order) = store_lock.deref_mut(); + let highest_lru = cache_order + .last_key_value() + .map(|(hts, _)| hts + 1) + .unwrap_or(0); + match cache_lookup.get_mut(key) { Some((v, ts)) => { - self.cache_order.remove(ts).unwrap(); - self.cache_order.entry(ts + 1).or_insert(key.clone()); + cache_order.remove(ts).unwrap(); + cache_order.entry(highest_lru).or_insert(key.clone()); + *ts = highest_lru; Some(v.clone()) } None => None, } } async fn remove(&mut self, key: &K) -> Option { - let lru_val = self.cache_lookup.remove(key); + let mut store_lock = self.store.lock().await; + let (cache_lookup, cache_order) = store_lock.deref_mut(); + let lru_val = cache_lookup.remove(key); match lru_val { Some((v, ts)) => { - self.cache_order.remove(&ts); + cache_order.remove(&ts); Some(v) } None => None, @@ -92,27 +189,29 @@ impl Option { // this will be O(log(n)) in all cases when cache is at capacity since we need to fetch the first and last element from the btree - let highest_lru = self - .cache_order + let mut store_lock = self.store.lock().await; + let (cache_lookup, cache_order) = store_lock.deref_mut(); + let highest_lru = cache_order .last_key_value() .map(|(ts, _)| ts + 1) .unwrap_or(0); - if self.cache_lookup.len() >= self.capacity { + if cache_lookup.len() >= self.capacity && cache_lookup.get(&key).is_none() { // remove the LRU item - let (lru_ts, lru_key) = match self.cache_order.first_key_value() { + let (lru_ts, lru_key) = match cache_order.first_key_value() { Some((ts, key)) => (*ts, key.clone()), None => return None, }; - self.cache_lookup.remove(&lru_key); - self.cache_order.remove(&lru_ts); + cache_lookup.remove(&lru_key); + cache_order.remove(&lru_ts); }; - self.cache_order.insert(highest_lru, key.clone()); - match self - .cache_lookup - .insert(key.clone(), (value.clone(), highest_lru)) - { - Some((v, _)) => Some(v), + // if value is overwritten when inserting a new key, we need to remove the old key from the order index + cache_order.insert(highest_lru, key.clone()); + match cache_lookup.insert(key.clone(), (value.clone(), highest_lru)) { + Some((v, ts)) => { + cache_order.remove(&ts); + Some(v) + } None => None, } } @@ -121,13 +220,15 @@ impl Date: Thu, 18 Jan 2024 18:12:42 -0800 Subject: [PATCH 10/32] changed cache lock to rwlock Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 5de42d58..9cbe9cbc 100644 --- a/libindy_vdr/src/pool/cache.rs +++ 
b/libindy_vdr/src/pool/cache.rs @@ -7,7 +7,7 @@ use std::{ time::SystemTime, }; -use async_lock::Mutex; +use async_lock::{Mutex, RwLock}; #[async_trait] pub trait CacheStorage: Send + Sync + 'static { @@ -19,23 +19,23 @@ pub trait CacheStorage: Send + Sync + 'static { } pub struct Cache { - storage: Arc>>, + storage: Arc>>, } impl Cache { pub fn new(storage: impl CacheStorage) -> Self { Self { - storage: Arc::new(Mutex::new(storage)), + storage: Arc::new(RwLock::new(storage)), } } pub async fn get(&mut self, key: &K) -> Option { - self.storage.lock().await.get(key).await + self.storage.read().await.get(key).await } pub async fn remove(&mut self, key: &K) -> Option { - self.storage.lock().await.remove(key).await + self.storage.write().await.remove(key).await } pub async fn insert(&mut self, key: K, value: V) -> Option { - self.storage.lock().await.insert(key, value).await + self.storage.write().await.insert(key, value).await } } From cb792612582800eaf6d00191419881e8a45b6ccd Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 18 Jan 2024 18:59:23 -0800 Subject: [PATCH 11/32] updated ttl cache expiration policy Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 2 +- libindy_vdr/src/pool/cache.rs | 56 ++++++++++++++++++++++++++++++----- 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index d4cea332..c2b967a5 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -428,7 +428,7 @@ where I::Error: Into>, { let cache = if config.cache { - let mem_storage = MemCacheStorageTTL::new(1024); + let mem_storage = MemCacheStorageTTL::new(1024, Duration::from_secs(86400).as_millis()); let mem_cache = Cache::new(mem_storage); Some(mem_cache) } else { diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index 9cbe9cbc..eba2c86e 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -1,6 +1,7 @@ use async_trait::async_trait; use std::{ collections::{BTreeMap, HashMap}, + fmt::Debug, hash::Hash, ops::DerefMut, sync::Arc, @@ -54,26 +55,45 @@ pub struct MemCacheStorageTTL { store: (HashMap, BTreeMap>), capacity: usize, startup_time: SystemTime, + expire_after: u128, } impl MemCacheStorageTTL { - pub fn new(capacity: usize) -> Self { + /// Create a new cache with the given capacity and expiration time in milliseconds + pub fn new(capacity: usize, expire_after: u128) -> Self { Self { store: (HashMap::new(), BTreeMap::new()), capacity, startup_time: SystemTime::now(), + expire_after, + } + } + fn get_oldest_ts(cache_order: &mut BTreeMap>) -> u128 { + match cache_order.first_key_value() { + Some((oldest_ts_ref, _)) => *oldest_ts_ref, + None => 0, } } } #[async_trait] -impl +impl CacheStorage for MemCacheStorageTTL { async fn get(&self, key: &K) -> Option { let (cache_lookup, _) = &self.store; match cache_lookup.get(key) { - Some((v, _)) => Some(v.clone()), + Some((v, ts)) => { + let current_time = SystemTime::now() + .duration_since(self.startup_time) + .unwrap() + .as_millis(); + if current_time < ts + self.expire_after { + Some(v.clone()) + } else { + None + } + } None => None, } } @@ -99,11 +119,31 @@ impl 0 { + let oldest_ts = Self::get_oldest_ts(cache_order); + if ts > oldest_ts + self.expire_after { + let expired_keys = cache_order.get(&oldest_ts).unwrap(); + for key in expired_keys.iter() { + println!( + "removing expired key: {:?}, exp_time {:?}, oldest entry date {:?}", + key, + ts + self.expire_after, + oldest_ts + ); + cache_lookup.remove(key); + } + 
cache_order.remove(&oldest_ts); + } else { + break; + } + } + + // remove the oldest item if the cache is still full if cache_lookup.len() >= self.capacity && cache_lookup.get(&key).is_none() { // remove the oldest item - let (oldest_ts_ref, _) = cache_order.first_key_value().unwrap(); - let oldest_ts = *oldest_ts_ref; + let oldest_ts = Self::get_oldest_ts(cache_order); let oldest_keys = cache_order.get_mut(&oldest_ts).unwrap(); let removal_key = oldest_keys.first().and_then(|k| Some(k.clone())); if oldest_keys.len() <= 1 { @@ -255,7 +295,7 @@ mod tests { fn test_cache_ttl() { use super::*; - let mut cache = Cache::new(MemCacheStorageTTL::new(2)); + let mut cache = Cache::new(MemCacheStorageTTL::new(2, 5)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; thread::sleep(std::time::Duration::from_millis(1)); @@ -278,6 +318,8 @@ mod tests { Some("value3".to_string()) ); cache.insert("key5".to_string(), "value5".to_string()).await; + thread::sleep(std::time::Duration::from_millis(6)); + assert_eq!(cache.get(&"key5".to_string()).await, None); }); } } From c54dc01aee39b81e42f2e2014838e2cdc48c1675 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 19 Jan 2024 10:16:39 -0800 Subject: [PATCH 12/32] factored ordered hash map code Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache.rs | 266 +++++++++++++++++++--------------- 1 file changed, 149 insertions(+), 117 deletions(-) diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache.rs index eba2c86e..62e9830b 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache.rs @@ -3,7 +3,6 @@ use std::{ collections::{BTreeMap, HashMap}, fmt::Debug, hash::Hash, - ops::DerefMut, sync::Arc, time::SystemTime, }; @@ -49,10 +48,120 @@ impl Clone for Cache { } } +/// A hashmap that also maintains a BTreeMap of keys ordered by a given value +/// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries +struct OrderedHashMap((HashMap, BTreeMap>)); + +impl OrderedHashMap { + fn new() -> Self { + Self((HashMap::new(), BTreeMap::new())) + } +} + +impl OrderedHashMap { + fn len(&self) -> usize { + let (lookup, _) = &self.0; + lookup.len() + } + fn get(&self, key: &K) -> Option<&(O, V)> { + let (lookup, _) = &self.0; + lookup.get(key) + } + fn get_key_value( + &self, + selector: Box>) -> Option<(&O, &Vec)>>, + ) -> Option<(&K, &O, &V)> { + let (lookup, ordered_lookup) = &self.0; + selector(ordered_lookup).and_then(|(_, keys)| { + keys.first() + .and_then(|key| lookup.get(key).and_then(|(o, v)| Some((key, o, v)))) + }) + } + /// gets the entry with the lowest order value + fn get_first_key_value(&self) -> Option<(&K, &O, &V)> { + self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.first_key_value())) + } + /// gets the entry with the highest order value + fn get_last_key_value(&self) -> Option<(&K, &O, &V)> { + self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.last_key_value())) + } + /// re-orders the entry with the given key + fn re_order(&mut self, key: &K, new_order: O) { + let (lookup, order_lookup) = &mut self.0; + if let Some((old_order, _)) = lookup.get(key) { + // remove entry in btree + match order_lookup.get_mut(old_order) { + Some(keys) => { + keys.retain(|k| k != key); + if keys.len() == 0 { + order_lookup.remove(old_order); + } + } + None => {} + } + } + order_lookup + .entry(new_order) + .or_insert(vec![]) + .push(key.clone()); + lookup.get_mut(key).map(|(o, _)| *o = new_order); + } + /// inserts a new entry 
with the given key and value and order + fn insert(&mut self, key: K, value: V, order: O) -> Option { + let (lookup, order_lookup) = &mut self.0; + + if let Some((old_order, _)) = lookup.get(&key) { + // remove entry in btree + match order_lookup.get_mut(old_order) { + Some(keys) => { + keys.retain(|k| k != &key); + if keys.len() == 0 { + order_lookup.remove(old_order); + } + } + None => {} + } + } + order_lookup + .entry(order) + .or_insert(vec![]) + .push(key.clone()); + lookup + .insert(key, (order, value)) + .and_then(|(_, v)| Some(v)) + } + /// removes the entry with the given key + fn remove(&mut self, key: &K) -> Option<(O, V)> { + let (lookup, order_lookup) = &mut self.0; + lookup.remove(key).and_then(|(order, v)| { + match order_lookup.get_mut(&order) { + Some(keys) => { + keys.retain(|k| k != key); + if keys.len() == 0 { + order_lookup.remove(&order); + } + } + None => {} + } + Some((order, v)) + }) + } + /// removes the entry with the lowest order value + fn remove_first(&mut self) -> Option<(K, O, V)> { + let first_key = self.get_first_key_value().map(|(k, _, _)| k.clone()); + if let Some(first_key) = first_key { + self.remove(&first_key) + .map(|(order, v)| (first_key, order, v)) + } else { + None + } + } +} + /// A simple in-memory cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. /// Uses a hashmap for lookups and a BTreeMap for ordering by age pub struct MemCacheStorageTTL { - store: (HashMap, BTreeMap>), + store: OrderedHashMap, capacity: usize, startup_time: SystemTime, expire_after: u128, @@ -62,18 +171,12 @@ impl MemCacheStorageTTL { /// Create a new cache with the given capacity and expiration time in milliseconds pub fn new(capacity: usize, expire_after: u128) -> Self { Self { - store: (HashMap::new(), BTreeMap::new()), + store: OrderedHashMap::new(), capacity, startup_time: SystemTime::now(), expire_after, } } - fn get_oldest_ts(cache_order: &mut BTreeMap>) -> u128 { - match cache_order.first_key_value() { - Some((oldest_ts_ref, _)) => *oldest_ts_ref, - None => 0, - } - } } #[async_trait] @@ -81,9 +184,8 @@ impl for MemCacheStorageTTL { async fn get(&self, key: &K) -> Option { - let (cache_lookup, _) = &self.store; - match cache_lookup.get(key) { - Some((v, ts)) => { + match self.store.get(key) { + Some((ts, v)) => { let current_time = SystemTime::now() .duration_since(self.startup_time) .unwrap() @@ -98,81 +200,36 @@ impl Option { - let (cache_lookup, cache_order) = &mut self.store; - let ttl_val = cache_lookup.remove(key); - match ttl_val { - Some((v, ts)) => { - let val = cache_order.get_mut(&ts).unwrap(); - if val.len() <= 1 { - cache_order.remove(&ts); - } else { - val.retain(|k| k != key); - } - Some(v) - } - None => None, - } + self.store.remove(key).map(|(_, v)| v) } async fn insert(&mut self, key: K, value: V) -> Option { - let (cache_lookup, cache_order) = &mut self.store; - let ts = SystemTime::now() + let current_ts = SystemTime::now() .duration_since(self.startup_time) .unwrap() .as_millis(); // remove expired entries - while cache_order.len() > 0 { - let oldest_ts = Self::get_oldest_ts(cache_order); - if ts > oldest_ts + self.expire_after { - let expired_keys = cache_order.get(&oldest_ts).unwrap(); - for key in expired_keys.iter() { - println!( - "removing expired key: {:?}, exp_time {:?}, oldest entry date {:?}", - key, - ts + self.expire_after, - oldest_ts - ); - cache_lookup.remove(key); - } - cache_order.remove(&oldest_ts); - } else { - break; - } + let exp_offset = self.expire_after; + while 
self.store.len() > 0 + && self + .store + .get_first_key_value() + .map(|(_, ts, _)| ts + exp_offset < current_ts) + .unwrap_or(false) + { + self.store.remove_first(); } // remove the oldest item if the cache is still full - if cache_lookup.len() >= self.capacity && cache_lookup.get(&key).is_none() { + if self.store.len() >= self.capacity && self.store.get(&key).is_none() { // remove the oldest item - let oldest_ts = Self::get_oldest_ts(cache_order); - let oldest_keys = cache_order.get_mut(&oldest_ts).unwrap(); - let removal_key = oldest_keys.first().and_then(|k| Some(k.clone())); - if oldest_keys.len() <= 1 { - // remove the whole array since it's the last entry - cache_order.remove(&oldest_ts); - } else { - oldest_keys.swap_remove(0); - } - cache_lookup.remove(&key); + let removal_key = self.store.get_first_key_value().map(|(k, _, _)| k.clone()); if let Some(removal_key) = removal_key { - cache_lookup.remove(&removal_key); + self.store.remove(&removal_key); } }; - // if value is overwritten when inserting a new key, we need to remove the old key from the order index - cache_order.entry(ts).or_insert(vec![]).push(key.clone()); - match cache_lookup.insert(key.clone(), (value.clone(), ts)) { - Some((v, ts)) => { - if let Some(ord_keys) = cache_order.get_mut(&ts) { - if ord_keys.len() <= 1 { - cache_order.remove(&ts); - } else { - ord_keys.retain(|k| k != &key); - } - } - Some(v) - } - None => None, - } + self.store.insert(key, value, current_ts) } } @@ -180,14 +237,14 @@ impl { // The store is wrapped in an arc and a mutex so that get() can be immutable - store: Arc, BTreeMap)>>, + store: Arc>>, capacity: usize, } impl MemCacheStorageLRU { pub fn new(capacity: usize) -> Self { Self { - store: Arc::new(Mutex::new((HashMap::new(), BTreeMap::new()))), + store: Arc::new(Mutex::new(OrderedHashMap::new())), capacity, } } @@ -198,62 +255,37 @@ impl Option { // move the key to the end of the LRU index - // this is O(log(n)) in the worst case, but in the average case it's close to O(1) + // this is O(log(n)) let mut store_lock = self.store.lock().await; - let (cache_lookup, cache_order) = store_lock.deref_mut(); - let highest_lru = cache_order - .last_key_value() - .map(|(hts, _)| hts + 1) + let highest_lru = store_lock + .get_last_key_value() + .map(|(_, ts, _)| ts + 1) .unwrap_or(0); - match cache_lookup.get_mut(key) { - Some((v, ts)) => { - cache_order.remove(ts).unwrap(); - cache_order.entry(highest_lru).or_insert(key.clone()); - *ts = highest_lru; - Some(v.clone()) - } - None => None, - } + store_lock.re_order(key, highest_lru); + store_lock.get(key).map(|(_, v)| v.clone()) } async fn remove(&mut self, key: &K) -> Option { let mut store_lock = self.store.lock().await; - let (cache_lookup, cache_order) = store_lock.deref_mut(); - let lru_val = cache_lookup.remove(key); - match lru_val { - Some((v, ts)) => { - cache_order.remove(&ts); - Some(v) - } - None => None, - } + store_lock.remove(key).map(|(_, v)| v) } async fn insert(&mut self, key: K, value: V) -> Option { // this will be O(log(n)) in all cases when cache is at capacity since we need to fetch the first and last element from the btree let mut store_lock = self.store.lock().await; - let (cache_lookup, cache_order) = store_lock.deref_mut(); - let highest_lru = cache_order - .last_key_value() - .map(|(ts, _)| ts + 1) + let highest_lru = store_lock + .get_last_key_value() + .map(|(_, ts, _)| ts + 1) .unwrap_or(0); - if cache_lookup.len() >= self.capacity && cache_lookup.get(&key).is_none() { + + if store_lock.len() >= self.capacity && 
store_lock.get(&key).is_none() { // remove the LRU item - let (lru_ts, lru_key) = match cache_order.first_key_value() { - Some((ts, key)) => (*ts, key.clone()), - None => return None, - }; - cache_lookup.remove(&lru_key); - cache_order.remove(&lru_ts); + let lru_key = store_lock + .get_first_key_value() + .map(|(k, _, _)| k.clone()) + .unwrap(); + store_lock.remove(&lru_key); }; - // if value is overwritten when inserting a new key, we need to remove the old key from the order index - cache_order.insert(highest_lru, key.clone()); - match cache_lookup.insert(key.clone(), (value.clone(), highest_lru)) { - Some((v, ts)) => { - cache_order.remove(&ts); - Some(v) - } - None => None, - } + store_lock.insert(key, value, highest_lru) } } From bd281a79161863e162d638f86cbd1a8094d98ef6 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 19 Jan 2024 11:47:15 -0800 Subject: [PATCH 13/32] moved cache functionality to module Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 2 +- libindy_vdr/src/pool/cache/helpers.rs | 113 ++++++++++++ .../src/pool/{cache.rs => cache/memcache.rs} | 171 +----------------- libindy_vdr/src/pool/cache/mod.rs | 45 +++++ 4 files changed, 166 insertions(+), 165 deletions(-) create mode 100644 libindy_vdr/src/pool/cache/helpers.rs rename libindy_vdr/src/pool/{cache.rs => cache/memcache.rs} (57%) create mode 100644 libindy_vdr/src/pool/cache/mod.rs diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index c2b967a5..4f8eb225 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -36,7 +36,7 @@ use hyper_tls::HttpsConnector; #[cfg(unix)] use hyper_unix_connector::UnixConnector; -use indy_vdr::pool::cache::{Cache, MemCacheStorageTTL}; +use indy_vdr::pool::cache::{memcache::MemCacheStorageTTL, Cache}; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] diff --git a/libindy_vdr/src/pool/cache/helpers.rs b/libindy_vdr/src/pool/cache/helpers.rs new file mode 100644 index 00000000..b476c7ae --- /dev/null +++ b/libindy_vdr/src/pool/cache/helpers.rs @@ -0,0 +1,113 @@ +use std::{ + collections::{BTreeMap, HashMap}, + hash::Hash, +}; +/// A hashmap that also maintains a BTreeMap of keys ordered by a given value +/// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries +pub(crate) struct OrderedHashMap((HashMap, BTreeMap>)); + +impl OrderedHashMap { + pub(crate) fn new() -> Self { + Self((HashMap::new(), BTreeMap::new())) + } +} + +impl OrderedHashMap { + pub fn len(&self) -> usize { + let (lookup, _) = &self.0; + lookup.len() + } + pub fn get(&self, key: &K) -> Option<&(O, V)> { + let (lookup, _) = &self.0; + lookup.get(key) + } + fn get_key_value( + &self, + selector: Box>) -> Option<(&O, &Vec)>>, + ) -> Option<(&K, &O, &V)> { + let (lookup, ordered_lookup) = &self.0; + selector(ordered_lookup).and_then(|(_, keys)| { + keys.first() + .and_then(|key| lookup.get(key).and_then(|(o, v)| Some((key, o, v)))) + }) + } + /// gets the entry with the lowest order value + pub fn get_first_key_value(&self) -> Option<(&K, &O, &V)> { + self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.first_key_value())) + } + /// gets the entry with the highest order value + pub fn get_last_key_value(&self) -> Option<(&K, &O, &V)> { + self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.last_key_value())) + } + /// re-orders the entry with the given key + pub fn re_order(&mut self, key: &K, new_order: O) { + let (lookup, order_lookup) = &mut 
self.0; + if let Some((old_order, _)) = lookup.get(key) { + // remove entry in btree + match order_lookup.get_mut(old_order) { + Some(keys) => { + keys.retain(|k| k != key); + if keys.len() == 0 { + order_lookup.remove(old_order); + } + } + None => {} + } + } + order_lookup + .entry(new_order) + .or_insert(vec![]) + .push(key.clone()); + lookup.get_mut(key).map(|(o, _)| *o = new_order); + } + /// inserts a new entry with the given key and value and order + pub fn insert(&mut self, key: K, value: V, order: O) -> Option { + let (lookup, order_lookup) = &mut self.0; + + if let Some((old_order, _)) = lookup.get(&key) { + // remove entry in btree + match order_lookup.get_mut(old_order) { + Some(keys) => { + keys.retain(|k| k != &key); + if keys.len() == 0 { + order_lookup.remove(old_order); + } + } + None => {} + } + } + order_lookup + .entry(order) + .or_insert(vec![]) + .push(key.clone()); + lookup + .insert(key, (order, value)) + .and_then(|(_, v)| Some(v)) + } + /// removes the entry with the given key + pub fn remove(&mut self, key: &K) -> Option<(O, V)> { + let (lookup, order_lookup) = &mut self.0; + lookup.remove(key).and_then(|(order, v)| { + match order_lookup.get_mut(&order) { + Some(keys) => { + keys.retain(|k| k != key); + if keys.len() == 0 { + order_lookup.remove(&order); + } + } + None => {} + } + Some((order, v)) + }) + } + /// removes the entry with the lowest order value + pub fn remove_first(&mut self) -> Option<(K, O, V)> { + let first_key = self.get_first_key_value().map(|(k, _, _)| k.clone()); + if let Some(first_key) = first_key { + self.remove(&first_key) + .map(|(order, v)| (first_key, order, v)) + } else { + None + } + } +} diff --git a/libindy_vdr/src/pool/cache.rs b/libindy_vdr/src/pool/cache/memcache.rs similarity index 57% rename from libindy_vdr/src/pool/cache.rs rename to libindy_vdr/src/pool/cache/memcache.rs index 62e9830b..962b32b8 100644 --- a/libindy_vdr/src/pool/cache.rs +++ b/libindy_vdr/src/pool/cache/memcache.rs @@ -1,163 +1,8 @@ +use super::helpers::OrderedHashMap; +use super::CacheStorage; +use async_lock::Mutex; use async_trait::async_trait; -use std::{ - collections::{BTreeMap, HashMap}, - fmt::Debug, - hash::Hash, - sync::Arc, - time::SystemTime, -}; - -use async_lock::{Mutex, RwLock}; - -#[async_trait] -pub trait CacheStorage: Send + Sync + 'static { - async fn get(&self, key: &K) -> Option; - - async fn remove(&mut self, key: &K) -> Option; - - async fn insert(&mut self, key: K, value: V) -> Option; -} - -pub struct Cache { - storage: Arc>>, -} - -impl Cache { - pub fn new(storage: impl CacheStorage) -> Self { - Self { - storage: Arc::new(RwLock::new(storage)), - } - } - pub async fn get(&mut self, key: &K) -> Option { - self.storage.read().await.get(key).await - } - pub async fn remove(&mut self, key: &K) -> Option { - self.storage.write().await.remove(key).await - } - pub async fn insert(&mut self, key: K, value: V) -> Option { - self.storage.write().await.insert(key, value).await - } -} - -// need to implement Clone manually because Mutex doesn't implement Clone -impl Clone for Cache { - fn clone(&self) -> Self { - Self { - storage: self.storage.clone(), - } - } -} - -/// A hashmap that also maintains a BTreeMap of keys ordered by a given value -/// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries -struct OrderedHashMap((HashMap, BTreeMap>)); - -impl OrderedHashMap { - fn new() -> Self { - Self((HashMap::new(), BTreeMap::new())) - } -} - -impl OrderedHashMap { - fn 
len(&self) -> usize { - let (lookup, _) = &self.0; - lookup.len() - } - fn get(&self, key: &K) -> Option<&(O, V)> { - let (lookup, _) = &self.0; - lookup.get(key) - } - fn get_key_value( - &self, - selector: Box>) -> Option<(&O, &Vec)>>, - ) -> Option<(&K, &O, &V)> { - let (lookup, ordered_lookup) = &self.0; - selector(ordered_lookup).and_then(|(_, keys)| { - keys.first() - .and_then(|key| lookup.get(key).and_then(|(o, v)| Some((key, o, v)))) - }) - } - /// gets the entry with the lowest order value - fn get_first_key_value(&self) -> Option<(&K, &O, &V)> { - self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.first_key_value())) - } - /// gets the entry with the highest order value - fn get_last_key_value(&self) -> Option<(&K, &O, &V)> { - self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.last_key_value())) - } - /// re-orders the entry with the given key - fn re_order(&mut self, key: &K, new_order: O) { - let (lookup, order_lookup) = &mut self.0; - if let Some((old_order, _)) = lookup.get(key) { - // remove entry in btree - match order_lookup.get_mut(old_order) { - Some(keys) => { - keys.retain(|k| k != key); - if keys.len() == 0 { - order_lookup.remove(old_order); - } - } - None => {} - } - } - order_lookup - .entry(new_order) - .or_insert(vec![]) - .push(key.clone()); - lookup.get_mut(key).map(|(o, _)| *o = new_order); - } - /// inserts a new entry with the given key and value and order - fn insert(&mut self, key: K, value: V, order: O) -> Option { - let (lookup, order_lookup) = &mut self.0; - - if let Some((old_order, _)) = lookup.get(&key) { - // remove entry in btree - match order_lookup.get_mut(old_order) { - Some(keys) => { - keys.retain(|k| k != &key); - if keys.len() == 0 { - order_lookup.remove(old_order); - } - } - None => {} - } - } - order_lookup - .entry(order) - .or_insert(vec![]) - .push(key.clone()); - lookup - .insert(key, (order, value)) - .and_then(|(_, v)| Some(v)) - } - /// removes the entry with the given key - fn remove(&mut self, key: &K) -> Option<(O, V)> { - let (lookup, order_lookup) = &mut self.0; - lookup.remove(key).and_then(|(order, v)| { - match order_lookup.get_mut(&order) { - Some(keys) => { - keys.retain(|k| k != key); - if keys.len() == 0 { - order_lookup.remove(&order); - } - } - None => {} - } - Some((order, v)) - }) - } - /// removes the entry with the lowest order value - fn remove_first(&mut self) -> Option<(K, O, V)> { - let first_key = self.get_first_key_value().map(|(k, _, _)| k.clone()); - if let Some(first_key) = first_key { - self.remove(&first_key) - .map(|(order, v)| (first_key, order, v)) - } else { - None - } - } -} - +use std::{hash::Hash, sync::Arc, time::SystemTime}; /// A simple in-memory cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. 
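After this refactor, concrete storage implementations live under pool::cache::memcache while Cache and the CacheStorage trait stay at pool::cache, as the main.rs hunk above shows. A minimal sketch of constructing the proxy's cache under the new module layout, using the capacity and one-day TTL that appear in the surrounding hunks (the function name is illustrative):

use indy_vdr::pool::cache::{memcache::MemCacheStorageTTL, Cache};
use indy_vdr::pool::RequestResultMeta;
use std::time::Duration;

// The value type matches what perform_ledger_request stores per cache key:
// the raw response string plus its result metadata.
fn build_proxy_cache() -> Cache<String, (String, RequestResultMeta)> {
    let storage = MemCacheStorageTTL::new(1024, Duration::from_secs(86_400).as_millis());
    Cache::new(storage)
}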
/// Uses a hashmap for lookups and a BTreeMap for ordering by age pub struct MemCacheStorageTTL { @@ -180,7 +25,7 @@ impl MemCacheStorageTTL { } #[async_trait] -impl +impl CacheStorage for MemCacheStorageTTL { async fn get(&self, key: &K) -> Option { @@ -294,12 +139,12 @@ mod tests { use std::thread; + use super::*; + use crate::pool::cache::Cache; use futures_executor::block_on; #[rstest] fn test_cache_lru() { - use super::*; - let mut cache = Cache::new(MemCacheStorageLRU::new(2)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; @@ -325,8 +170,6 @@ mod tests { #[rstest] fn test_cache_ttl() { - use super::*; - let mut cache = Cache::new(MemCacheStorageTTL::new(2, 5)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs new file mode 100644 index 00000000..b657d0f8 --- /dev/null +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -0,0 +1,45 @@ +use async_lock::RwLock; +use async_trait::async_trait; +use std::sync::Arc; + +mod helpers; +pub mod memcache; + +#[async_trait] +pub trait CacheStorage: Send + Sync + 'static { + async fn get(&self, key: &K) -> Option; + + async fn remove(&mut self, key: &K) -> Option; + + async fn insert(&mut self, key: K, value: V) -> Option; +} + +pub struct Cache { + storage: Arc>>, +} + +impl Cache { + pub fn new(storage: impl CacheStorage) -> Self { + Self { + storage: Arc::new(RwLock::new(storage)), + } + } + pub async fn get(&mut self, key: &K) -> Option { + self.storage.read().await.get(key).await + } + pub async fn remove(&mut self, key: &K) -> Option { + self.storage.write().await.remove(key).await + } + pub async fn insert(&mut self, key: K, value: V) -> Option { + self.storage.write().await.insert(key, value).await + } +} + +// need to implement Clone manually because Mutex doesn't implement Clone +impl Clone for Cache { + fn clone(&self) -> Self { + Self { + storage: self.storage.clone(), + } + } +} From 5cb129748d3c93616d84054a9ae4908fac531488 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 19 Jan 2024 16:10:49 -0800 Subject: [PATCH 14/32] skip caching if data is null Signed-off-by: wadeking98 --- libindy_vdr/src/pool/helpers.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 9f5a2618..75354b80 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -233,6 +233,13 @@ pub async fn perform_ledger_request( handle_consensus_request(&mut request, sp_key, sp_timestamps, is_read_req, sp_parser).await; if is_read_req && result.is_ok() { if let (RequestResult::Reply(response), meta) = result.as_ref().unwrap() { + // check and made sure data is not null before caching + let serialized = serde_json::from_str::(response); + if let Ok(data) = serialized { + if data["result"]["data"].is_null() { + return result; + } + } if let Some(mut cache) = cache_opt { cache .insert(cache_key, (response.to_string(), meta.clone())) From 98722bbf4d9c863ee377b37e715eeef5641c1eda Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Mon, 22 Jan 2024 18:07:30 -0800 Subject: [PATCH 15/32] boilerplate for fs caching Signed-off-by: wadeking98 --- libindy_vdr/Cargo.toml | 1 + libindy_vdr/src/pool/cache/fscache.rs | 6 + libindy_vdr/src/pool/cache/helpers.rs | 154 +++++++++++++++++-------- libindy_vdr/src/pool/cache/memcache.rs | 24 ++-- libindy_vdr/src/pool/cache/mod.rs | 7 +- 5 files changed, 132 insertions(+), 60 deletions(-) create mode 100644 
libindy_vdr/src/pool/cache/fscache.rs diff --git a/libindy_vdr/Cargo.toml b/libindy_vdr/Cargo.toml index 16daf6cb..71d42402 100644 --- a/libindy_vdr/Cargo.toml +++ b/libindy_vdr/Cargo.toml @@ -62,6 +62,7 @@ url = "2.2.2" zmq = "0.9" async-trait = "0.1.77" async-lock = "3.3.0" +sled = "0.34.7" [dev-dependencies] rstest = "0.18" diff --git a/libindy_vdr/src/pool/cache/fscache.rs b/libindy_vdr/src/pool/cache/fscache.rs new file mode 100644 index 00000000..f3b4f820 --- /dev/null +++ b/libindy_vdr/src/pool/cache/fscache.rs @@ -0,0 +1,6 @@ +use sled; + +pub fn test() { + let sled = sled::open("my_db").unwrap(); + sled.insert(b"yo!", vec![1, 2, 3]).unwrap(); +} diff --git a/libindy_vdr/src/pool/cache/helpers.rs b/libindy_vdr/src/pool/cache/helpers.rs index b476c7ae..d29751f6 100644 --- a/libindy_vdr/src/pool/cache/helpers.rs +++ b/libindy_vdr/src/pool/cache/helpers.rs @@ -1,18 +1,89 @@ +use serde::{de::DeserializeOwned, Serialize}; +use sled::{self, IVec, Tree}; use std::{ collections::{BTreeMap, HashMap}, hash::Hash, }; + +pub trait OrderedStore: Send + Sync { + fn len(&self) -> usize; + fn first_key_value(&self) -> Option<(O, V)>; + fn last_key_value(&self) -> Option<(O, V)>; + fn get(&self, key: &O) -> Option; + fn insert(&mut self, key: O, value: V) -> Option; + fn remove(&mut self, key: &O) -> Option; +} +impl OrderedStore for Tree { + fn len(&self) -> usize { + Tree::len(self) + } + fn first_key_value(&self) -> Option<(IVec, V)> { + match self.first() { + Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| (k, v)), + _ => None, + } + } + fn last_key_value(&self) -> Option<(IVec, V)> { + match self.last() { + Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| (k, v)), + _ => None, + } + } + fn get(&self, key: &IVec) -> Option { + match self.get(key) { + Ok(Some(v)) => serde_json::from_slice(v.as_ref()).ok(), + _ => None, + } + } + fn insert(&mut self, key: IVec, value: V) -> Option { + match Tree::insert(self, key, serde_json::to_vec(&value).unwrap()) { + Ok(Some(v)) => serde_json::from_slice(v.as_ref()).ok(), + _ => None, + } + } + fn remove(&mut self, key: &IVec) -> Option { + match Tree::remove(self, key).map(|v| v) { + Ok(Some(v)) => serde_json::from_slice(&v).ok(), + _ => None, + } + } +} +impl OrderedStore for BTreeMap { + fn len(&self) -> usize { + BTreeMap::len(self) + } + fn first_key_value(&self) -> Option<(O, V)> { + BTreeMap::first_key_value(self).map(|(o, v)| (*o, v.clone())) + } + fn last_key_value(&self) -> Option<(O, V)> { + BTreeMap::last_key_value(self).map(|(o, v)| (*o, v.clone())) + } + fn get(&self, key: &O) -> Option { + BTreeMap::get(self, key).map(|v| v.clone()) + } + fn insert(&mut self, key: O, value: V) -> Option { + BTreeMap::insert(self, key, value) + } + fn remove(&mut self, key: &O) -> Option { + BTreeMap::remove(self, key) + } +} /// A hashmap that also maintains a BTreeMap of keys ordered by a given value /// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries -pub(crate) struct OrderedHashMap((HashMap, BTreeMap>)); +pub struct OrderedHashMap( + ( + HashMap, + Box> + Send + Sync>, + ), +); -impl OrderedHashMap { - pub(crate) fn new() -> Self { - Self((HashMap::new(), BTreeMap::new())) +impl OrderedHashMap { + pub fn new(order: impl OrderedStore> + 'static) -> Self { + Self((HashMap::new(), Box::new(order))) } } -impl OrderedHashMap { +impl OrderedHashMap { pub fn len(&self) -> usize { let (lookup, _) = &self.0; lookup.len() @@ -23,63 +94,55 @@ impl 
OrderedHashMap { } fn get_key_value( &self, - selector: Box>) -> Option<(&O, &Vec)>>, - ) -> Option<(&K, &O, &V)> { + selector: Box< + dyn Fn(&Box> + Send + Sync>) -> Option<(O, Vec)>, + >, + ) -> Option<(K, O, V)> { let (lookup, ordered_lookup) = &self.0; selector(ordered_lookup).and_then(|(_, keys)| { - keys.first() - .and_then(|key| lookup.get(key).and_then(|(o, v)| Some((key, o, v)))) + keys.first().and_then(|key| { + lookup + .get(key) + .and_then(|(o, v)| Some((key.clone(), *o, v.clone()))) + }) }) } /// gets the entry with the lowest order value - pub fn get_first_key_value(&self) -> Option<(&K, &O, &V)> { + pub fn get_first_key_value(&self) -> Option<(K, O, V)> { self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.first_key_value())) } /// gets the entry with the highest order value - pub fn get_last_key_value(&self) -> Option<(&K, &O, &V)> { + pub fn get_last_key_value(&self) -> Option<(K, O, V)> { self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.last_key_value())) } - /// re-orders the entry with the given key + /// re-orders the entry with the given new order pub fn re_order(&mut self, key: &K, new_order: O) { - let (lookup, order_lookup) = &mut self.0; - if let Some((old_order, _)) = lookup.get(key) { - // remove entry in btree - match order_lookup.get_mut(old_order) { - Some(keys) => { - keys.retain(|k| k != key); - if keys.len() == 0 { - order_lookup.remove(old_order); - } - } - None => {} - } + if let Some((_, value)) = self.remove(key) { + self.insert(key.clone(), value, new_order); } - order_lookup - .entry(new_order) - .or_insert(vec![]) - .push(key.clone()); - lookup.get_mut(key).map(|(o, _)| *o = new_order); } /// inserts a new entry with the given key and value and order pub fn insert(&mut self, key: K, value: V, order: O) -> Option { let (lookup, order_lookup) = &mut self.0; if let Some((old_order, _)) = lookup.get(&key) { - // remove entry in btree - match order_lookup.get_mut(old_order) { - Some(keys) => { - keys.retain(|k| k != &key); - if keys.len() == 0 { - order_lookup.remove(old_order); - } + // if entry already exists, remove it from the btree + if let Some(mut keys) = order_lookup.remove(old_order) { + keys.retain(|k| *k != key); + // insert modified keys back into btree + if !keys.is_empty() { + order_lookup.insert(*old_order, keys); } - None => {} } } - order_lookup - .entry(order) - .or_insert(vec![]) - .push(key.clone()); + let keys = match order_lookup.remove(&order) { + Some(mut ks) => { + ks.push(key.clone()); + ks + } + None => vec![key.clone()], + }; + order_lookup.insert(order, keys); lookup .insert(key, (order, value)) .and_then(|(_, v)| Some(v)) @@ -88,11 +151,12 @@ impl OrderedHashMap { pub fn remove(&mut self, key: &K) -> Option<(O, V)> { let (lookup, order_lookup) = &mut self.0; lookup.remove(key).and_then(|(order, v)| { - match order_lookup.get_mut(&order) { - Some(keys) => { + match order_lookup.remove(&order) { + Some(mut keys) => { keys.retain(|k| k != key); - if keys.len() == 0 { - order_lookup.remove(&order); + // insert remaining keys back in + if !keys.is_empty() { + order_lookup.insert(order, keys); } } None => {} diff --git a/libindy_vdr/src/pool/cache/memcache.rs b/libindy_vdr/src/pool/cache/memcache.rs index 962b32b8..a5e00dbd 100644 --- a/libindy_vdr/src/pool/cache/memcache.rs +++ b/libindy_vdr/src/pool/cache/memcache.rs @@ -2,7 +2,7 @@ use super::helpers::OrderedHashMap; use super::CacheStorage; use async_lock::Mutex; use async_trait::async_trait; -use std::{hash::Hash, sync::Arc, time::SystemTime}; +use 
std::{collections::BTreeMap, fmt::Debug, hash::Hash, sync::Arc, time::SystemTime}; /// A simple in-memory cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. /// Uses a hashmap for lookups and a BTreeMap for ordering by age pub struct MemCacheStorageTTL { @@ -12,11 +12,11 @@ pub struct MemCacheStorageTTL { expire_after: u128, } -impl MemCacheStorageTTL { +impl MemCacheStorageTTL { /// Create a new cache with the given capacity and expiration time in milliseconds pub fn new(capacity: usize, expire_after: u128) -> Self { Self { - store: OrderedHashMap::new(), + store: OrderedHashMap::new(BTreeMap::new()), capacity, startup_time: SystemTime::now(), expire_after, @@ -25,7 +25,7 @@ impl MemCacheStorageTTL { } #[async_trait] -impl +impl CacheStorage for MemCacheStorageTTL { async fn get(&self, key: &K) -> Option { @@ -35,7 +35,7 @@ impl 0 && self .store .get_first_key_value() - .map(|(_, ts, _)| ts + exp_offset < current_ts) + .map(|(_, ts, _)| ts.clone() < current_ts) .unwrap_or(false) { self.store.remove_first(); @@ -74,7 +73,8 @@ impl { capacity: usize, } -impl MemCacheStorageLRU { +impl MemCacheStorageLRU { pub fn new(capacity: usize) -> Self { Self { - store: Arc::new(Mutex::new(OrderedHashMap::new())), + store: Arc::new(Mutex::new(OrderedHashMap::new(BTreeMap::new()))), capacity, } } @@ -145,7 +145,7 @@ mod tests { #[rstest] fn test_cache_lru() { - let mut cache = Cache::new(MemCacheStorageLRU::new(2)); + let cache = Cache::new(MemCacheStorageLRU::new(2)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; assert_eq!( @@ -170,7 +170,7 @@ mod tests { #[rstest] fn test_cache_ttl() { - let mut cache = Cache::new(MemCacheStorageTTL::new(2, 5)); + let cache = Cache::new(MemCacheStorageTTL::new(2, 5)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; thread::sleep(std::time::Duration::from_millis(1)); diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index b657d0f8..2fb6483c 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -4,6 +4,7 @@ use std::sync::Arc; mod helpers; pub mod memcache; +pub mod fscache; #[async_trait] pub trait CacheStorage: Send + Sync + 'static { @@ -24,13 +25,13 @@ impl Cache { storage: Arc::new(RwLock::new(storage)), } } - pub async fn get(&mut self, key: &K) -> Option { + pub async fn get(&self, key: &K) -> Option { self.storage.read().await.get(key).await } - pub async fn remove(&mut self, key: &K) -> Option { + pub async fn remove(&self, key: &K) -> Option { self.storage.write().await.remove(key).await } - pub async fn insert(&mut self, key: K, value: V) -> Option { + pub async fn insert(&self, key: K, value: V) -> Option { self.storage.write().await.insert(key, value).await } } From b52ea45db9e3a832e9fbb3b376f7f1e91a265cc0 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Tue, 23 Jan 2024 13:44:34 -0800 Subject: [PATCH 16/32] added storage filesystem caching strategy Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 5 +- libindy_vdr/src/pool/cache/fscache.rs | 6 - libindy_vdr/src/pool/cache/mod.rs | 13 +- .../src/pool/cache/{helpers.rs => storage.rs} | 46 +++--- .../pool/cache/{memcache.rs => strategy.rs} | 133 +++++++++++++++--- libindy_vdr/src/pool/helpers.rs | 4 +- 6 files changed, 150 insertions(+), 57 deletions(-) delete mode 100644 libindy_vdr/src/pool/cache/fscache.rs rename libindy_vdr/src/pool/cache/{helpers.rs => storage.rs} (79%) rename libindy_vdr/src/pool/cache/{memcache.rs => 
strategy.rs} (54%) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 4f8eb225..6bd59285 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -36,7 +36,7 @@ use hyper_tls::HttpsConnector; #[cfg(unix)] use hyper_unix_connector::UnixConnector; -use indy_vdr::pool::cache::{memcache::MemCacheStorageTTL, Cache}; +use indy_vdr::pool::cache::{strategy::CacheStrategyTTL, Cache}; #[cfg(feature = "tls")] use rustls_pemfile::{certs, pkcs8_private_keys}; #[cfg(feature = "tls")] @@ -428,7 +428,8 @@ where I::Error: Into>, { let cache = if config.cache { - let mem_storage = MemCacheStorageTTL::new(1024, Duration::from_secs(86400).as_millis()); + let mem_storage = + CacheStrategyTTL::new(1024, Duration::from_secs(86400).as_millis(), None, None); let mem_cache = Cache::new(mem_storage); Some(mem_cache) } else { diff --git a/libindy_vdr/src/pool/cache/fscache.rs b/libindy_vdr/src/pool/cache/fscache.rs deleted file mode 100644 index f3b4f820..00000000 --- a/libindy_vdr/src/pool/cache/fscache.rs +++ /dev/null @@ -1,6 +0,0 @@ -use sled; - -pub fn test() { - let sled = sled::open("my_db").unwrap(); - sled.insert(b"yo!", vec![1, 2, 3]).unwrap(); -} diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index 2fb6483c..5845be1f 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -2,12 +2,11 @@ use async_lock::RwLock; use async_trait::async_trait; use std::sync::Arc; -mod helpers; -pub mod memcache; -pub mod fscache; +pub mod storage; +pub mod strategy; #[async_trait] -pub trait CacheStorage: Send + Sync + 'static { +pub trait CacheStrategy: Send + Sync + 'static { async fn get(&self, key: &K) -> Option; async fn remove(&mut self, key: &K) -> Option; @@ -16,11 +15,11 @@ pub trait CacheStorage: Send + Sync + 'static { } pub struct Cache { - storage: Arc>>, + storage: Arc>>, } impl Cache { - pub fn new(storage: impl CacheStorage) -> Self { + pub fn new(storage: impl CacheStrategy) -> Self { Self { storage: Arc::new(RwLock::new(storage)), } @@ -36,7 +35,7 @@ impl Cache { } } -// need to implement Clone manually because Mutex doesn't implement Clone +// need to implement Clone manually because Mutex doesn't implement Clone impl Clone for Cache { fn clone(&self) -> Self { Self { diff --git a/libindy_vdr/src/pool/cache/helpers.rs b/libindy_vdr/src/pool/cache/storage.rs similarity index 79% rename from libindy_vdr/src/pool/cache/helpers.rs rename to libindy_vdr/src/pool/cache/storage.rs index d29751f6..2775b06e 100644 --- a/libindy_vdr/src/pool/cache/helpers.rs +++ b/libindy_vdr/src/pool/cache/storage.rs @@ -1,5 +1,5 @@ use serde::{de::DeserializeOwned, Serialize}; -use sled::{self, IVec, Tree}; +use sled::{self, Tree}; use std::{ collections::{BTreeMap, HashMap}, hash::Hash, @@ -13,36 +13,46 @@ pub trait OrderedStore: Send + Sync { fn insert(&mut self, key: O, value: V) -> Option; fn remove(&mut self, key: &O) -> Option; } -impl OrderedStore for Tree { +impl OrderedStore for Tree { fn len(&self) -> usize { Tree::len(self) } - fn first_key_value(&self) -> Option<(IVec, V)> { + fn first_key_value(&self) -> Option<(u128, V)> { match self.first() { - Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| (k, v)), + Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| { + ( + u128::from_be_bytes(k.as_ref().try_into().unwrap_or([0; 16])), + v, + ) + }), _ => None, } } - fn last_key_value(&self) -> Option<(IVec, V)> { + fn last_key_value(&self) -> Option<(u128, V)> { match self.last() { - 
Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| (k, v)), + Ok(Some((k, v))) => serde_json::from_slice(v.as_ref()).ok().map(|v| { + ( + u128::from_be_bytes(k.as_ref().try_into().unwrap_or([0; 16])), + v, + ) + }), _ => None, } } - fn get(&self, key: &IVec) -> Option { - match self.get(key) { + fn get(&self, key: &u128) -> Option { + match Tree::get(self, key.to_be_bytes()).map(|v| v) { Ok(Some(v)) => serde_json::from_slice(v.as_ref()).ok(), _ => None, } } - fn insert(&mut self, key: IVec, value: V) -> Option { - match Tree::insert(self, key, serde_json::to_vec(&value).unwrap()) { + fn insert(&mut self, key: u128, value: V) -> Option { + match Tree::insert(self, key.to_be_bytes(), serde_json::to_vec(&value).unwrap()) { Ok(Some(v)) => serde_json::from_slice(v.as_ref()).ok(), _ => None, } } - fn remove(&mut self, key: &IVec) -> Option { - match Tree::remove(self, key).map(|v| v) { + fn remove(&mut self, key: &u128) -> Option { + match Tree::remove(self, key.to_be_bytes()).map(|v| v) { Ok(Some(v)) => serde_json::from_slice(&v).ok(), _ => None, } @@ -77,13 +87,13 @@ pub struct OrderedHashMap( ), ); -impl OrderedHashMap { +impl OrderedHashMap { pub fn new(order: impl OrderedStore> + 'static) -> Self { Self((HashMap::new(), Box::new(order))) } } -impl OrderedHashMap { +impl OrderedHashMap { pub fn len(&self) -> usize { let (lookup, _) = &self.0; lookup.len() @@ -103,7 +113,7 @@ impl OrderedHashMap { keys.first().and_then(|key| { lookup .get(key) - .and_then(|(o, v)| Some((key.clone(), *o, v.clone()))) + .and_then(|(o, v)| Some((key.clone(), o.clone(), v.clone()))) }) }) } @@ -131,7 +141,7 @@ impl OrderedHashMap { keys.retain(|k| *k != key); // insert modified keys back into btree if !keys.is_empty() { - order_lookup.insert(*old_order, keys); + order_lookup.insert(old_order.clone(), keys); } } } @@ -142,7 +152,7 @@ impl OrderedHashMap { } None => vec![key.clone()], }; - order_lookup.insert(order, keys); + order_lookup.insert(order.clone(), keys); lookup .insert(key, (order, value)) .and_then(|(_, v)| Some(v)) @@ -156,7 +166,7 @@ impl OrderedHashMap { keys.retain(|k| k != key); // insert remaining keys back in if !keys.is_empty() { - order_lookup.insert(order, keys); + order_lookup.insert(order.clone(), keys); } } None => {} diff --git a/libindy_vdr/src/pool/cache/memcache.rs b/libindy_vdr/src/pool/cache/strategy.rs similarity index 54% rename from libindy_vdr/src/pool/cache/memcache.rs rename to libindy_vdr/src/pool/cache/strategy.rs index a5e00dbd..abd02190 100644 --- a/libindy_vdr/src/pool/cache/memcache.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -1,24 +1,37 @@ -use super::helpers::OrderedHashMap; -use super::CacheStorage; +use super::storage::OrderedHashMap; +use super::CacheStrategy; use async_lock::Mutex; use async_trait::async_trait; use std::{collections::BTreeMap, fmt::Debug, hash::Hash, sync::Arc, time::SystemTime}; -/// A simple in-memory cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. +/// A simple cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. 
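The TTL strategy described in these doc comments is driven through the `Cache` wrapper; a minimal usage sketch against the API as it stands at this point in the series (capacity and TTL values are illustrative):

    use futures_executor::block_on;
    use indy_vdr::pool::cache::{strategy::CacheStrategyTTL, Cache};

    // 1024 entries, 60s TTL; the two `None`s select the in-memory BTreeMap
    // store and UNIX_EPOCH as the timestamp base (see the constructor below)
    let cache = Cache::new(CacheStrategyTTL::new(1024, 60_000, None, None));
    block_on(async {
        cache.insert("key".to_string(), "value".to_string()).await;
        assert_eq!(cache.get(&"key".to_string()).await, Some("value".to_string()));
    });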
/// Uses a hashmap for lookups and a BTreeMap for ordering by age -pub struct MemCacheStorageTTL { +pub struct CacheStrategyTTL { store: OrderedHashMap, capacity: usize, - startup_time: SystemTime, + create_time: SystemTime, expire_after: u128, } -impl MemCacheStorageTTL { +impl CacheStrategyTTL { /// Create a new cache with the given capacity and expiration time in milliseconds - pub fn new(capacity: usize, expire_after: u128) -> Self { + /// If store_type is None, the cache will use an in-memory hashmap and BTreeMap + /// cache_time is used as a starting point to generate timestamps if it is None, the cache will use the UNIX_EPOCH as the cache start time + pub fn new( + capacity: usize, + expire_after: u128, + store_type: Option>, + create_time: Option, + ) -> Self { Self { - store: OrderedHashMap::new(BTreeMap::new()), + store: match store_type { + Some(store) => store, + None => OrderedHashMap::new(BTreeMap::new()), + }, capacity, - startup_time: SystemTime::now(), + create_time: match create_time { + Some(time) => time, + None => SystemTime::UNIX_EPOCH, + }, expire_after, } } @@ -26,13 +39,13 @@ impl MemCacheStorageTTL { #[async_trait] impl - CacheStorage for MemCacheStorageTTL + CacheStrategy for CacheStrategyTTL { async fn get(&self, key: &K) -> Option { match self.store.get(key) { Some((ts, v)) => { let current_time = SystemTime::now() - .duration_since(self.startup_time) + .duration_since(self.create_time) .unwrap() .as_millis(); if current_time < *ts { @@ -49,7 +62,7 @@ impl Option { let current_ts = SystemTime::now() - .duration_since(self.startup_time) + .duration_since(self.create_time) .unwrap() .as_millis(); @@ -78,25 +91,28 @@ impl { +pub struct CacheStrategyLRU { // The store is wrapped in an arc and a mutex so that get() can be immutable - store: Arc>>, + store: Arc>>, capacity: usize, } -impl MemCacheStorageLRU { - pub fn new(capacity: usize) -> Self { +impl CacheStrategyLRU { + pub fn new(capacity: usize, store_type: Option>) -> Self { Self { - store: Arc::new(Mutex::new(OrderedHashMap::new(BTreeMap::new()))), + store: Arc::new(Mutex::new(match store_type { + Some(store) => store, + None => OrderedHashMap::new(BTreeMap::new()), + })), capacity, } } } #[async_trait] impl - CacheStorage for MemCacheStorageLRU + CacheStrategy for CacheStrategyLRU { async fn get(&self, key: &K) -> Option { // move the key to the end of the LRU index @@ -140,12 +156,12 @@ mod tests { use std::thread; use super::*; - use crate::pool::cache::Cache; + use crate::pool::cache::{storage::OrderedHashMap, Cache}; use futures_executor::block_on; #[rstest] fn test_cache_lru() { - let cache = Cache::new(MemCacheStorageLRU::new(2)); + let cache = Cache::new(CacheStrategyLRU::new(2, None)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; assert_eq!( @@ -168,9 +184,43 @@ mod tests { }); } + #[rstest] + fn test_fs_cache_lru() { + let cache_location = "test_fs_cache_lru"; + let tree = sled::open(cache_location) + .unwrap() + .open_tree(cache_location) + .unwrap(); + let storage: OrderedHashMap = OrderedHashMap::new(tree); + let cache = Cache::new(CacheStrategyLRU::new(2, Some(storage))); + block_on(async { + cache.insert("key".to_string(), "value".to_string()).await; + assert_eq!( + cache.get(&"key".to_string()).await, + Some("value".to_string()) + ); + cache.insert("key1".to_string(), "value1".to_string()).await; + cache.insert("key2".to_string(), "value2".to_string()).await; + assert_eq!(cache.get(&"key".to_string()).await, None); + cache.insert("key3".to_string(), 
"value3".to_string()).await; + cache.insert("key3".to_string(), "value3".to_string()).await; + cache.get(&"key2".to_string()).await; // move key2 to the end of the LRU index + cache.insert("key4".to_string(), "value4".to_string()).await; + // key3 should be evicted + assert_eq!( + cache.remove(&"key2".to_string()).await, + Some("value2".to_string()) + ); + assert_eq!(cache.remove(&"key3".to_string()).await, None); + + // cleanup + std::fs::remove_dir_all(cache_location).unwrap(); + }); + } + #[rstest] fn test_cache_ttl() { - let cache = Cache::new(MemCacheStorageTTL::new(2, 5)); + let cache = Cache::new(CacheStrategyTTL::new(2, 5, None, None)); block_on(async { cache.insert("key".to_string(), "value".to_string()).await; thread::sleep(std::time::Duration::from_millis(1)); @@ -197,4 +247,43 @@ mod tests { assert_eq!(cache.get(&"key5".to_string()).await, None); }); } + + #[rstest] + fn test_fs_cache_ttl() { + let cache_location = "test_fs_cache_ttl"; + let tree = sled::open(cache_location) + .unwrap() + .open_tree(cache_location) + .unwrap(); + let storage: OrderedHashMap = OrderedHashMap::new(tree); + let cache = Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None)); + block_on(async { + cache.insert("key".to_string(), "value".to_string()).await; + thread::sleep(std::time::Duration::from_millis(1)); + assert_eq!( + cache.get(&"key".to_string()).await, + Some("value".to_string()) + ); + cache.insert("key1".to_string(), "value1".to_string()).await; + thread::sleep(std::time::Duration::from_millis(1)); + cache.insert("key2".to_string(), "value2".to_string()).await; + assert_eq!(cache.get(&"key".to_string()).await, None); + thread::sleep(std::time::Duration::from_millis(1)); + cache.insert("key3".to_string(), "value3".to_string()).await; + cache.get(&"key2".to_string()).await; + cache.insert("key4".to_string(), "value4".to_string()).await; + // key2 should be evicted + assert_eq!(cache.remove(&"key2".to_string()).await, None); + assert_eq!( + cache.remove(&"key3".to_string()).await, + Some("value3".to_string()) + ); + cache.insert("key5".to_string(), "value5".to_string()).await; + thread::sleep(std::time::Duration::from_millis(6)); + assert_eq!(cache.get(&"key5".to_string()).await, None); + + // cleanup + std::fs::remove_dir_all(cache_location).unwrap(); + }); + } } diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 75354b80..058270a3 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -223,7 +223,7 @@ pub async fn perform_ledger_request( let cache_key = prepared.get_cache_key()?; if is_read_req { - if let Some(mut cache) = cache_opt.clone() { + if let Some(cache) = cache_opt.clone() { if let Some((response, meta)) = cache.get(&cache_key).await { return Ok((RequestResult::Reply(response), meta)); } @@ -240,7 +240,7 @@ pub async fn perform_ledger_request( return result; } } - if let Some(mut cache) = cache_opt { + if let Some(cache) = cache_opt { cache .insert(cache_key, (response.to_string(), meta.clone())) .await; From e811e5d223e2d2f1e5d8643e4184b0dc1c756209 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Tue, 23 Jan 2024 16:18:26 -0800 Subject: [PATCH 17/32] fixed cache storage Signed-off-by: wadeking98 --- indy-vdr-proxy/Cargo.toml | 1 + indy-vdr-proxy/src/app.rs | 20 +++++++ indy-vdr-proxy/src/main.rs | 26 +++++++-- libindy_vdr/src/pool/cache/storage.rs | 54 +++++++++++++++---- libindy_vdr/src/pool/cache/strategy.rs | 8 ++- .../src/pool/requests/prepared_request.rs | 5 +- libindy_vdr/src/pool/types.rs | 4 +- 7 files 
changed, 98 insertions(+), 20 deletions(-) diff --git a/indy-vdr-proxy/Cargo.toml b/indy-vdr-proxy/Cargo.toml index 9111ad47..d6c9587e 100644 --- a/indy-vdr-proxy/Cargo.toml +++ b/indy-vdr-proxy/Cargo.toml @@ -34,6 +34,7 @@ serde_json = "1.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread", "signal"] } tokio-rustls = { version = "0.24", optional = true } url = "2.2.2" +sled = "0.34.7" [target.'cfg(unix)'.dependencies] hyper-unix-connector = "0.2" diff --git a/indy-vdr-proxy/src/app.rs b/indy-vdr-proxy/src/app.rs index a334981a..42806567 100644 --- a/indy-vdr-proxy/src/app.rs +++ b/indy-vdr-proxy/src/app.rs @@ -14,6 +14,8 @@ pub struct Config { pub tls_cert_path: Option, pub tls_key_path: Option, pub cache: bool, + pub cache_size: usize, + pub cache_path: Option, } pub fn load_config() -> Result { @@ -87,6 +89,16 @@ pub fn load_config() -> Result { .long("use-cache").action(ArgAction::SetTrue) .value_name("CACHE") .help("Whether to use cache or not") + ).arg( + Arg::new("cache-size") + .long("cache-size") + .value_name("CACHE_SIZE") + .help("Size of cache") + ).arg( + Arg::new("cache-path") + .long("cache-path") + .value_name("CACHE_PATH") + .help("Path to cache") ); #[cfg(unix)] @@ -146,6 +158,12 @@ pub fn load_config() -> Result { let tls_cert_path = matches.get_one::("tls-cert").cloned(); let tls_key_path = matches.get_one::("tls-key").cloned(); let cache = matches.get_flag("use-cache"); + let cache_size = matches + .get_one::("cache-size") + .map(|ival| ival.parse::().map_err(|_| "Invalid cache size")) + .transpose()? + .unwrap_or(1000); + let cache_path = matches.get_one::("cache-path").cloned(); Ok(Config { genesis, @@ -160,5 +178,7 @@ pub fn load_config() -> Result { tls_cert_path, tls_key_path, cache, + cache_size, + cache_path, }) } diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 6bd59285..73c1cc17 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -7,6 +7,8 @@ mod app; mod handlers; mod utils; +use indy_vdr::pool::cache::storage::OrderedHashMap; +use sled; use std::cell::RefCell; use std::collections::HashMap; #[cfg(unix)] @@ -428,10 +430,26 @@ where I::Error: Into>, { let cache = if config.cache { - let mem_storage = - CacheStrategyTTL::new(1024, Duration::from_secs(86400).as_millis(), None, None); - let mem_cache = Cache::new(mem_storage); - Some(mem_cache) + let storage_type = match config.cache_path { + Some(path) => { + let storage = OrderedHashMap::new( + sled::open(path.clone()) + .expect(format!("Invalid cache location: {}", path).as_str()) + .open_tree(path.clone()) + .expect(format!("Invalid cache location: {}", path).as_str()), + ); + Some(storage) + } + None => None, + }; + let strategy = CacheStrategyTTL::new( + config.cache_size, + Duration::from_secs(86400).as_millis(), + storage_type, + None, + ); + let cache = Cache::new(strategy); + Some(cache) } else { None }; diff --git a/libindy_vdr/src/pool/cache/storage.rs b/libindy_vdr/src/pool/cache/storage.rs index 2775b06e..38a183f6 100644 --- a/libindy_vdr/src/pool/cache/storage.rs +++ b/libindy_vdr/src/pool/cache/storage.rs @@ -12,6 +12,7 @@ pub trait OrderedStore: Send + Sync { fn get(&self, key: &O) -> Option; fn insert(&mut self, key: O, value: V) -> Option; fn remove(&mut self, key: &O) -> Option; + fn entries(&self) -> Box + '_>; } impl OrderedStore for Tree { fn len(&self) -> usize { @@ -57,6 +58,15 @@ impl OrderedStore for Tree { _ => None, } } + fn entries(&self) -> Box> { + Box::new(self.iter().filter_map(|r| { + r.ok().and_then(|(k, v)| { 
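+                // records whose bytes fail to deserialize are skipped
+                // rather than aborting the whole scan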
+ serde_json::from_slice(v.as_ref()) + .ok() + .map(|v| (u128::from_be_bytes(k.as_ref().try_into().unwrap()), v)) + }) + })) + } } impl OrderedStore for BTreeMap { fn len(&self) -> usize { @@ -77,19 +87,33 @@ impl OrderedStore for fn remove(&mut self, key: &O) -> Option { BTreeMap::remove(self, key) } + fn entries(&self) -> Box + '_> { + Box::new(self.iter().map(|(o, v)| (o.clone(), v.clone()))) + } } /// A hashmap that also maintains a BTreeMap of keys ordered by a given value /// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries +/// The Ordered Store must contain both the keys and values for persistence pub struct OrderedHashMap( ( HashMap, - Box> + Send + Sync>, + Box> + Send + Sync>, ), ); -impl OrderedHashMap { - pub fn new(order: impl OrderedStore> + 'static) -> Self { - Self((HashMap::new(), Box::new(order))) +impl + OrderedHashMap +{ + pub fn new(order: impl OrderedStore> + 'static) -> Self { + let ordered_data = Box::new(order); + let mut keyed_data = HashMap::new(); + // ordered data may be from the FS, so we need to rebuild the keyed data + ordered_data.entries().for_each(|(order, keys)| { + keys.iter().for_each(|(k, v)| { + keyed_data.insert(k.clone(), (order.clone(), v.clone())); + }) + }); + Self((keyed_data, ordered_data)) } } @@ -105,7 +129,7 @@ impl OrderedHashMap { fn get_key_value( &self, selector: Box< - dyn Fn(&Box> + Send + Sync>) -> Option<(O, Vec)>, + dyn Fn(&Box> + Send + Sync>) -> Option<(O, Vec)>, >, ) -> Option<(K, O, V)> { let (lookup, ordered_lookup) = &self.0; @@ -119,11 +143,19 @@ impl OrderedHashMap { } /// gets the entry with the lowest order value pub fn get_first_key_value(&self) -> Option<(K, O, V)> { - self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.first_key_value())) + self.get_key_value(Box::new(|ordered_lookup| { + ordered_lookup.first_key_value().and_then(|(order, keys)| { + Some((order.clone(), keys.into_iter().map(|(k, _)| k).collect())) + }) + })) } /// gets the entry with the highest order value pub fn get_last_key_value(&self) -> Option<(K, O, V)> { - self.get_key_value(Box::new(|ordered_lookup| ordered_lookup.last_key_value())) + self.get_key_value(Box::new(|ordered_lookup| { + ordered_lookup.last_key_value().and_then(|(order, keys)| { + Some((order.clone(), keys.into_iter().map(|(k, _)| k).collect())) + }) + })) } /// re-orders the entry with the given new order pub fn re_order(&mut self, key: &K, new_order: O) { @@ -138,7 +170,7 @@ impl OrderedHashMap { if let Some((old_order, _)) = lookup.get(&key) { // if entry already exists, remove it from the btree if let Some(mut keys) = order_lookup.remove(old_order) { - keys.retain(|k| *k != key); + keys.retain(|k| k.0 != key); // insert modified keys back into btree if !keys.is_empty() { order_lookup.insert(old_order.clone(), keys); @@ -147,10 +179,10 @@ impl OrderedHashMap { } let keys = match order_lookup.remove(&order) { Some(mut ks) => { - ks.push(key.clone()); + ks.push((key.clone(), value.clone())); ks } - None => vec![key.clone()], + None => vec![(key.clone(), value.clone())], }; order_lookup.insert(order.clone(), keys); lookup @@ -163,7 +195,7 @@ impl OrderedHashMap { lookup.remove(key).and_then(|(order, v)| { match order_lookup.remove(&order) { Some(mut keys) => { - keys.retain(|k| k != key); + keys.retain(|k| k.0 != *key); // insert remaining keys back in if !keys.is_empty() { order_lookup.insert(order.clone(), keys); diff --git a/libindy_vdr/src/pool/cache/strategy.rs 
b/libindy_vdr/src/pool/cache/strategy.rs index abd02190..a2e7194e 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -12,7 +12,9 @@ pub struct CacheStrategyTTL { expire_after: u128, } -impl CacheStrategyTTL { +impl + CacheStrategyTTL +{ /// Create a new cache with the given capacity and expiration time in milliseconds /// If store_type is None, the cache will use an in-memory hashmap and BTreeMap /// cache_time is used as a starting point to generate timestamps if it is None, the cache will use the UNIX_EPOCH as the cache start time @@ -99,7 +101,9 @@ pub struct CacheStrategyLRU { capacity: usize, } -impl CacheStrategyLRU { +impl + CacheStrategyLRU +{ pub fn new(capacity: usize, store_type: Option>) -> Self { Self { store: Arc::new(Mutex::new(match store_type { diff --git a/libindy_vdr/src/pool/requests/prepared_request.rs b/libindy_vdr/src/pool/requests/prepared_request.rs index 8bcede37..d1e3e036 100644 --- a/libindy_vdr/src/pool/requests/prepared_request.rs +++ b/libindy_vdr/src/pool/requests/prepared_request.rs @@ -1,4 +1,5 @@ use serde_json::{self, Value as SJsonValue}; +use sha2::{Digest, Sha256}; use super::new_request_id; use crate::common::error::prelude::*; @@ -87,7 +88,9 @@ impl PreparedRequest { .ok_or_else(|| input_err("Invalid request JSON"))?; req_map.remove("reqId"); req_map.remove("signature"); - serde_json::to_string(&req_json).with_input_err("Invalid request JSON") + let raw_key = serde_json::to_string(&req_json).with_input_err("Invalid request JSON"); + let hash = Sha256::digest(raw_key?.as_bytes()); + Ok(hex::encode(hash)) } /// Generate the normalized representation of a transaction for signing the request diff --git a/libindy_vdr/src/pool/types.rs b/libindy_vdr/src/pool/types.rs index 99d94de3..b7963c3d 100644 --- a/libindy_vdr/src/pool/types.rs +++ b/libindy_vdr/src/pool/types.rs @@ -691,7 +691,7 @@ pub struct StateProofAssertions { pub txn_root_hash: String, } -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum StateProofResult { Missing, Invalid(String, Option), @@ -719,7 +719,7 @@ impl std::fmt::Display for StateProofResult { /// Type representing timing information collected for ledger transaction request pub type TimingResult = HashMap; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RequestResultMeta { pub state_proof: HashMap, pub timing: Option, From 8838b0541f49d1d30339341b99b5e14919f899f1 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Wed, 24 Jan 2024 10:08:00 -0800 Subject: [PATCH 18/32] added optional per-insert configurations Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache/mod.rs | 18 ++- libindy_vdr/src/pool/cache/strategy.rs | 205 +++++++++++++++++++++---- libindy_vdr/src/pool/helpers.rs | 2 +- 3 files changed, 193 insertions(+), 32 deletions(-) diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index 5845be1f..f6cd8de8 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -2,6 +2,8 @@ use async_lock::RwLock; use async_trait::async_trait; use std::sync::Arc; +use self::strategy::CacheStrategyConfig; + pub mod storage; pub mod strategy; @@ -11,7 +13,12 @@ pub trait CacheStrategy: Send + Sync + 'static { async fn remove(&mut self, key: &K) -> Option; - async fn insert(&mut self, key: K, value: V) -> Option; + async fn insert( + &mut self, + key: K, + value: V, + config: Option>, + ) -> Option; } pub struct 
Cache { @@ -30,8 +37,13 @@ impl Cache { pub async fn remove(&self, key: &K) -> Option { self.storage.write().await.remove(key).await } - pub async fn insert(&self, key: K, value: V) -> Option { - self.storage.write().await.insert(key, value).await + pub async fn insert( + &self, + key: K, + value: V, + config: Option>, + ) -> Option { + self.storage.write().await.insert(key, value, config).await } } diff --git a/libindy_vdr/src/pool/cache/strategy.rs b/libindy_vdr/src/pool/cache/strategy.rs index a2e7194e..5059a634 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -3,6 +3,11 @@ use super::CacheStrategy; use async_lock::Mutex; use async_trait::async_trait; use std::{collections::BTreeMap, fmt::Debug, hash::Hash, sync::Arc, time::SystemTime}; + +pub enum CacheStrategyConfig { + LRU(u128), + TTL(u128), +} /// A simple cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. /// Uses a hashmap for lookups and a BTreeMap for ordering by age pub struct CacheStrategyTTL { @@ -62,7 +67,13 @@ impl Option { self.store.remove(key).map(|(_, v)| v) } - async fn insert(&mut self, key: K, value: V) -> Option { + + async fn insert( + &mut self, + key: K, + value: V, + config: Option>, + ) -> Option { let current_ts = SystemTime::now() .duration_since(self.create_time) .unwrap() @@ -88,7 +99,20 @@ impl opt_vec + .iter() + .find(|opt| match opt { + CacheStrategyConfig::TTL(_) => true, + _ => false, + }) + .map(|opt| match opt { + CacheStrategyConfig::TTL(ttl) => ttl.clone(), + _ => self.expire_after, + }) + .unwrap_or(self.expire_after), + _ => self.expire_after, + }; self.store.insert(key, value, current_ts + exp_offset) } } @@ -133,7 +157,12 @@ impl Option { + async fn insert( + &mut self, + key: K, + value: V, + config: Option>, + ) -> Option { // this will be O(log(n)) in all cases when cache is at capacity since we need to fetch the first and last element from the btree let mut store_lock = self.store.lock().await; let highest_lru = store_lock @@ -141,6 +170,21 @@ impl opt_vec + .iter() + .find(|opt| match opt { + CacheStrategyConfig::LRU(_) => true, + _ => false, + }) + .map(|opt| match opt { + CacheStrategyConfig::LRU(lru) => lru.clone(), + _ => highest_lru, + }) + .unwrap_or(highest_lru), + _ => highest_lru, + }; + if store_lock.len() >= self.capacity && store_lock.get(&key).is_none() { // remove the LRU item let lru_key = store_lock @@ -150,7 +194,7 @@ impl = OrderedHashMap::new(tree); let cache = Cache::new(CacheStrategyLRU::new(2, Some(storage))); block_on(async { - cache.insert("key".to_string(), "value".to_string()).await; + cache + .insert("key".to_string(), "value".to_string(), None) + .await; assert_eq!( cache.get(&"key".to_string()).await, Some("value".to_string()) ); - cache.insert("key1".to_string(), "value1".to_string()).await; - cache.insert("key2".to_string(), "value2".to_string()).await; + cache + .insert("key1".to_string(), "value1".to_string(), None) + .await; + cache + .insert("key2".to_string(), "value2".to_string(), None) + .await; assert_eq!(cache.get(&"key".to_string()).await, None); - cache.insert("key3".to_string(), "value3".to_string()).await; - cache.insert("key3".to_string(), "value3".to_string()).await; + cache + .insert("key3".to_string(), "value3".to_string(), None) + .await; + cache + .insert("key3".to_string(), "value3".to_string(), None) + .await; cache.get(&"key2".to_string()).await; // move key2 to the end of the LRU index - cache.insert("key4".to_string(), 
"value4".to_string()).await; + cache + .insert("key4".to_string(), "value4".to_string(), None) + .await; // key3 should be evicted assert_eq!( cache.remove(&"key2".to_string()).await, Some("value2".to_string()) ); assert_eq!(cache.remove(&"key3".to_string()).await, None); + // test LRU config + cache + .insert( + "key5".to_string(), + "value5".to_string(), + Some(vec![CacheStrategyConfig::LRU(1)]), + ) + .await; + cache + .insert("key6".to_string(), "value6".to_string(), None) + .await; // cleanup std::fs::remove_dir_all(cache_location).unwrap(); @@ -226,29 +317,58 @@ mod tests { fn test_cache_ttl() { let cache = Cache::new(CacheStrategyTTL::new(2, 5, None, None)); block_on(async { - cache.insert("key".to_string(), "value".to_string()).await; + cache + .insert("key".to_string(), "value".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(1)); assert_eq!( cache.get(&"key".to_string()).await, Some("value".to_string()) ); - cache.insert("key1".to_string(), "value1".to_string()).await; + cache + .insert("key1".to_string(), "value1".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(1)); - cache.insert("key2".to_string(), "value2".to_string()).await; + cache + .insert("key2".to_string(), "value2".to_string(), None) + .await; assert_eq!(cache.get(&"key".to_string()).await, None); thread::sleep(std::time::Duration::from_millis(1)); - cache.insert("key3".to_string(), "value3".to_string()).await; + cache + .insert("key3".to_string(), "value3".to_string(), None) + .await; cache.get(&"key2".to_string()).await; - cache.insert("key4".to_string(), "value4".to_string()).await; + cache + .insert("key4".to_string(), "value4".to_string(), None) + .await; // key2 should be evicted assert_eq!(cache.remove(&"key2".to_string()).await, None); assert_eq!( cache.remove(&"key3".to_string()).await, Some("value3".to_string()) ); - cache.insert("key5".to_string(), "value5".to_string()).await; + cache + .insert("key5".to_string(), "value5".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(6)); assert_eq!(cache.get(&"key5".to_string()).await, None); + // test ttl config + cache + .insert( + "key6".to_string(), + "value6".to_string(), + Some(vec![CacheStrategyConfig::TTL(1)]), + ) + .await; + cache + .insert("key7".to_string(), "value7".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(1)); + assert_eq!(cache.get(&"key6".to_string()).await, None); + assert_eq!( + cache.get(&"key7".to_string()).await, + Some("value7".to_string()) + ); }); } @@ -262,29 +382,58 @@ mod tests { let storage: OrderedHashMap = OrderedHashMap::new(tree); let cache = Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None)); block_on(async { - cache.insert("key".to_string(), "value".to_string()).await; + cache + .insert("key".to_string(), "value".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(1)); assert_eq!( cache.get(&"key".to_string()).await, Some("value".to_string()) ); - cache.insert("key1".to_string(), "value1".to_string()).await; + cache + .insert("key1".to_string(), "value1".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(1)); - cache.insert("key2".to_string(), "value2".to_string()).await; + cache + .insert("key2".to_string(), "value2".to_string(), None) + .await; assert_eq!(cache.get(&"key".to_string()).await, None); thread::sleep(std::time::Duration::from_millis(1)); - cache.insert("key3".to_string(), "value3".to_string()).await; + cache + .insert("key3".to_string(), 
"value3".to_string(), None) + .await; cache.get(&"key2".to_string()).await; - cache.insert("key4".to_string(), "value4".to_string()).await; + cache + .insert("key4".to_string(), "value4".to_string(), None) + .await; // key2 should be evicted assert_eq!(cache.remove(&"key2".to_string()).await, None); assert_eq!( cache.remove(&"key3".to_string()).await, Some("value3".to_string()) ); - cache.insert("key5".to_string(), "value5".to_string()).await; + cache + .insert("key5".to_string(), "value5".to_string(), None) + .await; thread::sleep(std::time::Duration::from_millis(6)); assert_eq!(cache.get(&"key5".to_string()).await, None); + // test ttl config + cache + .insert( + "key6".to_string(), + "value6".to_string(), + Some(vec![CacheStrategyConfig::TTL(1)]), + ) + .await; + cache + .insert("key7".to_string(), "value7".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(1)); + assert_eq!(cache.get(&"key6".to_string()).await, None); + assert_eq!( + cache.get(&"key7".to_string()).await, + Some("value7".to_string()) + ); // cleanup std::fs::remove_dir_all(cache_location).unwrap(); diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 058270a3..ca75acf6 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -242,7 +242,7 @@ pub async fn perform_ledger_request( } if let Some(cache) = cache_opt { cache - .insert(cache_key, (response.to_string(), meta.clone())) + .insert(cache_key, (response.to_string(), meta.clone()), None) .await; } } From 9922abad02415b49b8570dfb2ca183a17c7c61f4 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 25 Jan 2024 09:35:42 -0800 Subject: [PATCH 19/32] code touchup Signed-off-by: wadeking98 --- indy-vdr-proxy/Cargo.toml | 1 - indy-vdr-proxy/src/main.rs | 9 +++------ libindy_vdr/src/pool/cache/storage.rs | 24 ++++++++++++++++-------- libindy_vdr/src/pool/cache/strategy.rs | 20 ++++++-------------- libindy_vdr/src/pool/helpers.rs | 5 ++--- 5 files changed, 27 insertions(+), 32 deletions(-) diff --git a/indy-vdr-proxy/Cargo.toml b/indy-vdr-proxy/Cargo.toml index d6c9587e..9111ad47 100644 --- a/indy-vdr-proxy/Cargo.toml +++ b/indy-vdr-proxy/Cargo.toml @@ -34,7 +34,6 @@ serde_json = "1.0" tokio = { version = "1.0", features = ["macros", "rt-multi-thread", "signal"] } tokio-rustls = { version = "0.24", optional = true } url = "2.2.2" -sled = "0.34.7" [target.'cfg(unix)'.dependencies] hyper-unix-connector = "0.2" diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 73c1cc17..7f428fd5 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -7,8 +7,7 @@ mod app; mod handlers; mod utils; -use indy_vdr::pool::cache::storage::OrderedHashMap; -use sled; +use indy_vdr::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap}; use std::cell::RefCell; use std::collections::HashMap; #[cfg(unix)] @@ -433,10 +432,8 @@ where let storage_type = match config.cache_path { Some(path) => { let storage = OrderedHashMap::new( - sled::open(path.clone()) - .expect(format!("Invalid cache location: {}", path).as_str()) - .open_tree(path.clone()) - .expect(format!("Invalid cache location: {}", path).as_str()), + new_fs_ordered_store(path.clone()) + .expect(format!("Error creating cache at {}", path).as_str()), ); Some(storage) } diff --git a/libindy_vdr/src/pool/cache/storage.rs b/libindy_vdr/src/pool/cache/storage.rs index 38a183f6..d1766aa3 100644 --- a/libindy_vdr/src/pool/cache/storage.rs +++ b/libindy_vdr/src/pool/cache/storage.rs @@ -4,7 +4,15 @@ use std::{ 
collections::{BTreeMap, HashMap}, hash::Hash, }; - +pub fn new_fs_ordered_store( + path: String, +) -> Result, sled::Error> { + sled::open(path.clone()).and_then(|db| db.open_tree(path)) +} +pub fn new_mem_ordered_store( +) -> impl OrderedStore { + BTreeMap::new() +} pub trait OrderedStore: Send + Sync { fn len(&self) -> usize; fn first_key_value(&self) -> Option<(O, V)>; @@ -128,9 +136,9 @@ impl OrderedHashMap { } fn get_key_value( &self, - selector: Box< - dyn Fn(&Box> + Send + Sync>) -> Option<(O, Vec)>, - >, + selector: impl FnOnce( + &Box> + Send + Sync>, + ) -> Option<(O, Vec)>, ) -> Option<(K, O, V)> { let (lookup, ordered_lookup) = &self.0; selector(ordered_lookup).and_then(|(_, keys)| { @@ -143,19 +151,19 @@ impl OrderedHashMap { } /// gets the entry with the lowest order value pub fn get_first_key_value(&self) -> Option<(K, O, V)> { - self.get_key_value(Box::new(|ordered_lookup| { + self.get_key_value(|ordered_lookup| { ordered_lookup.first_key_value().and_then(|(order, keys)| { Some((order.clone(), keys.into_iter().map(|(k, _)| k).collect())) }) - })) + }) } /// gets the entry with the highest order value pub fn get_last_key_value(&self) -> Option<(K, O, V)> { - self.get_key_value(Box::new(|ordered_lookup| { + self.get_key_value(|ordered_lookup| { ordered_lookup.last_key_value().and_then(|(order, keys)| { Some((order.clone(), keys.into_iter().map(|(k, _)| k).collect())) }) - })) + }) } /// re-orders the entry with the given new order pub fn re_order(&mut self, key: &K, new_order: O) { diff --git a/libindy_vdr/src/pool/cache/strategy.rs b/libindy_vdr/src/pool/cache/strategy.rs index 5059a634..972c7e0b 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -102,13 +102,9 @@ impl opt_vec .iter() - .find(|opt| match opt { - CacheStrategyConfig::TTL(_) => true, - _ => false, - }) - .map(|opt| match opt { - CacheStrategyConfig::TTL(ttl) => ttl.clone(), - _ => self.expire_after, + .find_map(|opt| match opt { + CacheStrategyConfig::TTL(ttl) => Some(ttl.clone()), + _ => None, }) .unwrap_or(self.expire_after), _ => self.expire_after, @@ -173,13 +169,9 @@ impl opt_vec .iter() - .find(|opt| match opt { - CacheStrategyConfig::LRU(_) => true, - _ => false, - }) - .map(|opt| match opt { - CacheStrategyConfig::LRU(lru) => lru.clone(), - _ => highest_lru, + .find_map(|opt| match opt { + CacheStrategyConfig::LRU(lru) => Some(lru.clone()), + _ => None, }) .unwrap_or(highest_lru), _ => highest_lru, diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index ca75acf6..12095944 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -20,13 +20,12 @@ use crate::utils::base58; /// Perform a pool ledger status request to see if catchup is required pub async fn perform_pool_status_request( pool: &T, - cache: Option>, ) -> VdrResult<(RequestResult>, RequestResultMeta)> { let (mt_root, mt_size) = pool.get_merkle_tree_info(); if pool.get_refreshed() { trace!("Performing fast status check"); - match perform_get_txn(pool, LedgerType::POOL.to_id(), 1, cache).await { + match perform_get_txn(pool, LedgerType::POOL.to_id(), 1, None).await { Ok((RequestResult::Reply(reply), res_meta)) => { if let Ok(body) = serde_json::from_str::(&reply) { if let (Some(status_root_hash), Some(status_txn_count)) = ( @@ -94,7 +93,7 @@ pub async fn perform_pool_catchup_request( pub async fn perform_refresh( pool: &T, ) -> VdrResult<(Option, RequestResultMeta)> { - let (result, meta) = perform_pool_status_request(pool, None).await?; + 
let (result, meta) = perform_pool_status_request(pool).await?; trace!("Got status result: {:?}", &result); match result { RequestResult::Reply(target) => match target { From 3875faec45e3ecb99af3514359edc5de5ed6871a Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 25 Jan 2024 11:18:54 -0800 Subject: [PATCH 20/32] prevented caching results with null seqno Signed-off-by: wadeking98 --- libindy_vdr/src/pool/helpers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libindy_vdr/src/pool/helpers.rs b/libindy_vdr/src/pool/helpers.rs index 12095944..e5f1aea0 100644 --- a/libindy_vdr/src/pool/helpers.rs +++ b/libindy_vdr/src/pool/helpers.rs @@ -235,7 +235,7 @@ pub async fn perform_ledger_request( // check and made sure data is not null before caching let serialized = serde_json::from_str::(response); if let Ok(data) = serialized { - if data["result"]["data"].is_null() { + if data["result"]["data"].is_null() || data["result"]["seqNo"].is_null() { return result; } } From e26db67d7dd176a51d9735cfbcdc189710a69b59 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 25 Jan 2024 13:58:20 -0800 Subject: [PATCH 21/32] added ffi bindings for new cache Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 6 ++-- libindy_vdr/include/libindy_vdr.h | 4 +++ libindy_vdr/src/common/error.rs | 6 ++++ libindy_vdr/src/ffi/mod.rs | 30 ++++++++++++++++++- libindy_vdr/src/ffi/pool.rs | 9 ++++-- libindy_vdr/src/lib.rs | 2 +- libindy_vdr/src/pool/builder.rs | 8 +++-- libindy_vdr/tests/utils/pool.rs | 2 +- .../indy-vdr-nodejs/src/NodeJSIndyVdr.ts | 10 +++++++ .../src/library/NativeBindings.ts | 2 ++ .../indy-vdr-react-native/cpp/HostObject.cpp | 2 ++ .../cpp/include/libindy_vdr.h | 4 +++ .../indy-vdr-react-native/cpp/indyVdr.cpp | 19 ++++++++++++ .../indy-vdr-react-native/cpp/indyVdr.h | 2 ++ .../src/NativeBindings.ts | 4 +++ .../src/ReactNativeIndyVdr.ts | 8 +++++ .../indy-vdr-shared/src/types/IndyVdr.ts | 4 +++ wrappers/python/indy_vdr/__init__.py | 4 ++- wrappers/python/indy_vdr/bindings.py | 8 +++++ 19 files changed, 123 insertions(+), 11 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 7f428fd5..62ecf3cf 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -263,8 +263,8 @@ async fn create_pool( ) -> VdrResult { let pool_states = &state.borrow().pool_states; let pool_state = pool_states.get(namespace).unwrap(); - let pool = - PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone()).into_local()?; + let pool = PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone(), None) + .into_local()?; let refresh_pool = if refresh { refresh_pool(state.clone(), &pool, 0).await? 
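        // refresh_pool re-runs the status check and catch-up, yielding a rebuilt pool when new transactions are found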
} else { @@ -317,7 +317,7 @@ async fn refresh_pool( let (txns, _meta) = perform_refresh(pool).await?; if let Some(txns) = txns { - let pool = PoolBuilder::new(PoolConfig::default(), txns) + let pool = PoolBuilder::new(PoolConfig::default(), txns, None) .refreshed(true) .into_local()?; Ok(Some(pool)) diff --git a/libindy_vdr/include/libindy_vdr.h b/libindy_vdr/include/libindy_vdr.h index 7f61bb9f..a52e5b20 100644 --- a/libindy_vdr/include/libindy_vdr.h +++ b/libindy_vdr/include/libindy_vdr.h @@ -481,6 +481,10 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle, ErrorCode indy_vdr_set_cache_directory(FfiStr path); +ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms); + +ErrorCode indy_vdr_set_ledger_txn_fs_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); + ErrorCode indy_vdr_set_config(FfiStr config); ErrorCode indy_vdr_set_default_logger(void); diff --git a/libindy_vdr/src/common/error.rs b/libindy_vdr/src/common/error.rs index 98e3e6cf..b69769a9 100644 --- a/libindy_vdr/src/common/error.rs +++ b/libindy_vdr/src/common/error.rs @@ -130,6 +130,12 @@ impl From for VdrError { } } +impl From for VdrError { + fn from(err: sled::Error) -> VdrError { + VdrError::new(VdrErrorKind::FileSystem, None, Some(Box::new(err))) + } +} + impl From<(VdrErrorKind, M)> for VdrError where M: fmt::Display + Send + Sync + 'static, diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs index 6ab2cc33..873d7b3c 100644 --- a/libindy_vdr/src/ffi/mod.rs +++ b/libindy_vdr/src/ffi/mod.rs @@ -15,11 +15,14 @@ mod resolver; use crate::common::error::prelude::*; use crate::config::{PoolConfig, LIB_VERSION}; +use crate::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap}; +use crate::pool::cache::strategy::CacheStrategyTTL; +use crate::pool::cache::Cache; use crate::pool::{FilesystemCache, PoolTransactionsCache, ProtocolVersion}; use crate::utils::Validatable; use self::error::{set_last_error, ErrorCode}; -use self::pool::{POOL_CACHE, POOL_CONFIG}; +use self::pool::{LEDGER_TXN_CACHE, POOL_CACHE, POOL_CONFIG}; pub type CallbackId = i64; @@ -72,6 +75,31 @@ pub extern "C" fn indy_vdr_set_cache_directory(path: FfiStr) -> ErrorCode { } } +#[no_mangle] +pub extern "C" fn indy_vdr_set_ledger_txn_cache(capacity: usize, expire_offset: u64) -> ErrorCode { + catch_err! { + debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); + let cache = Cache::new(CacheStrategyTTL::new(capacity, expire_offset.into(), None, None)); + *write_lock!(LEDGER_TXN_CACHE)? = Some(cache); + Ok(ErrorCode::Success) + } +} + +#[no_mangle] +pub extern "C" fn indy_vdr_set_ledger_txn_fs_cache( + capacity: usize, + expire_offset: u64, + path: FfiStr, +) -> ErrorCode { + catch_err! { + debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); + let store = OrderedHashMap::new(new_fs_ordered_store(path.into())?); + let cache = Cache::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None)); + *write_lock!(LEDGER_TXN_CACHE)? = Some(cache); + Ok(ErrorCode::Success) + } +} + #[no_mangle] pub extern "C" fn indy_vdr_set_socks_proxy(socks_proxy: FfiStr) -> ErrorCode { catch_err! 
{ diff --git a/libindy_vdr/src/ffi/pool.rs b/libindy_vdr/src/ffi/pool.rs index a10bb4fc..5d87a5e1 100644 --- a/libindy_vdr/src/ffi/pool.rs +++ b/libindy_vdr/src/ffi/pool.rs @@ -9,6 +9,7 @@ use once_cell::sync::Lazy; use crate::common::error::prelude::*; use crate::common::handle::ResourceHandle; use crate::config::PoolConfig; +use crate::pool::cache::Cache; use crate::pool::{ InMemoryCache, PoolBuilder, PoolRunner, PoolTransactions, PoolTransactionsCache, RequestMethod, RequestResult, RequestResultMeta, @@ -40,6 +41,9 @@ pub static POOLS: Lazy>> = pub static POOL_CACHE: Lazy>>> = Lazy::new(|| RwLock::new(Some(Arc::new(InMemoryCache::new())))); +pub static LEDGER_TXN_CACHE: Lazy>>> = + Lazy::new(|| RwLock::new(None)); + #[derive(Serialize, Deserialize, Debug, Clone)] struct PoolCreateParams { #[serde(skip_serializing_if = "Option::is_none")] @@ -76,7 +80,8 @@ pub extern "C" fn indy_vdr_pool_create(params: FfiStr, handle_p: *mut PoolHandle } } let config = read_lock!(POOL_CONFIG)?.clone(); - let runner = PoolBuilder::new(config, txns.clone()).node_weights(params.node_weights.clone()).refreshed(cached).into_runner()?; + let txn_cache = read_lock!(LEDGER_TXN_CACHE)?.clone(); + let runner = PoolBuilder::new(config, txns.clone(), txn_cache).node_weights(params.node_weights.clone()).refreshed(cached).into_runner()?; let handle = PoolHandle::next(); let mut pools = write_lock!(POOLS)?; pools.insert(handle, PoolInstance { runner, init_txns: txns, node_weights: params.node_weights }); @@ -102,7 +107,7 @@ fn handle_pool_refresh( cache.update(&init_txns, latest_txns)?; } if let Some(new_txns) = new_txns { - let runner = PoolBuilder::new(config, new_txns).node_weights(node_weights).refreshed(true).into_runner()?; + let runner = PoolBuilder::new(config, new_txns, None).node_weights(node_weights).refreshed(true).into_runner()?; let mut pools = write_lock!(POOLS)?; if let Entry::Occupied(mut entry) = pools.entry(pool_handle) { entry.get_mut().runner = runner; diff --git a/libindy_vdr/src/lib.rs b/libindy_vdr/src/lib.rs index 16c5c0c1..785d1a28 100644 --- a/libindy_vdr/src/lib.rs +++ b/libindy_vdr/src/lib.rs @@ -26,7 +26,7 @@ //! let txns = PoolTransactions::from_json_file("./genesis.txn").unwrap(); //! //! // Create a PoolBuilder instance -//! let pool_builder = PoolBuilder::new(PoolConfig::default(), txns); +//! let pool_builder = PoolBuilder::new(PoolConfig::default(), txns, None); //! // Convert into a thread-local Pool instance //! let pool = pool_builder.into_local().unwrap(); //! diff --git a/libindy_vdr/src/pool/builder.rs b/libindy_vdr/src/pool/builder.rs index c406f2d6..1f43275d 100644 --- a/libindy_vdr/src/pool/builder.rs +++ b/libindy_vdr/src/pool/builder.rs @@ -22,13 +22,17 @@ pub struct PoolBuilder { impl PoolBuilder { /// Create a new `PoolBuilder` instance. 
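+    ///
+    /// An optional `cache` may be supplied; when present it is consulted for
+    /// read requests performed through the built pool, and `None` disables
+    /// caching. A sketch of a cache-enabled builder (sizing and TTL are
+    /// illustrative):
+    ///
+    /// ```no_run
+    /// # use indy_vdr::config::PoolConfig;
+    /// # use indy_vdr::pool::{PoolBuilder, PoolTransactions};
+    /// # use indy_vdr::pool::cache::{strategy::CacheStrategyTTL, Cache};
+    /// let txns = PoolTransactions::from_json_file("./genesis.txn").unwrap();
+    /// let cache = Cache::new(CacheStrategyTTL::new(1024, 86_400_000, None, None));
+    /// let pool = PoolBuilder::new(PoolConfig::default(), txns, Some(cache))
+    ///     .into_local()
+    ///     .unwrap();
+    /// ```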
- pub fn new(config: PoolConfig, transactions: PoolTransactions) -> Self { + pub fn new( + config: PoolConfig, + transactions: PoolTransactions, + cache: Option>, + ) -> Self { Self { config, transactions, node_weights: None, refreshed: false, - cache: None, + cache, } } diff --git a/libindy_vdr/tests/utils/pool.rs b/libindy_vdr/tests/utils/pool.rs index 283bf874..8948ab68 100644 --- a/libindy_vdr/tests/utils/pool.rs +++ b/libindy_vdr/tests/utils/pool.rs @@ -42,7 +42,7 @@ impl TestPool { let pool_transactions = PoolTransactions::from_json_transactions(default_transactions()).unwrap(); - let pool = PoolBuilder::new(PoolConfig::default(), pool_transactions) + let pool = PoolBuilder::new(PoolConfig::default(), pool_transactions, None) .into_shared() .unwrap(); diff --git a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts index c82e84f4..62c2a50d 100644 --- a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts +++ b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts @@ -141,6 +141,16 @@ export class NodeJSIndyVdr implements IndyVdr { this.handleError(this.nativeIndyVdr.indy_vdr_set_cache_directory(path)) } + public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void { + const { capacity, expiry_offset_ms } = serializeArguments(options) + this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms)) + } + + public setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void { + const { capacity, expiry_offset_ms, path } = serializeArguments(options) + this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_fs_cache(capacity, expiry_offset_ms, path)) + } + public setDefaultLogger(): void { this.handleError(this.nativeIndyVdr.indy_vdr_set_default_logger()) } diff --git a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts index 264602c8..ac222595 100644 --- a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts +++ b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts @@ -3,6 +3,8 @@ import type { ByteBuffer } from '../ffi' export interface NativeMethods { indy_vdr_set_config: (arg0: string) => number indy_vdr_set_cache_directory: (arg0: string) => number + indy_vdr_set_ledger_txn_cache: (arg0: number, arg1: number) => number + indy_vdr_set_ledger_txn_fs_cache: (arg0: number, arg1: number, arg2: string) => number indy_vdr_set_default_logger: () => number indy_vdr_set_protocol_version: (arg0: number) => number indy_vdr_set_socks_proxy: (arg0: string) => number diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp b/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp index dca84796..3810d878 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp +++ b/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp @@ -13,6 +13,8 @@ FunctionMap IndyVdrTurboModuleHostObject::functionMapping(jsi::Runtime &rt) { fMap.insert(std::make_tuple("getCurrentError", &indyVdr::getCurrentError)); fMap.insert(std::make_tuple("setConfig", &indyVdr::setConfig)); fMap.insert(std::make_tuple("setCacheDirectory", &indyVdr::setCacheDirectory)); + fMap.insert(std::make_tuple("setLedgerTxnCache", &indyVdr::setLedgerTxnCache)); + fMap.insert(std::make_tuple("setLedgerTxnFsCache", &indyVdr::setLedgerTxnFsCache)); fMap.insert(std::make_tuple("setDefaultLogger", &indyVdr::setDefaultLogger)); 
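On the native side these wrapper methods land in the FFI entry points shown earlier; the filesystem variant is roughly equivalent to the following Rust setup, mirroring `indy_vdr_set_ledger_txn_fs_cache` (path and sizing are illustrative; the `?` assumes a surrounding function returning a `VdrResult`):

    use indy_vdr::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap};
    use indy_vdr::pool::cache::{strategy::CacheStrategyTTL, Cache};

    // sled-backed store: cached transactions survive process restarts
    let store = OrderedHashMap::new(new_fs_ordered_store("txn-cache".to_string())?);
    let cache = Cache::new(CacheStrategyTTL::new(1000, 86_400_000, Some(store), None));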
fMap.insert( std::make_tuple("setProtocolVersion", &indyVdr::setProtocolVersion)); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h index 7f61bb9f..a52e5b20 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h +++ b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h @@ -481,6 +481,10 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle, ErrorCode indy_vdr_set_cache_directory(FfiStr path); +ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms); + +ErrorCode indy_vdr_set_ledger_txn_fs_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); + ErrorCode indy_vdr_set_config(FfiStr config); ErrorCode indy_vdr_set_default_logger(void); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp index 06c1ce7d..02ebb322 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp +++ b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp @@ -32,6 +32,25 @@ jsi::Value setCacheDirectory(jsi::Runtime &rt, jsi::Object options) { return createReturnValue(rt, code, nullptr); }; +jsi::Value setLedgerTxnFsCache(jsi::Runtime &rt, jsi::Object options) { + auto capacity = jsiToValue(rt, options, "capacity"); + auto expiry_offset_ms = jsiToValue(rt, options, "expiry_offset_ms"); + auto path = jsiToValue(rt, options, "path"); + + ErrorCode code = indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms, path.c_str()); + + return createReturnValue(rt, code, nullptr); +}; + +jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options) { + auto capacity = jsiToValue(rt, options, "capacity"); + auto expiry_offset_ms = jsiToValue(rt, options, "expiry_offset_ms"); + + ErrorCode code = indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms); + + return createReturnValue(rt, code, nullptr); +}; + jsi::Value setDefaultLogger(jsi::Runtime &rt, jsi::Object options) { ErrorCode code = indy_vdr_set_default_logger(); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h index fa48a925..4baa5426 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h +++ b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h @@ -13,6 +13,8 @@ jsi::Value version(jsi::Runtime &rt, jsi::Object options); jsi::Value getCurrentError(jsi::Runtime &rt, jsi::Object options); jsi::Value setConfig(jsi::Runtime &rt, jsi::Object options); jsi::Value setCacheDirectory(jsi::Runtime &rt, jsi::Object options); +jsi::Value setLedgerTxnFsCache(jsi::Runtime &rt, jsi::Object options); +jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options); jsi::Value setDefaultLogger(jsi::Runtime &rt, jsi::Object options); jsi::Value setProtocolVersion(jsi::Runtime &rt, jsi::Object options); jsi::Value setSocksProxy(jsi::Runtime &rt, jsi::Object options); diff --git a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts index 7a7f095e..f87fadd9 100644 --- a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts +++ b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts @@ -13,6 +13,10 @@ export interface NativeBindings { setCacheDirectory(options: { path: string }): ReturnObject + setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): ReturnObject + + setLedgerTxnFsCache(options: { 
capacity: number; expiry_offset_ms: number; path: string }): ReturnObject + setDefaultLogger(options: Record): ReturnObject setProtocolVersion(options: { version: number }): ReturnObject diff --git a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts index e89f3f3f..ec03bc17 100644 --- a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts +++ b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts @@ -110,6 +110,14 @@ export class ReactNativeIndyVdr implements IndyVdr { const serializedOptions = serializeArguments(options) this.indyVdr.setCacheDirectory(serializedOptions) } + public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void { + const serializedOptions = serializeArguments(options) + this.indyVdr.setLedgerTxnCache(serializedOptions) + } + public setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void { + const serializedOptions = serializeArguments(options) + this.indyVdr.setLedgerTxnFsCache(serializedOptions) + } public setDefaultLogger(): void { this.handleError(this.indyVdr.setDefaultLogger({})) diff --git a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts index bee59f8a..1c71e4d5 100644 --- a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts +++ b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts @@ -49,6 +49,10 @@ export interface IndyVdr { setCacheDirectory(options: { path: string }): void + setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void + + setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void + setDefaultLogger(): void setProtocolVersion(options: { version: number }): void diff --git a/wrappers/python/indy_vdr/__init__.py b/wrappers/python/indy_vdr/__init__.py index 8e41f778..a62e2252 100644 --- a/wrappers/python/indy_vdr/__init__.py +++ b/wrappers/python/indy_vdr/__init__.py @@ -1,6 +1,6 @@ """indy-vdr Python wrapper library""" -from .bindings import set_cache_directory, set_config, set_protocol_version, version +from .bindings import set_cache_directory, set_ledger_txn_fs_cache, set_ledger_txn_cache, set_config, set_protocol_version, version from .error import VdrError, VdrErrorCode from .ledger import LedgerType from .pool import Pool, open_pool @@ -10,6 +10,8 @@ __all__ = [ "open_pool", "set_cache_directory", + "set_ledger_txn_fs_cache", + "set_ledger_txn_cache", "set_config", "set_protocol_version", "set_socks_proxy", diff --git a/wrappers/python/indy_vdr/bindings.py b/wrappers/python/indy_vdr/bindings.py index 488a8ea9..c4faa74e 100644 --- a/wrappers/python/indy_vdr/bindings.py +++ b/wrappers/python/indy_vdr/bindings.py @@ -425,6 +425,14 @@ def set_cache_directory(path: str): """Set the library configuration.""" do_call("indy_vdr_set_cache_directory", encode_str(path)) +def set_ledger_txn_cache(capacity: int, expiry_offset_ms: int): + """Set the library configuration.""" + do_call("indy_vdr_set_ledger_txn_cache", c_size_t(capacity), c_ulong(expiry_offset_ms)) + +def set_ledger_txn_fs_cache(capacity: int, expiry_offset_ms: int, path: str): + """Set the library configuration.""" + do_call("indy_vdr_set_ledger_txn_fs_cache", c_size_t(capacity), c_ulong(expiry_offset_ms), encode_str(path)) + def set_config(config: dict): """Set the library configuration.""" From 0291d7b8ad800affdb831f85ec8c6def9246a58e Mon Sep 17 00:00:00 2001 From: 
wadeking98 Date: Thu, 25 Jan 2024 16:35:48 -0800 Subject: [PATCH 22/32] cleaned up pool creation with cache Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 6 +++--- libindy_vdr/src/ffi/pool.rs | 5 +++-- libindy_vdr/src/lib.rs | 2 +- libindy_vdr/src/pool/builder.rs | 15 ++++++--------- libindy_vdr/tests/utils/pool.rs | 2 +- 5 files changed, 14 insertions(+), 16 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 62ecf3cf..7f428fd5 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -263,8 +263,8 @@ async fn create_pool( ) -> VdrResult { let pool_states = &state.borrow().pool_states; let pool_state = pool_states.get(namespace).unwrap(); - let pool = PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone(), None) - .into_local()?; + let pool = + PoolBuilder::new(PoolConfig::default(), pool_state.transactions.clone()).into_local()?; let refresh_pool = if refresh { refresh_pool(state.clone(), &pool, 0).await? } else { @@ -317,7 +317,7 @@ async fn refresh_pool( let (txns, _meta) = perform_refresh(pool).await?; if let Some(txns) = txns { - let pool = PoolBuilder::new(PoolConfig::default(), txns, None) + let pool = PoolBuilder::new(PoolConfig::default(), txns) .refreshed(true) .into_local()?; Ok(Some(pool)) diff --git a/libindy_vdr/src/ffi/pool.rs b/libindy_vdr/src/ffi/pool.rs index 5d87a5e1..19f0aaa2 100644 --- a/libindy_vdr/src/ffi/pool.rs +++ b/libindy_vdr/src/ffi/pool.rs @@ -81,7 +81,7 @@ pub extern "C" fn indy_vdr_pool_create(params: FfiStr, handle_p: *mut PoolHandle } let config = read_lock!(POOL_CONFIG)?.clone(); let txn_cache = read_lock!(LEDGER_TXN_CACHE)?.clone(); - let runner = PoolBuilder::new(config, txns.clone(), txn_cache).node_weights(params.node_weights.clone()).refreshed(cached).into_runner()?; + let runner = PoolBuilder::new(config, txns.clone()).node_weights(params.node_weights.clone()).refreshed(cached).into_runner(txn_cache)?; let handle = PoolHandle::next(); let mut pools = write_lock!(POOLS)?; pools.insert(handle, PoolInstance { runner, init_txns: txns, node_weights: params.node_weights }); @@ -107,7 +107,8 @@ fn handle_pool_refresh( cache.update(&init_txns, latest_txns)?; } if let Some(new_txns) = new_txns { - let runner = PoolBuilder::new(config, new_txns, None).node_weights(node_weights).refreshed(true).into_runner()?; + let txn_cache = read_lock!(LEDGER_TXN_CACHE)?.clone(); + let runner = PoolBuilder::new(config, new_txns).node_weights(node_weights).refreshed(true).into_runner(txn_cache)?; let mut pools = write_lock!(POOLS)?; if let Entry::Occupied(mut entry) = pools.entry(pool_handle) { entry.get_mut().runner = runner; diff --git a/libindy_vdr/src/lib.rs b/libindy_vdr/src/lib.rs index 785d1a28..16c5c0c1 100644 --- a/libindy_vdr/src/lib.rs +++ b/libindy_vdr/src/lib.rs @@ -26,7 +26,7 @@ //! let txns = PoolTransactions::from_json_file("./genesis.txn").unwrap(); //! //! // Create a PoolBuilder instance -//! let pool_builder = PoolBuilder::new(PoolConfig::default(), txns, None); +//! let pool_builder = PoolBuilder::new(PoolConfig::default(), txns); //! // Convert into a thread-local Pool instance //! let pool = pool_builder.into_local().unwrap(); //! 
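
For orientation, the net effect of this patch on callers, as a minimal sketch: it assumes a genesis.txn in the working directory and the crate imported as indy_vdr, mirroring the doc-test above.

    use indy_vdr::config::PoolConfig;
    use indy_vdr::pool::{PoolBuilder, PoolTransactions};

    let txns = PoolTransactions::from_json_file("./genesis.txn").unwrap();
    // Thread-local pools no longer mention the cache at all...
    let pool = PoolBuilder::new(PoolConfig::default(), txns.clone())
        .into_local()
        .unwrap();
    // ...while a PoolRunner opts in (or out, with None) only when spawned.
    let runner = PoolBuilder::new(PoolConfig::default(), txns)
        .into_runner(None)
        .unwrap();
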
diff --git a/libindy_vdr/src/pool/builder.rs b/libindy_vdr/src/pool/builder.rs index 1f43275d..c6a6a2f7 100644 --- a/libindy_vdr/src/pool/builder.rs +++ b/libindy_vdr/src/pool/builder.rs @@ -17,22 +17,16 @@ pub struct PoolBuilder { transactions: PoolTransactions, node_weights: Option>, refreshed: bool, - cache: Option>, } impl PoolBuilder { /// Create a new `PoolBuilder` instance. - pub fn new( - config: PoolConfig, - transactions: PoolTransactions, - cache: Option>, - ) -> Self { + pub fn new(config: PoolConfig, transactions: PoolTransactions) -> Self { Self { config, transactions, node_weights: None, refreshed: false, - cache, } } @@ -75,7 +69,10 @@ impl PoolBuilder { /// Create a `PoolRunner` instance from the builder, to handle pool interaction /// in a dedicated thread. - pub fn into_runner(self) -> VdrResult { + pub fn into_runner( + self, + cache: Option>, + ) -> VdrResult { let merkle_tree = self.transactions.merkle_tree()?; Ok(PoolRunner::new( self.config, @@ -83,7 +80,7 @@ impl PoolBuilder { MakeLocal(ZMQNetworkerFactory {}), self.node_weights, self.refreshed, - self.cache, + cache, )) } } diff --git a/libindy_vdr/tests/utils/pool.rs b/libindy_vdr/tests/utils/pool.rs index 8948ab68..283bf874 100644 --- a/libindy_vdr/tests/utils/pool.rs +++ b/libindy_vdr/tests/utils/pool.rs @@ -42,7 +42,7 @@ impl TestPool { let pool_transactions = PoolTransactions::from_json_transactions(default_transactions()).unwrap(); - let pool = PoolBuilder::new(PoolConfig::default(), pool_transactions, None) + let pool = PoolBuilder::new(PoolConfig::default(), pool_transactions) .into_shared() .unwrap(); From 5c6f58c46e465aafdb6af234da0062049ff67f31 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 26 Jan 2024 12:57:50 -0800 Subject: [PATCH 23/32] merged ttl and lru cache strategies Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache/mod.rs | 22 +- libindy_vdr/src/pool/cache/strategy.rs | 435 ++++++------------------- 2 files changed, 107 insertions(+), 350 deletions(-) diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index f6cd8de8..76174e7b 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -2,8 +2,6 @@ use async_lock::RwLock; use async_trait::async_trait; use std::sync::Arc; -use self::strategy::CacheStrategyConfig; - pub mod storage; pub mod strategy; @@ -13,12 +11,7 @@ pub trait CacheStrategy: Send + Sync + 'static { async fn remove(&mut self, key: &K) -> Option; - async fn insert( - &mut self, - key: K, - value: V, - config: Option>, - ) -> Option; + async fn insert(&mut self, key: K, value: V, custom_exp_offset: Option) -> Option; } pub struct Cache { @@ -37,13 +30,12 @@ impl Cache { pub async fn remove(&self, key: &K) -> Option { self.storage.write().await.remove(key).await } - pub async fn insert( - &self, - key: K, - value: V, - config: Option>, - ) -> Option { - self.storage.write().await.insert(key, value, config).await + pub async fn insert(&self, key: K, value: V, custom_exp_offset: Option) -> Option { + self.storage + .write() + .await + .insert(key, value, custom_exp_offset) + .await } } diff --git a/libindy_vdr/src/pool/cache/strategy.rs b/libindy_vdr/src/pool/cache/strategy.rs index 972c7e0b..2f273a8e 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -4,14 +4,20 @@ use async_lock::Mutex; use async_trait::async_trait; use std::{collections::BTreeMap, fmt::Debug, hash::Hash, sync::Arc, time::SystemTime}; -pub enum CacheStrategyConfig { - LRU(u128), - TTL(u128), 
+/// A simple struct to hold a value and the expiry offset +/// needed because items can be inserted with custom ttl values +/// that may need to be updated/reordered +#[derive(Clone, Serialize, Deserialize)] +pub struct TTLCacheItem { + value: V, + expire_offset: u128, } + /// A simple cache that uses timestamps to expire entries. Once the cache fills up, the oldest entry is evicted. +/// Also uses LRU to evict entries that have not been accessed recently. /// Uses a hashmap for lookups and a BTreeMap for ordering by age pub struct CacheStrategyTTL { - store: OrderedHashMap, + store: Arc>>>, capacity: usize, create_time: SystemTime, expire_after: u128, @@ -26,14 +32,14 @@ impl>, + store_type: Option>>, create_time: Option, ) -> Self { Self { - store: match store_type { + store: Arc::new(Mutex::new(match store_type { Some(store) => store, None => OrderedHashMap::new(BTreeMap::new()), - }, + })), capacity, create_time: match create_time { Some(time) => time, @@ -49,144 +55,69 @@ impl for CacheStrategyTTL { async fn get(&self, key: &K) -> Option { - match self.store.get(key) { + let mut store_lock = self.store.lock().await; + let current_time = SystemTime::now() + .duration_since(self.create_time) + .unwrap() + .as_millis(); + let get_res = match store_lock.get(key) { Some((ts, v)) => { - let current_time = SystemTime::now() - .duration_since(self.create_time) - .unwrap() - .as_millis(); if current_time < *ts { - Some(v.clone()) + Some((*ts, v.clone())) } else { + store_lock.remove(key); None } } None => None, + }; + // update the timestamp if the entry is still valid + if let Some((_, ref v)) = get_res { + store_lock.re_order(key, current_time + v.expire_offset); } + get_res.map(|(_, v)| v.value) } async fn remove(&mut self, key: &K) -> Option { - self.store.remove(key).map(|(_, v)| v) + self.store.lock().await.remove(key).map(|(_, v)| v.value) } - async fn insert( - &mut self, - key: K, - value: V, - config: Option>, - ) -> Option { + async fn insert(&mut self, key: K, value: V, custom_exp_offset: Option) -> Option { + let mut store_lock = self.store.lock().await; let current_ts = SystemTime::now() .duration_since(self.create_time) .unwrap() .as_millis(); // remove expired entries - while self.store.len() > 0 - && self - .store + while store_lock.len() > 0 + && store_lock .get_first_key_value() .map(|(_, ts, _)| ts.clone() < current_ts) .unwrap_or(false) { - self.store.remove_first(); + store_lock.remove_first(); } // remove the oldest item if the cache is still full - if self.store.len() >= self.capacity && self.store.get(&key).is_none() { + if store_lock.len() >= self.capacity && store_lock.get(&key).is_none() { // remove the oldest item - let removal_key = self.store.get_first_key_value().map(|(k, _, _)| k.clone()); + let removal_key = store_lock.get_first_key_value().map(|(k, _, _)| k.clone()); if let Some(removal_key) = removal_key { - self.store.remove(&removal_key); + store_lock.remove(&removal_key); } }; - let exp_offset = match config { - Some(opt_vec) => opt_vec - .iter() - .find_map(|opt| match opt { - CacheStrategyConfig::TTL(ttl) => Some(ttl.clone()), - _ => None, - }) - .unwrap_or(self.expire_after), - _ => self.expire_after, - }; - self.store.insert(key, value, current_ts + exp_offset) - } -} - -/// A simple LRU cache. Once the cache fills up, the least recently used entry is evicted. 
-/// Uses a hashmap for lookups and a BTreeMap for ordering by least recently used -pub struct CacheStrategyLRU { - // The store is wrapped in an arc and a mutex so that get() can be immutable - store: Arc>>, - capacity: usize, -} - -impl - CacheStrategyLRU -{ - pub fn new(capacity: usize, store_type: Option>) -> Self { - Self { - store: Arc::new(Mutex::new(match store_type { - Some(store) => store, - None => OrderedHashMap::new(BTreeMap::new()), - })), - capacity, - } - } -} -#[async_trait] -impl - CacheStrategy for CacheStrategyLRU -{ - async fn get(&self, key: &K) -> Option { - // move the key to the end of the LRU index - // this is O(log(n)) - let mut store_lock = self.store.lock().await; - let highest_lru = store_lock - .get_last_key_value() - .map(|(_, ts, _)| ts + 1) - .unwrap_or(0); - store_lock.re_order(key, highest_lru); - store_lock.get(key).map(|(_, v)| v.clone()) - } - async fn remove(&mut self, key: &K) -> Option { - let mut store_lock = self.store.lock().await; - store_lock.remove(key).map(|(_, v)| v) - } - async fn insert( - &mut self, - key: K, - value: V, - config: Option>, - ) -> Option { - // this will be O(log(n)) in all cases when cache is at capacity since we need to fetch the first and last element from the btree - let mut store_lock = self.store.lock().await; - let highest_lru = store_lock - .get_last_key_value() - .map(|(_, ts, _)| ts + 1) - .unwrap_or(0); - - let insert_lru = match config { - Some(opt_vec) => opt_vec - .iter() - .find_map(|opt| match opt { - CacheStrategyConfig::LRU(lru) => Some(lru.clone()), - _ => None, - }) - .unwrap_or(highest_lru), - _ => highest_lru, - }; - - if store_lock.len() >= self.capacity && store_lock.get(&key).is_none() { - // remove the LRU item - let lru_key = store_lock - .get_first_key_value() - .map(|(k, _, _)| k.clone()) - .unwrap(); - store_lock.remove(&lru_key); - }; - - store_lock.insert(key, value, insert_lru) + let exp_offset = custom_exp_offset.unwrap_or(self.expire_after); + store_lock + .insert( + key, + TTLCacheItem { + value: value, + expire_offset: exp_offset, + }, + current_ts + exp_offset, + ) + .map(|v| v.value) } } @@ -199,235 +130,69 @@ mod tests { use crate::pool::cache::{storage::OrderedHashMap, Cache}; use futures_executor::block_on; - #[rstest] - fn test_cache_lru() { - let cache = Cache::new(CacheStrategyLRU::new(2, None)); - block_on(async { - cache - .insert("key".to_string(), "value".to_string(), None) - .await; - assert_eq!( - cache.get(&"key".to_string()).await, - Some("value".to_string()) - ); - cache - .insert("key1".to_string(), "value1".to_string(), None) - .await; - cache - .insert("key2".to_string(), "value2".to_string(), None) - .await; - assert_eq!(cache.get(&"key".to_string()).await, None); - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache.get(&"key2".to_string()).await; // move key2 to the end of the LRU index - cache - .insert("key4".to_string(), "value4".to_string(), None) - .await; - // key3 should be evicted - assert_eq!( - cache.remove(&"key2".to_string()).await, - Some("value2".to_string()) - ); - assert_eq!(cache.remove(&"key3".to_string()).await, None); - // test lru config - cache - .insert( - "key5".to_string(), - "value5".to_string(), - Some(vec![CacheStrategyConfig::LRU(1)]), - ) - .await; - cache - .insert("key6".to_string(), "value6".to_string(), None) - .await; - assert_eq!(cache.get(&"key5".to_string()).await, None); - }); - } - - #[rstest] - fn test_fs_cache_lru() 
{ - let cache_location = "test_fs_cache_lru"; - let tree = sled::open(cache_location) - .unwrap() - .open_tree(cache_location) - .unwrap(); - let storage: OrderedHashMap = OrderedHashMap::new(tree); - let cache = Cache::new(CacheStrategyLRU::new(2, Some(storage))); - block_on(async { - cache - .insert("key".to_string(), "value".to_string(), None) - .await; - assert_eq!( - cache.get(&"key".to_string()).await, - Some("value".to_string()) - ); - cache - .insert("key1".to_string(), "value1".to_string(), None) - .await; - cache - .insert("key2".to_string(), "value2".to_string(), None) - .await; - assert_eq!(cache.get(&"key".to_string()).await, None); - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache.get(&"key2".to_string()).await; // move key2 to the end of the LRU index - cache - .insert("key4".to_string(), "value4".to_string(), None) - .await; - // key3 should be evicted - assert_eq!( - cache.remove(&"key2".to_string()).await, - Some("value2".to_string()) - ); - assert_eq!(cache.remove(&"key3".to_string()).await, None); - // test LRU config - cache - .insert( - "key5".to_string(), - "value5".to_string(), - Some(vec![CacheStrategyConfig::LRU(1)]), - ) - .await; - cache - .insert("key6".to_string(), "value6".to_string(), None) - .await; - - // cleanup - std::fs::remove_dir_all(cache_location).unwrap(); - }); - } - #[rstest] fn test_cache_ttl() { let cache = Cache::new(CacheStrategyTTL::new(2, 5, None, None)); - block_on(async { - cache - .insert("key".to_string(), "value".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - assert_eq!( - cache.get(&"key".to_string()).await, - Some("value".to_string()) - ); - cache - .insert("key1".to_string(), "value1".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - cache - .insert("key2".to_string(), "value2".to_string(), None) - .await; - assert_eq!(cache.get(&"key".to_string()).await, None); - thread::sleep(std::time::Duration::from_millis(1)); - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache.get(&"key2".to_string()).await; - cache - .insert("key4".to_string(), "value4".to_string(), None) - .await; - // key2 should be evicted - assert_eq!(cache.remove(&"key2".to_string()).await, None); - assert_eq!( - cache.remove(&"key3".to_string()).await, - Some("value3".to_string()) - ); - cache - .insert("key5".to_string(), "value5".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(6)); - assert_eq!(cache.get(&"key5".to_string()).await, None); - // test ttl config - cache - .insert( - "key6".to_string(), - "value6".to_string(), - Some(vec![CacheStrategyConfig::TTL(1)]), - ) - .await; - cache - .insert("key7".to_string(), "value7".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - assert_eq!(cache.get(&"key6".to_string()).await, None); - assert_eq!( - cache.get(&"key7".to_string()).await, - Some("value7".to_string()) - ); - }); - } - - #[rstest] - fn test_fs_cache_ttl() { let cache_location = "test_fs_cache_ttl"; let tree = sled::open(cache_location) .unwrap() .open_tree(cache_location) .unwrap(); - let storage: OrderedHashMap = OrderedHashMap::new(tree); - let cache = Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None)); + let storage: OrderedHashMap> = OrderedHashMap::new(tree); + let fs_cache = Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None)); + let caches = vec![cache, 
fs_cache]; block_on(async { - cache - .insert("key".to_string(), "value".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - assert_eq!( - cache.get(&"key".to_string()).await, - Some("value".to_string()) - ); - cache - .insert("key1".to_string(), "value1".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - cache - .insert("key2".to_string(), "value2".to_string(), None) - .await; - assert_eq!(cache.get(&"key".to_string()).await, None); - thread::sleep(std::time::Duration::from_millis(1)); - cache - .insert("key3".to_string(), "value3".to_string(), None) - .await; - cache.get(&"key2".to_string()).await; - cache - .insert("key4".to_string(), "value4".to_string(), None) - .await; - // key2 should be evicted - assert_eq!(cache.remove(&"key2".to_string()).await, None); - assert_eq!( - cache.remove(&"key3".to_string()).await, - Some("value3".to_string()) - ); - cache - .insert("key5".to_string(), "value5".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(6)); - assert_eq!(cache.get(&"key5".to_string()).await, None); - // test ttl config - cache - .insert( - "key6".to_string(), - "value6".to_string(), - Some(vec![CacheStrategyConfig::TTL(1)]), - ) - .await; - cache - .insert("key7".to_string(), "value7".to_string(), None) - .await; - thread::sleep(std::time::Duration::from_millis(1)); - assert_eq!(cache.get(&"key6".to_string()).await, None); - assert_eq!( - cache.get(&"key7".to_string()).await, - Some("value7".to_string()) - ); - - // cleanup + for cache in caches { + cache + .insert("key".to_string(), "value".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(1)); + assert_eq!( + cache.get(&"key".to_string()).await, + Some("value".to_string()) + ); + cache + .insert("key1".to_string(), "value1".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(1)); + cache + .insert("key2".to_string(), "value2".to_string(), None) + .await; + assert_eq!(cache.get(&"key".to_string()).await, None); + thread::sleep(std::time::Duration::from_millis(1)); + cache + .insert("key3".to_string(), "value3".to_string(), None) + .await; + cache.get(&"key2".to_string()).await; + cache + .insert("key4".to_string(), "value4".to_string(), None) + .await; + // key2 should not be evicted because of LRU + assert_eq!( + cache.remove(&"key2".to_string()).await, + Some("value2".to_string()) + ); + // key3 should be evicted because it was bumped to back after key2 was accessed + assert_eq!(cache.get(&"key3".to_string()).await, None); + cache + .insert("key5".to_string(), "value5".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(6)); + assert_eq!(cache.get(&"key5".to_string()).await, None); + // test ttl config + cache + .insert("key6".to_string(), "value6".to_string(), Some(1)) + .await; + cache + .insert("key7".to_string(), "value7".to_string(), None) + .await; + thread::sleep(std::time::Duration::from_millis(1)); + assert_eq!(cache.get(&"key6".to_string()).await, None); + assert_eq!( + cache.get(&"key7".to_string()).await, + Some("value7".to_string()) + ); + } std::fs::remove_dir_all(cache_location).unwrap(); }); } From 20bb31e00d97ff3754d152a8f4ada8eba4a33af3 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 26 Jan 2024 15:05:52 -0800 Subject: [PATCH 24/32] removed extraneous sleeps Signed-off-by: wadeking98 --- libindy_vdr/src/pool/cache/strategy.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libindy_vdr/src/pool/cache/strategy.rs 
b/libindy_vdr/src/pool/cache/strategy.rs index 2f273a8e..0101139f 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -146,7 +146,6 @@ mod tests { cache .insert("key".to_string(), "value".to_string(), None) .await; - thread::sleep(std::time::Duration::from_millis(1)); assert_eq!( cache.get(&"key".to_string()).await, Some("value".to_string()) @@ -154,12 +153,10 @@ mod tests { cache .insert("key1".to_string(), "value1".to_string(), None) .await; - thread::sleep(std::time::Duration::from_millis(1)); cache .insert("key2".to_string(), "value2".to_string(), None) .await; assert_eq!(cache.get(&"key".to_string()).await, None); - thread::sleep(std::time::Duration::from_millis(1)); cache .insert("key3".to_string(), "value3".to_string(), None) .await; @@ -186,6 +183,7 @@ mod tests { cache .insert("key7".to_string(), "value7".to_string(), None) .await; + // wait until value6 expires thread::sleep(std::time::Duration::from_millis(1)); assert_eq!(cache.get(&"key6".to_string()).await, None); assert_eq!( From 96b2db643304794d4d20e2b69f092c1652e0ec92 Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Mon, 29 Jan 2024 12:29:03 -0800 Subject: [PATCH 25/32] created per-ledger cache for FFIs Signed-off-by: wadeking98 --- libindy_vdr/src/ffi/mod.rs | 12 ++----- libindy_vdr/src/ffi/pool.rs | 65 ++++++++++++++++++++++++++++++++++--- 2 files changed, 64 insertions(+), 13 deletions(-) diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs index 873d7b3c..db1b4831 100644 --- a/libindy_vdr/src/ffi/mod.rs +++ b/libindy_vdr/src/ffi/mod.rs @@ -15,14 +15,11 @@ mod resolver; use crate::common::error::prelude::*; use crate::config::{PoolConfig, LIB_VERSION}; -use crate::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap}; -use crate::pool::cache::strategy::CacheStrategyTTL; -use crate::pool::cache::Cache; use crate::pool::{FilesystemCache, PoolTransactionsCache, ProtocolVersion}; use crate::utils::Validatable; use self::error::{set_last_error, ErrorCode}; -use self::pool::{LEDGER_TXN_CACHE, POOL_CACHE, POOL_CONFIG}; +use self::pool::{LegerCacheConfig, LEDGER_TXN_CACHE_CONFIG, POOL_CACHE, POOL_CONFIG}; pub type CallbackId = i64; @@ -79,8 +76,7 @@ pub extern "C" fn indy_vdr_set_cache_directory(path: FfiStr) -> ErrorCode { pub extern "C" fn indy_vdr_set_ledger_txn_cache(capacity: usize, expire_offset: u64) -> ErrorCode { catch_err! { debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - let cache = Cache::new(CacheStrategyTTL::new(capacity, expire_offset.into(), None, None)); - *write_lock!(LEDGER_TXN_CACHE)? = Some(cache); + *write_lock!(LEDGER_TXN_CACHE_CONFIG)? = Some(LegerCacheConfig::new(capacity, expire_offset.into(), None)); Ok(ErrorCode::Success) } } @@ -93,9 +89,7 @@ pub extern "C" fn indy_vdr_set_ledger_txn_fs_cache( ) -> ErrorCode { catch_err! { debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - let store = OrderedHashMap::new(new_fs_ordered_store(path.into())?); - let cache = Cache::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None)); - *write_lock!(LEDGER_TXN_CACHE)? = Some(cache); + *write_lock!(LEDGER_TXN_CACHE_CONFIG)? 
= Some(LegerCacheConfig::new(capacity, expire_offset.into(), Some(path.into_string()))); Ok(ErrorCode::Success) } } diff --git a/libindy_vdr/src/ffi/pool.rs b/libindy_vdr/src/ffi/pool.rs index 19f0aaa2..7fd0a3d4 100644 --- a/libindy_vdr/src/ffi/pool.rs +++ b/libindy_vdr/src/ffi/pool.rs @@ -1,7 +1,8 @@ use std::collections::{btree_map::Entry, BTreeMap, HashMap}; use std::os::raw::c_char; +use std::path::Path; use std::sync::{Arc, RwLock}; -use std::thread; +use std::{fs, thread}; use ffi_support::{rust_string_to_c, FfiStr}; use once_cell::sync::Lazy; @@ -9,6 +10,8 @@ use once_cell::sync::Lazy; use crate::common::error::prelude::*; use crate::common::handle::ResourceHandle; use crate::config::PoolConfig; +use crate::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap}; +use crate::pool::cache::strategy::CacheStrategyTTL; use crate::pool::cache::Cache; use crate::pool::{ InMemoryCache, PoolBuilder, PoolRunner, PoolTransactions, PoolTransactionsCache, RequestMethod, @@ -31,6 +34,50 @@ pub struct PoolInstance { pub node_weights: Option, } +#[derive(Clone)] +pub struct LegerCacheConfig { + pub cache_size: usize, + pub cache_ttl: u128, + pub path: Option, +} + +impl LegerCacheConfig { + pub fn new(cache_size: usize, cache_ttl: u128, path: Option) -> Self { + Self { + cache_size, + cache_ttl, + path, + } + } + pub fn create_cache( + &self, + id: Option, + ) -> VdrResult> { + if let Some(path) = &self.path { + let full_path = Path::new(path).join(id.unwrap_or("default".to_string())); + let full_path_string = full_path + .into_os_string() + .into_string() + .unwrap_or(path.to_string()); + fs::create_dir_all(full_path_string.clone())?; + let store = OrderedHashMap::new(new_fs_ordered_store(full_path_string)?); + Ok(Cache::new(CacheStrategyTTL::new( + self.cache_size, + self.cache_ttl, + Some(store), + None, + ))) + } else { + Ok(Cache::new(CacheStrategyTTL::new( + self.cache_size, + self.cache_ttl, + None, + None, + ))) + } + } +} + pub type NodeWeights = HashMap; pub static POOL_CONFIG: Lazy> = Lazy::new(|| RwLock::new(PoolConfig::default())); @@ -41,7 +88,7 @@ pub static POOLS: Lazy>> = pub static POOL_CACHE: Lazy>>> = Lazy::new(|| RwLock::new(Some(Arc::new(InMemoryCache::new())))); -pub static LEDGER_TXN_CACHE: Lazy>>> = +pub static LEDGER_TXN_CACHE_CONFIG: Lazy>> = Lazy::new(|| RwLock::new(None)); #[derive(Serialize, Deserialize, Debug, Clone)] @@ -72,6 +119,12 @@ pub extern "C" fn indy_vdr_pool_create(params: FfiStr, handle_p: *mut PoolHandle "Invalid pool create parameters: must provide transactions or transactions_path" )); }; + let txn_cache_config = read_lock!(LEDGER_TXN_CACHE_CONFIG)?.clone(); + let txn_cache = if let Some(config) = txn_cache_config { + config.create_cache(txns.root_hash_base58().ok()).ok() + } else { + None + }; let mut cached = false; if let Some(cache) = read_lock!(POOL_CACHE)?.as_ref() { if let Some(newer_txns) = cache.resolve_latest(&txns)? 
{ @@ -80,7 +133,6 @@ pub extern "C" fn indy_vdr_pool_create(params: FfiStr, handle_p: *mut PoolHandle } } let config = read_lock!(POOL_CONFIG)?.clone(); - let txn_cache = read_lock!(LEDGER_TXN_CACHE)?.clone(); let runner = PoolBuilder::new(config, txns.clone()).node_weights(params.node_weights.clone()).refreshed(cached).into_runner(txn_cache)?; let handle = PoolHandle::next(); let mut pools = write_lock!(POOLS)?; @@ -107,7 +159,12 @@ fn handle_pool_refresh( cache.update(&init_txns, latest_txns)?; } if let Some(new_txns) = new_txns { - let txn_cache = read_lock!(LEDGER_TXN_CACHE)?.clone(); + let txn_cache_config = read_lock!(LEDGER_TXN_CACHE_CONFIG)?.clone(); + let txn_cache = if let Some(config) = txn_cache_config { + config.create_cache(init_txns.root_hash_base58().ok()).ok() + } else { + None + }; let runner = PoolBuilder::new(config, new_txns).node_weights(node_weights).refreshed(true).into_runner(txn_cache)?; let mut pools = write_lock!(POOLS)?; if let Entry::Occupied(mut entry) = pools.entry(pool_handle) { From 59244e8b850329921e12f7c65764f7e01b892a6b Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Thu, 1 Feb 2024 16:22:54 -0800 Subject: [PATCH 26/32] change to global cache with unique key Signed-off-by: wadeking98 --- indy-vdr-proxy/src/main.rs | 2 +- libindy_vdr/src/ffi/mod.rs | 11 ++-- libindy_vdr/src/ffi/pool.rs | 73 ++++---------------------- libindy_vdr/src/pool/cache/mod.rs | 28 +++++++--- libindy_vdr/src/pool/cache/strategy.rs | 19 ++++++- 5 files changed, 56 insertions(+), 77 deletions(-) diff --git a/indy-vdr-proxy/src/main.rs b/indy-vdr-proxy/src/main.rs index 7f428fd5..39df80e3 100644 --- a/indy-vdr-proxy/src/main.rs +++ b/indy-vdr-proxy/src/main.rs @@ -445,7 +445,7 @@ where storage_type, None, ); - let cache = Cache::new(strategy); + let cache = Cache::new(strategy, None); Some(cache) } else { None diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs index db1b4831..e4f29a8f 100644 --- a/libindy_vdr/src/ffi/mod.rs +++ b/libindy_vdr/src/ffi/mod.rs @@ -15,11 +15,15 @@ mod resolver; use crate::common::error::prelude::*; use crate::config::{PoolConfig, LIB_VERSION}; +use crate::pool::cache::{ + storage::{new_fs_ordered_store, OrderedHashMap}, + strategy::CacheStrategyTTL, +}; use crate::pool::{FilesystemCache, PoolTransactionsCache, ProtocolVersion}; use crate::utils::Validatable; use self::error::{set_last_error, ErrorCode}; -use self::pool::{LegerCacheConfig, LEDGER_TXN_CACHE_CONFIG, POOL_CACHE, POOL_CONFIG}; +use self::pool::{LEDGER_CACHE_STRATEGY, POOL_CACHE, POOL_CONFIG}; pub type CallbackId = i64; @@ -76,7 +80,7 @@ pub extern "C" fn indy_vdr_set_cache_directory(path: FfiStr) -> ErrorCode { pub extern "C" fn indy_vdr_set_ledger_txn_cache(capacity: usize, expire_offset: u64) -> ErrorCode { catch_err! { debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - *write_lock!(LEDGER_TXN_CACHE_CONFIG)? = Some(LegerCacheConfig::new(capacity, expire_offset.into(), None)); + *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), None, None))); Ok(ErrorCode::Success) } } @@ -89,7 +93,8 @@ pub extern "C" fn indy_vdr_set_ledger_txn_fs_cache( ) -> ErrorCode { catch_err! { debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - *write_lock!(LEDGER_TXN_CACHE_CONFIG)? 
= Some(LegerCacheConfig::new(capacity, expire_offset.into(), Some(path.into_string()))); + let store = OrderedHashMap::new(new_fs_ordered_store(path.into())?); + *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None))); Ok(ErrorCode::Success) } } diff --git a/libindy_vdr/src/ffi/pool.rs b/libindy_vdr/src/ffi/pool.rs index 7fd0a3d4..74602c8d 100644 --- a/libindy_vdr/src/ffi/pool.rs +++ b/libindy_vdr/src/ffi/pool.rs @@ -1,8 +1,7 @@ use std::collections::{btree_map::Entry, BTreeMap, HashMap}; use std::os::raw::c_char; -use std::path::Path; use std::sync::{Arc, RwLock}; -use std::{fs, thread}; +use std::thread; use ffi_support::{rust_string_to_c, FfiStr}; use once_cell::sync::Lazy; @@ -10,9 +9,7 @@ use once_cell::sync::Lazy; use crate::common::error::prelude::*; use crate::common::handle::ResourceHandle; use crate::config::PoolConfig; -use crate::pool::cache::storage::{new_fs_ordered_store, OrderedHashMap}; -use crate::pool::cache::strategy::CacheStrategyTTL; -use crate::pool::cache::Cache; +use crate::pool::cache::{Cache, CacheStrategy}; use crate::pool::{ InMemoryCache, PoolBuilder, PoolRunner, PoolTransactions, PoolTransactionsCache, RequestMethod, RequestResult, RequestResultMeta, @@ -34,50 +31,6 @@ pub struct PoolInstance { pub node_weights: Option, } -#[derive(Clone)] -pub struct LegerCacheConfig { - pub cache_size: usize, - pub cache_ttl: u128, - pub path: Option, -} - -impl LegerCacheConfig { - pub fn new(cache_size: usize, cache_ttl: u128, path: Option) -> Self { - Self { - cache_size, - cache_ttl, - path, - } - } - pub fn create_cache( - &self, - id: Option, - ) -> VdrResult> { - if let Some(path) = &self.path { - let full_path = Path::new(path).join(id.unwrap_or("default".to_string())); - let full_path_string = full_path - .into_os_string() - .into_string() - .unwrap_or(path.to_string()); - fs::create_dir_all(full_path_string.clone())?; - let store = OrderedHashMap::new(new_fs_ordered_store(full_path_string)?); - Ok(Cache::new(CacheStrategyTTL::new( - self.cache_size, - self.cache_ttl, - Some(store), - None, - ))) - } else { - Ok(Cache::new(CacheStrategyTTL::new( - self.cache_size, - self.cache_ttl, - None, - None, - ))) - } - } -} - pub type NodeWeights = HashMap; pub static POOL_CONFIG: Lazy> = Lazy::new(|| RwLock::new(PoolConfig::default())); @@ -88,8 +41,9 @@ pub static POOLS: Lazy>> = pub static POOL_CACHE: Lazy>>> = Lazy::new(|| RwLock::new(Some(Arc::new(InMemoryCache::new())))); -pub static LEDGER_TXN_CACHE_CONFIG: Lazy>> = - Lazy::new(|| RwLock::new(None)); +pub static LEDGER_CACHE_STRATEGY: Lazy< + RwLock>>>, +> = Lazy::new(|| RwLock::new(None)); #[derive(Serialize, Deserialize, Debug, Clone)] struct PoolCreateParams { @@ -119,12 +73,9 @@ pub extern "C" fn indy_vdr_pool_create(params: FfiStr, handle_p: *mut PoolHandle "Invalid pool create parameters: must provide transactions or transactions_path" )); }; - let txn_cache_config = read_lock!(LEDGER_TXN_CACHE_CONFIG)?.clone(); - let txn_cache = if let Some(config) = txn_cache_config { - config.create_cache(txns.root_hash_base58().ok()).ok() - } else { - None - }; + // set this cache with unique key prefix + let txn_cache = read_lock!(LEDGER_CACHE_STRATEGY)?.as_ref().map(|s| Cache::new(s.clone(), txns.root_hash_base58().ok())); + let mut cached = false; if let Some(cache) = read_lock!(POOL_CACHE)?.as_ref() { if let Some(newer_txns) = cache.resolve_latest(&txns)? 
{ @@ -159,12 +110,8 @@ fn handle_pool_refresh( cache.update(&init_txns, latest_txns)?; } if let Some(new_txns) = new_txns { - let txn_cache_config = read_lock!(LEDGER_TXN_CACHE_CONFIG)?.clone(); - let txn_cache = if let Some(config) = txn_cache_config { - config.create_cache(init_txns.root_hash_base58().ok()).ok() - } else { - None - }; + // set this cache with unique key prefix + let txn_cache = read_lock!(LEDGER_CACHE_STRATEGY)?.as_ref().map(|s| Cache::new(s.clone(), init_txns.root_hash_base58().ok())); let runner = PoolBuilder::new(config, new_txns).node_weights(node_weights).refreshed(true).into_runner(txn_cache)?; let mut pools = write_lock!(POOLS)?; if let Entry::Occupied(mut entry) = pools.entry(pool_handle) { diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index 76174e7b..708800ed 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -1,6 +1,6 @@ use async_lock::RwLock; use async_trait::async_trait; -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; pub mod storage; pub mod strategy; @@ -15,35 +15,47 @@ pub trait CacheStrategy: Send + Sync + 'static { } pub struct Cache { - storage: Arc>>, + storage: Arc>>, + key_prefix: Option, } -impl Cache { - pub fn new(storage: impl CacheStrategy) -> Self { +impl Cache { + fn full_key(&self, key: &K) -> String { + match &self.key_prefix { + Some(prefix) => format!("{}{}", prefix, key), + None => key.to_string(), + } + } + pub fn new(storage: impl CacheStrategy, key_prefix: Option) -> Self { Self { storage: Arc::new(RwLock::new(storage)), + key_prefix, } } pub async fn get(&self, key: &K) -> Option { - self.storage.read().await.get(key).await + let full_key = self.full_key(key); + self.storage.read().await.get(&full_key).await } pub async fn remove(&self, key: &K) -> Option { - self.storage.write().await.remove(key).await + let full_key = self.full_key(key); + self.storage.write().await.remove(&full_key).await } pub async fn insert(&self, key: K, value: V, custom_exp_offset: Option) -> Option { + let full_key = self.full_key(&key); self.storage .write() .await - .insert(key, value, custom_exp_offset) + .insert(full_key, value, custom_exp_offset) .await } } // need to implement Clone manually because Mutex doesn't implement Clone -impl Clone for Cache { +impl Clone for Cache { fn clone(&self) -> Self { Self { storage: self.storage.clone(), + key_prefix: self.key_prefix.clone(), } } } diff --git a/libindy_vdr/src/pool/cache/strategy.rs b/libindy_vdr/src/pool/cache/strategy.rs index 0101139f..1f6a2fca 100644 --- a/libindy_vdr/src/pool/cache/strategy.rs +++ b/libindy_vdr/src/pool/cache/strategy.rs @@ -50,6 +50,21 @@ impl CacheStrategy + for Arc> +{ + async fn get(&self, key: &K) -> Option { + self.get(key).await + } + async fn remove(&mut self, key: &K) -> Option { + self.remove(key).await + } + async fn insert(&mut self, key: K, value: V, custom_exp_offset: Option) -> Option { + self.insert(key, value, custom_exp_offset).await + } +} + #[async_trait] impl CacheStrategy for CacheStrategyTTL @@ -132,14 +147,14 @@ mod tests { #[rstest] fn test_cache_ttl() { - let cache = Cache::new(CacheStrategyTTL::new(2, 5, None, None)); + let cache = Cache::new(CacheStrategyTTL::new(2, 5, None, None), None); let cache_location = "test_fs_cache_ttl"; let tree = sled::open(cache_location) .unwrap() .open_tree(cache_location) .unwrap(); let storage: OrderedHashMap> = OrderedHashMap::new(tree); - let fs_cache = Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None)); + let fs_cache = 
Cache::new(CacheStrategyTTL::new(2, 5, Some(storage), None), None); let caches = vec![cache, fs_cache]; block_on(async { for cache in caches { From 9c3877a31cb2982f49ef184e215dceaa6d91c04e Mon Sep 17 00:00:00 2001 From: wadeking98 Date: Fri, 2 Feb 2024 10:26:04 -0800 Subject: [PATCH 27/32] updated ffi cache methods Signed-off-by: wadeking98 --- libindy_vdr/include/libindy_vdr.h | 4 +--- libindy_vdr/src/ffi/mod.rs | 21 ++++++++----------- libindy_vdr/src/pool/cache/mod.rs | 4 ++++ .../indy-vdr-nodejs/src/NodeJSIndyVdr.ts | 9 ++------ .../src/library/NativeBindings.ts | 3 +-- .../indy-vdr-react-native/cpp/HostObject.cpp | 1 - .../cpp/include/libindy_vdr.h | 4 +--- .../indy-vdr-react-native/cpp/indyVdr.cpp | 11 +--------- .../indy-vdr-react-native/cpp/indyVdr.h | 1 - .../src/NativeBindings.ts | 4 +--- .../src/ReactNativeIndyVdr.ts | 6 +----- .../indy-vdr-shared/src/types/IndyVdr.ts | 4 +--- wrappers/python/indy_vdr/__init__.py | 3 +-- wrappers/python/indy_vdr/bindings.py | 9 ++------ 14 files changed, 25 insertions(+), 59 deletions(-) diff --git a/libindy_vdr/include/libindy_vdr.h b/libindy_vdr/include/libindy_vdr.h index a52e5b20..2e18e21d 100644 --- a/libindy_vdr/include/libindy_vdr.h +++ b/libindy_vdr/include/libindy_vdr.h @@ -481,9 +481,7 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle, ErrorCode indy_vdr_set_cache_directory(FfiStr path); -ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms); - -ErrorCode indy_vdr_set_ledger_txn_fs_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); +ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); ErrorCode indy_vdr_set_config(FfiStr config); diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs index e4f29a8f..2f7ede46 100644 --- a/libindy_vdr/src/ffi/mod.rs +++ b/libindy_vdr/src/ffi/mod.rs @@ -15,6 +15,7 @@ mod resolver; use crate::common::error::prelude::*; use crate::config::{PoolConfig, LIB_VERSION}; +use crate::pool::cache::storage::new_mem_ordered_store; use crate::pool::cache::{ storage::{new_fs_ordered_store, OrderedHashMap}, strategy::CacheStrategyTTL, @@ -77,23 +78,19 @@ pub extern "C" fn indy_vdr_set_cache_directory(path: FfiStr) -> ErrorCode { } #[no_mangle] -pub extern "C" fn indy_vdr_set_ledger_txn_cache(capacity: usize, expire_offset: u64) -> ErrorCode { - catch_err! { - debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), None, None))); - Ok(ErrorCode::Success) - } -} - -#[no_mangle] -pub extern "C" fn indy_vdr_set_ledger_txn_fs_cache( +pub extern "C" fn indy_vdr_set_ledger_txn_cache( capacity: usize, expire_offset: u64, - path: FfiStr, + path_opt: FfiStr, ) -> ErrorCode { catch_err! { debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset); - let store = OrderedHashMap::new(new_fs_ordered_store(path.into())?); + let store = match path_opt.as_opt_str() { + Some("") => OrderedHashMap::new(new_mem_ordered_store()), + Some(path) => OrderedHashMap::new(new_fs_ordered_store(path.into())?), + None => OrderedHashMap::new(new_mem_ordered_store()), + }; + *write_lock!(LEDGER_CACHE_STRATEGY)? 
= Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None))); Ok(ErrorCode::Success) } diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs index 708800ed..ed2b9a62 100644 --- a/libindy_vdr/src/pool/cache/mod.rs +++ b/libindy_vdr/src/pool/cache/mod.rs @@ -26,20 +26,24 @@ impl Cache { None => key.to_string(), } } + pub fn new(storage: impl CacheStrategy, key_prefix: Option) -> Self { Self { storage: Arc::new(RwLock::new(storage)), key_prefix, } } + pub async fn get(&self, key: &K) -> Option { let full_key = self.full_key(key); self.storage.read().await.get(&full_key).await } + pub async fn remove(&self, key: &K) -> Option { let full_key = self.full_key(key); self.storage.write().await.remove(&full_key).await } + pub async fn insert(&self, key: K, value: V, custom_exp_offset: Option) -> Option { let full_key = self.full_key(&key); self.storage diff --git a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts index 62c2a50d..8c392cb0 100644 --- a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts +++ b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts @@ -141,14 +141,9 @@ export class NodeJSIndyVdr implements IndyVdr { this.handleError(this.nativeIndyVdr.indy_vdr_set_cache_directory(path)) } - public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void { - const { capacity, expiry_offset_ms } = serializeArguments(options) - this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms)) - } - - public setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void { + public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void { const { capacity, expiry_offset_ms, path } = serializeArguments(options) - this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_fs_cache(capacity, expiry_offset_ms, path)) + this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms, path)) } public setDefaultLogger(): void { diff --git a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts index ac222595..7d6869eb 100644 --- a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts +++ b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts @@ -3,8 +3,7 @@ import type { ByteBuffer } from '../ffi' export interface NativeMethods { indy_vdr_set_config: (arg0: string) => number indy_vdr_set_cache_directory: (arg0: string) => number - indy_vdr_set_ledger_txn_cache: (arg0: number, arg1: number) => number - indy_vdr_set_ledger_txn_fs_cache: (arg0: number, arg1: number, arg2: string) => number + indy_vdr_set_ledger_txn_cache: (arg0: number, arg1: number, arg2: string) => number indy_vdr_set_default_logger: () => number indy_vdr_set_protocol_version: (arg0: number) => number indy_vdr_set_socks_proxy: (arg0: string) => number diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp b/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp index 3810d878..c5c5e0a8 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp +++ b/wrappers/javascript/indy-vdr-react-native/cpp/HostObject.cpp @@ -14,7 +14,6 @@ FunctionMap IndyVdrTurboModuleHostObject::functionMapping(jsi::Runtime &rt) { fMap.insert(std::make_tuple("setConfig", &indyVdr::setConfig)); 
fMap.insert(std::make_tuple("setCacheDirectory", &indyVdr::setCacheDirectory)); fMap.insert(std::make_tuple("setLedgerTxnCache", &indyVdr::setLedgerTxnCache)); - fMap.insert(std::make_tuple("setLedgerTxnFsCache", &indyVdr::setLedgerTxnFsCache)); fMap.insert(std::make_tuple("setDefaultLogger", &indyVdr::setDefaultLogger)); fMap.insert( std::make_tuple("setProtocolVersion", &indyVdr::setProtocolVersion)); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h index a52e5b20..2e18e21d 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h +++ b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h @@ -481,9 +481,7 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle, ErrorCode indy_vdr_set_cache_directory(FfiStr path); -ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms); - -ErrorCode indy_vdr_set_ledger_txn_fs_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); +ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path); ErrorCode indy_vdr_set_config(FfiStr config); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp index 02ebb322..b0471367 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp +++ b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp @@ -32,7 +32,7 @@ jsi::Value setCacheDirectory(jsi::Runtime &rt, jsi::Object options) { return createReturnValue(rt, code, nullptr); }; -jsi::Value setLedgerTxnFsCache(jsi::Runtime &rt, jsi::Object options) { +jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options) { auto capacity = jsiToValue(rt, options, "capacity"); auto expiry_offset_ms = jsiToValue(rt, options, "expiry_offset_ms"); auto path = jsiToValue(rt, options, "path"); @@ -42,15 +42,6 @@ jsi::Value setLedgerTxnFsCache(jsi::Runtime &rt, jsi::Object options) { return createReturnValue(rt, code, nullptr); }; -jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options) { - auto capacity = jsiToValue(rt, options, "capacity"); - auto expiry_offset_ms = jsiToValue(rt, options, "expiry_offset_ms"); - - ErrorCode code = indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms); - - return createReturnValue(rt, code, nullptr); -}; - jsi::Value setDefaultLogger(jsi::Runtime &rt, jsi::Object options) { ErrorCode code = indy_vdr_set_default_logger(); diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h index 4baa5426..c708f3d4 100644 --- a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h +++ b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.h @@ -13,7 +13,6 @@ jsi::Value version(jsi::Runtime &rt, jsi::Object options); jsi::Value getCurrentError(jsi::Runtime &rt, jsi::Object options); jsi::Value setConfig(jsi::Runtime &rt, jsi::Object options); jsi::Value setCacheDirectory(jsi::Runtime &rt, jsi::Object options); -jsi::Value setLedgerTxnFsCache(jsi::Runtime &rt, jsi::Object options); jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options); jsi::Value setDefaultLogger(jsi::Runtime &rt, jsi::Object options); jsi::Value setProtocolVersion(jsi::Runtime &rt, jsi::Object options); diff --git a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts index f87fadd9..2dacdc11 100644 --- 
a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts +++ b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts @@ -13,9 +13,7 @@ export interface NativeBindings { setCacheDirectory(options: { path: string }): ReturnObject - setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): ReturnObject - - setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): ReturnObject + setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): ReturnObject setDefaultLogger(options: Record): ReturnObject diff --git a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts index ec03bc17..7f726e95 100644 --- a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts +++ b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts @@ -110,14 +110,10 @@ export class ReactNativeIndyVdr implements IndyVdr { const serializedOptions = serializeArguments(options) this.indyVdr.setCacheDirectory(serializedOptions) } - public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void { + public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): void { const serializedOptions = serializeArguments(options) this.indyVdr.setLedgerTxnCache(serializedOptions) } - public setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void { - const serializedOptions = serializeArguments(options) - this.indyVdr.setLedgerTxnFsCache(serializedOptions) - } public setDefaultLogger(): void { this.handleError(this.indyVdr.setDefaultLogger({})) diff --git a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts index 1c71e4d5..dc3d3fb0 100644 --- a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts +++ b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts @@ -49,9 +49,7 @@ export interface IndyVdr { setCacheDirectory(options: { path: string }): void - setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number }): void - - setLedgerTxnFsCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void + setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): void setDefaultLogger(): void diff --git a/wrappers/python/indy_vdr/__init__.py b/wrappers/python/indy_vdr/__init__.py index a62e2252..1902b12f 100644 --- a/wrappers/python/indy_vdr/__init__.py +++ b/wrappers/python/indy_vdr/__init__.py @@ -1,6 +1,6 @@ """indy-vdr Python wrapper library""" -from .bindings import set_cache_directory, set_ledger_txn_fs_cache, set_ledger_txn_cache, set_config, set_protocol_version, version +from .bindings import set_cache_directory, set_ledger_txn_cache, set_config, set_protocol_version, version from .error import VdrError, VdrErrorCode from .ledger import LedgerType from .pool import Pool, open_pool @@ -10,7 +10,6 @@ __all__ = [ "open_pool", "set_cache_directory", - "set_ledger_txn_fs_cache", "set_ledger_txn_cache", "set_config", "set_protocol_version", diff --git a/wrappers/python/indy_vdr/bindings.py b/wrappers/python/indy_vdr/bindings.py index c4faa74e..63cb5133 100644 --- a/wrappers/python/indy_vdr/bindings.py +++ b/wrappers/python/indy_vdr/bindings.py @@ -425,14 +425,9 @@ def set_cache_directory(path: str): """Set the library configuration.""" do_call("indy_vdr_set_cache_directory", encode_str(path)) -def 
-def set_ledger_txn_cache(capacity: int, expiry_offset_ms: int):
+def set_ledger_txn_cache(capacity: int, expiry_offset_ms: int, path: str):
     """Set the library configuration."""
-    do_call("indy_vdr_set_ledger_txn_cache", c_size_t(capacity), c_ulong(expiry_offset_ms))
-
-def set_ledger_txn_fs_cache(capacity: int, expiry_offset_ms: int, path: str):
-    """Set the library configuration."""
-    do_call("indy_vdr_set_ledger_txn_fs_cache", c_size_t(capacity), c_ulong(expiry_offset_ms), encode_str(path))
-
+    do_call("indy_vdr_set_ledger_txn_cache", c_size_t(capacity), c_ulong(expiry_offset_ms), encode_str(path))
 
 def set_config(config: dict):
     """Set the library configuration."""

From df072ba7d90b3addd72c4fc93306ec9153307522 Mon Sep 17 00:00:00 2001
From: wadeking98
Date: Fri, 2 Feb 2024 10:37:05 -0800
Subject: [PATCH 28/32] cleaned up ordered hash map

Signed-off-by: wadeking98
---
 libindy_vdr/src/pool/cache/storage.rs            | 40 ++++++++++---------
 .../src/NativeBindings.ts                        |  2 +-
 .../src/ReactNativeIndyVdr.ts                    |  2 +-
 .../indy-vdr-shared/src/types/IndyVdr.ts         |  2 +-
 4 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/libindy_vdr/src/pool/cache/storage.rs b/libindy_vdr/src/pool/cache/storage.rs
index d1766aa3..aea7d9ad 100644
--- a/libindy_vdr/src/pool/cache/storage.rs
+++ b/libindy_vdr/src/pool/cache/storage.rs
@@ -102,12 +102,10 @@ impl OrderedStore for
 /// A hashmap that also maintains a BTreeMap of keys ordered by a given value
 /// This is useful for structures that need fast O(1) lookups, but also need to evict the oldest or least recently used entries
 /// The Ordered Store must contain both the keys and values for persistence
-pub struct OrderedHashMap(
-    (
-        HashMap,
-        Box> + Send + Sync>,
-    ),
-);
+pub struct OrderedHashMap {
+    lookup: HashMap,
+    ordered_lookup: Box> + Send + Sync>,
+}
 
 impl OrderedHashMap
 where
             keyed_data.insert(k.clone(), (order.clone(), v.clone()));
         })
     });
-        Self((keyed_data, ordered_data))
+        Self {
+            lookup: keyed_data,
+            ordered_lookup: ordered_data,
+        }
     }
 }
 
 impl OrderedHashMap {
     pub fn len(&self) -> usize {
-        let (lookup, _) = &self.0;
+        let lookup = &self.lookup;
         lookup.len()
     }
     pub fn get(&self, key: &K) -> Option<&(O, V)> {
-        let (lookup, _) = &self.0;
+        let lookup = &self.lookup;
         lookup.get(key)
     }
     fn get_key_value(
         &self,
         selector: Box<
             dyn Fn(
                 &Box> + Send + Sync>,
             ) -> Option<(O, Vec)>,
         >,
     ) -> Option<(K, O, V)> {
-        let (lookup, ordered_lookup) = &self.0;
+        let lookup = &self.lookup;
+        let ordered_lookup = &self.ordered_lookup;
         selector(ordered_lookup).and_then(|(_, keys)| {
             keys.first().and_then(|key| {
                 lookup
@@ -173,40 +175,42 @@ impl OrderedHashMap {
     }
     /// inserts a new entry with the given key and value and order
     pub fn insert(&mut self, key: K, value: V, order: O) -> Option {
-        let (lookup, order_lookup) = &mut self.0;
+        let lookup = &mut self.lookup;
+        let ordered_lookup = &mut self.ordered_lookup;
         if let Some((old_order, _)) = lookup.get(&key) {
             // if entry already exists, remove it from the btree
-            if let Some(mut keys) = order_lookup.remove(old_order) {
+            if let Some(mut keys) = ordered_lookup.remove(old_order) {
                 keys.retain(|k| k.0 != key);
                 // insert modified keys back into btree
                 if !keys.is_empty() {
-                    order_lookup.insert(old_order.clone(), keys);
+                    ordered_lookup.insert(old_order.clone(), keys);
                 }
             }
         }
         let keys = match order_lookup.remove(&order) {
             Some(mut ks) => {
                 ks.push((key.clone(), value.clone()));
                 ks
            }
             None => vec![(key.clone(), value.clone())],
         };
-        order_lookup.insert(order.clone(), keys);
+        ordered_lookup.insert(order.clone(), keys);
         lookup
             .insert(key, (order, value))
             .and_then(|(_, v)| Some(v))
     }
     /// removes the entry with the given key
     pub fn remove(&mut self, key: &K) -> Option<(O, V)> {
-        let (lookup, order_lookup) = &mut self.0;
+        let lookup = &mut self.lookup;
+        let ordered_lookup = &mut self.ordered_lookup;
         lookup.remove(key).and_then(|(order, v)| {
-            match order_lookup.remove(&order) {
+            match ordered_lookup.remove(&order) {
                 Some(mut keys) => {
                     keys.retain(|k| k.0 != *key);
                     // insert remaining keys back in
                     if !keys.is_empty() {
-                        order_lookup.insert(order.clone(), keys);
+                        ordered_lookup.insert(order.clone(), keys);
                    }
                }
                None => {}
diff --git a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
index 2dacdc11..bd746d85 100644
--- a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
+++ b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
@@ -13,7 +13,7 @@ export interface NativeBindings {
 
   setCacheDirectory(options: { path: string }): ReturnObject
 
-  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): ReturnObject
+  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): ReturnObject
 
   setDefaultLogger(options: Record): ReturnObject
 
diff --git a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
index 7f726e95..b2db0609 100644
--- a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
+++ b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
@@ -110,7 +110,7 @@ export class ReactNativeIndyVdr implements IndyVdr {
     const serializedOptions = serializeArguments(options)
     this.indyVdr.setCacheDirectory(serializedOptions)
   }
-  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): void {
+  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void {
     const serializedOptions = serializeArguments(options)
     this.indyVdr.setLedgerTxnCache(serializedOptions)
   }
diff --git a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
index dc3d3fb0..6e4d35a1 100644
--- a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
+++ b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
@@ -49,7 +49,7 @@ export interface IndyVdr {
 
   setCacheDirectory(options: { path: string }): void
 
-  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number, path: string }): void
+  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void
 
   setDefaultLogger(): void
 
From 19fc1425d5a30d1bb80530bb586b2e79856afff6 Mon Sep 17 00:00:00 2001
From: wadeking98
Date: Fri, 2 Feb 2024 12:04:44 -0800
Subject: [PATCH 29/32] using new_txns for key prefix

Signed-off-by: wadeking98
---
 libindy_vdr/src/ffi/pool.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libindy_vdr/src/ffi/pool.rs b/libindy_vdr/src/ffi/pool.rs
index 74602c8d..6c998a28 100644
--- a/libindy_vdr/src/ffi/pool.rs
+++ b/libindy_vdr/src/ffi/pool.rs
@@ -111,7 +111,7 @@ fn handle_pool_refresh(
         }
         if let Some(new_txns) = new_txns {
             // set this cache with unique key prefix
-            let txn_cache = read_lock!(LEDGER_CACHE_STRATEGY)?.as_ref().map(|s| Cache::new(s.clone(), init_txns.root_hash_base58().ok()));
+            let txn_cache = read_lock!(LEDGER_CACHE_STRATEGY)?.as_ref().map(|s| Cache::new(s.clone(), new_txns.root_hash_base58().ok()));
             let runner = PoolBuilder::new(config, new_txns).node_weights(node_weights).refreshed(true).into_runner(txn_cache)?;
             let mut pools = write_lock!(POOLS)?;
             if let Entry::Occupied(mut entry) = pools.entry(pool_handle) {

From 0224681be004eea62543cbad3cf4c1120a698eec Mon Sep 17 00:00:00 2001
From: wadeking98
Date: Fri, 2 Feb 2024 13:09:02 -0800
Subject: [PATCH 30/32] quick syntax cleanup

Signed-off-by: wadeking98
---
 libindy_vdr/src/ffi/mod.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs
index 2f7ede46..e97b2daa 100644
--- a/libindy_vdr/src/ffi/mod.rs
+++ b/libindy_vdr/src/ffi/mod.rs
@@ -85,10 +85,9 @@ pub extern "C" fn indy_vdr_set_ledger_txn_cache(
 ) -> ErrorCode {
     catch_err! {
         debug!("Setting pool ledger transactions cache: capacity={}, expire_offset={}", capacity, expire_offset);
-        let store = match path_opt.as_opt_str() {
-            Some("") => OrderedHashMap::new(new_mem_ordered_store()),
-            Some(path) => OrderedHashMap::new(new_fs_ordered_store(path.into())?),
-            None => OrderedHashMap::new(new_mem_ordered_store()),
+        let store = match path_opt.as_opt_str().unwrap_or_default() {
+            "" => OrderedHashMap::new(new_mem_ordered_store()),
+            path => OrderedHashMap::new(new_fs_ordered_store(path.into())?),
         };
 
         *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None)));

From f0b65309ca9e0a2655c104c52f58700bd558c7f5 Mon Sep 17 00:00:00 2001
From: wadeking98
Date: Mon, 5 Feb 2024 10:08:19 -0800
Subject: [PATCH 31/32] updated TS wrappers

Signed-off-by: wadeking98
---
 libindy_vdr/src/pool/cache/mod.rs                             | 2 +-
 wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts      | 2 +-
 .../javascript/indy-vdr-nodejs/src/library/NativeBindings.ts  | 2 +-
 wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp     | 4 ++--
 .../javascript/indy-vdr-react-native/src/NativeBindings.ts    | 2 +-
 .../indy-vdr-react-native/src/ReactNativeIndyVdr.ts           | 2 +-
 wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts      | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/libindy_vdr/src/pool/cache/mod.rs b/libindy_vdr/src/pool/cache/mod.rs
index ed2b9a62..7950eb20 100644
--- a/libindy_vdr/src/pool/cache/mod.rs
+++ b/libindy_vdr/src/pool/cache/mod.rs
@@ -14,7 +14,7 @@ pub trait CacheStrategy: Send + Sync + 'static {
     async fn insert(&mut self, key: K, value: V, custom_exp_offset: Option) -> Option;
 }
 
-pub struct Cache {
+pub struct Cache {
     storage: Arc>>,
     key_prefix: Option,
 }
 
diff --git a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts
index 8c392cb0..9a9129f7 100644
--- a/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts
+++ b/wrappers/javascript/indy-vdr-nodejs/src/NodeJSIndyVdr.ts
@@ -141,7 +141,7 @@ export class NodeJSIndyVdr implements IndyVdr {
     this.handleError(this.nativeIndyVdr.indy_vdr_set_cache_directory(path))
   }
 
-  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void {
+  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path?: string }): void {
     const { capacity, expiry_offset_ms, path } = serializeArguments(options)
     this.handleError(this.nativeIndyVdr.indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms, path))
   }
 
diff --git a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts
index 7d6869eb..272c2975 100644
--- a/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts
+++ b/wrappers/javascript/indy-vdr-nodejs/src/library/NativeBindings.ts
@@ -3,7 +3,7 @@ import type { ByteBuffer } from '../ffi'
 export interface NativeMethods {
   indy_vdr_set_config: (arg0: string) => number
   indy_vdr_set_cache_directory: (arg0: string) => number
-  indy_vdr_set_ledger_txn_cache: (arg0: number, arg1: number, arg2: string) => number
+  indy_vdr_set_ledger_txn_cache: (arg0: number, arg1: number, arg2?: string) => number
   indy_vdr_set_default_logger: () => number
   indy_vdr_set_protocol_version: (arg0: number) => number
   indy_vdr_set_socks_proxy: (arg0: string) => number
diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp
index b0471367..0b59d156 100644
--- a/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp
+++ b/wrappers/javascript/indy-vdr-react-native/cpp/indyVdr.cpp
@@ -35,9 +35,9 @@ jsi::Value setCacheDirectory(jsi::Runtime &rt, jsi::Object options) {
 jsi::Value setLedgerTxnCache(jsi::Runtime &rt, jsi::Object options) {
   auto capacity = jsiToValue(rt, options, "capacity");
   auto expiry_offset_ms = jsiToValue(rt, options, "expiry_offset_ms");
-  auto path = jsiToValue(rt, options, "path");
+  auto path = jsiToValue(rt, options, "path", true);
 
-  ErrorCode code = indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms, path.c_str());
+  ErrorCode code = indy_vdr_set_ledger_txn_cache(capacity, expiry_offset_ms, path.length() > 0 ? path.c_str() : nullptr);
 
   return createReturnValue(rt, code, nullptr);
 };
 
diff --git a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
index bd746d85..5d602819 100644
--- a/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
+++ b/wrappers/javascript/indy-vdr-react-native/src/NativeBindings.ts
@@ -13,7 +13,7 @@ export interface NativeBindings {
 
   setCacheDirectory(options: { path: string }): ReturnObject
 
-  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): ReturnObject
+  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path?: string }): ReturnObject
 
   setDefaultLogger(options: Record): ReturnObject
 
diff --git a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
index b2db0609..734f8fe6 100644
--- a/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
+++ b/wrappers/javascript/indy-vdr-react-native/src/ReactNativeIndyVdr.ts
@@ -110,7 +110,7 @@ export class ReactNativeIndyVdr implements IndyVdr {
     const serializedOptions = serializeArguments(options)
     this.indyVdr.setCacheDirectory(serializedOptions)
   }
-  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void {
+  public setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path?: string }): void {
     const serializedOptions = serializeArguments(options)
     this.indyVdr.setLedgerTxnCache(serializedOptions)
   }
diff --git a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
index 6e4d35a1..c94167d7 100644
--- a/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
+++ b/wrappers/javascript/indy-vdr-shared/src/types/IndyVdr.ts
@@ -49,7 +49,7 @@ export interface IndyVdr {
 
   setCacheDirectory(options: { path: string }): void
 
-  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path: string }): void
+  setLedgerTxnCache(options: { capacity: number; expiry_offset_ms: number; path?: string }): void
 
   setDefaultLogger(): void
 
From 12deec6215e13d3c3fbba182e59dbca94e3cbe50 Mon Sep 17 00:00:00 2001
From: wadeking98
Date: Mon, 5 Feb 2024 12:09:50 -0800
Subject: [PATCH 32/32] switched to signed typing for ffis

Signed-off-by: wadeking98
---
 libindy_vdr/include/libindy_vdr.h                     | 2 +-
 libindy_vdr/src/ffi/mod.rs                            | 6 +++---
 .../indy-vdr-react-native/cpp/include/libindy_vdr.h   | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/libindy_vdr/include/libindy_vdr.h b/libindy_vdr/include/libindy_vdr.h
index 2e18e21d..6ce64ab2 100644
--- a/libindy_vdr/include/libindy_vdr.h
+++ b/libindy_vdr/include/libindy_vdr.h
@@ -481,7 +481,7 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle,
 
 ErrorCode indy_vdr_set_cache_directory(FfiStr path);
 
-ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path);
+ErrorCode indy_vdr_set_ledger_txn_cache(int32_t capacity, int64_t expiry_offset_ms, FfiStr path);
 
 ErrorCode indy_vdr_set_config(FfiStr config);
 
diff --git a/libindy_vdr/src/ffi/mod.rs b/libindy_vdr/src/ffi/mod.rs
index e97b2daa..ef465b54 100644
--- a/libindy_vdr/src/ffi/mod.rs
+++ b/libindy_vdr/src/ffi/mod.rs
@@ -79,8 +79,8 @@ pub extern "C" fn indy_vdr_set_cache_directory(path: FfiStr) -> ErrorCode {
 
 #[no_mangle]
 pub extern "C" fn indy_vdr_set_ledger_txn_cache(
-    capacity: usize,
-    expire_offset: u64,
+    capacity: i32,
+    expire_offset: i64,
     path_opt: FfiStr,
 ) -> ErrorCode {
     catch_err! {
@@ -90,7 +90,7 @@ pub extern "C" fn indy_vdr_set_ledger_txn_cache(
             path => OrderedHashMap::new(new_fs_ordered_store(path.into())?),
         };
 
-        *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity, expire_offset.into(), Some(store), None)));
+        *write_lock!(LEDGER_CACHE_STRATEGY)? = Some(Arc::new(CacheStrategyTTL::new(capacity.try_into().ok().unwrap_or_default(), expire_offset.try_into().ok().unwrap_or_default(), Some(store), None)));
         Ok(ErrorCode::Success)
     }
 }
diff --git a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h
index 2e18e21d..6ce64ab2 100644
--- a/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h
+++ b/wrappers/javascript/indy-vdr-react-native/cpp/include/libindy_vdr.h
@@ -481,7 +481,7 @@ ErrorCode indy_vdr_resolve(PoolHandle pool_handle,
 
 ErrorCode indy_vdr_set_cache_directory(FfiStr path);
 
-ErrorCode indy_vdr_set_ledger_txn_cache(size_t capacity, c_ulong expiry_offset_ms, FfiStr path);
+ErrorCode indy_vdr_set_ledger_txn_cache(int32_t capacity, int64_t expiry_offset_ms, FfiStr path);
 
 ErrorCode indy_vdr_set_config(FfiStr config);
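
For reference, the consolidated cache API that these patches converge on can be exercised from the JavaScript wrappers roughly as follows. This is a minimal usage sketch, not part of the patch series: the `vdr` instance and the capacity, TTL, and path values are illustrative assumptions; only the `setLedgerTxnCache` signature and the in-memory vs. filesystem store selection come from the diffs above.

    // Sketch only: `vdr` is assumed to be a NodeJSIndyVdr or
    // ReactNativeIndyVdr instance implementing the IndyVdr interface above.

    // In-memory ledger txn cache: omitting `path` hands the FFI layer a null
    // FfiStr, which indy_vdr_set_ledger_txn_cache resolves to an empty string
    // and therefore builds the cache on new_mem_ordered_store().
    vdr.setLedgerTxnCache({ capacity: 100, expiry_offset_ms: 60 * 60 * 1000 })

    // Persistent ledger txn cache: a non-empty `path` selects
    // new_fs_ordered_store(path), so cached transactions survive restarts.
    vdr.setLedgerTxnCache({ capacity: 100, expiry_offset_ms: 60 * 60 * 1000, path: '/tmp/indy-vdr-cache' })

Making `path` optional at every layer (patch 31) keeps the earlier single-function FFI surface while still letting callers choose between the two OrderedStore backends, and the signed `int32_t`/`int64_t` parameters (patch 32) avoid platform-dependent `size_t`/`c_ulong` marshalling in the wrappers.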