-
Notifications
You must be signed in to change notification settings - Fork 686
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
rpc-v2: Implement archive_unstable_storageDiff
#5997
base: master
Are you sure you want to change the base?
Changes from all commits
e17a555
4101ee6
d781d67
60e1a93
63da031
25b9e5a
52cfaf7
ee6bed7
8948e74
fbfb0dc
f7b33ed
f47db36
97546a4
baa3dd0
4683f6b
36f59d2
a084f47
77cac50
e2324a4
4b05913
83cb9b4
85c810e
152d13b
4f549bf
d208068
369a4d2
a02cc84
637b446
76efb18
ca1fa20
03eefd7
9a1e792
dbbfce1
971026d
2880058
b774855
838504b
60abd9b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,16 @@ | ||
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 | ||
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json | ||
|
||
title: Implement archive_unstable_storageDiff method | ||
|
||
doc: | ||
- audience: Node Dev | ||
description: | | ||
This PR implements the `archive_unstable_storageDiff` rpc-v2 method. | ||
Developers can use this method to fetch the storage differences | ||
between two blocks. This is useful for oracles and archive nodes. | ||
For more details see: https://github.com/paritytech/json-rpc-interface-spec/blob/main/src/api/archive_unstable_storageDiff.md. | ||
|
||
crates: | ||
- name: sc-rpc-spec-v2 | ||
bump: minor |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,17 +19,29 @@ | |
//! API implementation for `archive`. | ||
|
||
use crate::{ | ||
archive::{error::Error as ArchiveError, ArchiveApiServer}, | ||
common::events::{ArchiveStorageResult, PaginatedStorageQuery}, | ||
hex_string, MethodResult, | ||
archive::{ | ||
archive_storage::{deduplicate_storage_diff_items, ArchiveStorage, ArchiveStorageDiff}, | ||
error::Error as ArchiveError, | ||
ArchiveApiServer, | ||
}, | ||
common::events::{ | ||
ArchiveStorageDiffEvent, ArchiveStorageDiffItem, ArchiveStorageResult, | ||
PaginatedStorageQuery, | ||
}, | ||
hex_string, MethodResult, SubscriptionTaskExecutor, | ||
}; | ||
|
||
use codec::Encode; | ||
use jsonrpsee::core::{async_trait, RpcResult}; | ||
use futures::FutureExt; | ||
use jsonrpsee::{ | ||
core::{async_trait, RpcResult}, | ||
PendingSubscriptionSink, | ||
}; | ||
use sc_client_api::{ | ||
Backend, BlockBackend, BlockchainEvents, CallExecutor, ChildInfo, ExecutorProvider, StorageKey, | ||
StorageProvider, | ||
}; | ||
use sc_rpc::utils::Subscription; | ||
use sp_api::{CallApiAt, CallContext}; | ||
use sp_blockchain::{ | ||
Backend as BlockChainBackend, Error as BlockChainError, HeaderBackend, HeaderMetadata, | ||
|
@@ -41,7 +53,9 @@ use sp_runtime::{ | |
}; | ||
use std::{collections::HashSet, marker::PhantomData, sync::Arc}; | ||
|
||
use super::archive_storage::ArchiveStorage; | ||
use tokio::sync::mpsc; | ||
|
||
pub(crate) const LOG_TARGET: &str = "rpc-spec-v2::archive"; | ||
|
||
/// The configuration of [`Archive`]. | ||
pub struct ArchiveConfig { | ||
|
@@ -64,6 +78,12 @@ const MAX_DESCENDANT_RESPONSES: usize = 5; | |
/// `MAX_DESCENDANT_RESPONSES`. | ||
const MAX_QUERIED_ITEMS: usize = 8; | ||
|
||
/// The buffer capacity for each storage query. | ||
/// | ||
/// This is small because the underlying JSON-RPC server has | ||
/// its own buffer capacity per connection as well. | ||
const STORAGE_QUERY_BUF: usize = 16; | ||
|
||
impl Default for ArchiveConfig { | ||
fn default() -> Self { | ||
Self { | ||
|
@@ -79,6 +99,8 @@ pub struct Archive<BE: Backend<Block>, Block: BlockT, Client> { | |
client: Arc<Client>, | ||
/// Backend of the chain. | ||
backend: Arc<BE>, | ||
/// Executor to spawn subscriptions. | ||
executor: SubscriptionTaskExecutor, | ||
/// The hexadecimal encoded hash of the genesis block. | ||
genesis_hash: String, | ||
/// The maximum number of items the `archive_storage` can return for a descendant query before | ||
|
@@ -96,12 +118,14 @@ impl<BE: Backend<Block>, Block: BlockT, Client> Archive<BE, Block, Client> { | |
client: Arc<Client>, | ||
backend: Arc<BE>, | ||
genesis_hash: GenesisHash, | ||
executor: SubscriptionTaskExecutor, | ||
config: ArchiveConfig, | ||
) -> Self { | ||
let genesis_hash = hex_string(&genesis_hash.as_ref()); | ||
Self { | ||
client, | ||
backend, | ||
executor, | ||
genesis_hash, | ||
storage_max_descendant_responses: config.max_descendant_responses, | ||
storage_max_queried_items: config.max_queried_items, | ||
|
@@ -278,4 +302,92 @@ where | |
|
||
Ok(storage_client.handle_query(hash, items, child_trie)) | ||
} | ||
|
||
fn archive_unstable_storage_diff( | ||
&self, | ||
pending: PendingSubscriptionSink, | ||
hash: Block::Hash, | ||
items: Vec<ArchiveStorageDiffItem<String>>, | ||
previous_hash: Option<Block::Hash>, | ||
) { | ||
let storage_client = ArchiveStorageDiff::new(self.client.clone()); | ||
let client = self.client.clone(); | ||
|
||
log::trace!(target: LOG_TARGET, "Storage diff subscription started"); | ||
|
||
let fut = async move { | ||
let Ok(mut sink) = pending.accept().await.map(Subscription::from) else { return }; | ||
|
||
// Deduplicate the items. | ||
let mut trie_items = match deduplicate_storage_diff_items(items) { | ||
Ok(items) => items, | ||
Err(error) => { | ||
let _ = sink.send(&ArchiveStorageDiffEvent::err(error.to_string())).await; | ||
return | ||
}, | ||
}; | ||
// Default to using the main storage trie if no items are provided. | ||
if trie_items.is_empty() { | ||
trie_items.push(Vec::new()); | ||
} | ||
log::trace!(target: LOG_TARGET, "Storage diff deduplicated items: {:?}", trie_items); | ||
|
||
let previous_hash = if let Some(previous_hash) = previous_hash { | ||
previous_hash | ||
} else { | ||
let Ok(Some(current_header)) = client.header(hash) else { | ||
let message = format!("Block header is not present: {hash}"); | ||
let _ = sink.send(&ArchiveStorageDiffEvent::err(message)).await; | ||
return | ||
}; | ||
*current_header.parent_hash() | ||
}; | ||
|
||
let (tx, mut rx) = tokio::sync::mpsc::channel(STORAGE_QUERY_BUF); | ||
for trie_queries in trie_items { | ||
let storage_fut = storage_client.handle_trie_queries( | ||
hash, | ||
previous_hash, | ||
trie_queries, | ||
tx.clone(), | ||
); | ||
let result = | ||
futures::future::join(storage_fut, process_events(&mut rx, &mut sink)).await; | ||
if !result.1 { | ||
log::debug!(target: LOG_TARGET, "Error processing trie queries"); | ||
return; | ||
} | ||
} | ||
|
||
let _ = sink.send(&ArchiveStorageDiffEvent::StorageDiffDone).await; | ||
}; | ||
|
||
self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed()); | ||
} | ||
} | ||
|
||
/// Returns true if the events where processed successfully, false otherwise. | ||
async fn process_events( | ||
rx: &mut mpsc::Receiver<ArchiveStorageDiffEvent>, | ||
sink: &mut Subscription, | ||
) -> bool { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. See my comment above, would be neat to remove the bool flag, process all trie queries in |
||
while let Some(event) = rx.recv().await { | ||
if event.is_done() { | ||
log::debug!(target: LOG_TARGET, "Finished processing partial trie query"); | ||
break | ||
} | ||
|
||
let is_error_event = event.is_err(); | ||
if let Err(_) = sink.send(&event).await { | ||
return false | ||
} | ||
|
||
if is_error_event { | ||
log::debug!(target: LOG_TARGET, "Error event encountered while processing partial trie query"); | ||
// Stop further processing if an error event is received. | ||
return false | ||
} | ||
} | ||
|
||
true | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Any reason why not pass
trie_items
directly to storage_client.handle_trie_queries
? Would be neat to not spawn a task for each trie query...
I reckon that it would be quite neat to move the sender and rely on the fact that it will close down the stream, instead of checking whether the event.is_done()...
Then also send
ArchiveStorageDiffEvent::StorageDiffDone
in process_events...