From 371e8c2ac10dda50f935a39a5b0ffbe765ad5486 Mon Sep 17 00:00:00 2001
From: Rob N
Date: Sun, 7 Jul 2024 09:45:43 -1000
Subject: [PATCH] chain: load headers outside of constructors

---
 example/memory.rs         |   3 +-
 example/rescan.rs         |   3 +-
 example/signet.rs         |   7 +-
 src/chain/chain.rs        | 138 ++++++++++++++++++++------------------
 src/chain/error.rs        |   3 +
 src/chain/header_chain.rs |  76 ++++++++-------------
 src/db/sqlite/headers.rs  |   5 +-
 src/db/traits.rs          |  10 ++-
 src/node/builder.rs       |  11 +--
 src/node/node.rs          |  30 ++++++---
 tests/core.rs             |   9 +--
 11 files changed, 147 insertions(+), 148 deletions(-)

diff --git a/example/memory.rs b/example/memory.rs
index ae3a471..52a3af9 100644
--- a/example/memory.rs
+++ b/example/memory.rs
@@ -50,8 +50,7 @@ async fn main() {
         // We only maintain a list of 32 peers in memory
         .peer_db_size(32)
         // Build without the default databases
-        .build_with_databases(peer_store, header_store)
-        .await;
+        .build_with_databases(peer_store, header_store);
     // Run the node
     tokio::task::spawn(async move { node.run().await });
     // Split the client into components that send messages and listen to messages.
diff --git a/example/rescan.rs b/example/rescan.rs
index b1ba338..581252c 100644
--- a/example/rescan.rs
+++ b/example/rescan.rs
@@ -36,8 +36,7 @@ async fn main() {
         // The number of connections we would like to maintain
         .num_required_peers(1)
         // Create the node and client, choosing not to store headers
-        .build_node()
-        .await;
+        .build_node();
     // Run the node and wait for the sync message;
     tokio::task::spawn(async move { node.run().await });
     tracing::info!("Running the node and waiting for a sync message. Please wait a minute!");
diff --git a/example/signet.rs b/example/signet.rs
index 8965aef..074370f 100644
--- a/example/signet.rs
+++ b/example/signet.rs
@@ -44,8 +44,7 @@ async fn main() {
         // The number of connections we would like to maintain
         .num_required_peers(2)
         // Create the node and client
-        .build_node()
-        .await;
+        .build_node();
     // Check if the node is running. Another part of the program may be giving us the node.
     if !node.is_running() {
         tokio::task::spawn(async move { node.run().await });
@@ -77,8 +76,8 @@ async fn main() {
             let recent = update.recent_history;
             tracing::info!("Recent history:");
             for (height, hash) in recent {
-                tracing::info!("Synced chain up to block {}", height);
-                tracing::info!("Chain tip: {}", hash.block_hash());
+                tracing::info!("Height: {}", height);
+                tracing::info!("Hash: {}", hash.block_hash());
             }
             break;
         }
diff --git a/src/chain/chain.rs b/src/chain/chain.rs
index 2281934..25b422c 100644
--- a/src/chain/chain.rs
+++ b/src/chain/chain.rs
@@ -53,58 +53,20 @@ pub(crate) struct Chain {
 }

 impl Chain {
-    pub(crate) async fn new(
+    pub(crate) fn new(
         network: &Network,
         scripts: HashSet<ScriptBuf>,
         anchor: HeaderCheckpoint,
-        mut checkpoints: HeaderCheckpoints,
-        mut dialog: Dialog,
-        mut db: impl HeaderStore + Send + Sync + 'static,
+        checkpoints: HeaderCheckpoints,
+        dialog: Dialog,
+        db: impl HeaderStore + Send + Sync + 'static,
         quorum_required: usize,
-    ) -> Result<Self, HeaderPersistenceError> {
+    ) -> Self {
         let params = params_from_network(network);
-        let mut loaded_headers = db
-            .load(anchor.height)
-            .await
-            .map_err(HeaderPersistenceError::Database)?;
-        if loaded_headers.len().gt(&0) {
-            if loaded_headers
-                .values()
-                .take(1)
-                .copied()
-                .collect::<Vec<Header>>()
-                .first()
-                .unwrap()
-                .prev_blockhash
-                .ne(&anchor.hash)
-            {
-                dialog
-                    .send_warning("Checkpoint anchor mismatch".into())
-                    .await;
-                // The header chain did not align, so just start from the anchor
-                loaded_headers = BTreeMap::new();
-            } else if loaded_headers
-                .iter()
-                .zip(loaded_headers.iter().skip(1))
-                .any(|(first, second)| first.1.block_hash().ne(&second.1.prev_blockhash))
-            {
-                dialog
-                    .send_warning("Blockhash pointer mismatch".into())
-                    .await;
-                return Err(HeaderPersistenceError::HeadersDoNotLink);
-            }
-            loaded_headers.iter().for_each(|header| {
-                if let Some(checkpoint) = checkpoints.next() {
-                    if header.1.block_hash().eq(&checkpoint.hash) {
-                        checkpoints.advance()
-                    }
-                }
-            })
-        };
-        let header_chain = HeaderChain::new(anchor, loaded_headers);
+        let header_chain = HeaderChain::new(anchor);
         let cf_header_chain = CFHeaderChain::new(anchor, quorum_required);
         let filter_chain = FilterChain::new(anchor);
-        Ok(Chain {
+        Chain {
             header_chain,
             checkpoints,
             params,
@@ -115,7 +77,8 @@ impl Chain {
             scripts,
             block_queue: BlockQueue::new(),
             dialog,
-        })
+        }
     }

     // Top of the chain
@@ -274,6 +236,53 @@ impl Chain {
         }
     }

+    // Load the headers from the database
+    pub(crate) async fn load_headers(&mut self) -> Result<(), HeaderPersistenceError> {
+        let loaded_headers = self
+            .db
+            .lock()
+            .await
+            .load_after(self.height())
+            .await
+            .map_err(HeaderPersistenceError::Database)?;
+        if loaded_headers.len().gt(&0) {
+            if loaded_headers
+                .values()
+                .take(1)
+                .copied()
+                .collect::<Vec<Header>>()
+                .first()
+                .unwrap()
+                .prev_blockhash
+                .ne(&self.tip())
+            {
+                self.dialog
+                    .send_warning("Unlinkable anchor. The headers stored in the database have no connection to this configured anchor.".into())
+                    .await;
+                // The header chain did not align, so just start from the anchor
+                return Err(HeaderPersistenceError::CannotLocateHistory);
+            } else if loaded_headers
+                .iter()
+                .zip(loaded_headers.iter().skip(1))
+                .any(|(first, second)| first.1.block_hash().ne(&second.1.prev_blockhash))
+            {
+                self.dialog
+                    .send_warning("Blockhash pointer mismatch".into())
+                    .await;
+                return Err(HeaderPersistenceError::HeadersDoNotLink);
+            }
+            loaded_headers.iter().for_each(|header| {
+                if let Some(checkpoint) = self.checkpoints.next() {
+                    if header.1.block_hash().eq(&checkpoint.hash) {
+                        self.checkpoints.advance()
+                    }
+                }
+            })
+        };
+        self.header_chain.set_headers(loaded_headers);
+        Ok(())
+    }
+
     // If the number of headers in memory gets too large, move some of them to the disk
     pub(crate) async fn manage_memory(&mut self) {
         if self.header_chain.inner_len() > MAX_HEADER_SIZE {
@@ -460,12 +469,14 @@ impl Chain {
     // This call occurs if we sync to a block that is later reorganized out of the chain,
    // but we have restarted our node in between these events.
    async fn load_fork(&mut self, header_batch: &HeadersBatch) -> Result<(), HeaderSyncError> {
-        let mut db_lock = self.db.lock().await;
         let prev_hash = header_batch.first().prev_blockhash;
-        let maybe_height = db_lock
-            .height_of(&prev_hash)
-            .await
-            .map_err(|_| HeaderSyncError::DbError)?;
+        let maybe_height = {
+            let mut db_lock = self.db.lock().await;
+            db_lock
+                .height_of(&prev_hash)
+                .await
+                .map_err(|_| HeaderSyncError::DbError)?
+        };
         match maybe_height {
             Some(height) => {
                 // This is a very generous check to ensure a peer cannot get us to load an
                 // absurd number of headers;
                 // we wouldn't accept a fork of a depth more than around 2,000 anyway.
                 // The only reorgs that have ever been recorded are of depth 1.
                 if self.height() - height > MAX_REORG_DEPTH {
-                    Err(HeaderSyncError::FloatingHeaders)
+                    return Err(HeaderSyncError::FloatingHeaders);
                 } else {
                     let older_anchor = HeaderCheckpoint::new(height, prev_hash);
-                    let loaded_headers = db_lock
-                        .load(older_anchor.height)
-                        .await
-                        .map_err(|_| HeaderSyncError::DbError)?;
-                    self.header_chain = HeaderChain::new(older_anchor, loaded_headers);
+                    self.header_chain = HeaderChain::new(older_anchor);
                     self.cf_header_chain =
                         CFHeaderChain::new(older_anchor, self.cf_header_chain.quorum_required());
                     self.filter_chain = FilterChain::new(older_anchor);
-                    Ok(())
                 }
             }
-            None => Err(HeaderSyncError::FloatingHeaders),
+            None => return Err(HeaderSyncError::FloatingHeaders),
         }
+        self.load_headers()
+            .await
+            .map_err(|_| HeaderSyncError::DbError)?;
+        Ok(())
     }

     // Sync the compact filter headers, possibly encountering conflicts
@@ -750,7 +760,7 @@ mod tests {

     use super::Chain;

-    async fn new_regtest(anchor: HeaderCheckpoint) -> Chain {
+    fn new_regtest(anchor: HeaderCheckpoint) -> Chain {
         let (sender, _) = tokio::sync::broadcast::channel::<NodeMessage>(1);
         let mut checkpoints = HeaderCheckpoints::new(&bitcoin::Network::Regtest);
         checkpoints.prune_up_to(anchor);
@@ -763,8 +773,6 @@ mod tests {
             (),
             1,
         )
-        .await
-        .unwrap()
     }

     #[tokio::test]
@@ -774,7 +782,7 @@ mod tests {
             BlockHash::from_str("62c28f380692524a3a8f1fc66252bc0eb31d6b6a127d2263bdcbee172529fe16")
                 .unwrap(),
         );
-        let mut chain = new_regtest(gen).await;
+        let mut chain = new_regtest(gen);
         let block_8: Header = deserialize(&hex::decode("0000002016fe292517eecbbd63227d126a6b1db30ebc5262c61f8f3a4a529206388fc262dfd043cef8454f71f30b5bbb9eb1a4c9aea87390f429721e435cf3f8aa6e2a9171375166ffff7f2000000000").unwrap()).unwrap();
         let block_9: Header = deserialize(&hex::decode("000000205708a90197d93475975545816b2229401ccff7567cb23900f14f2bd46732c605fd8de19615a1d687e89db365503cdf58cb649b8e935a1d3518fa79b0d408704e71375166ffff7f2000000000").unwrap()).unwrap();
         let block_10: Header = deserialize(&hex::decode("000000201d062f2162835787db536c55317e08df17c58078c7610328bdced198574093790c9f554a7780a6043a19619d2a4697364bb62abf6336c0568c31f1eedca3c3e171375166ffff7f2000000000").unwrap()).unwrap();
@@ -822,7 +830,7 @@ mod tests {
             BlockHash::from_str("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206")
                 .unwrap(),
         );
-        let mut chain = new_regtest(gen).await;
+        let mut chain = new_regtest(gen);
         let block_1: Header = deserialize(&hex::decode("0000002006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f047eb4d0fe76345e307d0e020a079cedfa37101ee7ac84575cf829a611b0f84bc4805e66ffff7f2001000000").unwrap()).unwrap();
         let block_2: Header = deserialize(&hex::decode("00000020299e41732deb76d869fcdb5f72518d3784e99482f572afb73068d52134f1f75e1f20f5da8d18661d0f13aa3db8fff0f53598f7d61f56988a6d66573394b2c6ffc5805e66ffff7f2001000000").unwrap()).unwrap();
         let block_3: Header = deserialize(&hex::decode("00000020b96feaa82716f11befeb608724acee4743e0920639a70f35f1637a88b8b6ea3471f1dbedc283ce6a43a87ed3c8e6326dae8d3dbacce1b2daba08e508054ffdb697815e66ffff7f2001000000").unwrap()).unwrap();
@@ -850,7 +858,7 @@ mod tests {
             BlockHash::from_str("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206")
                 .unwrap(),
         );
-        let mut chain = new_regtest(gen).await;
+        let mut chain = new_regtest(gen);
         let block_1: Header = deserialize(&hex::decode("0000002006226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f575b313ad3ef825cfc204c34da8f3c1fd1784e2553accfa38001010587cb57241f855e66ffff7f2000000000").unwrap()).unwrap();
         let block_2: Header = deserialize(&hex::decode("00000020c81cedd6a989939936f31448e49d010a13c2e750acf02d3fa73c9c7ecfb9476e798da2e5565335929ad303fc746acabc812ee8b06139bcf2a4c0eb533c21b8c420855e66ffff7f2000000000").unwrap()).unwrap();
         let batch_1 = vec![block_1, block_2];
diff --git a/src/chain/error.rs b/src/chain/error.rs
index 4019b93..9ce707e 100644
--- a/src/chain/error.rs
+++ b/src/chain/error.rs
@@ -35,6 +35,9 @@ pub enum HeaderPersistenceError {
     /// Some predefined checkpoint does not match.
     #[error("The headers loaded do not match a known checkpoint.")]
     MismatchedCheckpoints,
+    /// A user tried to retrieve headers further in the past than what is stored in their database.
+    #[error("The configured anchor checkpoint is too far in the past compared to previous syncs. The database cannot reconstruct the chain.")]
+    CannotLocateHistory,
     /// A database error.
     #[error("The headers could not be loaded from sqlite.")]
     Database(DatabaseError),
diff --git a/src/chain/header_chain.rs b/src/chain/header_chain.rs
index df2d0d4..57e8676 100644
--- a/src/chain/header_chain.rs
+++ b/src/chain/header_chain.rs
@@ -19,13 +19,20 @@ pub(crate) struct HeaderChain {
 }

 impl HeaderChain {
-    pub(crate) fn new(checkpoint: HeaderCheckpoint, headers: Headers) -> Self {
+    pub(crate) fn new(checkpoint: HeaderCheckpoint) -> Self {
         Self {
             anchor_checkpoint: checkpoint,
-            headers,
+            headers: BTreeMap::new(),
         }
     }

+    // Set the headers to those loaded from a database.
+    // Done separately, so that all asynchronous work is done
+    // when the node is running.
+    pub(crate) fn set_headers(&mut self, headers: Headers) {
+        self.headers = headers;
+    }
+
     // Top of the chain
     pub(crate) fn tip(&self) -> BlockHash {
         match self.headers.values().last() {
@@ -251,16 +258,11 @@ mod tests {

     #[test]
     fn test_empty_chain() {
-        let chain = HeaderChain::new(
-            HeaderCheckpoint::new(
-                190_000,
-                BlockHash::from_str(
-                    "0000013a6143b7360b7ba3834316b3265ee9072dde440bd45f99c01c42abaef2",
-                )
+        let chain = HeaderChain::new(HeaderCheckpoint::new(
+            190_000,
+            BlockHash::from_str("0000013a6143b7360b7ba3834316b3265ee9072dde440bd45f99c01c42abaef2")
                 .unwrap(),
-            ),
-            BTreeMap::new(),
-        );
+        ));
         assert_eq!(chain.chainwork(), Work::from_be_bytes([0; 32]));
         assert_eq!(
             chain.chainwork_after_height(189_999),
@@ -289,16 +291,11 @@ mod tests {
         let block_190_003: Header = deserialize(&hex::decode("0000002042c5fa907f5d28affaa72b430f2732052d7a19f203be794fea39153e7e0000009c8705706dce105bbaf42a9a692d3bdcca1d7e34399e8cc7684700da439bf144291810669d41011ed0241501").unwrap()).unwrap();
         let batch_1 = vec![block_190_001];
         let batch_2 = vec![block_190_002, block_190_003];
-        let mut chain = HeaderChain::new(
-            HeaderCheckpoint::new(
-                190_000,
-                BlockHash::from_str(
-                    "0000013a6143b7360b7ba3834316b3265ee9072dde440bd45f99c01c42abaef2",
-                )
+        let mut chain = HeaderChain::new(HeaderCheckpoint::new(
+            190_000,
+            BlockHash::from_str("0000013a6143b7360b7ba3834316b3265ee9072dde440bd45f99c01c42abaef2")
                 .unwrap(),
-            ),
-            BTreeMap::new(),
-        );
+        ));
         let reorg = chain.extend(&batch_1);
         assert!(reorg.is_empty());
         assert_eq!(chain.height(), 190_001);
@@ -332,16 +329,11 @@ mod tests {
         let new_block_10: Header = deserialize(&hex::decode("000000201d062f2162835787db536c55317e08df17c58078c7610328bdced198574093792151c0e9ce4e4c789ca98427d7740cc7acf30d2ca0c08baef266bf152289d814567e5e66ffff7f2001000000").unwrap()).unwrap();
         let block_11: Header = deserialize(&hex::decode("00000020efcf8b12221fccc735b9b0b657ce15b31b9c50aff530ce96a5b4cfe02d8c0068496c1b8a89cf5dec22e46c35ea1035f80f5b666a1b3aa7f3d6f0880d0061adcc567e5e66ffff7f2001000000").unwrap()).unwrap();
         let fork = vec![new_block_10, block_11];
-        let mut chain = HeaderChain::new(
-            HeaderCheckpoint::new(
-                7,
-                BlockHash::from_str(
-                    "62c28f380692524a3a8f1fc66252bc0eb31d6b6a127d2263bdcbee172529fe16",
-                )
+        let mut chain = HeaderChain::new(HeaderCheckpoint::new(
+            7,
+            BlockHash::from_str("62c28f380692524a3a8f1fc66252bc0eb31d6b6a127d2263bdcbee172529fe16")
                 .unwrap(),
-            ),
-            BTreeMap::new(),
-        );
+        ));
         let reorg = chain.extend(&batch_1);
         assert!(reorg.is_empty());
         assert_eq!(chain.height(), 10);
@@ -377,16 +369,11 @@ mod tests {
         let new_block_3: Header = deserialize(&hex::decode("00000020b96feaa82716f11befeb608724acee4743e0920639a70f35f1637a88b8b6ea349c6240c5d0521966771808950f796c9c04088bc9551a828b64f1cf06831705dfbc835e66ffff7f2000000000").unwrap()).unwrap();
         let new_block_4: Header = deserialize(&hex::decode("00000020d2a1c6ba2be393f405fe2f4574565f9ee38ac68d264872fcd82b030970d0232ce882eb47c3dd138587120f1ad97dd0e73d1e30b79559ad516cb131f83dcb87e9bc835e66ffff7f2002000000").unwrap()).unwrap();
         let fork = vec![new_block_3, new_block_4];
-        let mut chain = HeaderChain::new(
-            HeaderCheckpoint::new(
-                0,
-                BlockHash::from_str(
-                    "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206",
-                )
+        let mut chain = HeaderChain::new(HeaderCheckpoint::new(
+            0,
+            BlockHash::from_str("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206")
                 .unwrap(),
-            ),
-            BTreeMap::new(),
-        );
+        ));
         chain.extend(&batch_1);
         let reorged = chain.extend(&fork);
         assert_eq!(reorged.len(), 2);
@@ -411,16 +398,11 @@ mod tests {
         let block_3: Header = deserialize(&hex::decode("0000002080f38c14e898d6646dd426428472888966e0d279d86453f42edc56fdb143241aa66c8fa8837d95be3f85d53f22e86a0d6d456b1ab348e073da4d42a39f50637423865e66ffff7f2000000000").unwrap()).unwrap();
         let block_4: Header = deserialize(&hex::decode("000000204877fed370af64c0a1f7a76f6944e1127aad965b1865f99ecfdf8fa72ae23377f51921d01ff1131bd589500a8ca142884297ceeb1aa762ad727249e9a23f2cb023865e66ffff7f2000000000").unwrap()).unwrap();
         let batch_2 = vec![block_3, block_4];
-        let mut chain = HeaderChain::new(
-            HeaderCheckpoint::new(
-                0,
-                BlockHash::from_str(
-                    "0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206",
-                )
+        let mut chain = HeaderChain::new(HeaderCheckpoint::new(
+            0,
+            BlockHash::from_str("0f9188f13cb7b2c71f2a335e3a4fc328bf5beb436012afca590b1a11466e2206")
                 .unwrap(),
-            ),
-            BTreeMap::new(),
-        );
+        ));
         chain.extend(&batch_1);
         let reorged = chain.extend(&fork);
         assert_eq!(reorged.len(), 2);
diff --git a/src/db/sqlite/headers.rs b/src/db/sqlite/headers.rs
index 5be156e..15f422d 100644
--- a/src/db/sqlite/headers.rs
+++ b/src/db/sqlite/headers.rs
@@ -52,7 +52,10 @@ impl SqliteHeaderDb {

 #[async_trait]
 impl HeaderStore for SqliteHeaderDb {
-    async fn load(&mut self, anchor_height: u32) -> Result<BTreeMap<u32, Header>, DatabaseError> {
+    async fn load_after(
+        &mut self,
+        anchor_height: u32,
+    ) -> Result<BTreeMap<u32, Header>, DatabaseError> {
         let mut headers = BTreeMap::<u32, Header>::new();
         let stmt = "SELECT * FROM headers ORDER BY height";
         let write_lock = self.conn.lock().await;
diff --git a/src/db/traits.rs b/src/db/traits.rs
index c865654..7579737 100644
--- a/src/db/traits.rs
+++ b/src/db/traits.rs
@@ -9,7 +9,10 @@ use super::{error::DatabaseError, PersistedPeer};
 #[async_trait]
 pub trait HeaderStore {
     /// Load all headers with heights *strictly after* the specified anchor height.
-    async fn load(&mut self, anchor_height: u32) -> Result<BTreeMap<u32, Header>, DatabaseError>;
+    async fn load_after(
+        &mut self,
+        anchor_height: u32,
+    ) -> Result<BTreeMap<u32, Header>, DatabaseError>;

     /// Write an indexed map of block headers to the database, ignoring if they already exist.
     async fn write<'a>(
@@ -34,7 +37,10 @@ pub trait HeaderStore {
 /// This is a simple wrapper for the unit type, signifying that no headers will be stored between sessions.
 #[async_trait]
 impl HeaderStore for () {
-    async fn load(&mut self, _anchor_height: u32) -> Result<BTreeMap<u32, Header>, DatabaseError> {
+    async fn load_after(
+        &mut self,
+        _anchor_height: u32,
+    ) -> Result<BTreeMap<u32, Header>, DatabaseError> {
         Ok(BTreeMap::new())
     }

diff --git a/src/node/builder.rs b/src/node/builder.rs
index c25b649..a785341 100644
--- a/src/node/builder.rs
+++ b/src/node/builder.rs
@@ -2,7 +2,6 @@
 use std::{collections::HashSet, path::PathBuf};

 use bitcoin::{Network, ScriptBuf};
-use crate::db::error::DatabaseError;
 use crate::prelude::default_port_from_network;
 use crate::TrustedPeer;
 use crate::{
@@ -85,26 +84,20 @@ impl NodeBuilder {

     /// Consume the node builder and receive a [`Node`] and [`Client`].
     #[cfg(feature = "database")]
-    pub async fn build_node(&self) -> (Node, Client) {
+    pub fn build_node(&self) -> (Node, Client) {
         use crate::db::sqlite::{headers::SqliteHeaderDb, peers::SqlitePeerDb};
         let peer_store = SqlitePeerDb::new(self.network, self.config.data_path.clone()).unwrap();
         let header_store =
             SqliteHeaderDb::new(self.network, self.config.data_path.clone()).unwrap();
         Node::new_from_config(&self.config, self.network, peer_store, header_store)
-            .await
-            .map_err(|_| DatabaseError::Load)
-            .unwrap()
     }

     /// Consume the node builder by using custom database implementations, receiving a [`Node`] and [`Client`].
-    pub async fn build_with_databases(
+    pub fn build_with_databases(
         &self,
         peer_store: impl PeerStore + Send + Sync + 'static,
         header_store: impl HeaderStore + Send + Sync + 'static,
     ) -> (Node, Client) {
         Node::new_from_config(&self.config, self.network, peer_store, header_store)
-            .await
-            .map_err(|_| DatabaseError::Load)
-            .unwrap()
     }
 }
diff --git a/src/node/node.rs b/src/node/node.rs
index de16c9a..aff138c 100644
--- a/src/node/node.rs
+++ b/src/node/node.rs
@@ -81,7 +81,7 @@ pub struct Node {

 impl Node {
     #[allow(clippy::too_many_arguments)]
-    pub(crate) async fn new(
+    pub(crate) fn new(
         network: Network,
         white_list: Whitelist,
         scripts: HashSet<ScriptBuf>,
@@ -90,7 +90,7 @@ impl Node {
         target_peer_size: u32,
         peer_store: impl PeerStore + Send + Sync + 'static,
         header_store: impl HeaderStore + Send + Sync + 'static,
-    ) -> Result<(Self, Client), NodeError> {
+    ) -> (Self, Client) {
         // Set up a communication channel between the node and client
         let (ntx, _) = broadcast::channel::<NodeMessage>(32);
         let (ctx, crx) = mpsc::channel::<ClientMessage>(5);
@@ -118,12 +118,10 @@ impl Node {
             dialog.clone(),
             header_store,
             required_peers,
-        )
-        .await
-        .map_err(NodeError::HeaderDatabase)?;
+        );
         // Initialize the chain with the headers we loaded
         let chain = Arc::new(Mutex::new(loaded_chain));
-        Ok((
+        (
             Self {
                 state,
                 chain,
@@ -136,15 +134,15 @@ impl Node {
                 is_running: AtomicBool::new(false),
             },
             client,
-        ))
+        )
     }

-    pub(crate) async fn new_from_config(
+    pub(crate) fn new_from_config(
         config: &NodeConfig,
         network: Network,
         peer_store: impl PeerStore + Send + Sync + 'static,
         header_store: impl HeaderStore + Send + Sync + 'static,
-    ) -> Result<(Self, Client), NodeError> {
+    ) -> (Self, Client) {
         Node::new(
             network,
             config.white_list.clone(),
@@ -155,7 +153,6 @@ impl Node {
             peer_store,
             header_store,
         )
-        .await
     }

     /// Has [`Node::run`] been called.
@@ -168,6 +165,7 @@ impl Node {
         self.dialog.send_dialog("Starting node".into()).await;
         self.is_running
             .store(true, std::sync::atomic::Ordering::Relaxed);
+        self.fetch_headers().await?;
         let (mtx, mut mrx) = mpsc::channel::<PeerThreadMessage>(32);
         let mut node_map = PeerMap::new(mtx, self.network);
         let mut tx_broadcaster = Broadcaster::new();
@@ -634,6 +632,18 @@ impl Node {
         }
     }

+    // When the application starts, fetch any headers we know about from the database.
+    async fn fetch_headers(&mut self) -> Result<(), NodeError> {
+        self.dialog
+            .send_dialog("Attempting to load headers from the database.".into())
+            .await;
+        let mut chain = self.chain.lock().await;
+        chain
+            .load_headers()
+            .await
+            .map_err(NodeError::HeaderDatabase)
+    }
+
     // First we search the whitelist for peers that we trust. If we don't have any more whitelisted peers,
     // we try to get a new peer from the peer manager. If that fails and our database is empty, we try DNS.
     // Otherwise, the node throws an error.
diff --git a/tests/core.rs b/tests/core.rs
index fd4c2aa..be6fce0 100644
--- a/tests/core.rs
+++ b/tests/core.rs
@@ -44,8 +44,7 @@ async fn new_node(addrs: HashSet<ScriptBuf>) -> (Node, Client) {
     let (node, client) = builder
         .add_peers(vec![host.into()])
         .add_scripts(addrs)
-        .build_with_databases((), ())
-        .await;
+        .build_with_databases((), ());
     (node, client)
 }

@@ -55,8 +54,7 @@ async fn new_node_sql(addrs: HashSet<ScriptBuf>) -> (Node, Client) {
     let (node, client) = builder
         .add_peers(vec![host.into()])
         .add_scripts(addrs)
-        .build_node()
-        .await;
+        .build_node();
     (node, client)
 }

@@ -70,8 +68,7 @@ async fn new_node_anchor_sql(
         .add_peers(vec![host.into()])
         .add_scripts(addrs)
         .anchor_checkpoint(checkpoint)
-        .build_node()
-        .await;
+        .build_node();
     (node, client)
 }
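
Note for reviewers, not part of the diff: after this change the builder is synchronous, and header
loading is deferred until [`Node::run`] is called, which invokes `fetch_headers` before any peers
are contacted. A minimal sketch of the resulting startup flow, pieced together from the examples
above; the `NodeBuilder::new` constructor and the `scripts` value are assumptions for illustration:

    // Building the node and client is now synchronous: the caller no longer
    // awaits or unwraps a database Result at construction time.
    let builder = NodeBuilder::new(bitcoin::Network::Signet); // assumed constructor
    let (mut node, client) = builder
        .add_scripts(scripts) // scripts: HashSet<ScriptBuf>, placeholder
        .num_required_peers(2)
        .build_node();
    // Headers are read from the store inside run() via fetch_headers(), so a
    // bad database now surfaces as NodeError::HeaderDatabase from run()
    // rather than a failure while building.
    tokio::task::spawn(async move { node.run().await });

A related consequence: since `Chain::new` no longer returns a `Result`, `load_fork` re-loads
persisted headers through `load_headers` after trimming to the older anchor, keeping all database
access on the running node's error path.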