Skip to content

Commit

Permalink
multi: [ci] add build and test [clippy] fix many errors
Browse files Browse the repository at this point in the history
  • Loading branch information
rustaceanrob committed Jun 1, 2024
1 parent eb53d62 commit 268b7c2
Show file tree
Hide file tree
Showing 19 changed files with 138 additions and 132 deletions.
1 change: 1 addition & 0 deletions .clippy.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
msrv = "1.56.1"
35 changes: 35 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# Continuous integration: lint, format-check, build, and test the crate on
# every push to master and on all pull requests.
name: Build & Test

on:
  push:
    branches:
      - master
  pull_request:

jobs:
  node:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Minimum Supported Rust Version (MSRV) is 1.56.1.
        toolchain: [1.56.1, stable, beta, nightly]
    steps:
      - uses: actions/checkout@v3
      - name: Update Toolchain
        run: |
          rustup default ${{ matrix.toolchain }}
          rustup component add --toolchain ${{ matrix.toolchain }} rustfmt
          rustup component add --toolchain ${{ matrix.toolchain }} clippy
          rustup update ${{ matrix.toolchain }}
      - name: Lint
        run: cargo clippy --package kyoto_light_client --all-targets
      - name: Format
        run: cargo fmt --package kyoto_light_client -- --check
      - name: Build
        run: cargo build --package kyoto_light_client --verbose
      - name: Test
        run: cargo test --package kyoto_light_client --verbose
47 changes: 19 additions & 28 deletions src/chain/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ use crate::{
},
node::{dialog::Dialog, node_messages::NodeMessage},
prelude::{params_from_network, MEDIAN_TIME_PAST},
tx::{memory::MemoryTransactionCache, store::TransactionStore, types::IndexedTransaction},
tx::types::IndexedTransaction,
};

pub(crate) struct Chain {
Expand All @@ -41,15 +41,13 @@ pub(crate) struct Chain {
best_known_height: Option<u32>,
scripts: HashSet<ScriptBuf>,
block_queue: BlockQueue,
tx_store: MemoryTransactionCache,
dialog: Dialog,
}

impl Chain {
pub(crate) async fn new(
network: &Network,
scripts: HashSet<ScriptBuf>,
tx_store: MemoryTransactionCache,
anchor: HeaderCheckpoint,
mut checkpoints: HeaderCheckpoints,
mut dialog: Dialog,
Expand Down Expand Up @@ -103,7 +101,6 @@ impl Chain {
best_known_height: None,
scripts,
block_queue: BlockQueue::new(),
tx_store,
dialog,
})
}
Expand Down Expand Up @@ -174,11 +171,7 @@ impl Chain {
// Do we have best known height and is our height equal to it
pub(crate) fn is_synced(&self) -> bool {
if let Some(height) = self.best_known_height {
if (self.height() as u32).ge(&height) {
true
} else {
false
}
(self.height() as u32).ge(&height)
} else {
false
}
Expand Down Expand Up @@ -313,6 +306,7 @@ impl Chain {
"Peer is sending us malicious headers, restarting header sync.".into(),
)
.await;
// We assume that this would be so rare that we just clear the whole header chain
self.header_chain.clear_all();
return Err(HeaderSyncError::InvalidCheckpoint);
}
Expand All @@ -335,7 +329,7 @@ impl Chain {
.inner()
.iter()
.filter(|header| !self.contains_header(**header))
.map(|a| *a)
.copied()
.collect();
let challenge_chainwork = uncommon
.iter()
Expand All @@ -350,23 +344,23 @@ impl Chain {
.eq(&stem.block_hash())
});
if let Some(stem) = stem_position {
let current_chainwork = self.chainwork_after_height(stem);
let current_chainwork = self.header_chain.chainwork_after_index(stem);
if current_chainwork.lt(&challenge_chainwork) {
self.dialog
.send_dialog("Valid reorganization found".into())
.await;
self.header_chain.extend(&uncommon);
return Ok(());
Ok(())
} else {
self.dialog
.send_warning(
"Peer sent us a fork with less work than the current chain".into(),
)
.await;
return Err(HeaderSyncError::LessWorkFork);
Err(HeaderSyncError::LessWorkFork)
}
} else {
return Err(HeaderSyncError::FloatingHeaders);
Err(HeaderSyncError::FloatingHeaders)
}
}

Expand Down Expand Up @@ -476,22 +470,22 @@ impl Chain {
let mut filter = Filter::new(filter_message.filter, filter_message.block_hash);
let expected_filter_hash = self.cf_header_chain.hash_at(&filter_message.block_hash);
if let Some(ref_hash) = expected_filter_hash {
if filter.filter_hash().await.ne(&ref_hash) {
if filter.filter_hash().await.ne(ref_hash) {
return Err(CFilterSyncError::MisalignedFilterHash);
}
}
if !self.block_queue.contains(&filter_message.block_hash)
&& filter
.contains_any(&self.scripts)
.await
.map_err(|e| CFilterSyncError::Filter(e))?
.map_err(CFilterSyncError::Filter)?
{
// Add to the block queue
self.block_queue.add(filter_message.block_hash);
self.dialog
.send_dialog(format!(
"Found script at block: {}",
filter_message.block_hash.to_string()
filter_message.block_hash
))
.await;
}
Expand All @@ -503,7 +497,7 @@ impl Chain {
Ok(None)
}
} else {
return Err(CFilterSyncError::UnrequestedStophash);
Err(CFilterSyncError::UnrequestedStophash)
}
}

Expand Down Expand Up @@ -559,10 +553,10 @@ impl Chain {
let height_of_block = self.height_of_hash(block.block_hash()).await;
for tx in &block.txdata {
if self.scan_inputs(&tx.input) || self.scan_outputs(&tx.output) {
self.tx_store
.add_transaction(&tx, height_of_block, &block.block_hash())
.await
.unwrap();
// self.tx_store
// .add_transaction(&tx, height_of_block, &block.block_hash())
// .await
// .unwrap();
self.dialog
.send_data(NodeMessage::Block(block.clone()))
.await;
Expand All @@ -574,23 +568,20 @@ impl Chain {
)))
.await;
self.dialog
.send_dialog(format!(
"Found transaction: {}",
tx.compute_txid().to_string()
))
.send_dialog(format!("Found transaction: {}", tx.compute_txid()))
.await;
}
}
Ok(())
}

fn scan_inputs(&mut self, inputs: &Vec<TxIn>) -> bool {
fn scan_inputs(&mut self, inputs: &[TxIn]) -> bool {
inputs
.iter()
.any(|input| self.scripts.contains(&input.script_sig))
}

fn scan_outputs(&mut self, inputs: &Vec<TxOut>) -> bool {
fn scan_outputs(&mut self, inputs: &[TxOut]) -> bool {
inputs
.iter()
.any(|out| self.scripts.contains(&out.script_pubkey))
Expand Down
2 changes: 1 addition & 1 deletion src/chain/checkpoints.rs
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ impl HeaderCheckpoints {
cp_list.iter().for_each(|(height, hash)| {
checkpoints.push_back(HeaderCheckpoint {
height: *height,
hash: BlockHash::from_str(&hash).unwrap(),
hash: BlockHash::from_str(hash).unwrap(),
})
});
let last = *checkpoints.back().unwrap();
Expand Down
2 changes: 1 addition & 1 deletion src/chain/header_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ pub(crate) struct HeadersBatch {
// This struct provides basic sanity checks and helper methods.
impl HeadersBatch {
pub(crate) fn new(batch: Vec<Header>) -> Result<Self, HeadersBatchError> {
if batch.len() < 1 {
if batch.is_empty() {
return Err(HeadersBatchError::EmptyVec);
}
Ok(HeadersBatch { batch })
Expand Down
26 changes: 16 additions & 10 deletions src/chain/header_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,7 @@ impl HeaderChain {
.headers
.iter()
.position(|header| header.block_hash().eq(&blockhash));
match offset_pos {
Some(index) => Some(self.anchor_checkpoint.height + index + 1),
None => None,
}
offset_pos.map(|index| self.anchor_checkpoint.height + index + 1)
}

// This header chain contains a block hash
Expand Down Expand Up @@ -135,17 +132,25 @@ impl HeaderChain {
}
}

// Total proof-of-work of all headers strictly after `index` in this chain.
// NOTE(review): when no header lies after `index` (empty chain, or `index`
// at/past the tip) this falls back to the work of the ENTIRE chain — confirm
// callers expect that rather than zero work.
pub(crate) fn chainwork_after_index(&self, index: usize) -> Work {
    self.headers
        .iter()
        // Headers at positions `index + 1..` are the ones after `index`.
        .skip(index + 1)
        .map(|header| header.work())
        .reduce(|acc, next| acc + next)
        // Lazy fallback: `chainwork()` walks the whole chain, so only
        // compute it when there is nothing after `index`.
        .unwrap_or_else(|| self.chainwork())
}

// Human readable chainwork
pub(crate) fn log2_work(&self) -> f64 {
let work = self
.headers
.iter()
.map(|header| header.work().log2())
.reduce(|acc, next| acc + next);
match work {
Some(w) => w,
None => 0.0,
}
work.unwrap_or(0.0)
}

// The last 11 headers, if we have that many
Expand All @@ -155,7 +160,7 @@ impl HeaderChain {
.rev()
.take(MEDIAN_TIME_PAST)
.rev()
.map(|header_ref| (*header_ref).clone())
.copied()
.collect()
}

Expand Down Expand Up @@ -213,7 +218,7 @@ impl HeaderChain {
}
self.headers.extend(batch);
}
reorged.iter().rev().map(|header| *header).collect()
reorged.iter().rev().copied().collect()
}

// Clear all the headers from our chain. Only to be used when a peer has fed us faulty checkpoints
Expand Down Expand Up @@ -304,6 +309,7 @@ mod tests {
chain.chainwork_after_height(190_001),
block_190_002.work() + block_190_003.work()
);
assert_eq!(chain.chainwork_after_index(1), block_190_003.work());
assert_eq!(chain.tip(), block_190_003.block_hash());
}

Expand Down
1 change: 1 addition & 0 deletions src/chain/mod.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
pub(crate) mod block_queue;
#[allow(clippy::module_inception)]
pub(crate) mod chain;
pub mod checkpoints;
pub(crate) mod error;
Expand Down
8 changes: 4 additions & 4 deletions src/db/sqlite/header_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ impl HeaderStore for SqliteHeaderDb {
let stmt = "SELECT * FROM headers ORDER BY height";
let write_lock = self.conn.lock().await;
let mut query = write_lock
.prepare(&stmt)
.prepare(stmt)
.map_err(|_| HeaderDatabaseError::LoadError)?;
let mut rows = query
.query([])
Expand Down Expand Up @@ -119,15 +119,15 @@ impl HeaderStore for SqliteHeaderDb {
Ok(headers)
}

async fn write(&mut self, header_chain: &Vec<Header>) -> Result<(), HeaderDatabaseError> {
async fn write<'a>(&mut self, header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError> {
let mut write_lock = self.conn.lock().await;
let tx = write_lock
.transaction()
.map_err(|_| HeaderDatabaseError::WriteError)?;
let count: u64 = tx
.query_row("SELECT COUNT(*) FROM headers", [], |row| row.get(0))
.map_err(|_| HeaderDatabaseError::WriteError)?;
let adjusted_count = count.checked_sub(1).unwrap_or(0) + self.anchor_height;
let adjusted_count = count.saturating_sub(1) + self.anchor_height;
for (height, header) in header_chain.iter().enumerate() {
let adjusted_height = self.anchor_height + 1 + height as u64;
if adjusted_height.ge(&(adjusted_count)) {
Expand All @@ -145,7 +145,7 @@ impl HeaderStore for SqliteHeaderDb {
"INSERT OR REPLACE INTO headers (height, block_hash, version, prev_hash, merkle_root, time, bits, nonce) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)"
};
tx.execute(
&stmt,
stmt,
params![
adjusted_height,
hash,
Expand Down
2 changes: 1 addition & 1 deletion src/db/sqlite/peer_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ impl SqlitePeerDb {
if let Some(row) = rows.next()? {
let ip_addr: String = row.get(0)?;
let port: u16 = row.get(1)?;
lock.execute("DELETE FROM peers WHERE ip_addr = ?1", &[&ip_addr])?;
lock.execute("DELETE FROM peers WHERE ip_addr = ?1", [&ip_addr])?;
let ip = ip_addr
.parse::<IpAddr>()
.map_err(|_| rusqlite::Error::InvalidQuery)?;
Expand Down
4 changes: 2 additions & 2 deletions src/db/traits.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,15 +6,15 @@ use super::error::HeaderDatabaseError;
// Storage backend for block headers: load the persisted chain on startup and
// write the in-memory chain back out.
#[async_trait]
pub(crate) trait HeaderStore {
// Load all stored headers.
async fn load(&mut self) -> Result<Vec<Header>, HeaderDatabaseError>;
// Persist a slice of headers; how they are merged with any existing rows is
// up to the implementation.
async fn write<'a>(&mut self, header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError>;
}

// No-op store: loads nothing and discards writes. Presumably selected when
// header persistence is disabled — confirm against the node's configuration.
#[async_trait]
impl HeaderStore for () {
// Nothing is ever persisted, so there is nothing to load.
async fn load(&mut self) -> Result<Vec<Header>, HeaderDatabaseError> {
Ok(Vec::new())
}
// Silently drop the headers.
async fn write<'a>(&mut self, _header_chain: &'a [Header]) -> Result<(), HeaderDatabaseError> {
Ok(())
}
}
6 changes: 3 additions & 3 deletions src/filters/cfheader_batch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,8 @@ impl CFHeaderBatch {
}
}

impl Into<CFHeaderBatch> for CFHeaders {
fn into(self) -> CFHeaderBatch {
CFHeaderBatch::new(self)
impl From<CFHeaders> for CFHeaderBatch {
fn from(val: CFHeaders) -> Self {
CFHeaderBatch::new(val)
}
}
6 changes: 3 additions & 3 deletions src/filters/cfheader_chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ impl CFHeaderChain {
peer_id: u32,
cf_headers: CFHeaderBatch,
) -> Result<AppendAttempt, CFHeaderSyncError> {
if self.merged_queue.get(&peer_id).is_some() {
if self.merged_queue.contains_key(&peer_id) {
return Err(CFHeaderSyncError::UnexpectedCFHeaderMessage);
}
self.merged_queue.insert(peer_id, cf_headers.inner());
Expand Down Expand Up @@ -90,7 +90,7 @@ impl CFHeaderChain {
}
}
// Made it through without finding any conflicts, we can extend the current chain by the reference
self.header_chain.extend_from_slice(&reference_peer);
self.header_chain.extend_from_slice(reference_peer);
// Reset the merge queue
self.merged_queue.clear();
Ok(AppendAttempt::Extended)
Expand Down Expand Up @@ -142,7 +142,7 @@ impl CFHeaderChain {
self.merged_queue.clear()
}

pub(crate) async fn join(&mut self, headers: &Vec<Header>) {
pub(crate) async fn join(&mut self, headers: &[Header]) {
headers
.iter()
.zip(self.header_chain.iter().map(|(_, hash)| hash))
Expand Down
Loading

0 comments on commit 268b7c2

Please sign in to comment.