Skip to content

Commit

Permalink
Change Peer::new to return Result
Browse files Browse the repository at this point in the history
This is a larger refactor that aims to make the library more stable by removing `unwrap` and `expect` calls.

Resolves #115
  • Loading branch information
herr-seppia committed Oct 21, 2022
1 parent 423cf51 commit 22d9746
Show file tree
Hide file tree
Showing 22 changed files with 244 additions and 234 deletions.
15 changes: 9 additions & 6 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,17 @@
//
// Copyright (c) DUSK NETWORK. All rights reserved.

use crate::transport::encoding::Configurable;
use crate::transport::encoding::TransportDecoder;
pub use crate::transport::encoding::TransportDecoderConfig;
use crate::transport::encoding::TransportEncoder;
pub use crate::transport::encoding::TransportEncoderConfig;
use serde_derive::{Deserialize, Serialize};
use std::time::Duration;

use serde_derive::{Deserialize, Serialize};

use crate::transport::encoding::{
Configurable, TransportDecoder, TransportEncoder,
};
pub use crate::transport::encoding::{
TransportDecoderConfig, TransportEncoderConfig,
};

/// Default value while a node is considered alive (no eviction will be
/// requested)
pub const BUCKET_DEFAULT_NODE_TTL_MILLIS: u64 = 30000;
Expand Down
2 changes: 1 addition & 1 deletion src/encoding/header.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@

use std::io::{self, Error, ErrorKind, Read, Write};

use super::Marshallable;
use crate::{kbucket::BinaryID, K_ID_LEN_BYTES, K_NONCE_LEN};

use super::Marshallable;
#[derive(Debug, PartialEq, Clone, Copy)]
pub struct Header {
pub(crate) binary_id: BinaryID,
Expand Down
9 changes: 4 additions & 5 deletions src/encoding/message.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,9 @@

use std::io::{self, Error, ErrorKind, Read, Write};

use crate::kbucket::BinaryKey;

pub(crate) use super::payload::{BroadcastPayload, NodePayload};
pub use super::{header::Header, Marshallable};
use crate::kbucket::BinaryKey;

// PingMsg wire Ping message id.
const ID_MSG_PING: u8 = 0;
Expand Down Expand Up @@ -56,10 +55,10 @@ impl Message {
}
}

pub(crate) fn bytes(&self) -> Vec<u8> {
pub(crate) fn bytes(&self) -> io::Result<Vec<u8>> {
let mut bytes = vec![];
self.marshal_binary(&mut bytes).unwrap();
bytes
self.marshal_binary(&mut bytes)?;
Ok(bytes)
}
}

Expand Down
5 changes: 3 additions & 2 deletions src/encoding/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,8 @@

pub(super) mod broadcast;
pub(super) mod nodes;
pub(crate) use crate::encoding::payload::broadcast::BroadcastPayload;
pub(crate) use crate::encoding::payload::nodes::NodePayload;
pub use nodes::IpInfo;
pub(crate) use nodes::PeerEncodedInfo;

pub(crate) use crate::encoding::payload::broadcast::BroadcastPayload;
pub(crate) use crate::encoding::payload::nodes::NodePayload;
1 change: 1 addition & 0 deletions src/encoding/payload/broadcast.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
use std::io::{self, Read, Write};

use crate::encoding::Marshallable;

#[derive(Debug, PartialEq)]
pub(crate) struct BroadcastPayload {
pub(crate) height: u8,
Expand Down
3 changes: 2 additions & 1 deletion src/encoding/payload/nodes.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use std::io::{self, Read, Write};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};

use crate::{encoding::Marshallable, kbucket::BinaryKey, K_ID_LEN_BYTES};

#[derive(Debug, PartialEq)]
pub(crate) struct NodePayload {
pub(crate) peers: Vec<PeerEncodedInfo>,
Expand Down Expand Up @@ -74,7 +75,7 @@ impl Marshallable for PeerEncodedInfo {
let ipv6_bytes: [u8; 16] = concat_u8(&ipv4[1..], &ipv6[..])
.as_slice()
.try_into()
.expect("Wrong length");
.expect("ipv6_bytes to be length 16");

IpInfo::IPv6(ipv6_bytes)
}
Expand Down
5 changes: 2 additions & 3 deletions src/handling.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
//
// Copyright (c) DUSK NETWORK. All rights reserved.

use std::convert::TryInto;
use std::net::SocketAddr;

use tokio::sync::mpsc::{Receiver, Sender};
Expand Down Expand Up @@ -200,12 +199,12 @@ impl MessageHandler {
let table_read = ktable.read().await;

let messages: Vec<(Message, Vec<SocketAddr>)> = table_read
.extract(Some((payload.height - 1).into()))
.extract(Some(payload.height - 1))
.map(|(height, nodes)| {
let msg = Message::Broadcast(
my_header,
BroadcastPayload {
height: height.try_into().unwrap(),
height,
gossip_frame: payload
.gossip_frame
.clone(), //FIX_ME: avoid clone
Expand Down
78 changes: 12 additions & 66 deletions src/kbucket.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,12 @@
use std::collections::HashMap;

use bucket::Bucket;
pub use bucket::InsertError;
pub use bucket::InsertOk;
pub use bucket::{NodeInsertError, NodeInsertOk};
use itertools::Itertools;
pub use key::{BinaryID, BinaryKey, BinaryNonce};
pub use node::Node;

pub use bucket::InsertError;
pub use bucket::InsertOk;
use tracing::info;

mod bucket;
Expand All @@ -22,9 +21,8 @@ mod node;
use crate::config::BucketConfig;
use crate::K_ALPHA;
use crate::K_BETA;
use crate::K_ID_LEN_BYTES;

pub type BucketHeight = usize;
pub type BucketHeight = u8;

pub(crate) struct Tree<V> {
root: Node<V>,
Expand Down Expand Up @@ -52,16 +50,18 @@ impl<V> Tree<V> {
};
}

//iter the buckets (up to max_height, inclusive) and pick at most Beta
// iter the buckets (up to max_height, inclusive) and pick at most Beta
// nodes for each bucket
pub(crate) fn extract(
&self,
max_h: Option<usize>,
max_h: Option<BucketHeight>,
) -> impl Iterator<Item = (BucketHeight, impl Iterator<Item = &Node<V>>)>
{
self.buckets
.iter()
.filter(move |(&height, _)| height <= max_h.unwrap_or(usize::MAX))
.filter(move |(&height, _)| {
height <= max_h.unwrap_or(BucketHeight::MAX)
})
.map(|(&height, bucket)| (height, bucket.pick::<K_BETA>()))
}

Expand Down Expand Up @@ -100,7 +100,8 @@ impl<V> Tree<V> {
pub(crate) fn idle_or_empty_heigth(
&'static self,
) -> impl Iterator<Item = BucketHeight> {
(0..K_ID_LEN_BYTES * 8).filter(move |h| {
let max_buckets = (crate::K_ID_LEN_BYTES * 8) as BucketHeight;
(0..max_buckets).filter(move |h| {
self.buckets.get(h).map_or_else(|| true, |b| b.is_idle())
})
}
Expand All @@ -116,7 +117,7 @@ impl<V> Tree<V> {
.map(|(&height, bucket)| (height, bucket.pick::<K_ALPHA>()))
}

pub(crate) fn has_peer(&self, peer: &BinaryKey) -> Option<usize> {
pub(crate) fn has_peer(&self, peer: &BinaryKey) -> Option<BucketHeight> {
match self.root.id().calculate_distance(peer) {
None => None,
Some(height) => self.buckets.get(&height).and_then(|bucket| {
Expand All @@ -141,7 +142,7 @@ impl<V> Tree<V> {
.flat_map(|(_, bucket)| bucket.alive_nodes())
}

pub(crate) fn is_bucket_full(&self, height: usize) -> bool {
pub(crate) fn is_bucket_full(&self, height: BucketHeight) -> bool {
self.buckets
.get(&height)
.map_or(false, |bucket| bucket.is_full())
Expand All @@ -160,61 +161,6 @@ impl<V> Tree<V> {
}
}

// pub struct TreeBuilder<V> {
// node_ttl: Duration,
// node_evict_after: Duration,
// root: Node<V>,
// bucket_ttl: Duration,
// }

// impl<V> TreeBuilder<V> {
// pub(crate) fn new(root: Node<V>) -> TreeBuilder<V> {
// TreeBuilder {
// root,
// node_evict_after: Duration::from_millis(
// BUCKET_DEFAULT_NODE_EVICT_AFTER_MILLIS,
// ),
// node_ttl: Duration::from_millis(BUCKET_DEFAULT_NODE_TTL_MILLIS),
// bucket_ttl: Duration::from_secs(BUCKET_DEFAULT_TTL_SECS),
// }
// }

// pub fn with_node_ttl(mut self, node_ttl: Duration) -> TreeBuilder<V> {
// self.node_ttl = node_ttl;
// self
// }

// pub fn with_bucket_ttl(mut self, bucket_ttl: Duration) -> TreeBuilder<V>
// { self.bucket_ttl = bucket_ttl;
// self
// }

// pub fn with_node_evict_after(
// mut self,
// node_evict_after: Duration,
// ) -> TreeBuilder<V> {
// self.node_evict_after = node_evict_after;
// self
// }

// pub(crate) fn build(self) -> Tree<V> {
// info!(
// "Built table [K={}] with root: {:?}",
// crate::K_K,
// self.root.id()
// );
// Tree {
// root: self.root,
// buckets: HashMap::new(),
// config: BucketConfig {
// bucket_ttl: self.bucket_ttl,
// node_evict_after: self.node_evict_after,
// node_ttl: self.node_ttl,
// },
// }
// }
// }

#[cfg(test)]
mod tests {
use std::time::Duration;
Expand Down
18 changes: 11 additions & 7 deletions src/kbucket/bucket.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,15 @@
//
// Copyright (c) DUSK NETWORK. All rights reserved.

use crate::config::BucketConfig;
use crate::K_K;

use super::node::{Node, NodeEvictionStatus};
use super::BinaryKey;
use arrayvec::ArrayVec;
use rand::seq::SliceRandom;
use rand::thread_rng;

use super::node::{Node, NodeEvictionStatus};
use super::BinaryKey;
use crate::config::BucketConfig;
use crate::K_K;

pub(super) struct Bucket<V> {
nodes: arrayvec::ArrayVec<Node<V>, K_K>,
pending_node: Option<Node<V>>,
Expand Down Expand Up @@ -132,13 +132,17 @@ impl<V> Bucket<V> {
self.try_perform_eviction();
return Ok(NodeInsertOk::Updated {
pending_eviction: self.pending_eviction_node(),
updated: self.nodes.last().unwrap(),
updated: self.nodes.last().expect(
"last node to exist because it's been just updated",
),
});
}
self.try_perform_eviction();
match self.nodes.try_push(node) {
Ok(_) => Ok(NodeInsertOk::Inserted {
inserted: self.nodes.last().unwrap(),
inserted: self.nodes.last().expect(
"last node to exist because it's been just inserted",
),
}),
Err(err) => {
if self
Expand Down
27 changes: 18 additions & 9 deletions src/kbucket/key.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,12 @@ use crate::K_NONCE_LEN;
pub type BinaryKey = [u8; K_ID_LEN_BYTES];
pub type BinaryNonce = [u8; K_NONCE_LEN];

use blake2::{Blake2s, Digest};
use blake2::{Blake2s256, Digest};

use crate::{K_DIFF_MIN_BIT, K_DIFF_PRODUCED_BIT};

use super::BucketHeight;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
pub struct BinaryID {
bytes: BinaryKey,
Expand Down Expand Up @@ -52,25 +54,32 @@ impl BinaryID {

// Returns the 0-based kadcast distance between 2 ID
// `None` if they are identical
pub fn calculate_distance(&self, other: &BinaryKey) -> Option<usize> {
pub fn calculate_distance(
&self,
other: &BinaryKey,
) -> Option<BucketHeight> {
assert!(
(K_ID_LEN_BYTES * BucketHeight::BITS as usize)
< BucketHeight::MAX as usize,
"K_ID_LEN_BYTES must be lower"
);
self.as_binary()
.iter()
.zip(other.iter())
.map(|(&a, &b)| a ^ b)
.enumerate()
.rev()
.find(|(_, b)| b != &0b0)
.map(|(i, b)| {
BinaryID::msb(b).expect("Can't be None") + (i << 3) - 1
})
.map(|(i, b)| (i as BucketHeight, b))
.map(|(i, b)| BinaryID::msb(b).expect("to be Some") + (i << 3) - 1)
}

// Returns the position of the most-significant bit set in a byte,
// `None` if no bit is set
const fn msb(n: u8) -> Option<usize> {
const fn msb(n: u8) -> Option<u8> {
match u8::BITS - n.leading_zeros() {
0 => None,
n => Some(n as usize),
n => Some(n as u8),
}
}

Expand All @@ -83,7 +92,7 @@ impl BinaryID {
panic!("PoW is less than minimum required, review your build config...")
}
let mut nonce: u32 = 0;
let mut hasher = Blake2s::new();
let mut hasher = Blake2s256::new();
loop {
hasher.update(id);
let nonce_bytes = nonce.to_le_bytes();
Expand All @@ -102,7 +111,7 @@ impl BinaryID {
}

pub fn verify_nonce(&self) -> bool {
let mut hasher = Blake2s::new();
let mut hasher = Blake2s256::new();
hasher.update(self.bytes);
hasher.update(self.nonce);
BinaryID::verify_difficulty(
Expand Down
7 changes: 6 additions & 1 deletion src/kbucket/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
use std::time::{Duration, Instant};

use super::key::BinaryID;
use super::BucketHeight;

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Node<TValue> {
id: BinaryID,
Expand All @@ -31,7 +33,10 @@ impl<TValue> Node<TValue> {
}
}

pub fn calculate_distance(&self, other: &Node<TValue>) -> Option<usize> {
pub fn calculate_distance(
&self,
other: &Node<TValue>,
) -> Option<BucketHeight> {
self.id.calculate_distance(other.id.as_binary())
}

Expand Down
Loading

0 comments on commit 22d9746

Please sign in to comment.