diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs
index 4096fea4f48..3203d0916b8 100644
--- a/zebra-network/src/config.rs
+++ b/zebra-network/src/config.rs
@@ -1,4 +1,8 @@
-use std::{net::SocketAddr, time::Duration};
+use std::{
+    net::{SocketAddr, ToSocketAddrs},
+    string::String,
+    time::Duration,
+};
 
 use crate::network::Network;
 
@@ -8,14 +12,21 @@ use crate::network::Network;
 pub struct Config {
     /// The address on which this node should listen for connections.
     pub listen_addr: SocketAddr,
+
     /// The network to connect to.
     pub network: Network,
+
     /// The user-agent to advertise.
     pub user_agent: String,
-    /// A list of initial peers for the peerset.
-    ///
-    /// XXX this should be replaced with DNS names, not SocketAddrs
-    pub initial_peers: Vec<SocketAddr>,
+
+    /// A list of initial peers for the peerset when operating on
+    /// mainnet.
+    pub initial_mainnet_peers: Vec<String>,
+
+    /// A list of initial peers for the peerset when operating on
+    /// testnet.
+    pub initial_testnet_peers: Vec<String>,
+
     /// The outgoing request buffer size for the peer set.
     pub peerset_request_buffer_size: usize,
 
@@ -23,23 +34,59 @@ pub struct Config {
     // serializer, the Duration fields should come last.
     /// The default RTT estimate for peer responses, used in load-balancing.
     pub ewma_default_rtt: Duration,
+
     /// The decay time for the exponentially-weighted moving average response time.
     pub ewma_decay_time: Duration,
+
     /// The timeout for peer handshakes.
     pub handshake_timeout: Duration,
+
     /// How frequently we attempt to connect to a new peer.
     pub new_peer_interval: Duration,
 }
 
+impl Config {
+    fn parse_peers(peers: Vec<String>) -> Vec<SocketAddr> {
+        peers
+            .iter()
+            .flat_map(|s| s.to_socket_addrs())
+            .flatten()
+            .collect::<Vec<SocketAddr>>()
+    }
+
+    /// Get the initial seed peers based on the configured network.
+    pub fn initial_peers(&self) -> Vec<SocketAddr> {
+        match self.network {
+            Network::Mainnet => Config::parse_peers(self.initial_mainnet_peers.clone()),
+            Network::Testnet => Config::parse_peers(self.initial_testnet_peers.clone()),
+        }
+    }
+}
+
 impl Default for Config {
     fn default() -> Config {
+        let mainnet_peers = [
+            "dnsseed.z.cash:8233",
+            "dnsseed.str4d.xyz:8233",
+            "dnsseed.znodes.org:8233",
+        ]
+        .iter()
+        .map(|&s| String::from(s))
+        .collect();
+
+        let testnet_peers = ["dnsseed.testnet.z.cash:18233"]
+            .iter()
+            .map(|&s| String::from(s))
+            .collect();
+
         Config {
             listen_addr: "127.0.0.1:28233"
                 .parse()
                 .expect("Hardcoded address should be parseable"),
             user_agent: crate::constants::USER_AGENT.to_owned(),
             network: Network::Mainnet,
-            initial_peers: Vec::new(),
+            initial_mainnet_peers: mainnet_peers,
+            initial_testnet_peers: testnet_peers,
             ewma_default_rtt: Duration::from_secs(1),
             ewma_decay_time: Duration::from_secs(60),
             peerset_request_buffer_size: 1,
diff --git a/zebra-network/src/peer/server.rs b/zebra-network/src/peer/server.rs
index f77827709c4..743ca6b664c 100644
--- a/zebra-network/src/peer/server.rs
+++ b/zebra-network/src/peer/server.rs
@@ -304,6 +304,7 @@ where
         // and try to construct an appropriate request object.
         let req = match msg {
             Message::Addr(addrs) => Some(Request::PushPeers(addrs)),
+            Message::GetAddr => Some(Request::GetPeers),
             _ => {
                 debug!("unhandled message type");
                 None
diff --git a/zebra-network/src/peer_set.rs b/zebra-network/src/peer_set.rs
index 2d5b70e6d2a..44ac0255be8 100644
--- a/zebra-network/src/peer_set.rs
+++ b/zebra-network/src/peer_set.rs
@@ -103,7 +103,7 @@ where
 
     // 1. Initial peers, specified in the config.
     tokio::spawn(add_initial_peers(
-        config.initial_peers.clone(),
+        config.initial_peers(),
         connector.clone(),
         peerset_tx.clone(),
     ));
diff --git a/zebrad/src/commands.rs b/zebrad/src/commands.rs
index af1db925f9c..e3101f047c5 100644
--- a/zebrad/src/commands.rs
+++ b/zebrad/src/commands.rs
@@ -1,21 +1,14 @@
 //! Zebrad Subcommands
-//!
-//! This is where you specify the subcommands of your application.
-//!
-//! The default application comes with two subcommands:
-//!
-//! - `start`: launches the application
-//! - `version`: print application version
-//!
-//! See the `impl Configurable` below for how to specify the path to the
-//! application's configuration file.
 
 mod config;
 mod connect;
+mod seed;
 mod start;
 mod version;
 
-use self::{config::ConfigCmd, connect::ConnectCmd, start::StartCmd, version::VersionCmd};
+use self::{
+    config::ConfigCmd, connect::ConnectCmd, seed::SeedCmd, start::StartCmd, version::VersionCmd,
+};
 
 use crate::config::ZebradConfig;
 use abscissa_core::{
     config::Override, Command, Configurable, FrameworkError, Help, Options, Runnable,
@@ -47,6 +40,10 @@ pub enum ZebradCmd {
     /// The `connect` subcommand
     #[options(help = "testing stub for dumping network messages")]
     Connect(ConnectCmd),
+
+    /// The `seed` subcommand
+    #[options(help = "dns seeder")]
+    Seed(SeedCmd),
 }
 
 /// This trait allows you to define how application configuration is loaded.
diff --git a/zebrad/src/commands/config.rs b/zebrad/src/commands/config.rs
index ab3ae8ba827..730af4d0c53 100644
--- a/zebrad/src/commands/config.rs
+++ b/zebrad/src/commands/config.rs
@@ -27,7 +27,7 @@ impl Runnable for ConfigCmd {
 # can be found in Rustdoc. XXX add link to rendered docs.
 "
-        .to_owned();
+        .to_owned(); // The default name and location of the config file is defined in ../commands.rs
 
         output += &toml::to_string_pretty(&default_config)
             .expect("default config should be serializable");
         match self.output_file {
diff --git a/zebrad/src/commands/connect.rs b/zebrad/src/commands/connect.rs
index 464ea019435..b24dc289303 100644
--- a/zebrad/src/commands/connect.rs
+++ b/zebrad/src/commands/connect.rs
@@ -66,9 +66,7 @@ impl ConnectCmd {
             1,
         );
 
-        let mut config = app_config().network.clone();
-
-        config.initial_peers = vec![self.addr];
+        let config = app_config().network.clone();
 
         let (mut peer_set, address_book) = zebra_network::init(config, node).await;
 
diff --git a/zebrad/src/commands/seed.rs b/zebrad/src/commands/seed.rs
new file mode 100644
index 00000000000..7d7fdba677f
--- /dev/null
+++ b/zebrad/src/commands/seed.rs
@@ -0,0 +1,155 @@
+//! `seed` subcommand - runs a dns seeder
+
+use std::{
+    future::Future,
+    pin::Pin,
+    sync::{Arc, Mutex},
+    task::{Context, Poll},
+};
+
+use abscissa_core::{config, Command, FrameworkError, Options, Runnable};
+use futures::channel::oneshot;
+use tower::{buffer::Buffer, Service, ServiceExt};
+use zebra_network::{AddressBook, BoxedStdError, Request, Response};
+
+use crate::{config::ZebradConfig, prelude::*};
+
+/// Whether our `SeedService` is poll_ready or not.
+#[derive(Debug)]
+enum SeederState {
+    /// Waiting for the address book to be shared with us via the oneshot channel.
+    AwaitingAddressBook(oneshot::Receiver<Arc<Mutex<AddressBook>>>),
+    /// Address book received, ready to service requests.
+    Ready(Arc<Mutex<AddressBook>>),
+}
+
+#[derive(Debug)]
+struct SeedService {
+    state: SeederState,
+}
+
+impl Service<Request> for SeedService {
+    type Response = Response;
+    type Error = BoxedStdError;
+    type Future =
+        Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send + 'static>>;
+
+    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        debug!("SeedService.state: {:?}", self.state);
+
+        match self.state {
+            SeederState::Ready(_) => return Poll::Ready(Ok(())),
+            SeederState::AwaitingAddressBook(ref mut rx) => match rx.try_recv() {
+                Err(e) => {
+                    error!("SeedService oneshot sender dropped: {:?}", e);
+                    return Poll::Ready(Err(e.into()));
+                }
+                Ok(None) => {
+                    trace!("SeedService hasn't received a message via the oneshot yet.");
+                    return Poll::Pending;
+                }
+                Ok(Some(address_book)) => {
+                    info!(
+                        "SeedService received address_book via oneshot {:?}",
+                        address_book
+                    );
+                    self.state = SeederState::Ready(address_book);
+                    return Poll::Ready(Ok(()));
+                }
+            },
+        }
+    }
+
+    fn call(&mut self, req: Request) -> Self::Future {
+        info!("SeedService handling a request: {:?}", req);
+
+        let address_book = if let SeederState::Ready(address_book) = &self.state {
+            address_book
+        } else {
+            panic!("SeedService::call without SeedService::poll_ready");
+        };
+
+        let response = match req {
+            Request::GetPeers => {
+                debug!(address_book.len = address_book.lock().unwrap().len());
+                info!("SeedService responding to GetPeers");
+                Ok::<Response, Self::Error>(Response::Peers(
+                    address_book.lock().unwrap().peers().collect(),
+                ))
+            }
+            _ => Ok::<Response, Self::Error>(Response::Ok),
+        };
+
+        info!("SeedService response: {:?}", response);
+        return Box::pin(futures::future::ready(response));
+    }
+}
+
+/// `seed` subcommand
+///
+/// A DNS seeder command to spider and collect as many valid peer
+/// addresses as we can.
+// This is not a unit-like struct because it makes Command and Options sad.
+#[derive(Command, Debug, Default, Options)]
+pub struct SeedCmd {}
+
+impl Runnable for SeedCmd {
+    /// Start the application.
+    fn run(&self) {
+        use crate::components::tokio::TokioComponent;
+
+        let _ = app_reader()
+            .state()
+            .components
+            .get_downcast_ref::<TokioComponent>()
+            .expect("TokioComponent should be available")
+            .rt
+            .block_on(self.seed());
+    }
+}
+
+impl SeedCmd {
+    async fn seed(&self) -> Result<(), failure::Error> {
+        use failure::Error;
+
+        info!("begin tower-based peer handling test stub");
+
+        let (addressbook_tx, addressbook_rx) = oneshot::channel();
+        let seed_service = SeedService {
+            state: SeederState::AwaitingAddressBook(addressbook_rx),
+        };
+        let node = Buffer::new(seed_service, 1);
+
+        let config = app_config().network.clone();
+
+        let (mut peer_set, address_book) = zebra_network::init(config, node).await;
+
+        let _ = addressbook_tx.send(address_book);
+
+        info!("waiting for peer_set ready");
+        peer_set.ready().await.map_err(Error::from_boxed_compat)?;
+
+        info!("peer_set became ready");
+
+        #[cfg(dos)]
+        use std::time::Duration;
+        use tokio::timer::Interval;
+
+        #[cfg(dos)]
+        // Fire GetPeers requests at ourselves, for testing.
+        tokio::spawn(async move {
+            let mut interval_stream = Interval::new_interval(Duration::from_secs(1));
+
+            loop {
+                interval_stream.next().await;
+
+                let _ = seed_service.call(Request::GetPeers);
+            }
+        });
+
+        let eternity = tokio::future::pending::<()>();
+        eternity.await;
+
+        Ok(())
+    }
+}