use std::{sync::Arc, time::Duration};

use iroh::{
    Endpoint, RelayMode,
    endpoint::{ConnectionInfo, ConnectionMonitor},
};
use n0_error::{Result, StackResultExt, StdResultExt, ensure_any};
use n0_future::task::AbortOnDropHandle;
use tokio::{
    sync::mpsc::{UnboundedReceiver, UnboundedSender},
    task::JoinSet,
};
use tracing::{Instrument, info, info_span};

const ALPN: &[u8] = b"iroh/test";

| 17 | +#[tokio::main] |
| 18 | +async fn main() -> Result { |
| 19 | + tracing_subscriber::fmt() |
| 20 | + .with_env_filter( |
| 21 | + tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| "info".into()), |
| 22 | + ) |
| 23 | + .init(); |
| 24 | + |
| 25 | + let monitor = Monitor::new(); |
| 26 | + let server = Endpoint::empty_builder(RelayMode::Disabled) |
| 27 | + .alpns(vec![ALPN.to_vec()]) |
| 28 | + .monitor_connections(monitor.clone()) |
| 29 | + .bind() |
| 30 | + .instrument(info_span!("server")) |
| 31 | + .await?; |
| 32 | + let server_addr = server.addr(); |
| 33 | + |
| 34 | + let count = 2; |
| 35 | + |
| 36 | + let client_task = tokio::spawn( |
| 37 | + async move { |
| 38 | + let client = Endpoint::empty_builder(RelayMode::Disabled) |
| 39 | + .bind() |
| 40 | + .instrument(info_span!("client")) |
| 41 | + .await?; |
| 42 | + for _i in 0..count { |
| 43 | + let conn = client.connect(server_addr.clone(), ALPN).await?; |
| 44 | + let mut s = conn.accept_uni().await.anyerr()?; |
| 45 | + let data = s.read_to_end(2).await.anyerr()?; |
| 46 | + ensure_any!(data == b"hi", "unexpected data"); |
| 47 | + conn.close(23u32.into(), b"bye"); |
| 48 | + } |
| 49 | + client.close().await; |
| 50 | + n0_error::Ok(client) |
| 51 | + } |
| 52 | + .instrument(info_span!("client")), |
| 53 | + ); |
| 54 | + |
| 55 | + let server_task = tokio::spawn( |
| 56 | + async move { |
| 57 | + for _i in 0..count { |
| 58 | + let conn = server |
| 59 | + .accept() |
| 60 | + .await |
| 61 | + .context("server endpoint closed")? |
| 62 | + .await?; |
| 63 | + let mut s = conn.open_uni().await.anyerr()?; |
| 64 | + s.write_all(b"hi").await.anyerr()?; |
| 65 | + conn.closed().await; |
| 66 | + } |
| 67 | + info!("done"); |
| 68 | + server.close().await; |
| 69 | + n0_error::Ok(()) |
| 70 | + } |
| 71 | + .instrument(info_span!("server")), |
| 72 | + ); |
| 73 | + client_task.await.std_context("client")?.context("client")?; |
| 74 | + server_task.await.std_context("server")?.context("server")?; |
| 75 | + tokio::time::sleep(Duration::from_secs(1)).await; |
| 76 | + drop(monitor); |
| 77 | + Ok(()) |
| 78 | +} |
| 79 | + |
| 80 | +/// Our connection monitor impl. |
| 81 | +/// |
| 82 | +/// This here only logs connection open and close events via tracing. |
| 83 | +/// It could also maintain a datastructure of all connections, or send the stats to some metrics service. |
| 84 | +#[derive(Clone)] |
| 85 | +struct Monitor { |
| 86 | + tx: UnboundedSender<ConnectionInfo>, |
| 87 | + _task: Arc<AbortOnDropHandle<()>>, |
| 88 | +} |
| 89 | + |
| 90 | +impl ConnectionMonitor for Monitor { |
| 91 | + fn on_connection(&self, connection: ConnectionInfo) { |
| 92 | + self.tx.send(connection).ok(); |
| 93 | + } |
| 94 | +} |
| 95 | + |
| 96 | +impl Monitor { |
| 97 | + fn new() -> Self { |
| 98 | + let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); |
| 99 | + let task = tokio::spawn(Self::run(rx).instrument(info_span!("watcher"))); |
| 100 | + Self { |
| 101 | + tx, |
| 102 | + _task: Arc::new(AbortOnDropHandle::new(task)), |
| 103 | + } |
| 104 | + } |
| 105 | + |
| 106 | + async fn run(mut rx: UnboundedReceiver<ConnectionInfo>) { |
| 107 | + let mut tasks = JoinSet::new(); |
| 108 | + loop { |
| 109 | + tokio::select! { |
| 110 | + Some(conn) = rx.recv() => { |
| 111 | + let alpn = String::from_utf8_lossy(conn.alpn()).to_string(); |
| 112 | + let remote = conn.remote_id().fmt_short(); |
| 113 | + info!(%remote, %alpn, rtt=?conn.rtt(), "new connection"); |
| 114 | + tasks.spawn(async move { |
| 115 | + match conn.closed().await { |
| 116 | + Some((close_reason, stats)) => { |
| 117 | + // We have access to the final stats of the connection! |
| 118 | + info!(%remote, %alpn, ?close_reason, udp_rx=stats.udp_rx.bytes, udp_tx=stats.udp_tx.bytes, "connection closed"); |
| 119 | + } |
| 120 | + None => { |
| 121 | + // The connection was closed before we could register our stats-on-close listener. |
| 122 | + info!(%remote, %alpn, "connection closed before tracking started"); |
| 123 | + } |
| 124 | + } |
| 125 | + }.instrument(tracing::Span::current())); |
| 126 | + } |
| 127 | + Some(res) = tasks.join_next(), if !tasks.is_empty() => res.expect("conn close task panicked"), |
| 128 | + else => break, |
| 129 | + } |
| 130 | + while let Some(res) = tasks.join_next().await { |
| 131 | + res.expect("conn close task panicked"); |
| 132 | + } |
| 133 | + } |
| 134 | + } |
| 135 | +} |