fix: tokio v1.27 #1062
Changes from all commits: 8ec0ab3, b30dd44, f29593f, 2d8f787, 3de56a1, 47db597, 1e2174e, 36eb0a4, 0635db3
@@ -32,13 +32,14 @@ use std::sync::Arc;
 use std::task::{Context, Poll};
 use std::time::Duration;

-use crate::future::{ConnectionGuard, FutureDriver, ServerHandle, StopHandle};
+use crate::future::{ConnectionGuard, ServerHandle, StopHandle};
 use crate::logger::{Logger, TransportProtocol};
 use crate::transport::{http, ws};

-use futures_util::future::{BoxFuture, FutureExt};
+use futures_util::future::{Either, FutureExt};
 use futures_util::io::{BufReader, BufWriter};
+use futures_util::stream::{FuturesUnordered, StreamExt};
 use hyper::body::HttpBody;
 use jsonrpsee_core::id_providers::RandomIntegerIdProvider;
@@ -127,12 +128,15 @@ where
         let mut id: u32 = 0;
         let connection_guard = ConnectionGuard::new(self.cfg.max_connections as usize);
-        let mut connections = FutureDriver::default();
-        let mut incoming = Monitored::new(Incoming(self.listener), &stop_handle);
+        let listener = self.listener;
+
+        let mut connections = FuturesUnordered::new();
+        let stopped = stop_handle.clone().shutdown();
+        tokio::pin!(stopped);

         loop {
-            match connections.select_with(&mut incoming).await {
-                Ok((socket, remote_addr)) => {
+            match try_accept_conn(&listener, stopped).await {
+                AcceptConnection::Established { socket, remote_addr, stop } => {
                     let data = ProcessConnection {
                         remote_addr,
                         methods: methods.clone(),
@@ -154,15 +158,21 @@ where
                     };
                     process_connection(&self.service_builder, &connection_guard, data, socket, &mut connections);
                     id = id.wrapping_add(1);
+                    stopped = stop;
                 }
-                Err(MonitoredError::Selector(err)) => {
-                    tracing::error!("Error while awaiting a new connection: {:?}", err);
+                AcceptConnection::Err((e, stop)) => {
+                    tracing::error!("Error while awaiting a new connection: {:?}", e);
+                    stopped = stop;
                 }
-                Err(MonitoredError::Shutdown) => break,
+                AcceptConnection::Shutdown => break,
             }
         }

-        connections.await;
+        // FuturesUnordered won't poll anything until this line but because the
+        // tasks are spawned (so that they can progress independently)
+        // then this just makes sure that all tasks are completed before
+        // returning from this function.
+        while connections.next().await.is_some() {}
     }
 }

Review comment: Ah ok, so if I understand this right, (I think that seems perfectly reasonable to me! Perhaps worth a comment)

Reply: yeah, exactly
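The inline exchange above is about why draining the `FuturesUnordered` only after the accept loop is enough: the connection futures are spawned as tasks, so they progress on their own, and the collection is just a list of handles to wait on. Below is a minimal, self-contained sketch of that pattern; it is not taken from the PR, and the task bodies (sleeps and prints) are made up for illustration.

```rust
use std::time::Duration;

use futures_util::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    let mut connections: FuturesUnordered<tokio::task::JoinHandle<()>> = FuturesUnordered::new();

    for id in 0..3u64 {
        // `tokio::spawn` starts the task right away; the `FuturesUnordered`
        // only keeps the handle so we can wait for it later.
        connections.push(tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(50 * (id + 1))).await;
            println!("connection task {id} finished");
        }));
    }

    // Same idea as `while connections.next().await.is_some() {}` in the diff:
    // the spawned tasks make progress on their own, and this loop only makes
    // sure they have all completed before returning.
    while connections.next().await.is_some() {}
}
```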
@@ -668,56 +678,6 @@ impl<L: Logger> hyper::service::Service<hyper::Request<hyper::Body>> for TowerSe
     }
 }

-/// This is a glorified select listening for new messages, while also checking the `stop_receiver` signal.
-struct Monitored<'a, F> {
-    future: F,
-    stop_monitor: &'a StopHandle,
-}
-
-impl<'a, F> Monitored<'a, F> {
-    fn new(future: F, stop_monitor: &'a StopHandle) -> Self {
-        Monitored { future, stop_monitor }
-    }
-}
-
-enum MonitoredError<E> {
-    Shutdown,
-    Selector(E),
-}
-
-struct Incoming(TcpListener);
-
-impl<'a> Future for Monitored<'a, Incoming> {
-    type Output = Result<(TcpStream, SocketAddr), MonitoredError<std::io::Error>>;
-
-    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-        let this = Pin::into_inner(self);
-
-        if this.stop_monitor.shutdown_requested() {
-            return Poll::Ready(Err(MonitoredError::Shutdown));
-        }
-
-        this.future.0.poll_accept(cx).map_err(MonitoredError::Selector)
-    }
-}
-
-impl<'a, 'f, F, T, E> Future for Monitored<'a, Pin<&'f mut F>>
-where
-    F: Future<Output = Result<T, E>>,
-{
-    type Output = Result<T, MonitoredError<E>>;
-
-    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
-        let this = Pin::into_inner(self);
-
-        if this.stop_monitor.shutdown_requested() {
-            return Poll::Ready(Err(MonitoredError::Shutdown));
-        }
-
-        this.future.poll_unpin(cx).map_err(MonitoredError::Selector)
-    }
-}
-
 struct ProcessConnection<L> {
     /// Remote server address.
     remote_addr: SocketAddr,
@@ -763,7 +723,7 @@ fn process_connection<'a, L: Logger, B, U>(
     connection_guard: &ConnectionGuard,
     cfg: ProcessConnection<L>,
     socket: TcpStream,
-    connections: &mut FutureDriver<BoxFuture<'a, ()>>,
+    connections: &mut FuturesUnordered<tokio::task::JoinHandle<()>>,
 ) where
     B: Layer<TowerService<L>> + Send + 'static,
     <B as Layer<TowerService<L>>>::Service: Send
@@ -786,7 +746,7 @@ fn process_connection<'a, L: Logger, B, U>(
         Some(conn) => conn,
         None => {
             tracing::warn!("Too many connections. Please try again later.");
-            connections.add(http::reject_connection(socket).in_current_span().boxed());
+            connections.push(tokio::spawn(http::reject_connection(socket).in_current_span()));
             return;
         }
     };
@@ -819,11 +779,11 @@ fn process_connection<'a, L: Logger, B, U>(

     let service = service_builder.service(tower_service);

-    connections.add(Box::pin(try_accept_connection(socket, service, cfg.stop_handle).in_current_span()));
+    connections.push(tokio::spawn(to_http_service(socket, service, cfg.stop_handle).in_current_span()));
 }

 // Attempts to create a HTTP connection from a socket.
-async fn try_accept_connection<S, B>(socket: TcpStream, service: S, mut stop_handle: StopHandle)
+async fn to_http_service<S, B>(socket: TcpStream, service: S, stop_handle: StopHandle)
 where
     S: Service<hyper::Request<hyper::Body>, Response = hyper::Response<B>> + Send + 'static,
     S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
@@ -847,3 +807,25 @@ where
         }
     }
 }
+
+enum AcceptConnection<S> {
+    Shutdown,
+    Established { socket: TcpStream, remote_addr: SocketAddr, stop: S },
+    Err((std::io::Error, S)),
+}
+
+async fn try_accept_conn<S>(listener: &TcpListener, stopped: S) -> AcceptConnection<S>
+where
+    S: Future + Unpin,
+{
+    let accept = listener.accept();
+    tokio::pin!(accept);
+
+    match futures_util::future::select(accept, stopped).await {
+        Either::Left((res, stop)) => match res {
+            Ok((socket, remote_addr)) => AcceptConnection::Established { socket, remote_addr, stop },
+            Err(e) => AcceptConnection::Err((e, stop)),
+        },
+        Either::Right(_) => AcceptConnection::Shutdown,
+    }
+}
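The `try_accept_conn` helper added above races the listener's `accept()` against a shutdown future and hands the still-pending shutdown future back to the caller, which is why the accept loop reassigns `stopped = stop` on every iteration. Here is a minimal, runnable sketch of the same idea; `accept_or_shutdown`, the `Accepted` enum, and the sleep that stands in for a real shutdown signal are illustrative names and placeholders, not part of the PR.

```rust
use std::net::SocketAddr;
use std::time::Duration;

use futures_util::future::{self, Either};
use tokio::net::{TcpListener, TcpStream};

enum Accepted<S> {
    Conn { socket: TcpStream, remote_addr: SocketAddr, stop: S },
    Err(std::io::Error, S),
    Shutdown,
}

// Race `listener.accept()` against an arbitrary "stop" future and hand the
// still-pending stop future back to the caller so it can be reused.
async fn accept_or_shutdown<S>(listener: &TcpListener, stopped: S) -> Accepted<S>
where
    S: std::future::Future + Unpin,
{
    let accept = listener.accept();
    tokio::pin!(accept);

    match future::select(accept, stopped).await {
        Either::Left((Ok((socket, remote_addr)), stop)) => Accepted::Conn { socket, remote_addr, stop },
        Either::Left((Err(e), stop)) => Accepted::Err(e, stop),
        Either::Right(_) => Accepted::Shutdown,
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:0").await?;
    // For the demo the "shutdown signal" simply fires after two seconds.
    let mut stopped = Box::pin(tokio::time::sleep(Duration::from_secs(2)));

    loop {
        match accept_or_shutdown(&listener, stopped).await {
            Accepted::Conn { remote_addr, stop, .. } => {
                println!("accepted connection from {remote_addr}");
                // The shutdown future survived the select; reuse it next time,
                // like `stopped = stop;` in the accept loop above.
                stopped = stop;
            }
            Accepted::Err(e, stop) => {
                eprintln!("failed to accept connection: {e}");
                stopped = stop;
            }
            Accepted::Shutdown => break,
        }
    }

    Ok(())
}
```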
Review comment: IIUC we relied on this to make sure that we wake up:

To remove `tokio::time::Interval` we are polling the `FutureDriver` alongside other operations that we do in the server: `soketto::Pong`, `soketto::Data`, and `soketto::Closed`.

Could we still get in a situation where we have no other competing tasks to drive the `FutureDriver`? (ie, we don't have `soketto::Pong` to wake us up -- or it is configured at 100 seconds -- and we don't receive `soketto::Data`)
Reply: After convincing myself that this `DriverSelect` thing (ie `select_with`) will poll the futures in itself as needed, maybe I'm missing something, but why does it matter if this thing doesn't wake up for a while if none of the futures in it need to progress?
Reply: I was concerned that the `FutureDriver` will not be polled in time to handle a `Ready` future from its internal vector. Meaning that if we don't get enough traction from `try_recv`, we previously made sure to have some progress with the `tokio::Interval`.

I was trying to imagine the unlikely scenario where `soketto::recv` from `try_recv` won't generate events for 10 minutes, but our futures from the driver are all ready. In this case, the `tokio::Interval` was making sure to advance things a bit sooner.
Reply: My current understanding is now that when any of those internal futures makes progress, the driver task will be polled again (or whatever task contains the `DriverSelect`, anyway) and they will all re-run. So, no futures in this list should be ignored, basically. If an internal item is `Ready`, then it'll have called `wake()` to make everything get polled again.
Reply: So, that's a good question/observation, but I found that quite unlikely because both batch and calls are executed as one future/task. Then in each loop iteration both `try_recv` and `wait_for_permit` are awaited on, which checks "the future vec". It could be possible to spawn huge futures and then never send some data again (or backpressure kicks in); then those would never be woken up again, but I don't see it as an issue because it's up to the client to enforce that.

If it's idle, fine, but we could have a few ready tasks in the future vec.
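The thread above comes down to standard waker semantics: a future polled inside a task registers its own waker, so when it becomes ready it wakes that task and the surrounding select/driver is polled again, even if nothing else (e.g. a quiet `soketto::recv`) produces events. Below is a small sketch that demonstrates this with plain `futures-util` and tokio timers; it is not from the PR, and the sleeping futures plus `pending()` placeholder are made up to stand in for the driver's internal futures and a silent receive.

```rust
use std::time::Duration;

use futures_util::future::{self, Either};
use futures_util::stream::{FuturesUnordered, StreamExt};

#[tokio::main]
async fn main() {
    // Three inner futures that become ready at different times; they stand in
    // for the futures held inside the old `FutureDriver`.
    let mut inner: FuturesUnordered<_> = (1..=3u64)
        .map(|i| async move {
            tokio::time::sleep(Duration::from_millis(100 * i)).await;
            i
        })
        .collect();

    // A future that never completes and never wakes this task; it plays the
    // role of a receive side that stays silent for a long time.
    let mut idle = future::pending::<()>();

    // Even though `idle` never wakes the task, every timer inside `inner`
    // registers its own waker, so the task is woken and the `select` is
    // re-polled whenever one of them becomes ready.
    loop {
        match future::select(inner.next(), &mut idle).await {
            Either::Left((Some(i), _)) => println!("inner future {i} completed"),
            Either::Left((None, _)) => break, // every inner future has finished
            Either::Right(_) => unreachable!("pending() never completes"),
        }
    }
}
```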