Remove futures-intrusive when using tokio #1833

Closed
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default.

17 changes: 6 additions & 11 deletions sqlx-bench/benches/pg_pool.rs
@@ -8,21 +8,17 @@ fn bench_pgpool_acquire(c: &mut Criterion) {
let mut group = c.benchmark_group("bench_pgpool_acquire");

for &concurrent in [5u32, 10, 50, 100, 500, 1000, 5000 /*, 10_000, 50_000*/].iter() {
for &fair in [false, true].iter() {
let fairness = if fair { "(fair)" } else { "(unfair)" };

group.bench_with_input(
format!("{} concurrent {}", concurrent, fairness),
&(concurrent, fair),
|b, &(concurrent, fair)| do_bench_acquire(b, concurrent, fair),
);
}
group.bench_with_input(
format!("{} concurrent {}", concurrent, fairness),
&(concurrent),
|b, &(concurrent)| do_bench_acquire(b, concurrent),
);
}

group.finish();
}

fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
fn do_bench_acquire(b: &mut Bencher, concurrent: u32) {
let pool = sqlx_rt::block_on(
PgPoolOptions::new()
// we don't want timeouts because we want to see how the pool degrades
@@ -32,7 +28,6 @@ fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) {
.max_connections(50)
// we're not benchmarking `ping()`
.test_before_acquire(false)
.__fair(fair)
.connect(
&dotenv::var("DATABASE_URL").expect("DATABASE_URL must be set to run benchmarks"),
),
1 change: 0 additions & 1 deletion sqlx-core/Cargo.toml
@@ -134,7 +134,6 @@ encoding_rs = { version = "0.8.30", optional = true }
either = "1.6.1"
futures-channel = { version = "0.3.19", default-features = false, features = ["sink", "alloc", "std"] }
futures-core = { version = "0.3.19", default-features = false }
futures-intrusive = "0.4.0"
futures-util = { version = "0.3.19", default-features = false, features = ["alloc", "sink"] }
# used by the SQLite worker thread to block on the async mutex that locks the database handle
futures-executor = { version = "0.3.19", optional = true }
4 changes: 2 additions & 2 deletions sqlx-core/src/pool/connection.rs
@@ -3,7 +3,7 @@ use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::time::{Duration, Instant};

use futures_intrusive::sync::SemaphoreReleaser;
use sqlx_rt::SemaphorePermit;

use crate::connection::Connection;
use crate::database::Database;
@@ -288,7 +288,7 @@ impl<DB: Database> Floating<DB, Idle<DB>> {
pub fn from_idle(
idle: Idle<DB>,
pool: Arc<PoolInner<DB>>,
permit: SemaphoreReleaser<'_>,
permit: SemaphorePermit<'_>,
) -> Self {
Self {
inner: idle,
35 changes: 15 additions & 20 deletions sqlx-core/src/pool/inner.rs
@@ -6,7 +6,7 @@ use crate::error::Error;
use crate::pool::{deadline_as_timeout, CloseEvent, PoolOptions};
use crossbeam_queue::ArrayQueue;

use futures_intrusive::sync::{Semaphore, SemaphoreReleaser};
use sqlx_rt::{Semaphore, SemaphorePermit};

use std::cmp;
use std::future::Future;
@@ -49,7 +49,7 @@ impl<DB: Database> PoolInner<DB> {
let pool = Self {
connect_options,
idle_conns: ArrayQueue::new(capacity),
semaphore: Semaphore::new(options.fair, capacity),
semaphore: Semaphore::new(capacity),
size: AtomicU32::new(0),
num_idle: AtomicUsize::new(0),
is_closed: AtomicBool::new(false),
@@ -88,7 +88,7 @@ impl<DB: Database> PoolInner<DB> {
// if we were the one to mark this closed, release enough permits to wake all waiters
// we can't just do `usize::MAX` because that would overflow
// and we can't do this more than once cause that would _also_ overflow
self.semaphore.release(WAKE_ALL_PERMITS);
self.semaphore.close();
self.on_closed.notify(usize::MAX);
}

@@ -98,12 +98,6 @@ impl<DB: Database> PoolInner<DB> {
let _ = idle.live.float((*self).clone()).close().await;
}

// Wait for all permits to be released.
let _permits = self
.semaphore
.acquire(WAKE_ALL_PERMITS + (self.options.max_connections as usize))
.await;

// Clean up any remaining connections.
while let Some(idle) = self.idle_conns.pop() {
let _ = idle.live.float((*self).clone()).close().await;
@@ -123,14 +117,14 @@ impl<DB: Database> PoolInner<DB> {
return None;
}

let permit = self.semaphore.try_acquire(1)?;
let permit = self.semaphore.try_acquire().ok()?;
self.pop_idle(permit).ok()
}

fn pop_idle<'a>(
self: &'a Arc<Self>,
permit: SemaphoreReleaser<'a>,
) -> Result<Floating<DB, Idle<DB>>, SemaphoreReleaser<'a>> {
permit: SemaphorePermit<'a>,
) -> Result<Floating<DB, Idle<DB>>, SemaphorePermit<'a>> {
if let Some(idle) = self.idle_conns.pop() {
self.num_idle.fetch_sub(1, Ordering::AcqRel);
Ok(Floating::from_idle(idle, (*self).clone(), permit))
@@ -158,8 +152,8 @@ impl<DB: Database> PoolInner<DB> {
/// Try to atomically increment the pool size for a new connection.
pub(super) fn try_increment_size<'a>(
self: &'a Arc<Self>,
permit: SemaphoreReleaser<'a>,
) -> Result<DecrementSizeGuard<DB>, SemaphoreReleaser<'a>> {
permit: SemaphorePermit<'a>,
) -> Result<DecrementSizeGuard<DB>, SemaphorePermit<'a>> {
match self
.size
.fetch_update(Ordering::AcqRel, Ordering::Acquire, |size| {
@@ -184,7 +178,7 @@ impl<DB: Database> PoolInner<DB> {
self.options.acquire_timeout,
async {
loop {
let permit = self.semaphore.acquire(1).await;
let permit = self.semaphore.acquire().await.map_err(|_| Error::PoolClosed)?;

if self.is_closed() {
return Err(Error::PoolClosed);
@@ -303,7 +297,7 @@ impl<DB: Database> PoolInner<DB> {
//
// If no extra permits are available then we shouldn't be trying to spin up
// connections anyway.
let permit = unwrap_or_return!(self.semaphore.try_acquire(1));
let permit = unwrap_or_return!(self.semaphore.try_acquire().ok());

// We must always obey `max_connections`.
let guard = unwrap_or_return!(self.try_increment_size(permit).ok());
@@ -479,15 +473,16 @@ impl<DB: Database> DecrementSizeGuard<DB> {
}
}

pub fn from_permit(pool: Arc<PoolInner<DB>>, mut permit: SemaphoreReleaser<'_>) -> Self {
#[cfg_attr(not(feature = "_rt-async-std"), allow(unused_mut))]
pub fn from_permit(pool: Arc<PoolInner<DB>>, mut permit: SemaphorePermit<'_>) -> Self {
// here we effectively take ownership of the permit
permit.disarm();
permit.forget();
Self::new_permit(pool)
}

/// Release the semaphore permit without decreasing the pool size.
fn release_permit(self) {
self.pool.semaphore.release(1);
self.pool.semaphore.add_permits(1);
self.cancel();
}

Expand All @@ -502,7 +497,7 @@ impl<DB: Database> Drop for DecrementSizeGuard<DB> {
self.pool.size.fetch_sub(1, Ordering::AcqRel);

// and here we release the permit we got on construction
self.pool.semaphore.release(1);
self.pool.semaphore.add_permits(1);
}
}
}
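
To make the permit hand-off in `DecrementSizeGuard::from_permit` concrete, here is a minimal standalone sketch against tokio's `Semaphore` (not code from this PR; it assumes tokio with the `sync`, `rt-multi-thread`, and `macros` features enabled): `forget()` consumes a permit without returning it to the semaphore, leaving the guard responsible for the later `add_permits(1)`. On the async-std path, the shim's `forget()` maps to `SemaphoreReleaser::disarm()`, which has the same effect.

```rust
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    let sem = Semaphore::new(2);

    // Take one permit; dropping it would normally hand it straight back.
    let permit = sem.try_acquire().expect("a permit should be available");
    assert_eq!(sem.available_permits(), 1);

    // `forget()` consumes the permit *without* returning it, which is the
    // behavior `DecrementSizeGuard::from_permit` relies on: ownership of the
    // permit moves to the guard and the count stays decremented.
    permit.forget();
    assert_eq!(sem.available_permits(), 1);

    // The guard's `Drop` impl (or `release_permit`) later restores the permit.
    sem.add_permits(1);
    assert_eq!(sem.available_permits(), 2);
}
```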
20 changes: 0 additions & 20 deletions sqlx-core/src/pool/options.rs
@@ -79,7 +79,6 @@ pub struct PoolOptions<DB: Database> {
pub(crate) min_connections: u32,
pub(crate) max_lifetime: Option<Duration>,
pub(crate) idle_timeout: Option<Duration>,
pub(crate) fair: bool,
}

/// Metadata for the connection being processed by a [`PoolOptions`] callback.
@@ -124,7 +123,6 @@ impl<DB: Database> PoolOptions<DB> {
acquire_timeout: Duration::from_secs(30),
idle_timeout: Some(Duration::from_secs(10 * 60)),
max_lifetime: Some(Duration::from_secs(30 * 60)),
fair: true,
}
}

@@ -221,24 +219,6 @@ impl<DB: Database> PoolOptions<DB> {
self
}

/// If set to `true`, calls to `acquire()` are fair and connections are issued
/// in first-come-first-serve order. If `false`, "drive-by" tasks may steal idle connections
/// ahead of tasks that have been waiting.
///
/// According to `sqlx-bench/benches/pg_pool` this may slightly increase time
/// to `acquire()` at low pool contention but at very high contention it helps
/// avoid tasks at the head of the waiter queue getting repeatedly preempted by
/// these "drive-by" tasks and tasks further back in the queue timing out because
/// the queue isn't moving.
///
/// Currently only exposed for benchmarking; `fair = true` seems to be the superior option
/// in most cases.
#[doc(hidden)]
pub fn __fair(mut self, fair: bool) -> Self {
self.fair = fair;
self
}

/// Perform an asynchronous action after connecting to the database.
///
/// If the operation returns with an error then the error is logged, the connection is closed
4 changes: 2 additions & 2 deletions sqlx-core/src/sqlite/connection/mod.rs
@@ -3,9 +3,9 @@ use std::fmt::{self, Debug, Formatter};
use std::ptr::NonNull;

use futures_core::future::BoxFuture;
use futures_intrusive::sync::MutexGuard;
use futures_util::future;
use libsqlite3_sys::sqlite3;
use sqlx_rt::AsyncMutexGuard;

pub(crate) use handle::{ConnectionHandle, ConnectionHandleRaw};

@@ -44,7 +44,7 @@ pub struct SqliteConnection {
}

pub struct LockedSqliteHandle<'a> {
pub(crate) guard: MutexGuard<'a, ConnectionState>,
pub(crate) guard: AsyncMutexGuard<'a, ConnectionState>,
}

pub(crate) struct ConnectionState {
10 changes: 6 additions & 4 deletions sqlx-core/src/sqlite/connection/worker.rs
@@ -6,7 +6,7 @@ use std::thread;

use either::Either;
use futures_channel::oneshot;
use futures_intrusive::sync::{Mutex, MutexGuard};
use sqlx_rt::{AsyncMutex, AsyncMutexGuard};

use crate::describe::Describe;
use crate::error::Error;
@@ -36,7 +36,7 @@ pub(crate) struct ConnectionWorker {

pub(crate) struct WorkerSharedState {
pub(crate) cached_statements_size: AtomicUsize,
pub(crate) conn: Mutex<ConnectionState>,
pub(crate) conn: AsyncMutex<ConnectionState>,
}

enum Command {
@@ -101,7 +101,7 @@ impl ConnectionWorker {
// note: must be fair because in `Command::UnlockDb` we unlock the mutex
// and then immediately try to relock it; an unfair mutex would immediately
// grant us the lock even if another task is waiting.
conn: Mutex::new(conn, true),
conn: AsyncMutex::new(conn),
});
let mut conn = shared.conn.try_lock().unwrap();

@@ -325,7 +325,9 @@ impl ConnectionWorker {
self.oneshot_cmd(|tx| Command::ClearCache { tx }).await
}

pub(crate) async fn unlock_db(&mut self) -> Result<MutexGuard<'_, ConnectionState>, Error> {
pub(crate) async fn unlock_db(
&mut self,
) -> Result<AsyncMutexGuard<'_, ConnectionState>, Error> {
let (guard, res) = futures_util::future::join(
// we need to join the wait queue for the lock before we send the message
self.shared.conn.lock(),
5 changes: 3 additions & 2 deletions sqlx-rt/Cargo.toml
@@ -25,7 +25,7 @@ runtime-tokio-rustls = ["_rt-tokio", "_tls-rustls", "tokio-rustls"]

# Not used directly and not re-exported from sqlx
_rt-actix = ["actix-rt", "tokio", "once_cell"]
_rt-async-std = ["async-std"]
_rt-async-std = ["async-std", "futures-intrusive"]
_rt-tokio = ["tokio", "once_cell"]
_tls-native-tls = ["native-tls"]
_tls-rustls = []
@@ -35,12 +35,13 @@ async-native-tls = { version = "0.4.0", optional = true }
futures-rustls = { version = "0.22.0", optional = true }
actix-rt = { version = "2.0.0", default-features = false, optional = true }
async-std = { version = "1.7.0", features = ["unstable"], optional = true }
futures-intrusive = { version = "0.4", optional = true }
tokio-native-tls = { version = "0.3.0", optional = true }
tokio-rustls = { version = "0.23.0", optional = true }
native-tls = { version = "0.2.4", optional = true }
once_cell = { version = "1.4", features = ["std"], optional = true }

[dependencies.tokio]
version = "1.0.1"
features = ["fs", "net", "rt", "rt-multi-thread", "time", "io-util"]
features = ["fs", "net", "rt", "rt-multi-thread", "time", "io-util", "sync"]
optional = true
64 changes: 61 additions & 3 deletions sqlx-rt/src/lib.rs
@@ -37,8 +37,9 @@ pub use native_tls;
))]
pub use tokio::{
self, fs, io::AsyncRead, io::AsyncReadExt, io::AsyncWrite, io::AsyncWriteExt, io::ReadBuf,
net::TcpStream, runtime::Handle, sync::Mutex as AsyncMutex, task::spawn, task::yield_now,
time::sleep, time::timeout,
net::TcpStream, runtime::Handle, sync::Mutex as AsyncMutex,
sync::MutexGuard as AsyncMutexGuard, sync::Semaphore, sync::SemaphorePermit, task::spawn,
task::yield_now, time::sleep, time::timeout,
};

#[cfg(all(
@@ -143,7 +144,8 @@ macro_rules! blocking {
pub use async_std::{
self, fs, future::timeout, io::prelude::ReadExt as AsyncReadExt,
io::prelude::WriteExt as AsyncWriteExt, io::Read as AsyncRead, io::Write as AsyncWrite,
net::TcpStream, sync::Mutex as AsyncMutex, task::sleep, task::spawn, task::yield_now,
net::TcpStream, sync::Mutex as AsyncMutex, sync::MutexGuard as AsyncMutexGuard, task::sleep,
task::spawn, task::yield_now,
};

#[cfg(all(
@@ -195,3 +197,59 @@ pub use async_native_tls::{TlsConnector, TlsStream};
)),
))]
pub use futures_rustls::{client::TlsStream, TlsConnector};

#[cfg(all(
feature = "_rt-async-std",
not(any(feature = "_rt-actix", feature = "_rt-tokio")),
))]
pub struct Semaphore(futures_intrusive::sync::Semaphore);

#[cfg(all(
feature = "_rt-async-std",
not(any(feature = "_rt-actix", feature = "_rt-tokio")),
))]
impl Semaphore {
pub fn new(permits: usize) -> Self {
Self(futures_intrusive::sync::Semaphore::new(true, permits))
}

pub fn add_permits(&self, n: usize) {
self.0.release(n);
}

pub async fn acquire(&self) -> Result<SemaphorePermit<'_>, ()> {
let releaser = self.0.acquire(1).await;
Ok(SemaphorePermit(releaser))
}

pub fn try_acquire(&self) -> Result<SemaphorePermit<'_>, ()> {
let releaser = self.0.try_acquire(1).ok_or(())?;
Ok(SemaphorePermit(releaser))
}

pub fn close(&self) {
/// The number of permits to release to wake all waiters, such as on `PoolInner::close()`.
///
/// This should be large enough to realistically wake all tasks waiting on the pool without
/// potentially overflowing the permits count in the semaphore itself.
const WAKE_ALL_PERMITS: usize = usize::MAX / 2;

self.0.release(WAKE_ALL_PERMITS);
}
}

#[cfg(all(
feature = "_rt-async-std",
not(any(feature = "_rt-actix", feature = "_rt-tokio")),
))]
pub struct SemaphorePermit<'a>(futures_intrusive::sync::SemaphoreReleaser<'a>);

#[cfg(all(
feature = "_rt-async-std",
not(any(feature = "_rt-actix", feature = "_rt-tokio")),
))]
impl<'a> SemaphorePermit<'a> {
pub fn forget(&mut self) {
self.0.disarm();
}
}
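
Since `futures-intrusive` has no concept of closing a semaphore, the shim's `close()` above falls back to releasing a very large (but overflow-safe) number of permits so that every waiter wakes up and can then observe `is_closed()`. Below is a standalone sketch of that trick, assuming `futures-intrusive` 0.4 and `futures-executor` as dependencies (not code from this PR):

```rust
use futures_intrusive::sync::Semaphore;

fn main() {
    futures_executor::block_on(async {
        // Fair semaphore with a single permit, like a tiny connection pool.
        let sem = Semaphore::new(true, 1);

        // Hold the only permit, so any further `acquire(1)` would block.
        let held = sem.acquire(1).await;

        // Simulate `close()`: release enough permits that all realistic
        // waiters are woken, while staying far from overflowing the counter.
        const WAKE_ALL_PERMITS: usize = usize::MAX / 2;
        sem.release(WAKE_ALL_PERMITS);

        // A would-be waiter now gets a permit immediately instead of hanging;
        // the pool code then checks `is_closed()` and bails with `PoolClosed`.
        let woken = sem.acquire(1).await;

        // Dropping the releasers simply returns their permits; harmless here.
        drop(woken);
        drop(held);
    });
}
```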