diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index c91411875..cf6c26b95 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -60,3 +60,14 @@ jobs:
         run: rustup update stable && rustup default stable
       - name: rustfmt
         run: ./ci/rustfmt.sh
+
+  # Run loom tests.
+  loom:
+    name: loom
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - name: Install Rust
+        run: rustup update stable && rustup default stable
+      - name: loom
+        run: ./ci/crossbeam-epoch-loom.sh
diff --git a/ci/crossbeam-epoch-loom.sh b/ci/crossbeam-epoch-loom.sh
new file mode 100755
index 000000000..40949f3ed
--- /dev/null
+++ b/ci/crossbeam-epoch-loom.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cd "$(dirname "$0")"/../crossbeam-epoch
+set -ex
+
+export RUSTFLAGS="-D warnings --cfg=loom"
+
+env LOOM_MAX_PREEMPTIONS=2 cargo test --test loom --features sanitize --release -- --nocapture
diff --git a/crossbeam-epoch/Cargo.toml b/crossbeam-epoch/Cargo.toml
index 82a1b03e0..9be1227e0 100644
--- a/crossbeam-epoch/Cargo.toml
+++ b/crossbeam-epoch/Cargo.toml
@@ -40,6 +40,9 @@ sanitize = [] # Makes it more likely to trigger any potential data races.
 cfg-if = "0.1.10"
 memoffset = "0.5.1"
 
+[target.'cfg(loom)'.dependencies]
+loom = "0.3.2"
+
 [dependencies.crossbeam-utils]
 version = "0.7"
 path = "../crossbeam-utils"
diff --git a/crossbeam-epoch/src/atomic.rs b/crossbeam-epoch/src/atomic.rs
index 9cebde12d..f390b5ed6 100644
--- a/crossbeam-epoch/src/atomic.rs
+++ b/crossbeam-epoch/src/atomic.rs
@@ -1,3 +1,4 @@
+use crate::concurrency::sync::atomic::AtomicUsize;
 use alloc::boxed::Box;
 use core::borrow::{Borrow, BorrowMut};
 use core::cmp;
@@ -5,7 +6,7 @@ use core::fmt;
 use core::marker::PhantomData;
 use core::mem;
 use core::ops::{Deref, DerefMut};
-use core::sync::atomic::{AtomicUsize, Ordering};
+use core::sync::atomic::Ordering;
 
 use crate::guard::Guard;
 use crossbeam_utils::atomic::AtomicConsume;
@@ -149,6 +150,24 @@ impl<T> Atomic<T> {
     ///
     /// let a = Atomic::<i32>::null();
     /// ```
+    #[cfg(loom)]
+    pub fn null() -> Atomic<T> {
+        Self {
+            data: AtomicUsize::new(0),
+            _marker: PhantomData,
+        }
+    }
+
+    /// Returns a new null atomic pointer.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::Atomic;
+    ///
+    /// let a = Atomic::<i32>::null();
+    /// ```
+    #[cfg(not(loom))]
     pub const fn null() -> Atomic<T> {
         Self {
             data: AtomicUsize::new(0),
             _marker: PhantomData,
         }
     }
@@ -487,7 +506,14 @@ impl<T> Atomic<T> {
     ///     }
     /// ```
     pub unsafe fn into_owned(self) -> Owned<T> {
-        Owned::from_usize(self.data.into_inner())
+        #[cfg(loom)]
+        {
+            Owned::from_usize(self.data.unsync_load())
+        }
+        #[cfg(not(loom))]
+        {
+            Owned::from_usize(self.data.into_inner())
+        }
     }
 }
 
@@ -1166,7 +1192,7 @@ impl<T> Default for Shared<'_, T> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::Shared;
 
diff --git a/crossbeam-epoch/src/collector.rs b/crossbeam-epoch/src/collector.rs
index b159518cc..4f92390c9 100644
--- a/crossbeam-epoch/src/collector.rs
+++ b/crossbeam-epoch/src/collector.rs
@@ -12,7 +12,7 @@
 ///
 /// handle.pin().flush();
 /// ```
-use alloc::sync::Arc;
+use crate::concurrency::sync::Arc;
 use core::fmt;
 
 use crate::guard::Guard;
@@ -109,7 +109,7 @@ impl fmt::Debug for LocalHandle {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use std::mem;
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -151,9 +151,9 @@ mod tests {
             let a = Owned::new(7).into_shared(guard);
             guard.defer_destroy(a);
 
-            assert!(!(*(*guard.local).bag.get()).is_empty());
+            assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
 
-            while !(*(*guard.local).bag.get()).is_empty() {
+            while !(*guard.local).bag.with(|b| (*b).is_empty()) {
                 guard.flush();
             }
         }
@@ -172,7 +172,7 @@ mod tests {
                 let a = Owned::new(7).into_shared(guard);
                 guard.defer_destroy(a);
             }
-            assert!(!(*(*guard.local).bag.get()).is_empty());
+            assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
         }
     }
 
diff --git a/crossbeam-epoch/src/default.rs b/crossbeam-epoch/src/default.rs
index 1deac2114..7d0fbd947 100644
--- a/crossbeam-epoch/src/default.rs
+++ b/crossbeam-epoch/src/default.rs
@@ -5,8 +5,8 @@
 //! destructed on thread exit, which in turn unregisters the thread.
 
 use crate::collector::{Collector, LocalHandle};
+use crate::concurrency::{lazy_static, thread_local};
 use crate::guard::Guard;
-use lazy_static::lazy_static;
 
 lazy_static! {
     /// The global data for the default garbage collector.
@@ -45,7 +45,7 @@ where
         .unwrap_or_else(|_| f(&COLLECTOR.register()))
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use crossbeam_utils::thread;
 
diff --git a/crossbeam-epoch/src/deferred.rs b/crossbeam-epoch/src/deferred.rs
index 089798b89..1e129ea1a 100644
--- a/crossbeam-epoch/src/deferred.rs
+++ b/crossbeam-epoch/src/deferred.rs
@@ -76,7 +76,7 @@ impl Deferred {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::Deferred;
     use std::cell::Cell;
diff --git a/crossbeam-epoch/src/epoch.rs b/crossbeam-epoch/src/epoch.rs
index e7759d935..8415d69b0 100644
--- a/crossbeam-epoch/src/epoch.rs
+++ b/crossbeam-epoch/src/epoch.rs
@@ -7,7 +7,8 @@
 //! If an object became garbage in some epoch, then we can be sure that after two advancements no
 //! participant will hold a reference to it. That is the crux of safe memory reclamation.
 
-use core::sync::atomic::{AtomicUsize, Ordering};
+use crate::concurrency::sync::atomic::AtomicUsize;
+use core::sync::atomic::Ordering;
 
 /// An epoch that can be marked as pinned or unpinned.
 ///
diff --git a/crossbeam-epoch/src/internal.rs b/crossbeam-epoch/src/internal.rs
index b30038734..e739de8af 100644
--- a/crossbeam-epoch/src/internal.rs
+++ b/crossbeam-epoch/src/internal.rs
@@ -35,10 +35,11 @@
 //! Ideally each instance of concurrent data structure may have its own queue that gets fully
 //! destroyed as soon as the data structure gets dropped.
 
-use core::cell::{Cell, UnsafeCell};
+use crate::concurrency::cell::UnsafeCell;
+use crate::concurrency::sync::atomic;
+use core::cell::Cell;
 use core::mem::{self, ManuallyDrop};
 use core::num::Wrapping;
-use core::sync::atomic;
 use core::sync::atomic::Ordering;
 use core::{fmt, ptr};
 
@@ -408,7 +409,7 @@ impl Local {
     /// Returns a reference to the `Collector` in which this `Local` resides.
    #[inline]
     pub fn collector(&self) -> &Collector {
-        unsafe { &**self.collector.get() }
+        self.collector.with(|c| unsafe { &**c })
     }
 
     /// Returns `true` if the current participant is pinned.
@@ -423,7 +424,7 @@ impl Local {
     ///
    /// It should be safe for another thread to execute the given function.
     pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
-        let bag = &mut *self.bag.get();
+        let bag = self.bag.with_mut(|b| &mut *b);
 
         while let Err(d) = bag.try_push(deferred) {
             let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
@@ -433,7 +434,7 @@ impl Local {
     }
 
     pub fn flush(&self, guard: &Guard) {
-        let bag = unsafe { &mut *self.bag.get() };
+        let bag = self.bag.with_mut(|b| unsafe { &mut *b });
 
         if !bag.is_empty() {
             let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
@@ -582,7 +583,8 @@ impl Local {
             // doesn't defer destruction on any new garbage.
             let epoch = self.epoch.load(Ordering::Relaxed).unpinned();
             let guard = &self.pin();
-            self.global().push_bag(&mut *self.bag.get(), epoch, guard);
+            self.global()
+                .push_bag(self.bag.with_mut(|b| &mut *b), epoch, guard);
         }
         // Revert the handle count back to zero.
         self.handle_count.set(0);
@@ -591,7 +593,7 @@ impl Local {
             // Take the reference to the `Global` out of this `Local`. Since we're not protected
             // by a guard at this time, it's crucial that the reference is read before marking the
             // `Local` as deleted.
-            let collector: Collector = ptr::read(&*(*self.collector.get()));
+            let collector: Collector = ptr::read(self.collector.with(|c| &*(*c)));
 
             // Mark this node in the linked list as deleted.
             self.entry.delete(&unprotected());
@@ -622,7 +624,7 @@ impl IsElement<Local> for Local {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use std::sync::atomic::{AtomicUsize, Ordering};
 
diff --git a/crossbeam-epoch/src/lib.rs b/crossbeam-epoch/src/lib.rs
index 1d4a665d3..f91aea55b 100644
--- a/crossbeam-epoch/src/lib.rs
+++ b/crossbeam-epoch/src/lib.rs
@@ -60,6 +60,85 @@
 
 use cfg_if::cfg_if;
 
+#[cfg(loom)]
+#[allow(unused_imports, dead_code)]
+pub(crate) mod concurrency {
+    pub(crate) mod cell {
+        pub(crate) use loom::cell::UnsafeCell;
+    }
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            use core::sync::atomic::Ordering;
+            pub(crate) use loom::sync::atomic::AtomicUsize;
+            pub(crate) fn fence(ord: Ordering) {
+                if let Ordering::Acquire = ord {
+                } else {
+                    // FIXME: loom only supports acquire fences at the moment.
+                    // https://github.com/tokio-rs/loom/issues/117
+                    // let's at least not panic...
+                    // this may generate some false positives (`SeqCst` is stronger than `Acquire`
+                    // for example), and some false negatives (`Relaxed` is weaker than `Acquire`),
+                    // but it's the best we can do for the time being.
+                }
+                loom::sync::atomic::fence(Ordering::Acquire)
+            }
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+            pub(crate) use self::fence as compiler_fence;
+        }
+        pub(crate) use loom::sync::Arc;
+    }
+    pub(crate) use loom::lazy_static;
+    pub(crate) use loom::thread_local;
+}
+#[cfg(not(loom))]
+#[allow(unused_imports, dead_code)]
+pub(crate) mod concurrency {
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod cell {
+        #[derive(Debug)]
+        #[repr(transparent)]
+        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);
+
+        impl<T> UnsafeCell<T> {
+            #[inline]
+            pub(crate) fn new(data: T) -> UnsafeCell<T> {
+                UnsafeCell(::core::cell::UnsafeCell::new(data))
+            }
+
+            #[inline]
+            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
+                f(self.0.get())
+            }
+
+            #[inline]
+            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
+                f(self.0.get())
+            }
+        }
+    }
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use core::sync::atomic::compiler_fence;
+            pub(crate) use core::sync::atomic::fence;
+            pub(crate) use core::sync::atomic::AtomicUsize;
+        }
+        #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
+        pub(crate) use alloc::sync::Arc;
+    }
+
+    #[cfg(feature = "std")]
+    pub(crate) use std::thread_local;
+
+    #[cfg(feature = "std")]
+    pub(crate) use lazy_static::lazy_static;
+}
+
 #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
 cfg_if! {
     if #[cfg(feature = "alloc")] {
diff --git a/crossbeam-epoch/src/sync/list.rs b/crossbeam-epoch/src/sync/list.rs
index 57aea92f9..7de7878f7 100644
--- a/crossbeam-epoch/src/sync/list.rs
+++ b/crossbeam-epoch/src/sync/list.rs
@@ -295,7 +295,7 @@ impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::*;
     use crate::{Collector, Owned};
diff --git a/crossbeam-epoch/src/sync/queue.rs b/crossbeam-epoch/src/sync/queue.rs
index 98ef667b5..b365ac643 100644
--- a/crossbeam-epoch/src/sync/queue.rs
+++ b/crossbeam-epoch/src/sync/queue.rs
@@ -202,7 +202,7 @@ impl<T> Drop for Queue<T> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod test {
     use super::*;
     use crossbeam_utils::thread;
diff --git a/crossbeam-epoch/tests/loom.rs b/crossbeam-epoch/tests/loom.rs
new file mode 100644
index 000000000..e8ca0af36
--- /dev/null
+++ b/crossbeam-epoch/tests/loom.rs
@@ -0,0 +1,145 @@
+#![cfg(loom)]
+
+use crossbeam_epoch as epoch;
+
+use epoch::*;
+use epoch::{Atomic, Owned};
+use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release};
+use loom::sync::Arc;
+use loom::thread::spawn;
+use std::mem::ManuallyDrop;
+use std::ptr;
+
+#[test]
+fn it_works() {
+    loom::model(|| {
+        let collector = Collector::new();
+        let item: Atomic<String> = Atomic::from(Owned::new(String::from("boom")));
+        let item2 = item.clone();
+        let collector2 = collector.clone();
+        let guard = collector.register().pin();
+
+        let jh = loom::thread::spawn(move || {
+            let guard = collector2.register().pin();
+            guard.defer(move || {
+                // this isn't really safe, since other threads may still have pointers to the
+                // value, but in this limited test scenario it's okay, since we know the test won't
+                // access item after all the pins are released.
+                let mut item = unsafe { item2.into_owned() };
+                // mutate it as a second measure to make sure the assert_eq below would fail
+                item.retain(|c| c == 'o');
+                drop(item);
+            });
+        });
+
+        let item = item.load(Ordering::SeqCst, &guard);
+        // we pinned strictly before the call to defer_destroy,
+        // so item cannot have been dropped yet
+        assert_eq!(*unsafe { item.deref() }, "boom");
+        drop(guard);
+
+        jh.join().unwrap();
+
+        drop(collector);
+    })
+}
+
+#[test]
+fn treiber_stack() {
+    // this is mostly a copy-paste from the example
+    #[derive(Debug)]
+    pub struct TreiberStack<T> {
+        head: Atomic<Node<T>>,
+    }
+
+    #[derive(Debug)]
+    struct Node<T> {
+        data: ManuallyDrop<T>,
+        next: Atomic<Node<T>>,
+    }
+
+    impl<T> TreiberStack<T> {
+        pub fn new() -> TreiberStack<T> {
+            TreiberStack {
+                head: Atomic::null(),
+            }
+        }
+
+        pub fn push(&self, t: T) {
+            let mut n = Owned::new(Node {
+                data: ManuallyDrop::new(t),
+                next: Atomic::null(),
+            });
+
+            let guard = epoch::pin();
+
+            loop {
+                let head = self.head.load(Relaxed, &guard);
+                n.next.store(head, Relaxed);
+
+                match self.head.compare_and_set(head, n, Release, &guard) {
+                    Ok(_) => break,
+                    Err(e) => n = e.new,
+                }
+            }
+        }
+
+        pub fn pop(&self) -> Option<T> {
+            let guard = epoch::pin();
+            loop {
+                let head = self.head.load(Acquire, &guard);
+
+                match unsafe { head.as_ref() } {
+                    Some(h) => {
+                        let next = h.next.load(Relaxed, &guard);
+
+                        if self
+                            .head
+                            .compare_and_set(head, next, Relaxed, &guard)
+                            .is_ok()
+                        {
+                            unsafe {
+                                guard.defer_destroy(head);
+                                return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
+                            }
+                        }
+                    }
+                    None => return None,
+                }
+            }
+        }
+
+        pub fn is_empty(&self) -> bool {
+            let guard = epoch::pin();
+            self.head.load(Acquire, &guard).is_null()
+        }
+    }
+
+    impl<T> Drop for TreiberStack<T> {
+        fn drop(&mut self) {
+            while self.pop().is_some() {}
+        }
+    }
+
+    loom::model(|| {
+        let stack1 = Arc::new(TreiberStack::new());
+        let stack2 = Arc::clone(&stack1);
+
+        // use 5 since it's greater than the 4 used for the sanitize feature
+        let jh = spawn(move || {
+            for i in 0..5 {
+                stack2.push(i);
+                assert!(stack2.pop().is_some());
+            }
+        });
+
+        for i in 0..5 {
+            stack1.push(i);
+            assert!(stack1.pop().is_some());
+        }
+
+        jh.join().unwrap();
+        assert!(stack1.pop().is_none());
+        assert!(stack1.is_empty());
+    });
+}
diff --git a/crossbeam-utils/Cargo.toml b/crossbeam-utils/Cargo.toml
index 29d35f887..3cb3607fd 100644
--- a/crossbeam-utils/Cargo.toml
+++ b/crossbeam-utils/Cargo.toml
@@ -33,6 +33,9 @@ nightly = []
 cfg-if = "0.1.10"
 lazy_static = { version = "1.1.0", optional = true }
 
+[target.'cfg(loom)'.dependencies]
+loom = "0.3.2"
+
 [build-dependencies]
 autocfg = "1"
 
diff --git a/crossbeam-utils/src/atomic/atomic_cell.rs b/crossbeam-utils/src/atomic/atomic_cell.rs
index 24e4b4eb9..f4dd64dd2 100644
--- a/crossbeam-utils/src/atomic/atomic_cell.rs
+++ b/crossbeam-utils/src/atomic/atomic_cell.rs
@@ -1,12 +1,16 @@
+use crate::concurrency::sync::atomic::{self, AtomicBool};
 use core::cell::UnsafeCell;
 use core::fmt;
 use core::mem;
+use core::sync::atomic::Ordering;
+
+#[cfg(not(loom))]
 use core::ptr;
-use core::sync::atomic::{self, AtomicBool, Ordering};
 
 #[cfg(feature = "std")]
 use std::panic::{RefUnwindSafe, UnwindSafe};
 
+#[cfg(not(loom))]
 use super::seq_lock::SeqLock;
 
 /// A thread-safe mutable memory location.
@@ -488,23 +492,23 @@ macro_rules! impl_arithmetic {
 
 #[cfg(has_atomic_u8)]
 impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
-#[cfg(has_atomic_u8)]
+#[cfg(all(has_atomic_u8, not(loom)))]
 impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
 #[cfg(has_atomic_u16)]
 impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
-#[cfg(has_atomic_u16)]
+#[cfg(all(has_atomic_u16, not(loom)))]
 impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
 #[cfg(has_atomic_u32)]
 impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
-#[cfg(has_atomic_u32)]
+#[cfg(all(has_atomic_u32, not(loom)))]
 impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
 #[cfg(has_atomic_u64)]
 impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
-#[cfg(has_atomic_u64)]
+#[cfg(all(has_atomic_u64, not(loom)))]
 impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(loom)))]
 impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(loom)))]
 impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
 
 impl_arithmetic!(
@@ -512,6 +516,7 @@ impl_arithmetic!(
     atomic::AtomicUsize,
     "let a = AtomicCell::new(7usize);"
 );
+#[cfg(not(loom))]
 impl_arithmetic!(
     isize,
     atomic::AtomicIsize,
@@ -613,6 +618,7 @@ fn can_transmute<A, B>() -> bool {
 /// scalability.
 #[inline]
 #[must_use]
+#[cfg(not(loom))]
 fn lock(addr: usize) -> &'static SeqLock {
     // The number of locks is a prime number because we want to make sure `addr % LEN` gets
     // dispersed across all locks.
@@ -797,6 +803,9 @@ macro_rules! atomic {
         #[cfg(has_atomic_u64)]
         atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
 
+        #[cfg(loom)]
+        unimplemented!("loom does not support non-atomic atomic ops");
+        #[cfg(not(loom))]
         break $fallback_op;
     }
 };
diff --git a/crossbeam-utils/src/atomic/consume.rs b/crossbeam-utils/src/atomic/consume.rs
index 9be5464fb..daac9e665 100644
--- a/crossbeam-utils/src/atomic/consume.rs
+++ b/crossbeam-utils/src/atomic/consume.rs
@@ -1,5 +1,5 @@
 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-use core::sync::atomic::compiler_fence;
+use crate::concurrency::sync::atomic::compiler_fence;
 use core::sync::atomic::Ordering;
 
 /// Trait which allows reading from primitive atomic types with "consume" ordering.
@@ -53,30 +53,42 @@ macro_rules! impl_atomic {
             type Val = $val;
             impl_consume!();
         }
+        #[cfg(loom)]
+        impl AtomicConsume for ::loom::sync::atomic::$atomic {
+            type Val = $val;
+            impl_consume!();
+        }
     };
 }
 
 impl_atomic!(AtomicBool, bool);
 impl_atomic!(AtomicUsize, usize);
+#[cfg(not(loom))]
 impl_atomic!(AtomicIsize, isize);
 #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
 impl_atomic!(AtomicU8, u8);
-#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "8", not(loom)))]
 impl_atomic!(AtomicI8, i8);
 #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
 impl_atomic!(AtomicU16, u16);
-#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "16", not(loom)))]
 impl_atomic!(AtomicI16, i16);
 #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
 impl_atomic!(AtomicU32, u32);
-#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "32", not(loom)))]
 impl_atomic!(AtomicI32, i32);
 #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
 impl_atomic!(AtomicU64, u64);
-#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "64", not(loom)))]
 impl_atomic!(AtomicI64, i64);
 
 impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
     type Val = *mut T;
     impl_consume!();
 }
+
+#[cfg(loom)]
+impl<T> AtomicConsume for ::loom::sync::atomic::AtomicPtr<T> {
+    type Val = *mut T;
+    impl_consume!();
+}
diff --git a/crossbeam-utils/src/atomic/mod.rs b/crossbeam-utils/src/atomic/mod.rs
index 7309c166d..4314a66aa 100644
--- a/crossbeam-utils/src/atomic/mod.rs
+++ b/crossbeam-utils/src/atomic/mod.rs
@@ -1,7 +1,9 @@
 //! Atomic types.
 
+#[cfg(not(loom))]
 use cfg_if::cfg_if;
 
+#[cfg(not(loom))]
 cfg_if! {
     // Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
     // around.
diff --git a/crossbeam-utils/src/backoff.rs b/crossbeam-utils/src/backoff.rs
index adeea7c3a..72dccc221 100644
--- a/crossbeam-utils/src/backoff.rs
+++ b/crossbeam-utils/src/backoff.rs
@@ -1,6 +1,6 @@
+use crate::concurrency::sync::atomic;
 use core::cell::Cell;
 use core::fmt;
-use core::sync::atomic;
 
 const SPIN_LIMIT: u32 = 6;
 const YIELD_LIMIT: u32 = 10;
diff --git a/crossbeam-utils/src/lib.rs b/crossbeam-utils/src/lib.rs
index e76934b99..51ffaa33f 100644
--- a/crossbeam-utils/src/lib.rs
+++ b/crossbeam-utils/src/lib.rs
@@ -30,6 +30,57 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 #![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
 
+#[cfg(loom)]
+#[allow(unused_imports)]
+pub(crate) mod concurrency {
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use loom::sync::atomic::spin_loop_hint;
+            pub(crate) use loom::sync::atomic::{
+                AtomicBool, AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize,
+            };
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+            pub(crate) use loom::sync::atomic::fence as compiler_fence;
+        }
+        pub(crate) use loom::sync::{Arc, Condvar, Mutex};
+    }
+}
+#[cfg(not(loom))]
+#[allow(unused_imports)]
+pub(crate) mod concurrency {
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use core::sync::atomic::compiler_fence;
+            pub(crate) use core::sync::atomic::spin_loop_hint;
+            pub(crate) use core::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize};
+            #[cfg(has_atomic_u16)]
+            pub(crate) use core::sync::atomic::{AtomicI16, AtomicU16};
+            #[cfg(has_atomic_u32)]
+            pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32};
+            #[cfg(has_atomic_u64)]
+            pub(crate) use core::sync::atomic::{AtomicI64, AtomicU64};
+            #[cfg(has_atomic_u8)]
+            pub(crate) use core::sync::atomic::{AtomicI8, AtomicU8};
+        }
+
+        #[cfg(feature = "std")]
+        pub(crate) use std::sync::{Arc, Condvar, Mutex};
+    }
+}
+
+cfg_if! {
+    if #[cfg(feature = "alloc")] {
+        extern crate alloc;
+    } else if #[cfg(feature = "std")] {
+        extern crate std as alloc;
+    }
+}
+
 #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
 pub mod atomic;
 
@@ -44,6 +95,8 @@ use cfg_if::cfg_if;
 cfg_if! {
     if #[cfg(feature = "std")] {
         pub mod sync;
+
+        #[cfg(not(loom))]
         pub mod thread;
     }
 }
diff --git a/crossbeam-utils/src/sync/mod.rs b/crossbeam-utils/src/sync/mod.rs
index 363496372..007c3fb38 100644
--- a/crossbeam-utils/src/sync/mod.rs
+++ b/crossbeam-utils/src/sync/mod.rs
@@ -9,9 +9,11 @@
 //! [`WaitGroup`]: struct.WaitGroup.html
 
 mod parker;
+#[cfg(not(loom))]
 mod sharded_lock;
 mod wait_group;
 
 pub use self::parker::{Parker, Unparker};
+#[cfg(not(loom))]
 pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard};
 pub use self::wait_group::WaitGroup;
diff --git a/crossbeam-utils/src/sync/parker.rs b/crossbeam-utils/src/sync/parker.rs
index 070c5f454..22284f3f2 100644
--- a/crossbeam-utils/src/sync/parker.rs
+++ b/crossbeam-utils/src/sync/parker.rs
@@ -1,8 +1,8 @@
+use crate::concurrency::sync::atomic::AtomicUsize;
+use crate::concurrency::sync::{Arc, Condvar, Mutex};
+use core::sync::atomic::Ordering::SeqCst;
 use std::fmt;
 use std::marker::PhantomData;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::SeqCst;
-use std::sync::{Arc, Condvar, Mutex};
 use std::time::Duration;
 
 /// A thread parking primitive.
diff --git a/crossbeam-utils/src/sync/wait_group.rs b/crossbeam-utils/src/sync/wait_group.rs
index bec546ed7..68d89f22b 100644
--- a/crossbeam-utils/src/sync/wait_group.rs
+++ b/crossbeam-utils/src/sync/wait_group.rs
@@ -1,5 +1,5 @@
+use crate::concurrency::sync::{Arc, Condvar, Mutex};
 use std::fmt;
-use std::sync::{Arc, Condvar, Mutex};
 
 /// Enables threads to synchronize the beginning or end of some computation.
 ///
diff --git a/src/lib.rs b/src/lib.rs
index a637d601d..bed23c16b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -90,7 +90,10 @@ cfg_if! {
         pub use crossbeam_channel::select;
 
         pub use crossbeam_utils::sync;
+
+        #[cfg(not(loom))]
         pub use crossbeam_utils::thread;
+        #[cfg(not(loom))]
         pub use crossbeam_utils::thread::scope;
     }
 }
diff --git a/tests/subcrates.rs b/tests/subcrates.rs
index 21b99fb0e..7963cb773 100644
--- a/tests/subcrates.rs
+++ b/tests/subcrates.rs
@@ -1,5 +1,7 @@
 //! Makes sure subcrates are properly re-exported.
 
+#![cfg(not(loom))]
+
 use crossbeam::select;
 
 #[test]