diff --git a/.circleci/config.yml b/.circleci/config.yml index a7e6430..1cb44b6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -89,6 +89,9 @@ jobs: - run: name: Run all tests command: cargo test --all + - run: + name: Run all tests (no-std version) + command: cargo test --all --no-default-features rust/coverage: machine: true steps: diff --git a/Cargo.toml b/Cargo.toml index 6b30a29..8a4d76f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,11 @@ shredder_derive = { git = "https://github.com/Others/shredder_derive.git" } #shredder_derive = { path = "../shredder_derive" } stable_deref_trait = "1.1" +[dependencies.no-std-compat] +# Waiting for stable version containing new `sync` version +git = "https://gitlab.com/jD91mZM2/no-std-compat" +features = [ "alloc", "compat_hash", "compat_sync", "compat_macros" ] + [dev-dependencies] paste = "1.0" rand = "0.7.3" @@ -35,5 +40,7 @@ trybuild = "1.0" #debug = true [features] -default = [] +default = [ "std", "threads" ] # Default to using the std +std = [ "no-std-compat/std" ] +threads = [ "std" ] nightly-features = [] diff --git a/src/atomic.rs b/src/atomic.rs index f249a47..08e0ed9 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -1,5 +1,6 @@ use std::marker::PhantomData; use std::mem; +use std::prelude::v1::*; use std::ptr::drop_in_place; use std::sync::atomic::{AtomicPtr, Ordering}; use std::sync::Arc; diff --git a/src/collector/alloc.rs b/src/collector/alloc.rs index dca7c4a..acb37cf 100644 --- a/src/collector/alloc.rs +++ b/src/collector/alloc.rs @@ -1,6 +1,9 @@ use std::alloc::{alloc, dealloc, Layout}; use std::mem::{self, ManuallyDrop}; + +#[cfg(feature = "std")] use std::panic::UnwindSafe; +use std::prelude::v1::*; use std::ptr; use crate::collector::InternalGcRef; @@ -27,6 +30,7 @@ pub enum DeallocationAction { // It also, by contract of Scan, cannot have a Drop method that is unsafe in any thead unsafe impl Send for GcAllocation {} // Therefore, GcDataPtr is also UnwindSafe in the context we 
need it to be +#[cfg(feature = "std")] impl UnwindSafe for GcAllocation {} // We use the lockout to ensure that `GcDataPtr`s are not shared unsafe impl Sync for GcAllocation {} diff --git a/src/collector/collect_impl.rs b/src/collector/collect_impl.rs index f4eb08d..18c755d 100644 --- a/src/collector/collect_impl.rs +++ b/src/collector/collect_impl.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::Ordering; use crossbeam::deque::Injector; @@ -145,9 +146,7 @@ impl Collector { // Send off the data to be dropped in the background let drop_msg = DropMessage::DataToDrop(to_drop); - if let Err(e) = self.dropper.send_msg(drop_msg) { - error!("Error sending to drop thread {}", e); - } + self.drop(drop_msg); // update the trigger based on the new baseline self.trigger diff --git a/src/collector/data.rs b/src/collector/data.rs index dc9bc3c..dcffed2 100644 --- a/src/collector/data.rs +++ b/src/collector/data.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; use std::sync::Arc; diff --git a/src/collector/dropper.rs b/src/collector/dropper.rs index 594aa99..11c0609 100644 --- a/src/collector/dropper.rs +++ b/src/collector/dropper.rs @@ -1,16 +1,22 @@ +use std::prelude::v1::*; + +#[cfg(feature = "std")] use std::panic::catch_unwind; use std::sync::atomic::Ordering; use std::sync::Arc; -use std::thread::spawn; -use crossbeam::channel::{self, SendError, Sender}; +use crossbeam::channel::{SendError, Sender}; use parking_lot::RwLock; use rayon::iter::IntoParallelRefIterator; use rayon::iter::ParallelIterator; +#[cfg(feature = "threads")] +use std::thread::spawn; + use crate::collector::GcData; pub(crate) struct BackgroundDropper { + #[cfg(feature = "threads")] sender: Sender, } @@ -23,46 +29,80 @@ pub(crate) enum DropMessage { impl BackgroundDropper { pub fn new() -> Self { - let (sender, receiver) = channel::unbounded(); + #[cfg(feature = "threads")] + let (sender, receiver) = 
crossbeam::channel::unbounded(); // The drop thread deals with doing all the Drops this collector needs to do - spawn(move || { + #[cfg(feature = "threads")] + spawn(Box::new(move || { // An Err value means the stream will never recover while let Ok(drop_msg) = receiver.recv() { - match drop_msg { - DropMessage::DataToDrop(to_drop) => { - let to_drop = to_drop.read(); - - // NOTE: It's important that all data is correctly marked as deallocated before we start - to_drop.par_iter().for_each(|data| { - // Mark this data as in the process of being deallocated and unsafe to access - data.deallocated.store(true, Ordering::SeqCst); - }); - - // Then run the drops if needed - to_drop.par_iter().for_each(|data| { - let underlying_allocation = data.underlying_allocation; - let res = catch_unwind(move || unsafe { - underlying_allocation.deallocate(); - }); - if let Err(e) = res { - eprintln!("Gc background drop failed: {:?}", e); - } - }); - } - DropMessage::SyncUp(responder) => { - if let Err(e) = responder.send(()) { - eprintln!("Gc background syncup failed: {:?}", e); - } - } - } + handle_message(drop_msg) } - }); + })); - Self { sender } + Self { + #[cfg(feature = "threads")] + sender, + } } pub fn send_msg(&self, msg: DropMessage) -> Result<(), SendError> { - self.sender.send(msg) + #[cfg(feature = "threads")] + { + self.sender.send(msg) + } + + #[cfg(not(feature = "threads"))] + { + handle_message(msg); + Ok(()) + } + } +} + +fn handle_message(drop_msg: DropMessage) { + match drop_msg { + DropMessage::DataToDrop(to_drop) => { + let to_drop = to_drop.read(); + + // NOTE: It's important that all data is correctly marked as deallocated before we start + to_drop.par_iter().for_each(|data| { + // Mark this data as in the process of being deallocated and unsafe to access + data.deallocated.store(true, Ordering::SeqCst); + }); + + // Then run the drops if needed + to_drop.par_iter().for_each(|data| { + let underlying_allocation = data.underlying_allocation; + + // When the 
stdlib is available, we can use catch_unwind + // to protect ourselves against panics that unwind. + #[cfg(feature = "std")] + { + let res = catch_unwind(move || unsafe { + underlying_allocation.deallocate(); + }); + if let Err(e) = res { + eprintln!("Gc background drop failed: {:?}", e); + } + } + + // When it is not available, however, panics probably + // won't unwind, and there's no safe means to catch + // a panic. + // + // TODO is there a better way to safely handle this? + #[cfg(not(feature = "std"))] + unsafe { + underlying_allocation.deallocate() + }; + }); + } + DropMessage::SyncUp(responder) => { + if let Err(e) = responder.send(()) { + eprintln!("Gc background syncup failed: {:?}", e); + } + } } } diff --git a/src/collector/mod.rs b/src/collector/mod.rs index 9fb78aa..cf9a05d 100644 --- a/src/collector/mod.rs +++ b/src/collector/mod.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + mod alloc; mod collect_impl; mod data; @@ -6,9 +8,13 @@ mod trigger; use std::sync::atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}; use std::sync::Arc; + +#[cfg(feature = "threads")] use std::thread::spawn; -use crossbeam::channel::{self, Sender}; +use crossbeam::channel; +#[cfg(feature = "threads")] +use crossbeam::channel::Sender; use once_cell::sync::Lazy; use parking_lot::Mutex; @@ -68,6 +74,7 @@ pub struct Collector { /// we run automatic gc in a background thread /// sending to this channel indicates that thread should check the trigger, then collect if the /// trigger indicates it should + #[cfg(feature = "threads")] async_gc_notifier: Sender<()>, /// all the data we are managing plus metadata about what `Gc`s exist tracked_data: TrackedData, @@ -88,6 +95,7 @@ struct TrackedData { impl Collector { fn new() -> Arc { + #[cfg(feature = "threads")] let (async_gc_notifier, async_gc_receiver) = channel::bounded(1); let res = Arc::new(Self { @@ -95,6 +103,7 @@ impl Collector { atomic_spinlock: AtomicProtectingSpinlock::default(), trigger: GcTrigger::default(), dropper: 
BackgroundDropper::new(), + #[cfg(feature = "threads")] async_gc_notifier, tracked_data: TrackedData { // This is janky, but we subtract one from the collection number @@ -109,21 +118,31 @@ impl Collector { }, }); - // The async Gc thread deals with background Gc'ing - let async_collector_ref = Arc::downgrade(&res); - spawn(move || { - // An Err value means the stream will never recover - while async_gc_receiver.recv().is_ok() { - if let Some(collector) = async_collector_ref.upgrade() { - collector.check_then_collect(); + #[cfg(feature = "threads")] + { + // The async Gc thread deals with background Gc'ing + let async_collector_ref = Arc::downgrade(&res); + spawn(move || { + // An Err value means the stream will never recover + while async_gc_receiver.recv().is_ok() { + if let Some(collector) = async_collector_ref.upgrade() { + collector.check_then_collect(); + } } - } - }); + }); + } res } + fn drop(&self, drop_msg: DropMessage) { + if let Err(e) = self.dropper.send_msg(drop_msg) { + error!("Error sending to drop thread {}", e); + } + } + #[inline] + #[cfg(feature = "threads")] fn notify_async_gc_thread(&self) { // Note: We only send if there is room in the channel // If there's already a notification there the async thread is already notified @@ -137,6 +156,12 @@ impl Collector { }; } + #[inline] + #[cfg(not(feature = "threads"))] + fn notify_async_gc_thread(&self) { + self.check_then_collect(); + } + pub fn track_with_drop(&self, data: T) -> (InternalGcRef, *const T) { let (gc_data_ptr, heap_ptr) = GcAllocation::allocate_with_drop(data); self.track(gc_data_ptr, heap_ptr) diff --git a/src/collector/trigger.rs b/src/collector/trigger.rs index 2620233..2acb39d 100644 --- a/src/collector/trigger.rs +++ b/src/collector/trigger.rs @@ -1,4 +1,5 @@ use parking_lot::Mutex; +use std::prelude::v1::*; // TODO(issue): https://github.com/Others/shredder/issues/8 const DEFAULT_ALLOCATION_TRIGGER_PERCENT: f32 = 0.75; diff --git a/src/concurrency/atomic_protection.rs 
b/src/concurrency/atomic_protection.rs index 64c2bc4..1a68880 100644 --- a/src/concurrency/atomic_protection.rs +++ b/src/concurrency/atomic_protection.rs @@ -1,4 +1,7 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicU64, Ordering}; + +#[cfg(feature = "threads")] use std::thread::yield_now; const SENTINEL_VALUE: u64 = 1 << 60; @@ -38,6 +41,10 @@ impl AtomicProtectingSpinlock { } // Try to be kind to our scheduler, even as we employ an anti-pattern + // + // Without threading support, we'll just have to busy-wait. + // Should we let the user supply a 'yield' function of their own? + #[cfg(feature = "threads")] yield_now() } } diff --git a/src/concurrency/chunked_ll.rs b/src/concurrency/chunked_ll.rs index 8ae1299..27a3561 100644 --- a/src/concurrency/chunked_ll.rs +++ b/src/concurrency/chunked_ll.rs @@ -1,4 +1,5 @@ use std::mem::{self, MaybeUninit}; +use std::prelude::v1::*; use std::ptr; use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; use std::sync::Arc; diff --git a/src/concurrency/lockout.rs b/src/concurrency/lockout.rs index 9f4e95d..4d9e67e 100644 --- a/src/concurrency/lockout.rs +++ b/src/concurrency/lockout.rs @@ -1,3 +1,4 @@ +use std::prelude::v1::*; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; diff --git a/src/lib.rs b/src/lib.rs index d6aca4d..99ff1a0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -39,9 +39,14 @@ clippy::cast_precision_loss, // There is no way to avoid this precision loss clippy::explicit_deref_methods, // Sometimes calling `deref` directly is clearer clippy::module_name_repetitions, // Sometimes clear naming calls for repetition - clippy::multiple_crate_versions // There is no way to easily fix this without modifying our dependencies + clippy::multiple_crate_versions, // There is no way to easily fix this without modifying our dependencies + clippy::wildcard_imports, // No-std compatibility layer requires these for ergonomics )] +#![no_std] +extern crate no_std_compat as std; + +#[cfg(feature = 
"std")] #[macro_use] extern crate crossbeam; @@ -70,6 +75,8 @@ pub mod wrappers; use std::cell::RefCell; use std::sync::{Mutex, RwLock}; +use std::prelude::v1::*; + use crate::collector::COLLECTOR; pub use crate::finalize::Finalize; diff --git a/src/marker/gc_deref.rs b/src/marker/gc_deref.rs index 1685f3d..4b3f60b 100644 --- a/src/marker/gc_deref.rs +++ b/src/marker/gc_deref.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + /// A marker trait that marks that this data can be stored in a `DerefGc` /// /// `T` can be `GcDeref` only if it is deeply immutable through a `&T`. This is because it's diff --git a/src/marker/gc_safe.rs b/src/marker/gc_safe.rs index 8b35211..80d6cc2 100644 --- a/src/marker/gc_safe.rs +++ b/src/marker/gc_safe.rs @@ -1,5 +1,6 @@ use std::hash::{Hash, Hasher}; use std::ops::{Deref, DerefMut}; +use std::prelude::v1::*; /// A marker trait that marks that data can be scanned in the background by the garbage collector. /// diff --git a/src/r.rs b/src/r.rs index 98504e6..e6c27ea 100644 --- a/src/r.rs +++ b/src/r.rs @@ -2,6 +2,7 @@ use std::cmp::Ordering; use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::ops::{Deref, DerefMut}; +use std::prelude::v1::*; use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner}; diff --git a/src/scan.rs b/src/scan.rs index 8346e10..cff0599 100644 --- a/src/scan.rs +++ b/src/scan.rs @@ -1,5 +1,6 @@ use crate::collector::InternalGcRef; use crate::marker::GcSafe; +use std::prelude::v1::*; /// A trait capturing the ability of data to be scanned for references to data in a `Gc`. 
/// diff --git a/src/smart_ptr/deref_gc.rs b/src/smart_ptr/deref_gc.rs index f964cda..5a46b34 100644 --- a/src/smart_ptr/deref_gc.rs +++ b/src/smart_ptr/deref_gc.rs @@ -1,3 +1,5 @@ +use std::prelude::v1::*; + #[cfg(feature = "nightly-features")] use std::{marker::Unsize, ops::CoerceUnsized}; diff --git a/src/smart_ptr/gc.rs b/src/smart_ptr/gc.rs index 4c3f490..62e1477 100644 --- a/src/smart_ptr/gc.rs +++ b/src/smart_ptr/gc.rs @@ -5,6 +5,7 @@ use std::cmp::Ordering; use std::fmt::{self, Debug, Display, Formatter}; use std::hash::{Hash, Hasher}; use std::ops::Deref; +use std::prelude::v1::*; use std::sync; use std::sync::atomic; #[cfg(feature = "nightly-features")] diff --git a/src/std_impls/collections.rs b/src/std_impls/collections.rs index 525b22d..069a530 100644 --- a/src/std_impls/collections.rs +++ b/src/std_impls/collections.rs @@ -3,6 +3,7 @@ use crate::{Finalize, Scan, Scanner}; use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::hash::BuildHasher; use std::mem::forget; +use std::prelude::v1::*; use std::ptr::read; // For pretty much all simple collections, the collection inherets the properites of what it contains diff --git a/src/std_impls/mod.rs b/src/std_impls/mod.rs index 8b3b9ab..9aee4b9 100644 --- a/src/std_impls/mod.rs +++ b/src/std_impls/mod.rs @@ -6,7 +6,9 @@ mod wrap_types; #[cfg(test)] mod test { use std::cell::Cell; + #[cfg(feature = "std")] use std::panic::catch_unwind; + use std::prelude::v1::*; use std::sync::{Mutex, RwLock}; use crate::collector::{get_mock_handle, InternalGcRef}; @@ -67,6 +69,7 @@ mod test { assert_eq!(count, 1); } + #[cfg(feature = "std")] #[test] fn poisoned_mutex_scans() { let m = Mutex::new(MockGc { @@ -105,6 +108,7 @@ mod test { assert_eq!(count, 1); } + #[cfg(feature = "std")] #[test] fn poisoned_rwlock_scans() { let m = RwLock::new(MockGc { diff --git a/src/std_impls/value_types.rs b/src/std_impls/value_types.rs index 45c7a3b..d0a75fa 100644 --- a/src/std_impls/value_types.rs +++ 
b/src/std_impls/value_types.rs @@ -1,6 +1,12 @@ +#[cfg(feature = "std")] use std::collections::hash_map::RandomState; + +use std::prelude::v1::*; use std::ptr::drop_in_place; -use std::time::{Duration, Instant}; +use std::time::Duration; + +#[cfg(feature = "std")] +use std::time::Instant; macro_rules! sync_value_type { ($t: ty) => { @@ -38,14 +44,20 @@ sync_value_type!(f32); sync_value_type!(f64); sync_value_type!(String); + +#[cfg(feature = "std")] sync_value_type!(Instant); + sync_value_type!(Duration); +#[cfg(feature = "std")] sync_value_type!(RandomState); #[cfg(test)] mod test { use std::mem::forget; + use std::prelude::v1::*; + #[cfg(feature = "std")] use std::time::Instant; use crate::Finalize; @@ -88,5 +100,6 @@ mod test { test_no_panic_finalize!(f64, 1.0); test_no_panic_finalize!(String, String::from("hello")); + #[cfg(feature = "std")] test_no_panic_finalize!(Instant, Instant::now()); } diff --git a/src/std_impls/wrap_types.rs b/src/std_impls/wrap_types.rs index 35a4754..61a8033 100644 --- a/src/std_impls/wrap_types.rs +++ b/src/std_impls/wrap_types.rs @@ -1,8 +1,12 @@ use crate::marker::{GcDeref, GcDrop, GcSafe}; use crate::{Finalize, Scan, Scanner}; +use std::prelude::v1::*; use std::cell::{Cell, RefCell}; -use std::sync::{Arc, Mutex, RwLock, TryLockError}; +use std::sync::{Arc, Mutex, RwLock}; + +#[cfg(feature = "std")] +use std::sync::TryLockError; // ARC unsafe impl GcDeref for Arc where T: GcDeref + Send {} @@ -34,6 +38,7 @@ unsafe impl GcDrop for Mutex where T: GcDrop {} unsafe impl GcSafe for Mutex where T: GcSafe {} unsafe impl Scan for Mutex { + #[cfg(feature = "std")] #[inline] fn scan(&self, scanner: &mut Scanner<'_>) { match self.try_lock() { @@ -51,9 +56,24 @@ unsafe impl Scan for Mutex { } } } + + #[cfg(not(feature = "std"))] + #[inline] + fn scan(&self, scanner: &mut Scanner<'_>) { + match self.try_lock() { + Some(data) => { + let raw: &T = &*data; + scanner.scan(raw); + } + None => { + error!("A Mutex was in use when it was scanned -- 
something is buggy here! (no memory unsafety yet, so proceeding...)"); + } + } + } } unsafe impl Finalize for Mutex { + #[cfg(feature = "std")] unsafe fn finalize(&mut self) { let v = self.get_mut(); match v { @@ -61,6 +81,11 @@ unsafe impl Finalize for Mutex { Err(e) => e.into_inner().finalize(), } } + + #[cfg(not(feature = "std"))] + unsafe fn finalize(&mut self) { + self.get_mut().finalize() + } } // OPTION @@ -152,6 +177,7 @@ unsafe impl GcDrop for RwLock where T: GcDrop {} unsafe impl GcSafe for RwLock where T: GcSafe {} unsafe impl Scan for RwLock { + #[cfg(feature = "std")] #[inline] fn scan(&self, scanner: &mut Scanner<'_>) { match self.try_read() { @@ -169,9 +195,24 @@ unsafe impl Scan for RwLock { } } } + + #[cfg(not(feature = "std"))] + #[inline] + fn scan(&self, scanner: &mut Scanner<'_>) { + match self.try_read() { + Some(data) => { + let raw: &T = &*data; + scanner.scan(raw); + } + None => { + error!("A RwLock was in use when it was scanned -- something is buggy here! (no memory unsafety yet, so proceeding...)"); + } + } + } } unsafe impl Finalize for RwLock { + #[cfg(feature = "std")] unsafe fn finalize(&mut self) { let v = self.get_mut(); match v { @@ -179,4 +220,9 @@ unsafe impl Finalize for RwLock { Err(e) => e.into_inner().finalize(), } } + + #[cfg(not(feature = "std"))] + unsafe fn finalize(&mut self) { + self.get_mut().finalize() + } } diff --git a/src/wrappers.rs b/src/wrappers.rs index 5fea26b..e85e5a0 100644 --- a/src/wrappers.rs +++ b/src/wrappers.rs @@ -1,7 +1,11 @@ use std::cell::{BorrowError, BorrowMutError, RefCell}; use std::fmt::{self, Debug, Formatter}; use std::ops::{Deref, DerefMut}; -use std::sync::{self, TryLockError}; +use std::prelude::v1::*; +use std::sync; + +#[cfg(feature = "std")] +use std::sync::TryLockError; use crate::{GcGuard, Scan}; @@ -149,13 +153,20 @@ pub struct GcMutexGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { pub(crate) fn lock(g: GcGuard<'a, sync::Mutex>) -> Result> { + 
#[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = gc_mutex_internals::GcMutexGuardInt::new(g, |g| match g.lock() { - Ok(v) => v, - Err(e) => { - was_poisoned = true; - e.into_inner() + let internal_guard = gc_mutex_internals::GcMutexGuardInt::new(g, |g| { + #[cfg(feature = "std")] + match g.lock() { + Ok(v) => v, + Err(e) => { + was_poisoned = true; + e.into_inner() + } } + + #[cfg(not(feature = "std"))] + g.lock() }); let guard = Self { internal_guard }; @@ -168,9 +179,11 @@ impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { } pub(crate) fn try_lock(g: GcGuard<'a, sync::Mutex>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = - gc_mutex_internals::GcMutexGuardInt::try_new(g, |g| match g.try_lock() { + let internal_guard = gc_mutex_internals::GcMutexGuardInt::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_lock() { Ok(g) => Ok(g), Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -179,8 +192,15 @@ impl<'a, T: Scan + 'static> GcMutexGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = "std"))] + match g.try_lock() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = GcMutexGuard { internal_guard }; @@ -243,15 +263,21 @@ pub struct GcRwLockReadGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { pub(crate) fn read(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockReadGuardInternal::new(g, |g| match g.read() { + let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::new(g, |g| { + #[cfg(feature = "std")] + match g.read() { Ok(v) => v, Err(e) => { was_poisoned = true; e.into_inner() } - }); + } + + #[cfg(not(feature = "std"))] + g.read() + }); let guard = Self { internal_guard }; @@ -263,10 +289,12 
@@ impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { } pub(crate) fn try_read(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockReadGuardInternal::try_new(g, |g| match g.try_read() { + let internal_guard = gc_rwlock_internals::GcRwLockReadGuardInternal::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_read() { Ok(g) => Ok(g), Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -275,8 +303,15 @@ impl<'a, T: Scan + 'static> GcRwLockReadGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = "std"))] + match g.try_read() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = Self { internal_guard }; @@ -311,15 +346,21 @@ pub struct GcRwLockWriteGuard<'a, T: Scan + 'static> { impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { pub(crate) fn write(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockWriteGuardInternal::new(g, |g| match g.write() { + let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::new(g, |g| { + #[cfg(feature = "std")] + match g.write() { Ok(v) => v, Err(e) => { was_poisoned = true; e.into_inner() } - }); + } + + #[cfg(not(feature = "std"))] + g.write() + }); let guard = Self { internal_guard }; @@ -331,9 +372,11 @@ impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { } pub(crate) fn try_write(g: GcGuard<'a, sync::RwLock>) -> Result> { + #[allow(unused_mut)] let mut was_poisoned = false; - let internal_guard = - gc_rwlock_internals::GcRwLockWriteGuardInternal::try_new(g, |g| match g.try_write() { + let internal_guard = gc_rwlock_internals::GcRwLockWriteGuardInternal::try_new(g, |g| { + #[cfg(feature = "std")] + match g.try_write() { Ok(g) => Ok(g), 
Err(TryLockError::Poisoned(e)) => { was_poisoned = true; @@ -342,8 +385,15 @@ impl<'a, T: Scan + 'static> GcRwLockWriteGuard<'a, T> { Err(TryLockError::WouldBlock) => { Err(GcTryLockError::>::WouldBlock) } - }) - .map_err(|e| e.0)?; + } + + #[cfg(not(feature = "std"))] + match g.try_write() { + Some(g) => Ok(g), + None => Err(GcTryLockError::>::WouldBlock), + } + }) + .map_err(|e| e.0)?; let guard = GcRwLockWriteGuard { internal_guard }; diff --git a/tests/integration.rs b/tests/integration.rs index 2e995e8..cd696a9 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1,3 +1,8 @@ +#![no_std] + +use no_std_compat as std; +use std::prelude::v1::*; + use std::cell::RefCell; use std::mem::drop; use std::ops::Deref; @@ -147,7 +152,10 @@ fn scan_skip_problem() { drop(hider); let root_gc_guard = root_con.get(); + #[cfg(feature = "std")] let root_blocker = root_gc_guard.lock().unwrap(); + #[cfg(not(feature = "std"))] + let root_blocker = root_gc_guard.lock(); collect(); assert_eq!(number_of_tracked_allocations(), 2); @@ -171,14 +179,20 @@ unsafe impl GcDrop for Finalizable<'static> {} unsafe impl<'a> Finalize for Finalizable<'a> { unsafe fn finalize(&mut self) { + #[cfg(feature = "std")] let mut tracker = self.tracker.lock().unwrap(); + #[cfg(not(feature = "std"))] + let mut tracker = self.tracker.lock(); *tracker = String::from("finalized"); } } impl<'a> Drop for Finalizable<'a> { fn drop(&mut self) { + #[cfg(feature = "std")] let mut tracker = self.tracker.lock().unwrap(); + #[cfg(not(feature = "std"))] + let mut tracker = self.tracker.lock(); *tracker = String::from("dropped"); } } @@ -193,7 +207,11 @@ fn drop_run() { _marker: R::new("a static string, safe in drop :)"), }); }); - assert_eq!(&*(tracker.lock().unwrap()), "dropped"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "dropped"); assert_eq!(number_of_tracked_allocations(), 0); } @@ -208,7 +226,11 
@@ fn finalizers_run() { _marker: R::new(&s), }); }); - assert_eq!(&*(tracker.lock().unwrap()), "finalized"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "finalized"); assert_eq!(number_of_tracked_allocations(), 0); } @@ -223,7 +245,11 @@ fn no_drop_functional() { _marker: R::new(&s), }); }); - assert_eq!(&*(tracker.lock().unwrap()), "none"); + #[cfg(feature = "std")] + let value = &*(tracker.lock().unwrap()); + #[cfg(not(feature = "std"))] + let value = &*(tracker.lock()); + assert_eq!(value, "none"); assert_eq!(number_of_tracked_allocations(), 0); }