diff --git a/src/liballoc/arc_stage0.rs b/src/liballoc/arc_stage0.rs deleted file mode 100644 index 290b0566fd1b5..0000000000000 --- a/src/liballoc/arc_stage0.rs +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![stable(feature = "rust1", since = "1.0.0")] - -//! Threadsafe reference-counted boxes (the `Arc` type). -//! -//! The `Arc` type provides shared ownership of an immutable value. -//! Destruction is deterministic, and will occur as soon as the last owner is -//! gone. It is marked as `Send` because it uses atomic reference counting. -//! -//! If you do not need thread-safety, and just need shared ownership, consider -//! the [`Rc` type](../rc/struct.Rc.html). It is the same as `Arc`, but -//! does not use atomics, making it both thread-unsafe as well as significantly -//! faster when updating the reference count. -//! -//! The `downgrade` method can be used to create a non-owning `Weak` pointer -//! to the box. A `Weak` pointer can be upgraded to an `Arc` pointer, but -//! will return `None` if the value has already been dropped. -//! -//! For example, a tree with parent pointers can be represented by putting the -//! nodes behind strong `Arc` pointers, and then storing the parent pointers -//! as `Weak` pointers. -//! -//! # Examples -//! -//! Sharing some immutable data between threads: -//! -//! ```no_run -//! use std::sync::Arc; -//! use std::thread; -//! -//! let five = Arc::new(5); -//! -//! for _ in 0..10 { -//! let five = five.clone(); -//! -//! thread::spawn(move || { -//! println!("{:?}", five); -//! }); -//! } -//! ``` -//! -//! Sharing mutable data safely between threads with a `Mutex`: -//! -//! ```no_run -//! use std::sync::{Arc, Mutex}; -//! use std::thread; -//! -//! let five = Arc::new(Mutex::new(5)); -//! -//! for _ in 0..10 { -//! let five = five.clone(); -//! -//! thread::spawn(move || { -//! let mut number = five.lock().unwrap(); -//! -//! *number += 1; -//! -//! println!("{}", *number); // prints 6 -//! }); -//! } -//! ``` - -use boxed::Box; - -use core::prelude::*; - -use core::atomic; -use core::atomic::Ordering::{Relaxed, Release, Acquire, SeqCst}; -use core::fmt; -use core::cmp::Ordering; -use core::mem::{min_align_of, size_of}; -use core::mem; -use core::nonzero::NonZero; -use core::ops::Deref; -use core::ptr; -use core::hash::{Hash, Hasher}; -use heap::deallocate; - -/// An atomically reference counted wrapper for shared state. -/// -/// # Examples -/// -/// In this example, a large vector of floats is shared between several threads. -/// With simple pipes, without `Arc`, a copy would have to be made for each -/// thread. -/// -/// When you clone an `Arc`, it will create another pointer to the data and -/// increase the reference counter. 
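A runnable variant of the two doc examples above, joining the spawned threads so the result is deterministic (the `// prints 6` comment only describes whichever thread takes the lock first; after that the printed values climb to 15). This is a minimal sketch against today's `std` API rather than this 2015 snapshot:

```
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let five = Arc::new(Mutex::new(5));

    let handles: Vec<_> = (0..10)
        .map(|_| {
            let five = five.clone();
            thread::spawn(move || {
                *five.lock().unwrap() += 1;
            })
        })
        .collect();

    for h in handles {
        h.join().unwrap();
    }
    // All ten increments are visible once every thread has been joined.
    assert_eq!(*five.lock().unwrap(), 15);
}
```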
-/// -/// ``` -/// # #![feature(alloc, core)] -/// use std::sync::Arc; -/// use std::thread; -/// -/// fn main() { -/// let numbers: Vec<_> = (0..100u32).collect(); -/// let shared_numbers = Arc::new(numbers); -/// -/// for _ in 0..10 { -/// let child_numbers = shared_numbers.clone(); -/// -/// thread::spawn(move || { -/// let local_numbers = &child_numbers[..]; -/// -/// // Work with the local numbers -/// }); -/// } -/// } -/// ``` -#[unsafe_no_drop_flag] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Arc { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: NonZero<*mut ArcInner>, -} - -unsafe impl Send for Arc { } -unsafe impl Sync for Arc { } - - -/// A weak pointer to an `Arc`. -/// -/// Weak pointers will not keep the data inside of the `Arc` alive, and can be -/// used to break cycles between `Arc` pointers. -#[unsafe_no_drop_flag] -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -pub struct Weak { - // FIXME #12808: strange name to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: NonZero<*mut ArcInner>, -} - -unsafe impl Send for Weak { } -unsafe impl Sync for Weak { } - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Weak { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(Weak)") - } -} - -struct ArcInner { - strong: atomic::AtomicUsize, - weak: atomic::AtomicUsize, - data: T, -} - -unsafe impl Send for ArcInner {} -unsafe impl Sync for ArcInner {} - -impl Arc { - /// Constructs a new `Arc`. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(data: T) -> Arc { - // Start the weak pointer count as 1 which is the weak pointer that's - // held by all the strong pointers (kinda), see std/rc.rs for more info - let x: Box<_> = box ArcInner { - strong: atomic::AtomicUsize::new(1), - weak: atomic::AtomicUsize::new(1), - data: data, - }; - Arc { _ptr: unsafe { NonZero::new(mem::transmute(x)) } } - } - - /// Downgrades the `Arc` to a `Weak` reference. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// let weak_five = five.downgrade(); - /// ``` - #[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] - pub fn downgrade(&self) -> Weak { - // See the clone() impl for why this is relaxed - self.inner().weak.fetch_add(1, Relaxed); - Weak { _ptr: self._ptr } - } -} - -impl Arc { - #[inline] - fn inner(&self) -> &ArcInner { - // This unsafety is ok because while this arc is alive we're guaranteed - // that the inner pointer is valid. Furthermore, we know that the - // `ArcInner` structure itself is `Sync` because the inner data is - // `Sync` as well, so we're ok loaning out an immutable pointer to these - // contents. - unsafe { &**self._ptr } - } - - // Non-inlined part of `drop`. - #[inline(never)] - unsafe fn drop_slow(&mut self) { - let ptr = *self._ptr; - - // Destroy the data at this time, even though we may not free the box - // allocation itself (there may still be weak pointers lying around). 
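The "weak pointer held by all the strong pointers" bookkeeping in `Arc::new` is observable through the counts. A sketch using the modern associated functions (this diff spells them as the free functions `weak_count`/`strong_count` further below); the implicit shared weak reference is subtracted before reporting:

```
use std::sync::Arc;

fn main() {
    let five = Arc::new(5);
    assert_eq!(Arc::strong_count(&five), 1);
    assert_eq!(Arc::weak_count(&five), 0); // the implicit weak is not reported

    let weak_five = Arc::downgrade(&five);
    assert_eq!(Arc::weak_count(&five), 1);
    drop(weak_five);
    assert_eq!(Arc::weak_count(&five), 0);
}
```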
- drop(ptr::read(&self.inner().data)); - - if self.inner().weak.fetch_sub(1, Release) == 1 { - atomic::fence(Acquire); - deallocate(ptr as *mut u8, size_of::>(), min_align_of::>()) - } - } -} - -/// Get the number of weak references to this value. -#[inline] -#[unstable(feature = "alloc")] -pub fn weak_count(this: &Arc) -> usize { this.inner().weak.load(SeqCst) - 1 } - -/// Get the number of strong references to this value. -#[inline] -#[unstable(feature = "alloc")] -pub fn strong_count(this: &Arc) -> usize { this.inner().strong.load(SeqCst) } - - -/// Returns a mutable reference to the contained value if the `Arc` is unique. -/// -/// Returns `None` if the `Arc` is not unique. -/// -/// # Examples -/// -/// ``` -/// # #![feature(alloc)] -/// extern crate alloc; -/// # fn main() { -/// use alloc::arc::{Arc, get_mut}; -/// -/// let mut x = Arc::new(3); -/// *get_mut(&mut x).unwrap() = 4; -/// assert_eq!(*x, 4); -/// -/// let _y = x.clone(); -/// assert!(get_mut(&mut x).is_none()); -/// # } -/// ``` -#[inline] -#[unstable(feature = "alloc")] -pub fn get_mut(this: &mut Arc) -> Option<&mut T> { - if strong_count(this) == 1 && weak_count(this) == 0 { - // This unsafety is ok because we're guaranteed that the pointer - // returned is the *only* pointer that will ever be returned to T. Our - // reference count is guaranteed to be 1 at this point, and we required - // the Arc itself to be `mut`, so we're returning the only possible - // reference to the inner data. - let inner = unsafe { &mut **this._ptr }; - Some(&mut inner.data) - } else { - None - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Arc { - /// Makes a clone of the `Arc`. - /// - /// This increases the strong reference count. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five.clone(); - /// ``` - #[inline] - fn clone(&self) -> Arc { - // Using a relaxed ordering is alright here, as knowledge of the - // original reference prevents other threads from erroneously deleting - // the object. - // - // As explained in the [Boost documentation][1], Increasing the - // reference counter can always be done with memory_order_relaxed: New - // references to an object can only be formed from an existing - // reference, and passing an existing reference from one thread to - // another must already provide any required synchronization. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - self.inner().strong.fetch_add(1, Relaxed); - Arc { _ptr: self._ptr } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Deref for Arc { - type Target = T; - - #[inline] - fn deref(&self) -> &T { - &self.inner().data - } -} - -impl Arc { - /// Make a mutable reference from the given `Arc`. - /// - /// This is also referred to as a copy-on-write operation because the inner - /// data is cloned if the reference count is greater than one. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// let mut five = Arc::new(5); - /// - /// let mut_five = five.make_unique(); - /// ``` - #[inline] - #[unstable(feature = "alloc")] - pub fn make_unique(&mut self) -> &mut T { - // Note that we hold a strong reference, which also counts as a weak - // reference, so we only clone if there is an additional reference of - // either kind. 
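`get_mut` in action, per the doc example above; modern std exposes it as the associated function `Arc::get_mut`, with the same uniqueness rule (strong count 1, no outstanding weak pointers):

```
use std::sync::Arc;

fn main() {
    let mut x = Arc::new(3);
    // Unique, so mutable access to the contents is allowed.
    *Arc::get_mut(&mut x).unwrap() = 4;
    assert_eq!(*x, 4);

    // A second strong reference makes the Arc shared; get_mut refuses.
    let _y = x.clone();
    assert!(Arc::get_mut(&mut x).is_none());
}
```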
- if self.inner().strong.load(SeqCst) != 1 || - self.inner().weak.load(SeqCst) != 1 { - *self = Arc::new((**self).clone()) - } - // As with `get_mut()`, the unsafety is ok because our reference was - // either unique to begin with, or became one upon cloning the contents. - let inner = unsafe { &mut **self._ptr }; - &mut inner.data - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Drop for Arc { - /// Drops the `Arc`. - /// - /// This will decrement the strong reference count. If the strong reference - /// count becomes zero and the only other references are `Weak` ones, - /// `drop`s the inner value. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// { - /// let five = Arc::new(5); - /// - /// // stuff - /// - /// drop(five); // explicit drop - /// } - /// { - /// let five = Arc::new(5); - /// - /// // stuff - /// - /// } // implicit drop - /// ``` - #[inline] - fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run - // more than once (but it is guaranteed to be zeroed after the first if - // it's run more than once) - let ptr = *self._ptr; - // if ptr.is_null() { return } - if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return } - - // Because `fetch_sub` is already atomic, we do not need to synchronize - // with other threads unless we are going to delete the object. This - // same logic applies to the below `fetch_sub` to the `weak` count. - if self.inner().strong.fetch_sub(1, Release) != 1 { return } - - // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing - // of the reference count synchronizes with this `Acquire` fence. This - // means that use of the data happens before decreasing the reference - // count, which happens before this fence, which happens before the - // deletion of the data. - // - // As explained in the [Boost documentation][1], - // - // > It is important to enforce any possible access to the object in one - // > thread (through an existing reference) to *happen before* deleting - // > the object in a different thread. This is achieved by a "release" - // > operation after dropping a reference (any access to the object - // > through this reference must obviously happened before), and an - // > "acquire" operation before deleting the object. - // - // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) - atomic::fence(Acquire); - - unsafe { - self.drop_slow() - } - } -} - -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -impl Weak { - /// Upgrades a weak reference to a strong reference. - /// - /// Upgrades the `Weak` reference to an `Arc`, if possible. - /// - /// Returns `None` if there were no strong references and the data was - /// destroyed. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// let weak_five = five.downgrade(); - /// - /// let strong_five: Option> = weak_five.upgrade(); - /// ``` - pub fn upgrade(&self) -> Option> { - // We use a CAS loop to increment the strong count instead of a - // fetch_add because once the count hits 0 it must never be above 0. 
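The copy-on-write behaviour of `make_unique` (later renamed `Arc::make_mut` in std), sketched with the modern name: when the value is shared, the inner data is cloned into a fresh allocation before a `&mut` is handed out, so other owners are unaffected:

```
use std::sync::Arc;

fn main() {
    let mut data = Arc::new(5);
    let other = data.clone();

    // Shared, so the inner value is cloned first (copy-on-write).
    *Arc::make_mut(&mut data) += 1;

    assert_eq!(*data, 6);
    assert_eq!(*other, 5); // the original allocation is untouched
}
```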
- let inner = self.inner(); - loop { - let n = inner.strong.load(SeqCst); - if n == 0 { return None } - let old = inner.strong.compare_and_swap(n, n + 1, SeqCst); - if old == n { return Some(Arc { _ptr: self._ptr }) } - } - } - - #[inline] - fn inner(&self) -> &ArcInner { - // See comments above for why this is "safe" - unsafe { &**self._ptr } - } -} - -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -impl Clone for Weak { - /// Makes a clone of the `Weak`. - /// - /// This increases the weak reference count. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// let weak_five = Arc::new(5).downgrade(); - /// - /// weak_five.clone(); - /// ``` - #[inline] - fn clone(&self) -> Weak { - // See comments in Arc::clone() for why this is relaxed - self.inner().weak.fetch_add(1, Relaxed); - Weak { _ptr: self._ptr } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Drop for Weak { - /// Drops the `Weak`. - /// - /// This will decrement the weak reference count. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::sync::Arc; - /// - /// { - /// let five = Arc::new(5); - /// let weak_five = five.downgrade(); - /// - /// // stuff - /// - /// drop(weak_five); // explicit drop - /// } - /// { - /// let five = Arc::new(5); - /// let weak_five = five.downgrade(); - /// - /// // stuff - /// - /// } // implicit drop - /// ``` - fn drop(&mut self) { - let ptr = *self._ptr; - - // see comments above for why this check is here - if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return } - - // If we find out that we were the last weak pointer, then its time to - // deallocate the data entirely. See the discussion in Arc::drop() about - // the memory orderings - if self.inner().weak.fetch_sub(1, Release) == 1 { - atomic::fence(Acquire); - unsafe { deallocate(ptr as *mut u8, size_of::>(), - min_align_of::>()) } - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialEq for Arc { - /// Equality for two `Arc`s. - /// - /// Two `Arc`s are equal if their inner value are equal. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five == Arc::new(5); - /// ``` - fn eq(&self, other: &Arc) -> bool { *(*self) == *(*other) } - - /// Inequality for two `Arc`s. - /// - /// Two `Arc`s are unequal if their inner value are unequal. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five != Arc::new(5); - /// ``` - fn ne(&self, other: &Arc) -> bool { *(*self) != *(*other) } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl PartialOrd for Arc { - /// Partial comparison for two `Arc`s. - /// - /// The two are compared by calling `partial_cmp()` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five.partial_cmp(&Arc::new(5)); - /// ``` - fn partial_cmp(&self, other: &Arc) -> Option { - (**self).partial_cmp(&**other) - } - - /// Less-than comparison for two `Arc`s. - /// - /// The two are compared by calling `<` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five < Arc::new(5); - /// ``` - fn lt(&self, other: &Arc) -> bool { *(*self) < *(*other) } - - /// 'Less-than or equal to' comparison for two `Arc`s. - /// - /// The two are compared by calling `<=` on their inner values. 
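The CAS loop in `upgrade` generalizes to any "increment unless zero" counter. A standalone sketch of the same logic, written with the modern `compare_exchange_weak` instead of this snapshot's `compare_and_swap`:

```
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

/// Increment `n` unless it is zero, mirroring the strong-count check
/// in `Weak::upgrade`: once the count reaches 0 it must stay 0.
fn try_increment(n: &AtomicUsize) -> bool {
    let mut cur = n.load(SeqCst);
    loop {
        if cur == 0 {
            return false;
        }
        match n.compare_exchange_weak(cur, cur + 1, SeqCst, SeqCst) {
            Ok(_) => return true,
            Err(actual) => cur = actual, // lost a race; retry with the new value
        }
    }
}

fn main() {
    let n = AtomicUsize::new(1);
    assert!(try_increment(&n));
    assert_eq!(n.load(SeqCst), 2);
    assert!(!try_increment(&AtomicUsize::new(0)));
}
```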
- /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five <= Arc::new(5); - /// ``` - fn le(&self, other: &Arc) -> bool { *(*self) <= *(*other) } - - /// Greater-than comparison for two `Arc`s. - /// - /// The two are compared by calling `>` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five > Arc::new(5); - /// ``` - fn gt(&self, other: &Arc) -> bool { *(*self) > *(*other) } - - /// 'Greater-than or equal to' comparison for two `Arc`s. - /// - /// The two are compared by calling `>=` on their inner values. - /// - /// # Examples - /// - /// ``` - /// use std::sync::Arc; - /// - /// let five = Arc::new(5); - /// - /// five >= Arc::new(5); - /// ``` - fn ge(&self, other: &Arc) -> bool { *(*self) >= *(*other) } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl Ord for Arc { - fn cmp(&self, other: &Arc) -> Ordering { (**self).cmp(&**other) } -} -#[stable(feature = "rust1", since = "1.0.0")] -impl Eq for Arc {} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Pointer for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&*self._ptr, f) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Default for Arc { - #[stable(feature = "rust1", since = "1.0.0")] - fn default() -> Arc { Arc::new(Default::default()) } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Hash for Arc { - fn hash(&self, state: &mut H) { - (**self).hash(state) - } -} diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 6633e48a814f6..12eadcc145d52 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -57,16 +57,12 @@ use core::any::Any; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; +use core::marker::Unsize; use core::mem; -use core::ops::{Deref, DerefMut}; +use core::ops::{CoerceUnsized, Deref, DerefMut}; use core::ptr::{Unique}; use core::raw::{TraitObject}; -#[cfg(not(stage0))] -use core::marker::Unsize; -#[cfg(not(stage0))] -use core::ops::CoerceUnsized; - /// A value that represents the heap. This is the default place that the `box` /// keyword allocates into when no place is supplied. 
/// @@ -392,5 +388,4 @@ impl<'a,A,R> FnOnce for Box+Send+'a> { } } -#[cfg(not(stage0))] impl, U: ?Sized> CoerceUnsized> for Box {} diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 45dcea909f44a..ac5a5d60cbd47 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -112,14 +112,7 @@ pub mod boxed; mod boxed { pub use std::boxed::{Box, HEAP}; } #[cfg(test)] mod boxed_test; -#[cfg(not(stage0))] pub mod arc; -#[cfg(stage0)] -mod arc_stage0; -#[cfg(stage0)] -pub mod arc { - pub use arc_stage0::*; -} pub mod rc; /// Common out-of-memory routine diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 1f660449593fb..44f4a6a6290c8 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -159,36 +159,19 @@ use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering}; use core::default::Default; use core::fmt; use core::hash::{Hasher, Hash}; -use core::marker::{self, Sized}; -use core::mem::{self, min_align_of, size_of, forget}; +use core::intrinsics::{assume, drop_in_place}; +use core::marker::{self, Sized, Unsize}; +use core::mem::{self, min_align_of, size_of, min_align_of_val, size_of_val, forget}; use core::nonzero::NonZero; -use core::ops::{Deref, Drop}; +use core::ops::{CoerceUnsized, Deref, Drop}; use core::option::Option; use core::option::Option::{Some, None}; use core::ptr; use core::result::Result; use core::result::Result::{Ok, Err}; -use core::intrinsics::assume; - -#[cfg(not(stage0))] -use core::intrinsics::drop_in_place; -#[cfg(not(stage0))] -use core::marker::Unsize; -#[cfg(not(stage0))] -use core::mem::{min_align_of_val, size_of_val}; -#[cfg(not(stage0))] -use core::ops::CoerceUnsized; use heap::deallocate; -#[cfg(stage0)] -struct RcBox { - strong: Cell, - weak: Cell, - value: T, -} - -#[cfg(not(stage0))] struct RcBox { strong: Cell, weak: Cell, @@ -199,15 +182,6 @@ struct RcBox { /// A reference-counted pointer type over an immutable value. /// /// See the [module level documentation](./index.html) for more details. -#[cfg(stage0)] -#[unsafe_no_drop_flag] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Rc { - // FIXME #12808: strange names to try to avoid interfering with field - // accesses of the contained type via Deref - _ptr: NonZero<*mut RcBox>, -} -#[cfg(not(stage0))] #[unsafe_no_drop_flag] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { @@ -216,19 +190,9 @@ pub struct Rc { _ptr: NonZero<*mut RcBox>, } -#[cfg(stage0)] -impl !marker::Send for Rc {} - -#[cfg(not(stage0))] impl !marker::Send for Rc {} - -#[cfg(stage0)] -impl !marker::Sync for Rc {} - -#[cfg(not(stage0))] impl !marker::Sync for Rc {} -#[cfg(not(stage0))] impl, U: ?Sized> CoerceUnsized> for Rc {} impl Rc { @@ -259,7 +223,6 @@ impl Rc { } } -#[cfg(not(stage0))] impl Rc { /// Downgrades the `Rc` to a `Weak` reference. /// @@ -281,44 +244,12 @@ impl Rc { } } -#[cfg(stage0)] -impl Rc { - /// Downgrades the `Rc` to a `Weak` reference. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// let five = Rc::new(5); - /// - /// let weak_five = five.downgrade(); - /// ``` - #[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module")] - pub fn downgrade(&self) -> Weak { - self.inc_weak(); - Weak { _ptr: self._ptr } - } -} - /// Get the number of weak references to this value. 
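What the `CoerceUnsized` impls for `Box` and `Rc` buy once un-gated: smart pointers unsize implicitly, exactly like references. A sketch (`dyn Trait` is the modern spelling of a bare trait object):

```
use std::fmt::Debug;
use std::rc::Rc;

fn main() {
    // Sized value -> trait object, through the CoerceUnsized impls above.
    let b: Box<dyn Debug> = Box::new(5);
    let r: Rc<dyn Debug> = Rc::new(5);

    // Arrays unsize to slices the same way, including behind Rc.
    let s: Rc<[i32]> = Rc::new([1, 2, 3]);

    println!("{:?} {:?} {:?}", b, r, s);
    assert_eq!(s.len(), 3);
}
```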
-#[cfg(stage0)] -#[inline] -#[unstable(feature = "alloc")] -pub fn weak_count(this: &Rc) -> usize { this.weak() - 1 } -#[cfg(not(stage0))] #[inline] #[unstable(feature = "alloc")] pub fn weak_count(this: &Rc) -> usize { this.weak() - 1 } /// Get the number of strong references to this value. -#[cfg(stage0)] -#[inline] -#[unstable(feature = "alloc")] -pub fn strong_count(this: &Rc) -> usize { this.strong() } -#[cfg(not(stage0))] #[inline] #[unstable(feature = "alloc")] pub fn strong_count(this: &Rc) -> usize { this.strong() } @@ -438,17 +369,6 @@ impl Rc { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Deref for Rc { - type Target = T; - - #[inline(always)] - fn deref(&self) -> &T { - &self.inner().value - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl Deref for Rc { type Target = T; @@ -459,58 +379,6 @@ impl Deref for Rc { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Drop for Rc { - /// Drops the `Rc`. - /// - /// This will decrement the strong reference count. If the strong reference - /// count becomes zero and the only other references are `Weak` ones, - /// `drop`s the inner value. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// { - /// let five = Rc::new(5); - /// - /// // stuff - /// - /// drop(five); // explicit drop - /// } - /// { - /// let five = Rc::new(5); - /// - /// // stuff - /// - /// } // implicit drop - /// ``` - fn drop(&mut self) { - unsafe { - let ptr = *self._ptr; - if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE { - self.dec_strong(); - if self.strong() == 0 { - ptr::read(&**self); // destroy the contained object - - // remove the implicit "strong weak" pointer now that we've - // destroyed the contents. - self.dec_weak(); - - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of::>(), - min_align_of::>()) - } - } - } - } - } -} - -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Rc { /// Drops the `Rc`. @@ -564,32 +432,6 @@ impl Drop for Rc { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Rc { - - /// Makes a clone of the `Rc`. - /// - /// When you clone an `Rc`, it will create another pointer to the data and - /// increase the strong reference counter. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// let five = Rc::new(5); - /// - /// five.clone(); - /// ``` - #[inline] - fn clone(&self) -> Rc { - self.inc_strong(); - Rc { _ptr: self._ptr } - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Rc { @@ -634,17 +476,6 @@ impl Default for Rc { } #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl PartialEq for Rc { - #[inline(always)] - fn eq(&self, other: &Rc) -> bool { **self == **other } - - #[inline(always)] - fn ne(&self, other: &Rc) -> bool { **self != **other } -} - -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] impl PartialEq for Rc { /// Equality for two `Rc`s. 
/// @@ -680,34 +511,9 @@ impl PartialEq for Rc { } #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl Eq for Rc {} -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] impl Eq for Rc {} #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl PartialOrd for Rc { - #[inline(always)] - fn partial_cmp(&self, other: &Rc) -> Option { - (**self).partial_cmp(&**other) - } - - #[inline(always)] - fn lt(&self, other: &Rc) -> bool { **self < **other } - - #[inline(always)] - fn le(&self, other: &Rc) -> bool { **self <= **other } - - #[inline(always)] - fn gt(&self, other: &Rc) -> bool { **self > **other } - - #[inline(always)] - fn ge(&self, other: &Rc) -> bool { **self >= **other } -} -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] impl PartialOrd for Rc { /// Partial comparison for two `Rc`s. /// @@ -793,13 +599,6 @@ impl PartialOrd for Rc { } #[stable(feature = "rust1", since = "1.0.0")] -#[cfg(stage0)] -impl Ord for Rc { - #[inline] - fn cmp(&self, other: &Rc) -> Ordering { (**self).cmp(&**other) } -} -#[stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(stage0))] impl Ord for Rc { /// Comparison for two `Rc`s. /// @@ -818,14 +617,6 @@ impl Ord for Rc { fn cmp(&self, other: &Rc) -> Ordering { (**self).cmp(&**other) } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Hash for Rc { - fn hash(&self, state: &mut H) { - (**self).hash(state); - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl Hash for Rc { fn hash(&self, state: &mut H) { @@ -833,14 +624,6 @@ impl Hash for Rc { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Display for Rc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&**self, f) - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Rc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -848,14 +631,6 @@ impl fmt::Display for Rc { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Rc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&**self, f) - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Rc { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -876,16 +651,6 @@ impl fmt::Pointer for Rc { /// dropped. /// /// See the [module level documentation](./index.html) for more. -#[cfg(stage0)] -#[unsafe_no_drop_flag] -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -pub struct Weak { - // FIXME #12808: strange names to try to avoid interfering with - // field accesses of the contained type via Deref - _ptr: NonZero<*mut RcBox>, -} -#[cfg(not(stage0))] #[unsafe_no_drop_flag] #[unstable(feature = "alloc", reason = "Weak pointers may not belong in this module.")] @@ -895,51 +660,9 @@ pub struct Weak { _ptr: NonZero<*mut RcBox>, } -#[cfg(stage0)] -impl !marker::Send for Weak {} -#[cfg(not(stage0))] impl !marker::Send for Weak {} - -#[cfg(stage0)] -impl !marker::Sync for Weak {} -#[cfg(not(stage0))] impl !marker::Sync for Weak {} - -#[cfg(stage0)] -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -impl Weak { - - /// Upgrades a weak reference to a strong reference. - /// - /// Upgrades the `Weak` reference to an `Rc`, if possible. - /// - /// Returns `None` if there were no strong references and the data was - /// destroyed. 
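All of these comparison impls forward through `Deref`, so `Rc`s compare by value, not by pointer; `Rc::ptr_eq` (a later addition to std) is the pointer-identity check:

```
use std::rc::Rc;

fn main() {
    let a = Rc::new(5);
    let b = Rc::new(5);

    assert!(a == b);              // value equality via the inner i32s
    assert!(!Rc::ptr_eq(&a, &b)); // ...even though the allocations differ
    assert!(Rc::new(4) < Rc::new(5));
}
```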
- /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// let five = Rc::new(5); - /// - /// let weak_five = five.downgrade(); - /// - /// let strong_five: Option> = weak_five.upgrade(); - /// ``` - pub fn upgrade(&self) -> Option> { - if self.strong() == 0 { - None - } else { - self.inc_strong(); - Some(Rc { _ptr: self._ptr }) - } - } -} -#[cfg(not(stage0))] #[unstable(feature = "alloc", reason = "Weak pointers may not belong in this module.")] impl Weak { @@ -973,52 +696,6 @@ impl Weak { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl Drop for Weak { - /// Drops the `Weak`. - /// - /// This will decrement the weak reference count. - /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// { - /// let five = Rc::new(5); - /// let weak_five = five.downgrade(); - /// - /// // stuff - /// - /// drop(weak_five); // explicit drop - /// } - /// { - /// let five = Rc::new(5); - /// let weak_five = five.downgrade(); - /// - /// // stuff - /// - /// } // implicit drop - /// ``` - fn drop(&mut self) { - unsafe { - let ptr = *self._ptr; - if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE { - self.dec_weak(); - // the weak count starts at 1, and will only go to zero if all - // the strong pointers have disappeared. - if self.weak() == 0 { - deallocate(ptr as *mut u8, size_of::>(), - min_align_of::>()) - } - } - } - } -} - -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl Drop for Weak { /// Drops the `Weak`. @@ -1064,32 +741,6 @@ impl Drop for Weak { } } -#[cfg(stage0)] -#[unstable(feature = "alloc", - reason = "Weak pointers may not belong in this module.")] -impl Clone for Weak { - - /// Makes a clone of the `Weak`. - /// - /// This increases the weak reference count. 
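The strong/weak drop protocol above, observed from the outside: dropping the last strong reference destroys the value immediately, while a surviving `Weak` keeps only the (now empty) allocation alive. Sketch with the modern `Rc::downgrade` spelling:

```
use std::rc::Rc;

fn main() {
    let five = Rc::new(5);
    let weak_five = Rc::downgrade(&five);
    assert_eq!(*weak_five.upgrade().unwrap(), 5);

    // Last strong reference gone: the value is dropped right here.
    drop(five);
    assert!(weak_five.upgrade().is_none());
} // weak_five drops; the weak count hits 0 and the allocation is freed
```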
- /// - /// # Examples - /// - /// ``` - /// # #![feature(alloc)] - /// use std::rc::Rc; - /// - /// let weak_five = Rc::new(5).downgrade(); - /// - /// weak_five.clone(); - /// ``` - #[inline] - fn clone(&self) -> Weak { - self.inc_weak(); - Weak { _ptr: self._ptr } - } -} -#[cfg(not(stage0))] #[unstable(feature = "alloc", reason = "Weak pointers may not belong in this module.")] impl Clone for Weak { @@ -1115,14 +766,6 @@ impl Clone for Weak { } } -#[cfg(stage0)] -#[stable(feature = "rust1", since = "1.0.0")] -impl fmt::Debug for Weak { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "(Weak)") - } -} -#[cfg(not(stage0))] #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Weak { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -1130,30 +773,6 @@ impl fmt::Debug for Weak { } } -#[cfg(stage0)] -#[doc(hidden)] -trait RcBoxPtr { - fn inner(&self) -> &RcBox; - - #[inline] - fn strong(&self) -> usize { self.inner().strong.get() } - - #[inline] - fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); } - - #[inline] - fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); } - - #[inline] - fn weak(&self) -> usize { self.inner().weak.get() } - - #[inline] - fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); } - - #[inline] - fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); } -} -#[cfg(not(stage0))] #[doc(hidden)] trait RcBoxPtr { fn inner(&self) -> &RcBox; @@ -1177,21 +796,6 @@ trait RcBoxPtr { fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); } } -#[cfg(stage0)] -impl RcBoxPtr for Rc { - #[inline(always)] - fn inner(&self) -> &RcBox { - unsafe { - // Safe to assume this here, as if it weren't true, we'd be breaking - // the contract anyway. - // This allows the null check to be elided in the destructor if we - // manipulated the reference count in the same function. - assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); - &(**self._ptr) - } - } -} -#[cfg(not(stage0))] impl RcBoxPtr for Rc { #[inline(always)] fn inner(&self) -> &RcBox { @@ -1206,21 +810,6 @@ impl RcBoxPtr for Rc { } } -#[cfg(stage0)] -impl RcBoxPtr for Weak { - #[inline(always)] - fn inner(&self) -> &RcBox { - unsafe { - // Safe to assume this here, as if it weren't true, we'd be breaking - // the contract anyway. - // This allows the null check to be elided in the destructor if we - // manipulated the reference count in the same function. 
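The `RcBoxPtr` helper trait reduces to plain non-atomic counter bookkeeping over `Cell<usize>`; a hypothetical standalone stand-in (names invented here) showing why none of this is thread-safe and hence why `Rc` is `!Send`/`!Sync`:

```
use std::cell::Cell;

/// Hypothetical stand-in for the counter half of RcBox: unsynchronized
/// Cell reads and writes, no atomics.
struct Counts {
    strong: Cell<usize>,
    weak: Cell<usize>,
}

impl Counts {
    fn inc_strong(&self) { self.strong.set(self.strong.get() + 1) }
    fn dec_strong(&self) { self.strong.set(self.strong.get() - 1) }
}

fn main() {
    // Both counts start at 1: one real strong owner plus the implicit
    // "strong weak" reference described earlier.
    let c = Counts { strong: Cell::new(1), weak: Cell::new(1) };
    c.inc_strong();
    c.dec_strong();
    assert_eq!(c.strong.get(), 1);
    assert_eq!(c.weak.get(), 1);
}
```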
- assume(!(*(&self._ptr as *const _ as *const *const ())).is_null()); - &(**self._ptr) - } - } -} -#[cfg(not(stage0))] impl RcBoxPtr for Weak { #[inline(always)] fn inner(&self) -> &RcBox { diff --git a/src/libcollections/borrow.rs b/src/libcollections/borrow.rs index 08bd88cd861b1..8e8fc0bedec6a 100644 --- a/src/libcollections/borrow.rs +++ b/src/libcollections/borrow.rs @@ -116,17 +116,11 @@ impl<'a, T: ?Sized> BorrowMut for &'a mut T { fn borrow_mut(&mut self) -> &mut T { &mut **self } } -#[cfg(stage0)] -impl Borrow for rc::Rc { - fn borrow(&self) -> &T { &**self } -} - -#[cfg(not(stage0))] impl Borrow for rc::Rc { fn borrow(&self) -> &T { &**self } } -impl Borrow for arc::Arc { +impl Borrow for arc::Arc { fn borrow(&self) -> &T { &**self } } diff --git a/src/libcollections/btree/node.rs b/src/libcollections/btree/node.rs index 4f3c3b0826342..2d8335d373473 100644 --- a/src/libcollections/btree/node.rs +++ b/src/libcollections/btree/node.rs @@ -19,7 +19,6 @@ pub use self::TraversalItem::*; use core::prelude::*; use core::cmp::Ordering::{Greater, Less, Equal}; -#[cfg(not(stage0))] use core::intrinsics::arith_offset; use core::iter::Zip; use core::marker::PhantomData; @@ -207,22 +206,6 @@ impl RawItems { RawItems::from_parts(slice.as_ptr(), slice.len()) } - #[cfg(stage0)] - unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems { - if mem::size_of::() == 0 { - RawItems { - head: ptr, - tail: (ptr as usize + len) as *const T, - } - } else { - RawItems { - head: ptr, - tail: ptr.offset(len as isize), - } - } - } - - #[cfg(not(stage0))] unsafe fn from_parts(ptr: *const T, len: usize) -> RawItems { if mem::size_of::() == 0 { RawItems { @@ -237,18 +220,6 @@ impl RawItems { } } - #[cfg(stage0)] - unsafe fn push(&mut self, val: T) { - ptr::write(self.tail as *mut T, val); - - if mem::size_of::() == 0 { - self.tail = (self.tail as usize + 1) as *const T; - } else { - self.tail = self.tail.offset(1); - } - } - - #[cfg(not(stage0))] unsafe fn push(&mut self, val: T) { ptr::write(self.tail as *mut T, val); @@ -263,26 +234,6 @@ impl RawItems { impl Iterator for RawItems { type Item = T; - #[cfg(stage0)] - fn next(&mut self) -> Option { - if self.head == self.tail { - None - } else { - unsafe { - let ret = Some(ptr::read(self.head)); - - if mem::size_of::() == 0 { - self.head = (self.head as usize + 1) as *const T; - } else { - self.head = self.head.offset(1); - } - - ret - } - } - } - - #[cfg(not(stage0))] fn next(&mut self) -> Option { if self.head == self.tail { None @@ -303,24 +254,6 @@ impl Iterator for RawItems { } impl DoubleEndedIterator for RawItems { - #[cfg(stage0)] - fn next_back(&mut self) -> Option { - if self.head == self.tail { - None - } else { - unsafe { - if mem::size_of::() == 0 { - self.tail = (self.tail as usize - 1) as *const T; - } else { - self.tail = self.tail.offset(-1); - } - - Some(ptr::read(self.tail)) - } - } - } - - #[cfg(not(stage0))] fn next_back(&mut self) -> Option { if self.head == self.tail { None diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index 84fe5ba1fbf5a..4d52eb8e8ae67 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -66,9 +66,7 @@ use core::cmp::max; use core::cmp::Ordering; use core::fmt; use core::hash::{self, Hash}; -use core::intrinsics::assume; -#[cfg(not(stage0))] -use core::intrinsics::arith_offset; +use core::intrinsics::{arith_offset, assume}; use core::iter::{repeat, FromIterator}; use core::marker::PhantomData; use core::mem; @@ -1526,25 +1524,6 @@ impl IntoIterator for Vec { /// } /// ``` 
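`arith_offset` exists because `ptr.offset(1)` cannot advance a pointer to a zero-sized type, so the `RawItems` iterator above treats the address itself as a counter. A sketch of that dispatch, using stable `wrapping_add` on the address in place of the intrinsic:

```
use std::mem::size_of;

/// Advance a pointer by one element, ZST-aware (sketch of the
/// btree/slice iterator trick above).
unsafe fn bump<T>(p: *const T) -> *const T {
    if size_of::<T>() == 0 {
        // For ZSTs the "pointer" is only a counter; offset() would not move.
        (p as usize).wrapping_add(1) as *const T
    } else {
        p.offset(1)
    }
}

fn main() {
    let xs = [1u32, 2];
    unsafe {
        assert_eq!(*bump(xs.as_ptr()), 2);
        // ZST path: the address is bumped arithmetically, never dereferenced.
        assert_eq!(bump(std::ptr::null::<()>()) as usize, 1);
    }
}
```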
#[inline] - #[cfg(stage0)] - fn into_iter(self) -> IntoIter { - unsafe { - let ptr = *self.ptr; - assume(!ptr.is_null()); - let cap = self.cap; - let begin = ptr as *const T; - let end = if mem::size_of::() == 0 { - (ptr as usize + self.len()) as *const T - } else { - ptr.offset(self.len() as isize) as *const T - }; - mem::forget(self); - IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end } - } - } - - #[inline] - #[cfg(not(stage0))] fn into_iter(self) -> IntoIter { unsafe { let ptr = *self.ptr; @@ -1764,32 +1743,6 @@ impl Iterator for IntoIter { type Item = T; #[inline] - #[cfg(stage0)] - fn next(&mut self) -> Option { - unsafe { - if self.ptr == self.end { - None - } else { - if mem::size_of::() == 0 { - // purposefully don't use 'ptr.offset' because for - // vectors with 0-size elements this would return the - // same pointer. - self.ptr = mem::transmute(self.ptr as usize + 1); - - // Use a non-null pointer value - Some(ptr::read(EMPTY as *mut T)) - } else { - let old = self.ptr; - self.ptr = self.ptr.offset(1); - - Some(ptr::read(old)) - } - } - } - } - - #[inline] - #[cfg(not(stage0))] fn next(&mut self) -> Option { unsafe { if self.ptr == self.end { @@ -1830,29 +1783,6 @@ impl Iterator for IntoIter { #[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for IntoIter { #[inline] - #[cfg(stage0)] - fn next_back(&mut self) -> Option { - unsafe { - if self.end == self.ptr { - None - } else { - if mem::size_of::() == 0 { - // See above for why 'ptr.offset' isn't used - self.end = mem::transmute(self.end as usize - 1); - - // Use a non-null pointer value - Some(ptr::read(EMPTY as *mut T)) - } else { - self.end = self.end.offset(-1); - - Some(ptr::read(mem::transmute(self.end))) - } - } - } - } - - #[inline] - #[cfg(not(stage0))] fn next_back(&mut self) -> Option { unsafe { if self.end == self.ptr { diff --git a/src/libcollectionstest/vec.rs b/src/libcollectionstest/vec.rs index ac9cf198d6732..b4d3d0b1a2281 100644 --- a/src/libcollectionstest/vec.rs +++ b/src/libcollectionstest/vec.rs @@ -399,7 +399,7 @@ fn test_map_in_place_zero_sized() { #[test] fn test_map_in_place_zero_drop_count() { - use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; + use std::sync::atomic::{AtomicUsize, Ordering}; #[derive(Clone, PartialEq, Debug)] struct Nothing; @@ -413,7 +413,7 @@ fn test_map_in_place_zero_drop_count() { } } const NUM_ELEMENTS: usize = 2; - static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0); let v = repeat(Nothing).take(NUM_ELEMENTS).collect::>(); diff --git a/src/libcore/atomic.rs b/src/libcore/atomic.rs index ec693f366912d..56b459f5f17d8 100644 --- a/src/libcore/atomic.rs +++ b/src/libcore/atomic.rs @@ -76,7 +76,6 @@ use marker::Sync; use intrinsics; use cell::UnsafeCell; -use marker::PhantomData; use default::Default; @@ -87,8 +86,8 @@ pub struct AtomicBool { } impl Default for AtomicBool { - fn default() -> AtomicBool { - ATOMIC_BOOL_INIT + fn default() -> Self { + Self::new(Default::default()) } } @@ -101,8 +100,8 @@ pub struct AtomicIsize { } impl Default for AtomicIsize { - fn default() -> AtomicIsize { - ATOMIC_ISIZE_INIT + fn default() -> Self { + Self::new(Default::default()) } } @@ -115,8 +114,8 @@ pub struct AtomicUsize { } impl Default for AtomicUsize { - fn default() -> AtomicUsize { - ATOMIC_USIZE_INIT + fn default() -> Self { + Self::new(Default::default()) } } @@ -125,8 +124,7 @@ unsafe impl Sync for AtomicUsize {} /// A raw pointer type which can be safely shared between threads. 
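The test above counts drops through a `const fn`-initialized static; the same pattern doubles as a check that `IntoIter` destroys exactly the elements it still owns:

```
use std::sync::atomic::{AtomicUsize, Ordering};

static DROPS: AtomicUsize = AtomicUsize::new(0); // const fn, fine in a static

struct Noisy;
impl Drop for Noisy {
    fn drop(&mut self) {
        DROPS.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let v = vec![Noisy, Noisy, Noisy];
    let mut it = v.into_iter();
    it.next(); // one element moved out and dropped here
    drop(it);  // the remaining two drop with the iterator
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
}
```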
#[stable(feature = "rust1", since = "1.0.0")] pub struct AtomicPtr { - p: UnsafeCell, - _marker: PhantomData<*mut T>, + p: UnsafeCell<*mut T>, } impl Default for AtomicPtr { @@ -175,16 +173,13 @@ pub enum Ordering { /// An `AtomicBool` initialized to `false`. #[stable(feature = "rust1", since = "1.0.0")] -pub const ATOMIC_BOOL_INIT: AtomicBool = - AtomicBool { v: UnsafeCell { value: 0 } }; +pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); /// An `AtomicIsize` initialized to `0`. #[stable(feature = "rust1", since = "1.0.0")] -pub const ATOMIC_ISIZE_INIT: AtomicIsize = - AtomicIsize { v: UnsafeCell { value: 0 } }; +pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0); /// An `AtomicUsize` initialized to `0`. #[stable(feature = "rust1", since = "1.0.0")] -pub const ATOMIC_USIZE_INIT: AtomicUsize = - AtomicUsize { v: UnsafeCell { value: 0, } }; +pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0); // NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly const UINT_TRUE: usize = !0; @@ -202,9 +197,8 @@ impl AtomicBool { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(v: bool) -> AtomicBool { - let val = if v { UINT_TRUE } else { 0 }; - AtomicBool { v: UnsafeCell::new(val) } + pub const fn new(v: bool) -> AtomicBool { + AtomicBool { v: UnsafeCell::new(-(v as isize) as usize) } } /// Loads a value from the bool. @@ -445,7 +439,7 @@ impl AtomicIsize { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(v: isize) -> AtomicIsize { + pub const fn new(v: isize) -> AtomicIsize { AtomicIsize {v: UnsafeCell::new(v)} } @@ -633,7 +627,7 @@ impl AtomicUsize { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(v: usize) -> AtomicUsize { + pub const fn new(v: usize) -> AtomicUsize { AtomicUsize { v: UnsafeCell::new(v) } } @@ -821,9 +815,8 @@ impl AtomicPtr { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: UnsafeCell::new(p as usize), - _marker: PhantomData } + pub const fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: UnsafeCell::new(p) } } /// Loads a value from the pointer. @@ -848,7 +841,7 @@ impl AtomicPtr { #[stable(feature = "rust1", since = "1.0.0")] pub fn load(&self, order: Ordering) -> *mut T { unsafe { - atomic_load(self.p.get(), order) as *mut T + atomic_load(self.p.get() as *mut usize, order) as *mut T } } @@ -875,7 +868,7 @@ impl AtomicPtr { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(self.p.get(), ptr as usize, order); } + unsafe { atomic_store(self.p.get() as *mut usize, ptr as usize, order); } } /// Stores a value into the pointer, returning the old value. @@ -897,7 +890,7 @@ impl AtomicPtr { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(self.p.get(), ptr as usize, order) as *mut T } + unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T } } /// Stores a value into the pointer if the current value is the same as the expected value. 
@@ -925,7 +918,7 @@ impl AtomicPtr { #[stable(feature = "rust1", since = "1.0.0")] pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { - atomic_compare_and_swap(self.p.get(), old as usize, + atomic_compare_and_swap(self.p.get() as *mut usize, old as usize, new as usize, order) as *mut T } } diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 45a8012210417..c83421d3067cd 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -170,7 +170,7 @@ impl Cell { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn new(value: T) -> Cell { + pub const fn new(value: T) -> Cell { Cell { value: UnsafeCell::new(value), } @@ -302,7 +302,7 @@ impl RefCell { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn new(value: T) -> RefCell { + pub const fn new(value: T) -> RefCell { RefCell { value: UnsafeCell::new(value), borrow: Cell::new(UNUSED), @@ -663,7 +663,7 @@ impl UnsafeCell { /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[inline] - pub fn new(value: T) -> UnsafeCell { + pub const fn new(value: T) -> UnsafeCell { UnsafeCell { value: value } } diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index fa432e311eb74..88a686ec255f3 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -145,13 +145,9 @@ extern "rust-intrinsic" { /// but no instructions will be emitted for it. This is appropriate for operations /// on the same thread that may be preempted, such as when interacting with signal /// handlers. - #[cfg(not(stage0))] // SNAP 857ef6e pub fn atomic_singlethreadfence(); - #[cfg(not(stage0))] // SNAP 857ef6e pub fn atomic_singlethreadfence_acq(); - #[cfg(not(stage0))] // SNAP 857ef6e pub fn atomic_singlethreadfence_rel(); - #[cfg(not(stage0))] // SNAP 857ef6e pub fn atomic_singlethreadfence_acqrel(); /// Aborts the execution of the process. @@ -193,11 +189,8 @@ extern "rust-intrinsic" { pub fn min_align_of() -> usize; pub fn pref_align_of() -> usize; - #[cfg(not(stage0))] pub fn size_of_val(_: &T) -> usize; - #[cfg(not(stage0))] pub fn min_align_of_val(_: &T) -> usize; - #[cfg(not(stage0))] pub fn drop_in_place(_: *mut T); /// Gets a static string slice containing the name of a type. @@ -294,7 +287,6 @@ extern "rust-intrinsic" { /// resulting pointer to point into or one byte past the end of an allocated /// object, and it wraps with two's complement arithmetic. The resulting /// value is not necessarily valid to be used to actually access memory. - #[cfg(not(stage0))] pub fn arith_offset(dst: *const T, offset: isize) -> *const T; /// Copies `count * size_of` bytes from `src` to `dst`. The source @@ -592,13 +584,6 @@ extern "rust-intrinsic" { /// Returns (a * b) mod 2^N, where N is the width of N in bits. pub fn overflowing_mul(a: T, b: T) -> T; - /// Returns the value of the discriminant for the variant in 'v', - /// cast to a `u64`; if `T` has no discriminant, returns 0. 
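The payoff of making these constructors `const fn`: statics can now be written with ordinary `new` calls instead of the `ATOMIC_*_INIT`/`ONCE_INIT`-style constants being phased out later in this diff. A sketch with today's `std` paths:

```
use std::ptr;
use std::sync::Once;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

static COUNTER: AtomicUsize = AtomicUsize::new(0);
static HEAD: AtomicPtr<u8> = AtomicPtr::new(ptr::null_mut());
static INIT: Once = Once::new();

fn main() {
    INIT.call_once(|| {
        // one-time setup, the pattern liblog and rustc_trans use below
        COUNTER.fetch_add(1, Ordering::SeqCst);
    });
    INIT.call_once(|| unreachable!()); // second call is a no-op
    assert_eq!(COUNTER.load(Ordering::SeqCst), 1);
    assert!(HEAD.load(Ordering::SeqCst).is_null());
}
```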
- pub fn discriminant_value(v: &T) -> u64; -} - -#[cfg(not(stage0))] -extern "rust-intrinsic" { /// Performs an unchecked signed division, which results in undefined behavior, /// in cases where y == 0, or x == int::MIN and y == -1 pub fn unchecked_sdiv(x: T, y: T) -> T; @@ -612,4 +597,8 @@ extern "rust-intrinsic" { /// Returns the remainder of an unchecked signed division, which results in /// undefined behavior, in cases where y == 0 pub fn unchecked_srem(x: T, y: T) -> T; + + /// Returns the value of the discriminant for the variant in 'v', + /// cast to a `u64`; if `T` has no discriminant, returns 0. + pub fn discriminant_value(v: &T) -> u64; } diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 0794fb0c45dee..9dfaec0095a5a 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -74,6 +74,7 @@ #![feature(concat_idents)] #![feature(reflect)] #![feature(custom_attribute)] +#![feature(const_fn)] #[macro_use] mod macros; diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 86e91df38ab35..bc0f3045972fe 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -55,7 +55,6 @@ pub trait Sized { /// Types that can be "unsized" to a dynamically sized type. #[unstable(feature = "core")] -#[cfg(not(stage0))] #[lang="unsize"] pub trait Unsize { // Empty. diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 173b73fdb0924..7749d053285ad 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -95,29 +95,12 @@ pub fn size_of() -> usize { /// /// assert_eq!(4, mem::size_of_val(&5i32)); /// ``` -#[cfg(not(stage0))] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn size_of_val(val: &T) -> usize { unsafe { intrinsics::size_of_val(val) } } -/// Returns the size of the type that `_val` points to in bytes. -/// -/// # Examples -/// -/// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::size_of_val(&5i32)); -/// ``` -#[cfg(stage0)] -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn size_of_val(_val: &T) -> usize { - size_of::() -} - /// Returns the ABI-required minimum alignment of a type /// /// This is the alignment used for struct fields. It may be smaller than the preferred alignment. @@ -144,29 +127,12 @@ pub fn min_align_of() -> usize { /// /// assert_eq!(4, mem::min_align_of_val(&5i32)); /// ``` -#[cfg(not(stage0))] #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn min_align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Returns the ABI-required minimum alignment of the type of the value that `_val` points to -/// -/// # Examples -/// -/// ``` -/// use std::mem; -/// -/// assert_eq!(4, mem::min_align_of_val(&5i32)); -/// ``` -#[cfg(stage0)] -#[inline] -#[stable(feature = "rust1", since = "1.0.0")] -pub fn min_align_of_val(_val: &T) -> usize { - min_align_of::() -} - /// Returns the alignment in memory for a type. /// /// This function will return the alignment, in bytes, of a type in memory. If the alignment diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs index 59819fd500d1d..32522794254f6 100644 --- a/src/libcore/nonzero.rs +++ b/src/libcore/nonzero.rs @@ -11,9 +11,7 @@ //! Exposes the NonZero lang item which provides optimization hints. 
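With the intrinsic now taking `&T` for unsized `T`, `size_of_val` reports the dynamic size of the pointed-to value instead of falling back to `size_of::<T>()` as the deleted stage0 shim did (`align_of_val` is the later name of `min_align_of_val`):

```
use std::mem;

fn main() {
    assert_eq!(mem::size_of_val(&5i32), 4);

    // Dynamically sized values get their runtime size.
    let slice: &[u8] = &[1, 2, 3, 4, 5];
    assert_eq!(mem::size_of_val(slice), 5);
    assert_eq!(mem::align_of_val(slice), 1);
}
```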
use marker::Sized; -use ops::Deref; -#[cfg(not(stage0))] -use ops::CoerceUnsized; +use ops::{CoerceUnsized, Deref}; /// Unsafe trait to indicate what types are usable with the NonZero struct pub unsafe trait Zeroable {} @@ -57,5 +55,4 @@ impl<T: Zeroable> Deref for NonZero<T> { } } -#[cfg(not(stage0))] impl<T: Zeroable+CoerceUnsized<U>, U: Zeroable> CoerceUnsized<NonZero<U>> for NonZero<T> {} diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index f16614cfd092d..c52f4de732ff9 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -67,12 +67,9 @@ #![stable(feature = "rust1", since = "1.0.0")] -use marker::Sized; +use marker::{Sized, Unsize}; use fmt; -#[cfg(not(stage0))] -use marker::Unsize; - /// The `Drop` trait is used to run some code when a value goes out of scope. This /// is sometimes called a 'destructor'. /// @@ -1214,39 +1211,29 @@ mod impls { /// Trait that indicates that this is a pointer or a wrapper for one, /// where unsizing can be performed on the pointee. #[unstable(feature = "core")] -#[cfg(not(stage0))] #[lang="coerce_unsized"] pub trait CoerceUnsized<T: ?Sized> { // Empty. } // &mut T -> &mut U -#[cfg(not(stage0))] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} // &mut T -> &U -#[cfg(not(stage0))] impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b mut T {} // &mut T -> *mut U -#[cfg(not(stage0))] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for &'a mut T {} // &mut T -> *const U -#[cfg(not(stage0))] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a mut T {} // &T -> &U -#[cfg(not(stage0))] impl<'a, 'b: 'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} // &T -> *const U -#[cfg(not(stage0))] impl<'a, T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for &'a T {} // *mut T -> *mut U -#[cfg(not(stage0))] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} // *mut T -> *const U -#[cfg(not(stage0))] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *mut T {} // *const T -> *const U -#[cfg(not(stage0))] impl<T: ?Sized+Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} diff --git a/src/libcore/slice.rs b/src/libcore/slice.rs index cbac70921b7ee..2dc28a4786f2d 100644 --- a/src/libcore/slice.rs +++ b/src/libcore/slice.rs @@ -125,19 +125,6 @@ pub trait SliceExt { } // Use macros to be generic over const/mut -#[cfg(stage0)] -macro_rules! slice_offset { - ($ptr:expr, $by:expr) => {{ - let ptr = $ptr; - if size_from_ptr(ptr) == 0 { - transmute((ptr as isize).wrapping_add($by)) - } else { - ptr.offset($by) - } - }}; -} - -#[cfg(not(stage0))] macro_rules!
slice_offset { ($ptr:expr, $by:expr) => {{ let ptr = $ptr; diff --git a/src/libcoretest/atomic.rs b/src/libcoretest/atomic.rs index 8e3c7f4595a48..c50f18c235233 100644 --- a/src/libcoretest/atomic.rs +++ b/src/libcoretest/atomic.rs @@ -70,13 +70,15 @@ fn int_xor() { assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f); } -static S_BOOL : AtomicBool = ATOMIC_BOOL_INIT; -static S_INT : AtomicIsize = ATOMIC_ISIZE_INIT; -static S_UINT : AtomicUsize = ATOMIC_USIZE_INIT; +static S_FALSE: AtomicBool = AtomicBool::new(false); +static S_TRUE: AtomicBool = AtomicBool::new(true); +static S_INT: AtomicIsize = AtomicIsize::new(0); +static S_UINT: AtomicUsize = AtomicUsize::new(0); #[test] fn static_init() { - assert!(!S_BOOL.load(SeqCst)); + assert!(!S_FALSE.load(SeqCst)); + assert!(S_TRUE.load(SeqCst)); assert!(S_INT.load(SeqCst) == 0); assert!(S_UINT.load(SeqCst) == 0); } diff --git a/src/liblog/lib.rs b/src/liblog/lib.rs index f2b4d15d42fa7..4c92162b2d6dc 100644 --- a/src/liblog/lib.rs +++ b/src/liblog/lib.rs @@ -184,7 +184,7 @@ use std::mem; use std::env; use std::rt; use std::slice; -use std::sync::{Once, ONCE_INIT, StaticMutex, MUTEX_INIT}; +use std::sync::{Once, StaticMutex}; use directive::LOG_LEVEL_NAMES; @@ -200,7 +200,7 @@ pub const MAX_LOG_LEVEL: u32 = 255; /// The default logging level of a crate if no other is specified. const DEFAULT_LOG_LEVEL: u32 = 1; -static LOCK: StaticMutex = MUTEX_INIT; +static LOCK: StaticMutex = StaticMutex::new(); /// An unsafe constant that is the maximum logging level of any module /// specified. This is the first line of defense to determining whether a @@ -367,7 +367,7 @@ pub struct LogLocation { /// module's log statement should be emitted or not. #[doc(hidden)] pub fn mod_enabled(level: u32, module: &str) -> bool { - static INIT: Once = ONCE_INIT; + static INIT: Once = Once::new(); INIT.call_once(init); // It's possible for many threads are in this function, only one of them diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 993d0dcf115df..c92bb81c5fb25 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -167,7 +167,4 @@ mod rustc { } // Build the diagnostics array at the end so that the metadata includes error use sites. -#[cfg(stage0)] -__build_diagnostic_array! { DIAGNOSTICS } -#[cfg(not(stage0))] __build_diagnostic_array! 
{ librustc, DIAGNOSTICS } diff --git a/src/librustc/middle/infer/region_inference/graphviz.rs b/src/librustc/middle/infer/region_inference/graphviz.rs index 054cec68745ad..5a06a5193bf1c 100644 --- a/src/librustc/middle/infer/region_inference/graphviz.rs +++ b/src/librustc/middle/infer/region_inference/graphviz.rs @@ -32,7 +32,7 @@ use std::env; use std::fs::File; use std::io; use std::io::prelude::*; -use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT}; +use std::sync::atomic::{AtomicBool, Ordering}; use syntax::ast; fn print_help_message() { @@ -76,7 +76,7 @@ pub fn maybe_print_constraints_for<'a, 'tcx>(region_vars: &RegionVarBindings<'a, let output_path = { let output_template = match requested_output { Ok(ref s) if &**s == "help" => { - static PRINTED_YET: AtomicBool = ATOMIC_BOOL_INIT; + static PRINTED_YET: AtomicBool = AtomicBool::new(false); if !PRINTED_YET.load(Ordering::SeqCst) { print_help_message(); PRINTED_YET.store(true, Ordering::SeqCst); diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index e96605fda333c..a8457d3bf94cd 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -47,7 +47,4 @@ mod borrowck; pub mod graphviz; -#[cfg(stage0)] -__build_diagnostic_array! { DIAGNOSTICS } -#[cfg(not(stage0))] __build_diagnostic_array! { librustc_borrowck, DIAGNOSTICS } diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 7afc1afc224ee..cae0c7c7f5792 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -3723,7 +3723,4 @@ pub fn resolve_crate<'a, 'tcx>(session: &'a Session, } } -#[cfg(stage0)] -__build_diagnostic_array! { DIAGNOSTICS } -#[cfg(not(stage0))] __build_diagnostic_array! { librustc_resolve, DIAGNOSTICS } diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index a9e9e3f4048ac..bd16e018bc465 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -1005,8 +1005,8 @@ pub fn run_assembler(sess: &Session, outputs: &OutputFilenames) { } unsafe fn configure_llvm(sess: &Session) { - use std::sync::{Once, ONCE_INIT}; - static INIT: Once = ONCE_INIT; + use std::sync::Once; + static INIT: Once = Once::new(); // Copy what clang does by turning on loop vectorization at O2 and // slp vectorization at O3 diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index a1220ab6ba86e..8866e7ff19dcc 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -39,6 +39,7 @@ #![feature(path_ext)] #![feature(fs)] #![feature(path_relative_from)] +#![feature(std_misc)] #![allow(trivial_casts)] diff --git a/src/librustc_trans/trans/base.rs b/src/librustc_trans/trans/base.rs index c26fa77a10f93..c3f614c8cc06a 100644 --- a/src/librustc_trans/trans/base.rs +++ b/src/librustc_trans/trans/base.rs @@ -2653,8 +2653,8 @@ pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>) // Before we touch LLVM, make sure that multithreading is enabled. unsafe { - use std::sync::{Once, ONCE_INIT}; - static INIT: Once = ONCE_INIT; + use std::sync::Once; + static INIT: Once = Once::new(); static mut POISONED: bool = false; INIT.call_once(|| { if llvm::LLVMStartMultithreaded() != 1 { diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 88ce75486a202..ed398c2cdedfc 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -344,7 +344,4 @@ pub fn check_crate(tcx: &ty::ctxt, trait_map: ty::TraitMap) { tcx.sess.abort_if_errors(); } -#[cfg(stage0)] -__build_diagnostic_array! 
{ DIAGNOSTICS } -#[cfg(not(stage0))] __build_diagnostic_array! { librustc_typeck, DIAGNOSTICS } diff --git a/src/libstd/dynamic_lib.rs b/src/libstd/dynamic_lib.rs index 8b90fce6fc4fc..ebdc049bc7f5d 100644 --- a/src/libstd/dynamic_lib.rs +++ b/src/libstd/dynamic_lib.rs @@ -211,8 +211,8 @@ mod dl { pub fn check_for_errors_in(f: F) -> Result where F: FnOnce() -> T, { - use sync::{StaticMutex, MUTEX_INIT}; - static LOCK: StaticMutex = MUTEX_INIT; + use sync::StaticMutex; + static LOCK: StaticMutex = StaticMutex::new(); unsafe { // dlerror isn't thread safe, so we need to lock around this entire // sequence diff --git a/src/libstd/env.rs b/src/libstd/env.rs index 126ef38b9188f..0b9c659ea2e5b 100644 --- a/src/libstd/env.rs +++ b/src/libstd/env.rs @@ -23,8 +23,8 @@ use ffi::{OsStr, OsString}; use fmt; use io; use path::{Path, PathBuf}; -use sync::atomic::{AtomicIsize, ATOMIC_ISIZE_INIT, Ordering}; -use sync::{StaticMutex, MUTEX_INIT}; +use sync::atomic::{AtomicIsize, Ordering}; +use sync::StaticMutex; use sys::os as os_imp; /// Returns the current working directory as a `PathBuf`. @@ -70,7 +70,7 @@ pub fn set_current_dir>(p: P) -> io::Result<()> { os_imp::chdir(p.as_ref()) } -static ENV_LOCK: StaticMutex = MUTEX_INIT; +static ENV_LOCK: StaticMutex = StaticMutex::new(); /// An iterator over a snapshot of the environment variables of this process. /// @@ -475,7 +475,7 @@ pub fn current_exe() -> io::Result { os_imp::current_exe() } -static EXIT_STATUS: AtomicIsize = ATOMIC_ISIZE_INIT; +static EXIT_STATUS: AtomicIsize = AtomicIsize::new(0); /// Sets the process exit code /// diff --git a/src/libstd/io/lazy.rs b/src/libstd/io/lazy.rs index df280dab37d46..d398cb88af458 100644 --- a/src/libstd/io/lazy.rs +++ b/src/libstd/io/lazy.rs @@ -11,31 +11,31 @@ use prelude::v1::*; use boxed; -use cell::UnsafeCell; +use cell::Cell; use rt; use sync::{StaticMutex, Arc}; pub struct Lazy { - pub lock: StaticMutex, - pub ptr: UnsafeCell<*mut Arc>, - pub init: fn() -> Arc, + lock: StaticMutex, + ptr: Cell<*mut Arc>, + init: fn() -> Arc, } unsafe impl Sync for Lazy {} -macro_rules! lazy_init { - ($init:expr) => (::io::lazy::Lazy { - lock: ::sync::MUTEX_INIT, - ptr: ::cell::UnsafeCell { value: 0 as *mut _ }, - init: $init, - }) -} - impl Lazy { + pub const fn new(init: fn() -> Arc) -> Lazy { + Lazy { + lock: StaticMutex::new(), + ptr: Cell::new(0 as *mut _), + init: init + } + } + pub fn get(&'static self) -> Option> { let _g = self.lock.lock(); + let ptr = self.ptr.get(); unsafe { - let ptr = *self.ptr.get(); if ptr.is_null() { Some(self.init()) } else if ptr as usize == 1 { @@ -53,14 +53,14 @@ impl Lazy { // `Arc`. 
let registered = rt::at_exit(move || { let g = self.lock.lock(); - let ptr = *self.ptr.get(); - *self.ptr.get() = 1 as *mut _; + let ptr = self.ptr.get(); + self.ptr.set(1 as *mut _); drop(g); drop(Box::from_raw(ptr)) }); let ret = (self.init)(); if registered.is_ok() { - *self.ptr.get() = boxed::into_raw(Box::new(ret.clone())); + self.ptr.set(boxed::into_raw(Box::new(ret.clone()))); } return ret } diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index c9da279810bba..c664def304e09 100644 --- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -36,13 +36,12 @@ pub use self::stdio::{StdoutLock, StderrLock, StdinLock}; #[doc(no_inline, hidden)] pub use self::stdio::{set_panic, set_print}; -#[macro_use] mod lazy; - pub mod prelude; mod buffered; mod cursor; mod error; mod impls; +mod lazy; mod util; mod stdio; diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index a14c472333c6e..9885ccfaae085 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -122,7 +122,7 @@ pub struct StdinLock<'a> { /// locked version, `StdinLock`, implements both `Read` and `BufRead`, however. #[stable(feature = "rust1", since = "1.0.0")] pub fn stdin() -> Stdin { - static INSTANCE: Lazy>> = lazy_init!(stdin_init); + static INSTANCE: Lazy>> = Lazy::new(stdin_init); return Stdin { inner: INSTANCE.get().expect("cannot access stdin during shutdown"), }; @@ -236,7 +236,7 @@ pub struct StdoutLock<'a> { /// The returned handle implements the `Write` trait. #[stable(feature = "rust1", since = "1.0.0")] pub fn stdout() -> Stdout { - static INSTANCE: Lazy>>> = lazy_init!(stdout_init); + static INSTANCE: Lazy>>> = Lazy::new(stdout_init); return Stdout { inner: INSTANCE.get().expect("cannot access stdout during shutdown"), }; @@ -308,7 +308,7 @@ pub struct StderrLock<'a> { /// The returned handle implements the `Write` trait. 
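// Illustrative sketch, not part of this patch: `Lazy` swaps
// `UnsafeCell<*mut T>` for `Cell<*mut T>`. Both provide interior
// mutability behind a shared reference, but `Cell` makes every get/set a
// safe method call, removing the `unsafe` blocks at each use site.
// Miniature of the difference:
use std::cell::Cell;
use std::ptr;

struct Slot {
    ptr: Cell<*mut u32>,
}

impl Slot {
    const fn new() -> Slot {
        Slot { ptr: Cell::new(ptr::null_mut()) }
    }
    fn install(&self, p: *mut u32) { self.ptr.set(p) } // no `unsafe` needed
    fn current(&self) -> *mut u32 { self.ptr.get() }   // ditto
}

fn main() {
    let slot = Slot::new();
    assert!(slot.current().is_null());
    let mut value = 5;
    slot.install(&mut value);
    assert_eq!(unsafe { *slot.current() }, 5);
}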
#[stable(feature = "rust1", since = "1.0.0")] pub fn stderr() -> Stderr { - static INSTANCE: Lazy>> = lazy_init!(stderr_init); + static INSTANCE: Lazy>> = Lazy::new(stderr_init); return Stderr { inner: INSTANCE.get().expect("cannot access stderr during shutdown"), }; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 17bd27337acd2..8305088057c41 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -109,6 +109,7 @@ #![feature(box_syntax)] #![feature(collections)] #![feature(core)] +#![feature(const_fn)] #![feature(into_cow)] #![feature(lang_items)] #![feature(libc)] diff --git a/src/libstd/net/test.rs b/src/libstd/net/test.rs index d77d6f1d6de1e..c6d839d55a865 100644 --- a/src/libstd/net/test.rs +++ b/src/libstd/net/test.rs @@ -12,9 +12,9 @@ use prelude::v1::*; use env; use net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr, ToSocketAddrs}; -use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; +use sync::atomic::{AtomicUsize, Ordering}; -static PORT: AtomicUsize = ATOMIC_USIZE_INIT; +static PORT: AtomicUsize = AtomicUsize::new(0); pub fn next_test_ip4() -> SocketAddr { let port = PORT.fetch_add(1, Ordering::SeqCst) as u16 + base_port(); diff --git a/src/libstd/rand/os.rs b/src/libstd/rand/os.rs index 885adf19ca1fc..a2e6915a6a6f7 100644 --- a/src/libstd/rand/os.rs +++ b/src/libstd/rand/os.rs @@ -96,11 +96,11 @@ mod imp { target_arch = "aarch64", target_arch = "powerpc")))] fn is_getrandom_available() -> bool { - use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering}; - use sync::{Once, ONCE_INIT}; + use sync::atomic::{AtomicBool, Ordering}; + use sync::Once; - static CHECKER: Once = ONCE_INIT; - static AVAILABLE: AtomicBool = ATOMIC_BOOL_INIT; + static CHECKER: Once = Once::new(); + static AVAILABLE: AtomicBool = AtomicBool::new(false); CHECKER.call_once(|| { let mut buf: [u8; 0] = []; diff --git a/src/libstd/rt/args.rs b/src/libstd/rt/args.rs index 2329861f29bc0..d23a124a6ecda 100644 --- a/src/libstd/rt/args.rs +++ b/src/libstd/rt/args.rs @@ -52,10 +52,10 @@ mod imp { use mem; use ffi::CStr; - use sync::{StaticMutex, MUTEX_INIT}; + use sync::StaticMutex; static mut GLOBAL_ARGS_PTR: usize = 0; - static LOCK: StaticMutex = MUTEX_INIT; + static LOCK: StaticMutex = StaticMutex::new(); pub unsafe fn init(argc: isize, argv: *const *const u8) { let args = load_argc_and_argv(argc, argv); diff --git a/src/libstd/rt/at_exit_imp.rs b/src/libstd/rt/at_exit_imp.rs index beb2870807a7e..19a17be4ccf49 100644 --- a/src/libstd/rt/at_exit_imp.rs +++ b/src/libstd/rt/at_exit_imp.rs @@ -20,7 +20,7 @@ use boxed; use boxed::Box; use vec::Vec; use thunk::Thunk; -use sys_common::mutex::{Mutex, MUTEX_INIT}; +use sys_common::mutex::Mutex; type Queue = Vec>; @@ -28,7 +28,7 @@ type Queue = Vec>; // on poisoning and this module needs to operate at a lower level than requiring // the thread infrastructure to be in place (useful on the borders of // initialization/destruction). -static LOCK: Mutex = MUTEX_INIT; +static LOCK: Mutex = Mutex::new(); static mut QUEUE: *mut Queue = 0 as *mut Queue; // The maximum number of times the cleanup routines will be run. While running diff --git a/src/libstd/rt/backtrace.rs b/src/libstd/rt/backtrace.rs index 72cbe2b533bb7..2eadf36a6b4f0 100644 --- a/src/libstd/rt/backtrace.rs +++ b/src/libstd/rt/backtrace.rs @@ -22,7 +22,7 @@ pub use sys::backtrace::write; // For now logging is turned off by default, and this function checks to see // whether the magical environment variable is present to see if it's turned on. 
pub fn log_enabled() -> bool { - static ENABLED: atomic::AtomicIsize = atomic::ATOMIC_ISIZE_INIT; + static ENABLED: atomic::AtomicIsize = atomic::AtomicIsize::new(0); match ENABLED.load(Ordering::SeqCst) { 1 => return false, 2 => return true, diff --git a/src/libstd/rt/unwind/mod.rs b/src/libstd/rt/unwind/mod.rs index 576035ffe9a02..c403976745aa4 100644 --- a/src/libstd/rt/unwind/mod.rs +++ b/src/libstd/rt/unwind/mod.rs @@ -72,7 +72,7 @@ use intrinsics; use libc::c_void; use mem; use sync::atomic::{self, Ordering}; -use sys_common::mutex::{Mutex, MUTEX_INIT}; +use sys_common::mutex::Mutex; // The actual unwinding implementation is cfg'd here, and we've got two current // implementations. One goes through SEH on Windows and the other goes through @@ -89,15 +89,15 @@ pub type Callback = fn(msg: &(Any + Send), file: &'static str, line: u32); // For more information, see below. const MAX_CALLBACKS: usize = 16; static CALLBACKS: [atomic::AtomicUsize; MAX_CALLBACKS] = - [atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT, - atomic::ATOMIC_USIZE_INIT, atomic::ATOMIC_USIZE_INIT]; -static CALLBACK_CNT: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; + [atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0), + atomic::AtomicUsize::new(0), atomic::AtomicUsize::new(0)]; +static CALLBACK_CNT: atomic::AtomicUsize = atomic::AtomicUsize::new(0); thread_local! { static PANICKING: Cell = Cell::new(false) } @@ -243,7 +243,7 @@ fn begin_unwind_inner(msg: Box, // `std::sync` one as accessing TLS can cause weird recursive problems (and // we don't need poison checking). unsafe { - static LOCK: Mutex = MUTEX_INIT; + static LOCK: Mutex = Mutex::new(); static mut INIT: bool = false; LOCK.lock(); if !INIT { diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 31e970a9550c4..b53219db245fc 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -42,7 +42,7 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool { } pub fn min_stack() -> usize { - static MIN: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; + static MIN: atomic::AtomicUsize = atomic::AtomicUsize::new(0); match MIN.load(Ordering::SeqCst) { 0 => {} n => return n - 1, diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs index 8da917916e5cd..f2c389f9426eb 100644 --- a/src/libstd/sync/condvar.rs +++ b/src/libstd/sync/condvar.rs @@ -10,7 +10,7 @@ use prelude::v1::*; -use sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; +use sync::atomic::{AtomicUsize, Ordering}; use sync::{mutex, MutexGuard, PoisonError}; use sys_common::condvar as sys; use sys_common::mutex as sys_mutex; @@ -84,10 +84,7 @@ pub struct StaticCondvar { /// Constant initializer for a statically allocated condition variable. 
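// Illustrative sketch, not part of this patch: the `CALLBACKS` table above
// is a fixed array of atomics, now spelled as sixteen `AtomicUsize::new(0)`
// elements. A smaller table with the same claim-a-slot protocol:
use std::sync::atomic::{AtomicUsize, Ordering};

const MAX_SLOTS: usize = 4;
static SLOTS: [AtomicUsize; MAX_SLOTS] =
    [AtomicUsize::new(0), AtomicUsize::new(0),
     AtomicUsize::new(0), AtomicUsize::new(0)];

/// Claims the first free slot for `value` (nonzero), returning its index.
fn register(value: usize) -> Option<usize> {
    for (i, slot) in SLOTS.iter().enumerate() {
        // CAS from 0 so two threads can never claim the same slot.
        if slot.compare_exchange(0, value, Ordering::SeqCst,
                                 Ordering::SeqCst).is_ok() {
            return Some(i);
        }
    }
    None
}

fn main() {
    assert_eq!(register(1), Some(0));
    assert_eq!(register(2), Some(1));
}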
#[unstable(feature = "static_condvar", reason = "may be merged with Condvar in the future")] -pub const CONDVAR_INIT: StaticCondvar = StaticCondvar { - inner: sys::CONDVAR_INIT, - mutex: ATOMIC_USIZE_INIT, -}; +pub const CONDVAR_INIT: StaticCondvar = StaticCondvar::new(); impl Condvar { /// Creates a new condition variable which is ready to be waited on and @@ -96,7 +93,7 @@ impl Condvar { pub fn new() -> Condvar { Condvar { inner: box StaticCondvar { - inner: unsafe { sys::Condvar::new() }, + inner: sys::Condvar::new(), mutex: AtomicUsize::new(0), } } @@ -234,6 +231,16 @@ impl Drop for Condvar { } impl StaticCondvar { + /// Creates a new condition variable + #[unstable(feature = "static_condvar", + reason = "may be merged with Condvar in the future")] + pub const fn new() -> StaticCondvar { + StaticCondvar { + inner: sys::Condvar::new(), + mutex: AtomicUsize::new(0), + } + } + /// Blocks the current thread until this condition variable receives a /// notification. /// @@ -388,10 +395,10 @@ impl StaticCondvar { mod tests { use prelude::v1::*; - use super::{StaticCondvar, CONDVAR_INIT}; + use super::StaticCondvar; use sync::mpsc::channel; - use sync::{StaticMutex, MUTEX_INIT, Condvar, Mutex, Arc}; - use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + use sync::{StaticMutex, Condvar, Mutex, Arc}; + use sync::atomic::{AtomicUsize, Ordering}; use thread; use time::Duration; use u32; @@ -405,7 +412,7 @@ mod tests { #[test] fn static_smoke() { - static C: StaticCondvar = CONDVAR_INIT; + static C: StaticCondvar = StaticCondvar::new(); C.notify_one(); C.notify_all(); unsafe { C.destroy(); } @@ -413,8 +420,8 @@ mod tests { #[test] fn notify_one() { - static C: StaticCondvar = CONDVAR_INIT; - static M: StaticMutex = MUTEX_INIT; + static C: StaticCondvar = StaticCondvar::new(); + static M: StaticMutex = StaticMutex::new(); let g = M.lock().unwrap(); let _t = thread::spawn(move|| { @@ -464,8 +471,8 @@ mod tests { #[test] fn wait_timeout_ms() { - static C: StaticCondvar = CONDVAR_INIT; - static M: StaticMutex = MUTEX_INIT; + static C: StaticCondvar = StaticCondvar::new(); + static M: StaticMutex = StaticMutex::new(); let g = M.lock().unwrap(); let (g, _no_timeout) = C.wait_timeout_ms(g, 1).unwrap(); @@ -483,9 +490,9 @@ mod tests { #[test] fn wait_timeout_with() { - static C: StaticCondvar = CONDVAR_INIT; - static M: StaticMutex = MUTEX_INIT; - static S: AtomicUsize = ATOMIC_USIZE_INIT; + static C: StaticCondvar = StaticCondvar::new(); + static M: StaticMutex = StaticMutex::new(); + static S: AtomicUsize = AtomicUsize::new(0); let g = M.lock().unwrap(); let (g, success) = C.wait_timeout_with(g, Duration::new(0, 1000), |_| { @@ -530,9 +537,9 @@ mod tests { #[test] #[should_panic] fn two_mutexes() { - static M1: StaticMutex = MUTEX_INIT; - static M2: StaticMutex = MUTEX_INIT; - static C: StaticCondvar = CONDVAR_INIT; + static M1: StaticMutex = StaticMutex::new(); + static M2: StaticMutex = StaticMutex::new(); + static C: StaticCondvar = StaticCondvar::new(); let mut g = M1.lock().unwrap(); let _t = thread::spawn(move|| { diff --git a/src/libstd/sync/mpsc/blocking.rs b/src/libstd/sync/mpsc/blocking.rs index 2e4155ea35128..0e5a98591168b 100644 --- a/src/libstd/sync/mpsc/blocking.rs +++ b/src/libstd/sync/mpsc/blocking.rs @@ -11,7 +11,7 @@ //! Generic support for building blocking abstractions. 
use thread::{self, Thread}; -use sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering}; +use sync::atomic::{AtomicBool, Ordering}; use sync::Arc; use marker::{Sync, Send}; use mem; @@ -41,7 +41,7 @@ impl !Sync for WaitToken {} pub fn tokens() -> (WaitToken, SignalToken) { let inner = Arc::new(Inner { thread: thread::current(), - woken: ATOMIC_BOOL_INIT, + woken: AtomicBool::new(false), }); let wait_token = WaitToken { inner: inner.clone(), diff --git a/src/libstd/sync/mutex.rs b/src/libstd/sync/mutex.rs index f9ed7c863d126..fd22d723ebdb1 100644 --- a/src/libstd/sync/mutex.rs +++ b/src/libstd/sync/mutex.rs @@ -178,17 +178,14 @@ impl<'a, T: ?Sized> !marker::Send for MutexGuard<'a, T> {} /// other mutex constants. #[unstable(feature = "std_misc", reason = "may be merged with Mutex in the future")] -pub const MUTEX_INIT: StaticMutex = StaticMutex { - lock: sys::MUTEX_INIT, - poison: poison::FLAG_INIT, -}; +pub const MUTEX_INIT: StaticMutex = StaticMutex::new(); impl Mutex { /// Creates a new mutex in an unlocked state ready for use. #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> Mutex { Mutex { - inner: box MUTEX_INIT, + inner: box StaticMutex::new(), data: UnsafeCell::new(t), } } @@ -271,9 +268,19 @@ impl fmt::Debug for Mutex { struct Dummy(UnsafeCell<()>); unsafe impl Sync for Dummy {} -static DUMMY: Dummy = Dummy(UnsafeCell { value: () }); +static DUMMY: Dummy = Dummy(UnsafeCell::new(())); impl StaticMutex { + /// Creates a new mutex in an unlocked state ready for use. + #[unstable(feature = "std_misc", + reason = "may be merged with Mutex in the future")] + pub const fn new() -> StaticMutex { + StaticMutex { + lock: sys::Mutex::new(), + poison: poison::Flag::new(), + } + } + /// Acquires this lock, see `Mutex::lock` #[inline] #[unstable(feature = "std_misc", @@ -365,7 +372,7 @@ mod tests { use prelude::v1::*; use sync::mpsc::channel; - use sync::{Arc, Mutex, StaticMutex, MUTEX_INIT, Condvar}; + use sync::{Arc, Mutex, StaticMutex, Condvar}; use thread; struct Packet(Arc<(Mutex, Condvar)>); @@ -382,7 +389,7 @@ mod tests { #[test] fn smoke_static() { - static M: StaticMutex = MUTEX_INIT; + static M: StaticMutex = StaticMutex::new(); unsafe { drop(M.lock().unwrap()); drop(M.lock().unwrap()); @@ -392,7 +399,7 @@ mod tests { #[test] fn lots_and_lots() { - static M: StaticMutex = MUTEX_INIT; + static M: StaticMutex = StaticMutex::new(); static mut CNT: u32 = 0; const J: u32 = 1000; const K: u32 = 3; diff --git a/src/libstd/sync/once.rs b/src/libstd/sync/once.rs index 57baedaad9c8e..269affff20855 100644 --- a/src/libstd/sync/once.rs +++ b/src/libstd/sync/once.rs @@ -16,8 +16,8 @@ use prelude::v1::*; use isize; -use sync::atomic::{AtomicIsize, Ordering, ATOMIC_ISIZE_INIT}; -use sync::{StaticMutex, MUTEX_INIT}; +use sync::atomic::{AtomicIsize, Ordering}; +use sync::StaticMutex; /// A synchronization primitive which can be used to run a one-time global /// initialization. Useful for one-time initialization for FFI or related @@ -44,13 +44,19 @@ pub struct Once { /// Initialization value for static `Once` values. #[stable(feature = "rust1", since = "1.0.0")] -pub const ONCE_INIT: Once = Once { - mutex: MUTEX_INIT, - cnt: ATOMIC_ISIZE_INIT, - lock_cnt: ATOMIC_ISIZE_INIT, -}; +pub const ONCE_INIT: Once = Once::new(); impl Once { + /// Creates a new `Once` value. 
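// Illustrative sketch, not part of this patch: the eventual endpoint of
// this migration in today's std. `Mutex::new` itself became `const`
// (stable since Rust 1.63), so a static lock needs neither `StaticMutex`
// nor `MUTEX_INIT`:
use std::sync::Mutex;

static M: Mutex<u32> = Mutex::new(0);

fn main() {
    *M.lock().unwrap() += 1;
    assert_eq!(*M.lock().unwrap(), 1);
}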
+ #[unstable(feature = "std_misc")] + pub const fn new() -> Once { + Once { + mutex: StaticMutex::new(), + cnt: AtomicIsize::new(0), + lock_cnt: AtomicIsize::new(0), + } + } + /// Performs an initialization routine once and only once. The given closure /// will be executed if this is the first time `call_once` has been called, /// and otherwise the routine will *not* be invoked. @@ -129,12 +135,12 @@ mod tests { use prelude::v1::*; use thread; - use super::{ONCE_INIT, Once}; + use super::Once; use sync::mpsc::channel; #[test] fn smoke_once() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); let mut a = 0; O.call_once(|| a += 1); assert_eq!(a, 1); @@ -144,7 +150,7 @@ mod tests { #[test] fn stampede_once() { - static O: Once = ONCE_INIT; + static O: Once = Once::new(); static mut run: bool = false; let (tx, rx) = channel(); diff --git a/src/libstd/sync/rwlock.rs b/src/libstd/sync/rwlock.rs index 36f6fbf3b72d5..e7c3d744c179a 100644 --- a/src/libstd/sync/rwlock.rs +++ b/src/libstd/sync/rwlock.rs @@ -102,10 +102,7 @@ pub struct StaticRwLock { /// Constant initialization for a statically-initialized rwlock. #[unstable(feature = "std_misc", reason = "may be merged with RwLock in the future")] -pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock { - lock: sys::RWLOCK_INIT, - poison: poison::FLAG_INIT, -}; +pub const RW_LOCK_INIT: StaticRwLock = StaticRwLock::new(); /// RAII structure used to release the shared read access of a lock when /// dropped. @@ -142,7 +139,7 @@ impl RwLock { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn new(t: T) -> RwLock { - RwLock { inner: box RW_LOCK_INIT, data: UnsafeCell::new(t) } + RwLock { inner: box StaticRwLock::new(), data: UnsafeCell::new(t) } } } @@ -280,9 +277,19 @@ impl fmt::Debug for RwLock { struct Dummy(UnsafeCell<()>); unsafe impl Sync for Dummy {} -static DUMMY: Dummy = Dummy(UnsafeCell { value: () }); +static DUMMY: Dummy = Dummy(UnsafeCell::new(())); impl StaticRwLock { + /// Creates a new rwlock. + #[unstable(feature = "std_misc", + reason = "may be merged with RwLock in the future")] + pub const fn new() -> StaticRwLock { + StaticRwLock { + lock: sys::RWLock::new(), + poison: poison::Flag::new(), + } + } + /// Locks this rwlock with shared read access, blocking the current thread /// until it can be acquired. /// @@ -420,7 +427,7 @@ mod tests { use rand::{self, Rng}; use sync::mpsc::channel; use thread; - use sync::{Arc, RwLock, StaticRwLock, TryLockError, RW_LOCK_INIT}; + use sync::{Arc, RwLock, StaticRwLock, TryLockError}; #[test] fn smoke() { @@ -433,7 +440,7 @@ mod tests { #[test] fn static_smoke() { - static R: StaticRwLock = RW_LOCK_INIT; + static R: StaticRwLock = StaticRwLock::new(); drop(R.read().unwrap()); drop(R.write().unwrap()); drop((R.read().unwrap(), R.read().unwrap())); @@ -443,7 +450,7 @@ mod tests { #[test] fn frob() { - static R: StaticRwLock = RW_LOCK_INIT; + static R: StaticRwLock = StaticRwLock::new(); const N: usize = 10; const M: usize = 1000; diff --git a/src/libstd/sys/common/condvar.rs b/src/libstd/sys/common/condvar.rs index 9f46b0c38248f..33734a88cf32b 100644 --- a/src/libstd/sys/common/condvar.rs +++ b/src/libstd/sys/common/condvar.rs @@ -20,16 +20,12 @@ use sys::condvar as imp; /// this type. pub struct Condvar(imp::Condvar); -/// Static initializer for condition variables. -pub const CONDVAR_INIT: Condvar = Condvar(imp::CONDVAR_INIT); - impl Condvar { /// Creates a new condition variable for use. 
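// Illustrative sketch, not part of this patch: with `Once::new()` being
// `const`, the canonical one-time-init pattern drops `ONCE_INIT`:
use std::sync::Once;

fn expensive_init() {
    println!("runs exactly once");
}

fn do_work() {
    static INIT: Once = Once::new(); // was: ONCE_INIT
    INIT.call_once(expensive_init);
}

fn main() {
    do_work();
    do_work(); // `expensive_init` is not run again
}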
/// /// Behavior is undefined if the condition variable is moved after it is /// first used with any of the functions below. - #[inline] - pub unsafe fn new() -> Condvar { Condvar(imp::Condvar::new()) } + pub const fn new() -> Condvar { Condvar(imp::Condvar::new()) } /// Signals one waiter on this condition variable to wake up. #[inline] diff --git a/src/libstd/sys/common/mutex.rs b/src/libstd/sys/common/mutex.rs index 1f9dd54192cca..5a6dfe7fb1a15 100644 --- a/src/libstd/sys/common/mutex.rs +++ b/src/libstd/sys/common/mutex.rs @@ -20,10 +20,13 @@ pub struct Mutex(imp::Mutex); unsafe impl Sync for Mutex {} -/// Constant initializer for statically allocated mutexes. -pub const MUTEX_INIT: Mutex = Mutex(imp::MUTEX_INIT); - impl Mutex { + /// Creates a new mutex for use. + /// + /// Behavior is undefined if the mutex is moved after it is + /// first used with any of the functions below. + pub const fn new() -> Mutex { Mutex(imp::Mutex::new()) } + /// Locks the mutex blocking the current thread until it is available. /// /// Behavior is undefined if the mutex has been moved between this and any diff --git a/src/libstd/sys/common/poison.rs b/src/libstd/sys/common/poison.rs index 67679c11a9858..48c8198272588 100644 --- a/src/libstd/sys/common/poison.rs +++ b/src/libstd/sys/common/poison.rs @@ -10,26 +10,28 @@ use prelude::v1::*; -use marker::Reflect; -use cell::UnsafeCell; +use cell::Cell; use error::{Error}; use fmt; +use marker::Reflect; use thread; -pub struct Flag { failed: UnsafeCell } +pub struct Flag { failed: Cell } // This flag is only ever accessed with a lock previously held. Note that this // a totally private structure. unsafe impl Send for Flag {} unsafe impl Sync for Flag {} -pub const FLAG_INIT: Flag = Flag { failed: UnsafeCell { value: false } }; - impl Flag { + pub const fn new() -> Flag { + Flag { failed: Cell::new(false) } + } + #[inline] pub fn borrow(&self) -> LockResult { let ret = Guard { panicking: thread::panicking() }; - if unsafe { *self.failed.get() } { + if self.get() { Err(PoisonError::new(ret)) } else { Ok(ret) @@ -39,13 +41,13 @@ impl Flag { #[inline] pub fn done(&self, guard: &Guard) { if !guard.panicking && thread::panicking() { - unsafe { *self.failed.get() = true; } + self.failed.set(true); } } #[inline] pub fn get(&self) -> bool { - unsafe { *self.failed.get() } + self.failed.get() } } diff --git a/src/libstd/sys/common/remutex.rs b/src/libstd/sys/common/remutex.rs index 1a467580672b3..72f8453233a41 100644 --- a/src/libstd/sys/common/remutex.rs +++ b/src/libstd/sys/common/remutex.rs @@ -54,7 +54,7 @@ impl ReentrantMutex { unsafe { let mut mutex = ReentrantMutex { inner: box sys::ReentrantMutex::uninitialized(), - poison: poison::FLAG_INIT, + poison: poison::Flag::new(), data: t, }; mutex.inner.init(); diff --git a/src/libstd/sys/common/rwlock.rs b/src/libstd/sys/common/rwlock.rs index 725a09bcc86a9..71a4f01ec4cab 100644 --- a/src/libstd/sys/common/rwlock.rs +++ b/src/libstd/sys/common/rwlock.rs @@ -17,10 +17,13 @@ use sys::rwlock as imp; /// safer types at the top level of this crate instead of this type. pub struct RWLock(imp::RWLock); -/// Constant initializer for static RWLocks. -pub const RWLOCK_INIT: RWLock = RWLock(imp::RWLOCK_INIT); - impl RWLock { + /// Creates a new reader-writer lock for use. + /// + /// Behavior is undefined if the reader-writer lock is moved after it is + /// first used with any of the functions below. 
+ pub const fn new() -> RWLock { RWLock(imp::RWLock::new()) } + /// Acquires shared access to the underlying lock, blocking the current /// thread to do so. /// diff --git a/src/libstd/sys/common/thread_local.rs b/src/libstd/sys/common/thread_local.rs index 618a389110ac1..eb4cbfcfbcdda 100644 --- a/src/libstd/sys/common/thread_local.rs +++ b/src/libstd/sys/common/thread_local.rs @@ -86,19 +86,13 @@ use sys::thread_local as imp; /// } /// ``` pub struct StaticKey { - /// Inner static TLS key (internals), created with by `INIT_INNER` in this - /// module. - pub inner: StaticKeyInner, + /// Inner static TLS key (internals). + key: AtomicUsize, /// Destructor for the TLS value. /// /// See `Key::new` for information about when the destructor runs and how /// it runs. - pub dtor: Option, -} - -/// Inner contents of `StaticKey`, created by the `INIT_INNER` constant. -pub struct StaticKeyInner { - key: AtomicUsize, + dtor: Option, } /// A type for a safely managed OS-based TLS slot. @@ -129,19 +123,16 @@ pub struct Key { /// Constant initialization value for static TLS keys. /// /// This value specifies no destructor by default. -pub const INIT: StaticKey = StaticKey { - inner: INIT_INNER, - dtor: None, -}; - -/// Constant initialization value for the inner part of static TLS keys. -/// -/// This value allows specific configuration of the destructor for a TLS key. -pub const INIT_INNER: StaticKeyInner = StaticKeyInner { - key: atomic::ATOMIC_USIZE_INIT, -}; +pub const INIT: StaticKey = StaticKey::new(None); impl StaticKey { + pub const fn new(dtor: Option) -> StaticKey { + StaticKey { + key: atomic::AtomicUsize::new(0), + dtor: dtor + } + } + /// Gets the value associated with this TLS key /// /// This will lazily allocate a TLS key from the OS if one has not already @@ -164,7 +155,7 @@ impl StaticKey { /// Note that this does *not* run the user-provided destructor if one was /// specified at definition time. Doing so must be done manually. 
pub unsafe fn destroy(&self) { - match self.inner.key.swap(0, Ordering::SeqCst) { + match self.key.swap(0, Ordering::SeqCst) { 0 => {} n => { imp::destroy(n as imp::Key) } } @@ -172,7 +163,7 @@ impl StaticKey { #[inline] unsafe fn key(&self) -> imp::Key { - match self.inner.key.load(Ordering::Relaxed) { + match self.key.load(Ordering::Relaxed) { 0 => self.lazy_init() as imp::Key, n => n as imp::Key } @@ -197,7 +188,7 @@ impl StaticKey { key2 }; assert!(key != 0); - match self.inner.key.compare_and_swap(0, key as usize, Ordering::SeqCst) { + match self.key.compare_and_swap(0, key as usize, Ordering::SeqCst) { // The CAS succeeded, so we've created the actual key 0 => key as usize, // If someone beat us to the punch, use their key instead @@ -245,7 +236,7 @@ impl Drop for Key { #[cfg(test)] mod tests { use prelude::v1::*; - use super::{Key, StaticKey, INIT_INNER}; + use super::{Key, StaticKey}; fn assert_sync() {} fn assert_send() {} @@ -267,8 +258,8 @@ mod tests { #[test] fn statik() { - static K1: StaticKey = StaticKey { inner: INIT_INNER, dtor: None }; - static K2: StaticKey = StaticKey { inner: INIT_INNER, dtor: None }; + static K1: StaticKey = StaticKey::new(None); + static K2: StaticKey = StaticKey::new(None); unsafe { assert!(K1.get().is_null()); diff --git a/src/libstd/sys/unix/backtrace.rs b/src/libstd/sys/unix/backtrace.rs index 135ae1bf9163b..b23a3eee1a173 100644 --- a/src/libstd/sys/unix/backtrace.rs +++ b/src/libstd/sys/unix/backtrace.rs @@ -91,7 +91,7 @@ use io; use libc; use mem; use str; -use sync::{StaticMutex, MUTEX_INIT}; +use sync::StaticMutex; use sys_common::backtrace::*; @@ -117,7 +117,7 @@ pub fn write(w: &mut Write) -> io::Result<()> { // while it doesn't requires lock for work as everything is // local, it still displays much nicer backtraces when a // couple of threads panic simultaneously - static LOCK: StaticMutex = MUTEX_INIT; + static LOCK: StaticMutex = StaticMutex::new(); let _g = LOCK.lock(); try!(writeln!(w, "stack backtrace:")); @@ -148,7 +148,7 @@ pub fn write(w: &mut Write) -> io::Result<()> { // is semi-reasonable in terms of printing anyway, and we know that all // I/O done here is blocking I/O, not green I/O, so we don't have to // worry about this being a native vs green mutex. 
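// Illustrative sketch, not part of this patch: the `lazy_init` race shown
// above. Each thread allocates a key, then tries to CAS it into the
// static; the loser frees its key and adopts the winner's.
// (`allocate_key`/`free_key` are stand-ins for the OS calls, not real APIs.)
use std::sync::atomic::{AtomicUsize, Ordering};

static KEY: AtomicUsize = AtomicUsize::new(0); // 0 == not yet allocated

fn allocate_key() -> usize { 42 } // stand-in for pthread_key_create etc.
fn free_key(_key: usize) {}       // stand-in for pthread_key_delete etc.

fn key() -> usize {
    match KEY.load(Ordering::Relaxed) {
        0 => lazy_init(),
        n => n,
    }
}

fn lazy_init() -> usize {
    let key = allocate_key();
    assert!(key != 0);
    match KEY.compare_exchange(0, key, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(_) => key,      // we installed our key
        Err(winner) => {   // someone beat us to it; use their key
            free_key(key);
            winner
        }
    }
}

fn main() {
    assert_eq!(key(), 42);
}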
- static LOCK: StaticMutex = MUTEX_INIT; + static LOCK: StaticMutex = StaticMutex::new(); let _g = LOCK.lock(); try!(writeln!(w, "stack backtrace:")); diff --git a/src/libstd/sys/unix/condvar.rs b/src/libstd/sys/unix/condvar.rs index 29a13cc6be731..c8708190a2e18 100644 --- a/src/libstd/sys/unix/condvar.rs +++ b/src/libstd/sys/unix/condvar.rs @@ -23,13 +23,8 @@ pub struct Condvar { inner: UnsafeCell } unsafe impl Send for Condvar {} unsafe impl Sync for Condvar {} -pub const CONDVAR_INIT: Condvar = Condvar { - inner: UnsafeCell { value: ffi::PTHREAD_COND_INITIALIZER }, -}; - impl Condvar { - #[inline] - pub unsafe fn new() -> Condvar { + pub const fn new() -> Condvar { // Might be moved and address is changing it is better to avoid // initialization of potentially opaque OS data before it landed Condvar { inner: UnsafeCell::new(ffi::PTHREAD_COND_INITIALIZER) } diff --git a/src/libstd/sys/unix/mutex.rs b/src/libstd/sys/unix/mutex.rs index 70d14f63dbcc6..6eed403dfc080 100644 --- a/src/libstd/sys/unix/mutex.rs +++ b/src/libstd/sys/unix/mutex.rs @@ -21,20 +21,15 @@ pub unsafe fn raw(m: &Mutex) -> *mut ffi::pthread_mutex_t { m.inner.get() } -pub const MUTEX_INIT: Mutex = Mutex { - inner: UnsafeCell { value: ffi::PTHREAD_MUTEX_INITIALIZER }, -}; - unsafe impl Send for Mutex {} unsafe impl Sync for Mutex {} #[allow(dead_code)] // sys isn't exported yet impl Mutex { - #[inline] - pub unsafe fn new() -> Mutex { + pub const fn new() -> Mutex { // Might be moved and address is changing it is better to avoid // initialization of potentially opaque OS data before it landed - MUTEX_INIT + Mutex { inner: UnsafeCell::new(ffi::PTHREAD_MUTEX_INITIALIZER) } } #[inline] pub unsafe fn lock(&self) { diff --git a/src/libstd/sys/unix/os.rs b/src/libstd/sys/unix/os.rs index 5919502abde0d..5178d7b8fb1a0 100644 --- a/src/libstd/sys/unix/os.rs +++ b/src/libstd/sys/unix/os.rs @@ -216,8 +216,8 @@ pub fn current_exe() -> io::Result { #[cfg(any(target_os = "bitrig", target_os = "openbsd"))] pub fn current_exe() -> io::Result { - use sync::{StaticMutex, MUTEX_INIT}; - static LOCK: StaticMutex = MUTEX_INIT; + use sync::StaticMutex; + static LOCK: StaticMutex = StaticMutex::new(); extern { fn rust_current_exe() -> *const c_char; diff --git a/src/libstd/sys/unix/rwlock.rs b/src/libstd/sys/unix/rwlock.rs index 7bb9fb68c14f0..ee687f350f021 100644 --- a/src/libstd/sys/unix/rwlock.rs +++ b/src/libstd/sys/unix/rwlock.rs @@ -16,14 +16,13 @@ use sys::sync as ffi; pub struct RWLock { inner: UnsafeCell } -pub const RWLOCK_INIT: RWLock = RWLock { - inner: UnsafeCell { value: ffi::PTHREAD_RWLOCK_INITIALIZER }, -}; - unsafe impl Send for RWLock {} unsafe impl Sync for RWLock {} impl RWLock { + pub const fn new() -> RWLock { + RWLock { inner: UnsafeCell::new(ffi::PTHREAD_RWLOCK_INITIALIZER) } + } #[inline] pub unsafe fn read(&self) { let r = ffi::pthread_rwlock_rdlock(self.inner.get()); diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs index 0cb5a06e6b62f..bb0e12e8df899 100644 --- a/src/libstd/sys/unix/thread.rs +++ b/src/libstd/sys/unix/thread.rs @@ -330,10 +330,10 @@ pub mod guard { #[cfg(target_os = "linux")] fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize { use dynamic_lib::DynamicLibrary; - use sync::{Once, ONCE_INIT}; + use sync::Once; type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t; - static INIT: Once = ONCE_INIT; + static INIT: Once = Once::new(); static mut __pthread_get_minstack: Option = None; INIT.call_once(|| { diff --git a/src/libstd/sys/unix/thread_local.rs 
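// Illustrative sketch, not part of this patch: the pthread-backed
// primitives above wrap an OS initializer in `UnsafeCell`, and because
// `UnsafeCell::new` is `const`, the wrapper's `new` can be a `const fn`
// instead of the removed `*_INIT` constants. Generic shape
// (`RAW_INITIALIZER` is a stand-in for PTHREAD_MUTEX_INITIALIZER):
use std::cell::UnsafeCell;

const RAW_INITIALIZER: u64 = 0;

pub struct RawMutex {
    inner: UnsafeCell<u64>,
}

// Only sound because the (hypothetical) OS calls synchronize all access.
unsafe impl Sync for RawMutex {}

impl RawMutex {
    pub const fn new() -> RawMutex {
        RawMutex { inner: UnsafeCell::new(RAW_INITIALIZER) }
    }
}

static LOCK: RawMutex = RawMutex::new();

fn main() {
    let _raw: *mut u64 = LOCK.inner.get();
}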
b/src/libstd/sys/unix/thread_local.rs index 9b7079ee10833..3afe84b25804c 100644 --- a/src/libstd/sys/unix/thread_local.rs +++ b/src/libstd/sys/unix/thread_local.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#![allow(dead_code)] // sys isn't exported yet + use prelude::v1::*; use libc::c_int; diff --git a/src/libstd/sys/unix/time.rs b/src/libstd/sys/unix/time.rs index 16dfd3eebd02e..6b84baeca7dc9 100644 --- a/src/libstd/sys/unix/time.rs +++ b/src/libstd/sys/unix/time.rs @@ -17,7 +17,7 @@ mod inner { use libc; use time::Duration; use ops::Sub; - use sync::{Once, ONCE_INIT}; + use sync::Once; use super::NSEC_PER_SEC; pub struct SteadyTime { @@ -42,7 +42,7 @@ mod inner { numer: 0, denom: 0, }; - static ONCE: Once = ONCE_INIT; + static ONCE: Once = Once::new(); unsafe { ONCE.call_once(|| { diff --git a/src/libstd/sys/windows/backtrace.rs b/src/libstd/sys/windows/backtrace.rs index d94dfdeeea494..3f595762fc71b 100644 --- a/src/libstd/sys/windows/backtrace.rs +++ b/src/libstd/sys/windows/backtrace.rs @@ -36,7 +36,7 @@ use mem; use path::Path; use ptr; use str; -use sync::{StaticMutex, MUTEX_INIT}; +use sync::StaticMutex; use sys_common::backtrace::*; @@ -295,7 +295,7 @@ impl Drop for Cleanup { pub fn write(w: &mut Write) -> io::Result<()> { // According to windows documentation, all dbghelp functions are // single-threaded. - static LOCK: StaticMutex = MUTEX_INIT; + static LOCK: StaticMutex = StaticMutex::new(); let _g = LOCK.lock(); // Open up dbghelp.dll, we don't link to it explicitly because it can't diff --git a/src/libstd/sys/windows/c.rs b/src/libstd/sys/windows/c.rs index e9b850856e1f8..3c9b2ef1b986e 100644 --- a/src/libstd/sys/windows/c.rs +++ b/src/libstd/sys/windows/c.rs @@ -340,10 +340,10 @@ pub mod compat { -> $rettype:ty { $fallback:expr }) => ( #[inline(always)] pub unsafe fn $symbol($($argname: $argtype),*) -> $rettype { - use sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + use sync::atomic::{AtomicUsize, Ordering}; use mem; - static PTR: AtomicUsize = ATOMIC_USIZE_INIT; + static PTR: AtomicUsize = AtomicUsize::new(0); fn load() -> usize { ::sys::c::compat::store_func(&PTR, diff --git a/src/libstd/sys/windows/condvar.rs b/src/libstd/sys/windows/condvar.rs index 8bb2326e4d6b9..baa7d1ceea331 100644 --- a/src/libstd/sys/windows/condvar.rs +++ b/src/libstd/sys/windows/condvar.rs @@ -22,13 +22,10 @@ pub struct Condvar { inner: UnsafeCell } unsafe impl Send for Condvar {} unsafe impl Sync for Condvar {} -pub const CONDVAR_INIT: Condvar = Condvar { - inner: UnsafeCell { value: ffi::CONDITION_VARIABLE_INIT } -}; - impl Condvar { - #[inline] - pub unsafe fn new() -> Condvar { CONDVAR_INIT } + pub const fn new() -> Condvar { + Condvar { inner: UnsafeCell::new(ffi::CONDITION_VARIABLE_INIT) } + } #[inline] pub unsafe fn wait(&self, mutex: &Mutex) { diff --git a/src/libstd/sys/windows/mutex.rs b/src/libstd/sys/windows/mutex.rs index 9d2624f94180e..29e370698ad74 100644 --- a/src/libstd/sys/windows/mutex.rs +++ b/src/libstd/sys/windows/mutex.rs @@ -16,10 +16,6 @@ use mem; pub struct Mutex { inner: UnsafeCell } -pub const MUTEX_INIT: Mutex = Mutex { - inner: UnsafeCell { value: ffi::SRWLOCK_INIT } -}; - unsafe impl Send for Mutex {} unsafe impl Sync for Mutex {} @@ -41,6 +37,9 @@ pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK { // is there there are no guarantees of fairness. 
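// Illustrative sketch, not part of this patch: the Windows `compat` shim
// above caches a resolved function address in a static `AtomicUsize`,
// where 0 means "not looked up yet". Portable miniature of that cache
// (`lookup` stands in for the real GetProcAddress-style resolution):
use std::sync::atomic::{AtomicUsize, Ordering};

static PTR: AtomicUsize = AtomicUsize::new(0);

fn lookup() -> usize { 0xdead_beef } // stand-in for the real resolution

fn cached() -> usize {
    match PTR.load(Ordering::SeqCst) {
        0 => {
            let addr = lookup();
            // Benign race: several threads may resolve concurrently, but
            // they all store the same answer.
            PTR.store(addr, Ordering::SeqCst);
            addr
        }
        addr => addr,
    }
}

fn main() {
    assert_eq!(cached(), 0xdead_beef);
}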
impl Mutex { + pub const fn new() -> Mutex { + Mutex { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) } + } #[inline] pub unsafe fn lock(&self) { ffi::AcquireSRWLockExclusive(self.inner.get()) diff --git a/src/libstd/sys/windows/net.rs b/src/libstd/sys/windows/net.rs index 6bbcd968157ab..71e064bcc6b82 100644 --- a/src/libstd/sys/windows/net.rs +++ b/src/libstd/sys/windows/net.rs @@ -18,7 +18,7 @@ use net::SocketAddr; use num::One; use ops::Neg; use rt; -use sync::{Once, ONCE_INIT}; +use sync::Once; use sys::c; use sys_common::{AsInner, FromInner}; @@ -29,7 +29,7 @@ pub struct Socket(libc::SOCKET); /// Checks whether the Windows socket interface has been started already, and /// if not, starts it. pub fn init() { - static START: Once = ONCE_INIT; + static START: Once = Once::new(); START.call_once(|| unsafe { let mut data: c::WSADATA = mem::zeroed(); diff --git a/src/libstd/sys/windows/process.rs b/src/libstd/sys/windows/process.rs index bc4762c197e14..178b6ea42d2bb 100644 --- a/src/libstd/sys/windows/process.rs +++ b/src/libstd/sys/windows/process.rs @@ -24,7 +24,7 @@ use mem; use os::windows::ffi::OsStrExt; use path::Path; use ptr; -use sync::{StaticMutex, MUTEX_INIT}; +use sync::StaticMutex; use sys::c; use sys::fs::{OpenOptions, File}; use sys::handle::Handle; @@ -169,7 +169,7 @@ impl Process { try!(unsafe { // `CreateProcess` is racy! // http://support.microsoft.com/kb/315939 - static CREATE_PROCESS_LOCK: StaticMutex = MUTEX_INIT; + static CREATE_PROCESS_LOCK: StaticMutex = StaticMutex::new(); let _lock = CREATE_PROCESS_LOCK.lock(); cvt(CreateProcessW(ptr::null(), diff --git a/src/libstd/sys/windows/rwlock.rs b/src/libstd/sys/windows/rwlock.rs index 009605535a023..e727638e3e9b5 100644 --- a/src/libstd/sys/windows/rwlock.rs +++ b/src/libstd/sys/windows/rwlock.rs @@ -15,14 +15,13 @@ use sys::sync as ffi; pub struct RWLock { inner: UnsafeCell } -pub const RWLOCK_INIT: RWLock = RWLock { - inner: UnsafeCell { value: ffi::SRWLOCK_INIT } -}; - unsafe impl Send for RWLock {} unsafe impl Sync for RWLock {} impl RWLock { + pub const fn new() -> RWLock { + RWLock { inner: UnsafeCell::new(ffi::SRWLOCK_INIT) } + } #[inline] pub unsafe fn read(&self) { ffi::AcquireSRWLockShared(self.inner.get()) diff --git a/src/libstd/sys/windows/thread_local.rs b/src/libstd/sys/windows/thread_local.rs index ea5af3f2830e8..a3d522d1757a9 100644 --- a/src/libstd/sys/windows/thread_local.rs +++ b/src/libstd/sys/windows/thread_local.rs @@ -15,7 +15,7 @@ use libc::types::os::arch::extra::{DWORD, LPVOID, BOOL}; use boxed; use ptr; use rt; -use sys_common::mutex::{MUTEX_INIT, Mutex}; +use sys_common::mutex::Mutex; pub type Key = DWORD; pub type Dtor = unsafe extern fn(*mut u8); @@ -58,7 +58,7 @@ pub type Dtor = unsafe extern fn(*mut u8); // on poisoning and this module needs to operate at a lower level than requiring // the thread infrastructure to be in place (useful on the borders of // initialization/destruction). 
-static DTOR_LOCK: Mutex = MUTEX_INIT; +static DTOR_LOCK: Mutex = Mutex::new(); static mut DTORS: *mut Vec<(Key, Dtor)> = 0 as *mut _; // ------------------------------------------------------------------------- diff --git a/src/libstd/sys/windows/time.rs b/src/libstd/sys/windows/time.rs index e64df54a0fa03..f5a70ccc90743 100644 --- a/src/libstd/sys/windows/time.rs +++ b/src/libstd/sys/windows/time.rs @@ -10,7 +10,7 @@ use libc; use ops::Sub; use time::Duration; -use sync::{Once, ONCE_INIT}; +use sync::Once; const NANOS_PER_SEC: u64 = 1_000_000_000; @@ -28,7 +28,7 @@ impl SteadyTime { fn frequency() -> libc::LARGE_INTEGER { static mut FREQUENCY: libc::LARGE_INTEGER = 0; - static ONCE: Once = ONCE_INIT; + static ONCE: Once = Once::new(); unsafe { ONCE.call_once(|| { diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs index 2e043c58a5da9..0eafd4d5f12e9 100644 --- a/src/libstd/thread/local.rs +++ b/src/libstd/thread/local.rs @@ -18,12 +18,7 @@ use cell::UnsafeCell; // Sure wish we had macro hygiene, no? #[doc(hidden)] -pub mod __impl { - pub use super::imp::Key as KeyInner; - pub use super::imp::destroy_value; - pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER; - pub use sys_common::thread_local::StaticKey as OsStaticKey; -} +pub use self::imp::Key as __KeyInner; /// A thread local storage key which owns its contents. /// @@ -76,55 +71,10 @@ pub struct LocalKey { // // This is trivially devirtualizable by LLVM because we never store anything // to this field and rustc can declare the `static` as constant as well. - #[doc(hidden)] - pub inner: fn() -> &'static __impl::KeyInner>>, + inner: fn() -> &'static __KeyInner, // initialization routine to invoke to create a value - #[doc(hidden)] - pub init: fn() -> T, -} - -/// Declare a new thread local storage key of type `std::thread::LocalKey`. -/// -/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information. -#[macro_export] -#[stable(feature = "rust1", since = "1.0.0")] -#[allow_internal_unstable] -macro_rules! thread_local { - (static $name:ident: $t:ty = $init:expr) => ( - static $name: ::std::thread::LocalKey<$t> = { - use std::cell::UnsafeCell as __UnsafeCell; - use std::thread::__local::KeyInner as __KeyInner; - use std::option::Option as __Option; - use std::option::Option::None as __None; - - __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { - __UnsafeCell { value: __None } - }); - fn __init() -> $t { $init } - fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { - &__KEY - } - ::std::thread::LocalKey { inner: __getit, init: __init } - }; - ); - (pub static $name:ident: $t:ty = $init:expr) => ( - pub static $name: ::std::thread::LocalKey<$t> = { - use std::cell::UnsafeCell as __UnsafeCell; - use std::thread::__local::KeyInner as __KeyInner; - use std::option::Option as __Option; - use std::option::Option::None as __None; - - __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { - __UnsafeCell { value: __None } - }); - fn __init() -> $t { $init } - fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { - &__KEY - } - ::std::thread::LocalKey { inner: __getit, init: __init } - }; - ); + init: fn() -> T, } // Macro pain #4586: @@ -147,50 +97,37 @@ macro_rules! thread_local { // To get around this, we're forced to inject the #[cfg] logic into the macro // itself. Woohoo. +/// Declare a new thread local storage key of type `std::thread::LocalKey`. +/// +/// See [LocalKey documentation](thread/struct.LocalKey.html) for more information. 
#[macro_export] -#[doc(hidden)] +#[stable(feature = "rust1", since = "1.0.0")] #[allow_internal_unstable] -macro_rules! __thread_local_inner { +macro_rules! thread_local { (static $name:ident: $t:ty = $init:expr) => ( - #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), - not(target_arch = "aarch64")), - thread_local)] - static $name: ::std::thread::__local::KeyInner<$t> = - __thread_local_inner!($init, $t); + static $name: ::std::thread::LocalKey<$t> = { + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + static __KEY: ::std::thread::__LocalKeyInner<$t> = + ::std::thread::__LocalKeyInner::new(); + fn __init() -> $t { $init } + fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY } + ::std::thread::LocalKey::new(__getit, __init) + }; ); (pub static $name:ident: $t:ty = $init:expr) => ( - #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), - not(target_arch = "aarch64")), - thread_local)] - pub static $name: ::std::thread::__local::KeyInner<$t> = - __thread_local_inner!($init, $t); - ); - ($init:expr, $t:ty) => ({ - #[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))] - const _INIT: ::std::thread::__local::KeyInner<$t> = { - ::std::thread::__local::KeyInner { - inner: ::std::cell::UnsafeCell { value: $init }, - dtor_registered: ::std::cell::UnsafeCell { value: false }, - dtor_running: ::std::cell::UnsafeCell { value: false }, - } - }; - - #[allow(trivial_casts)] - #[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))] - const _INIT: ::std::thread::__local::KeyInner<$t> = { - ::std::thread::__local::KeyInner { - inner: ::std::cell::UnsafeCell { value: $init }, - os: ::std::thread::__local::OsStaticKey { - inner: ::std::thread::__local::OS_INIT_INNER, - dtor: ::std::option::Option::Some( - ::std::thread::__local::destroy_value::<$t> - ), - }, - } + pub static $name: ::std::thread::LocalKey<$t> = { + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + static __KEY: ::std::thread::__LocalKeyInner<$t> = + ::std::thread::__LocalKeyInner::new(); + fn __init() -> $t { $init } + fn __getit() -> &'static ::std::thread::__LocalKeyInner<$t> { &__KEY } + ::std::thread::LocalKey::new(__getit, __init) }; - - _INIT - }); + ); } /// Indicator of the state of a thread local storage key. @@ -225,6 +162,14 @@ pub enum LocalKeyState { } impl LocalKey { + #[doc(hidden)] + pub const fn new(inner: fn() -> &'static __KeyInner, init: fn() -> T) -> LocalKey { + LocalKey { + inner: inner, + init: init + } + } + /// Acquires a reference to the value in this TLS key. /// /// This will lazily initialize the value if this thread has not referenced @@ -300,44 +245,45 @@ impl LocalKey { mod imp { use prelude::v1::*; - use cell::UnsafeCell; + use cell::{Cell, UnsafeCell}; use intrinsics; - use ptr; pub struct Key { - // Place the inner bits in an `UnsafeCell` to currently get around the - // "only Sync statics" restriction. This allows any type to be placed in - // the cell. - // - // Note that all access requires `T: 'static` so it can't be a type with - // any borrowed pointers still. - pub inner: UnsafeCell, + inner: UnsafeCell>, // Metadata to keep track of the state of the destructor. Remember that // these variables are thread-local, not global. 
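// Illustrative sketch, not part of this patch: from the caller's side the
// reworked macro is unchanged. `thread_local!` still declares a
// `LocalKey`, accessed through `with`:
use std::cell::Cell;

thread_local!(static COUNTER: Cell<u32> = Cell::new(0));

fn bump() -> u32 {
    COUNTER.with(|c| {
        c.set(c.get() + 1);
        c.get()
    })
}

fn main() {
    assert_eq!(bump(), 1);
    assert_eq!(bump(), 2); // per-thread state persists across calls
}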
- pub dtor_registered: UnsafeCell, // should be Cell - pub dtor_running: UnsafeCell, // should be Cell + dtor_registered: Cell, + dtor_running: Cell, } unsafe impl ::marker::Sync for Key { } impl Key { - pub unsafe fn get(&'static self) -> Option<&'static T> { - if intrinsics::needs_drop::() && *self.dtor_running.get() { + pub const fn new() -> Key { + Key { + inner: UnsafeCell::new(None), + dtor_registered: Cell::new(false), + dtor_running: Cell::new(false) + } + } + + pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell>> { + if intrinsics::needs_drop::() && self.dtor_running.get() { return None } self.register_dtor(); - Some(&*self.inner.get()) + Some(&self.inner) } unsafe fn register_dtor(&self) { - if !intrinsics::needs_drop::() || *self.dtor_registered.get() { + if !intrinsics::needs_drop::() || self.dtor_registered.get() { return } register_dtor(self as *const _ as *mut u8, destroy_value::); - *self.dtor_registered.get() = true; + self.dtor_registered.set(true); } } @@ -354,6 +300,7 @@ mod imp { unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { use boxed; use mem; + use ptr; use libc; use sys_common::thread_local as os; @@ -381,10 +328,7 @@ mod imp { // *should* be the case that this loop always terminates because we // provide the guarantee that a TLS key cannot be set after it is // flagged for destruction. - static DTORS: os::StaticKey = os::StaticKey { - inner: os::INIT_INNER, - dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)), - }; + static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors)); type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>; if DTORS.get().is_null() { let v: Box = box Vec::new(); @@ -422,8 +366,8 @@ mod imp { // Right before we run the user destructor be sure to flag the // destructor as running for this thread so calls to `get` will return // `None`. - *(*ptr).dtor_running.get() = true; - ptr::read((*ptr).inner.get()); + (*ptr).dtor_running.set(true); + intrinsics::drop_in_place((*ptr).inner.get()); } } @@ -433,54 +377,50 @@ mod imp { use prelude::v1::*; use alloc::boxed; - use cell::UnsafeCell; - use mem; + use cell::{Cell, UnsafeCell}; + use marker; use ptr; use sys_common::thread_local::StaticKey as OsStaticKey; pub struct Key { - // Statically allocated initialization expression, using an `UnsafeCell` - // for the same reasons as above. - pub inner: UnsafeCell, - // OS-TLS key that we'll use to key off. - pub os: OsStaticKey, + os: OsStaticKey, + marker: marker::PhantomData>, } unsafe impl ::marker::Sync for Key { } struct Value { key: &'static Key, - value: T, + value: UnsafeCell>, } - impl Key { - pub unsafe fn get(&'static self) -> Option<&'static T> { - self.ptr().map(|p| &*p) + impl Key { + pub const fn new() -> Key { + Key { + os: OsStaticKey::new(Some(destroy_value::)), + marker: marker::PhantomData + } } - unsafe fn ptr(&'static self) -> Option<*mut T> { + pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell>> { let ptr = self.os.get() as *mut Value; if !ptr.is_null() { if ptr as usize == 1 { return None } - return Some(&mut (*ptr).value as *mut T); + return Some(&(*ptr).value); } // If the lookup returned null, we haven't initialized our own local // copy, so do that now. - // - // Also note that this transmute_copy should be ok because the value - // `inner` is already validated to be a valid `static` value, so we - // should be able to freely copy the bits. 
let ptr: Box> = box Value { key: self, - value: mem::transmute_copy(&self.inner), + value: UnsafeCell::new(None), }; let ptr = boxed::into_raw(ptr); self.os.set(ptr as *mut u8); - Some(&mut (*ptr).value as *mut T) + Some(&(*ptr).value) } } @@ -505,7 +445,7 @@ mod tests { use prelude::v1::*; use sync::mpsc::{channel, Sender}; - use cell::UnsafeCell; + use cell::{Cell, UnsafeCell}; use super::LocalKeyState; use thread; @@ -520,23 +460,23 @@ mod tests { #[test] fn smoke_no_dtor() { - thread_local!(static FOO: UnsafeCell = UnsafeCell { value: 1 }); + thread_local!(static FOO: Cell = Cell::new(1)); - FOO.with(|f| unsafe { - assert_eq!(*f.get(), 1); - *f.get() = 2; + FOO.with(|f| { + assert_eq!(f.get(), 1); + f.set(2); }); let (tx, rx) = channel(); let _t = thread::spawn(move|| { - FOO.with(|f| unsafe { - assert_eq!(*f.get(), 1); + FOO.with(|f| { + assert_eq!(f.get(), 1); }); tx.send(()).unwrap(); }); rx.recv().unwrap(); - FOO.with(|f| unsafe { - assert_eq!(*f.get(), 2); + FOO.with(|f| { + assert_eq!(f.get(), 2); }); } @@ -565,9 +505,7 @@ mod tests { #[test] fn smoke_dtor() { - thread_local!(static FOO: UnsafeCell> = UnsafeCell { - value: None - }); + thread_local!(static FOO: UnsafeCell> = UnsafeCell::new(None)); let (tx, rx) = channel(); let _t = thread::spawn(move|| unsafe { @@ -583,12 +521,8 @@ mod tests { fn circular() { struct S1; struct S2; - thread_local!(static K1: UnsafeCell> = UnsafeCell { - value: None - }); - thread_local!(static K2: UnsafeCell> = UnsafeCell { - value: None - }); + thread_local!(static K1: UnsafeCell> = UnsafeCell::new(None)); + thread_local!(static K2: UnsafeCell> = UnsafeCell::new(None)); static mut HITS: u32 = 0; impl Drop for S1 { @@ -626,9 +560,7 @@ mod tests { #[test] fn self_referential() { struct S1; - thread_local!(static K1: UnsafeCell> = UnsafeCell { - value: None - }); + thread_local!(static K1: UnsafeCell> = UnsafeCell::new(None)); impl Drop for S1 { fn drop(&mut self) { @@ -644,12 +576,8 @@ mod tests { #[test] fn dtors_in_dtors_in_dtors() { struct S1(Sender<()>); - thread_local!(static K1: UnsafeCell> = UnsafeCell { - value: None - }); - thread_local!(static K2: UnsafeCell> = UnsafeCell { - value: None - }); + thread_local!(static K1: UnsafeCell> = UnsafeCell::new(None)); + thread_local!(static K2: UnsafeCell> = UnsafeCell::new(None)); impl Drop for S1 { fn drop(&mut self) { diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs index 7c8cb5b01c108..f090d3e77ddfc 100644 --- a/src/libstd/thread/mod.rs +++ b/src/libstd/thread/mod.rs @@ -216,8 +216,7 @@ pub use self::local::{LocalKey, LocalKeyState}; consider stabilizing its interface")] pub use self::scoped_tls::ScopedKey; -#[doc(hidden)] pub use self::local::__impl as __local; -#[doc(hidden)] pub use self::scoped_tls::__impl as __scoped; +#[doc(hidden)] pub use self::local::__KeyInner as __LocalKeyInner; //////////////////////////////////////////////////////////////////////////////// // Builder diff --git a/src/libstd/thread/scoped_tls.rs b/src/libstd/thread/scoped_tls.rs index e195c3aaa3f8f..dda1db9aecea4 100644 --- a/src/libstd/thread/scoped_tls.rs +++ b/src/libstd/thread/scoped_tls.rs @@ -43,13 +43,6 @@ use prelude::v1::*; -// macro hygiene sure would be nice, wouldn't it? -#[doc(hidden)] -pub mod __impl { - pub use super::imp::KeyInner; - pub use sys_common::thread_local::INIT as OS_INIT; -} - /// Type representing a thread local storage key corresponding to a reference /// to the type parameter `T`. 
/// @@ -60,7 +53,7 @@ pub mod __impl { #[unstable(feature = "scoped_tls", reason = "scoped TLS has yet to have wide enough use to fully consider \ stabilizing its interface")] -pub struct ScopedKey { #[doc(hidden)] pub inner: __impl::KeyInner } +pub struct ScopedKey { inner: imp::KeyInner } /// Declare a new scoped thread local storage key. /// @@ -71,18 +64,6 @@ pub struct ScopedKey { #[doc(hidden)] pub inner: __impl::KeyInner } #[macro_export] #[allow_internal_unstable] macro_rules! scoped_thread_local { - (static $name:ident: $t:ty) => ( - __scoped_thread_local_inner!(static $name: $t); - ); - (pub static $name:ident: $t:ty) => ( - __scoped_thread_local_inner!(pub static $name: $t); - ); -} - -#[macro_export] -#[doc(hidden)] -#[allow_internal_unstable] -macro_rules! __scoped_thread_local_inner { (static $name:ident: $t:ty) => ( #[cfg_attr(not(any(windows, target_os = "android", @@ -91,7 +72,7 @@ macro_rules! __scoped_thread_local_inner { target_arch = "aarch64")), thread_local)] static $name: ::std::thread::ScopedKey<$t> = - __scoped_thread_local_inner!($t); + ::std::thread::ScopedKey::new(); ); (pub static $name:ident: $t:ty) => ( #[cfg_attr(not(any(windows, @@ -101,42 +82,19 @@ macro_rules! __scoped_thread_local_inner { target_arch = "aarch64")), thread_local)] pub static $name: ::std::thread::ScopedKey<$t> = - __scoped_thread_local_inner!($t); + ::std::thread::ScopedKey::new(); ); - ($t:ty) => ({ - use std::thread::ScopedKey as __Key; - - #[cfg(not(any(windows, - target_os = "android", - target_os = "ios", - target_os = "openbsd", - target_arch = "aarch64")))] - const _INIT: __Key<$t> = __Key { - inner: ::std::thread::__scoped::KeyInner { - inner: ::std::cell::UnsafeCell { value: 0 as *mut _ }, - } - }; - - #[cfg(any(windows, - target_os = "android", - target_os = "ios", - target_os = "openbsd", - target_arch = "aarch64"))] - const _INIT: __Key<$t> = __Key { - inner: ::std::thread::__scoped::KeyInner { - inner: ::std::thread::__scoped::OS_INIT, - marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>, - } - }; - - _INIT - }) } #[unstable(feature = "scoped_tls", reason = "scoped TLS has yet to have wide enough use to fully consider \ stabilizing its interface")] impl ScopedKey { + #[doc(hidden)] + pub const fn new() -> ScopedKey { + ScopedKey { inner: imp::KeyInner::new() } + } + /// Inserts a value into this scoped thread local storage slot for a /// duration of a closure. 
/// @@ -170,7 +128,7 @@ impl ScopedKey { F: FnOnce() -> R, { struct Reset<'a, T: 'a> { - key: &'a __impl::KeyInner, + key: &'a imp::KeyInner, val: *mut T, } impl<'a, T> Drop for Reset<'a, T> { @@ -231,19 +189,18 @@ impl ScopedKey { target_os = "openbsd", target_arch = "aarch64")))] mod imp { - use std::cell::UnsafeCell; + use std::cell::Cell; - #[doc(hidden)] - pub struct KeyInner { pub inner: UnsafeCell<*mut T> } + pub struct KeyInner { inner: Cell<*mut T> } unsafe impl ::marker::Sync for KeyInner { } - #[doc(hidden)] impl KeyInner { - #[doc(hidden)] - pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; } - #[doc(hidden)] - pub unsafe fn get(&self) -> *mut T { *self.inner.get() } + pub const fn new() -> KeyInner { + KeyInner { inner: Cell::new(0 as *mut _) } + } + pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); } + pub unsafe fn get(&self) -> *mut T { self.inner.get() } } } @@ -253,23 +210,27 @@ mod imp { target_os = "openbsd", target_arch = "aarch64"))] mod imp { + use prelude::v1::*; + + use cell::Cell; use marker; - use std::cell::Cell; use sys_common::thread_local::StaticKey as OsStaticKey; - #[doc(hidden)] pub struct KeyInner { pub inner: OsStaticKey, pub marker: marker::PhantomData>, } - unsafe impl ::marker::Sync for KeyInner { } + unsafe impl marker::Sync for KeyInner { } - #[doc(hidden)] impl KeyInner { - #[doc(hidden)] + pub const fn new() -> KeyInner { + KeyInner { + inner: OsStaticKey::new(None), + marker: marker::PhantomData + } + } pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) } - #[doc(hidden)] pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ } } } diff --git a/src/snapshots.txt b/src/snapshots.txt index 7c4b3866b2a99..1b2613d8c5045 100644 --- a/src/snapshots.txt +++ b/src/snapshots.txt @@ -1,3 +1,13 @@ +S 2015-05-24 ba0e1cd + bitrig-x86_64 2a710e16e3e3ef3760df1f724d66b3af34c1ef3f + freebsd-x86_64 370db40613f5c08563ed7e38357826dd42d4e0f8 + linux-i386 a6f22e481eabf098cc65bda97bf7e434a1fcc20b + linux-x86_64 5fd8698fdfe953e6c4d86cf4fa1d5f3a0053248c + macos-i386 9a273324a6b63a40f67a553029c0a9fb692ffd1f + macos-x86_64 e5b12cb7c179fc98fa905a3c84803645d946a6ae + winnt-i386 18d8d76c5380ee2247dd534bfb2c4ed1b3d83461 + winnt-x86_64 ef27ce42af4941be24a2f6097d969ffc845a31ee + S 2015-04-27 857ef6e bitrig-x86_64 d28e2a5f8b478e69720703e751774f5e728a8edd freebsd-x86_64 18925db56f6298cc190d1f41615ab5871de1dda0 diff --git a/src/test/auxiliary/issue-17718.rs b/src/test/auxiliary/issue-17718.rs index 67474e7902170..b347e674f0a9f 100644 --- a/src/test/auxiliary/issue-17718.rs +++ b/src/test/auxiliary/issue-17718.rs @@ -11,12 +11,12 @@ use std::sync::atomic; pub const C1: usize = 1; -pub const C2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; +pub const C2: atomic::AtomicUsize = atomic::AtomicUsize::new(0); pub const C3: fn() = foo; pub const C4: usize = C1 * C1 + C1 / C1; pub const C5: &'static usize = &C4; pub static S1: usize = 3; -pub static S2: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT; +pub static S2: atomic::AtomicUsize = atomic::AtomicUsize::new(0); fn foo() {} diff --git a/src/test/compile-fail/dropck_arr_cycle_checked.rs b/src/test/compile-fail/dropck_arr_cycle_checked.rs index f3c3f31e4afc6..c9713ebcebe98 100644 --- a/src/test/compile-fail/dropck_arr_cycle_checked.rs +++ b/src/test/compile-fail/dropck_arr_cycle_checked.rs @@ -18,9 +18,9 @@ use id::Id; mod s { #![allow(unstable)] - use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; + use std::sync::atomic::{AtomicUsize, Ordering}; - static S_COUNT: 
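// Illustrative sketch, not part of this patch: the scoped-TLS protocol
// above, hand-rolled in miniature. `set` installs a pointer for the
// duration of a closure, and a `Drop` guard restores the previous one even
// on panic; `with` asserts a value is installed before handing out a
// reference. (This API later moved out of std into the `scoped_tls` crate.)
use std::cell::Cell;
use std::ptr;

thread_local!(static SLOT: Cell<*const u32> = Cell::new(ptr::null()));

fn set<R>(value: &u32, f: impl FnOnce() -> R) -> R {
    struct Reset(*const u32);
    impl Drop for Reset {
        fn drop(&mut self) {
            SLOT.with(|s| s.set(self.0)); // restore on scope exit or panic
        }
    }
    let prev = SLOT.with(|s| s.replace(value as *const u32));
    let _reset = Reset(prev);
    f()
}

fn with<R>(f: impl FnOnce(&u32) -> R) -> R {
    let p = SLOT.with(|s| s.get());
    assert!(!p.is_null(), "cannot access a scoped TLS value before it is set");
    f(unsafe { &*p }) // valid: `set` keeps the referent alive during `f`
}

fn main() {
    set(&7, || assert_eq!(with(|v| *v), 7));
}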
diff --git a/src/test/compile-fail/dropck_arr_cycle_checked.rs b/src/test/compile-fail/dropck_arr_cycle_checked.rs
index f3c3f31e4afc6..c9713ebcebe98 100644
--- a/src/test/compile-fail/dropck_arr_cycle_checked.rs
+++ b/src/test/compile-fail/dropck_arr_cycle_checked.rs
@@ -18,9 +18,9 @@ use id::Id;

 mod s {
     #![allow(unstable)]
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use std::sync::atomic::{AtomicUsize, Ordering};

-    static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);

     pub fn next_count() -> usize {
         S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
diff --git a/src/test/compile-fail/dropck_tarena_cycle_checked.rs b/src/test/compile-fail/dropck_tarena_cycle_checked.rs
index 10bfe70640ca8..9309f5a243cd3 100644
--- a/src/test/compile-fail/dropck_tarena_cycle_checked.rs
+++ b/src/test/compile-fail/dropck_tarena_cycle_checked.rs
@@ -26,9 +26,9 @@ use id::Id;

 mod s {
     #![allow(unstable)]
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use std::sync::atomic::{AtomicUsize, Ordering};

-    static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);

     pub fn next_count() -> usize {
         S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
diff --git a/src/test/compile-fail/dropck_trait_cycle_checked.rs b/src/test/compile-fail/dropck_trait_cycle_checked.rs
index 6e543d017f260..1d8c7e9ac3e82 100644
--- a/src/test/compile-fail/dropck_trait_cycle_checked.rs
+++ b/src/test/compile-fail/dropck_trait_cycle_checked.rs
@@ -17,9 +17,9 @@ use std::cell::Cell;
 use id::Id;

 mod s {
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use std::sync::atomic::{AtomicUsize, Ordering};

-    static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);

     pub fn next_count() -> usize {
         S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
diff --git a/src/test/compile-fail/dropck_vec_cycle_checked.rs b/src/test/compile-fail/dropck_vec_cycle_checked.rs
index 53a14fd8fac92..8722246bb4eaa 100644
--- a/src/test/compile-fail/dropck_vec_cycle_checked.rs
+++ b/src/test/compile-fail/dropck_vec_cycle_checked.rs
@@ -17,9 +17,9 @@ use id::Id;

 mod s {
     #![allow(unstable)]
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use std::sync::atomic::{AtomicUsize, Ordering};

-    static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);

     pub fn next_count() -> usize {
         S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
diff --git a/src/test/compile-fail/functional-struct-update-respects-privacy.rs b/src/test/compile-fail/functional-struct-update-respects-privacy.rs
index 51e23a689a1ad..3f41401eb69c1 100644
--- a/src/test/compile-fail/functional-struct-update-respects-privacy.rs
+++ b/src/test/compile-fail/functional-struct-update-respects-privacy.rs
@@ -16,7 +16,7 @@ use self::foo::S;
 mod foo {
     use std::cell::{UnsafeCell};

-    static mut count : UnsafeCell<u64> = UnsafeCell { value: 1 };
+    static mut count : UnsafeCell<u64> = UnsafeCell::new(1);

     pub struct S { pub a: u8, pub b: String, secret_uid: u64 }
diff --git a/src/test/compile-fail/issue-17718-const-borrow.rs b/src/test/compile-fail/issue-17718-const-borrow.rs
index dfa5bca8ccdb6..12a9a27463157 100644
--- a/src/test/compile-fail/issue-17718-const-borrow.rs
+++ b/src/test/compile-fail/issue-17718-const-borrow.rs
@@ -10,12 +10,12 @@

 use std::cell::UnsafeCell;

-const A: UnsafeCell<usize> = UnsafeCell { value: 1 };
+const A: UnsafeCell<usize> = UnsafeCell::new(1);
 const B: &'static UnsafeCell<usize> = &A;
 //~^ ERROR: cannot borrow a constant which contains interior mutability

 struct C { a: UnsafeCell<usize> }
-const D: C = C { a: UnsafeCell { value: 1 } };
+const D: C = C { a: UnsafeCell::new(1) };
 const E: &'static UnsafeCell<usize> = &D.a;
 //~^ ERROR: cannot borrow a constant which contains interior mutability
 const F: &'static C = &D;
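The four dropck tests above all receive the same mechanical update to their shared helper module. Lifted out of the diff context, the pattern they exercise is a process-global monotonic counter (a standalone sketch using the tests' own names):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Process-global counter, as in the `mod s` helpers above, now
// initialized with the `const fn` constructor.
static S_COUNT: AtomicUsize = AtomicUsize::new(0);

// Generates a globally unique, strictly increasing count.
pub fn next_count() -> usize {
    S_COUNT.fetch_add(1, Ordering::SeqCst) + 1
}

fn main() {
    assert_eq!(next_count(), 1);
    assert_eq!(next_count(), 2);
}
```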
diff --git a/src/test/compile-fail/issue-7364.rs b/src/test/compile-fail/issue-7364.rs
index 5d85fe93a4888..999e5f9db2dfc 100644
--- a/src/test/compile-fail/issue-7364.rs
+++ b/src/test/compile-fail/issue-7364.rs
@@ -17,6 +17,5 @@
 static boxed: Box<RefCell<isize>> = box RefCell::new(0);
 //~^ ERROR allocations are not allowed in statics
 //~| ERROR the trait `core::marker::Sync` is not implemented for the type
 //~| ERROR the trait `core::marker::Sync` is not implemented for the type
-//~| ERROR E0015

 fn main() { }
diff --git a/src/test/compile-fail/std-uncopyable-atomics.rs b/src/test/compile-fail/std-uncopyable-atomics.rs
index 9807fc43140d9..35877db610e34 100644
--- a/src/test/compile-fail/std-uncopyable-atomics.rs
+++ b/src/test/compile-fail/std-uncopyable-atomics.rs
@@ -15,11 +15,11 @@ use std::sync::atomic::*;
 use std::ptr;

 fn main() {
-    let x = ATOMIC_BOOL_INIT;
+    let x = AtomicBool::new(false);
     let x = *&x; //~ ERROR: cannot move out of borrowed content
-    let x = ATOMIC_ISIZE_INIT;
+    let x = AtomicIsize::new(0);
     let x = *&x; //~ ERROR: cannot move out of borrowed content
-    let x = ATOMIC_USIZE_INIT;
+    let x = AtomicUsize::new(0);
     let x = *&x; //~ ERROR: cannot move out of borrowed content
     let x: AtomicPtr<usize> = AtomicPtr::new(ptr::null_mut());
     let x = *&x; //~ ERROR: cannot move out of borrowed content
diff --git a/src/test/compile-fail/vec-must-not-hide-type-from-dropck.rs b/src/test/compile-fail/vec-must-not-hide-type-from-dropck.rs
index c30aa7b817bf4..0b2112edf7280 100644
--- a/src/test/compile-fail/vec-must-not-hide-type-from-dropck.rs
+++ b/src/test/compile-fail/vec-must-not-hide-type-from-dropck.rs
@@ -28,9 +28,9 @@ use id::Id;

 mod s {
     #![allow(unstable)]
-    use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+    use std::sync::atomic::{AtomicUsize, Ordering};

-    static S_COUNT: AtomicUsize = ATOMIC_USIZE_INIT;
+    static S_COUNT: AtomicUsize = AtomicUsize::new(0);

     /// generates globally unique count (global across the current
     /// process, that is)
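std-uncopyable-atomics above asserts that atomics stay non-`Copy` even with the new constructors: dereferencing a shared borrow must not duplicate the value, or two owners would update diverging counters. A hedged standalone sketch of the same point (the exact compiler message varies across releases):

```rust
use std::sync::atomic::AtomicUsize;

fn main() {
    let x = AtomicUsize::new(0);
    let r = &x;
    // Atomics are deliberately not `Copy`: a bitwise copy would fork
    // the shared counter. Uncommenting the next line reproduces the
    // "cannot move out of borrowed content"-style error the test expects.
    // let y = *r;
    let _ = r;
}
```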
diff --git a/src/test/debuginfo/constant-debug-locs.rs b/src/test/debuginfo/constant-debug-locs.rs
index 5fc580755043d..72448ca2e001f 100644
--- a/src/test/debuginfo/constant-debug-locs.rs
+++ b/src/test/debuginfo/constant-debug-locs.rs
@@ -19,7 +19,7 @@
 // This test makes sure that the compiler doesn't crash when trying to assign
 // debug locations to const-expressions.

-use std::sync::MUTEX_INIT;
+use std::sync::StaticMutex;
 use std::cell::UnsafeCell;

 const CONSTANT: u64 = 3 + 4;
@@ -49,7 +49,7 @@ const VEC: [u32; 8] = [0; 8];

 const NESTED: (Struct, TupleStruct) = (STRUCT, TUPLE_STRUCT);

-const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell { value: false };
+const UNSAFE_CELL: UnsafeCell<bool> = UnsafeCell::new(false);

 fn main() {
     let mut _constant = CONSTANT;
@@ -61,6 +61,6 @@ fn main() {
     let mut _string = STRING;
     let mut _vec = VEC;
     let mut _nested = NESTED;
-    let mut _extern = MUTEX_INIT;
+    let mut _extern = StaticMutex::new();
     let mut _unsafe_cell = UNSAFE_CELL;
 }
diff --git a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs
index 0bc1e33ce4695..2c3d7ef39e497 100644
--- a/src/test/run-pass-valgrind/cast-enum-with-dtor.rs
+++ b/src/test/run-pass-valgrind/cast-enum-with-dtor.rs
@@ -22,7 +22,7 @@ enum E {
     C = 2
 }

-static FLAG: atomic::AtomicUsize = atomic::ATOMIC_USIZE_INIT;
+static FLAG: atomic::AtomicUsize = atomic::AtomicUsize::new(0);

 impl Drop for E {
     fn drop(&mut self) {
diff --git a/src/test/run-pass/associated-types-project-from-type-param-via-bound-in-where-clause.rs b/src/test/run-pass/associated-types-project-from-type-param-via-bound-in-where-clause.rs
index 1830b41d0b506..5ceb1013ad811 100644
--- a/src/test/run-pass/associated-types-project-from-type-param-via-bound-in-where-clause.rs
+++ b/src/test/run-pass/associated-types-project-from-type-param-via-bound-in-where-clause.rs
@@ -13,10 +13,10 @@
 // `T`. Issue #20300.

 use std::marker::{PhantomData};
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{AtomicUsize};
 use std::sync::atomic::Ordering::SeqCst;

-static COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;
+static COUNTER: AtomicUsize = AtomicUsize::new(0);

 // Preamble.
 trait Trait { type Item; }
diff --git a/src/test/run-pass/box-of-array-of-drop-1.rs b/src/test/run-pass/box-of-array-of-drop-1.rs
index a93a488c1b5fd..1c7359a0fad9d 100644
--- a/src/test/run-pass/box-of-array-of-drop-1.rs
+++ b/src/test/run-pass/box-of-array-of-drop-1.rs
@@ -12,9 +12,9 @@
 // destructor.

 use std::thread;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 struct D(u8);
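constant-debug-locs swaps `MUTEX_INIT` for `StaticMutex::new()`, and the run-pass tests around it all report drop activity through a `static LOG: AtomicUsize`. A sketch of one plausible encoding for such a log, matching the `0x03_04`-style expected values seen in issue-21486; the shift-and-or scheme below is an assumption for illustration, not code copied from the tests:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

static LOG: AtomicUsize = AtomicUsize::new(0);

struct D(u8);

impl Drop for D {
    fn drop(&mut self) {
        // Not an atomic read-modify-write, but fine on one thread:
        // shift the log left one byte and record this value's id.
        let old = LOG.load(Ordering::SeqCst);
        LOG.store((old << 8) | self.0 as usize, Ordering::SeqCst);
    }
}

fn main() {
    {
        let _a = D(1);
        let _b = D(2);
    } // locals drop in reverse declaration order: _b, then _a
    assert_eq!(LOG.load(Ordering::SeqCst), 0x02_01);
}
```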
diff --git a/src/test/run-pass/box-of-array-of-drop-2.rs b/src/test/run-pass/box-of-array-of-drop-2.rs
index 715571364c8de..ad781f00356d7 100644
--- a/src/test/run-pass/box-of-array-of-drop-2.rs
+++ b/src/test/run-pass/box-of-array-of-drop-2.rs
@@ -12,9 +12,9 @@
 // destructor.

 use std::thread;
-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 struct D(u8);
diff --git a/src/test/run-pass/issue-17718-static-unsafe-interior.rs b/src/test/run-pass/issue-17718-static-unsafe-interior.rs
index 29d72000d07cc..c18d51e84d84f 100644
--- a/src/test/run-pass/issue-17718-static-unsafe-interior.rs
+++ b/src/test/run-pass/issue-17718-static-unsafe-interior.rs
@@ -38,8 +38,8 @@ unsafe impl Sync for UnsafeEnum {}

 static STATIC1: UnsafeEnum<isize> = UnsafeEnum::VariantSafe;

-static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
-const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell { value: 1 });
+static STATIC2: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
+const CONST: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(1));
 static STATIC3: MyUnsafe<isize> = MyUnsafe{value: CONST};

 static STATIC4: &'static MyUnsafePack<isize> = &STATIC2;
@@ -50,7 +50,7 @@ struct Wrap {

 unsafe impl<T: Send> Sync for Wrap<T> {}

-static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell{value: 2});
+static UNSAFE: MyUnsafePack<isize> = MyUnsafePack(UnsafeCell::new(2));
 static WRAPPED_UNSAFE: Wrap<&'static MyUnsafePack<isize>> = Wrap { value: &UNSAFE };

 fn main() {
diff --git a/src/test/run-pass/issue-17718.rs b/src/test/run-pass/issue-17718.rs
index 2b84ce71dd2a2..457bbb23e1820 100644
--- a/src/test/run-pass/issue-17718.rs
+++ b/src/test/run-pass/issue-17718.rs
@@ -15,10 +15,10 @@

 extern crate issue_17718 as other;

-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};

 const C1: usize = 1;
-const C2: AtomicUsize = ATOMIC_USIZE_INIT;
+const C2: AtomicUsize = AtomicUsize::new(0);
 const C3: fn() = foo;
 const C4: usize = C1 * C1 + C1 / C1;
 const C5: &'static usize = &C4;
@@ -28,7 +28,7 @@ const C6: usize = {
 };

 static S1: usize = 3;
-static S2: AtomicUsize = ATOMIC_USIZE_INIT;
+static S2: AtomicUsize = AtomicUsize::new(0);

 mod test {
     static A: usize = 4;
diff --git a/src/test/run-pass/issue-21486.rs b/src/test/run-pass/issue-21486.rs
index 7f8bd7a95f7a7..c20237f1f86b4 100644
--- a/src/test/run-pass/issue-21486.rs
+++ b/src/test/run-pass/issue-21486.rs
@@ -13,7 +13,7 @@
 // construction.

-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};

 #[derive(Debug)]
 struct Noisy(u8);
@@ -69,7 +69,7 @@ pub fn main() {
     assert_eq!(0x03_04, event_log());
 }

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 fn reset_log() {
     LOG.store(0, Ordering::SeqCst);
diff --git a/src/test/run-pass/nested-vec-3.rs b/src/test/run-pass/nested-vec-3.rs
index 60cf795c918bb..e59900caf07ec 100644
--- a/src/test/run-pass/nested-vec-3.rs
+++ b/src/test/run-pass/nested-vec-3.rs
@@ -14,9 +14,9 @@ use std::thread;

-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 struct D(u8);
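issue-17718 above deliberately declares both `const` and `static` atomics. The distinction matters once constructors are `const fn`: a `const` item is a value substituted anew at each use site, while a `static` is a single shared memory location. An illustrative sketch (names C2/S2 borrowed from the test):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// A `const` atomic is a value pasted at every use site; a `static`
// atomic is one shared location.
const C2: AtomicUsize = AtomicUsize::new(0);
static S2: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let c = C2;                       // fresh copy of the const value
    c.fetch_add(1, Ordering::SeqCst);
    assert_eq!(C2.load(Ordering::SeqCst), 0); // another fresh copy: still 0

    S2.fetch_add(1, Ordering::SeqCst);
    assert_eq!(S2.load(Ordering::SeqCst), 1); // the single shared location
}
```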
diff --git a/src/test/run-pass/struct-order-of-eval-3.rs b/src/test/run-pass/struct-order-of-eval-3.rs
index 60f9c4465a01c..c0ed4ea3ce82f 100644
--- a/src/test/run-pass/struct-order-of-eval-3.rs
+++ b/src/test/run-pass/struct-order-of-eval-3.rs
@@ -12,7 +12,7 @@
 // even when no Drop-implementations are involved.

-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};

 struct W { wrapped: u32 }
 struct S { f0: W, _f1: i32 }
@@ -34,7 +34,7 @@ pub fn main() {
              "expect: 0x{:x} actual: 0x{:x}", expect, actual);
 }

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 fn event_log() -> usize {
     LOG.load(Ordering::SeqCst)
diff --git a/src/test/run-pass/struct-order-of-eval-4.rs b/src/test/run-pass/struct-order-of-eval-4.rs
index 23a7e1ea71b8b..83ea0e3ab74ea 100644
--- a/src/test/run-pass/struct-order-of-eval-4.rs
+++ b/src/test/run-pass/struct-order-of-eval-4.rs
@@ -12,7 +12,7 @@
 // even when no Drop-implementations are involved.

-use std::sync::atomic::{Ordering, AtomicUsize, ATOMIC_USIZE_INIT};
+use std::sync::atomic::{Ordering, AtomicUsize};

 struct W { wrapped: u32 }
 struct S { f0: W, _f1: i32 }
@@ -31,7 +31,7 @@ pub fn main() {
              "expect: 0x{:x} actual: 0x{:x}", expect, actual);
 }

-static LOG: AtomicUsize = ATOMIC_USIZE_INIT;
+static LOG: AtomicUsize = AtomicUsize::new(0);

 fn event_log() -> usize {
     LOG.load(Ordering::SeqCst)
diff --git a/src/test/run-pass/vector-sort-panic-safe.rs b/src/test/run-pass/vector-sort-panic-safe.rs
index a51274199b620..f3c4ecb035e51 100644
--- a/src/test/run-pass/vector-sort-panic-safe.rs
+++ b/src/test/run-pass/vector-sort-panic-safe.rs
@@ -11,7 +11,7 @@

 #![feature(rand, core)]

-use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
+use std::sync::atomic::{AtomicUsize, Ordering};
 use std::__rand::{thread_rng, Rng};
 use std::thread;

@@ -20,20 +20,20 @@ const MAX_LEN: usize = 32;
 static drop_counts: [AtomicUsize; MAX_LEN] =
     // FIXME #5244: AtomicUsize is not Copy.
     [
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
-        ATOMIC_USIZE_INIT, ATOMIC_USIZE_INIT,
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0),
+        AtomicUsize::new(0), AtomicUsize::new(0),
     ];

-static creation_count: AtomicUsize = ATOMIC_USIZE_INIT;
+static creation_count: AtomicUsize = AtomicUsize::new(0);

 #[derive(Clone, PartialEq, PartialOrd, Eq, Ord)]
 struct DropCounter { x: u32, creation_id: usize }
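vector-sort-panic-safe spells out all 32 array elements because, as its FIXME #5244 note says, `AtomicUsize` is not `Copy`, and the `[expr; N]` repeat form requires a `Copy` value there. A smaller sketch of the same workaround:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// `[AtomicUsize::new(0); 4]` is rejected (the element is not `Copy`),
// so each element is written out, each via the `const fn` constructor.
static DROPS: [AtomicUsize; 4] = [
    AtomicUsize::new(0), AtomicUsize::new(0),
    AtomicUsize::new(0), AtomicUsize::new(0),
];

fn main() {
    DROPS[2].fetch_add(1, Ordering::SeqCst);
    assert_eq!(DROPS[2].load(Ordering::SeqCst), 1);
}
```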