diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index fb489bf487b8b..df8d132e1230b 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -676,6 +676,7 @@ fn print_to<T>(args: fmt::Arguments, label: &str) where T: Write {
     let result = match local_s.state() {
         LocalKeyState::Uninitialized |
+        LocalKeyState::Initializing |
         LocalKeyState::Destroyed => global_s().write_fmt(args),
         LocalKeyState::Valid => {
             local_s.with(|s| {
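
// Illustrative sketch, not part of the patch itself: the stdio.rs hunk above relies
// on callers checking a key's state and falling back to a non-TLS path whenever the
// key is unusable. With the new `Initializing` variant that check looks roughly like
// this. `LOG` and `global_log` are hypothetical names used only for this example,
// and `LocalKeyState` is unstable behind `#![feature(thread_local_state)]`.
#![feature(thread_local_state)]

use std::cell::RefCell;
use std::thread::LocalKeyState;

thread_local!(static LOG: RefCell<Vec<String>> = RefCell::new(Vec::new()));

// Fallback path that never touches thread-local storage.
fn global_log(msg: &str) {
    eprintln!("(global) {}", msg);
}

fn log(msg: &str) {
    match LOG.state() {
        // Mirror the hunk above: anything other than `Valid` is routed to the
        // fallback rather than risking a panic inside `with`.
        LocalKeyState::Uninitialized |
        LocalKeyState::Initializing |
        LocalKeyState::Destroyed => global_log(msg),
        LocalKeyState::Valid => LOG.with(|l| l.borrow_mut().push(msg.to_string())),
    }
}

fn main() {
    log("state is Uninitialized, so this goes to the fallback");
    LOG.with(|_| {}); // force initialization of the key
    log("state is Valid now, so this lands in the thread-local buffer");
}
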
diff --git a/src/libstd/sys/unix/fast_thread_local.rs b/src/libstd/sys/unix/fast_thread_local.rs
index 6cdbe5df75d51..99f4e720334b4 100644
--- a/src/libstd/sys/unix/fast_thread_local.rs
+++ b/src/libstd/sys/unix/fast_thread_local.rs
@@ -59,19 +59,3 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
 // a more direct implementation.
 #[cfg(target_os = "fuchsia")]
 pub use sys_common::thread_local::register_dtor_fallback as register_dtor;
-
-pub fn requires_move_before_drop() -> bool {
-    // The macOS implementation of TLS apparently had an odd aspect to it
-    // where the pointer we have may be overwritten while this destructor
-    // is running. Specifically if a TLS destructor re-accesses TLS it may
-    // trigger a re-initialization of all TLS variables, paving over at
-    // least some destroyed ones with initial values.
-    //
-    // This means that if we drop a TLS value in place on macOS that we could
-    // revert the value to its original state halfway through the
-    // destructor, which would be bad!
-    //
-    // Hence, we use `ptr::read` on macOS (to move to a "safe" location)
-    // instead of drop_in_place.
-    cfg!(target_os = "macos")
-}
diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs
index 4ee8132f55cee..426d0d39de9d4 100644
--- a/src/libstd/thread/local.rs
+++ b/src/libstd/thread/local.rs
@@ -12,9 +12,8 @@
 
 #![unstable(feature = "thread_local_internals", issue = "0")]
 
-use cell::UnsafeCell;
 use fmt;
-use mem;
+use cell::UnsafeCell;
 
 /// A thread local storage key which owns its contents.
 ///
@@ -87,7 +86,7 @@ pub struct LocalKey<T: 'static> {
     // but actual data inside will sometimes be tagged with #[thread_local].
     // It's not valid for a true static to reference a #[thread_local] static,
     // so we get around that by exposing an accessor through a layer of function
-    // indirection (this thunk).
+    // indirection (the 'get' thunk).
     //
     // Note that the thunk is itself unsafe because the returned lifetime of the
     // slot where data lives, `'static`, is not actually valid. The lifetime
@@ -97,10 +96,19 @@ pub struct LocalKey<T: 'static> {
     // trivially devirtualizable by LLVM because the value of `inner` never
     // changes and the constant should be readonly within a crate. This mainly
    // only runs into problems when TLS statics are exported across crates.
-    inner: unsafe fn() -> Option<&'static UnsafeCell<Option<T>>>,
 
-    // initialization routine to invoke to create a value
+    // The user-supplied initialization function that constructs a new T.
    init: fn() -> T,
+
+    // Platform-specific callbacks.
+
+    // Get a reference to the value. get can be called at any time during the
+    // value's life cycle (Uninitialized, Initializing, Valid, and Destroyed).
+    get: unsafe fn() -> &'static UnsafeCell<LocalKeyValue<T>>,
+
+    // Register a destructor for the value. register_dtor should be called
+    // after the key has been transitioned into the Valid state.
+    register_dtor: unsafe fn(),
 }
 
 #[stable(feature = "std_debug", since = "1.16.0")]
@@ -161,28 +169,30 @@ macro_rules! thread_local {
 macro_rules! __thread_local_inner {
     (@key $(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
         {
+            #[thread_local]
+            #[cfg(target_thread_local)]
+            static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
+                $crate::thread::__FastLocalKeyInner::new();
+
+            #[cfg(not(target_thread_local))]
+            static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
+                $crate::thread::__OsLocalKeyInner::new();
+
+            unsafe fn __get() -> &'static $crate::cell::UnsafeCell<$crate::thread::
+                __LocalKeyValue<$t>> { __KEY.get() }
+
+            // Only the fast implementation needs a destructor explicitly
+            // registered - the OS implementation's destructor is registered
+            // automatically by std::sys_common::thread_local::StaticKey.
+            #[cfg(target_thread_local)]
+            unsafe fn __register_dtor() { __KEY.register_dtor() }
+            #[cfg(not(target_thread_local))]
+            unsafe fn __register_dtor() {}
+
             #[inline]
             fn __init() -> $t { $init }
 
-            unsafe fn __getit() -> $crate::option::Option<
-                &'static $crate::cell::UnsafeCell<
-                $crate::option::Option<$t>>>
-            {
-                #[thread_local]
-                #[cfg(target_thread_local)]
-                static __KEY: $crate::thread::__FastLocalKeyInner<$t> =
-                    $crate::thread::__FastLocalKeyInner::new();
-
-                #[cfg(not(target_thread_local))]
-                static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
-                    $crate::thread::__OsLocalKeyInner::new();
-
-                __KEY.get()
-            }
-
-            unsafe {
-                $crate::thread::LocalKey::new(__getit, __init)
-            }
+            unsafe { $crate::thread::LocalKey::new(__get, __register_dtor, __init) }
         }
     };
     ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => {
@@ -196,15 +206,28 @@ macro_rules! __thread_local_inner {
     }
 }
 
+#[unstable(feature = "thread_local_state",
+           reason = "state querying was recently added",
+           issue = "27716")]
+#[doc(hidden)]
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+pub enum LocalKeyValue<T> {
+    Uninitialized,
+    Initializing,
+    Valid(T),
+    Destroyed,
+}
+
 /// Indicator of the state of a thread local storage key.
 #[unstable(feature = "thread_local_state",
            reason = "state querying was recently added",
            issue = "27716")]
 #[derive(Debug, Eq, PartialEq, Copy, Clone)]
 pub enum LocalKeyState {
-    /// All keys are in this state whenever a thread starts. Keys will
-    /// transition to the `Valid` state once the first call to [`with`] happens
-    /// and the initialization expression succeeds.
+    /// All keys are in this state whenever a thread starts. On the first call
+    /// to [`with`], keys are initialized - they are moved into the `Initializing`
+    /// state, then the initializer is called, and finally, if the initializer
+    /// succeeds (does not panic), they are moved into the `Valid` state.
     ///
     /// Keys in the `Uninitialized` state will yield a reference to the closure
     /// passed to [`with`] so long as the initialization routine does not panic.
     ///
@@ -212,7 +235,28 @@ pub enum LocalKeyState {
     /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
     Uninitialized,
 
-    /// Once a key has been accessed successfully, it will enter the `Valid`
+    /// On the first call to [`with`], the key is moved into this state. After
+    /// the initialization routine successfully completes (does not panic),
+    /// the key is moved into the `Valid` state (if it does panic, the key is
+    /// moved back into the `Uninitialized` state). Any code that may be called
+    /// from the initialization routine for a particular key - and that may
+    /// also access that key itself - should make sure that the key is not
+    /// currently being initialized by either querying the key's state or by
+    /// calling [`try_with`] instead of [`with`].
+    ///
+    /// Keys in the `Initializing` state will trigger a panic when accessed via
+    /// [`with`].
+    ///
+    /// Note to allocator implementors: On some platforms, initializing a TLS
+    /// key causes allocation to happen _before_ the key is moved into the
+    /// `Initializing` state. Thus, it is not necessarily safe for a global
+    /// allocator to use this TLS mechanism.
+    ///
+    /// [`try_with`]: ../../std/thread/struct.LocalKey.html#method.try_with
+    /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
+    Initializing,
+
+    /// Once a key has been initialized successfully, it will enter the `Valid`
     /// state. Keys in the `Valid` state will remain so until the thread exits,
     /// at which point the destructor will be run and the key will enter the
     /// `Destroyed` state.
@@ -227,7 +271,7 @@ pub enum LocalKeyState {
     /// necessary). While a destructor is running, and possibly after a
     /// destructor has run, a key is in the `Destroyed` state.
     ///
-    /// Keys in the `Destroyed` states will trigger a panic when accessed via
+    /// Keys in the `Destroyed` state will trigger a panic when accessed via
     /// [`with`].
     ///
     /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
@@ -239,7 +283,33 @@ pub enum LocalKeyState {
            reason = "state querying was recently added",
            issue = "27716")]
 pub struct AccessError {
-    _private: (),
+    // whether the error was due to the key being in the Initializing
+    // state - false means the key was in the Destroyed state
+    init: bool,
+}
+
+impl AccessError {
+    /// Determines whether the `AccessError` was due to the key being initialized.
+    ///
+    /// If `is_initializing` returns true, this `AccessError` was returned because
+    /// the key was in the `Initializing` state when it was accessed.
+    #[unstable(feature = "thread_local_state",
+               reason = "state querying was recently added",
+               issue = "27716")]
+    pub fn is_initializing(&self) -> bool {
+        self.init
+    }
+
+    /// Determines whether the `AccessError` was due to the key being destroyed.
+    ///
+    /// If `is_destroyed` returns true, this `AccessError` was returned because
+    /// the key was in the `Destroyed` state when it was accessed.
+    #[unstable(feature = "thread_local_state",
+               reason = "state querying was recently added",
+               issue = "27716")]
+    pub fn is_destroyed(&self) -> bool {
+        !self.init
+    }
 }
 
 #[unstable(feature = "thread_local_state",
@@ -256,7 +326,11 @@ impl fmt::Debug for AccessError {
            issue = "27716")]
 impl fmt::Display for AccessError {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        fmt::Display::fmt("already destroyed", f)
+        if self.init {
+            fmt::Display::fmt("currently being initialized", f)
+        } else {
+            fmt::Display::fmt("already destroyed", f)
+        }
     }
 }
 
@@ -265,13 +339,14 @@ impl<T: 'static> LocalKey<T> {
     #[unstable(feature = "thread_local_internals",
                reason = "recently added to create a key",
                issue = "0")]
-    pub const unsafe fn new(inner: unsafe fn() -> Option<&'static UnsafeCell<Option<T>>>,
-                            init: fn() -> T) -> LocalKey<T> {
-        LocalKey {
-            inner,
-            init,
-        }
-    }
+    pub const unsafe fn new(get: unsafe fn() -> &'static UnsafeCell<LocalKeyValue<T>>,
+                            register_dtor: unsafe fn(), init: fn() -> T) -> LocalKey<T> {
+        LocalKey {
+            init,
+            get,
+            register_dtor,
+        }
+    }
 
     /// Acquires a reference to the value in this TLS key.
     ///
@@ -280,44 +355,67 @@ impl<T: 'static> LocalKey<T> {
     ///
     /// # Panics
     ///
-    /// This function will `panic!()` if the key currently has its
-    /// destructor running, and it **may** panic if the destructor has
-    /// previously been run for this thread.
+    /// This function will `panic!()` if the key is currently being initialized
+    /// or destroyed, and it **may** panic if the destructor has previously
+    /// been run for this thread.
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn with<F, R>(&'static self, f: F) -> R
                       where F: FnOnce(&T) -> R {
-        self.try_with(f).expect("cannot access a TLS value during or \
-                                 after it is destroyed")
+        self.try_with(f).expect("cannot access a TLS value while it is being initialized \
+                                 or during or after it is destroyed")
     }
 
-    unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T {
-        // Execute the initialization up front, *then* move it into our slot,
-        // just in case initialization fails.
-        let value = (self.init)();
-        let ptr = slot.get();
+    unsafe fn init(&self, slot: &UnsafeCell<LocalKeyValue<T>>) {
+        struct RollbackOnPanic<'a, T: 'a> {
+            panicking: bool,
+            slot: &'a UnsafeCell<LocalKeyValue<T>>,
+        }
 
-        // note that this can in theory just be `*ptr = Some(value)`, but due to
-        // the compiler will currently codegen that pattern with something like:
-        //
-        //      ptr::drop_in_place(ptr)
-        //      ptr::write(ptr, Some(value))
+        impl<'a, T: 'a> Drop for RollbackOnPanic<'a, T> {
+            fn drop(&mut self) {
+                if self.panicking {
+                    unsafe { *self.slot.get() = LocalKeyValue::Uninitialized }
+                }
+            }
+        }
+
+        let mut reset = RollbackOnPanic {
+            panicking: true,
+            slot,
+        };
+
+        // Transition into Initializing in preparation for calling self.init.
+        *slot.get() = LocalKeyValue::Initializing;
+        // Call the initializer; if this panics, the Drop method of RollbackOnPanic
+        // will roll back to LocalKeyValue::Uninitialized.
+        let val = (self.init)();
+        reset.panicking = false;
+        // If we get here, self.init didn't panic, so move into state Valid and
+        // register the destructor.
+        *slot.get() = LocalKeyValue::Valid(val);
+        // NOTE(joshlf): Calling self.register_dtor here (after transitioning
+        // into Initializing and then into Valid) guarantees that, for the fast
+        // implementation, no allocation happens until after a key has transitioned
+        // into Initializing. This allows a global allocator to make use of this
+        // TLS implementation and still be able to detect a recursive call to alloc.
         //
-        // Due to this pattern it's possible for the destructor of the value in
-        // `ptr` (e.g. if this is being recursively initialized) to re-access
-        // TLS, in which case there will be a `&` and `&mut` pointer to the same
-        // value (an aliasing violation). To avoid setting the "I'm running a
-        // destructor" flag we just use `mem::replace` which should sequence the
-        // operations a little differently and make this safe to call.
-        mem::replace(&mut *ptr, Some(value));
-
-        (*ptr).as_ref().unwrap()
+        // Unfortunately, no such guarantee exists on platforms that cannot use
+        // the fast implementation - std::sys_common::thread_local::StaticKey's
+        // get and set methods both allocate under the hood, and the only way to
+        // get or modify thread-local data is to use those methods. Thus, we cannot
+        // guarantee that TLS is always safe to use in the implementation of global
+        // allocators. If somebody in the future figures out a way to postpone
+        // allocation until after the transition to Initializing, that would be
+        // great.
+        (self.register_dtor)();
     }
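
// Illustrative sketch, not part of the patch itself: `LocalKey::init` above uses a
// small drop guard (`RollbackOnPanic`) so that a panicking initializer leaves the
// slot `Uninitialized` instead of stuck in `Initializing`. The same pattern in
// isolation, with hypothetical names (`Rollback`, `initialize`): the guard's `Drop`
// impl runs during unwinding and undoes the in-progress transition unless the guard
// is disarmed first.
use std::cell::Cell;

struct Rollback<'a> {
    armed: bool,
    slot: &'a Cell<&'static str>,
}

impl<'a> Drop for Rollback<'a> {
    fn drop(&mut self) {
        // Only rolls back if the code between guard creation and disarming panicked.
        if self.armed {
            self.slot.set("uninitialized");
        }
    }
}

fn initialize(slot: &Cell<&'static str>, init: fn() -> &'static str) {
    let mut guard = Rollback { armed: true, slot: slot };
    slot.set("initializing");
    let value = init();   // may panic; the guard then restores "uninitialized"
    guard.armed = false;  // disarm: initialization succeeded
    slot.set(value);
}

fn main() {
    let state = Cell::new("uninitialized");
    initialize(&state, || "valid");
    assert_eq!(state.get(), "valid");
}
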
 
     /// Query the current state of this key.
     ///
     /// A key is initially in the `Uninitialized` state whenever a thread
     /// starts. It will remain in this state up until the first call to [`with`]
-    /// within a thread has run the initialization expression successfully.
+    /// or [`try_with`]. At this point, it will transition to the `Initializing`
+    /// state for the duration of the initialization.
     ///
     /// Once the initialization expression succeeds, the key transitions to the
     /// `Valid` state which will guarantee that future calls to [`with`] will
@@ -331,24 +429,22 @@ impl<T: 'static> LocalKey<T> {
     ///
     /// Keys in the `Uninitialized` state can be accessed so long as the
     /// initialization does not panic. Keys in the `Valid` state are guaranteed
-    /// to be able to be accessed. Keys in the `Destroyed` state will panic on
-    /// any call to [`with`].
+    /// to be able to be accessed. Keys in the `Initializing` or `Destroyed`
+    /// states will panic on any call to [`with`].
     ///
     /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with
+    /// [`try_with`]: ../../std/thread/struct.LocalKey.html#method.try_with
     /// [`Copy`]: ../../std/marker/trait.Copy.html
     #[unstable(feature = "thread_local_state",
                reason = "state querying was recently added",
                issue = "27716")]
     pub fn state(&'static self) -> LocalKeyState {
         unsafe {
-            match (self.inner)() {
-                Some(cell) => {
-                    match *cell.get() {
-                        Some(..) => LocalKeyState::Valid,
-                        None => LocalKeyState::Uninitialized,
-                    }
-                }
-                None => LocalKeyState::Destroyed,
+            match &*(self.get)().get() {
+                &LocalKeyValue::Uninitialized => LocalKeyState::Uninitialized,
+                &LocalKeyValue::Initializing => LocalKeyState::Initializing,
+                &LocalKeyValue::Valid(_) => LocalKeyState::Valid,
+                &LocalKeyValue::Destroyed => LocalKeyState::Destroyed,
             }
         }
     }
@@ -357,7 +453,8 @@ impl<T: 'static> LocalKey<T> {
     ///
     /// This will lazily initialize the value if this thread has not referenced
     /// this key yet. If the key has been destroyed (which may happen if this is called
-    /// in a destructor), this function will return a ThreadLocalError.
+    /// in a destructor) or is being initialized (which may happen if this is called in
+    /// the key's initialization expression), this function will return a ThreadLocalError.
     ///
     /// # Panics
     ///
@@ -369,13 +466,25 @@ impl<T: 'static> LocalKey<T> {
     pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
                       where F: FnOnce(&T) -> R {
         unsafe {
-            let slot = (self.inner)().ok_or(AccessError {
-                _private: (),
-            })?;
-            Ok(f(match *slot.get() {
-                Some(ref inner) => inner,
-                None => self.init(slot),
-            }))
+            let slot = (self.get)();
+            if let &LocalKeyValue::Valid(ref inner) = &*slot.get() {
+                // Do this in a separate if branch (rather than just part of the
+                // match statement in the else block) to increase the performance
+                // of the fast path.
+                Ok(f(inner))
+            } else {
+                match *slot.get() {
+                    LocalKeyValue::Uninitialized => {
+                        self.init(slot);
+                        // Now that the value is initialized, we're guaranteed
+                        // not to enter this else block in the recursive call.
+                        self.try_with(f)
+                    }
+                    LocalKeyValue::Initializing => Err(AccessError { init: true }),
+                    LocalKeyValue::Destroyed => Err(AccessError { init: false }),
+                    LocalKeyValue::Valid(_) => unreachable!(),
+                }
+            }
         }
     }
 }
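
// Illustrative sketch, not part of the patch itself: with the `try_with` path above
// and the new `AccessError` query methods, a caller can tell *why* an access failed
// instead of panicking. Everything here is unstable (`thread_local_state`), and the
// names `Recursive` and `KEY` are hypothetical.
#![feature(thread_local_state)]

use std::thread;

struct Recursive;

thread_local!(static KEY: Recursive = Recursive::new());

impl Recursive {
    fn new() -> Recursive {
        // Re-entering the key from its own initializer: `try_with` reports an
        // error whose `is_initializing()` is true rather than panicking.
        let err = KEY.try_with(|_| ()).unwrap_err();
        assert!(err.is_initializing());
        assert!(!err.is_destroyed());
        Recursive
    }
}

fn main() {
    thread::spawn(|| KEY.with(|_| ())).join().unwrap();
}
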
@@ -386,16 +495,16 @@ pub mod fast {
     use cell::{Cell, UnsafeCell};
     use fmt;
     use mem;
-    use ptr;
-    use sys::fast_thread_local::{register_dtor, requires_move_before_drop};
+    use sys::fast_thread_local::register_dtor;
+    use thread::__LocalKeyValue;
 
     pub struct Key<T> {
-        inner: UnsafeCell<Option<T>>,
+        inner: UnsafeCell<__LocalKeyValue<T>>,
 
-        // Metadata to keep track of the state of the destructor. Remember that
-        // these variables are thread-local, not global.
+        // Keep track of whether the destructor has been registered (it's
+        // registered by LocalKey::init after the initializer successfully
+        // returns). Remember that this variable is thread-local, not global.
         dtor_registered: Cell<bool>,
-        dtor_running: Cell<bool>,
     }
 
     impl<T> fmt::Debug for Key<T> {
@@ -407,21 +516,16 @@
     impl<T> Key<T> {
         pub const fn new() -> Key<T> {
             Key {
-                inner: UnsafeCell::new(None),
+                inner: UnsafeCell::new(__LocalKeyValue::Uninitialized),
                 dtor_registered: Cell::new(false),
-                dtor_running: Cell::new(false)
             }
         }
 
-        pub unsafe fn get(&self) -> Option<&'static UnsafeCell<Option<T>>> {
-            if mem::needs_drop::<T>() && self.dtor_running.get() {
-                return None
-            }
-            self.register_dtor();
-            Some(&*(&self.inner as *const _))
+        pub unsafe fn get(&self) -> &'static UnsafeCell<__LocalKeyValue<T>> {
+            &*(&self.inner as *const _)
         }
 
-        unsafe fn register_dtor(&self) {
+        pub unsafe fn register_dtor(&self) {
            if !mem::needs_drop::<T>() || self.dtor_registered.get() {
                return
            }
@@ -434,36 +538,29 @@ unsafe extern fn destroy_value<T>(ptr: *mut u8) {
         let ptr = ptr as *mut Key<T>;
 
-        // Right before we run the user destructor be sure to flag the
-        // destructor as running for this thread so calls to `get` will return
-        // `None`.
-        (*ptr).dtor_running.set(true);
-
-        // Some implementations may require us to move the value before we drop
-        // it as it could get re-initialized in-place during destruction.
-        //
-        // Hence, we use `ptr::read` on those platforms (to move to a "safe"
-        // location) instead of drop_in_place.
-        if requires_move_before_drop() {
-            ptr::read((*ptr).inner.get());
-        } else {
-            ptr::drop_in_place((*ptr).inner.get());
-        }
+        // Set inner to Destroyed before we drop tmp so that a
+        // recursive call to get (called from the destructor) will
+        // be able to detect that the value is already being dropped.
+        let tmp = mem::replace(&mut *(*ptr).inner.get(), __LocalKeyValue::Destroyed);
+        drop(tmp);
     }
 }
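
// Illustrative sketch, not part of the patch itself: destroy_value above replaces
// the slot's contents with the `Destroyed` sentinel *before* dropping the old value,
// so a destructor that re-enters the key observes `Destroyed` rather than a
// half-dropped value. The same idiom in isolation, with a hypothetical `Slot` type:
use std::cell::RefCell;
use std::mem;

enum Slot {
    Valid(String),
    Destroyed,
}

fn tear_down(slot: &RefCell<Slot>) {
    // Move the old value out and install the sentinel first...
    let old = mem::replace(&mut *slot.borrow_mut(), Slot::Destroyed);
    // ...then drop it. Any re-entrant access during this drop sees `Destroyed`.
    drop(old);
}

fn main() {
    let slot = RefCell::new(Slot::Valid(String::from("live value")));
    tear_down(&slot);
    match *slot.borrow() {
        Slot::Destroyed => {}
        Slot::Valid(_) => panic!("value should have been torn down"),
    }
}
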
 
 #[doc(hidden)]
 pub mod os {
-    use cell::{Cell, UnsafeCell};
+    use cell::UnsafeCell;
     use fmt;
-    use marker;
     use ptr;
     use sys_common::thread_local::StaticKey as OsStaticKey;
+    use thread::__LocalKeyValue;
 
     pub struct Key<T> {
         // OS-TLS key that we'll use to key off.
         os: OsStaticKey,
-        marker: marker::PhantomData<Cell<T>>,
+
+        // Dummy value that can be used once the Value has already been
+        // deallocated. Its value is always Destroyed.
+        dummy_destroyed: UnsafeCell<__LocalKeyValue<T>>,
     }
 
     impl<T> fmt::Debug for Key<T> {
@@ -476,35 +573,35 @@
     struct Value<T: 'static> {
         key: &'static Key<T>,
-        value: UnsafeCell<Option<T>>,
+        value: UnsafeCell<__LocalKeyValue<T>>,
     }
 
     impl<T: 'static> Key<T> {
         pub const fn new() -> Key<T> {
             Key {
                 os: OsStaticKey::new(Some(destroy_value::<T>)),
-                marker: marker::PhantomData
+                dummy_destroyed: UnsafeCell::new(__LocalKeyValue::Destroyed),
             }
         }
 
-        pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
+        pub unsafe fn get(&'static self) -> &'static UnsafeCell<__LocalKeyValue<T>> {
             let ptr = self.os.get() as *mut Value<T>;
             if !ptr.is_null() {
                 if ptr as usize == 1 {
-                    return None
+                    // The destructor was already called (and set self.os to 1).
+                    return &self.dummy_destroyed;
                 }
-                return Some(&(*ptr).value);
+                return &(*ptr).value;
             }
-
             // If the lookup returned null, we haven't initialized our own
             // local copy, so do that now.
             let ptr: Box<Value<T>> = box Value {
-                key: self,
-                value: UnsafeCell::new(None),
+                key: &*(self as *const _),
+                value: UnsafeCell::new(__LocalKeyValue::Uninitialized),
             };
             let ptr = Box::into_raw(ptr);
             self.os.set(ptr as *mut u8);
-            Some(&(*ptr).value)
+            &(*ptr).value
         }
     }
 
@@ -516,6 +613,11 @@
         //
         // Note that to prevent an infinite loop we reset it back to null right
         // before we return from the destructor ourselves.
+        //
+        // FIXME: Setting this back to null means that, after this destructor
+        // returns, future accesses of this key will think that the state is
+        // Uninitialized (rather than Destroyed). We should figure out a way
+        // to fix this.
         let ptr = Box::from_raw(ptr as *mut Value<T>);
         let key = ptr.key;
         key.os.set(1 as *mut u8);
@@ -571,7 +673,7 @@ mod tests {
             }
         }
         fn foo() -> Foo {
-            assert!(FOO.state() == LocalKeyState::Uninitialized);
+            assert!(FOO.state() == LocalKeyState::Initializing);
             Foo
         }
         thread_local!(static FOO: Foo = foo());
@@ -600,7 +702,49 @@ mod tests {
     }
 
     #[test]
-    fn circular() {
+    fn circular_init() {
+        struct S1;
+        struct S2;
+        thread_local!(static K1: S1 = S1::new());
+        thread_local!(static K2: S2 = S2::new());
+        static mut HITS: u32 = 0;
+
+        impl S1 {
+            fn new() -> S1 {
+                unsafe {
+                    HITS += 1;
+                    if K2.state() == LocalKeyState::Initializing {
+                        assert_eq!(HITS, 3);
+                    } else {
+                        if HITS == 1 {
+                            K2.with(|_| {});
+                        } else {
+                            assert_eq!(HITS, 3);
+                        }
+                    }
+                }
+                S1
+            }
+        }
+        impl S2 {
+            fn new() -> S2 {
+                unsafe {
+                    HITS += 1;
+                    assert!(K1.state() != LocalKeyState::Initializing);
+                    assert_eq!(HITS, 2);
+                    K1.with(|_| {});
+                }
+                S2
+            }
+        }
+
+        thread::spawn(move|| {
+            drop(S1::new());
+        }).join().ok().unwrap();
+    }
+
+    #[test]
+    fn circular_drop() {
         struct S1;
         struct S2;
         thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
@@ -640,7 +784,22 @@ mod tests {
     }
 
     #[test]
-    fn self_referential() {
+    fn self_referential_init() {
+        struct S1;
+        thread_local!(static K1: S1 = S1::new());
+
+        impl S1 {
+            fn new() -> S1 {
+                assert!(K1.state() == LocalKeyState::Initializing);
+                S1
+            }
+        }
+
+        thread::spawn(move|| K1.with(|_| {})).join().ok().unwrap();
+    }
+
+    #[test]
+    fn self_referential_drop() {
         struct S1;
         thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell::new(None));
diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs
index 4912ff93abdb3..638557c305afe 100644
--- a/src/libstd/thread/mod.rs
+++ b/src/libstd/thread/mod.rs
@@ -196,6 +196,8 @@ pub use self::local::{LocalKey, LocalKeyState, AccessError};
 // where fast TLS was not available; end-user code is compiled with fast TLS
 // where available, but both are needed.
 
+#[unstable(feature = "libstd_thread_internals", issue = "0")]
+#[doc(hidden)] pub use self::local::LocalKeyValue as __LocalKeyValue;
 #[unstable(feature = "libstd_thread_internals", issue = "0")]
 #[cfg(target_thread_local)]
 #[doc(hidden)] pub use self::local::fast::Key as __FastLocalKeyInner;
diff --git a/src/test/compile-fail/issue-43733-2.rs b/src/test/compile-fail/issue-43733-2.rs
index 3dff34c2ebb12..85415dd4d2017 100644
--- a/src/test/compile-fail/issue-43733-2.rs
+++ b/src/test/compile-fail/issue-43733-2.rs
@@ -15,7 +15,7 @@
 // a custom non-`Sync` type to fake the same error.
 #[cfg(not(target_thread_local))]
 struct Key<T> {
-    _data: std::cell::UnsafeCell<Option<T>>,
+    _data: std::cell::UnsafeCell<T>,
     _flag: std::cell::Cell<bool>,
 }
 
@@ -33,7 +33,7 @@ impl<T> Key<T> {
 use std::thread::__FastLocalKeyInner as Key;
 
 static __KEY: Key<()> = Key::new();
-//~^ ERROR `std::cell::UnsafeCell<std::option::Option<()>>: std::marker::Sync` is not satisfied
+//~^ ERROR `std::cell::UnsafeCell<std::thread::LocalKeyValue<()>>: std::marker::Sync` is not
 //~| ERROR `std::cell::Cell<bool>: std::marker::Sync` is not satisfied
 
 fn main() {}
diff --git a/src/test/compile-fail/issue-43733.rs b/src/test/compile-fail/issue-43733.rs
index a4aad21a9f832..dbcc6a39d524f 100644
--- a/src/test/compile-fail/issue-43733.rs
+++ b/src/test/compile-fail/issue-43733.rs
@@ -9,11 +9,10 @@
 // except according to those terms.
 
 #![feature(const_fn, drop_types_in_const)]
-#![feature(cfg_target_thread_local, thread_local_internals)]
+#![feature(cfg_target_thread_local, thread_local_internals, thread_local_state)]
 
 type Foo = std::cell::RefCell<f32>;
 
-#[cfg(target_thread_local)]
 static __KEY: std::thread::__FastLocalKeyInner<Foo> =
     std::thread::__FastLocalKeyInner::new();
 
@@ -21,16 +20,19 @@ static __KEY: std::thread::__FastLocalKeyInner<Foo> =
 static __KEY: std::thread::__OsLocalKeyInner<Foo> =
     std::thread::__OsLocalKeyInner::new();
 
-fn __getit() -> std::option::Option<
-    &'static std::cell::UnsafeCell<
-        std::option::Option<Foo>>>
-{
+fn __get() -> &'static std::cell::UnsafeCell<std::thread::__LocalKeyValue<Foo>> {
     __KEY.get() //~ ERROR invocation of unsafe method requires unsafe
 }
 
+fn __register_dtor() {
+    #[cfg(target_thread_local)]
+    __KEY.register_dtor() //~ ERROR invocation of unsafe method requires unsafe
+}
+
 static FOO: std::thread::LocalKey<Foo> =
-    std::thread::LocalKey::new(__getit, Default::default);
-//~^ ERROR call to unsafe function requires unsafe
+    std::thread::LocalKey::new(__get, //~ ERROR call to unsafe function requires unsafe
+                               __register_dtor,
+                               Default::default);
 
 fn main() {
     FOO.with(|foo| println!("{}", foo.borrow()));
diff --git a/src/test/run-fail/tls-init-on-init.rs b/src/test/run-fail/tls-init-on-init.rs
new file mode 100644
index 0000000000000..1deef92b823bb
--- /dev/null
+++ b/src/test/run-fail/tls-init-on-init.rs
@@ -0,0 +1,35 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// ignore-emscripten no threads support
+
+// Can't include entire panic message because it'd exceed tidy's 100-character line length limit.
+// error-pattern:cannot access a TLS value while it is being initialized or during or after
+
+#![feature(thread_local_state)]
+
+use std::thread::{self, LocalKeyState};
+
+struct Foo;
+
+thread_local!(static FOO: Foo = Foo::init());
+
+impl Foo {
+    fn init() -> Foo {
+        FOO.with(|_| {});
+        Foo
+    }
+}
+
+fn main() {
+    thread::spawn(|| {
+        FOO.with(|_| {});
+    }).join().unwrap();
+}
diff --git a/src/test/run-pass/tls-init-on-init.rs b/src/test/run-pass/tls-init-on-init.rs
index b44c535d3a487..0514f3e3698fd 100644
--- a/src/test/run-pass/tls-init-on-init.rs
+++ b/src/test/run-pass/tls-init-on-init.rs
@@ -24,7 +24,7 @@ static CNT: AtomicUsize = ATOMIC_USIZE_INIT;
 impl Foo {
     fn init() -> Foo {
         let cnt = CNT.fetch_add(1, Ordering::SeqCst);
-        if cnt == 0 {
+        if FOO.state() == LocalKeyState::Uninitialized {
             FOO.with(|_| {});
         }
         Foo { cnt: cnt }
@@ -34,20 +34,16 @@
 impl Drop for Foo {
     fn drop(&mut self) {
         if self.cnt == 1 {
-            FOO.with(|foo| assert_eq!(foo.cnt, 0));
+            assert_eq!(FOO.state(), LocalKeyState::Destroyed);
         } else {
             assert_eq!(self.cnt, 0);
-            match FOO.state() {
-                LocalKeyState::Valid => panic!("should not be in valid state"),
-                LocalKeyState::Uninitialized |
-                LocalKeyState::Destroyed => {}
-            }
+            assert_eq!(FOO.state(), LocalKeyState::Valid);
         }
     }
 }
 
 fn main() {
     thread::spawn(|| {
-        FOO.with(|_| {});
+        Foo::init();
     }).join().unwrap();
 }