Commit 5c96369

Committed Apr 22, 2015
Auto merge of #24447 - alexcrichton:audit-thread, r=aturon
Much of this code hasn't been updated in quite some time and this commit does a small audit of the functionality:

* Implementation functions now centralize all functionality on a locally defined `Thread` type.
* The `detach` method has been removed in favor of a `Drop` implementation (sketched below). This notably fixes leaking thread handles on Windows.
* The `Thread` structure is now annotated with `Send` and `Sync`: derived automatically on Windows and via explicit impls on Unix.
* The unsafety of creating a thread has been pushed out to the right boundaries.

Closes #24442
2 parents 3dbfa74 + 2e11009 commit 5c96369
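The detach-on-drop change is the user-visible heart of this audit: a thread handle that is joined must not also be detached, and a handle that is merely dropped must still release its OS resources (previously leaked on Windows). Below is a minimal standalone sketch of that ownership pattern, using a hypothetical `Handle` type and print statements in place of the real `pthread_join`/`pthread_detach` calls; it is an illustration of the pattern, not the libstd code itself.

```rust
use std::mem;

// Hypothetical stand-in for the platform thread handle in this commit.
struct Handle { id: u64 }

impl Handle {
    // Joining consumes the handle; `mem::forget` skips the Drop impl so the
    // thread is not also detached after it has already been joined.
    fn join(self) {
        println!("pthread_join({})", self.id);
        mem::forget(self);
    }
}

impl Drop for Handle {
    // Dropping an un-joined handle detaches it, so nothing leaks.
    fn drop(&mut self) {
        println!("pthread_detach({})", self.id);
    }
}

fn main() {
    Handle { id: 1 }.join();   // prints only pthread_join(1)
    let _h = Handle { id: 2 }; // dropped here: prints pthread_detach(2)
}
```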

5 files changed, +290 -287 lines


src/libstd/sys/common/thread.rs

(+14 -14)

@@ -10,22 +10,22 @@
 
 use prelude::v1::*;
 
-use usize;
+use alloc::boxed::FnBox;
 use libc;
-use thunk::Thunk;
-use sys_common::stack;
 use sys::stack_overflow;
+use sys_common::stack;
+use usize;
 
-// This is the starting point of rust os threads. The first thing we do
-// is make sure that we don't trigger __morestack (also why this has a
-// no_stack_check annotation), and then we extract the main function
-// and invoke it.
 #[no_stack_check]
-pub fn start_thread(main: *mut libc::c_void) {
-    unsafe {
-        stack::record_os_managed_stack_bounds(0, usize::MAX);
-        let _handler = stack_overflow::Handler::new();
-        let main: Box<Thunk> = Box::from_raw(main as *mut Thunk);
-        main();
-    }
+pub unsafe fn start_thread(main: *mut libc::c_void) {
+    // First ensure that we don't trigger __morestack (also why this has a
+    // no_stack_check annotation).
+    stack::record_os_managed_stack_bounds(0, usize::MAX);
+
+    // Next, set up our stack overflow handler which may get triggered if we run
+    // out of stack.
+    let _handler = stack_overflow::Handler::new();
+
+    // Finally, let's run some code.
+    Box::from_raw(main as *mut Box<FnBox()>)()
 }
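The new `Box::from_raw(main as *mut Box<FnBox()>)()` line is the receiving end of a double-boxing trick: the closure is boxed twice so that a single thin `*mut c_void` can carry a fat trait-object pointer across the C ABI. Here is a standalone sketch of the same trampoline pattern, using `Box<dyn FnOnce()>` in place of the `FnBox` trait that is internal to libstd, and calling the trampoline directly instead of from a spawned thread.

```rust
use std::os::raw::c_void;

// The C-compatible entry point a new thread would start in (called directly
// on the current thread here just to demonstrate the mechanism).
extern "C" fn trampoline(arg: *mut c_void) {
    // Re-box the closure; the outer Box is freed when this binding drops.
    let work = unsafe { Box::from_raw(arg as *mut Box<dyn FnOnce()>) };
    work();
}

fn main() {
    let closure: Box<dyn FnOnce()> = Box::new(|| println!("hello from the trampoline"));
    // Double-box and turn into a raw pointer; conceptually this is what gets
    // handed to pthread_create/CreateThread as the `void *` argument.
    let raw = Box::into_raw(Box::new(closure)) as *mut c_void;
    trampoline(raw);
}
```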

src/libstd/sys/unix/thread.rs

(+144 -130)

@@ -10,8 +10,9 @@
 
 #![allow(dead_code)]
 
-use core::prelude::*;
+use prelude::v1::*;
 
+use alloc::boxed::FnBox;
 use cmp;
 use ffi::CString;
 use io;
@@ -20,13 +21,148 @@ use libc;
 use mem;
 use ptr;
 use sys::os;
-use thunk::Thunk;
 use time::Duration;
 
 use sys_common::stack::RED_ZONE;
 use sys_common::thread::*;
 
-pub type rust_thread = libc::pthread_t;
+pub struct Thread {
+    id: libc::pthread_t,
+}
+
+// Some platforms may have pthread_t as a pointer in which case we still want
+// a thread to be Send/Sync
+unsafe impl Send for Thread {}
+unsafe impl Sync for Thread {}
+
+impl Thread {
+    pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
+                          -> io::Result<Thread> {
+        let p = box p;
+        let mut native: libc::pthread_t = mem::zeroed();
+        let mut attr: libc::pthread_attr_t = mem::zeroed();
+        assert_eq!(pthread_attr_init(&mut attr), 0);
+
+        // Reserve room for the red zone, the runtime's stack of last resort.
+        let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr));
+        match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
+            0 => {}
+            n => {
+                assert_eq!(n, libc::EINVAL);
+                // EINVAL means |stack_size| is either too small or not a
+                // multiple of the system page size. Because it's definitely
+                // >= PTHREAD_STACK_MIN, it must be an alignment issue.
+                // Round up to the nearest page and try again.
+                let page_size = os::page_size();
+                let stack_size = (stack_size + page_size - 1) &
+                                 (-(page_size as isize - 1) as usize - 1);
+                let stack_size = stack_size as libc::size_t;
+                assert_eq!(pthread_attr_setstacksize(&mut attr, stack_size), 0);
+            }
+        };
+
+        let ret = pthread_create(&mut native, &attr, thread_start,
+                                 &*p as *const _ as *mut _);
+        assert_eq!(pthread_attr_destroy(&mut attr), 0);
+
+        return if ret != 0 {
+            Err(io::Error::from_raw_os_error(ret))
+        } else {
+            mem::forget(p); // ownership passed to pthread_create
+            Ok(Thread { id: native })
+        };
+
+        #[no_stack_check]
+        extern fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
+            unsafe { start_thread(main); }
+            0 as *mut _
+        }
+    }
+
+    pub fn yield_now() {
+        let ret = unsafe { sched_yield() };
+        debug_assert_eq!(ret, 0);
+    }
+
+    #[cfg(any(target_os = "linux", target_os = "android"))]
+    pub fn set_name(name: &str) {
+        // pthread wrapper only appeared in glibc 2.12, so we use syscall
+        // directly.
+        extern {
+            fn prctl(option: libc::c_int, arg2: libc::c_ulong,
+                     arg3: libc::c_ulong, arg4: libc::c_ulong,
+                     arg5: libc::c_ulong) -> libc::c_int;
+        }
+        const PR_SET_NAME: libc::c_int = 15;
+        let cname = CString::new(name).unwrap_or_else(|_| {
+            panic!("thread name may not contain interior null bytes")
+        });
+        unsafe {
+            prctl(PR_SET_NAME, cname.as_ptr() as libc::c_ulong, 0, 0, 0);
+        }
+    }
+
+    #[cfg(any(target_os = "freebsd",
+              target_os = "dragonfly",
+              target_os = "bitrig",
+              target_os = "openbsd"))]
+    pub fn set_name(name: &str) {
+        extern {
+            fn pthread_set_name_np(tid: libc::pthread_t,
+                                   name: *const libc::c_char);
+        }
+        let cname = CString::new(name).unwrap();
+        unsafe {
+            pthread_set_name_np(pthread_self(), cname.as_ptr());
+        }
+    }
+
+    #[cfg(any(target_os = "macos", target_os = "ios"))]
+    pub fn set_name(name: &str) {
+        extern {
+            fn pthread_setname_np(name: *const libc::c_char) -> libc::c_int;
+        }
+        let cname = CString::new(name).unwrap();
+        unsafe {
+            pthread_setname_np(cname.as_ptr());
+        }
+    }
+
+    pub fn sleep(dur: Duration) {
+        if dur < Duration::zero() {
+            return Thread::yield_now()
+        }
+        let seconds = dur.num_seconds();
+        let ns = dur - Duration::seconds(seconds);
+        let mut ts = libc::timespec {
+            tv_sec: seconds as libc::time_t,
+            tv_nsec: ns.num_nanoseconds().unwrap() as libc::c_long,
+        };
+
+        // If we're awoken with a signal then the return value will be -1 and
+        // nanosleep will fill in `ts` with the remaining time.
+        unsafe {
+            while libc::nanosleep(&ts, &mut ts) == -1 {
+                assert_eq!(os::errno(), libc::EINTR);
+            }
+        }
+    }
+
+    pub fn join(self) {
+        unsafe {
+            let ret = pthread_join(self.id, ptr::null_mut());
+            mem::forget(self);
+            debug_assert_eq!(ret, 0);
+        }
+    }
+}
+
+impl Drop for Thread {
+    fn drop(&mut self) {
+        let ret = unsafe { pthread_detach(self.id) };
+        debug_assert_eq!(ret, 0);
+    }
+}
 
 #[cfg(all(not(target_os = "linux"),
           not(target_os = "macos"),
@@ -183,128 +319,6 @@ pub mod guard {
     }
 }
 
-pub unsafe fn create(stack: usize, p: Thunk) -> io::Result<rust_thread> {
-    let p = box p;
-    let mut native: libc::pthread_t = mem::zeroed();
-    let mut attr: libc::pthread_attr_t = mem::zeroed();
-    assert_eq!(pthread_attr_init(&mut attr), 0);
-
-    // Reserve room for the red zone, the runtime's stack of last resort.
-    let stack_size = cmp::max(stack, RED_ZONE + min_stack_size(&attr) as usize);
-    match pthread_attr_setstacksize(&mut attr, stack_size as libc::size_t) {
-        0 => {}
-        n => {
-            assert_eq!(n, libc::EINVAL);
-            // EINVAL means |stack_size| is either too small or not a
-            // multiple of the system page size. Because it's definitely
-            // >= PTHREAD_STACK_MIN, it must be an alignment issue.
-            // Round up to the nearest page and try again.
-            let page_size = os::page_size();
-            let stack_size = (stack_size + page_size - 1) &
-                             (-(page_size as isize - 1) as usize - 1);
-            assert_eq!(pthread_attr_setstacksize(&mut attr,
-                                                 stack_size as libc::size_t), 0);
-        }
-    };
-
-    let ret = pthread_create(&mut native, &attr, thread_start,
-                             &*p as *const _ as *mut _);
-    assert_eq!(pthread_attr_destroy(&mut attr), 0);
-
-    return if ret != 0 {
-        Err(io::Error::from_raw_os_error(ret))
-    } else {
-        mem::forget(p); // ownership passed to pthread_create
-        Ok(native)
-    };
-
-    #[no_stack_check]
-    extern fn thread_start(main: *mut libc::c_void) -> *mut libc::c_void {
-        start_thread(main);
-        0 as *mut _
-    }
-}
-
-#[cfg(any(target_os = "linux", target_os = "android"))]
-pub unsafe fn set_name(name: &str) {
-    // pthread wrapper only appeared in glibc 2.12, so we use syscall directly.
-    extern {
-        fn prctl(option: libc::c_int, arg2: libc::c_ulong, arg3: libc::c_ulong,
-                 arg4: libc::c_ulong, arg5: libc::c_ulong) -> libc::c_int;
-    }
-    const PR_SET_NAME: libc::c_int = 15;
-    let cname = CString::new(name).unwrap_or_else(|_| {
-        panic!("thread name may not contain interior null bytes")
-    });
-    prctl(PR_SET_NAME, cname.as_ptr() as libc::c_ulong, 0, 0, 0);
-}
-
-#[cfg(any(target_os = "freebsd",
-          target_os = "dragonfly",
-          target_os = "bitrig",
-          target_os = "openbsd"))]
-pub unsafe fn set_name(name: &str) {
-    extern {
-        fn pthread_set_name_np(tid: libc::pthread_t, name: *const libc::c_char);
-    }
-    let cname = CString::new(name).unwrap();
-    pthread_set_name_np(pthread_self(), cname.as_ptr());
-}
-
-#[cfg(any(target_os = "macos", target_os = "ios"))]
-pub unsafe fn set_name(name: &str) {
-    extern {
-        fn pthread_setname_np(name: *const libc::c_char) -> libc::c_int;
-    }
-    let cname = CString::new(name).unwrap();
-    pthread_setname_np(cname.as_ptr());
-}
-
-pub unsafe fn join(native: rust_thread) {
-    assert_eq!(pthread_join(native, ptr::null_mut()), 0);
-}
-
-pub unsafe fn detach(native: rust_thread) {
-    assert_eq!(pthread_detach(native), 0);
-}
-
-pub unsafe fn yield_now() {
-    assert_eq!(sched_yield(), 0);
-}
-
-pub fn sleep(dur: Duration) {
-    unsafe {
-        if dur < Duration::zero() {
-            return yield_now()
-        }
-        let seconds = dur.num_seconds();
-        let ns = dur - Duration::seconds(seconds);
-        let mut ts = libc::timespec {
-            tv_sec: seconds as libc::time_t,
-            tv_nsec: ns.num_nanoseconds().unwrap() as libc::c_long,
-        };
-        // If we're awoken with a signal then the return value will be -1 and
-        // nanosleep will fill in `ts` with the remaining time.
-        while dosleep(&mut ts) == -1 {
-            assert_eq!(os::errno(), libc::EINTR);
-        }
-    }
-
-    #[cfg(target_os = "linux")]
-    unsafe fn dosleep(ts: *mut libc::timespec) -> libc::c_int {
-        extern {
-            fn clock_nanosleep(clock_id: libc::c_int, flags: libc::c_int,
-                               request: *const libc::timespec,
-                               remain: *mut libc::timespec) -> libc::c_int;
-        }
-        clock_nanosleep(libc::CLOCK_MONOTONIC, 0, ts, ts)
-    }
-    #[cfg(not(target_os = "linux"))]
-    unsafe fn dosleep(ts: *mut libc::timespec) -> libc::c_int {
-        libc::nanosleep(ts, ts)
-    }
-}
-
 // glibc >= 2.15 has a __pthread_get_minstack() function that returns
 // PTHREAD_STACK_MIN plus however many bytes are needed for thread-local
 // storage. We need that information to avoid blowing up when a small stack
@@ -319,7 +333,7 @@ pub fn sleep(dur: Duration) {
 // but that caused Debian to detect an unnecessarily strict versioned
 // dependency on libc6 (#23628).
 #[cfg(target_os = "linux")]
-fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
+fn min_stack_size(attr: *const libc::pthread_attr_t) -> usize {
     use dynamic_lib::DynamicLibrary;
     use sync::{Once, ONCE_INIT};
 
@@ -337,16 +351,16 @@ fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t {
     });
 
     match unsafe { __pthread_get_minstack } {
-        None => PTHREAD_STACK_MIN,
-        Some(f) => unsafe { f(attr) },
+        None => PTHREAD_STACK_MIN as usize,
+        Some(f) => unsafe { f(attr) as usize },
    }
 }
 
 // No point in looking up __pthread_get_minstack() on non-glibc
 // platforms.
 #[cfg(not(target_os = "linux"))]
-fn min_stack_size(_: *const libc::pthread_attr_t) -> libc::size_t {
-    PTHREAD_STACK_MIN
+fn min_stack_size(_: *const libc::pthread_attr_t) -> usize {
+    PTHREAD_STACK_MIN as usize
 }
 
 extern {
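The `EINVAL` fallback in `Thread::new` rounds the requested stack size up to a page boundary; the expression `(stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1)` is the usual round-up-to-a-power-of-two mask written in a roundabout way. A small standalone sketch that checks the equivalence for a conventional 4096-byte page (the concrete page size is an assumption for illustration only):

```rust
fn round_up(stack_size: usize, page_size: usize) -> usize {
    // The exact expression used in the diff above.
    (stack_size + page_size - 1) & (-(page_size as isize - 1) as usize - 1)
}

fn main() {
    let page = 4096;
    // -(page - 1) as usize - 1 == !(page - 1), so this is the familiar
    // (x + page - 1) & !(page - 1) round-up to the next page multiple.
    assert_eq!(round_up(1, page), 4096);
    assert_eq!(round_up(4096, page), 4096);
    assert_eq!(round_up(4097, page), 8192);
    println!("stack sizes rounded to page multiples");
}
```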

src/libstd/sys/windows/c.rs

(+11 -0)

@@ -471,6 +471,17 @@ extern "system" {
                        hWritePipe: libc::LPHANDLE,
                        lpPipeAttributes: libc::LPSECURITY_ATTRIBUTES,
                        nSize: libc::DWORD) -> libc::BOOL;
+    pub fn CreateThread(lpThreadAttributes: libc::LPSECURITY_ATTRIBUTES,
+                        dwStackSize: libc::SIZE_T,
+                        lpStartAddress: extern "system" fn(*mut libc::c_void)
+                                                           -> libc::DWORD,
+                        lpParameter: libc::LPVOID,
+                        dwCreationFlags: libc::DWORD,
+                        lpThreadId: libc::LPDWORD) -> libc::HANDLE;
+    pub fn WaitForSingleObject(hHandle: libc::HANDLE,
+                               dwMilliseconds: libc::DWORD) -> libc::DWORD;
+    pub fn SwitchToThread() -> libc::BOOL;
+    pub fn Sleep(dwMilliseconds: libc::DWORD);
 }
 
 #[link(name = "userenv")]

src/libstd/sys/windows/thread.rs

(+66 -77)

@@ -10,102 +10,91 @@
 
 use prelude::v1::*;
 
+use alloc::boxed::FnBox;
 use cmp;
 use io;
-use libc::{self, c_void};
-use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
-                                   LPVOID, DWORD, LPDWORD, HANDLE};
+use libc::{self, c_void, DWORD};
 use mem;
 use ptr;
+use sys::c;
+use sys::handle::Handle;
 use sys_common::stack::RED_ZONE;
 use sys_common::thread::*;
-use thunk::Thunk;
 use time::Duration;
 
-pub type rust_thread = HANDLE;
-
-pub mod guard {
-    pub unsafe fn main() -> usize { 0 }
-    pub unsafe fn current() -> usize { 0 }
-    pub unsafe fn init() {}
+pub struct Thread {
+    handle: Handle
 }
 
-pub unsafe fn create(stack: usize, p: Thunk) -> io::Result<rust_thread> {
-    let p = box p;
-    // FIXME On UNIX, we guard against stack sizes that are too small but
-    // that's because pthreads enforces that stacks are at least
-    // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
-    // just that below a certain threshold you can't do anything useful.
-    // That threshold is application and architecture-specific, however.
-    // For now, the only requirement is that it's big enough to hold the
-    // red zone. Round up to the next 64 kB because that's what the NT
-    // kernel does, might as well make it explicit. With the current
-    // 20 kB red zone, that makes for a 64 kB minimum stack.
-    let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
-    let ret = CreateThread(ptr::null_mut(), stack_size as libc::size_t,
-                           thread_start, &*p as *const _ as *mut _,
-                           0, ptr::null_mut());
+impl Thread {
+    pub unsafe fn new<'a>(stack: usize, p: Box<FnBox() + 'a>)
+                          -> io::Result<Thread> {
+        let p = box p;
 
-    return if ret as usize == 0 {
-        Err(io::Error::last_os_error())
-    } else {
-        mem::forget(p); // ownership passed to CreateThread
-        Ok(ret)
-    };
+        // FIXME On UNIX, we guard against stack sizes that are too small but
+        // that's because pthreads enforces that stacks are at least
+        // PTHREAD_STACK_MIN bytes big. Windows has no such lower limit, it's
+        // just that below a certain threshold you can't do anything useful.
+        // That threshold is application and architecture-specific, however.
+        // For now, the only requirement is that it's big enough to hold the
+        // red zone. Round up to the next 64 kB because that's what the NT
+        // kernel does, might as well make it explicit. With the current
+        // 20 kB red zone, that makes for a 64 kB minimum stack.
+        let stack_size = (cmp::max(stack, RED_ZONE) + 0xfffe) & (-0xfffe - 1);
+        let ret = c::CreateThread(ptr::null_mut(), stack_size as libc::size_t,
+                                  thread_start, &*p as *const _ as *mut _,
+                                  0, ptr::null_mut());
 
-    #[no_stack_check]
-    extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
-        start_thread(main);
-        0
-    }
-}
+        return if ret as usize == 0 {
+            Err(io::Error::last_os_error())
+        } else {
+            mem::forget(p); // ownership passed to CreateThread
+            Ok(Thread { handle: Handle::new(ret) })
+        };
 
-pub unsafe fn set_name(_name: &str) {
-    // Windows threads are nameless
-    // The names in MSVC debugger are obtained using a "magic" exception,
-    // which requires a use of MS C++ extensions.
-    // See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
-}
+        #[no_stack_check]
+        extern "system" fn thread_start(main: *mut libc::c_void) -> DWORD {
+            unsafe { start_thread(main); }
+            0
+        }
+    }
 
-pub unsafe fn join(native: rust_thread) {
-    use libc::consts::os::extra::INFINITE;
-    WaitForSingleObject(native, INFINITE);
-}
+    pub fn set_name(_name: &str) {
+        // Windows threads are nameless
+        // The names in MSVC debugger are obtained using a "magic" exception,
+        // which requires a use of MS C++ extensions.
+        // See https://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
+    }
 
-pub unsafe fn detach(native: rust_thread) {
-    assert!(libc::CloseHandle(native) != 0);
-}
+    pub fn join(self) {
+        use libc::consts::os::extra::INFINITE;
+        unsafe { c::WaitForSingleObject(self.handle.raw(), INFINITE); }
+    }
 
-pub unsafe fn yield_now() {
-    // This function will return 0 if there are no other threads to execute,
-    // but this also means that the yield was useless so this isn't really a
-    // case that needs to be worried about.
-    SwitchToThread();
-}
+    pub fn yield_now() {
+        // This function will return 0 if there are no other threads to execute,
+        // but this also means that the yield was useless so this isn't really a
+        // case that needs to be worried about.
+        unsafe { c::SwitchToThread(); }
+    }
 
-pub fn sleep(dur: Duration) {
-    unsafe {
-        if dur < Duration::zero() {
-            return yield_now()
+    pub fn sleep(dur: Duration) {
+        unsafe {
+            if dur < Duration::zero() {
+                return Thread::yield_now()
+            }
+            let ms = dur.num_milliseconds();
+            // if we have a fractional number of milliseconds then add an extra
+            // millisecond to sleep for
+            let extra = dur - Duration::milliseconds(ms);
+            let ms = ms + if extra.is_zero() {0} else {1};
+            c::Sleep(ms as DWORD);
         }
-        let ms = dur.num_milliseconds();
-        // if we have a fractional number of milliseconds then add an extra
-        // millisecond to sleep for
-        let extra = dur - Duration::milliseconds(ms);
-        let ms = ms + if extra.is_zero() {0} else {1};
-        Sleep(ms as DWORD);
     }
 }
 
-#[allow(non_snake_case)]
-extern "system" {
-    fn CreateThread(lpThreadAttributes: LPSECURITY_ATTRIBUTES,
-                    dwStackSize: SIZE_T,
-                    lpStartAddress: extern "system" fn(*mut c_void) -> DWORD,
-                    lpParameter: LPVOID,
-                    dwCreationFlags: DWORD,
-                    lpThreadId: LPDWORD) -> HANDLE;
-    fn WaitForSingleObject(hHandle: HANDLE, dwMilliseconds: DWORD) -> DWORD;
-    fn SwitchToThread() -> BOOL;
-    fn Sleep(dwMilliseconds: DWORD);
+pub mod guard {
+    pub unsafe fn main() -> usize { 0 }
+    pub unsafe fn current() -> usize { 0 }
+    pub unsafe fn init() {}
 }
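`sleep` here converts the `Duration` to whole milliseconds for `Sleep`, adding one extra millisecond whenever there is a fractional remainder so the thread never sleeps for less than requested. A standalone sketch of that rounding using today's `std::time::Duration` (the 2015 code in the diff uses the old `time::Duration` API instead):

```rust
use std::time::Duration;

// Round a duration up to whole milliseconds, as the Windows `sleep` above
// does before handing the value to `Sleep(ms)`.
fn to_whole_ms(dur: Duration) -> u64 {
    let ms = dur.as_millis() as u64;
    let extra = dur - Duration::from_millis(ms);
    if extra > Duration::ZERO { ms + 1 } else { ms }
}

fn main() {
    assert_eq!(to_whole_ms(Duration::from_micros(1500)), 2); // 1.5 ms -> 2 ms
    assert_eq!(to_whole_ms(Duration::from_millis(3)), 3);    // exact -> unchanged
    println!("durations rounded up to whole milliseconds");
}
```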

src/libstd/thread/mod.rs

(+55 -66)

@@ -190,6 +190,7 @@
 
 use prelude::v1::*;
 
+use alloc::boxed::FnBox;
 use any::Any;
 use cell::UnsafeCell;
 use fmt;
@@ -199,7 +200,6 @@ use rt::{self, unwind};
 use sync::{Mutex, Condvar, Arc};
 use sys::thread as imp;
 use sys_common::{stack, thread_info};
-use thunk::Thunk;
 use time::Duration;
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -276,7 +276,9 @@ impl Builder {
     pub fn spawn<F, T>(self, f: F) -> io::Result<JoinHandle<T>> where
         F: FnOnce() -> T, F: Send + 'static, T: Send + 'static
     {
-        self.spawn_inner(Box::new(f)).map(|i| JoinHandle(i))
+        unsafe {
+            self.spawn_inner(Box::new(f)).map(JoinHandle)
+        }
     }
 
     /// Spawns a new child thread that must be joined within a given
@@ -299,21 +301,27 @@ impl Builder {
     pub fn scoped<'a, T, F>(self, f: F) -> io::Result<JoinGuard<'a, T>> where
         T: Send + 'a, F: FnOnce() -> T, F: Send + 'a
     {
-        self.spawn_inner(Box::new(f)).map(|inner| {
-            JoinGuard { inner: inner, _marker: PhantomData }
-        })
+        unsafe {
+            self.spawn_inner(Box::new(f)).map(|inner| {
+                JoinGuard { inner: inner, _marker: PhantomData }
+            })
+        }
     }
 
-    fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> io::Result<JoinInner<T>> {
+    // NB: this function is unsafe as the lifetime parameter of the code to run
+    // in the new thread is not tied into the return value, and the return
+    // value must not outlast that lifetime.
+    unsafe fn spawn_inner<'a, T: Send>(self, f: Box<FnBox() -> T + Send + 'a>)
                                        -> io::Result<JoinInner<T>> {
        let Builder { name, stack_size } = self;
 
         let stack_size = stack_size.unwrap_or(rt::min_stack());
 
         let my_thread = Thread::new(name);
         let their_thread = my_thread.clone();
 
-        let my_packet = Packet(Arc::new(UnsafeCell::new(None)));
-        let their_packet = Packet(my_packet.0.clone());
+        let my_packet = Arc::new(UnsafeCell::new(None));
+        let their_packet = my_packet.clone();
 
         // Spawning a new OS thread guarantees that __morestack will never get
         // triggered, but we must manually set up the actual stack bounds once
@@ -326,48 +334,27 @@ impl Builder {
             let addr = &something_around_the_top_of_the_stack as *const i32;
             let my_stack_top = addr as usize;
             let my_stack_bottom = my_stack_top - stack_size + 1024;
-            unsafe {
-                if let Some(name) = their_thread.name() {
-                    imp::set_name(name);
-                }
-                stack::record_os_managed_stack_bounds(my_stack_bottom,
-                                                      my_stack_top);
-                thread_info::set(imp::guard::current(), their_thread);
+            stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top);
+
+            if let Some(name) = their_thread.name() {
+                imp::Thread::set_name(name);
             }
+            thread_info::set(imp::guard::current(), their_thread);
 
-            let mut output: Option<T> = None;
+            let mut output = None;
             let try_result = {
                 let ptr = &mut output;
-
-                // There are two primary reasons that general try/catch is
-                // unsafe. The first is that we do not support nested
-                // try/catch. The fact that this is happening in a newly-spawned
-                // thread suffices. The second is that unwinding while unwinding
-                // is not defined. We take care of that by having an
-                // 'unwinding' flag in the thread itself. For these reasons,
-                // this unsafety should be ok.
-                unsafe {
-                    unwind::try(move || {
-                        let f: Thunk<(), T> = f;
-                        let v: T = f();
-                        *ptr = Some(v)
-                    })
-                }
+                unwind::try(move || *ptr = Some(f()))
             };
-            unsafe {
-                *their_packet.0.get() = Some(match (output, try_result) {
-                    (Some(data), Ok(_)) => Ok(data),
-                    (None, Err(cause)) => Err(cause),
-                    _ => unreachable!()
-                });
-            }
+            *their_packet.get() = Some(try_result.map(|()| {
+                output.unwrap()
+            }));
         };
 
         Ok(JoinInner {
-            native: try!(unsafe { imp::create(stack_size, Box::new(main)) }),
+            native: Some(try!(imp::Thread::new(stack_size, Box::new(main)))),
             thread: my_thread,
-            packet: my_packet,
-            joined: false,
+            packet: Packet(my_packet),
         })
     }
 }
@@ -427,7 +414,7 @@ pub fn current() -> Thread {
 /// Cooperatively gives up a timeslice to the OS scheduler.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn yield_now() {
-    unsafe { imp::yield_now() }
+    imp::Thread::yield_now()
 }
 
 /// Determines whether the current thread is unwinding because of panic.
@@ -494,7 +481,7 @@ pub fn catch_panic<F, R>(f: F) -> Result<R>
 /// spurious wakeup.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn sleep_ms(ms: u32) {
-    imp::sleep(Duration::milliseconds(ms as i64))
+    imp::Thread::sleep(Duration::milliseconds(ms as i64))
 }
 
 /// Blocks unless or until the current thread's token is made available (may wake spuriously).
@@ -548,8 +535,6 @@ struct Inner {
     cvar: Condvar,
 }
 
-unsafe impl Sync for Inner {}
-
 #[derive(Clone)]
 #[stable(feature = "rust1", since = "1.0.0")]
 /// A handle to a thread.
@@ -610,24 +595,33 @@ impl thread_info::NewThread for Thread {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub type Result<T> = ::result::Result<T, Box<Any + Send + 'static>>;
 
+// This packet is used to communicate the return value between the child thread
+// and the parent thread. Memory is shared through the `Arc` within and there's
+// no need for a mutex here because synchronization happens with `join()` (the
+// parent thread never reads this packet until the child has exited).
+//
+// This packet itself is then stored into a `JoinInner` which in turns is placed
+// in `JoinHandle` and `JoinGuard`. Due to the usage of `UnsafeCell` we need to
+// manually worry about impls like Send and Sync. The type `T` should
+// already always be Send (otherwise the thread could not have been created) and
+// this type is inherently Sync because no methods take &self. Regardless,
+// however, we add inheriting impls for Send/Sync to this type to ensure it's
+// Send/Sync and that future modifications will still appropriately classify it.
 struct Packet<T>(Arc<UnsafeCell<Option<Result<T>>>>);
 
-unsafe impl<T:Send> Send for Packet<T> {}
-unsafe impl<T> Sync for Packet<T> {}
+unsafe impl<T: Send> Send for Packet<T> {}
+unsafe impl<T: Sync> Sync for Packet<T> {}
 
 /// Inner representation for JoinHandle and JoinGuard
 struct JoinInner<T> {
-    native: imp::rust_thread,
+    native: Option<imp::Thread>,
     thread: Thread,
     packet: Packet<T>,
-    joined: bool,
 }
 
 impl<T> JoinInner<T> {
     fn join(&mut self) -> Result<T> {
-        assert!(!self.joined);
-        unsafe { imp::join(self.native) };
-        self.joined = true;
+        self.native.take().unwrap().join();
         unsafe {
             (*self.packet.0.get()).take().unwrap()
         }
@@ -662,16 +656,6 @@ impl<T> JoinHandle<T> {
     }
 }
 
-#[stable(feature = "rust1", since = "1.0.0")]
-#[unsafe_destructor]
-impl<T> Drop for JoinHandle<T> {
-    fn drop(&mut self) {
-        if !self.0.joined {
-            unsafe { imp::detach(self.0.native) }
-        }
-    }
-}
-
 /// An RAII-style guard that will block until thread termination when dropped.
 ///
 /// The type `T` is the return type for the thread's main function.
@@ -720,14 +704,19 @@ impl<'a, T: Send + 'a> JoinGuard<'a, T> {
            reason = "memory unsafe if destructor is avoided, see #24292")]
 impl<'a, T: Send + 'a> Drop for JoinGuard<'a, T> {
     fn drop(&mut self) {
-        if !self.inner.joined {
-            if self.inner.join().is_err() {
-                panic!("child thread {:?} panicked", self.thread());
-            }
+        if self.inner.native.is_some() && self.inner.join().is_err() {
+            panic!("child thread {:?} panicked", self.thread());
         }
     }
 }
 
+fn _assert_sync_and_send() {
+    fn _assert_both<T: Send + Sync>() {}
+    _assert_both::<JoinHandle<()>>();
+    _assert_both::<JoinGuard<()>>();
+    _assert_both::<Thread>();
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // Tests
 ////////////////////////////////////////////////////////////////////////////////
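The `_assert_sync_and_send` function added at the end of this module is a compile-time check: instantiating a generic function bounded by `Send + Sync` forces the compiler to prove the auto-trait impls, so accidentally making `JoinHandle`, `JoinGuard`, or `Thread` non-`Send` in a later change becomes a build error rather than a latent bug. A self-contained sketch of the same pattern with ordinary standard-library types:

```rust
// Monomorphizing this function for a type T asserts, at compile time,
// that T implements both Send and Sync.
fn assert_send_sync<T: Send + Sync>() {}

fn main() {
    assert_send_sync::<Vec<u8>>();
    assert_send_sync::<String>();
    // assert_send_sync::<std::rc::Rc<u8>>(); // would not compile: Rc is neither Send nor Sync
    println!("Send/Sync assertions hold");
}
```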
