diff --git a/src/etc/licenseck.py b/src/etc/licenseck.py index b5a721c03ff09..afbf34d07535d 100644 --- a/src/etc/licenseck.py +++ b/src/etc/licenseck.py @@ -41,6 +41,7 @@ "libstd/sync/mpsc_queue.rs", # BSD "libstd/sync/spsc_queue.rs", # BSD "libstd/sync/mpmc_bounded_queue.rs", # BSD + "libextra/sync/mpsc_intrusive.rs", # BSD ] def check_license(name, contents): diff --git a/src/libextra/sync.rs b/src/libextra/sync/mod.rs similarity index 98% rename from src/libextra/sync.rs rename to src/libextra/sync/mod.rs index 0a29fd1898229..21ebcf1272009 100644 --- a/src/libextra/sync.rs +++ b/src/libextra/sync/mod.rs @@ -17,9 +17,8 @@ * in std. */ - +use std::cast; use std::comm; -use std::unstable::sync::Exclusive; use std::sync::arc::UnsafeArc; use std::sync::atomics; use std::unstable::finally::Finally; @@ -32,6 +31,10 @@ use arc::MutexArc; * Internals ****************************************************************************/ +pub mod mutex; +pub mod one; +mod mpsc_intrusive; + // Each waiting task receives on one of these. #[doc(hidden)] type WaitEnd = Port<()>; @@ -54,7 +57,7 @@ impl WaitQueue { comm::Data(ch) => { // Send a wakeup signal. If the waiter was killed, its port will // have closed. Keep trying until we get a live task. - if ch.try_send_deferred(()) { + if ch.try_send(()) { true } else { self.signal() @@ -69,7 +72,7 @@ impl WaitQueue { loop { match self.head.try_recv() { comm::Data(ch) => { - if ch.try_send_deferred(()) { + if ch.try_send(()) { count += 1; } } @@ -81,14 +84,14 @@ impl WaitQueue { fn wait_end(&self) -> WaitEnd { let (wait_end, signal_end) = Chan::new(); - assert!(self.tail.try_send_deferred(signal_end)); + assert!(self.tail.try_send(signal_end)); wait_end } } // The building-block used to make semaphores, mutexes, and rwlocks. -#[doc(hidden)] struct SemInner { + lock: mutex::Mutex, count: int, waiters: WaitQueue, // Can be either unit or another waitqueue. Some sems shouldn't come with @@ -96,21 +99,30 @@ struct SemInner { blocked: Q } -#[doc(hidden)] -struct Sem(Exclusive>); +struct Sem(UnsafeArc>); #[doc(hidden)] impl Sem { fn new(count: int, q: Q) -> Sem { - Sem(Exclusive::new(SemInner { - count: count, waiters: WaitQueue::new(), blocked: q })) + Sem(UnsafeArc::new(SemInner { + count: count, + waiters: WaitQueue::new(), + blocked: q, + lock: mutex::Mutex::new(), + })) + } + + unsafe fn with(&self, f: |&mut SemInner|) { + let Sem(ref arc) = *self; + let state = arc.get(); + let _g = (*state).lock.lock(); + f(cast::transmute(state)); } pub fn acquire(&self) { unsafe { let mut waiter_nobe = None; - let Sem(ref lock) = *self; - lock.with(|state| { + self.with(|state| { state.count -= 1; if state.count < 0 { // Create waiter nobe, enqueue ourself, and tell @@ -129,8 +141,7 @@ impl Sem { pub fn release(&self) { unsafe { - let Sem(ref lock) = *self; - lock.with(|state| { + self.with(|state| { state.count += 1; if state.count <= 0 { state.waiters.signal(); @@ -210,8 +221,7 @@ impl<'a> Condvar<'a> { let mut out_of_bounds = None; // Release lock, 'atomically' enqueuing ourselves in so doing. unsafe { - let Sem(ref queue) = *self.sem; - queue.with(|state| { + self.sem.with(|state| { if condvar_id < state.blocked.len() { // Drop the lock. 
state.count += 1; @@ -253,8 +263,7 @@ impl<'a> Condvar<'a> { unsafe { let mut out_of_bounds = None; let mut result = false; - let Sem(ref lock) = *self.sem; - lock.with(|state| { + self.sem.with(|state| { if condvar_id < state.blocked.len() { result = state.blocked[condvar_id].signal(); } else { @@ -276,8 +285,7 @@ impl<'a> Condvar<'a> { let mut out_of_bounds = None; let mut queue = None; unsafe { - let Sem(ref lock) = *self.sem; - lock.with(|state| { + self.sem.with(|state| { if condvar_id < state.blocked.len() { // To avoid :broadcast_heavy, we make a new waitqueue, // swap it out with the old one, and broadcast on the diff --git a/src/libextra/sync/mpsc_intrusive.rs b/src/libextra/sync/mpsc_intrusive.rs new file mode 100644 index 0000000000000..0f13a4980d919 --- /dev/null +++ b/src/libextra/sync/mpsc_intrusive.rs @@ -0,0 +1,139 @@ +/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO + * EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Dmitry Vyukov. + */ + +//! A mostly lock-free multi-producer, single consumer queue. +//! +//! This module implements an intrusive MPSC queue. This queue is incredibly +//! unsafe (due to use of unsafe pointers for nodes), and hence is not public. + +// http://www.1024cores.net/home/lock-free-algorithms +// /queues/intrusive-mpsc-node-based-queue + +use std::cast; +use std::sync::atomics; + +// NB: all links are done as AtomicUint instead of AtomicPtr to allow for static +// initialization. 
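Using `AtomicUint` for the links is what lets a queue of this type be embedded directly in a `static`; the `MUTEX_INIT` constant in `mutex.rs` later in this patch does exactly that. A minimal sketch of such a static (the name `WAITERS` is hypothetical; the fields match the types defined below):

```rust
// A statically initialized intrusive queue of uint payloads. This is only
// possible because every link is an AtomicUint with a constant initializer.
static mut WAITERS: Queue<uint> = Queue {
    head: atomics::INIT_ATOMIC_UINT,
    tail: 0 as *mut Node<uint>,
    stub: DummyNode {
        next: atomics::INIT_ATOMIC_UINT,
    },
};
```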
+ +pub struct Node { + next: atomics::AtomicUint, + data: T, +} + +pub struct DummyNode { + next: atomics::AtomicUint, +} + +pub struct Queue { + head: atomics::AtomicUint, + tail: *mut Node, + stub: DummyNode, +} + +impl Queue { + pub fn new() -> Queue { + Queue { + head: atomics::AtomicUint::new(0), + tail: 0 as *mut Node, + stub: DummyNode { + next: atomics::AtomicUint::new(0), + }, + } + } + + pub unsafe fn push(&mut self, node: *mut Node) { + (*node).next.store(0, atomics::Release); + let prev = self.head.swap(node as uint, atomics::AcqRel); + + // Note that this code is slightly modified to allow static + // initialization of these queues with rust's flavor of static + // initialization. + if prev == 0 { + self.stub.next.store(node as uint, atomics::Release); + } else { + let prev = prev as *mut Node; + (*prev).next.store(node as uint, atomics::Release); + } + } + + /// You'll note that the other MPSC queue in std::sync is non-intrusive and + /// returns a `PopResult` here to indicate when the queue is inconsistent. + /// An "inconsistent state" in the other queue means that a pusher has + /// pushed, but it hasn't finished linking the rest of the chain. + /// + /// This queue also suffers from this problem, but I currently haven't been + /// able to detangle when this actually happens. This code is translated + /// verbatim from the website above, and is more complicated than the + /// non-intrusive version. + /// + /// Right now consumers of this queue must be ready for this fact. Just + /// because `pop` returns `None` does not mean that there is not data + /// on the queue. + pub unsafe fn pop(&mut self) -> Option<*mut Node> { + let tail = self.tail; + let mut tail = if !tail.is_null() {tail} else { + cast::transmute(&self.stub) + }; + let mut next = (*tail).next(atomics::Relaxed); + if tail as uint == &self.stub as *DummyNode as uint { + if next.is_null() { + return None; + } + self.tail = next; + tail = next; + next = (*next).next(atomics::Relaxed); + } + if !next.is_null() { + self.tail = next; + return Some(tail); + } + let head = self.head.load(atomics::Acquire) as *mut Node; + if tail != head { + return None; + } + let stub = cast::transmute(&self.stub); + self.push(stub); + next = (*tail).next(atomics::Relaxed); + if !next.is_null() { + self.tail = next; + return Some(tail); + } + return None + } +} + +impl Node { + pub fn new(t: T) -> Node { + Node { + data: t, + next: atomics::AtomicUint::new(0), + } + } + pub unsafe fn next(&mut self, ord: atomics::Ordering) -> *mut Node { + cast::transmute::>(self.next.load(ord)) + } +} diff --git a/src/libextra/sync/mutex.rs b/src/libextra/sync/mutex.rs new file mode 100644 index 0000000000000..7ea98c0741a29 --- /dev/null +++ b/src/libextra/sync/mutex.rs @@ -0,0 +1,557 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A proper mutex implementation regardless of the "flavor of task" which is +//! acquiring the lock. + +// # Implementation of Rust mutexes +// +// Most answers to the question of "how do I use a mutex" are "use pthreads", +// but for Rust this isn't quite sufficient. 
Green threads cannot acquire an OS +// mutex because they can context switch among many OS threads, leading to +// deadlocks with other green threads. +// +// Another problem for green threads grabbing an OS mutex is that POSIX dictates +// that unlocking a mutex on a different thread from where it was locked is +// undefined behavior. Remember that green threads can migrate among OS threads, +// so this would mean that we would have to pin green threads to OS threads, +// which is less than ideal. +// +// ## Using deschedule/reawaken +// +// We already have primitives for descheduling/reawakening tasks, so they're the +// first obvious choice when implementing a mutex. The idea would be to have a +// concurrent queue that everyone is pushed on to, and then the owner of the +// mutex is the one popping from the queue. +// +// Unfortunately, this is not very performant for native tasks. The suspected +// reason for this is that each native thread is suspended on its own condition +// variable, unique from all the other threads. In this situation, the kernel +// has no idea what the scheduling semantics are of the user program, so all of +// the threads are distributed among all cores on the system. This ends up +// having very expensive wakeups of remote cores high up in the profile when +// handing off the mutex among native tasks. On the other hand, when using an OS +// mutex, the kernel knows that all native threads are contended on the same +// mutex, so they're in theory all migrated to a single core (fast context +// switching). +// +// ## Mixing implementations +// +// From that above information, we have two constraints. The first is that +// green threads can't touch os mutexes, and the second is that native tasks +// pretty much *must* touch an os mutex. +// +// As a compromise, the queueing implementation is used for green threads and +// the os mutex is used for native threads (why not have both?). This ends up +// leading to fairly decent performance for both native threads and green +// threads on various workloads (uncontended and contended). +// +// The crux of this implementation is an atomic work which is CAS'd on many many +// times in order to manage a few flags about who's blocking where and whether +// it's locked or not. + +use std::rt::local::Local; +use std::rt::task::{BlockedTask, Task}; +use std::rt::thread::Thread; +use std::sync::atomics; +use std::unstable::mutex; + +use q = sync::mpsc_intrusive; + +pub static LOCKED: uint = 1 << 0; +pub static GREEN_BLOCKED: uint = 1 << 1; +pub static NATIVE_BLOCKED: uint = 1 << 2; + +/// A mutual exclusion primitive useful for protecting shared data +/// +/// This mutex is an implementation of a lock for all flavors of tasks which may +/// be grabbing. A common problem with green threads is that they cannot grab +/// locks (if they reschedule during the lock a contender could deadlock the +/// system), but this mutex does *not* suffer this problem. +/// +/// This mutex will properly block tasks waiting for the lock to become +/// available. The mutex can also be statically initialized or created via a +/// `new` constructor. 
+/// +/// # Example +/// +/// ```rust +/// use extra::sync::mutex::Mutex; +/// +/// let mut m = Mutex::new(); +/// let guard = m.lock(); +/// // do some work +/// drop(guard); // unlock the lock +/// ``` +pub struct Mutex { + priv lock: StaticMutex, +} + +#[deriving(Eq)] +enum Flavor { + Unlocked, + TryLockAcquisition, + GreenAcquisition, + NativeAcquisition, +} + +/// The static mutex type is provided to allow for static allocation of mutexes. +/// +/// Note that this is a separate type because using a Mutex correctly means that +/// it needs to have a destructor run. In Rust, statics are not allowed to have +/// destructors. As a result, a `StaticMutex` has one extra method when compared +/// to a `Mutex`, a `destroy` method. This method is unsafe to call, and +/// documentation can be found directly on the method. +/// +/// # Example +/// +/// ```rust +/// use extra::sync::mutex::{StaticMutex, MUTEX_INIT}; +/// +/// static mut LOCK: StaticMutex = MUTEX_INIT; +/// +/// unsafe { +/// let _g = LOCK.lock(); +/// // do some productive work +/// } +/// // lock is unlocked here. +/// ``` +pub struct StaticMutex { + /// Current set of flags on this mutex + priv state: atomics::AtomicUint, + /// Type of locking operation currently on this mutex + priv flavor: Flavor, + /// uint-cast of the green thread waiting for this mutex + priv green_blocker: uint, + /// uint-cast of the native thread waiting for this mutex + priv native_blocker: uint, + /// an OS mutex used by native threads + priv lock: mutex::Mutex, + + /// A concurrent mpsc queue used by green threads, along with a count used + /// to figure out when to dequeue and enqueue. + priv q: q::Queue, + priv green_cnt: atomics::AtomicUint, +} + +/// An RAII implementation of a "scoped lock" of a mutex. When this structure is +/// dropped (falls out of scope), the lock will be unlocked. +pub struct Guard<'a> { + priv lock: &'a mut StaticMutex, +} + +/// Static initialization of a mutex. This constant can be used to initialize +/// other mutex constants. +pub static MUTEX_INIT: StaticMutex = StaticMutex { + lock: mutex::MUTEX_INIT, + state: atomics::INIT_ATOMIC_UINT, + flavor: Unlocked, + green_blocker: 0, + native_blocker: 0, + green_cnt: atomics::INIT_ATOMIC_UINT, + q: q::Queue { + head: atomics::INIT_ATOMIC_UINT, + tail: 0 as *mut q::Node, + stub: q::DummyNode { + next: atomics::INIT_ATOMIC_UINT, + } + } +}; + +impl StaticMutex { + /// Attempts to grab this lock, see `Mutex::try_lock` + pub fn try_lock<'a>(&'a mut self) -> Option> { + // Attempt to steal the mutex from an unlocked state. + // + // FIXME: this can mess up the fairness of the mutex, seems bad + match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) { + 0 => { + assert!(self.flavor == Unlocked); + self.flavor = TryLockAcquisition; + Some(Guard::new(self)) + } + _ => None + } + } + + /// Acquires this lock, see `Mutex::lock` + pub fn lock<'a>(&'a mut self) -> Guard<'a> { + // First, attempt to steal the mutex from an unlocked state. The "fast + // path" needs to have as few atomic instructions as possible, and this + // one cmpxchg is already pretty expensive. + // + // FIXME: this can mess up the fairness of the mutex, seems bad + match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) { + 0 => { + assert!(self.flavor == Unlocked); + self.flavor = TryLockAcquisition; + return Guard::new(self) + } + _ => {} + } + + // After we've failed the fast path, then we delegate to the differnet + // locking protocols for green/native tasks. 
This will select two tasks + // to continue further (one native, one green). + let t: ~Task = Local::take(); + let can_block = t.can_block(); + let native_bit; + if can_block { + self.native_lock(t); + native_bit = NATIVE_BLOCKED; + } else { + self.green_lock(t); + native_bit = GREEN_BLOCKED; + } + + // After we've arbitrated among task types, attempt to re-acquire the + // lock (avoids a deschedule). This is very important to do in order to + // allow threads coming out of the native_lock function to try their + // best to not hit a cvar in deschedule. + let mut old = match self.state.compare_and_swap(0, LOCKED, + atomics::SeqCst) { + 0 => { + self.flavor = if can_block { + NativeAcquisition + } else { + GreenAcquisition + }; + return Guard::new(self) + } + old => old, + }; + + // Alright, everything else failed. We need to deschedule ourselves and + // flag ourselves as waiting. Note that this case should only happen + // regularly in native/green contention. Due to try_lock and the header + // of lock stealing the lock, it's also possible for native/native + // contention to hit this location, but as less common. + let t: ~Task = Local::take(); + t.deschedule(1, |task| { + let task = unsafe { task.cast_to_uint() }; + if can_block { + assert_eq!(self.native_blocker, 0); + self.native_blocker = task; + } else { + assert_eq!(self.green_blocker, 0); + self.green_blocker = task; + } + + loop { + assert_eq!(old & native_bit, 0); + // If the old state was locked, then we need to flag ourselves + // as blocking in the state. If the old state was unlocked, then + // we attempt to acquire the mutex. Everything here is a CAS + // loop that'll eventually make progress. + if old & LOCKED != 0 { + old = match self.state.compare_and_swap(old, + old | native_bit, + atomics::SeqCst) { + n if n == old => return Ok(()), + n => n + }; + } else { + assert_eq!(old, 0); + old = match self.state.compare_and_swap(old, + old | LOCKED, + atomics::SeqCst) { + n if n == old => { + assert_eq!(self.flavor, Unlocked); + if can_block { + self.native_blocker = 0; + self.flavor = NativeAcquisition; + } else { + self.green_blocker = 0; + self.flavor = GreenAcquisition; + } + return Err(unsafe { + BlockedTask::cast_from_uint(task) + }) + } + n => n, + }; + } + } + }); + + Guard::new(self) + } + + // Tasks which can block are super easy. These tasks just call the blocking + // `lock()` function on an OS mutex + fn native_lock(&mut self, t: ~Task) { + Local::put(t); + unsafe { self.lock.lock(); } + } + + fn native_unlock(&mut self) { + unsafe { self.lock.unlock(); } + } + + fn green_lock(&mut self, t: ~Task) { + // Green threads flag their presence with an atomic counter, and if they + // fail to be the first to the mutex, they enqueue themselves on a + // concurrent internal queue with a stack-allocated node. + // + // FIXME: There isn't a cancellation currently of an enqueue, forcing + // the unlocker to spin for a bit. + if self.green_cnt.fetch_add(1, atomics::SeqCst) == 0 { + Local::put(t); + return + } + + let mut node = q::Node::new(0); + t.deschedule(1, |task| { + unsafe { + node.data = task.cast_to_uint(); + self.q.push(&mut node); + } + Ok(()) + }); + } + + fn green_unlock(&mut self) { + // If we're the only green thread, then no need to check the queue, + // otherwise the fixme above forces us to spin for a bit. 
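+        // (A `None` from `q.pop()` here can be spurious: a contending
+        // `green_lock` may have bumped `green_cnt` but not yet finished
+        // pushing its node, so the loop below yields and retries rather
+        // than treating `None` as an empty queue.)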
+ if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return } + let node; + loop { + match unsafe { self.q.pop() } { + Some(t) => { node = t; break; } + None => Thread::yield_now(), + } + } + let task = unsafe { BlockedTask::cast_from_uint((*node).data) }; + task.wake().map(|t| t.reawaken()); + } + + fn unlock(&mut self) { + // Unlocking this mutex is a little tricky. We favor any task that is + // manually blocked (not in each of the separate locks) in order to help + // provide a little fairness (green threads will wake up the pending + // native thread and native threads will wake up the pending green + // thread). + // + // There's also the question of when we unlock the actual green/native + // locking halves as well. If we're waking up someone, then we can wait + // to unlock until we've acquired the task to wake up (we're guaranteed + // the mutex memory is still valid when there's contenders), but as soon + // as we don't find any contenders we must unlock the mutex, and *then* + // flag the mutex as unlocked. + // + // This flagging can fail, leading to another round of figuring out if a + // task needs to be woken, and in this case it's ok that the "mutex + // halves" are unlocked, we're just mainly dealing with the atomic state + // of the outer mutex. + let flavor = self.flavor; + self.flavor = Unlocked; + + let mut state = self.state.load(atomics::SeqCst); + let mut unlocked = false; + let task; + loop { + assert!(state & LOCKED != 0); + if state & GREEN_BLOCKED != 0 { + self.unset(state, GREEN_BLOCKED); + task = unsafe { + BlockedTask::cast_from_uint(self.green_blocker) + }; + self.green_blocker = 0; + self.flavor = GreenAcquisition; + break; + } else if state & NATIVE_BLOCKED != 0 { + self.unset(state, NATIVE_BLOCKED); + task = unsafe { + BlockedTask::cast_from_uint(self.native_blocker) + }; + self.native_blocker = 0; + self.flavor = NativeAcquisition; + break; + } else { + assert_eq!(state, LOCKED); + if !unlocked { + match flavor { + GreenAcquisition => { self.green_unlock(); } + NativeAcquisition => { self.native_unlock(); } + TryLockAcquisition => {} + Unlocked => unreachable!() + } + unlocked = true; + } + match self.state.compare_and_swap(LOCKED, 0, atomics::SeqCst) { + LOCKED => return, + n => { state = n; } + } + } + } + if !unlocked { + match flavor { + GreenAcquisition => { self.green_unlock(); } + NativeAcquisition => { self.native_unlock(); } + TryLockAcquisition => {} + Unlocked => unreachable!() + } + } + + task.wake().map(|t| t.reawaken()); + } + + /// Loops around a CAS to unset the `bit` in `state` + fn unset(&mut self, mut state: uint, bit: uint) { + loop { + assert!(state & bit != 0); + let new = state ^ bit; + match self.state.compare_and_swap(state, new, atomics::SeqCst) { + n if n == state => break, + n => { state = n; } + } + } + } + + /// Deallocates resources associated with this static mutex. + /// + /// This method is unsafe because it provides no guarantees that there are + /// no active users of this mutex, and safety is not guaranteed if there are + /// active users of this mutex. + /// + /// This method is required to ensure that there are no memory leaks on + /// *all* platforms. It may be the case that some platforms do not leak + /// memory if this method is not called, but this is not guaranteed to be + /// true on all platforms. + pub unsafe fn destroy(&mut self) { + self.lock.destroy() + } +} + +impl Mutex { + /// Creates a new mutex in an unlocked state ready for use. 
+ pub fn new() -> Mutex { + Mutex { + lock: StaticMutex { + state: atomics::AtomicUint::new(0), + flavor: Unlocked, + green_blocker: 0, + native_blocker: 0, + green_cnt: atomics::AtomicUint::new(0), + q: q::Queue::new(), + lock: unsafe { mutex::Mutex::new() }, + } + } + } + + /// Attempts to acquire this lock. + /// + /// If the lock could not be acquired at this time, then `None` is returned. + /// Otherwise, an RAII guard is returned. The lock will be unlocked when the + /// guard is dropped. + /// + /// This function does not block. + pub fn try_lock<'a>(&'a mut self) -> Option> { + self.lock.try_lock() + } + + /// Acquires a mutex, blocking the current task until it is able to do so. + /// + /// This function will block the local task until it is availble to acquire + /// the mutex. Upon returning, the task is the only task with the mutex + /// held. An RAII guard is returned to allow scoped unlock of the lock. When + /// the guard goes out of scope, the mutex will be unlocked. + pub fn lock<'a>(&'a mut self) -> Guard<'a> { self.lock.lock() } +} + +impl<'a> Guard<'a> { + fn new<'b>(lock: &'b mut StaticMutex) -> Guard<'b> { + if cfg!(debug) { + assert!(lock.flavor != Unlocked); + assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0); + } + Guard { lock: lock } + } +} + +#[unsafe_destructor] +impl<'a> Drop for Guard<'a> { + #[inline] + fn drop(&mut self) { + self.lock.unlock(); + } +} + +impl Drop for Mutex { + fn drop(&mut self) { + // This is actually safe b/c we know that there is no further usage of + // this mutex (it's up to the user to arrange for a mutex to get + // dropped, that's not our job) + unsafe { self.lock.destroy() } + } +} + +#[cfg(test)] +mod test { + extern mod native; + use super::{Mutex, StaticMutex, MUTEX_INIT}; + + #[test] + fn smoke() { + let mut m = Mutex::new(); + drop(m.lock()); + drop(m.lock()); + } + + #[test] + fn smoke_static() { + static mut m: StaticMutex = MUTEX_INIT; + unsafe { + drop(m.lock()); + drop(m.lock()); + m.destroy(); + } + } + + #[test] + fn lots_and_lots() { + static mut m: StaticMutex = MUTEX_INIT; + static mut CNT: uint = 0; + static M: uint = 1000; + static N: uint = 3; + + fn inc() { + for _ in range(0, M) { + unsafe { + let _g = m.lock(); + CNT += 1; + } + } + } + + let (p, c) = SharedChan::new(); + for _ in range(0, N) { + let c2 = c.clone(); + native::task::spawn(proc() { inc(); c2.send(()); }); + let c2 = c.clone(); + spawn(proc() { inc(); c2.send(()); }); + } + + drop(c); + for _ in range(0, 2 * N) { + p.recv(); + } + assert_eq!(unsafe {CNT}, M * N * 2); + unsafe { + m.destroy(); + } + } + + #[test] + fn trylock() { + let mut m = Mutex::new(); + assert!(m.try_lock().is_some()); + } +} diff --git a/src/libextra/sync/one.rs b/src/libextra/sync/one.rs new file mode 100644 index 0000000000000..826955d93e8d3 --- /dev/null +++ b/src/libextra/sync/one.rs @@ -0,0 +1,168 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A "once initialization" primitive +//! +//! This primitive is meant to be used to run one-time initialization. An +//! example use case would be for initializing an FFI library. 
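As a sketch of that use case (the `ffi_init` binding and the `ensure_initialized` wrapper are hypothetical; later in this patch `librustc` guards its LLVM setup with the same pattern):

```rust
use extra::sync::one::{Once, ONCE_INIT};

extern { fn ffi_init(); }   // hypothetical one-time setup routine in C

static mut INIT: Once = ONCE_INIT;

pub fn ensure_initialized() {
    unsafe {
        INIT.doit(|| unsafe { ffi_init() });
    }
}
```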
+ +use std::int; +use std::sync::atomics; +use sync::mutex::{StaticMutex, MUTEX_INIT}; + +/// A type which can be used to run a one-time global initialization. This type +/// is *unsafe* to use because it is built on top of the `Mutex` in this module. +/// It does not know whether the currently running task is in a green or native +/// context, and a blocking mutex should *not* be used under normal +/// circumstances on a green task. +/// +/// Despite its unsafety, it is often useful to have a one-time initialization +/// routine run for FFI bindings or related external functionality. This type +/// can only be statically constructed with the `ONCE_INIT` value. +/// +/// # Example +/// +/// ```rust +/// use extra::sync::one::{Once, ONCE_INIT}; +/// +/// static mut START: Once = ONCE_INIT; +/// unsafe { +/// START.doit(|| { +/// // run initialization here +/// }); +/// } +/// ``` +pub struct Once { + priv mutex: StaticMutex, + priv cnt: atomics::AtomicInt, + priv lock_cnt: atomics::AtomicInt, +} + +/// Initialization value for static `Once` values. +pub static ONCE_INIT: Once = Once { + mutex: MUTEX_INIT, + cnt: atomics::INIT_ATOMIC_INT, + lock_cnt: atomics::INIT_ATOMIC_INT, +}; + +impl Once { + /// Perform an initialization routine once and only once. The given closure + /// will be executed if this is the first time `doit` has been called, and + /// otherwise the routine will *not* be invoked. + /// + /// This method will block the calling *os thread* if another initialization + /// routine is currently running. + /// + /// When this function returns, it is guaranteed that some initialization + /// has run and completed (it may not be the closure specified). + pub fn doit(&mut self, f: ||) { + // Implementation-wise, this would seem like a fairly trivial primitive. + // The stickler part is where our mutexes currently require an + // allocation, and usage of a `Once` should't leak this allocation. + // + // This means that there must be a deterministic destroyer of the mutex + // contained within (because it's not needed after the initialization + // has run). + // + // The general scheme here is to gate all future threads once + // initialization has completed with a "very negative" count, and to + // allow through threads to lock the mutex if they see a non negative + // count. For all threads grabbing the mutex, exactly one of them should + // be responsible for unlocking the mutex, and this should only be done + // once everyone else is done with the mutex. + // + // This atomicity is achieved by swapping a very negative value into the + // shared count when the initialization routine has completed. This will + // read the number of threads which will at some point attempt to + // acquire the mutex. This count is then squirreled away in a separate + // variable, and the last person on the way out of the mutex is then + // responsible for destroying the mutex. + // + // It is crucial that the negative value is swapped in *after* the + // initialization routine has completed because otherwise new threads + // calling `doit` will return immediately before the initialization has + // completed. 
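+        //
+        // As a concrete illustration, suppose threads A, B and C race on the
+        // first call while thread D arrives after initialization finishes:
+        //
+        //   A: cnt.fetch_add(1) -> 0    B: -> 1    C: -> 2   (all proceed)
+        //   A takes the mutex, sees cnt == 3 > 0, runs f(), then
+        //     cnt.swap(int::MIN) -> 3 and lock_cnt.store(3)
+        //   B and C take the mutex in turn, see cnt < 0, and skip f()
+        //   A: lock_cnt.fetch_add(-1) -> 3    B: -> 2    C: -> 1, so C (the
+        //     last one out) destroys the mutex
+        //   D: cnt.fetch_add(1) returns a negative value, so D re-stores
+        //     int::MIN and returns without touching the mutex or lock_cnt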
+ + let prev = self.cnt.fetch_add(1, atomics::SeqCst); + if prev < 0 { + // Make sure we never overflow, we'll never have int::MIN + // simultaneous calls to `doit` to make this value go back to 0 + self.cnt.store(int::MIN, atomics::SeqCst); + return + } + + // If the count is negative, then someone else finished the job, + // otherwise we run the job and record how many people will try to grab + // this lock + { + let _guard = self.mutex.lock(); + if self.cnt.load(atomics::SeqCst) > 0 { + f(); + let prev = self.cnt.swap(int::MIN, atomics::SeqCst); + self.lock_cnt.store(prev, atomics::SeqCst); + } + } + + // Last one out cleans up after everyone else, no leaks! + if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 { + unsafe { self.mutex.destroy() } + } + } +} + +#[cfg(test)] +mod test { + use super::{ONCE_INIT, Once}; + use std::task; + + #[test] + fn smoke_once() { + static mut o: Once = ONCE_INIT; + let mut a = 0; + unsafe { o.doit(|| a += 1); } + assert_eq!(a, 1); + unsafe { o.doit(|| a += 1); } + assert_eq!(a, 1); + } + + #[test] + fn stampede_once() { + static mut o: Once = ONCE_INIT; + static mut run: bool = false; + + let (p, c) = SharedChan::new(); + for _ in range(0, 10) { + let c = c.clone(); + spawn(proc() { + for _ in range(0, 4) { task::deschedule() } + unsafe { + o.doit(|| { + assert!(!run); + run = true; + }); + assert!(run); + } + c.send(()); + }); + } + + unsafe { + o.doit(|| { + assert!(!run); + run = true; + }); + assert!(run); + } + + for _ in range(0, 10) { + p.recv(); + } + } +} diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs index a8071dd82eb9b..c8b84d445db2f 100644 --- a/src/libgreen/sched.rs +++ b/src/libgreen/sched.rs @@ -1416,7 +1416,8 @@ mod test { #[test] fn test_spawn_sched_blocking() { - use std::unstable::mutex::Mutex; + use std::unstable::mutex::{Mutex, MUTEX_INIT}; + static mut LOCK: Mutex = MUTEX_INIT; // Testing that a task in one scheduler can block in foreign code // without affecting other schedulers @@ -1425,19 +1426,15 @@ mod test { let (start_po, start_ch) = Chan::new(); let (fin_po, fin_ch) = Chan::new(); - let lock = unsafe { Mutex::new() }; - let lock2 = unsafe { lock.clone() }; - let mut handle = pool.spawn_sched(); handle.send(PinnedTask(pool.task(TaskOpts::new(), proc() { - let mut lock = lock2; unsafe { - lock.lock(); + LOCK.lock(); start_ch.send(()); - lock.wait(); // block the scheduler thread - lock.signal(); // let them know we have the lock - lock.unlock(); + LOCK.wait(); // block the scheduler thread + LOCK.signal(); // let them know we have the lock + LOCK.unlock(); } fin_ch.send(()); @@ -1469,12 +1466,10 @@ mod test { child_ch.send(20); pingpong(&parent_po, &child_ch); unsafe { - let mut lock = lock; - lock.lock(); - lock.signal(); // wakeup waiting scheduler - lock.wait(); // wait for them to grab the lock - lock.unlock(); - lock.destroy(); // now we're guaranteed they have no locks + LOCK.lock(); + LOCK.signal(); // wakeup waiting scheduler + LOCK.wait(); // wait for them to grab the lock + LOCK.unlock(); } }))); drop(handle); @@ -1482,6 +1477,6 @@ mod test { fin_po.recv(); pool.shutdown(); } - + unsafe { LOCK.destroy(); } } } diff --git a/src/libgreen/simple.rs b/src/libgreen/simple.rs index 4a0523fe47a7a..8db95f55d18db 100644 --- a/src/libgreen/simple.rs +++ b/src/libgreen/simple.rs @@ -54,7 +54,7 @@ impl Runtime for SimpleTask { } Local::put(cur_task); } - fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) { + fn reawaken(mut ~self, mut to_wake: ~Task) { let me = &mut *self as *mut SimpleTask; 
to_wake.put_runtime(self as ~Runtime); unsafe { @@ -76,6 +76,7 @@ impl Runtime for SimpleTask { } fn local_io<'a>(&'a mut self) -> Option> { None } fn stack_bounds(&self) -> (uint, uint) { fail!() } + fn can_block(&self) -> bool { true } fn wrap(~self) -> ~Any { fail!() } } diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs index a2ecaf6fa9cac..4fb61f156809f 100644 --- a/src/libgreen/task.rs +++ b/src/libgreen/task.rs @@ -376,7 +376,7 @@ impl Runtime for GreenTask { } } - fn reawaken(mut ~self, to_wake: ~Task, can_resched: bool) { + fn reawaken(mut ~self, to_wake: ~Task) { self.put_task(to_wake); assert!(self.sched.is_none()); @@ -409,15 +409,10 @@ impl Runtime for GreenTask { match running_task.maybe_take_runtime::() { Some(mut running_green_task) => { running_green_task.put_task(running_task); - let mut sched = running_green_task.sched.take_unwrap(); + let sched = running_green_task.sched.take_unwrap(); if sched.pool_id == self.pool_id { - if can_resched { - sched.run_task(running_green_task, self); - } else { - sched.enqueue_task(self); - running_green_task.put_with_sched(sched); - } + sched.run_task(running_green_task, self); } else { self.reawaken_remotely(); @@ -462,6 +457,8 @@ impl Runtime for GreenTask { c.current_stack_segment.end() as uint) } + fn can_block(&self) -> bool { false } + fn wrap(~self) -> ~Any { self as ~Any } } diff --git a/src/libnative/io/net.rs b/src/libnative/io/net.rs index ac68b1523d7d9..dd916c8f3c4b9 100644 --- a/src/libnative/io/net.rs +++ b/src/libnative/io/net.rs @@ -201,14 +201,19 @@ pub fn init() { } unsafe { - use std::unstable::mutex::{Once, ONCE_INIT}; - static mut INIT: Once = ONCE_INIT; - INIT.doit(|| { + use std::unstable::mutex::{Mutex, MUTEX_INIT}; + static mut INITIALIZED: bool = false; + static mut LOCK: Mutex = MUTEX_INIT; + + LOCK.lock(); + if !INITIALIZED { let mut data: WSADATA = intrinsics::init(); let ret = WSAStartup(0x202, // version 2.2 &mut data); assert_eq!(ret, 0); - }); + INITIALIZED = true; + } + LOCK.unlock(); } } diff --git a/src/libnative/io/timer_helper.rs b/src/libnative/io/timer_helper.rs index 7311be46e8bd2..c00b0efadb5ee 100644 --- a/src/libnative/io/timer_helper.rs +++ b/src/libnative/io/timer_helper.rs @@ -22,7 +22,7 @@ use std::cast; use std::rt; -use std::unstable::mutex::{Once, ONCE_INIT}; +use std::unstable::mutex::{Mutex, MUTEX_INIT}; use bookkeeping; use io::timer::{Req, Shutdown}; @@ -37,10 +37,12 @@ static mut HELPER_CHAN: *mut SharedChan = 0 as *mut SharedChan; static mut HELPER_SIGNAL: imp::signal = 0 as imp::signal; pub fn boot(helper: fn(imp::signal, Port)) { - static mut INIT: Once = ONCE_INIT; + static mut LOCK: Mutex = MUTEX_INIT; + static mut INITIALIZED: bool = false; unsafe { - INIT.doit(|| { + LOCK.lock(); + if !INITIALIZED { let (msgp, msgc) = SharedChan::new(); HELPER_CHAN = cast::transmute(~msgc); let (receive, send) = imp::new(); @@ -52,7 +54,9 @@ pub fn boot(helper: fn(imp::signal, Port)) { }); rt::at_exit(proc() { shutdown() }); - }) + INITIALIZED = true; + } + LOCK.unlock(); } } diff --git a/src/libnative/task.rs b/src/libnative/task.rs index 0def5cb405338..c08e326d90331 100644 --- a/src/libnative/task.rs +++ b/src/libnative/task.rs @@ -143,6 +143,8 @@ impl rt::Runtime for Ops { fn stack_bounds(&self) -> (uint, uint) { self.stack_bounds } + fn can_block(&self) -> bool { true } + // This function gets a little interesting. There are a few safety and // ownership violations going on here, but this is all done in the name of // shared state. 
Additionally, all of the violations are protected with a @@ -231,7 +233,7 @@ impl rt::Runtime for Ops { // See the comments on `deschedule` for why the task is forgotten here, and // why it's valid to do so. - fn reawaken(mut ~self, mut to_wake: ~Task, _can_resched: bool) { + fn reawaken(mut ~self, mut to_wake: ~Task) { unsafe { let me = &mut *self as *mut Ops; to_wake.put_runtime(self as ~rt::Runtime); diff --git a/src/librustc/back/link.rs b/src/librustc/back/link.rs index 4d6576fed3462..e224a06818af1 100644 --- a/src/librustc/back/link.rs +++ b/src/librustc/back/link.rs @@ -96,8 +96,8 @@ pub mod write { use lib::llvm::llvm; use lib::llvm::{ModuleRef, TargetMachineRef, PassManagerRef}; use lib; - use syntax::abi; use util::common::time; + use syntax::abi; use std::c_str::ToCStr; use std::libc::{c_uint, c_int}; @@ -105,6 +105,24 @@ pub mod write { use std::run; use std::str; + // On android, we by default compile for armv7 processors. This enables + // things like double word CAS instructions (rather than emulating them) + // which are *far* more efficient. This is obviously undesirable in some + // cases, so if any sort of target feature is specified we don't append v7 + // to the feature list. + fn target_feature<'a>(sess: &'a Session) -> &'a str { + match sess.targ_cfg.os { + abi::OsAndroid => { + if "" == sess.opts.target_feature { + "+v7" + } else { + sess.opts.target_feature.as_slice() + } + } + _ => sess.opts.target_feature.as_slice() + } + } + pub fn run_passes(sess: Session, trans: &CrateTranslation, output_type: OutputType, @@ -136,7 +154,7 @@ pub mod write { let tm = sess.targ_cfg.target_strs.target_triple.with_c_str(|T| { sess.opts.target_cpu.with_c_str(|CPU| { - sess.opts.target_feature.with_c_str(|Features| { + target_feature(&sess).with_c_str(|Features| { llvm::LLVMRustCreateTargetMachine( T, CPU, Features, lib::llvm::CodeModelDefault, @@ -313,7 +331,7 @@ pub mod write { } unsafe fn configure_llvm(sess: Session) { - use std::unstable::mutex::{Once, ONCE_INIT}; + use extra::sync::one::{Once, ONCE_INIT}; static mut INIT: Once = ONCE_INIT; // Copy what clang does by turning on loop vectorization at O2 and diff --git a/src/librustc/middle/trans/base.rs b/src/librustc/middle/trans/base.rs index 7bd2653f86990..1ebe4a03cfdd2 100644 --- a/src/librustc/middle/trans/base.rs +++ b/src/librustc/middle/trans/base.rs @@ -2660,7 +2660,7 @@ pub fn trans_crate(sess: session::Session, output: &Path) -> CrateTranslation { // Before we touch LLVM, make sure that multithreading is enabled. 
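Two strategies appear in this patch for call sites that previously used `std::unstable::mutex::Once`: librustc, which can link against libextra, moves to `extra::sync::one::Once` (as in `configure_llvm` above and `trans_crate` just below), while the libnative hunks earlier (`net.rs`, `timer_helper.rs`) rebuild the same guarantee from a static OS mutex and a flag, presumably because libnative depends only on libstd. That hand-rolled pattern, as a sketch (the `init_once` wrapper is hypothetical; the real hunks inline it):

```rust
use std::unstable::mutex::{Mutex, MUTEX_INIT};

static mut LOCK: Mutex = MUTEX_INIT;
static mut INITIALIZED: bool = false;

// Run `f` at most once, process-wide. Unlike `Once`, nothing here ever
// destroys the mutex.
unsafe fn init_once(f: ||) {
    LOCK.lock();
    if !INITIALIZED {
        f();
        INITIALIZED = true;
    }
    LOCK.unlock();
}
```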
unsafe { - use std::unstable::mutex::{Once, ONCE_INIT}; + use extra::sync::one::{Once, ONCE_INIT}; static mut INIT: Once = ONCE_INIT; static mut POISONED: bool = false; INIT.doit(|| { diff --git a/src/librustc/middle/trans/builder.rs b/src/librustc/middle/trans/builder.rs index 71b25b79feb4a..cedd98e261cc7 100644 --- a/src/librustc/middle/trans/builder.rs +++ b/src/librustc/middle/trans/builder.rs @@ -15,7 +15,7 @@ use lib::llvm::{Opcode, IntPredicate, RealPredicate, False}; use lib::llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use middle::trans::base; use middle::trans::common::*; -use middle::trans::machine::llalign_of_min; +use middle::trans::machine::llalign_of_pref; use middle::trans::type_::Type; use std::cast; use std::hashmap::HashMap; @@ -461,8 +461,10 @@ impl Builder { pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef { self.count_insn("load.atomic"); unsafe { - let align = llalign_of_min(self.ccx, self.ccx.int_type); - llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order, align as c_uint) + let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); + let align = llalign_of_pref(self.ccx, ty.element_type()); + llvm::LLVMBuildAtomicLoad(self.llbuilder, ptr, noname(), order, + align as c_uint) } } @@ -514,8 +516,9 @@ impl Builder { self.ccx.tn.val_to_str(val), self.ccx.tn.val_to_str(ptr)); self.count_insn("store.atomic"); - let align = llalign_of_min(self.ccx, self.ccx.int_type); unsafe { + let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); + let align = llalign_of_pref(self.ccx, ty.element_type()); llvm::LLVMBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); } } diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs index 74655914a27c9..e1679c81a0e2d 100644 --- a/src/librustc/middle/typeck/check/mod.rs +++ b/src/librustc/middle/typeck/check/mod.rs @@ -4030,29 +4030,32 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { //We only care about the operation here match split[1] { - "cxchg" => (0, ~[ty::mk_mut_rptr(tcx, + "cxchg" => (1, ~[ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - ty::mk_int()), - ty::mk_int(), - ty::mk_int() - ], ty::mk_int()), - "load" => (0, + param(ccx, 0)), + param(ccx, 0), + param(ccx, 0), + ], param(ccx, 0)), + "load" => (1, ~[ - ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int()) + ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), + param(ccx, 0)) ], - ty::mk_int()), - "store" => (0, + param(ccx, 0)), + "store" => (1, ~[ - ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int()), - ty::mk_int() + ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), + param(ccx, 0)), + param(ccx, 0) ], ty::mk_nil()), - "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | + "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax" | "umin" => { - (0, ~[ty::mk_mut_rptr(tcx, + (1, ~[ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - ty::mk_int()), ty::mk_int() ], ty::mk_int()) + param(ccx, 0)), param(ccx, 0) ], + param(ccx, 0)) } "fence" => { (0, ~[], ty::mk_nil()) @@ -4085,16 +4088,6 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { } "needs_drop" => (1u, ~[], ty::mk_bool()), "owns_managed" => (1u, ~[], ty::mk_bool()), - "atomic_xchg" | "atomic_xadd" | "atomic_xsub" | - "atomic_xchg_acq" | "atomic_xadd_acq" | "atomic_xsub_acq" | - "atomic_xchg_rel" | "atomic_xadd_rel" | "atomic_xsub_rel" => { - (0, - ~[ - ty::mk_mut_rptr(tcx, 
ty::ReLateBound(it.id, ty::BrAnon(0)), ty::mk_int()), - ty::mk_int() - ], - ty::mk_int()) - } "get_tydesc" => { let tydesc_ty = match ty::get_tydesc_ty(ccx.tcx) { diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index dafd3dbe1bc96..30e02b168ca57 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -122,7 +122,7 @@ mod test { } } }; - let _ = task.wake().map(|t| t.reawaken(true)); + let _ = task.wake().map(|t| t.reawaken()); } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index f945c0972ca39..39d6f851e1722 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -208,7 +208,7 @@ fn wait_until_woken_after(slot: *mut Option, f: ||) { fn wakeup(slot: &mut Option) { assert!(slot.is_some()); - let _ = slot.take_unwrap().wake().map(|t| t.reawaken(true)); + let _ = slot.take_unwrap().wake().map(|t| t.reawaken()); } pub struct Request { diff --git a/src/librustuv/queue.rs b/src/librustuv/queue.rs index 0e1c4225caa93..358582d436b32 100644 --- a/src/librustuv/queue.rs +++ b/src/librustuv/queue.rs @@ -67,7 +67,7 @@ extern fn async_cb(handle: *uvll::uv_async_t, status: c_int) { loop { match state.consumer.pop() { mpsc::Data(Task(task)) => { - let _ = task.wake().map(|t| t.reawaken(true)); + let _ = task.wake().map(|t| t.reawaken()); } mpsc::Data(Increment) => unsafe { if state.refcnt == 0 { diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index 792414238fdaa..0363cab247d15 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -138,7 +138,7 @@ extern fn timer_cb(handle: *uvll::uv_timer_t, status: c_int) { match timer.action.take_unwrap() { WakeTask(task) => { - let _ = task.wake().map(|t| t.reawaken(true)); + let _ = task.wake().map(|t| t.reawaken()); } SendOnce(chan) => { let _ = chan.try_send(()); } SendMany(chan, id) => { diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs index bccebeaa79f6b..366c268fae2b9 100644 --- a/src/libstd/comm/mod.rs +++ b/src/libstd/comm/mod.rs @@ -443,9 +443,9 @@ impl Packet { // This function must have had at least an acquire fence before it to be // properly called. - fn wakeup(&mut self, can_resched: bool) { + fn wakeup(&mut self) { match self.to_wake.take_unwrap().wake() { - Some(task) => task.reawaken(can_resched), + Some(task) => task.reawaken(), None => {} } self.selecting.store(false, Relaxed); @@ -519,7 +519,7 @@ impl Packet { match self.channels.fetch_sub(1, SeqCst) { 1 => { match self.cnt.swap(DISCONNECTED, SeqCst) { - -1 => { self.wakeup(true); } + -1 => { self.wakeup(); } DISCONNECTED => {} n => { assert!(n >= 0); } } @@ -595,20 +595,14 @@ impl Chan { /// /// Like `send`, this method will never block. If the failure of send cannot /// be tolerated, then this method should be used instead. - pub fn try_send(&self, t: T) -> bool { self.try(t, true) } - - /// This function will not stick around for very long. The purpose of this - /// function is to guarantee that no rescheduling is performed. - pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) } - - fn try(&self, t: T, can_resched: bool) -> bool { + pub fn try_send(&self, t: T) -> bool { unsafe { let this = cast::transmute_mut(self); this.queue.push(t); let packet = this.queue.packet(); match (*packet).increment() { // As described above, -1 == wakeup - -1 => { (*packet).wakeup(can_resched); true } + -1 => { (*packet).wakeup(); true } // Also as above, SPSC queues must be >= -2 -2 => true, // We succeeded if we sent data @@ -623,7 +617,7 @@ impl Chan { // the TLS overhead can be a bit much. 
n => { assert!(n >= 0); - if can_resched && n > 0 && n % RESCHED_FREQ == 0 { + if n > 0 && n % RESCHED_FREQ == 0 { let task: ~Task = Local::take(); task.maybe_yield(); } @@ -700,7 +694,7 @@ impl SharedChan { match (*packet).increment() { DISCONNECTED => {} // oh well, we tried - -1 => { (*packet).wakeup(true); } + -1 => { (*packet).wakeup(); } n => { if n > 0 && n % RESCHED_FREQ == 0 { let task: ~Task = Local::take(); diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index 0e30f3e2efd09..55425eb2e7226 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -146,7 +146,7 @@ pub trait Runtime { fn maybe_yield(~self, cur_task: ~Task); fn deschedule(~self, times: uint, cur_task: ~Task, f: |BlockedTask| -> Result<(), BlockedTask>); - fn reawaken(~self, to_wake: ~Task, can_resched: bool); + fn reawaken(~self, to_wake: ~Task); // Miscellaneous calls which are very different depending on what context // you're in. @@ -154,6 +154,7 @@ pub trait Runtime { fn local_io<'a>(&'a mut self) -> Option>; /// The (low, high) edges of the current stack. fn stack_bounds(&self) -> (uint, uint); // (lo, hi) + fn can_block(&self) -> bool; // FIXME: This is a serious code smell and this should not exist at all. fn wrap(~self) -> ~Any; diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 515eb93001af2..fbe82531f6977 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -250,9 +250,9 @@ impl Task { /// Wakes up a previously blocked task, optionally specifiying whether the /// current task can accept a change in scheduling. This function can only /// be called on tasks that were previously blocked in `deschedule`. - pub fn reawaken(mut ~self, can_resched: bool) { + pub fn reawaken(mut ~self) { let ops = self.imp.take_unwrap(); - ops.reawaken(self, can_resched); + ops.reawaken(self); } /// Yields control of this task to another task. This function will @@ -283,6 +283,12 @@ impl Task { pub fn stack_bounds(&self) -> (uint, uint) { self.imp.get_ref().stack_bounds() } + + /// Returns whether it is legal for this task to block the OS thread that it + /// is running on. + pub fn can_block(&self) -> bool { + self.imp.get_ref().can_block() + } } impl Drop for Task { diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index 30d9ede8a433e..fb62bed9ed0ae 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -59,9 +59,25 @@ pub struct AtomicUint { priv nocopy: NonCopyable } +/** + * An unsigned atomic integer type that is forced to be 64-bits. This does not + * support all operations. + */ +#[cfg(not(stage0))] +pub struct AtomicU64 { + priv v: u64, + priv nocopy: NonCopyable +} + /** * An unsafe atomic pointer. Only supports basic atomic operations */ +#[cfg(not(stage0))] +pub struct AtomicPtr { + priv p: uint, + priv nocopy: NonCopyable +} +#[cfg(stage0)] pub struct AtomicPtr { priv p: *mut T, priv nocopy: NonCopyable @@ -71,6 +87,12 @@ pub struct AtomicPtr { * An owned atomic pointer. Ensures that only a single reference to the data is held at any time. 
*/ #[unsafe_no_drop_flag] +#[cfg(not(stage0))] +pub struct AtomicOption { + priv p: uint, +} +#[unsafe_no_drop_flag] +#[cfg(stage0)] pub struct AtomicOption { priv p: *mut u8 } @@ -87,6 +109,8 @@ pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nocopy: NonCopyabl pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nocopy: NonCopyable }; pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nocopy: NonCopyable }; pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nocopy: NonCopyable }; +#[cfg(not(stage0))] +pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nocopy: NonCopyable }; impl AtomicFlag { @@ -215,6 +239,43 @@ impl AtomicInt { } } +#[cfg(not(stage0))] +impl AtomicU64 { + pub fn new(v: u64) -> AtomicU64 { + AtomicU64 { v:v, nocopy: NonCopyable } + } + + #[inline] + pub fn load(&self, order: Ordering) -> u64 { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: u64, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + #[inline] + pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_add(&mut self.v, val, order) } + } + + #[inline] + pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + impl AtomicUint { pub fn new(v: uint) -> AtomicUint { AtomicUint { v:v, nocopy: NonCopyable } @@ -254,26 +315,64 @@ impl AtomicUint { } impl AtomicPtr { + #[cfg(stage0)] + pub fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: p, nocopy: NonCopyable } + } + #[cfg(not(stage0))] pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p:p, nocopy: NonCopyable } + AtomicPtr { p: p as uint, nocopy: NonCopyable } } #[inline] + #[cfg(not(stage0))] + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { + atomic_load(&self.p, order) as *mut T + } + } + + #[inline] + #[cfg(not(stage0))] + pub fn store(&mut self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(&mut self.p, ptr as uint, order); } + } + + #[inline] + #[cfg(not(stage0))] + pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T } + } + + #[inline] + #[cfg(not(stage0))] + pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + unsafe { + atomic_compare_and_swap(&mut self.p, old as uint, + new as uint, order) as *mut T + } + } + + #[inline] + #[cfg(stage0)] pub fn load(&self, order: Ordering) -> *mut T { unsafe { atomic_load(&self.p, order) } } #[inline] + #[cfg(stage0)] pub fn store(&mut self, ptr: *mut T, order: Ordering) { unsafe { atomic_store(&mut self.p, ptr, order); } } #[inline] + #[cfg(stage0)] pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { unsafe { atomic_swap(&mut self.p, ptr, order) } } #[inline] + #[cfg(stage0)] pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { atomic_compare_and_swap(&mut self.p, old, new, order) } } @@ -281,20 +380,13 @@ impl AtomicPtr { impl AtomicOption { pub fn new(p: ~T) -> AtomicOption { - unsafe { - AtomicOption { - p: cast::transmute(p) - } - } + unsafe { AtomicOption { p: cast::transmute(p) } } } - pub fn empty() -> AtomicOption { - 
unsafe { - AtomicOption { - p: cast::transmute(0) - } - } - } + #[cfg(stage0)] + pub fn empty() -> AtomicOption { AtomicOption { p: 0 as *mut u8 } } + #[cfg(not(stage0))] + pub fn empty() -> AtomicOption { AtomicOption { p: 0 } } #[inline] pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { @@ -302,9 +394,7 @@ impl AtomicOption { let val = cast::transmute(val); let p = atomic_swap(&mut self.p, val, order); - let pv : &uint = cast::transmute(&p); - - if *pv == 0 { + if p as uint == 0 { None } else { Some(cast::transmute(p)) @@ -314,9 +404,7 @@ impl AtomicOption { #[inline] pub fn take(&mut self, order: Ordering) -> Option<~T> { - unsafe { - self.swap(cast::transmute(0), order) - } + unsafe { self.swap(cast::transmute(0), order) } } /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' @@ -340,7 +428,7 @@ impl AtomicOption { /// result does not get invalidated by another task after this returns. #[inline] pub fn is_empty(&mut self, order: Ordering) -> bool { - unsafe { atomic_load(&self.p, order) == cast::transmute(0) } + unsafe { atomic_load(&self.p, order) as uint == 0 } } } @@ -351,11 +439,20 @@ impl Drop for AtomicOption { } } +#[cfg(stage0)] #[inline] pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { let dst = cast::transmute(dst); let val = cast::transmute(val); - + cast::transmute(match order { + Release => intrinsics::atomic_store_rel(dst, val), + Relaxed => intrinsics::atomic_store_relaxed(dst, val), + _ => intrinsics::atomic_store(dst, val) + }) +} +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), @@ -363,22 +460,31 @@ pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { } } +#[cfg(stage0)] #[inline] pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { let dst = cast::transmute(dst); - cast::transmute(match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => intrinsics::atomic_load_relaxed(dst), _ => intrinsics::atomic_load(dst) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { + match order { + Acquire => intrinsics::atomic_load_acq(dst), + Relaxed => intrinsics::atomic_load_relaxed(dst), + _ => intrinsics::atomic_load(dst) + } +} +#[cfg(stage0)] #[inline] pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), @@ -387,13 +493,24 @@ pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_xchg(dst, val) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xchg_acq(dst, val), + Release => intrinsics::atomic_xchg_rel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), + _ => intrinsics::atomic_xchg(dst, val) + } +} /// Returns the old value (like __sync_fetch_and_add). 
+#[cfg(stage0)] #[inline] pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), @@ -402,13 +519,25 @@ pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_xadd(dst, val) }) } +/// Returns the old value (like __sync_fetch_and_add). +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xadd_acq(dst, val), + Release => intrinsics::atomic_xadd_rel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), + _ => intrinsics::atomic_xadd(dst, val) + } +} /// Returns the old value (like __sync_fetch_and_sub). +#[cfg(stage0)] #[inline] pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), @@ -417,13 +546,25 @@ pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_xsub(dst, val) }) } +/// Returns the old value (like __sync_fetch_and_sub). +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xsub_acq(dst, val), + Release => intrinsics::atomic_xsub_rel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), + _ => intrinsics::atomic_xsub(dst, val) + } +} +#[cfg(stage0)] #[inline] pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { let dst = cast::transmute(dst); - let old = cast::transmute(old); let new = cast::transmute(new); - + let old = cast::transmute(old); cast::transmute(match order { Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), Release => intrinsics::atomic_cxchg_rel(dst, old, new), @@ -432,12 +573,23 @@ pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Orderi _ => intrinsics::atomic_cxchg(dst, old, new), }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), + Release => intrinsics::atomic_cxchg_rel(dst, old, new), + AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), + Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), + _ => intrinsics::atomic_cxchg(dst, old, new), + } +} +#[cfg(stage0)] #[inline] pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), @@ -446,13 +598,23 @@ pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_and(dst, val) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_and_acq(dst, val), + Release => intrinsics::atomic_and_rel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), + Relaxed => intrinsics::atomic_and_relaxed(dst, val), + _ => intrinsics::atomic_and(dst, val) + } 
+} - +#[cfg(stage0)] #[inline] pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), @@ -461,13 +623,24 @@ pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_nand(dst, val) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + _ => intrinsics::atomic_nand(dst, val) + } +} +#[cfg(stage0)] #[inline] pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), @@ -476,13 +649,24 @@ pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_or(dst, val) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_or_acq(dst, val), + Release => intrinsics::atomic_or_rel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), + Relaxed => intrinsics::atomic_or_relaxed(dst, val), + _ => intrinsics::atomic_or(dst, val) + } +} +#[cfg(stage0)] #[inline] pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { let dst = cast::transmute(dst); let val = cast::transmute(val); - cast::transmute(match order { Acquire => intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), @@ -491,6 +675,17 @@ pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { _ => intrinsics::atomic_xor(dst, val) }) } +#[cfg(not(stage0))] +#[inline] +pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xor_acq(dst, val), + Release => intrinsics::atomic_xor_rel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + Relaxed => intrinsics::atomic_xor_relaxed(dst, val), + _ => intrinsics::atomic_xor(dst, val) + } +} /** @@ -599,4 +794,22 @@ mod test { assert!(S_UINT.load(SeqCst) == 0); } } + + #[test] + #[cfg(not(stage0))] + fn different_sizes() { + unsafe { + let mut slot = 0u16; + assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); + + let mut slot = 0u8; + assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); + + let mut slot = 0u32; + assert_eq!(super::atomic_load(&mut slot, SeqCst), 0); + + let mut slot = 0u64; + super::atomic_store(&mut slot, 2, SeqCst); + } + } } diff --git a/src/libstd/unstable/intrinsics.rs b/src/libstd/unstable/intrinsics.rs index 067826f04dc83..9b3826b42a59a 100644 --- a/src/libstd/unstable/intrinsics.rs +++ b/src/libstd/unstable/intrinsics.rs @@ -172,16 +172,8 @@ pub trait TyVisitor { fn visit_closure_ptr(&mut self, ck: uint) -> bool; } +#[cfg(stage0)] extern "rust-intrinsic" { - /// Abort the execution of the process. - pub fn abort() -> !; - - /// Execute a breakpoint trap, for inspection by a debugger. - pub fn breakpoint(); - - pub fn volatile_load(src: *T) -> T; - pub fn volatile_store(dst: *mut T, val: T); - /// Atomic compare and exchange, sequentially consistent. 
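The bitwise variants follow the same pattern: `atomic_or`, `atomic_and`, `atomic_nand` and `atomic_xor` return the previous bits, which is what makes them usable as one-shot flags, and the new `different_sizes` test confirms the generic versions type-check at u8 through u64. A small sketch in the same vein (hypothetical helpers written as if inside this module, not part of the patch):

    // Sketch only: claim/release a flag bit in a shared word.
    // atomic_or returns the old bits, so the caller that saw the bit clear
    // is the one that actually claimed it.
    unsafe fn try_claim(flags: &mut uint) -> bool {
        let bit = 1u;
        (atomic_or(flags, bit, SeqCst) & bit) == 0
    }

    unsafe fn release_claim(flags: &mut uint) {
        atomic_and(flags, !1u, SeqCst);   // clear bit 0 again
    }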
pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int; /// Atomic compare and exchange, acquire ordering. @@ -286,6 +278,106 @@ extern "rust-intrinsic" { pub fn atomic_fence_acq(); pub fn atomic_fence_rel(); pub fn atomic_fence_acqrel(); +} + +#[cfg(not(stage0))] +extern "rust-intrinsic" { + pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acqrel(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_relaxed(dst: &mut T, old: T, src: T) -> T; + + pub fn atomic_load(src: &T) -> T; + pub fn atomic_load_acq(src: &T) -> T; + pub fn atomic_load_relaxed(src: &T) -> T; + + pub fn atomic_store(dst: &mut T, val: T); + pub fn atomic_store_rel(dst: &mut T, val: T); + pub fn atomic_store_relaxed(dst: &mut T, val: T); + + pub fn atomic_xchg(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_xadd(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_xsub(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_and(dst: &mut T, src: T) -> T; + pub fn atomic_and_acq(dst: &mut T, src: T) -> T; + pub fn atomic_and_rel(dst: &mut T, src: T) -> T; + pub fn atomic_and_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_and_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_nand(dst: &mut T, src: T) -> T; + pub fn atomic_nand_acq(dst: &mut T, src: T) -> T; + pub fn atomic_nand_rel(dst: &mut T, src: T) -> T; + pub fn atomic_nand_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_nand_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_or(dst: &mut T, src: T) -> T; + pub fn atomic_or_acq(dst: &mut T, src: T) -> T; + pub fn atomic_or_rel(dst: &mut T, src: T) -> T; + pub fn atomic_or_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_or_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_xor(dst: &mut T, src: T) -> T; + pub fn atomic_xor_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xor_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xor_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_xor_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_max(dst: &mut T, src: T) -> T; + pub fn atomic_max_acq(dst: &mut T, src: T) -> T; + pub fn atomic_max_rel(dst: &mut T, src: T) -> T; + pub fn atomic_max_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_max_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_min(dst: &mut T, src: T) -> T; + pub fn atomic_min_acq(dst: &mut T, src: T) -> T; + pub fn atomic_min_rel(dst: &mut T, src: T) -> T; + pub fn atomic_min_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_min_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_umin(dst: &mut T, src: T) -> T; + pub fn atomic_umin_acq(dst: &mut T, src: T) -> T; + pub fn atomic_umin_rel(dst: &mut T, src: T) -> T; + pub fn atomic_umin_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_umin_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_umax(dst: &mut T, src: T) -> T; + pub fn 
atomic_umax_acq(dst: &mut T, src: T) -> T; + pub fn atomic_umax_rel(dst: &mut T, src: T) -> T; + pub fn atomic_umax_acqrel(dst: &mut T, src: T) -> T; + pub fn atomic_umax_relaxed(dst: &mut T, src: T) -> T; + + pub fn atomic_fence(); + pub fn atomic_fence_acq(); + pub fn atomic_fence_rel(); + pub fn atomic_fence_acqrel(); +} + +extern "rust-intrinsic" { + /// Abort the execution of the process. + pub fn abort() -> !; + + /// Execute a breakpoint trap, for inspection by a debugger. + pub fn breakpoint(); + + pub fn volatile_load(src: *T) -> T; + pub fn volatile_store(dst: *mut T, val: T); + /// The size of a type in bytes. /// diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs index 39f0d7b5638bc..4804de756876f 100644 --- a/src/libstd/unstable/mutex.rs +++ b/src/libstd/unstable/mutex.rs @@ -47,180 +47,186 @@ #[allow(non_camel_case_types)]; -use int; -use sync::atomics; - pub struct Mutex { - // pointers for the lock/cond handles, atomically updated - priv lock: atomics::AtomicUint, - priv cond: atomics::AtomicUint, + priv inner: imp::Mutex, } pub static MUTEX_INIT: Mutex = Mutex { - lock: atomics::INIT_ATOMIC_UINT, - cond: atomics::INIT_ATOMIC_UINT, + inner: imp::MUTEX_INIT, }; impl Mutex { - /// Creates a new mutex, with the lock/condition variable pre-initialized + /// Creates a new mutex pub unsafe fn new() -> Mutex { - Mutex { - lock: atomics::AtomicUint::new(imp::init_lock()), - cond: atomics::AtomicUint::new(imp::init_cond()), - } - } - - /// Creates a new mutex, with the lock/condition variable not initialized. - /// This is the same as initializing from the MUTEX_INIT static. - pub unsafe fn empty() -> Mutex { - Mutex { - lock: atomics::AtomicUint::new(0), - cond: atomics::AtomicUint::new(0), - } - } - - /// Creates a new copy of this mutex. This is an unsafe operation because - /// there is no reference counting performed on this type. - /// - /// This function may only be called on mutexes which have had both the - /// internal condition variable and lock initialized. This means that the - /// mutex must have been created via `new`, or usage of it has already - /// initialized the internal handles. - /// - /// This is a dangerous function to call as both this mutex and the returned - /// mutex will share the same handles to the underlying mutex/condition - /// variable. Care must be taken to ensure that deallocation happens - /// accordingly. - pub unsafe fn clone(&self) -> Mutex { - let lock = self.lock.load(atomics::Relaxed); - let cond = self.cond.load(atomics::Relaxed); - assert!(lock != 0); - assert!(cond != 0); - Mutex { - lock: atomics::AtomicUint::new(lock), - cond: atomics::AtomicUint::new(cond), - } + Mutex { inner: imp::Mutex::new() } } /// Acquires this lock. This assumes that the current thread does not /// already hold the lock. - pub unsafe fn lock(&mut self) { imp::lock(self.getlock()) } + pub unsafe fn lock(&mut self) { self.inner.lock() } /// Attempts to acquire the lock. The value returned is whether the lock was /// acquired or not - pub unsafe fn trylock(&mut self) -> bool { imp::trylock(self.getlock()) } + pub unsafe fn trylock(&mut self) -> bool { self.inner.trylock() } /// Unlocks the lock. This assumes that the current thread already holds the /// lock. - pub unsafe fn unlock(&mut self) { imp::unlock(self.getlock()) } + pub unsafe fn unlock(&mut self) { self.inner.unlock() } /// Block on the internal condition variable. 
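With the platform details pushed into `imp`, the outer `Mutex` is now a thin facade over `imp::Mutex` whose whole point is that it can live in a static with no runtime allocation. A usage sketch in the spirit of the `somke_lock` test kept further down in this file (the `LOCK`/`COUNTER`/`bump` names are illustrative only; `wait` and `signal` appear just below):

    // Sketch only: a statically-initialized native mutex guarding a global.
    static mut LOCK: Mutex = MUTEX_INIT;     // no lazy handle allocation
    static mut COUNTER: uint = 0;

    unsafe fn bump() -> uint {
        LOCK.lock();          // delegates straight to imp::Mutex::lock
        COUNTER += 1;
        let val = COUNTER;
        LOCK.unlock();
        val
    }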
/// /// This function assumes that the lock is already held - pub unsafe fn wait(&mut self) { imp::wait(self.getcond(), self.getlock()) } + pub unsafe fn wait(&mut self) { self.inner.wait() } /// Signals a thread in `wait` to wake up - pub unsafe fn signal(&mut self) { imp::signal(self.getcond()) } + pub unsafe fn signal(&mut self) { self.inner.signal() } /// This function is especially unsafe because there are no guarantees made /// that no other thread is currently holding the lock or waiting on the /// condition variable contained inside. - pub unsafe fn destroy(&mut self) { - let lock = self.lock.swap(0, atomics::Relaxed); - let cond = self.cond.swap(0, atomics::Relaxed); - if lock != 0 { imp::free_lock(lock) } - if cond != 0 { imp::free_cond(cond) } - } - - unsafe fn getlock(&mut self) -> uint{ - match self.lock.load(atomics::Relaxed) { - 0 => {} - n => return n - } - let lock = imp::init_lock(); - match self.lock.compare_and_swap(0, lock, atomics::SeqCst) { - 0 => return lock, - _ => {} - } - imp::free_lock(lock); - self.lock.load(atomics::Relaxed) - } - - unsafe fn getcond(&mut self) -> uint { - match self.cond.load(atomics::Relaxed) { - 0 => {} - n => return n - } - let cond = imp::init_cond(); - match self.cond.compare_and_swap(0, cond, atomics::SeqCst) { - 0 => return cond, - _ => {} - } - imp::free_cond(cond); - self.cond.load(atomics::Relaxed) - } + pub unsafe fn destroy(&mut self) { self.inner.destroy() } } #[cfg(unix)] mod imp { use libc; - use ptr; - use rt::global_heap::malloc_raw; + use self::os::{PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, + pthread_mutex_t, pthread_cond_t}; + use unstable::intrinsics; - type pthread_mutex_t = libc::c_void; type pthread_mutexattr_t = libc::c_void; - type pthread_cond_t = libc::c_void; type pthread_condattr_t = libc::c_void; - pub unsafe fn init_lock() -> uint { - let block = malloc_raw(rust_pthread_mutex_t_size() as uint) as *mut pthread_mutex_t; - let n = pthread_mutex_init(block, ptr::null()); - assert_eq!(n, 0); - return block as uint; - } + #[cfg(target_os = "freebsd")] + mod os { + use libc; + + pub type pthread_mutex_t = *libc::c_void; + pub type pthread_cond_t = *libc::c_void; + + pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = + 0 as pthread_mutex_t; + pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = + 0 as pthread_cond_t; + } + + #[cfg(target_os = "macos")] + mod os { + use libc; + + #[cfg(target_arch = "x86_64")] + static __PTHREAD_MUTEX_SIZE__: uint = 56; + #[cfg(target_arch = "x86_64")] + static __PTHREAD_COND_SIZE__: uint = 40; + #[cfg(target_arch = "x86")] + static __PTHREAD_MUTEX_SIZE__: uint = 40; + #[cfg(target_arch = "x86")] + static __PTHREAD_COND_SIZE__: uint = 24; + static _PTHREAD_MUTEX_SIG_init: libc::c_long = 0x32AAABA7; + static _PTHREAD_COND_SIG_init: libc::c_long = 0x3CB0B1BB; + + pub struct pthread_mutex_t { + __sig: libc::c_long, + __opaque: [u8, ..__PTHREAD_MUTEX_SIZE__], + } + pub struct pthread_cond_t { + __sig: libc::c_long, + __opaque: [u8, ..__PTHREAD_COND_SIZE__], + } - pub unsafe fn init_cond() -> uint { - let block = malloc_raw(rust_pthread_cond_t_size() as uint) as *mut pthread_cond_t; - let n = pthread_cond_init(block, ptr::null()); - assert_eq!(n, 0); - return block as uint; - } + pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __sig: _PTHREAD_MUTEX_SIG_init, + __opaque: [0, ..__PTHREAD_MUTEX_SIZE__], + }; + pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __sig: _PTHREAD_COND_SIG_init, + __opaque: [0, 
..__PTHREAD_COND_SIZE__], + }; + } + + #[cfg(target_os = "linux")] + mod os { + use libc; + + // minus 8 because we have an 'align' field + #[cfg(target_arch = "x86_64")] + static __SIZEOF_PTHREAD_MUTEX_T: uint = 40 - 8; + #[cfg(target_arch = "x86")] + static __SIZEOF_PTHREAD_MUTEX_T: uint = 24 - 8; + #[cfg(target_arch = "x86_64")] + static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8; + #[cfg(target_arch = "x86")] + static __SIZEOF_PTHREAD_COND_T: uint = 48 - 8; + + pub struct pthread_mutex_t { + __align: libc::c_long, + size: [u8, ..__SIZEOF_PTHREAD_MUTEX_T], + } + pub struct pthread_cond_t { + __align: libc::c_longlong, + size: [u8, ..__SIZEOF_PTHREAD_COND_T], + } - pub unsafe fn free_lock(h: uint) { - let block = h as *mut libc::c_void; - assert_eq!(pthread_mutex_destroy(block), 0); - libc::free(block); + pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + __align: 0, + size: [0, ..__SIZEOF_PTHREAD_MUTEX_T], + }; + pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + __align: 0, + size: [0, ..__SIZEOF_PTHREAD_COND_T], + }; } + #[cfg(target_os = "android")] + mod os { + use libc; - pub unsafe fn free_cond(h: uint) { - let block = h as *mut pthread_cond_t; - assert_eq!(pthread_cond_destroy(block), 0); - libc::free(block); - } + pub struct pthread_mutex_t { value: libc::c_int } + pub struct pthread_cond_t { value: libc::c_int } - pub unsafe fn lock(l: uint) { - assert_eq!(pthread_mutex_lock(l as *mut pthread_mutex_t), 0); + pub static PTHREAD_MUTEX_INITIALIZER: pthread_mutex_t = pthread_mutex_t { + value: 0, + }; + pub static PTHREAD_COND_INITIALIZER: pthread_cond_t = pthread_cond_t { + value: 0, + }; } - pub unsafe fn trylock(l: uint) -> bool { - pthread_mutex_trylock(l as *mut pthread_mutex_t) == 0 + pub struct Mutex { + priv lock: pthread_mutex_t, + priv cond: pthread_cond_t, } - pub unsafe fn unlock(l: uint) { - assert_eq!(pthread_mutex_unlock(l as *mut pthread_mutex_t), 0); - } + pub static MUTEX_INIT: Mutex = Mutex { + lock: PTHREAD_MUTEX_INITIALIZER, + cond: PTHREAD_COND_INITIALIZER, + }; - pub unsafe fn wait(cond: uint, m: uint) { - assert_eq!(pthread_cond_wait(cond as *mut pthread_cond_t, m as *mut pthread_mutex_t), 0); - } + impl Mutex { + pub unsafe fn new() -> Mutex { + let mut m = Mutex { + lock: intrinsics::init(), + cond: intrinsics::init(), + }; - pub unsafe fn signal(cond: uint) { - assert_eq!(pthread_cond_signal(cond as *mut pthread_cond_t), 0); - } + pthread_mutex_init(&mut m.lock, 0 as *libc::c_void); + pthread_cond_init(&mut m.cond, 0 as *libc::c_void); - extern { - fn rust_pthread_mutex_t_size() -> libc::c_int; - fn rust_pthread_cond_t_size() -> libc::c_int; + return m; + } + + pub unsafe fn lock(&mut self) { pthread_mutex_lock(&mut self.lock); } + pub unsafe fn unlock(&mut self) { pthread_mutex_unlock(&mut self.lock); } + pub unsafe fn signal(&mut self) { pthread_cond_signal(&mut self.cond); } + pub unsafe fn wait(&mut self) { + pthread_cond_wait(&mut self.cond, &mut self.lock); + } + pub unsafe fn trylock(&mut self) -> bool { + pthread_mutex_trylock(&mut self.lock) == 0 + } + pub unsafe fn destroy(&mut self) { + pthread_mutex_destroy(&mut self.lock); + pthread_cond_destroy(&mut self.cond); + } } extern { @@ -242,16 +248,96 @@ mod imp { #[cfg(windows)] mod imp { - use libc; + use rt::global_heap::malloc_raw; use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR}; + use libc; use ptr; - use rt::global_heap::malloc_raw; + use sync::atomics; - type LPCRITICAL_SECTION = *c_void; + type LPCRITICAL_SECTION = *mut 
c_void; static SPIN_COUNT: DWORD = 4000; + #[cfg(target_arch = "x86")] + static CRIT_SECTION_SIZE: uint = 24; + + pub struct Mutex { + // pointers for the lock/cond handles, atomically updated + priv lock: atomics::AtomicUint, + priv cond: atomics::AtomicUint, + } + + pub static MUTEX_INIT: Mutex = Mutex { + lock: atomics::INIT_ATOMIC_UINT, + cond: atomics::INIT_ATOMIC_UINT, + }; + + impl Mutex { + pub unsafe fn new() -> Mutex { + Mutex { + lock: atomics::AtomicUint::new(init_lock()), + cond: atomics::AtomicUint::new(init_cond()), + } + } + pub unsafe fn lock(&mut self) { + EnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) + } + pub unsafe fn trylock(&mut self) -> bool { + TryEnterCriticalSection(self.getlock() as LPCRITICAL_SECTION) != 0 + } + pub unsafe fn unlock(&mut self) { + LeaveCriticalSection(self.getlock() as LPCRITICAL_SECTION) + } + + pub unsafe fn wait(&mut self) { + self.unlock(); + WaitForSingleObject(self.getcond() as HANDLE, libc::INFINITE); + self.lock(); + } + + pub unsafe fn signal(&mut self) { + assert!(SetEvent(self.getcond() as HANDLE) != 0); + } + + /// This function is especially unsafe because there are no guarantees made + /// that no other thread is currently holding the lock or waiting on the + /// condition variable contained inside. + pub unsafe fn destroy(&mut self) { + let lock = self.lock.swap(0, atomics::SeqCst); + let cond = self.cond.swap(0, atomics::SeqCst); + if lock != 0 { free_lock(lock) } + if cond != 0 { free_cond(cond) } + } + + unsafe fn getlock(&mut self) -> *mut c_void { + match self.lock.load(atomics::SeqCst) { + 0 => {} + n => return n as *mut c_void + } + let lock = init_lock(); + match self.lock.compare_and_swap(0, lock, atomics::SeqCst) { + 0 => return lock as *mut c_void, + _ => {} + } + free_lock(lock); + return self.lock.load(atomics::SeqCst) as *mut c_void; + } + + unsafe fn getcond(&mut self) -> *mut c_void { + match self.cond.load(atomics::SeqCst) { + 0 => {} + n => return n as *mut c_void + } + let cond = init_cond(); + match self.cond.compare_and_swap(0, cond, atomics::SeqCst) { + 0 => return cond as *mut c_void, + _ => {} + } + free_cond(cond); + return self.cond.load(atomics::SeqCst) as *mut c_void; + } + } pub unsafe fn init_lock() -> uint { - let block = malloc_raw(rust_crit_section_size() as uint) as *c_void; + let block = malloc_raw(CRIT_SECTION_SIZE as uint) as *mut c_void; InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT); return block as uint; } @@ -271,32 +357,6 @@ mod imp { libc::CloseHandle(block); } - pub unsafe fn lock(l: uint) { - EnterCriticalSection(l as LPCRITICAL_SECTION) - } - - pub unsafe fn trylock(l: uint) -> bool { - TryEnterCriticalSection(l as LPCRITICAL_SECTION) != 0 - } - - pub unsafe fn unlock(l: uint) { - LeaveCriticalSection(l as LPCRITICAL_SECTION) - } - - pub unsafe fn wait(cond: uint, m: uint) { - unlock(m); - WaitForSingleObject(cond as HANDLE, libc::INFINITE); - lock(m); - } - - pub unsafe fn signal(cond: uint) { - assert!(SetEvent(cond as HANDLE) != 0); - } - - extern { - fn rust_crit_section_size() -> libc::c_int; - } - extern "system" { fn CreateEventA(lpSecurityAttributes: LPSECURITY_ATTRIBUTES, bManualReset: BOOL, @@ -314,157 +374,14 @@ mod imp { } } -/// A type which can be used to run a one-time global initialization. This type -/// is *unsafe* to use because it is built on top of the `Mutex` in this module. 
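Unlike the pthreads side, the win32 implementation above cannot be statically initialized, so `getlock`/`getcond` lazily allocate the handle and publish it with a compare-and-swap; the loser of the race frees its own allocation. Distilled into a standalone sketch (hypothetical helper, 2013-era closure syntax, assuming `use sync::atomics;` as in the module above):

    // Sketch only: CAS-publish a lazily created handle stored as a uint.
    unsafe fn get_or_init(slot: &mut atomics::AtomicUint,
                          create: || -> uint,
                          destroy: |uint|) -> uint {
        match slot.load(atomics::SeqCst) {
            0 => {}                      // not created yet, fall through
            n => return n
        }
        let fresh = create();
        match slot.compare_and_swap(0, fresh, atomics::SeqCst) {
            0 => return fresh,           // we published our handle
            _ => {}
        }
        destroy(fresh);                  // somebody beat us; free ours
        slot.load(atomics::SeqCst)
    }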
-/// It does not know whether the currently running task is in a green or native -/// context, and a blocking mutex should *not* be used under normal -/// circumstances on a green task. -/// -/// Despite its unsafety, it is often useful to have a one-time initialization -/// routine run for FFI bindings or related external functionality. This type -/// can only be statically constructed with the `ONCE_INIT` value. -/// -/// # Example -/// -/// ```rust -/// use std::unstable::mutex::{Once, ONCE_INIT}; -/// -/// static mut START: Once = ONCE_INIT; -/// unsafe { -/// START.doit(|| { -/// // run initialization here -/// }); -/// } -/// ``` -pub struct Once { - priv mutex: Mutex, - priv cnt: atomics::AtomicInt, - priv lock_cnt: atomics::AtomicInt, -} - -/// Initialization value for static `Once` values. -pub static ONCE_INIT: Once = Once { - mutex: MUTEX_INIT, - cnt: atomics::INIT_ATOMIC_INT, - lock_cnt: atomics::INIT_ATOMIC_INT, -}; - -impl Once { - /// Perform an initialization routine once and only once. The given closure - /// will be executed if this is the first time `doit` has been called, and - /// otherwise the routine will *not* be invoked. - /// - /// This method will block the calling *os thread* if another initialization - /// routine is currently running. - /// - /// When this function returns, it is guaranteed that some initialization - /// has run and completed (it may not be the closure specified). - pub fn doit(&mut self, f: ||) { - // Implementation-wise, this would seem like a fairly trivial primitive. - // The stickler part is where our mutexes currently require an - // allocation, and usage of a `Once` should't leak this allocation. - // - // This means that there must be a deterministic destroyer of the mutex - // contained within (because it's not needed after the initialization - // has run). - // - // The general scheme here is to gate all future threads once - // initialization has completed with a "very negative" count, and to - // allow through threads to lock the mutex if they see a non negative - // count. For all threads grabbing the mutex, exactly one of them should - // be responsible for unlocking the mutex, and this should only be done - // once everyone else is done with the mutex. - // - // This atomicity is achieved by swapping a very negative value into the - // shared count when the initialization routine has completed. This will - // read the number of threads which will at some point attempt to - // acquire the mutex. This count is then squirreled away in a separate - // variable, and the last person on the way out of the mutex is then - // responsible for destroying the mutex. - // - // It is crucial that the negative value is swapped in *after* the - // initialization routine has completed because otherwise new threads - // calling `doit` will return immediately before the initialization has - // completed. 
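To make the counting scheme described in the comment above concrete, here is one possible interleaving of three callers of `doit` (thread names and order are purely illustrative):

    // cnt starts at 0, lock_cnt at 0.
    // T1: cnt.fetch_add(1)   -> sees 0, takes the mutex, runs f()
    // T2: cnt.fetch_add(1)   -> sees 1, blocks on the mutex
    // T1: cnt.swap(int::MIN) -> sees 2, so lock_cnt = 2 (both T1 and T2
    //                           still have to pass through the mutex)
    // T2: gets the mutex, sees cnt < 0, skips f()
    // T3: cnt.fetch_add(1)   -> sees a very negative value, returns at once
    // T1: lock_cnt.fetch_add(-1) -> sees 2, just leaves
    // T2: lock_cnt.fetch_add(-1) -> sees 1, is last out, destroys the mutex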
- - let prev = self.cnt.fetch_add(1, atomics::SeqCst); - if prev < 0 { - // Make sure we never overflow, we'll never have int::MIN - // simultaneous calls to `doit` to make this value go back to 0 - self.cnt.store(int::MIN, atomics::SeqCst); - return - } - - // If the count is negative, then someone else finished the job, - // otherwise we run the job and record how many people will try to grab - // this lock - unsafe { self.mutex.lock() } - if self.cnt.load(atomics::SeqCst) > 0 { - f(); - let prev = self.cnt.swap(int::MIN, atomics::SeqCst); - self.lock_cnt.store(prev, atomics::SeqCst); - } - unsafe { self.mutex.unlock() } - - // Last one out cleans up after everyone else, no leaks! - if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 { - unsafe { self.mutex.destroy() } - } - } -} - #[cfg(test)] mod test { use prelude::*; + use super::{Mutex, MUTEX_INIT}; use rt::thread::Thread; - use super::{ONCE_INIT, Once, Mutex, MUTEX_INIT}; use task; - #[test] - fn smoke_once() { - static mut o: Once = ONCE_INIT; - let mut a = 0; - unsafe { o.doit(|| a += 1); } - assert_eq!(a, 1); - unsafe { o.doit(|| a += 1); } - assert_eq!(a, 1); - } - - #[test] - fn stampede_once() { - static mut o: Once = ONCE_INIT; - static mut run: bool = false; - - let (p, c) = SharedChan::new(); - for _ in range(0, 10) { - let c = c.clone(); - spawn(proc() { - for _ in range(0, 4) { task::deschedule() } - unsafe { - o.doit(|| { - assert!(!run); - run = true; - }); - assert!(run); - } - c.send(()); - }); - } - - unsafe { - o.doit(|| { - assert!(!run); - run = true; - }); - assert!(run); - } - - for _ in range(0, 10) { - p.recv(); - } - } - #[test] fn somke_lock() { static mut lock: Mutex = MUTEX_INIT; @@ -493,7 +410,7 @@ mod test { #[test] fn destroy_immediately() { unsafe { - let mut m = Mutex::empty(); + let mut m = Mutex::new(); m.destroy(); } } diff --git a/src/rt/rust_builtin.c b/src/rt/rust_builtin.c index 6de5f80829003..81eba2984dad0 100644 --- a/src/rt/rust_builtin.c +++ b/src/rt/rust_builtin.c @@ -437,26 +437,6 @@ rust_win32_rand_release() { #endif -#if defined(__WIN32__) - -int -rust_crit_section_size() { return sizeof(CRITICAL_SECTION); } -int -rust_pthread_mutex_t_size() { return 0; } -int -rust_pthread_cond_t_size() { return 0; } - -#else - -int -rust_crit_section_size() { return 0; } -int -rust_pthread_mutex_t_size() { return sizeof(pthread_mutex_t); } -int -rust_pthread_cond_t_size() { return sizeof(pthread_cond_t); } - -#endif - // // Local Variables: // mode: C++ diff --git a/src/test/auxiliary/cci_intrinsic.rs b/src/test/auxiliary/cci_intrinsic.rs index 9e69715d1cb21..07d6df89d220c 100644 --- a/src/test/auxiliary/cci_intrinsic.rs +++ b/src/test/auxiliary/cci_intrinsic.rs @@ -10,21 +10,21 @@ pub mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int; - pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int; - pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int; + pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_xchg(dst: &mut int, src: int) -> int; - pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int; - pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xchg(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; - pub fn atomic_xadd(dst: &mut int, src: int) -> int; - pub fn atomic_xadd_acq(dst: 
&mut int, src: int) -> int; - pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xadd(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; - pub fn atomic_xsub(dst: &mut int, src: int) -> int; - pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int; - pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xsub(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; } } diff --git a/src/test/run-pass/intrinsic-atomics.rs b/src/test/run-pass/intrinsic-atomics.rs index 2ec91ee440b86..d6e394a345e22 100644 --- a/src/test/run-pass/intrinsic-atomics.rs +++ b/src/test/run-pass/intrinsic-atomics.rs @@ -10,27 +10,27 @@ mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut int, old: int, src: int) -> int; - pub fn atomic_cxchg_acq(dst: &mut int, old: int, src: int) -> int; - pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int; + pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_load(src: &int) -> int; - pub fn atomic_load_acq(src: &int) -> int; + pub fn atomic_load(src: &T) -> T; + pub fn atomic_load_acq(src: &T) -> T; - pub fn atomic_store(dst: &mut int, val: int); - pub fn atomic_store_rel(dst: &mut int, val: int); + pub fn atomic_store(dst: &mut T, val: T); + pub fn atomic_store_rel(dst: &mut T, val: T); - pub fn atomic_xchg(dst: &mut int, src: int) -> int; - pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int; - pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xchg(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; - pub fn atomic_xadd(dst: &mut int, src: int) -> int; - pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int; - pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xadd(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; - pub fn atomic_xsub(dst: &mut int, src: int) -> int; - pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int; - pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int; + pub fn atomic_xsub(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; } }
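For reference, a usage sketch in the style of these tests showing what the switch from `int` to a type parameter buys: the same calls now type-check at any integer width. The `main` below is illustrative only and not part of the patch; it assumes the `rusti` module declared above.

    pub fn main() {
        unsafe {
            let mut x = 1u8;
            assert_eq!(rusti::atomic_xchg(&mut x, 5), 1);   // returns old value
            assert_eq!(x, 5);

            let mut y = 10u64;
            assert_eq!(rusti::atomic_xadd(&mut y, 3), 10);
            assert_eq!(rusti::atomic_xsub(&mut y, 2), 13);
            assert_eq!(y, 11);
        }
    }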