Commit 916d1a9
auto merge of #5176 : brson/rust/unwrap_shared_mutable_state, r=nikomatsakis
r? This fixes the current [random failures](http://buildbot.rust-lang.org/builders/auto-linux/builds/291/steps/test/logs/stdio) on the bots and closes #4436 by removing `unwrap_shared_mutable_state` and the code that depends on it. The result is that ARC-like things will not be unwrappable. This feature is complex and is not used outside of test cases. Note that there is no consensus to remove it. (second commit)
2 parents 5680ec0 + 78d5091 commit 916d1a9

File tree: 3 files changed, +10 -253 lines
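The deleted APIs (`arc::unwrap`, `unwrap_mutex_arc`, `unwrap_rw_arc`, `unwrap_exclusive`) let the holder of a reference take the value back out of an ARC-like type, blocking until every other reference had been dropped. For orientation only, the closest present-day analogue is the non-blocking `std::sync::Arc::try_unwrap`, which succeeds only when the caller already holds the last reference; a minimal sketch in modern Rust (not the 2013 API shown in this diff):

```rust
use std::sync::Arc;

fn main() {
    let a = Arc::new(String::from("hello"));
    let b = Arc::clone(&a);

    // Another reference is still alive, so try_unwrap refuses and hands the Arc back.
    let a = Arc::try_unwrap(a).unwrap_err();

    drop(b);

    // Now `a` is the last reference, so we get the String out by value --
    // the same end result the removed blocking unwrap produced.
    let s = Arc::try_unwrap(a).unwrap();
    assert_eq!(s, "hello");
}
```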

Diff for: src/libcore/private.rs (+4, -186)
@@ -107,20 +107,9 @@ fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
  * Shared state & exclusive ARC
  ****************************************************************************/
 
-struct UnwrapProtoInner {
-    contents: Option<(comm::ChanOne<()>, comm::PortOne<bool>)>,
-}
-
-// An unwrapper uses this protocol to communicate with the "other" task that
-// drops the last refcount on an arc. Unfortunately this can't be a proper
-// pipe protocol because the unwrapper has to access both stages at once.
-type UnwrapProto = ~UnwrapProtoInner;
-
 struct ArcData<T> {
     mut count: libc::intptr_t,
-    mut unwrapper: int, // either a UnwrapProto or 0
-    // FIXME(#3224) should be able to make this non-option to save memory, and
-    // in unwrap() use "let ~ArcData { data: result, _ } = thing" to unwrap it
+    // FIXME(#3224) should be able to make this non-option to save memory
     mut data: Option<T>,
 }
 
@@ -131,37 +120,13 @@ struct ArcDestruct<T> {
 impl<T> Drop for ArcDestruct<T>{
     fn finalize(&self) {
         unsafe {
-            if self.data.is_null() {
-                return; // Happens when destructing an unwrapper's handle.
-            }
             do task::unkillable {
                 let data: ~ArcData<T> = cast::reinterpret_cast(&self.data);
                 let new_count =
                     intrinsics::atomic_xsub(&mut data.count, 1) - 1;
                 assert new_count >= 0;
                 if new_count == 0 {
-                    // Were we really last, or should we hand off to an
-                    // unwrapper? It's safe to not xchg because the unwrapper
-                    // will set the unwrap lock *before* dropping his/her
-                    // reference. In effect, being here means we're the only
-                    // *awake* task with the data.
-                    if data.unwrapper != 0 {
-                        let mut p: UnwrapProto =
-                            cast::reinterpret_cast(&data.unwrapper);
-                        let (message, response) =
-                            option::swap_unwrap(&mut p.contents);
-                        // Send 'ready' and wait for a response.
-                        comm::send_one(message, ());
-                        // Unkillable wait. Message guaranteed to come.
-                        if comm::recv_one(response) {
-                            // Other task got the data.
-                            cast::forget(data);
-                        } else {
-                            // Other task was killed. drop glue takes over.
-                        }
-                    } else {
-                        // drop glue takes over.
-                    }
+                    // drop glue takes over.
                 } else {
                     cast::forget(data);
                 }
@@ -176,79 +141,6 @@ fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
     }
 }
 
-pub unsafe fn unwrap_shared_mutable_state<T:Owned>(rc: SharedMutableState<T>)
-        -> T {
-    struct DeathThroes<T> {
-        mut ptr: Option<~ArcData<T>>,
-        mut response: Option<comm::ChanOne<bool>>,
-    }
-
-    impl<T> Drop for DeathThroes<T>{
-        fn finalize(&self) {
-            unsafe {
-                let response = option::swap_unwrap(&mut self.response);
-                // In case we get killed early, we need to tell the person who
-                // tried to wake us whether they should hand-off the data to
-                // us.
-                if task::failing() {
-                    comm::send_one(response, false);
-                    // Either this swap_unwrap or the one below (at "Got
-                    // here") ought to run.
-                    cast::forget(option::swap_unwrap(&mut self.ptr));
-                } else {
-                    assert self.ptr.is_none();
-                    comm::send_one(response, true);
-                }
-            }
-        }
-    }
-
-    do task::unkillable {
-        let ptr: ~ArcData<T> = cast::reinterpret_cast(&rc.data);
-        let (p1,c1) = comm::oneshot(); // ()
-        let (p2,c2) = comm::oneshot(); // bool
-        let mut server: UnwrapProto = ~UnwrapProtoInner {
-            contents: Some((c1,p2))
-        };
-        let serverp: int = cast::transmute(server);
-        // Try to put our server end in the unwrapper slot.
-        if compare_and_swap(&mut ptr.unwrapper, 0, serverp) {
-            // Got in. Step 0: Tell destructor not to run. We are now it.
-            rc.data = ptr::null();
-            // Step 1 - drop our own reference.
-            let new_count = intrinsics::atomic_xsub(&mut ptr.count, 1) - 1;
-            //assert new_count >= 0;
-            if new_count == 0 {
-                // We were the last owner. Can unwrap immediately.
-                // Also we have to free the server endpoints.
-                let _server: UnwrapProto = cast::transmute(serverp);
-                option::swap_unwrap(&mut ptr.data)
-                // drop glue takes over.
-            } else {
-                // The *next* person who sees the refcount hit 0 will wake us.
-                let end_result =
-                    DeathThroes { ptr: Some(ptr),
-                                  response: Some(c2) };
-                let mut p1 = Some(p1); // argh
-                do task::rekillable {
-                    comm::recv_one(option::swap_unwrap(&mut p1));
-                }
-                // Got here. Back in the 'unkillable' without getting killed.
-                // Recover ownership of ptr, then take the data out.
-                let ptr = option::swap_unwrap(&mut end_result.ptr);
-                option::swap_unwrap(&mut ptr.data)
-                // drop glue takes over.
-            }
-        } else {
-            // Somebody else was trying to unwrap. Avoid guaranteed deadlock.
-            cast::forget(ptr);
-            // Also we have to free the (rejected) server endpoints.
-            let _server: UnwrapProto = cast::transmute(serverp);
-            fail!(~"Another task is already unwrapping this ARC!");
-        }
-    }
-}
-
 /**
  * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
  *
@@ -259,7 +151,7 @@ pub type SharedMutableState<T> = ArcDestruct<T>;
 
 pub unsafe fn shared_mutable_state<T:Owned>(data: T) ->
         SharedMutableState<T> {
-    let data = ~ArcData { count: 1, unwrapper: 0, data: Some(data) };
+    let data = ~ArcData { count: 1, data: Some(data) };
     unsafe {
         let ptr = cast::transmute(data);
         ArcDestruct(ptr)
@@ -413,22 +305,14 @@ pub impl<T:Owned> Exclusive<T> {
     }
 }
 
-// FIXME(#3724) make this a by-move method on the exclusive
-pub fn unwrap_exclusive<T:Owned>(arc: Exclusive<T>) -> T {
-    let Exclusive { x: x } = arc;
-    let inner = unsafe { unwrap_shared_mutable_state(x) };
-    let ExData { data: data, _ } = inner;
-    data
-}
-
 #[cfg(test)]
 pub mod tests {
     use core::option::{None, Some};
 
     use cell::Cell;
     use comm;
     use option;
-    use private::{exclusive, unwrap_exclusive};
+    use private::exclusive;
     use result;
     use task;
     use uint;
@@ -479,70 +363,4 @@ pub mod tests {
             assert *one == 1;
         }
     }
-
-    #[test]
-    pub fn exclusive_unwrap_basic() {
-        let x = exclusive(~~"hello");
-        assert unwrap_exclusive(x) == ~~"hello";
-    }
-
-    #[test]
-    pub fn exclusive_unwrap_contended() {
-        let x = exclusive(~~"hello");
-        let x2 = Cell(x.clone());
-        do task::spawn {
-            let x2 = x2.take();
-            do x2.with |_hello| { }
-            task::yield();
-        }
-        assert unwrap_exclusive(x) == ~~"hello";
-
-        // Now try the same thing, but with the child task blocking.
-        let x = exclusive(~~"hello");
-        let x2 = Cell(x.clone());
-        let mut res = None;
-        do task::task().future_result(|+r| res = Some(r)).spawn {
-            let x2 = x2.take();
-            assert unwrap_exclusive(x2) == ~~"hello";
-        }
-        // Have to get rid of our reference before blocking.
-        { let _x = x; } // FIXME(#3161) util::ignore doesn't work here
-        let res = option::swap_unwrap(&mut res);
-        res.recv();
-    }
-
-    #[test] #[should_fail] #[ignore(cfg(windows))]
-    pub fn exclusive_unwrap_conflict() {
-        let x = exclusive(~~"hello");
-        let x2 = Cell(x.clone());
-        let mut res = None;
-        do task::task().future_result(|+r| res = Some(r)).spawn {
-            let x2 = x2.take();
-            assert unwrap_exclusive(x2) == ~~"hello";
-        }
-        assert unwrap_exclusive(x) == ~~"hello";
-        let res = option::swap_unwrap(&mut res);
-        // See #4689 for why this can't be just "res.recv()".
-        assert res.recv() == task::Success;
-    }
-
-    #[test] #[ignore(cfg(windows))]
-    pub fn exclusive_unwrap_deadlock() {
-        // This is not guaranteed to get to the deadlock before being killed,
-        // but it will show up sometimes, and if the deadlock were not there,
-        // the test would nondeterministically fail.
-        let result = do task::try {
-            // a task that has two references to the same exclusive will
-            // deadlock when it unwraps. nothing to be done about that.
-            let x = exclusive(~~"hello");
-            let x2 = x.clone();
-            do task::spawn {
-                for 10.times { task::yield(); } // try to let the unwrapper go
-                fail!(); // punt it awake from its deadlock
-            }
-            let _z = unwrap_exclusive(x);
-            do x2.with |_hello| { }
-        };
-        assert result.is_err();
-    }
 }
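The deleted `unwrap_shared_mutable_state` coordinated two parties: the unwrapper CAS'd a pair of oneshot endpoints into the `unwrapper` slot and went to sleep, and whichever task later dropped the refcount to zero sent a "ready" message and then waited for a boolean saying whether the unwrapper was still alive (hand the data off) or had been killed (run the destructor itself). Below is a rough modern-Rust sketch of just that two-message handshake, with `std::sync::mpsc` standing in for the old `comm::oneshot` pipes and the refcount/compare-and-swap machinery omitted; it illustrates the shape of the protocol, not the original code.

```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    // The channel pair the unwrapper stashed in the (now removed) `unwrapper`
    // slot: one to be told "the refcount hit zero", one to answer on.
    let (ready_tx, ready_rx) = mpsc::channel::<()>();
    let (resp_tx, resp_rx) = mpsc::channel::<bool>();

    // The task that drops the last reference: wake the unwrapper, then ask
    // whether it actually took the data or died before it could.
    let last_dropper = thread::spawn(move || {
        ready_tx.send(()).unwrap();
        let taken = resp_rx.recv().unwrap();
        if taken {
            println!("unwrapper took the data; skip the destructor here");
        } else {
            println!("unwrapper died; run the destructor (drop glue) ourselves");
        }
    });

    // The unwrapper: block until the last reference is gone, then report that
    // we are alive and have taken ownership of the data.
    ready_rx.recv().unwrap();
    resp_tx.send(true).unwrap();

    last_dropper.join().unwrap();
}
```

The deadlock warned about in the removed doc comment falls straight out of this shape: if the unwrapping task itself still holds another reference, nobody else can ever drop the count to zero and send "ready", which is exactly what the deleted `exclusive_unwrap_deadlock` test exercised.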

Diff for: src/libstd/arc.rs (+1, -65)
@@ -21,7 +21,7 @@ use core::cell::Cell;
 use core::pipes;
 use core::prelude::*;
 use core::private::{SharedMutableState, shared_mutable_state};
-use core::private::{clone_shared_mutable_state, unwrap_shared_mutable_state};
+use core::private::{clone_shared_mutable_state};
 use core::private::{get_shared_mutable_state, get_shared_immutable_state};
 use core::ptr;
 use core::task;
@@ -104,20 +104,6 @@ pub fn clone<T:Const + Owned>(rc: &ARC<T>) -> ARC<T> {
     ARC { x: unsafe { clone_shared_mutable_state(&rc.x) } }
 }
 
-/**
- * Retrieve the data back out of the ARC. This function blocks until the
- * reference given to it is the last existing one, and then unwrap the data
- * instead of destroying it.
- *
- * If multiple tasks call unwrap, all but the first will fail. Do not call
- * unwrap from a task that holds another reference to the same ARC; it is
- * guaranteed to deadlock.
- */
-pub fn unwrap<T:Const + Owned>(rc: ARC<T>) -> T {
-    let ARC { x: x } = rc;
-    unsafe { unwrap_shared_mutable_state(x) }
-}
-
 impl<T:Const + Owned> Clone for ARC<T> {
     fn clone(&self) -> ARC<T> {
         clone(self)
@@ -213,23 +199,6 @@ pub impl<T:Owned> &MutexARC<T> {
     }
 }
 
-/**
- * Retrieves the data, blocking until all other references are dropped,
- * exactly as arc::unwrap.
- *
- * Will additionally fail if another task has failed while accessing the arc.
- */
-// FIXME(#3724) make this a by-move method on the arc
-pub fn unwrap_mutex_arc<T:Owned>(arc: MutexARC<T>) -> T {
-    let MutexARC { x: x } = arc;
-    let inner = unsafe { unwrap_shared_mutable_state(x) };
-    let MutexARCInner { failed: failed, data: data, _ } = inner;
-    if failed {
-        fail!(~"Can't unwrap poisoned MutexARC - another task failed inside!")
-    }
-    data
-}
-
 // Common code for {mutex.access,rwlock.write}{,_cond}.
 #[inline(always)]
 #[doc(hidden)]
@@ -411,24 +380,6 @@ pub impl<T:Const + Owned> &RWARC<T> {
     }
 }
 
-/**
- * Retrieves the data, blocking until all other references are dropped,
- * exactly as arc::unwrap.
- *
- * Will additionally fail if another task has failed while accessing the arc
- * in write mode.
- */
-// FIXME(#3724) make this a by-move method on the arc
-pub fn unwrap_rw_arc<T:Const + Owned>(arc: RWARC<T>) -> T {
-    let RWARC { x: x, _ } = arc;
-    let inner = unsafe { unwrap_shared_mutable_state(x) };
-    let RWARCInner { failed: failed, data: data, _ } = inner;
-    if failed {
-        fail!(~"Can't unwrap poisoned RWARC - another task failed inside!")
-    }
-    data
-}
-
 // Borrowck rightly complains about immutably aliasing the rwlock in order to
 // lock it. This wraps the unsafety, with the justification that the 'lock'
 // field is never overwritten; only 'failed' and 'data'.
@@ -586,21 +537,6 @@ mod tests {
         }
     }
     #[test] #[should_fail] #[ignore(cfg(windows))]
-    pub fn test_mutex_arc_unwrap_poison() {
-        let arc = MutexARC(1);
-        let arc2 = ~(&arc).clone();
-        let (p, c) = comm::stream();
-        do task::spawn || {
-            do arc2.access |one| {
-                c.send(());
-                assert *one == 2;
-            }
-        }
-        let _ = p.recv();
-        let one = unwrap_mutex_arc(arc);
-        assert one == 1;
-    }
-    #[test] #[should_fail] #[ignore(cfg(windows))]
     pub fn test_rw_arc_poison_wr() {
         let arc = ~RWARC(1);
         let arc2 = ~arc.clone();
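Besides blocking until they held the last reference, the removed `unwrap_mutex_arc` and `unwrap_rw_arc` also checked the `failed` flag and called `fail!` if another task had died while holding the lock, which is what the deleted `test_mutex_arc_unwrap_poison` exercised. Today's `std::sync::Mutex` keeps an equivalent poison flag; as a small illustration (modern Rust, not the removed API), here is how that condition can still be observed after unwrapping the last `Arc` reference:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let m = Arc::new(Mutex::new(1));
    let m2 = Arc::clone(&m);

    // A thread that panics while holding the lock poisons the mutex.
    let _ = thread::spawn(move || {
        let _guard = m2.lock().unwrap();
        panic!("poisoning the lock");
    })
    .join();

    // We now hold the last reference, so unwrapping the Arc succeeds, but the
    // poison flag survives -- the condition the removed functions turned into
    // a fail!() instead of returning the data.
    let mutex = Arc::try_unwrap(m).expect("this was the last reference");
    assert!(mutex.is_poisoned());
}
```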

Diff for: src/rt/rust_kernel.cpp (+5, -2)
@@ -342,8 +342,11 @@ rust_kernel::register_exit_function(spawn_fn runner, fn_env_pair *f) {
     assert(!at_exit_started && "registering at_exit function after exit");
 
     if (at_exit_runner) {
-        assert(runner == at_exit_runner
-               && "there can be only one at_exit_runner");
+        // FIXME #2912 Would be very nice to assert this but we can't because
+        // of the way coretest works (the test case ends up using its own
+        // function)
+        //assert(runner == at_exit_runner
+        //       && "there can be only one at_exit_runner");
     }
 
     at_exit_runner = runner;
