
Commit c18fea5

Author: Jonathan S (committed)

Relaxed the memory ordering on the implementation of UnsafeArc

1 parent 9e89ffc

File tree

1 file changed: +29 -7 lines changed


src/libstd/sync/arc.rs
@@ -1,4 +1,4 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
@@ -26,7 +26,7 @@ use clone::Clone;
 use kinds::Send;
 use ops::Drop;
 use ptr::RawPtr;
-use sync::atomics::{AtomicUint, SeqCst, Relaxed, Acquire};
+use sync::atomics::{fence, AtomicUint, Relaxed, Acquire, Release};
 use vec;
 
 /// An atomically reference counted pointer.
@@ -109,8 +109,16 @@ impl<T: Send> UnsafeArc<T> {
 impl<T: Send> Clone for UnsafeArc<T> {
     fn clone(&self) -> UnsafeArc<T> {
         unsafe {
-            // This barrier might be unnecessary, but I'm not sure...
-            let old_count = (*self.data).count.fetch_add(1, Acquire);
+            // Using a relaxed ordering is alright here, as knowledge of the original reference
+            // prevents other threads from erroneously deleting the object.
+            //
+            // As explained in the [Boost documentation][1],
+            // Increasing the reference counter can always be done with memory_order_relaxed: New
+            // references to an object can only be formed from an existing reference, and passing
+            // an existing reference from one thread to another must already provide any required
+            // synchronization.
+            // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+            let old_count = (*self.data).count.fetch_add(1, Relaxed);
             // FIXME(#12049): this needs some sort of debug assertion
             if cfg!(test) { assert!(old_count >= 1); }
             return UnsafeArc { data: self.data };
@@ -127,12 +135,26 @@ impl<T> Drop for UnsafeArc<T>{
             if self.data.is_null() {
                 return
             }
-            // Must be acquire+release, not just release, to make sure this
-            // doesn't get reordered to after the unwrapper pointer load.
-            let old_count = (*self.data).count.fetch_sub(1, SeqCst);
+            // Because `fetch_sub` is already atomic, we do not need to synchronize with other
+            // threads unless we are going to delete the object.
+            let old_count = (*self.data).count.fetch_sub(1, Release);
             // FIXME(#12049): this needs some sort of debug assertion
             if cfg!(test) { assert!(old_count >= 1); }
             if old_count == 1 {
+                // This fence is needed to prevent reordering of use of the data and deletion of
+                // the data. Because it is marked `Release`, the decreasing of the reference count
+                // synchronizes with this `Acquire` fence. This means that use of the data happens
+                // before decreasing the reference count, which happens before this fence, which
+                // happens before the deletion of the data.
+                //
+                // As explained in the [Boost documentation][1],
+                // It is important to enforce any possible access to the object in one thread
+                // (through an existing reference) to *happen before* deleting the object in a
+                // different thread. This is achieved by a "release" operation after dropping a
+                // reference (any access to the object through this reference must obviously
+                // happened before), and an "acquire" operation before deleting the object.
+                // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+                fence(Acquire);
                 let _: ~ArcData<T> = cast::transmute(self.data);
             }
         }
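
For readers who want to experiment with the ordering scheme this commit adopts, below is a minimal, self-contained sketch of the same reference-counting protocol written against today's std::sync::atomic API (the 2014-era sync::atomics module, ~ pointers, and cast::transmute no longer exist). MiniArc and its internals are hypothetical illustration names, not the real UnsafeArc, and this is a sketch of the technique rather than the library's implementation:

use std::ops::Deref;
use std::ptr::NonNull;
use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

struct ArcData<T> {
    count: AtomicUsize,
    data: T,
}

struct MiniArc<T> {
    ptr: NonNull<ArcData<T>>,
}

unsafe impl<T: Send + Sync> Send for MiniArc<T> {}
unsafe impl<T: Send + Sync> Sync for MiniArc<T> {}

impl<T> MiniArc<T> {
    fn new(data: T) -> MiniArc<T> {
        let boxed = Box::new(ArcData { count: AtomicUsize::new(1), data });
        MiniArc { ptr: NonNull::from(Box::leak(boxed)) }
    }
}

impl<T> Clone for MiniArc<T> {
    fn clone(&self) -> MiniArc<T> {
        // Relaxed suffices: a new reference is only ever created from an
        // existing one, and moving that existing reference between threads
        // already provides whatever synchronization is required.
        let old_count = unsafe { self.ptr.as_ref() }.count.fetch_add(1, Relaxed);
        debug_assert!(old_count >= 1);
        MiniArc { ptr: self.ptr }
    }
}

impl<T> Deref for MiniArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        unsafe { &self.ptr.as_ref().data }
    }
}

impl<T> Drop for MiniArc<T> {
    fn drop(&mut self) {
        // Release publishes every prior use of the data to whichever
        // thread ends up freeing it; nothing stronger is needed here.
        let old_count = unsafe { self.ptr.as_ref() }.count.fetch_sub(1, Release);
        debug_assert!(old_count >= 1);
        if old_count == 1 {
            // The Acquire fence synchronizes with all of the Release
            // decrements above, so every use of the data happens-before
            // the deallocation below.
            fence(Acquire);
            drop(unsafe { Box::from_raw(self.ptr.as_ptr()) });
        }
    }
}

fn main() {
    let a = MiniArc::new(vec![1, 2, 3]);
    let b = a.clone(); // Relaxed increment
    let handle = std::thread::spawn(move || b.iter().sum::<i32>());
    assert_eq!(handle.join().unwrap(), 6);
    drop(a); // last owner: Release decrement, Acquire fence, then free
}

The design choice mirrors the commit: an increment never needs to observe other threads' writes, so Relaxed is enough; a decrement must publish all prior uses of the data, so it is Release; and only the one thread that drops the count to zero pays for an Acquire fence before freeing.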
