Add missing atomic operations to AtomicPtr
This adds the various `fetch_` methods to `AtomicPtr` that are present on
the other `Atomic*` types, so that libraries that depend on atomic
operations on pointers no longer have to cast those pointers to `usize`
and fiddle around with `AtomicUsize` instead.
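
For context, a minimal sketch (not from this patch; `FlaggedPtr` and
`set_flag` are made-up names) of the `AtomicUsize` workaround that these
methods make unnecessary:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Without fetch_* on AtomicPtr, the pointer bits live in an AtomicUsize
// and every access casts back and forth, losing the pointer type.
struct FlaggedPtr {
    bits: AtomicUsize,
}

impl FlaggedPtr {
    fn new(p: *mut u8) -> Self {
        FlaggedPtr { bits: AtomicUsize::new(p as usize) }
    }

    // Atomically set the low tag bit; with this patch the same operation
    // becomes a single call to `AtomicPtr::fetch_or(1, Ordering::SeqCst)`.
    fn set_flag(&self) -> *mut u8 {
        self.bits.fetch_or(1, Ordering::SeqCst) as *mut u8
    }
}
```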

Note that this patch currently implements `fetch_add` and `fetch_sub` in
terms of raw bytes, without considering the size of the pointee. This is
unlike regular pointer addition and subtraction. The rationale is that
for atomic operations the user may genuinely wish to increment the
address by exactly 1, which is difficult if every delta is interpreted
as a multiple of the pointee's size.
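
A short illustration of that byte-wise behaviour (hypothetical usage of
the API added here, mirroring the doc examples below):

```rust
#![feature(atomic_ptr_fetch_op)]
use std::sync::atomic::{AtomicPtr, Ordering};

let a = AtomicPtr::new(0x1000 as *mut u32);
// The delta is in raw bytes: the address advances by 1, not by
// size_of::<u32>() == 4 as `<*mut u32>::add(1)` would.
a.fetch_add(1, Ordering::SeqCst);
assert_eq!(a.load(Ordering::SeqCst) as usize, 0x1001);
```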

This patch effectively resurrects the change from #10154. Based on
#12949 (comment),
the rationale for not making the change at the time no longer holds.
jonhoo committed Apr 10, 2020
1 parent dbc3cfd commit 8e72286
Showing 3 changed files with 258 additions and 0 deletions.
210 changes: 210 additions & 0 deletions src/libcore/sync/atomic.rs
@@ -1166,6 +1166,216 @@ impl<T> AtomicPtr<T> {
}
}
}

/// Adds to the current pointer, returning the previous pointer.
///
/// Unlike other pointer additions, `fetch_add` increments directly by the provided value,
/// rather than interpreting it as a multiple of `size_of::<T>()`.
///
/// This operation wraps around on overflow.
///
/// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(0 as *mut ());
/// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_add(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_add(self.p.get() as *mut usize, val, order)) }
}

/// Subtracts from the current pointer, returning the previous pointer.
///
/// Unlike other pointer subtractions, `fetch_sub` decrements directly by the provided value,
/// rather than interpreting it as a multiple of `size_of::<T>()`.
///
/// This operation wraps around on overflow.
///
/// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(20 as *mut ());
/// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), 10 as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_sub(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_sub(self.p.get() as *mut usize, val, order)) }
}

/// Bitwise "and" with the current value.
///
/// Performs a bitwise "and" operation on the current pointer and the argument `val`, and
/// sets the new pointer to the result.
///
/// Returns the previous pointer.
///
/// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(0b101101 as *mut ());
/// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), 0b100001 as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_and(self.p.get() as *mut usize, val, order)) }
}

/// Bitwise "nand" with the current value.
///
/// Performs a bitwise "nand" operation on the current pointer and the argument `val`, and
/// sets the new pointer to the result.
///
/// Returns the previous pointer.
///
/// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(0x13 as *mut ());
/// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31) as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_nand(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_nand(self.p.get() as *mut usize, val, order)) }
}

/// Bitwise "or" with the current value.
///
/// Performs a bitwise "or" operation on the current pointer and the argument `val`, and
/// sets the new pointer to the result.
///
/// Returns the previous pointer.
///
/// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(0b101101 as *mut ());
/// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), 0b111111 as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_or(self.p.get() as *mut usize, val, order)) }
}

/// Bitwise "xor" with the current value.
///
/// Performs a bitwise "xor" operation on the current pointer and the argument `val`, and
/// sets the new pointer to the result.
///
/// Returns the previous pointer.
///
/// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
/// of this operation. All ordering modes are possible. Note that using
/// [`Acquire`] makes the store part of this operation [`Relaxed`], and
/// using [`Release`] makes the load part [`Relaxed`].
///
/// [`Ordering`]: enum.Ordering.html
/// [`Relaxed`]: enum.Ordering.html#variant.Relaxed
/// [`Release`]: enum.Ordering.html#variant.Release
/// [`Acquire`]: enum.Ordering.html#variant.Acquire
///
/// # Examples
///
/// ```
/// #![feature(atomic_ptr_fetch_op)]
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let foo = AtomicPtr::new(0b101101 as *mut ());
/// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101 as *mut _);
/// assert_eq!(foo.load(Ordering::SeqCst), 0b011110 as *mut _);
/// ```
#[inline]
#[cfg(target_has_atomic = "ptr")]
#[unstable(feature = "atomic_ptr_fetch_op", issue = "none")]
pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { crate::mem::transmute(atomic_xor(self.p.get() as *mut usize, val, order)) }
}
}

#[cfg(target_has_atomic_load_store = "8")]
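As a usage note (not part of the diff): a typical consumer of these
methods is low-bit pointer tagging. A hedged sketch, assuming the
pointee's alignment keeps the low bit free (`MARK` is an illustrative
constant, not from this patch):

```rust
#![feature(atomic_ptr_fetch_op)]
use std::sync::atomic::{AtomicPtr, Ordering};

const MARK: usize = 0b1;

// Set the low tag bit, returning the previous pointer, without the
// AtomicUsize round-trip that motivated this change.
fn mark(p: &AtomicPtr<u64>) -> *mut u64 {
    p.fetch_or(MARK, Ordering::SeqCst)
}

// Clear the tag bit again.
fn unmark(p: &AtomicPtr<u64>) -> *mut u64 {
    p.fetch_and(!MARK, Ordering::SeqCst)
}
```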
47 changes: 47 additions & 0 deletions src/libcore/tests/atomic.rs
@@ -87,6 +87,53 @@ fn int_xor() {
assert_eq!(x.load(SeqCst), 0xf731 ^ 0x137f);
}

#[test]
fn atomic_ptr() {
// This test assumes a contiguous memory layout for a (tuple) pair of usize
unsafe {
let mut mem: (usize, usize) = (1, 2);
let mut ptr = &mut mem.0 as *mut usize;
// ptr points to .0
let atomic = AtomicPtr::new(ptr);
// atomic points to .0
assert_eq!(atomic.fetch_add(core::mem::size_of::<usize>(), SeqCst), ptr);
// atomic points to .1
ptr = atomic.load(SeqCst);
// ptr points to .1
assert_eq!(*ptr, 2);
atomic.fetch_sub(core::mem::size_of::<usize>(), SeqCst);
// atomic points to .0
ptr = atomic.load(SeqCst);
// ptr points to .0
assert_eq!(*ptr, 1);

// now try xor and back
assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), ptr);
// atomic is NULL
assert_eq!(atomic.fetch_xor(ptr as usize, SeqCst), std::ptr::null_mut());
// atomic points to .0
ptr = atomic.load(SeqCst);
// ptr points to .0
assert_eq!(*ptr, 1);

// then and with all 1s
assert_eq!(atomic.fetch_and(!0, SeqCst), ptr);
assert_eq!(atomic.load(SeqCst), ptr);

// then or with all 0s
assert_eq!(atomic.fetch_or(0, SeqCst), ptr);
assert_eq!(atomic.load(SeqCst), ptr);

// then or with all 1s
assert_eq!(atomic.fetch_or(!0, SeqCst), ptr);
assert_eq!(atomic.load(SeqCst), !0 as *mut _);

// then and with all 0s
assert_eq!(atomic.fetch_and(0, SeqCst), !0 as *mut _);
assert_eq!(atomic.load(SeqCst), 0 as *mut _);
}
}

static S_FALSE: AtomicBool = AtomicBool::new(false);
static S_TRUE: AtomicBool = AtomicBool::new(true);
static S_INT: AtomicIsize = AtomicIsize::new(0);
1 change: 1 addition & 0 deletions src/libcore/tests/lib.rs
@@ -1,4 +1,5 @@
#![feature(alloc_layout_extra)]
#![feature(atomic_ptr_fetch_op)]
#![feature(bool_to_option)]
#![feature(bound_cloned)]
#![feature(box_syntax)]
