
Replace zeroing-on-drop with filling-on-drop. #23535

Merged (9 commits, Mar 28, 2015)
src/liballoc/arc.rs (3 additions, 2 deletions)

@@ -354,7 +354,8 @@ impl<T> Drop for Arc<T> {
// more than once (but it is guaranteed to be zeroed after the first if
// it's run more than once)
let ptr = *self._ptr;
-if ptr.is_null() { return }
+// if ptr.is_null() { return }
+if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }

// Because `fetch_sub` is already atomic, we do not need to synchronize
// with other threads unless we are going to delete the object. This
@@ -485,7 +486,7 @@ impl<T> Drop for Weak<T> {
let ptr = *self._ptr;

// see comments above for why this check is here
-if ptr.is_null() { return }
+if ptr.is_null() || ptr as usize == mem::POST_DROP_USIZE { return }

// If we find out that we were the last weak pointer, then its time to
// deallocate the data entirely. See the discussion in Arc::drop() about
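
Context for the guard pattern above (and the identical ones in rc.rs and vec.rs below): with #[unsafe_no_drop_flag] there is no hidden per-value drop flag, so drop glue may run again on a value whose bytes were already overwritten with the fill pattern. A minimal sketch of the same idiom (names hypothetical; assumes the unstable unsafe_no_drop_flag and filling_drop features of this era):

#![feature(unsafe_no_drop_flag, filling_drop)]

use std::mem;

#[unsafe_no_drop_flag]
struct Handle {
    raw: usize, // stands in for a resource handle
}

impl Drop for Handle {
    fn drop(&mut self) {
        // After the first drop the whole struct is filled with
        // POST_DROP_U8, so `raw` reads back as POST_DROP_USIZE and a
        // second run of this glue becomes a no-op.
        if self.raw == mem::POST_DROP_USIZE { return }
        // ... release the resource here ...
    }
}

fn main() {
    let h = Handle { raw: 42 };
    drop(h); // first (real) run of the destructor
}
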
src/liballoc/lib.rs (1 addition, 1 deletion)

@@ -75,7 +75,7 @@
#![feature(box_syntax)]
#![feature(optin_builtin_traits)]
#![feature(unboxed_closures)]
-#![feature(unsafe_no_drop_flag)]
+#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(core)]
#![feature(unique)]
#![cfg_attr(test, feature(test, alloc, rustc_private))]
src/liballoc/rc.rs (3 additions, 3 deletions)

@@ -160,7 +160,7 @@ use core::default::Default;
use core::fmt;
use core::hash::{Hasher, Hash};
use core::marker;
-use core::mem::{min_align_of, size_of, forget};
+use core::mem::{self, min_align_of, size_of, forget};
use core::nonzero::NonZero;
use core::ops::{Deref, Drop};
use core::option::Option;
@@ -407,7 +407,7 @@ impl<T> Drop for Rc<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
-if !ptr.is_null() {
+if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_strong();
if self.strong() == 0 {
ptr::read(&**self); // destroy the contained object
@@ -718,7 +718,7 @@ impl<T> Drop for Weak<T> {
fn drop(&mut self) {
unsafe {
let ptr = *self._ptr;
-if !ptr.is_null() {
+if !ptr.is_null() && ptr as usize != mem::POST_DROP_USIZE {
self.dec_weak();
// the weak count starts at 1, and will only go to zero if all
// the strong pointers have disappeared.
src/libcollections/btree/node.rs (4 additions, 2 deletions)

@@ -280,9 +280,11 @@ impl<T> Drop for RawItems<T> {
#[unsafe_destructor]
impl<K, V> Drop for Node<K, V> {
fn drop(&mut self) {
-if self.keys.is_null() {
+if self.keys.is_null() ||
+    (unsafe { self.keys.get() as *const K as usize == mem::POST_DROP_USIZE })
+{
// Since we have #[unsafe_no_drop_flag], we have to watch
-// out for a null value being stored in self.keys. (Using
+// out for the sentinel value being stored in self.keys. (Using
// null is technically a violation of the `Unique`
// requirements, though.)
return;
src/libcollections/lib.rs (1 addition, 1 deletion)

@@ -36,7 +36,7 @@
#![feature(unicode)]
#![feature(unsafe_destructor)]
#![feature(unique)]
-#![feature(unsafe_no_drop_flag)]
+#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(step_by)]
#![feature(str_char)]
#![feature(convert)]
src/libcollections/vec.rs (2 additions, 2 deletions)

@@ -1694,7 +1694,7 @@ impl<T> Drop for Vec<T> {
fn drop(&mut self) {
// This is (and should always remain) a no-op if the fields are
// zeroed (when moving out, because of #[unsafe_no_drop_flag]).
-if self.cap != 0 {
+if self.cap != 0 && self.cap != mem::POST_DROP_USIZE {
unsafe {
for x in &*self {
ptr::read(x);
@@ -1977,7 +1977,7 @@ impl<'a, T> ExactSizeIterator for Drain<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Drop for Drain<'a, T> {
fn drop(&mut self) {
-// self.ptr == self.end == null if drop has already been called,
+// self.ptr == self.end == mem::POST_DROP_USIZE if drop has already been called,
// so we can use #[unsafe_no_drop_flag].

// destroy the remaining elements
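
The single `self.cap != mem::POST_DROP_USIZE` comparison above works because the fill is byte-wise over the entire struct, so every usize field of a dropped Vec reads back as the repeated sentinel byte. A quick standalone illustration (assumes a 64-bit target and the 0x1d byte chosen later in this PR):

fn main() {
    // Eight copies of the fill byte, reinterpreted as a usize.
    let fill = [0x1du8; 8];
    let as_usize = unsafe { std::mem::transmute::<[u8; 8], usize>(fill) };
    assert_eq!(as_usize, 0x1d1d_1d1d_1d1d_1d1d);
}
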
src/libcore/intrinsics.rs (23 additions, 1 deletion)

@@ -191,13 +191,35 @@ extern "rust-intrinsic" {
/// crate it is invoked in.
pub fn type_id<T: ?Sized + 'static>() -> u64;

/// Create a value initialized so that its drop flag,
/// if any, says that it has been dropped.
///
/// `init_dropped` is unsafe because it returns a datum with all
/// of its bytes set to the drop flag, which generally does not
/// correspond to a valid value.
///
/// This intrinsic is likely to be deprecated in the future when
/// Rust moves to non-zeroing dynamic drop (and thus removes the
/// embedded drop flags that are being established by this
/// intrinsic).
#[cfg(not(stage0))]
pub fn init_dropped<T>() -> T;

/// Create a value initialized to zero.
///
/// `init` is unsafe because it returns a zeroed-out datum,
-/// which is unsafe unless T is Copy.
+/// which is unsafe unless T is `Copy`. Also, even if T is
+/// `Copy`, an all-zero value may not correspond to any legitimate
+/// state for the type in question.
pub fn init<T>() -> T;

/// Create an uninitialized value.
///
/// `uninit` is unsafe because there is no guarantee of what its
/// contents are. In particular its drop-flag may be set to any
/// state, which means it may claim either dropped or
/// undropped. In the general case one must use `ptr::write` to
/// initialize memory previously set to the result of `uninit`.
pub fn uninit<T>() -> T;

/// Move a value out of scope without running drop glue.
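
The ptr::write discipline that the new `uninit` documentation calls for looks like this in practice. A minimal sketch using the stable mem::uninitialized wrapper of this era rather than the raw intrinsic:

use std::{mem, ptr};

fn main() {
    unsafe {
        // The slot's bytes, drop flag included, are garbage here.
        let mut slot: String = mem::uninitialized();
        // `ptr::write` stores a value without reading or dropping the
        // old (invalid) contents.
        ptr::write(&mut slot, String::from("initialized"));
        println!("{}", slot); // `slot` is now valid and drops normally
    }
}
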
src/libcore/mem.rs (69 additions)

@@ -158,6 +158,32 @@ pub unsafe fn zeroed<T>() -> T {
intrinsics::init()
}

/// Create a value initialized to an unspecified series of bytes.
///
/// The byte sequence usually indicates that the value at the memory
/// in question has been dropped. Thus, *if* T carries a drop flag,
/// any associated destructor will not be run when the value falls out
/// of scope.
///
/// Some code at one time used the `zeroed` function above to
/// accomplish this goal.
///
/// This function is expected to be deprecated with the transition
/// to non-zeroing drop.
#[inline]
#[unstable(feature = "filling_drop")]
pub unsafe fn dropped<T>() -> T {
[Review thread]
Member: Could this be a wrapper around init_dropped?

Member (author): I tried to make it such a wrapper at one point in the history of the PR, but I got tired of trying to work out the staging issues involved...

Member: Oh. I guess this might work? (it might not too.)

#[cfg(stage0)]
pub unsafe fn dropped<T>() -> T { zeroed() }
#[cfg(not(stage0))]
pub unsafe fn dropped<T>() -> T { intrinsics::init_dropped() }

Member (author): okay i will try that tomorrow morning

Member (author): ah it works like a charm, thanks huon, now I feel dumb. :)
[End review thread]

#[cfg(stage0)]
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { zeroed() }

#[cfg(not(stage0))]
#[inline(always)]
unsafe fn dropped_impl<T>() -> T { intrinsics::init_dropped() }

dropped_impl()
}
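
The committed shim mirrors the suggestion from the thread: a single public wrapper whose body dispatches to a #[cfg]-selected inner function, so the signature, docs, and stability attributes are written only once. A standalone sketch of the same pattern, substituting an ordinary cfg for rustc's bootstrap-only stage0 cfg:

pub fn pointer_width_label() -> &'static str {
    // The cfg picks an implementation at compile time; callers only
    // ever see the single outer signature.
    #[cfg(target_pointer_width = "64")]
    fn imp() -> &'static str { "64-bit" }

    #[cfg(not(target_pointer_width = "64"))]
    fn imp() -> &'static str { "not 64-bit" }

    imp()
}

fn main() {
    println!("{}", pointer_width_label());
}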

/// Create an uninitialized value.
///
/// Care must be taken when using this function, if the type `T` has a destructor and the value
@@ -291,6 +317,49 @@ pub fn replace<T>(dest: &mut T, mut src: T) -> T {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn drop<T>(_x: T) { }

macro_rules! repeat_u8_as_u32 {
($name:expr) => { (($name as u32) << 24 |
($name as u32) << 16 |
($name as u32) << 8 |
($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
(repeat_u8_as_u32!($name) as u64)) }
}

// NOTE: Keep synchronized with values used in librustc_trans::trans::adt.
//
// In particular, the POST_DROP_U8 marker must never equal the
// DTOR_NEEDED_U8 marker.
//
// For a while pnkfelix was using 0xc1 here.
// But having the sign bit set is a pain, so 0x1d is probably better.
//
// And of course, 0x00 brings back the old world of zero'ing on drop.
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U8: u8 = 0x1d;
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U32: u32 = repeat_u8_as_u32!(POST_DROP_U8);
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U64: u64 = repeat_u8_as_u64!(POST_DROP_U8);

#[cfg(target_pointer_width = "32")]
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = POST_DROP_U32 as usize;
#[cfg(target_pointer_width = "64")]
#[cfg(not(stage0))] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = POST_DROP_U64 as usize;

#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U8: u8 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U32: u32 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_U64: u64 = 0;
#[cfg(stage0)] #[unstable(feature = "filling_drop")]
pub const POST_DROP_USIZE: usize = 0;
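
For concreteness, a standalone check (not part of the diff) that the repetition macros produce the expected patterns for the 0x1d byte:

// Re-statement of the macros above, for checking only.
macro_rules! repeat_u8_as_u32 {
    ($name:expr) => { (($name as u32) << 24 |
                       ($name as u32) << 16 |
                       ($name as u32) << 8 |
                       ($name as u32)) }
}
macro_rules! repeat_u8_as_u64 {
    ($name:expr) => { ((repeat_u8_as_u32!($name) as u64) << 32 |
                       (repeat_u8_as_u32!($name) as u64)) }
}

fn main() {
    assert_eq!(repeat_u8_as_u32!(0x1du8), 0x1d1d_1d1d);
    assert_eq!(repeat_u8_as_u64!(0x1du8), 0x1d1d_1d1d_1d1d_1d1d);
}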

/// Interprets `src` as `&U`, and then reads `src` without moving the contained value.
///
/// This function will unsafely assume the pointer `src` is valid for `sizeof(U)` bytes by
src/libcore/ptr.rs (15 additions)

@@ -230,6 +230,21 @@ pub unsafe fn read_and_zero<T>(dest: *mut T) -> T {
tmp
}

/// Variant of read_and_zero that writes the specific drop-flag byte
/// (which may be more appropriate than zero).
#[inline(always)]
#[unstable(feature = "core",
reason = "may play a larger role in std::ptr future extensions")]
pub unsafe fn read_and_drop<T>(dest: *mut T) -> T {
// Copy the data out from `dest`:
let tmp = read(&*dest);

// Now mark `dest` as dropped:
write_bytes(dest, mem::POST_DROP_U8, 1);

tmp
}

/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
///
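
To make the new function's semantics concrete, a hedged sketch of read_and_drop written against stable ptr APIs (0x1d stands in for mem::POST_DROP_U8, per this diff):

use std::{mem, ptr};

unsafe fn read_and_drop_sketch<T>(dest: *mut T) -> T {
    let tmp = ptr::read(dest);
    // `write_bytes` counts in units of T, so a count of 1 fills all
    // size_of::<T>() bytes of `*dest` with the sentinel byte.
    ptr::write_bytes(dest, 0x1d, 1);
    tmp
}

fn main() {
    let mut slot = Box::new(42);
    let moved = unsafe { read_and_drop_sketch(&mut slot) };
    println!("{}", moved);
    // `slot` now holds sentinel bytes, so it must not drop normally here.
    mem::forget(slot);
}
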
src/librustc/session/config.rs (2 additions)

@@ -605,6 +605,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
"Print the size of enums and their variants"),
force_overflow_checks: Option<bool> = (None, parse_opt_bool,
"Force overflow checks on or off"),
force_dropflag_checks: Option<bool> = (None, parse_opt_bool,
"Force drop flag checks on or off"),
}

pub fn default_lib_output() -> CrateType {
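
Presumably this option is exercised like the neighbouring debugging options, with underscores turned into hyphens on the command line, e.g. `rustc -Z force-dropflag-checks=on` (the exact accepted values come from parse_opt_bool and are assumed here).
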
src/librustc_trans/trans/_match.rs (1 addition, 1 deletion)

@@ -1528,7 +1528,7 @@ pub fn store_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let scope = cleanup::var_scope(tcx, p_id);
bcx = mk_binding_alloca(
bcx, p_id, &path1.node, scope, (),
-|(), bcx, llval, ty| { zero_mem(bcx, llval, ty); bcx });
+|(), bcx, llval, ty| { drop_done_fill_mem(bcx, llval, ty); bcx });
});
bcx
}