Skip to content

Commit 3e677e3

Browse files
author
Matthew Giordano
committed
fix new_cyclic_in for rc
1 parent 7d4ef17 commit 3e677e3

File tree

1 file changed

+79
-48
lines changed

1 file changed

+79
-48
lines changed

alloc/src/rc.rs

+79-48
Original file line numberDiff line numberDiff line change
@@ -687,54 +687,6 @@ impl<T, A: Allocator> Rc<T, A> {
687687
}
688688
}
689689

690-
/// TODO: document
691-
#[cfg(not(no_global_oom_handling))]
692-
#[unstable(feature = "allocator_api", issue = "32838")]
693-
pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Rc<T, A>
694-
where
695-
F: FnOnce(&Weak<T, A>) -> T,
696-
{
697-
// Construct the inner in the "uninitialized" state with a single
698-
// weak reference.
699-
let uninit_ptr: NonNull<_> = Box::leak(
700-
Box::new_in(RcBox {
701-
strong: Cell::new(0),
702-
weak: Cell::new(1),
703-
value: mem::MaybeUninit::<T>::uninit(),
704-
}),
705-
alloc,
706-
)
707-
.into();
708-
709-
let init_ptr: NonNull<RcBox<T>> = uninit_ptr.cast();
710-
711-
let weak = Weak { ptr: init_ptr, alloc: Global };
712-
713-
// It's important we don't give up ownership of the weak pointer, or
714-
// else the memory might be freed by the time `data_fn` returns. If
715-
// we really wanted to pass ownership, we could create an additional
716-
// weak pointer for ourselves, but this would result in additional
717-
// updates to the weak reference count which might not be necessary
718-
// otherwise.
719-
let data = data_fn(&weak);
720-
721-
let strong = unsafe {
722-
let inner = init_ptr.as_ptr();
723-
ptr::write(ptr::addr_of_mut!((*inner).value), data);
724-
725-
let prev_value = (*inner).strong.get();
726-
debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
727-
(*inner).strong.set(1);
728-
729-
Rc::from_inner(init_ptr)
730-
};
731-
732-
// Strong references should collectively own a shared weak reference,
733-
// so don't run the destructor for our old weak reference.
734-
mem::forget(weak);
735-
strong
736-
}
737-
738690
/// Constructs a new `Rc` with uninitialized contents in the provided allocator.
739691
///
740692
/// # Examples
@@ -2312,6 +2264,85 @@ impl<T: ?Sized, A: Allocator + Clone> Clone for Rc<T, A> {
23122264
}
23132265
}
23142266

2267+
impl<T, A: Allocator + Clone> Rc<T, A> {
2268+
/// Constructs a new `Rc<T, A>` in the given allocator while giving you a `Weak<T, A>` to the allocation,
2269+
/// to allow you to construct a `T` which holds a weak pointer to itself.
2270+
///
2271+
/// Generally, a structure circularly referencing itself, either directly or
2272+
/// indirectly, should not hold a strong reference to itself to prevent a memory leak.
2273+
/// Using this function, you get access to the weak pointer during the
2274+
/// initialization of `T`, before the `Rc<T, A>` is created, such that you can
2275+
/// clone and store it inside the `T`.
2276+
///
2277+
/// `new_cyclic_in` first allocates the managed allocation for the `Rc<T, A>`,
2278+
/// then calls your closure, giving it a `Weak<T, A>` to this allocation,
2279+
/// and only afterwards completes the construction of the `Rc<T, A>` by placing
2280+
/// the `T` returned from your closure into the allocation.
2281+
///
2282+
/// Since the new `Rc<T, A>` is not fully-constructed until `Rc<T, A>::new_cyclic_in`
2283+
/// returns, calling [`upgrade`] on the weak reference inside your closure will
2284+
/// fail and result in a `None` value.
2285+
///
2286+
/// # Panics
2287+
///
2288+
/// If `data_fn` panics, the panic is propagated to the caller, and the
2289+
/// temporary [`Weak<T, A>`] is dropped normally.
2290+
///
2291+
/// # Examples
2292+
/// See [`new_cyclic`].
2293+
///
2294+
/// [`new_cyclic`]: Rc::new_cyclic
2295+
/// [`upgrade`]: Weak::upgrade
2296+
#[cfg(not(no_global_oom_handling))]
2297+
#[unstable(feature = "allocator_api", issue = "32838")]
2298+
pub fn new_cyclic_in<F>(data_fn: F, alloc: A) -> Rc<T, A>
2299+
where
2300+
F: FnOnce(&Weak<T, A>) -> T,
2301+
{
2302+
// Note: comments and implementation are copied from Rc::new_cyclic.
2303+
2304+
// Construct the inner in the "uninitialized" state with a single
2305+
// weak reference.
2306+
let uninit_ptr: NonNull<_> = Box::leak(Box::new_in(
2307+
RcBox {
2308+
strong: Cell::new(0),
2309+
weak: Cell::new(1),
2310+
value: mem::MaybeUninit::<T>::uninit(),
2311+
},
2312+
alloc.clone(),
2313+
))
2314+
.into();
2315+
2316+
let init_ptr: NonNull<RcBox<T>> = uninit_ptr.cast();
2317+
2318+
let weak = Weak { ptr: init_ptr, alloc: alloc.clone() };
2319+
2320+
// It's important we don't give up ownership of the weak pointer, or
2321+
// else the memory might be freed by the time `data_fn` returns. If
2322+
// we really wanted to pass ownership, we could create an additional
2323+
// weak pointer for ourselves, but this would result in additional
2324+
// updates to the weak reference count which might not be necessary
2325+
// otherwise.
2326+
let data = data_fn(&weak);
2327+
2328+
let strong = unsafe {
2329+
let inner = init_ptr.as_ptr();
2330+
ptr::write(ptr::addr_of_mut!((*inner).value), data);
2331+
2332+
let prev_value = (*inner).strong.get();
2333+
debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
2334+
(*inner).strong.set(1);
2335+
2336+
Rc::from_inner_in(init_ptr, alloc)
2337+
};
2338+
2339+
// Strong references should collectively own a shared weak reference,
2340+
// so don't run the destructor for our old weak reference.
2341+
mem::forget(weak);
2342+
strong
2343+
}
2344+
}
2345+
23152346
#[cfg(not(no_global_oom_handling))]
23162347
#[stable(feature = "rust1", since = "1.0.0")]
23172348
impl<T: Default> Default for Rc<T> {

0 commit comments

Comments
 (0)