Commit 9f6a230

Auto merge of rust-lang#116327 - nnethercote:alloc_from_iter, r=<try>
Make `TypedArena::alloc_from_iter` like `DroplessArena::alloc_from_iter`.

Although they do similar things, they are currently implemented differently, for no particular reason.

r? `@cjgillot`

2 parents 79bfd93 + 2c40e82
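
For context, both arenas expose the same `alloc_from_iter` entry point to callers. A minimal usage sketch (illustrative only: `rustc_arena` is an internal compiler crate, and the `demo` function is hypothetical):

```rust
use rustc_arena::{DroplessArena, TypedArena};

fn demo() {
    // `TypedArena` runs destructors when the arena is dropped, so it can
    // hold types like `String`.
    let typed: TypedArena<String> = TypedArena::default();
    let strings = typed.alloc_from_iter((0..3).map(|i| i.to_string()));
    assert_eq!(strings[2], "2");

    // `DroplessArena` is restricted to types without destructors.
    let dropless = DroplessArena::default();
    let nums = dropless.alloc_from_iter(0..5u32);
    assert_eq!(nums[4], 4);
}
```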

File tree

1 file changed (+81 −150 lines): compiler/rustc_arena/src/lib.rs

compiler/rustc_arena/src/lib.rs (+81 −150)

@@ -44,23 +44,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
     f()
 }
 
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
-    /// A pointer to the next object to be allocated.
-    ptr: Cell<*mut T>,
-
-    /// A pointer to the end of the allocated area. When this pointer is
-    /// reached, a new chunk is allocated.
-    end: Cell<*mut T>,
-
-    /// A vector of arena chunks.
-    chunks: RefCell<Vec<ArenaChunk<T>>>,
-
-    /// Marker indicating that dropping the arena causes its owned
-    /// instances of `T` to be dropped.
-    _own: PhantomData<T>,
-}
-
 struct ArenaChunk<T = u8> {
     /// The raw storage for the arena chunk.
     storage: NonNull<[MaybeUninit<T>]>,
@@ -130,6 +113,23 @@ impl<T> ArenaChunk<T> {
 const PAGE: usize = 4096;
 const HUGE_PAGE: usize = 2 * 1024 * 1024;
 
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+    /// A pointer to the next object to be allocated.
+    ptr: Cell<*mut T>,
+
+    /// A pointer to the end of the allocated area. When this pointer is
+    /// reached, a new chunk is allocated.
+    end: Cell<*mut T>,
+
+    /// A vector of arena chunks.
+    chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+    /// Marker indicating that dropping the arena causes its owned
+    /// instances of `T` to be dropped.
+    _own: PhantomData<T>,
+}
+
 impl<T> Default for TypedArena<T> {
     /// Creates a new `TypedArena`.
     fn default() -> TypedArena<T> {
@@ -144,77 +144,6 @@ impl<T> Default for TypedArena<T> {
     }
 }
 
-trait IterExt<T> {
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
-}
-
-impl<I, T> IterExt<T> for I
-where
-    I: IntoIterator<Item = T>,
-{
-    // This default collects into a `SmallVec` and then allocates by copying
-    // from it. The specializations below for types like `Vec` are more
-    // efficient, copying directly without the intermediate collecting step.
-    // This default could be made more efficient, like
-    // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
-    #[inline]
-    default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let vec: SmallVec<[_; 8]> = self.into_iter().collect();
-        vec.alloc_from_iter(arena)
-    }
-}
-
-impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
-    #[inline]
-    fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            mem::forget(self);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<T> IterExt<T> for Vec<T> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
-impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
-    #[inline]
-    fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
-        let len = self.len();
-        if len == 0 {
-            return &mut [];
-        }
-        // Move the content to the arena by copying and then forgetting it.
-        let start_ptr = arena.alloc_raw_slice(len);
-        unsafe {
-            self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-            self.set_len(0);
-            slice::from_raw_parts_mut(start_ptr, len)
-        }
-    }
-}
-
 impl<T> TypedArena<T> {
     /// Allocates an object in the `TypedArena`, returning a reference to it.
     #[inline]
@@ -270,8 +199,7 @@ impl<T> TypedArena<T> {
 
     #[inline]
     pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
-        assert!(mem::size_of::<T>() != 0);
-        iter.alloc_from_iter(self)
+        alloc_from_iter(iter, |len| self.alloc_raw_slice(len))
     }
 
     /// Grows the arena.
@@ -527,76 +455,79 @@ impl DroplessArena {
         }
     }
 
-    /// # Safety
-    ///
-    /// The caller must ensure that `mem` is valid for writes up to
-    /// `size_of::<T>() * len`.
-    #[inline]
-    unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
-        &self,
-        mut iter: I,
-        len: usize,
-        mem: *mut T,
-    ) -> &mut [T] {
-        let mut i = 0;
-        // Use a manual loop since LLVM manages to optimize it better for
-        // slice iterators
-        loop {
-            // SAFETY: The caller must ensure that `mem` is valid for writes up to
-            // `size_of::<T>() * len`.
-            unsafe {
-                match iter.next() {
-                    Some(value) if i < len => mem.add(i).write(value),
-                    Some(_) | None => {
-                        // We only return as many items as the iterator gave us, even
-                        // though it was supposed to give us `len`
-                        return slice::from_raw_parts_mut(mem, i);
-                    }
-                }
-            }
-            i += 1;
-        }
+    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+        alloc_from_iter(iter, |len| self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T)
     }
+}
 
-    #[inline]
-    pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
-        let iter = iter.into_iter();
-        assert!(mem::size_of::<T>() != 0);
-        assert!(!mem::needs_drop::<T>());
+#[inline]
+pub fn alloc_from_iter<'a, T, I, F>(iter: I, alloc_raw: F) -> &'a mut [T]
+where
+    I: IntoIterator<Item = T>,
+    F: FnOnce(usize) -> *mut T,
+{
+    let iter = iter.into_iter();
+    assert!(mem::size_of::<T>() != 0);
 
-        let size_hint = iter.size_hint();
+    match iter.size_hint() {
+        (min, Some(max)) if min == max => {
+            // We know the exact number of elements the iterator will produce here
+            let len = min;
 
-        match size_hint {
-            (min, Some(max)) if min == max => {
-                // We know the exact number of elements the iterator will produce here
-                let len = min;
+            if len == 0 {
+                return &mut [];
+            }
 
-                if len == 0 {
+            let mem = alloc_raw(len);
+            unsafe { write_from_iter(iter, len, mem) }
+        }
+        (_, _) => {
+            outline(move || -> &mut [T] {
+                let mut vec: SmallVec<[_; 8]> = iter.collect();
+                if vec.is_empty() {
                     return &mut [];
                 }
+                // Move the content to the arena by copying it and then forgetting
+                // the content of the SmallVec
+                unsafe {
+                    let len = vec.len();
+                    let start_ptr = alloc_raw(len);
+                    vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+                    vec.set_len(0);
+                    slice::from_raw_parts_mut(start_ptr, len)
+                }
+            })
+        }
+    }
+}
 
-                let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
-                unsafe { self.write_from_iter(iter, len, mem) }
-            }
-            (_, _) => {
-                outline(move || -> &mut [T] {
-                    let mut vec: SmallVec<[_; 8]> = iter.collect();
-                    if vec.is_empty() {
-                        return &mut [];
-                    }
-                    // Move the content to the arena by copying it and then forgetting
-                    // the content of the SmallVec
-                    unsafe {
-                        let len = vec.len();
-                        let start_ptr =
-                            self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
-                        vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
-                        vec.set_len(0);
-                        slice::from_raw_parts_mut(start_ptr, len)
-                    }
-                })
+/// # Safety
+///
+/// The caller must ensure that `mem` is valid for writes up to
+/// `size_of::<T>() * len`.
+#[inline]
+unsafe fn write_from_iter<'a, T, I: Iterator<Item = T>>(
+    mut iter: I,
+    len: usize,
+    mem: *mut T,
+) -> &'a mut [T] {
+    let mut i = 0;
+    // Use a manual loop since LLVM manages to optimize it better for
+    // slice iterators
+    loop {
+        // SAFETY: The caller must ensure that `mem` is valid for writes up to
+        // `size_of::<T>() * len`.
+        unsafe {
+            match iter.next() {
+                Some(value) if i < len => mem.add(i).write(value),
+                Some(_) | None => {
+                    // We only return as many items as the iterator gave us, even
+                    // though it was supposed to give us `len`
+                    return slice::from_raw_parts_mut(mem, i);
+                }
             }
         }
+        i += 1;
     }
 }
 
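The heart of the change is that the iterator-handling logic now lives in one free `alloc_from_iter` function, parameterized over how the destination memory is obtained (`FnOnce(usize) -> *mut T`): `TypedArena` passes `alloc_raw_slice`, while `DroplessArena` passes `alloc_raw` with an array layout. Below is a self-contained sketch of that pattern. `fill_from_iter` and the leaked-buffer stand-in arena are hypothetical, and this sketch always collects into a `Vec` first rather than fast-pathing exact-size iterators the way the real helper does:

```rust
use std::slice;

// Sketch of the shared-helper shape: iteration and writing are generic,
// while the arena-specific step -- obtaining `len` writable slots -- is
// supplied by the caller as a closure.
fn fill_from_iter<'a, T, I, F>(iter: I, alloc_raw: F) -> &'a mut [T]
where
    I: IntoIterator<Item = T>,
    F: FnOnce(usize) -> *mut T,
{
    let items: Vec<T> = iter.into_iter().collect();
    let len = items.len();
    if len == 0 {
        return &mut [];
    }
    let mem = alloc_raw(len);
    unsafe {
        // Move every element into the caller-provided buffer.
        for (i, item) in items.into_iter().enumerate() {
            mem.add(i).write(item);
        }
        slice::from_raw_parts_mut(mem, len)
    }
}

fn main() {
    // Stand-in "arena": leak a boxed buffer so the returned slice can
    // outlive this call, as an arena allocation would.
    let squares = fill_from_iter((1..=4u32).map(|n| n * n), |len| {
        Box::leak(vec![0u32; len].into_boxed_slice()).as_mut_ptr()
    });
    assert_eq!(squares, &mut [1, 4, 9, 16][..]);
}
```

The design keeps the unsafe writing loop in a single place; each arena contributes only a one-line closure describing its allocation strategy.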