@@ -74,7 +74,7 @@ impl<T> ArenaChunk<T> {
     #[inline]
     unsafe fn new(capacity: usize) -> ArenaChunk<T> {
         ArenaChunk {
-            storage: NonNull::new(Box::into_raw(Box::new_uninit_slice(capacity))).unwrap(),
+            storage: NonNull::new_unchecked(Box::into_raw(Box::new_uninit_slice(capacity))),
             entries: 0,
         }
     }
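
The unwrap() can be dropped because Box::into_raw is documented to always return a non-null, properly aligned pointer, so NonNull::new_unchecked is sound here and skips the redundant runtime null check. A minimal standalone sketch of the pattern (not part of this commit), using the stable vec![..].into_boxed_slice() in place of the nightly Box::new_uninit_slice and a hypothetical boxed_slice_ptr helper:

    use std::ptr::NonNull;

    // Hypothetical helper: allocate a boxed slice and keep only a raw NonNull to it.
    fn boxed_slice_ptr(capacity: usize) -> NonNull<[u32]> {
        let raw = Box::into_raw(vec![0u32; capacity].into_boxed_slice());
        // SAFETY: Box::into_raw never returns null, so the unchecked
        // constructor is sound and avoids the NonNull::new(..).unwrap() check.
        unsafe { NonNull::new_unchecked(raw) }
    }

    fn main() {
        let ptr = boxed_slice_ptr(4);
        assert_eq!(unsafe { ptr.as_ref() }.len(), 4);
        // Rebuild the Box so the allocation is freed.
        drop(unsafe { Box::from_raw(ptr.as_ptr()) });
    }
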
@@ -85,7 +85,7 @@ impl<T> ArenaChunk<T> {
         // The branch on needs_drop() is an -O1 performance optimization.
         // Without the branch, dropping TypedArena<u8> takes linear time.
         if mem::needs_drop::<T>() {
-            let slice = &mut *(self.storage.as_mut());
+            let slice = self.storage.as_mut();
             ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
         }
     }
@@ -104,7 +104,7 @@ impl<T> ArenaChunk<T> {
             // A pointer as large as possible for zero-sized elements.
             ptr::invalid_mut(!0)
         } else {
-            self.start().add((*self.storage.as_ptr()).len())
+            self.start().add(self.storage.len())
         }
     }
 }
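
The new expression works because NonNull<[T]> is a fat pointer whose metadata already carries the slice length, so NonNull::<[T]>::len() reads it without dereferencing the pointer (and without an unsafe block), unlike the old (*self.storage.as_ptr()).len() form. A small illustrative sketch, independent of the arena code:

    use std::ptr::NonNull;

    fn main() {
        let mut data = [0u8; 16];
        // NonNull<[u8]> is (address, length); the length lives in the pointer
        // metadata, so len() needs neither a dereference nor unsafe.
        let storage: NonNull<[u8]> = NonNull::from(&mut data[..]);
        assert_eq!(storage.len(), 16);

        // The old spelling reads the same length through a raw-pointer deref,
        // which must be wrapped in unsafe.
        assert_eq!(unsafe { (*storage.as_ptr()).len() }, 16);
    }
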
@@ -288,7 +288,7 @@ impl<T> TypedArena<T> {
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be at least double the previous
                 // chunk's size.
-                new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / elem_size / 2);
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                 new_cap *= 2;
             } else {
                 new_cap = PAGE / elem_size;
@@ -396,7 +396,7 @@ impl DroplessArena {
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be at least double the previous
                 // chunk's size.
-                new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / 2);
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                 new_cap *= 2;
             } else {
                 new_cap = PAGE;
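
Both grow paths apply the same policy the comments describe: the first chunk holds roughly one page's worth of elements, and each later chunk is at least double the previous one, capped so a single chunk never exceeds HUGE_PAGE bytes. A minimal sketch of that rule with a hypothetical next_chunk_capacity helper; the constants are assumed to match rustc_arena's PAGE and HUGE_PAGE:

    // Assumed values, matching rustc_arena: a 4 KiB page and a 2 MiB huge page.
    const PAGE: usize = 4096;
    const HUGE_PAGE: usize = 2 * 1024 * 1024;

    // Hypothetical helper mirroring the growth rule shown in both hunks above.
    fn next_chunk_capacity(prev_len: Option<usize>, elem_size: usize) -> usize {
        match prev_len {
            // Clamp before doubling so a chunk never exceeds HUGE_PAGE bytes.
            Some(len) => 2 * len.min(HUGE_PAGE / elem_size / 2),
            // First chunk: one page's worth of elements.
            None => PAGE / elem_size,
        }
    }

    fn main() {
        // With 8-byte elements the capacities double: 512, 1024, 2048, ...
        let mut cap = next_chunk_capacity(None, 8);
        assert_eq!(cap, 512);
        cap = next_chunk_capacity(Some(cap), 8);
        assert_eq!(cap, 1024);
        // ...until a chunk reaches HUGE_PAGE bytes and the size plateaus.
        assert_eq!(next_chunk_capacity(Some(HUGE_PAGE / 8), 8), HUGE_PAGE / 8);
    }

DroplessArena follows the same rule with elem_size fixed at 1, since it sizes its chunks in bytes rather than in elements.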