Commit 744bdf5

Inline mem::size_of & mem::align_of
1 parent bcd6975 · commit 744bdf5

22 files changed: +118 −121 lines changed
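
Every hunk below applies the same mechanical change: drop the `mem::` prefix by importing the `size_of` / `min_align_of` intrinsics directly (an option available inside libcore/liballoc; ordinary crates would import from `core::mem` instead). A minimal sketch of the before/after shape, written against the stable `core::mem` paths rather than `core::intrinsics`:

use core::mem;
// After the change, call sites read `size_of::<T>()` with the function imported directly.
use core::mem::{align_of, size_of};

// Before: call through the module.
fn is_zst_before<T>() -> bool {
    mem::size_of::<T>() == 0
}

// After: same computation, shorter call site.
fn is_zst_after<T>() -> bool {
    size_of::<T>() == 0
}

fn main() {
    assert_eq!(is_zst_before::<()>(), is_zst_after::<()>());
    assert_eq!(align_of::<u32>(), size_of::<u32>());
}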

library/alloc/src/collections/vec_deque/mod.rs (+5 −4)

@@ -10,9 +10,10 @@
 use core::cmp::{self, Ordering};
 use core::fmt;
 use core::hash::{Hash, Hasher};
+use core::intrinsics::size_of;
 use core::iter::{repeat_with, FromIterator};
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
 use core::ops::{Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
 use core::slice;
@@ -58,7 +59,7 @@ mod tests;
 const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
 const MINIMUM_CAPACITY: usize = 1; // 2 - 1
 
-const MAXIMUM_ZST_CAPACITY: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1); // Largest possible power of two
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (size_of::<usize>() * 8 - 1); // Largest possible power of two
 
 /// A double-ended queue implemented with a growable ring buffer.
 ///
@@ -157,7 +158,7 @@ impl<T> VecDeque<T> {
     /// Marginally more convenient
     #[inline]
     fn cap(&self) -> usize {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             // For zero sized types, we are always at maximum capacity
             MAXIMUM_ZST_CAPACITY
         } else {
@@ -2795,7 +2796,7 @@ impl<T> From<Vec<T>> for VecDeque<T> {
             // because `usize::MAX` (the capacity returned by `capacity()` for ZST)
             // is not a power of two and thus it'll always try
             // to reserve more memory which will panic for ZST (rust-lang/rust#78532)
-            if (!buf.capacity().is_power_of_two() && mem::size_of::<T>() != 0)
+            if (!buf.capacity().is_power_of_two() && size_of::<T>() != 0)
                 || (buf.capacity() < (MINIMUM_CAPACITY + 1))
                 || (buf.capacity() == len)
             {
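
For orientation, a stand-alone sketch of the ZST convention these hunks rely on, written against the stable `core::mem::size_of` rather than the intrinsic (`cap_for` is a hypothetical helper, not VecDeque's actual method):

use core::mem::size_of;

// Largest possible power of two in usize, as in the constant above.
const MAXIMUM_ZST_CAPACITY: usize = 1 << (size_of::<usize>() * 8 - 1);

// Zero-sized element types report the maximum capacity; others report the real one.
fn cap_for<T>(real_cap: usize) -> usize {
    if size_of::<T>() == 0 { MAXIMUM_ZST_CAPACITY } else { real_cap }
}

fn main() {
    assert!(MAXIMUM_ZST_CAPACITY.is_power_of_two());
    assert_eq!(cap_for::<()>(0), MAXIMUM_ZST_CAPACITY);
    assert_eq!(cap_for::<u32>(8), 8);
}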

library/alloc/src/raw_vec.rs (+13 −13)

@@ -3,8 +3,8 @@
 
 use core::alloc::LayoutError;
 use core::cmp;
-use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::intrinsics::{self, min_align_of as align_of, size_of};
+use core::mem::{ManuallyDrop, MaybeUninit};
 use core::ops::Drop;
 use core::ptr::{self, NonNull, Unique};
 use core::slice;
@@ -171,7 +171,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             Self::new_in(alloc)
         } else {
             // We avoid `unwrap_or_else` here because it bloats the amount of
@@ -228,7 +228,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+        if size_of::<T>() == 0 { usize::MAX } else { self.cap }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
@@ -237,14 +237,14 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
-        if mem::size_of::<T>() == 0 || self.cap == 0 {
+        if size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
-                let align = mem::align_of::<T>();
-                let size = mem::size_of::<T>() * self.cap;
+                let align = align_of::<T>();
+                let size = size_of::<T>() * self.cap;
                let layout = Layout::from_size_align_unchecked(size, align);
                Some((self.ptr.cast().into(), layout))
            }
@@ -367,8 +367,8 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn capacity_from_bytes(excess: usize) -> usize {
-        debug_assert_ne!(mem::size_of::<T>(), 0);
-        excess / mem::size_of::<T>()
+        debug_assert_ne!(size_of::<T>(), 0);
+        excess / size_of::<T>()
     }
 
     fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
@@ -387,7 +387,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         // This is ensured by the calling contexts.
         debug_assert!(additional > 0);
 
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow);
@@ -406,7 +406,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         // - 4 if elements are moderate-sized (<= 1 KiB).
         // - 1 otherwise, to avoid wasting too much space for very short Vecs.
         // Note that `min_non_zero_cap` is computed statically.
-        let elem_size = mem::size_of::<T>();
+        let elem_size = size_of::<T>();
        let min_non_zero_cap = if elem_size == 1 {
            8
        } else if elem_size <= 1024 {
@@ -428,7 +428,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     // `grow_amortized`, but this method is usually instantiated less often so
     // it's less critical.
     fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow);
@@ -447,7 +447,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
 
         let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-        let new_size = amount * mem::size_of::<T>();
+        let new_size = amount * size_of::<T>();
 
         let ptr = unsafe {
             let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
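
As a stand-alone illustration of what `current_memory()` computes, here is a hedged sketch using the safe `Layout::array` constructor instead of `from_size_align_unchecked` (`buffer_layout` is a hypothetical helper, not RawVec API):

use core::alloc::Layout;
use core::mem::size_of;

// None when nothing was ever allocated (ZST element or zero capacity),
// otherwise the layout of `cap` elements of `T`.
fn buffer_layout<T>(cap: usize) -> Option<Layout> {
    if size_of::<T>() == 0 || cap == 0 {
        None
    } else {
        // Same size/align as size_of::<T>() * cap with align_of::<T>(),
        // but checked rather than unchecked.
        Layout::array::<T>(cap).ok()
    }
}

fn main() {
    assert_eq!(buffer_layout::<u64>(4).map(|l| l.size()), Some(32));
    assert!(buffer_layout::<()>(4).is_none());
}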

library/alloc/src/slice.rs (+7 −6)

@@ -84,7 +84,8 @@
 
 use core::borrow::{Borrow, BorrowMut};
 use core::cmp::Ordering::{self, Less};
-use core::mem::{self, size_of};
+use core::intrinsics::size_of;
+use core::mem;
 use core::ptr;
 
 use crate::alloc::{Allocator, Global};
@@ -411,10 +412,10 @@ impl<T> [T] {
             }};
         }
 
-        let sz_u8 = mem::size_of::<(K, u8)>();
-        let sz_u16 = mem::size_of::<(K, u16)>();
-        let sz_u32 = mem::size_of::<(K, u32)>();
-        let sz_usize = mem::size_of::<(K, usize)>();
+        let sz_u8 = size_of::<(K, u8)>();
+        let sz_u16 = size_of::<(K, u16)>();
+        let sz_u32 = size_of::<(K, u32)>();
+        let sz_usize = size_of::<(K, usize)>();
 
         let len = self.len();
         if len < 2 {
@@ -1004,7 +1005,7 @@ where
 impl<T> Drop for MergeHole<T> {
     fn drop(&mut self) {
         // `T` is not a zero-sized type, so it's okay to divide by its size.
-        let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+        let len = (self.end as usize - self.start as usize) / size_of::<T>();
         unsafe {
             ptr::copy_nonoverlapping(self.start, self.dest, len);
         }
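
The `MergeHole::drop` hunk divides a raw byte distance by the element size; a hedged stand-alone sketch of that computation, with `offset_from` shown as the equivalent pointer-arithmetic form:

use core::mem::size_of;

fn main() {
    let buf = [10u32, 20, 30, 40];
    let start = buf.as_ptr();
    let end = unsafe { start.add(buf.len()) };

    // The pattern from the diff: byte distance divided by the element size.
    let len_by_div = (end as usize - start as usize) / size_of::<u32>();
    // Equivalent element count via pointer arithmetic.
    let len_by_offset = unsafe { end.offset_from(start) } as usize;

    assert_eq!(len_by_div, buf.len());
    assert_eq!(len_by_offset, buf.len());
}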

library/alloc/src/vec/into_iter.rs (+5 −5)

@@ -1,7 +1,7 @@
 use crate::alloc::{Allocator, Global};
 use crate::raw_vec::RawVec;
 use core::fmt;
-use core::intrinsics::arith_offset;
+use core::intrinsics::{arith_offset, size_of};
 use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess};
 use core::marker::PhantomData;
 use core::mem::{self};
@@ -122,7 +122,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
     fn next(&mut self) -> Option<T> {
         if self.ptr as *const _ == self.end {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if size_of::<T>() == 0 {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
@@ -140,7 +140,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let exact = if mem::size_of::<T>() == 0 {
+        let exact = if size_of::<T>() == 0 {
             (self.end as usize).wrapping_sub(self.ptr as usize)
         } else {
             unsafe { self.end.offset_from(self.ptr) as usize }
@@ -166,7 +166,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         // that `T: Copy` so reading elements from the buffer doesn't invalidate
         // them for `Drop`.
         unsafe {
-            if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+            if size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
         }
     }
 }
@@ -177,7 +177,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
     fn next_back(&mut self) -> Option<T> {
         if self.end == self.ptr {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if size_of::<T>() == 0 {
             // See above for why 'ptr.offset' isn't used
             self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
 
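
`size_hint()` above counts the remaining elements two different ways; a sketch of both branches (`remaining` is an illustrative helper, not the iterator's real code path):

use core::mem::size_of;

// For zero-sized T the pointers only encode a count (address difference);
// for sized T the count is a real pointer offset.
fn remaining<T>(ptr: *const T, end: *const T) -> usize {
    if size_of::<T>() == 0 {
        (end as usize).wrapping_sub(ptr as usize)
    } else {
        unsafe { end.offset_from(ptr) as usize }
    }
}

fn main() {
    let xs = [1u8, 2, 3];
    let ptr = xs.as_ptr();
    let end = unsafe { ptr.add(xs.len()) };
    assert_eq!(remaining(ptr, end), 3);

    // ZST branch: the "pointer pair" is just a counter, never dereferenced.
    assert_eq!(remaining(4 as *const (), 9 as *const ()), 5);
}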

library/alloc/src/vec/mod.rs (+2 −2)

@@ -57,7 +57,7 @@ use core::cmp::{self, Ordering};
 use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::{arith_offset, assume, size_of};
 use core::iter::FromIterator;
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, MaybeUninit};
@@ -2105,7 +2105,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
             let mut me = ManuallyDrop::new(self);
             let alloc = ptr::read(me.allocator());
             let begin = me.as_mut_ptr();
-            let end = if mem::size_of::<T>() == 0 {
+            let end = if size_of::<T>() == 0 {
                 arith_offset(begin as *const i8, me.len() as isize) as *const T
             } else {
                 begin.add(me.len()) as *const T
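
The ZST branch above builds `end` with `arith_offset` so that `end - begin` encodes the length rather than a real past-the-end address; a hedged sketch of the same idea using stable wrapping pointer arithmetic (`zst_end` is an illustrative helper):

// Advance `begin` by `len` bytes-as-count; the result is only compared, never dereferenced.
fn zst_end<T>(begin: *const T, len: usize) -> *const T {
    (begin as *const u8).wrapping_add(len) as *const T
}

fn main() {
    let v: Vec<()> = vec![(), (), ()];
    let begin = v.as_ptr();
    let end = zst_end(begin, v.len());
    assert_eq!((end as usize).wrapping_sub(begin as usize), 3);
}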

library/alloc/src/vec/source_iter_marker.rs (+6 −7)

@@ -1,6 +1,7 @@
+use core::intrinsics::{min_align_of as align_of, size_of};
 use core::iter::{InPlaceIterable, SourceIter};
-use core::mem::{self, ManuallyDrop};
-use core::ptr::{self};
+use core::mem::ManuallyDrop;
+use core::ptr;
 
 use super::{AsIntoIter, InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
 
@@ -31,11 +32,9 @@ where
         // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
         // b) size match as required by Alloc contract
         // c) alignments match as required by Alloc contract
-        if mem::size_of::<T>() == 0
-            || mem::size_of::<T>()
-                != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
-            || mem::align_of::<T>()
-                != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+        if size_of::<T>() == 0
+            || size_of::<T>() != size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+            || align_of::<T>() != align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
         {
             // fallback to more generic implementations
             return SpecFromIterNested::from_iter(iterator);
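
The condition above is the entire specialization guard; a stand-alone sketch of it as a predicate (`can_reuse_allocation` is a hypothetical name, using the stable `core::mem` functions):

use core::mem::{align_of, size_of};

// In-place collection can only reuse the source allocation when the element
// types have identical size and alignment and are not zero-sized.
fn can_reuse_allocation<Src, Dst>() -> bool {
    size_of::<Dst>() != 0
        && size_of::<Dst>() == size_of::<Src>()
        && align_of::<Dst>() == align_of::<Src>()
}

fn main() {
    assert!(can_reuse_allocation::<u32, i32>());
    assert!(!can_reuse_allocation::<u32, u64>());
    assert!(!can_reuse_allocation::<u32, ()>());
}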

library/core/src/alloc/layout.rs (+10 −15)

@@ -1,19 +1,11 @@
 use crate::cmp;
 use crate::fmt;
-use crate::mem;
+use crate::intrinsics::{
+    min_align_of as align_of, min_align_of_val as align_of_val, size_of, size_of_val,
+};
 use crate::num::NonZeroUsize;
 use crate::ptr::NonNull;
 
-// While this function is used in one place and its implementation
-// could be inlined, the previous attempts to do so made rustc
-// slower:
-//
-// * https://github.com/rust-lang/rust/pull/72189
-// * https://github.com/rust-lang/rust/pull/79827
-const fn size_align<T>() -> (usize, usize) {
-    (mem::size_of::<T>(), mem::align_of::<T>())
-}
-
 /// Layout of a block of memory.
 ///
 /// An instance of `Layout` describes a particular layout of memory.
@@ -121,7 +113,8 @@ impl Layout {
     #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
     #[inline]
     pub const fn new<T>() -> Self {
-        let (size, align) = size_align::<T>();
+        let size = size_of::<T>();
+        let align = align_of::<T>();
         // SAFETY: the align is guaranteed by Rust to be a power of two and
         // the size+align combo is guaranteed to fit in our address space. As a
         // result use the unchecked constructor here to avoid inserting code
@@ -135,7 +128,8 @@ impl Layout {
     #[stable(feature = "alloc_layout", since = "1.28.0")]
     #[inline]
     pub fn for_value<T: ?Sized>(t: &T) -> Self {
-        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+        let size = size_of_val(t);
+        let align = align_of_val(t);
         debug_assert!(Layout::from_size_align(size, align).is_ok());
         // SAFETY: see rationale in `new` for why this is using the unsafe variant
         unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -170,7 +164,8 @@ impl Layout {
     #[unstable(feature = "layout_for_ptr", issue = "69835")]
     pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
         // SAFETY: we pass along the prerequisites of these functions to the caller
-        let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
+        let size = size_of_val(t);
+        let align = align_of_val(t);
         debug_assert!(Layout::from_size_align(size, align).is_ok());
         // SAFETY: see rationale in `new` for why this is using the unsafe variant
         unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -393,7 +388,7 @@ impl Layout {
     #[inline]
     pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
         let (layout, offset) = Layout::new::<T>().repeat(n)?;
-        debug_assert_eq!(offset, mem::size_of::<T>());
+        debug_assert_eq!(offset, size_of::<T>());
         Ok(layout.pad_to_align())
     }
 }
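
After this change `Layout::new::<T>()` is built directly from `size_of` / `align_of` rather than the removed `size_align` helper; a quick check of the equivalence from user code, using only stable APIs:

use core::alloc::Layout;
use core::mem::{align_of, size_of};

fn main() {
    // Layout::new::<T>() amounts to from_size_align(size_of::<T>(), align_of::<T>()).
    let by_parts = Layout::from_size_align(size_of::<u64>(), align_of::<u64>()).unwrap();
    assert_eq!(by_parts, Layout::new::<u64>());

    // for_value uses the *_val variants, so it also covers unsized values.
    let slice: &[u16] = &[1, 2, 3];
    assert_eq!(Layout::for_value(slice).size(), 3 * size_of::<u16>());
}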

library/core/src/hash/mod.rs (+4 −4)

@@ -80,6 +80,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use crate::fmt;
+use crate::intrinsics::size_of;
 use crate::marker;
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -539,7 +540,6 @@ impl<H> PartialEq for BuildHasherDefault<H> {
 impl<H> Eq for BuildHasherDefault<H> {}
 
 mod impls {
-    use crate::mem;
     use crate::slice;
 
     use super::*;
@@ -553,7 +553,7 @@ mod impls {
             }
 
             fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
-                let newlen = data.len() * mem::size_of::<$ty>();
+                let newlen = data.len() * size_of::<$ty>();
                 let ptr = data.as_ptr() as *const u8;
                 // SAFETY: `ptr` is valid and aligned, as this macro is only used
                 // for numeric primitives which have no padding. The new slice only
@@ -673,7 +673,7 @@
     #[stable(feature = "rust1", since = "1.0.0")]
     impl<T: ?Sized> Hash for *const T {
         fn hash<H: Hasher>(&self, state: &mut H) {
-            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+            if size_of::<Self>() == size_of::<usize>() {
                 // Thin pointer
                 state.write_usize(*self as *const () as usize);
             } else {
@@ -693,7 +693,7 @@
     #[stable(feature = "rust1", since = "1.0.0")]
     impl<T: ?Sized> Hash for *mut T {
         fn hash<H: Hasher>(&self, state: &mut H) {
-            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+            if size_of::<Self>() == size_of::<usize>() {
                 // Thin pointer
                 state.write_usize(*self as *const () as usize);
             } else {
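
Two of the patterns touched above, shown stand-alone with stable imports: `hash_slice` reinterpreting a padding-free primitive slice as bytes, and the thin- vs. fat-pointer size check used by the pointer `Hash` impls:

use core::mem::size_of;
use core::slice;

fn main() {
    // hash_slice pattern: a slice of a padding-free primitive viewed as bytes.
    let data: [u32; 2] = [0x0102_0304, 0x0506_0708];
    let newlen = data.len() * size_of::<u32>();
    let bytes = unsafe { slice::from_raw_parts(data.as_ptr() as *const u8, newlen) };
    assert_eq!(bytes.len(), 8);

    // Thin pointers are pointer-sized; fat pointers (e.g. to slices) are not.
    assert_eq!(size_of::<*const u8>(), size_of::<usize>());
    assert_eq!(size_of::<*const [u8]>(), 2 * size_of::<usize>());
}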

library/core/src/hash/sip.rs (+3 −3)

@@ -3,8 +3,8 @@
 #![allow(deprecated)] // the types in this module are deprecated
 
 use crate::cmp;
+use crate::intrinsics::size_of;
 use crate::marker::PhantomData;
-use crate::mem;
 use crate::ptr;
 
 /// An implementation of SipHash 1-3.
@@ -108,12 +108,12 @@ macro_rules! compress {
 /// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
 macro_rules! load_int_le {
     ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+        debug_assert!($i + size_of::<$int_ty>() <= $buf.len());
         let mut data = 0 as $int_ty;
         ptr::copy_nonoverlapping(
             $buf.as_ptr().add($i),
             &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
+            size_of::<$int_ty>(),
         );
         data.to_le()
     }};