From 53d8b1d0515f53d138f0b9c3067540d8c5708415 Mon Sep 17 00:00:00 2001
From: Alex Crichton
Date: Thu, 20 Jul 2017 11:14:13 -0700
Subject: [PATCH] std: Cut down #[inline] annotations where not necessary

This PR cuts down on a large number of `#[inline(always)]` and `#[inline]`
annotations in libcore for various core functions. The `#[inline(always)]`
annotation is almost never needed and is detrimental to debug build times,
as it forces LLVM to perform inlining in debug builds where it otherwise
wouldn't. Additionally, `#[inline]` is an unnecessary annotation on almost
all generic functions because the function will already be monomorphized
into other codegen units and rarely needs the extra "help" from us to tell
LLVM to inline something.

Overall this PR cut the compile time of a [microbenchmark][1] by 30%, from
1s to 0.7s.

[1]: https://gist.github.com/alexcrichton/a7d70319a45aa60cf36a6a7bf540dd3a
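As a rough illustration (the functions below are made up for this
description and are not code from libcore), the difference between the two
attributes, and why generic functions typically need neither, looks like
this:

```rust
// Hypothetical example -- not part of this patch.

// `#[inline(always)]` maps to LLVM's `alwaysinline`, so calls are inlined
// even in unoptimized debug builds, which costs compile time.
#[inline(always)]
fn always_inlined(x: u32) -> u32 {
    x.wrapping_add(1)
}

// `#[inline]` only makes the body available to other codegen units and
// leaves the actual inlining decision to the optimizer.
#[inline]
fn maybe_inlined(x: u32) -> u32 {
    x.wrapping_add(1)
}

// A generic function usually needs no annotation at all: each instantiation
// is monomorphized into the caller's codegen unit, so LLVM already sees the
// body and can inline it when that looks profitable.
fn no_attr_needed<T: Clone>(x: &T) -> T {
    x.clone()
}

fn main() {
    assert_eq!(always_inlined(1), 2);
    assert_eq!(maybe_inlined(1), 2);
    assert_eq!(no_attr_needed(&41u32), 41);
}
```

Downgrading `#[inline(always)]` to `#[inline]` therefore mostly changes how
much work LLVM is forced to do in debug builds; optimized builds remain free
to inline these small functions.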
---
 src/libcore/clone.rs        |  2 +-
 src/libcore/cmp.rs          |  2 +-
 src/libcore/hash/sip.rs     |  2 +-
 src/libcore/nonzero.rs      |  2 +-
 src/libcore/num/mod.rs      | 22 ++++++++--------
 src/libcore/num/wrapping.rs | 52 ++++++++++++++++++-------------------
 src/libcore/ptr.rs          |  4 +--
 src/libcore/slice/mod.rs    |  2 +-
 src/libcore/str/mod.rs      |  6 ++---
 src/libcore/str/pattern.rs  |  6 ++---
 10 files changed, 50 insertions(+), 50 deletions(-)

diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs
index 97b9525da6715..d6d3bf9950528 100644
--- a/src/libcore/clone.rs
+++ b/src/libcore/clone.rs
@@ -106,7 +106,7 @@ pub trait Clone : Sized {
     /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality,
     /// but can be overridden to reuse the resources of `a` to avoid unnecessary
     /// allocations.
-    #[inline(always)]
+    #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     fn clone_from(&mut self, source: &Self) {
         *self = source.clone()
diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs
index f133bd93c9178..a9f55dc27880b 100644
--- a/src/libcore/cmp.rs
+++ b/src/libcore/cmp.rs
@@ -168,7 +168,7 @@ pub trait Eq: PartialEq<Self> {
     //
     // This should never be implemented by hand.
     #[doc(hidden)]
-    #[inline(always)]
+    #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     fn assert_receiver_is_total_eq(&self) {}
 }
diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs
index db12496b6f320..91fd01b36d495 100644
--- a/src/libcore/hash/sip.rs
+++ b/src/libcore/hash/sip.rs
@@ -239,7 +239,7 @@ impl<S: Sip> Hasher<S> {
     // except for composite types (that includes slices and str hashing because of delimiter).
     // Without this extra push the compiler is very reluctant to inline delimiter writes,
     // degrading performance substantially for the most common use cases.
-    #[inline(always)]
+    #[inline]
     fn short_write(&mut self, msg: &[u8]) {
         debug_assert!(msg.len() <= 8);
         let length = msg.len();
diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs
index d93085e96dbb2..977438051d93b 100644
--- a/src/libcore/nonzero.rs
+++ b/src/libcore/nonzero.rs
@@ -42,7 +42,7 @@ pub struct NonZero<T: Zeroable>(T);
 impl<T: Zeroable> NonZero<T> {
     /// Creates an instance of NonZero with the provided value.
     /// You must indeed ensure that the value is actually "non-zero".
-    #[inline(always)]
+    #[inline]
     pub const unsafe fn new(inner: T) -> NonZero<T> {
         NonZero(inner)
     }
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index cbd59ed371377..a8094ab932bf6 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -695,7 +695,7 @@ macro_rules! int_impl {
         /// assert_eq!((-128i8).wrapping_div(-1), -128);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_div(self, rhs: Self) -> Self {
             self.overflowing_div(rhs).0
         }
@@ -721,7 +721,7 @@ macro_rules! int_impl {
         /// assert_eq!((-128i8).wrapping_rem(-1), 0);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_rem(self, rhs: Self) -> Self {
             self.overflowing_rem(rhs).0
         }
@@ -744,7 +744,7 @@ macro_rules! int_impl {
         /// assert_eq!((-128i8).wrapping_neg(), -128);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_neg(self) -> Self {
             self.overflowing_neg().0
         }
@@ -769,7 +769,7 @@ macro_rules! int_impl {
         /// assert_eq!((-1i8).wrapping_shl(8), -1);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             unsafe {
                 intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
@@ -796,7 +796,7 @@ macro_rules! int_impl {
         /// assert_eq!((-128i8).wrapping_shr(8), -128);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_shr(self, rhs: u32) -> Self {
             unsafe {
                 intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
@@ -822,7 +822,7 @@ macro_rules! int_impl {
         /// assert_eq!((-128i8).wrapping_abs() as u8, 128);
         /// ```
         #[stable(feature = "no_panic_abs", since = "1.13.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_abs(self) -> Self {
             if self.is_negative() {
                 self.wrapping_neg()
@@ -1831,7 +1831,7 @@ macro_rules! uint_impl {
         /// assert_eq!(100u8.wrapping_div(10), 10);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_div(self, rhs: Self) -> Self {
             self / rhs
         }
@@ -1851,7 +1851,7 @@ macro_rules! uint_impl {
         /// assert_eq!(100u8.wrapping_rem(10), 0);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_rem(self, rhs: Self) -> Self {
             self % rhs
         }
@@ -1877,7 +1877,7 @@ macro_rules! uint_impl {
         /// assert_eq!(180u8.wrapping_neg(), (127 + 1) - (180u8 - (127 + 1)));
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_neg(self) -> Self {
             self.overflowing_neg().0
         }
@@ -1902,7 +1902,7 @@ macro_rules! uint_impl {
         /// assert_eq!(1u8.wrapping_shl(8), 1);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_shl(self, rhs: u32) -> Self {
             unsafe {
                 intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
@@ -1929,7 +1929,7 @@ macro_rules! uint_impl {
         /// assert_eq!(128u8.wrapping_shr(8), 128);
         /// ```
         #[stable(feature = "num_wrapping", since = "1.2.0")]
-        #[inline(always)]
+        #[inline]
         pub fn wrapping_shr(self, rhs: u32) -> Self {
             unsafe {
                 intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs
index 6cc374b13b7b3..acdf685e850ab 100644
--- a/src/libcore/num/wrapping.rs
+++ b/src/libcore/num/wrapping.rs
@@ -19,7 +19,7 @@ macro_rules! sh_impl_signed {
         impl Shl<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn shl(self, other: $f) -> Wrapping<$t> {
                 if other < 0 {
                     Wrapping(self.0.wrapping_shr((-other & self::shift_max::$t as $f) as u32))
@@ -31,7 +31,7 @@ macro_rules! sh_impl_signed {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl ShlAssign<$f> for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn shl_assign(&mut self, other: $f) {
                 *self = *self << other;
             }
         }
@@ -41,7 +41,7 @@ macro_rules! sh_impl_signed {
         impl Shr<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn shr(self, other: $f) -> Wrapping<$t> {
                 if other < 0 {
                     Wrapping(self.0.wrapping_shl((-other & self::shift_max::$t as $f) as u32))
@@ -53,7 +53,7 @@ macro_rules! sh_impl_signed {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl ShrAssign<$f> for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn shr_assign(&mut self, other: $f) {
                 *self = *self >> other;
             }
         }
@@ -67,7 +67,7 @@ macro_rules! sh_impl_unsigned {
         impl Shl<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn shl(self, other: $f) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
             }
@@ -75,7 +75,7 @@ macro_rules! sh_impl_unsigned {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl ShlAssign<$f> for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn shl_assign(&mut self, other: $f) {
                 *self = *self << other;
             }
         }
@@ -85,7 +85,7 @@ macro_rules! sh_impl_unsigned {
         impl Shr<$f> for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn shr(self, other: $f) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
             }
@@ -93,7 +93,7 @@ macro_rules! sh_impl_unsigned {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl ShrAssign<$f> for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn shr_assign(&mut self, other: $f) {
                 *self = *self >> other;
             }
         }
@@ -127,7 +127,7 @@ macro_rules! wrapping_impl {
         impl Add for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn add(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_add(other.0))
             }
@@ -137,7 +137,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl AddAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn add_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self + other;
             }
         }
@@ -147,7 +147,7 @@ macro_rules! wrapping_impl {
         impl Sub for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn sub(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_sub(other.0))
             }
@@ -157,7 +157,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl SubAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn sub_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self - other;
             }
         }
@@ -167,7 +167,7 @@ macro_rules! wrapping_impl {
         impl Mul for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn mul(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_mul(other.0))
             }
@@ -177,7 +177,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl MulAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn mul_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self * other;
             }
         }
@@ -187,7 +187,7 @@ macro_rules! wrapping_impl {
         impl Div for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn div(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_div(other.0))
             }
@@ -197,7 +197,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl DivAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn div_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self / other;
             }
         }
@@ -207,7 +207,7 @@ macro_rules! wrapping_impl {
         impl Rem for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn rem(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0.wrapping_rem(other.0))
             }
@@ -217,7 +217,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl RemAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn rem_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self % other;
             }
         }
@@ -227,7 +227,7 @@ macro_rules! wrapping_impl {
         impl Not for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn not(self) -> Wrapping<$t> {
                 Wrapping(!self.0)
             }
@@ -239,7 +239,7 @@ macro_rules! wrapping_impl {
         impl BitXor for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn bitxor(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0 ^ other.0)
             }
@@ -249,7 +249,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl BitXorAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn bitxor_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self ^ other;
             }
         }
@@ -259,7 +259,7 @@ macro_rules! wrapping_impl {
         impl BitOr for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn bitor(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0 | other.0)
             }
@@ -269,7 +269,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl BitOrAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn bitor_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self | other;
             }
         }
@@ -279,7 +279,7 @@ macro_rules! wrapping_impl {
         impl BitAnd for Wrapping<$t> {
             type Output = Wrapping<$t>;

-            #[inline(always)]
+            #[inline]
             fn bitand(self, other: Wrapping<$t>) -> Wrapping<$t> {
                 Wrapping(self.0 & other.0)
             }
@@ -289,7 +289,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "op_assign_traits", since = "1.8.0")]
         impl BitAndAssign for Wrapping<$t> {
-            #[inline(always)]
+            #[inline]
             fn bitand_assign(&mut self, other: Wrapping<$t>) {
                 *self = *self & other;
             }
@@ -298,7 +298,7 @@ macro_rules! wrapping_impl {
         #[stable(feature = "wrapping_neg", since = "1.10.0")]
         impl Neg for Wrapping<$t> {
             type Output = Self;
-            #[inline(always)]
+            #[inline]
             fn neg(self) -> Self {
                 Wrapping(0) - self
             }
diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs
index 4f118f58441c4..b19e07b8578c0 100644
--- a/src/libcore/ptr.rs
+++ b/src/libcore/ptr.rs
@@ -244,7 +244,7 @@ pub unsafe fn replace<T>(dest: *mut T, mut src: T) -> T {
 /// assert_eq!(std::ptr::read(y), 12);
 /// }
 /// ```
-#[inline(always)]
+#[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn read<T>(src: *const T) -> T {
     let mut tmp: T = mem::uninitialized();
@@ -278,7 +278,7 @@ pub unsafe fn read<T>(src: *const T) -> T {
 /// assert_eq!(std::ptr::read_unaligned(y), 12);
 /// }
 /// ```
-#[inline(always)]
+#[inline]
 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
     let mut tmp: T = mem::uninitialized();
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index 62c7e7aa1cce2..fa0c482e55c91 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -1105,7 +1105,7 @@ impl<'a, T> IntoIterator for &'a mut [T] {
     }
 }

-#[inline(always)]
+#[inline]
 fn size_from_ptr<T>(_: *const T) -> usize {
     mem::size_of::<T>()
 }
diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs
index 3862b4a2eb046..1df69a1b598e4 100644
--- a/src/libcore/str/mod.rs
+++ b/src/libcore/str/mod.rs
@@ -369,7 +369,7 @@ unsafe fn from_raw_parts_mut<'a>(p: *mut u8, len: usize) -> &'a mut str {
 ///
 /// assert_eq!("💖", sparkle_heart);
 /// ```
-#[inline(always)]
+#[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
     mem::transmute(v)
 }
@@ -381,7 +381,7 @@ pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
 /// See the immutable version, [`from_utf8_unchecked()`][fromutf8], for more information.
 ///
 /// [fromutf8]: fn.from_utf8_unchecked.html
-#[inline(always)]
+#[inline]
 #[unstable(feature = "str_mut_extras", issue = "41119")]
 pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
     mem::transmute(v)
 }
@@ -1380,7 +1380,7 @@ fn contains_nonascii(x: usize) -> bool {
 /// returning `true` in that case, or, if it is invalid, `false` with
 /// `iter` reset such that it is pointing at the first byte in the
 /// invalid sequence.
-#[inline(always)]
+#[inline]
 fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
     let mut index = 0;
     let len = v.len();
diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs
index 5a007285e4873..3c9c1d6cab479 100644
--- a/src/libcore/str/pattern.rs
+++ b/src/libcore/str/pattern.rs
@@ -668,7 +668,7 @@ unsafe impl<'a, 'b> Searcher<'a> for StrSearcher<'a, 'b> {
         }
     }

-    #[inline(always)]
+    #[inline]
     fn next_match(&mut self) -> Option<(usize, usize)> {
         match self.searcher {
             StrSearcherImpl::Empty(..) => {
@@ -936,7 +936,7 @@ impl TwoWaySearcher {
         bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
     }

-    #[inline(always)]
+    #[inline]
     fn byteset_contains(&self, byte: u8) -> bool {
         (self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
     }
@@ -946,7 +946,7 @@ impl TwoWaySearcher {
     // left to right. If v matches, we try to match u by scanning right to left.
     // How far we can jump when we encounter a mismatch is all based on the fact
     // that (u, v) is a critical factorization for the needle.
-    #[inline(always)]
+    #[inline]
     fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
         where S: TwoWayStrategy