
Commit 21951cd · committed Mar 28, 2024
Add add/sub methods that only panic with debug assertions to rustc
This mitigates the perf impact of enabling overflow checks on rustc. The change to use overflow checks will be done in a later PR.
1 parent c3b05c6 commit 21951cd
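In short: once rustc is built with overflow checks enabled (planned in a follow-up PR, per the message above), every plain `+` in these hot paths would carry a checked-add branch even in dist builds. The new `debug_strict_add`/`debug_strict_sub` helpers keep the panic-on-overflow behavior only under `cfg(debug_assertions)` and compile to a plain wrapping operation otherwise. A minimal sketch of the intended semantics (the `demo` function is illustrative, not part of the commit):

use rustc_serialize::int_overflow::DebugStrictAdd;

fn demo(a: u32, b: u32) -> u32 {
    // With debug assertions: behaves like `a + b` under overflow checks,
    // i.e. panics on overflow.
    // Without debug assertions (dist builds): a wrapping add, so no check
    // is emitted regardless of -Coverflow-checks.
    a.debug_strict_add(b)
}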

File tree: 6 files changed, +98 −27 lines
 

compiler/rustc_data_structures/src/sip128.rs (+19 −17)

@@ -1,5 +1,6 @@
 //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
 
+use rustc_serialize::int_overflow::{DebugStrictAdd, DebugStrictSub};
 use std::hash::Hasher;
 use std::mem::{self, MaybeUninit};
 use std::ptr;
@@ -103,19 +104,19 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
     }
 
     let mut i = 0;
-    if i + 3 < count {
+    if i.debug_strict_add(3) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
+        i = i.debug_strict_add(4);
     }
 
-    if i + 1 < count {
+    if i.debug_strict_add(1) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
+        i = i.debug_strict_add(2)
     }
 
     if i < count {
         *dst.add(i) = *src.add(i);
-        i += 1;
+        i = i.debug_strict_add(1);
     }
 
     debug_assert_eq!(i, count);
@@ -211,14 +212,14 @@ impl SipHasher128 {
         debug_assert!(nbuf < BUFFER_SIZE);
         debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
-        if nbuf + LEN < BUFFER_SIZE {
+        if nbuf.debug_strict_add(LEN) < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                 ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }
 
-            self.nbuf = nbuf + LEN;
+            self.nbuf = nbuf.debug_strict_add(LEN);
 
             return;
         }
@@ -265,8 +266,9 @@ impl SipHasher128 {
         // This function should only be called when the write fills the buffer.
         // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
         // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
+        self.nbuf =
+            if LEN == 1 { 0 } else { nbuf.debug_strict_add(LEN).debug_strict_sub(BUFFER_SIZE) };
+        self.processed = self.processed.debug_strict_add(BUFFER_SIZE);
     }
 }
 
@@ -277,7 +279,7 @@ impl SipHasher128 {
         let nbuf = self.nbuf;
         debug_assert!(nbuf < BUFFER_SIZE);
 
-        if nbuf + length < BUFFER_SIZE {
+        if nbuf.debug_strict_add(length) < BUFFER_SIZE {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
@@ -289,7 +291,7 @@ impl SipHasher128 {
                 }
             }
 
-            self.nbuf = nbuf + length;
+            self.nbuf = nbuf.debug_strict_add(length);
 
             return;
         }
@@ -315,7 +317,7 @@ impl SipHasher128 {
         // This function should only be called when the write fills the buffer,
         // so we know that there is enough input to fill the current element.
         let valid_in_elem = nbuf % ELEM_SIZE;
-        let needed_in_elem = ELEM_SIZE - valid_in_elem;
+        let needed_in_elem = ELEM_SIZE.debug_strict_sub(valid_in_elem);
 
         let src = msg.as_ptr();
         let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
@@ -327,7 +329,7 @@ impl SipHasher128 {
         // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
         // We know that is true, because last step ensured we have a full
         // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+        let last = (nbuf / ELEM_SIZE).debug_strict_add(1);
 
         for i in 0..last {
             let elem = self.buf.get_unchecked(i).assume_init().to_le();
@@ -338,7 +340,7 @@ impl SipHasher128 {
 
         // Process the remaining element-sized chunks of input.
         let mut processed = needed_in_elem;
-        let input_left = length - processed;
+        let input_left = length.debug_strict_sub(processed);
         let elems_left = input_left / ELEM_SIZE;
         let extra_bytes_left = input_left % ELEM_SIZE;
 
@@ -347,7 +349,7 @@ impl SipHasher128 {
             self.state.v3 ^= elem;
             Sip13Rounds::c_rounds(&mut self.state);
             self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
+            processed = processed.debug_strict_add(ELEM_SIZE);
         }
 
         // Copy remaining input into start of buffer.
@@ -356,7 +358,7 @@ impl SipHasher128 {
         copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
         self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
+        self.processed = self.processed.debug_strict_add(nbuf.debug_strict_add(processed));
     }
 }
 
@@ -394,7 +396,7 @@ impl SipHasher128 {
         };
 
         // Finalize the hash.
-        let length = self.processed + self.nbuf;
+        let length = self.processed.debug_strict_add(self.nbuf);
         let b: u64 = ((length as u64 & 0xff) << 56) | elem;
 
         state.v3 ^= b;
compiler/rustc_serialize/src/int_overflow.rs (+65, new file; the filename was dropped in extraction and is reconstructed from the `pub mod int_overflow;` declaration added to compiler/rustc_serialize/src/lib.rs below)

@@ -0,0 +1,65 @@
+// This would belong to `rustc_data_structures`, but `rustc_serialize` needs it too.
+
+/// Addition, but only overflow checked when `cfg(debug_assertions)` is set
+/// instead of respecting `-Coverflow-checks`.
+///
+/// This exists for performance reasons, as we ship rustc with overflow checks.
+/// While overflow checks are perf neutral in almost all of the compiler, there
+/// are a few particularly hot areas where we don't want overflow checks in our
+/// dist builds. Overflow is still a bug there, so we want overflow check for
+/// builds with debug assertions.
+///
+/// That's a long way to say that this should be used in areas where overflow
+/// is a bug but overflow checking is too slow.
+pub trait DebugStrictAdd {
+    /// See [`DebugStrictAdd`].
+    fn debug_strict_add(self, other: Self) -> Self;
+}
+
+macro_rules! impl_debug_strict_add {
+    ($( $ty:ty )*) => {
+        $(
+            impl DebugStrictAdd for $ty {
+                fn debug_strict_add(self, other: Self) -> Self {
+                    if cfg!(debug_assertions) {
+                        self + other
+                    } else {
+                        self.wrapping_add(other)
+                    }
+                }
+            }
+        )*
+    };
+}
+
+/// See [`DebugStrictAdd`].
+pub trait DebugStrictSub {
+    /// See [`DebugStrictAdd`].
+    fn debug_strict_sub(self, other: Self) -> Self;
+}
+
+macro_rules! impl_debug_strict_sub {
+    ($( $ty:ty )*) => {
+        $(
+            impl DebugStrictSub for $ty {
+                fn debug_strict_sub(self, other: Self) -> Self {
+                    if cfg!(debug_assertions) {
+                        self - other
+                    } else {
+                        self.wrapping_sub(other)
+                    }
+                }
+            }
+        )*
+    };
+}
+
+impl_debug_strict_add! {
+    u8 u16 u32 u64 u128 usize
+    i8 i16 i32 i64 i128 isize
+}
+
+impl_debug_strict_sub! {
+    u8 u16 u32 u64 u128 usize
+    i8 i16 i32 i64 i128 isize
+}

compiler/rustc_serialize/src/leb128.rs (+6 −5)

@@ -1,3 +1,4 @@
+use crate::int_overflow::DebugStrictAdd;
 use crate::opaque::MemDecoder;
 use crate::serialize::Decoder;
 
@@ -24,15 +25,15 @@ macro_rules! impl_write_unsigned_leb128 {
                         *out.get_unchecked_mut(i) = value as u8;
                     }
 
-                    i += 1;
+                    i = i.debug_strict_add(1);
                     break;
                 } else {
                     unsafe {
                         *out.get_unchecked_mut(i) = ((value & 0x7f) | 0x80) as u8;
                     }
 
                     value >>= 7;
-                    i += 1;
+                    i = i.debug_strict_add(1);
                 }
             }
 
@@ -69,7 +70,7 @@ macro_rules! impl_read_unsigned_leb128 {
                 } else {
                     result |= ((byte & 0x7F) as $int_ty) << shift;
                 }
-                shift += 7;
+                shift = shift.debug_strict_add(7);
             }
         }
     };
@@ -101,7 +102,7 @@ macro_rules! impl_write_signed_leb128 {
                     *out.get_unchecked_mut(i) = byte;
                 }
 
-                i += 1;
+                i = i.debug_strict_add(1);
 
                 if !more {
                     break;
@@ -130,7 +131,7 @@ macro_rules! impl_read_signed_leb128 {
             loop {
                 byte = decoder.read_u8();
                 result |= <$int_ty>::from(byte & 0x7F) << shift;
-                shift += 7;
+                shift = shift.debug_strict_add(7);
 
                 if (byte & 0x80) == 0 {
                     break;
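For context, the loop these counters drive writes one LEB128 byte group per iteration into a fixed-size output buffer, so `i` can never legitimately overflow. A simplified safe sketch of the unsigned write path (my illustration, not the macro-generated code in the commit):

use crate::int_overflow::DebugStrictAdd;

// Simplified sketch: encode a u32 as unsigned LEB128 into `out`,
// returning the number of bytes written (at most 5 for a u32).
fn write_u32_leb128(out: &mut [u8; 5], mut value: u32) -> usize {
    let mut i = 0;
    loop {
        if value < 0x80 {
            // Final byte group: high bit clear signals the end.
            out[i] = value as u8;
            i = i.debug_strict_add(1);
            break;
        } else {
            // More bytes follow: emit 7 payload bits with the high bit set.
            out[i] = ((value & 0x7f) | 0x80) as u8;
            value >>= 7;
            i = i.debug_strict_add(1);
        }
    }
    i
}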

compiler/rustc_serialize/src/lib.rs (+1)

@@ -23,5 +23,6 @@ pub use self::serialize::{Decodable, Decoder, Encodable, Encoder};
 
 mod serialize;
 
+pub mod int_overflow;
 pub mod leb128;
 pub mod opaque;

compiler/rustc_serialize/src/opaque.rs (+4 −3)

@@ -1,3 +1,4 @@
+use crate::int_overflow::DebugStrictAdd;
 use crate::leb128;
 use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
 use std::fs::File;
@@ -65,7 +66,7 @@ impl FileEncoder {
         // Tracking position this way instead of having a `self.position` field
         // means that we only need to update `self.buffered` on a write call,
         // as opposed to updating `self.position` and `self.buffered`.
-        self.flushed + self.buffered
+        self.flushed.debug_strict_add(self.buffered)
     }
 
     #[cold]
@@ -119,7 +120,7 @@ impl FileEncoder {
         }
         if let Some(dest) = self.buffer_empty().get_mut(..buf.len()) {
             dest.copy_from_slice(buf);
-            self.buffered += buf.len();
+            self.buffered = self.buffered.debug_strict_add(buf.len());
         } else {
             self.write_all_cold_path(buf);
         }
@@ -158,7 +159,7 @@ impl FileEncoder {
         if written > N {
             Self::panic_invalid_write::<N>(written);
         }
-        self.buffered += written;
+        self.buffered = self.buffered.debug_strict_add(written);
     }
 
     #[cold]
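The first hunk relies on the invariant spelled out in its comment: the encoder's logical position is always `flushed + buffered`, so a write only has to bump `buffered`. A toy model of that bookkeeping (my illustration, not `FileEncoder` itself):

use crate::int_overflow::DebugStrictAdd;

// Toy model: total position is derived as flushed + buffered, so a
// write only bumps `buffered`, and a flush moves it into `flushed`.
struct Bookkeeping {
    flushed: usize,
    buffered: usize,
}

impl Bookkeeping {
    fn position(&self) -> usize {
        self.flushed.debug_strict_add(self.buffered)
    }

    fn record_write(&mut self, n: usize) {
        self.buffered = self.buffered.debug_strict_add(n);
    }

    fn record_flush(&mut self) {
        self.flushed = self.flushed.debug_strict_add(self.buffered);
        self.buffered = 0;
    }
}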

compiler/rustc_span/src/span_encoding.rs (+3 −2)

@@ -4,6 +4,7 @@ use crate::SPAN_TRACK;
 use crate::{BytePos, SpanData};
 
 use rustc_data_structures::fx::FxIndexSet;
+use rustc_serialize::int_overflow::DebugStrictAdd;
 
 /// A compressed span.
 ///
@@ -166,7 +167,7 @@ impl Span {
         debug_assert!(len <= MAX_LEN);
         SpanData {
             lo: BytePos(self.lo_or_index),
-            hi: BytePos(self.lo_or_index + len),
+            hi: BytePos(self.lo_or_index.debug_strict_add(len)),
             ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
             parent: None,
         }
@@ -179,7 +180,7 @@ impl Span {
         };
         SpanData {
             lo: BytePos(self.lo_or_index),
-            hi: BytePos(self.lo_or_index + len),
+            hi: BytePos(self.lo_or_index.debug_strict_add(len)),
             ctxt: SyntaxContext::root(),
             parent: Some(parent),
         }
