[perf experiment] Enable overflow checks for not-std #119440

Status: Closed · wants to merge 3 commits
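
For context, a minimal sketch (not from the PR) of the semantics this experiment toggles: with `-C overflow-checks=on`, plain integer `+` panics on wraparound, while the `wrapping_*` methods introduced throughout the hunks below behave identically whether checks are on or off.

fn main() {
    let x: u8 = 255;
    assert_eq!(x.wrapping_add(1), 0); // same result with checks on or off
    // With overflow checks enabled, this line would panic at runtime:
    // let _ = x + 1;
}
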
compiler/rustc_data_structures/src/sip128.rs (16 additions, 16 deletions):

@@ -102,20 +102,20 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
         return;
     }
 
-    let mut i = 0;
-    if i + 3 < count {
+    let mut i = 0_usize;
+    if i.wrapping_add(3) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
+        i = i.wrapping_add(4);
     }
 
     if i + 1 < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
+        i = i.wrapping_add(2);
     }
 
     if i < count {
         *dst.add(i) = *src.add(i);
-        i += 1;
+        i = i.wrapping_add(1);
     }
 
     debug_assert_eq!(i, count);
@@ -211,14 +211,14 @@ impl SipHasher128 {
         debug_assert!(nbuf < BUFFER_SIZE);
         debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
-        if nbuf + LEN < BUFFER_SIZE {
+        if nbuf.wrapping_add(LEN) < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                 ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }
 
-            self.nbuf = nbuf + LEN;
+            self.nbuf = nbuf.wrapping_add(LEN);
 
             return;
         }
@@ -265,8 +265,8 @@ impl SipHasher128 {
         // This function should only be called when the write fills the buffer.
        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
        // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
+        self.nbuf = if LEN == 1 { 0 } else { nbuf.wrapping_add(LEN).wrapping_sub(BUFFER_SIZE) };
+        self.processed = self.processed.wrapping_add(BUFFER_SIZE);
     }
 }
@@ -277,7 +277,7 @@ impl SipHasher128 {
         let nbuf = self.nbuf;
         debug_assert!(nbuf < BUFFER_SIZE);
 
-        if nbuf + length < BUFFER_SIZE {
+        if nbuf.wrapping_add(length) < BUFFER_SIZE {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
@@ -289,7 +289,7 @@
                 }
             }
 
-            self.nbuf = nbuf + length;
+            self.nbuf = nbuf.wrapping_add(length);
 
             return;
         }
@@ -327,7 +327,7 @@ impl SipHasher128 {
            // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
            // We know that is true, because last step ensured we have a full
            // element in the buffer.
-            let last = nbuf / ELEM_SIZE + 1;
+            let last = (nbuf / ELEM_SIZE).wrapping_add(1);
 
            for i in 0..last {
                let elem = self.buf.get_unchecked(i).assume_init().to_le();
@@ -338,7 +338,7 @@ impl SipHasher128 {
 
            // Process the remaining element-sized chunks of input.
            let mut processed = needed_in_elem;
-            let input_left = length - processed;
+            let input_left = length.wrapping_sub(processed);
            let elems_left = input_left / ELEM_SIZE;
            let extra_bytes_left = input_left % ELEM_SIZE;
 
@@ -347,7 +347,7 @@ impl SipHasher128 {
                self.state.v3 ^= elem;
                Sip13Rounds::c_rounds(&mut self.state);
                self.state.v0 ^= elem;
-                processed += ELEM_SIZE;
+                processed = processed.wrapping_add(ELEM_SIZE);
            }
 
            // Copy remaining input into start of buffer.
@@ -356,7 +356,7 @@ impl SipHasher128 {
            copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
            self.nbuf = extra_bytes_left;
-            self.processed += nbuf + processed;
+            self.processed = self.processed.wrapping_add(nbuf).wrapping_add(processed);
        }
    }
@@ -394,7 +394,7 @@ impl SipHasher128 {
        };
 
        // Finalize the hash.
-        let length = self.processed + self.nbuf;
+        let length = self.processed.wrapping_add(self.nbuf);
        let b: u64 = ((length as u64 & 0xff) << 56) | elem;
 
        state.v3 ^= b;
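
None of the index arithmetic above can overflow in practice (`count` is below 8, so every offset stays tiny); the `wrapping_*` forms only exempt these lines from the newly enabled checks. A safe-Rust sketch of the same small-copy strategy, with an invented name and slices in place of raw pointers:

fn copy_small(src: &[u8], dst: &mut [u8], count: usize) {
    debug_assert!(count < 8 && src.len() >= count && dst.len() >= count);
    let mut i = 0_usize;
    if i.wrapping_add(3) < count {
        dst[i..i + 4].copy_from_slice(&src[i..i + 4]); // one 4-byte chunk
        i = i.wrapping_add(4);
    }
    if i.wrapping_add(1) < count {
        dst[i..i + 2].copy_from_slice(&src[i..i + 2]); // one 2-byte chunk
        i = i.wrapping_add(2);
    }
    if i < count {
        dst[i] = src[i]; // final odd byte
        i = i.wrapping_add(1);
    }
    debug_assert_eq!(i, count);
}

At most one 4-byte, one 2-byte, and one 1-byte copy covers every `count` from 0 to 7, which is what the closing `debug_assert_eq!(i, count)` verifies.
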
compiler/rustc_middle/src/mir/interpret/mod.rs (1 addition, 1 deletion):

@@ -285,7 +285,7 @@ impl AllocDecodingState {
         let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);
 
         // Make sure this is never zero.
-        let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();
+        let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF).wrapping_add(1)).unwrap();
 
         AllocDecodingSession { state: self, session_id }
     }
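
The mask is what makes this addition safe: `counter & 0x7FFFFFFF` is at most 2^31 - 1, so adding 1 cannot wrap, and the result is never the zero that the comment rules out. The same reasoning as a standalone sketch (plain `u32`, invented name):

fn session_id(counter: u32) -> u32 {
    // Masked value is at most 0x7FFF_FFFF, so the increment can't wrap
    // and the result lies in 1..=0x8000_0000, never zero.
    (counter & 0x7FFF_FFFF).wrapping_add(1)
}
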
compiler/rustc_query_system/src/dep_graph/serialized.rs (3 additions, 3 deletions):

@@ -101,14 +101,14 @@ impl SerializedDepGraph {
         // edge list, or the end of the array if this is the last edge.
         let end = self
             .edge_list_indices
-            .get(source + 1)
+            .get(SerializedDepNodeIndex::from_usize(source.index().wrapping_add(1)))
             .map(|h| h.start())
-            .unwrap_or_else(|| self.edge_list_data.len() - DEP_NODE_PAD);
+            .unwrap_or_else(|| self.edge_list_data.len().wrapping_sub(DEP_NODE_PAD));
 
         // The number of edges for this node is implicitly stored in the combination of the byte
         // width and the length.
         let bytes_per_index = header.bytes_per_index();
-        let len = (end - header.start()) / bytes_per_index;
+        let len = (end.wrapping_sub(header.start())) / bytes_per_index;
 
         // LLVM doesn't hoist EdgeHeader::mask so we do it ourselves.
         let mask = header.mask();
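
The lines above use the standard offsets-array idiom: node `i`'s edges span from `offsets[i]` to `offsets[i + 1]`, and the last node falls back to the end of the data. A simplified sketch with plain slices (assumed shapes, not rustc's index newtypes or the `DEP_NODE_PAD` adjustment):

fn edge_range(offsets: &[usize], data_len: usize, i: usize) -> (usize, usize) {
    let start = offsets[i];
    // The last node has no successor entry; its edges run to the end.
    let end = offsets.get(i.wrapping_add(1)).copied().unwrap_or(data_len);
    (start, end)
}
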
compiler/rustc_serialize/src/leb128.rs (4 additions, 4 deletions):

@@ -24,15 +24,15 @@ macro_rules! impl_write_unsigned_leb128 {
                         *out.get_unchecked_mut(i) = value as u8;
                     }
 
-                    i += 1;
+                    i = i.wrapping_add(1);
                     break;
                 } else {
                     unsafe {
                         *out.get_unchecked_mut(i) = ((value & 0x7f) | 0x80) as u8;
                     }
 
                     value >>= 7;
-                    i += 1;
+                    i = i.wrapping_add(1);
                 }
             }
 
@@ -60,7 +60,7 @@ macro_rules! impl_read_unsigned_leb128 {
                 return byte as $int_ty;
             }
             let mut result = (byte & 0x7F) as $int_ty;
-            let mut shift = 7;
+            let mut shift = 7_usize;
             loop {
                 let byte = decoder.read_u8();
                 if (byte & 0x80) == 0 {
@@ -69,7 +69,7 @@ macro_rules! impl_read_unsigned_leb128 {
                 } else {
                     result |= ((byte & 0x7F) as $int_ty) << shift;
                 }
-                shift += 7;
+                shift = shift.wrapping_add(7);
             }
         }
     };
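
These macros implement unsigned LEB128: seven payload bits per byte, with the high bit set on every byte except the last. A self-contained round-trip for `u32` (a sketch of the format only; rustc's versions write into fixed-size buffers with unchecked indexing):

fn write_uleb128(mut value: u32, out: &mut Vec<u8>) {
    loop {
        if value < 0x80 {
            out.push(value as u8); // final byte: high bit clear
            break;
        }
        out.push(((value & 0x7f) | 0x80) as u8); // continuation bit set
        value >>= 7;
    }
}

fn read_uleb128(bytes: &[u8]) -> (u32, usize) {
    let (mut result, mut shift, mut i) = (0_u32, 0_u32, 0_usize);
    loop {
        let byte = bytes[i];
        i += 1;
        result |= ((byte & 0x7f) as u32) << shift;
        if byte & 0x80 == 0 {
            return (result, i); // decoded value and bytes consumed
        }
        shift += 7;
    }
}

fn main() {
    let mut buf = Vec::new();
    write_uleb128(300, &mut buf);
    assert_eq!(buf, [0xAC, 0x02]);
    assert_eq!(read_uleb128(&buf), (300, 2));
}
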
compiler/rustc_serialize/src/opaque.rs (3 additions, 3 deletions):

@@ -65,7 +65,7 @@ impl FileEncoder {
         // Tracking position this way instead of having a `self.position` field
         // means that we only need to update `self.buffered` on a write call,
         // as opposed to updating `self.position` and `self.buffered`.
-        self.flushed + self.buffered
+        self.flushed.wrapping_add(self.buffered)
     }
 
     #[cold]
@@ -119,7 +119,7 @@ impl FileEncoder {
         }
         if let Some(dest) = self.buffer_empty().get_mut(..buf.len()) {
             dest.copy_from_slice(buf);
-            self.buffered += buf.len();
+            self.buffered = self.buffered.wrapping_add(buf.len());
         } else {
             self.write_all_cold_path(buf);
         }
@@ -158,7 +158,7 @@ impl FileEncoder {
         if written > N {
             Self::panic_invalid_write::<N>(written);
         }
-        self.buffered += written;
+        self.buffered = self.buffered.wrapping_add(written);
     }
 
     #[cold]
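
The comment in the first hunk states the invariant all three changes lean on: the encoder's logical position is always `flushed + buffered`, so the hot write path only ever updates `buffered`. A minimal sketch of that design (simplified struct, invented names):

struct Enc {
    flushed: usize,  // bytes already written out
    buffered: usize, // bytes still in the in-memory buffer
}

impl Enc {
    fn position(&self) -> usize {
        self.flushed.wrapping_add(self.buffered) // no separate position field needed
    }
    fn write(&mut self, n: usize) {
        self.buffered = self.buffered.wrapping_add(n); // the only hot-path update
    }
}
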
compiler/rustc_span/src/caching_source_map_view.rs (3 additions, 3 deletions):

@@ -40,7 +40,7 @@ impl CacheEntry {
         let pos = self.file.relative_position(pos);
         let line_index = self.file.lookup_line(pos).unwrap();
         let line_bounds = self.file.line_bounds(line_index);
-        self.line_number = line_index + 1;
+        self.line_number = line_index.wrapping_add(1);
         self.line = line_bounds;
         self.touch(time_stamp);
     }
@@ -81,15 +81,15 @@ impl<'sm> CachingSourceMapView<'sm> {
         &mut self,
         pos: BytePos,
     ) -> Option<(Lrc<SourceFile>, usize, RelativeBytePos)> {
-        self.time_stamp += 1;
+        self.time_stamp = self.time_stamp.wrapping_add(1);
 
         // Check if the position is in one of the cached lines
         let cache_idx = self.cache_entry_index(pos);
         if cache_idx != -1 {
             let cache_entry = &mut self.line_cache[cache_idx as usize];
             cache_entry.touch(self.time_stamp);
 
-            let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32());
+            let col = RelativeBytePos(pos.to_u32().wrapping_sub(cache_entry.line.start.to_u32()));
             return Some((cache_entry.file.clone(), cache_entry.line_number, col));
         }
 
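
Both changed lines rest on cache invariants: `line_index` is a line count (nowhere near `u32::MAX`), and a cache hit guarantees `pos` lies inside the cached line, so the subtraction cannot go negative. The lookup math as a sketch, with plain integers standing in for the `BytePos` newtypes:

fn line_and_col(line_index: u32, line_start: u32, pos: u32) -> (u32, u32) {
    debug_assert!(pos >= line_start); // implied by a cache hit
    (line_index.wrapping_add(1), pos.wrapping_sub(line_start)) // 1-based line, byte column
}
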
compiler/rustc_span/src/lib.rs (6 additions, 5 deletions):

@@ -1746,7 +1746,7 @@ impl SourceFile {
 
     #[inline]
     pub fn relative_position(&self, pos: BytePos) -> RelativeBytePos {
-        RelativeBytePos::from_u32(pos.to_u32() - self.start_pos.to_u32())
+        RelativeBytePos::from_u32(pos.to_u32().wrapping_sub(self.start_pos.to_u32()))
     }
 
     #[inline]
@@ -1769,10 +1769,11 @@
 
         let lines = self.lines();
         assert!(line_index < lines.len());
-        if line_index == (lines.len() - 1) {
+        if line_index == (lines.len().wrapping_sub(1)) {
             self.absolute_position(lines[line_index])..self.end_position()
         } else {
-            self.absolute_position(lines[line_index])..self.absolute_position(lines[line_index + 1])
+            self.absolute_position(lines[line_index])
+                ..self.absolute_position(lines[line_index.wrapping_add(1)])
         }
     }
 
@@ -2039,7 +2040,7 @@ macro_rules! impl_pos {
 
            #[inline(always)]
            fn add(self, rhs: $ident) -> $ident {
-                $ident(self.0 + rhs.0)
+                $ident(self.0.wrapping_add(rhs.0))
            }
        }
 
@@ -2048,7 +2049,7 @@ macro_rules! impl_pos {
 
            #[inline(always)]
            fn sub(self, rhs: $ident) -> $ident {
-                $ident(self.0 - rhs.0)
+                $ident(self.0.wrapping_sub(rhs.0))
            }
        }
    )*
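
`impl_pos!` generates `Add` and `Sub` for position newtypes such as `BytePos`, so changing the macro body removes the overflow check from every expansion at once. A sketch of what a single expansion looks like (the `Pos` name is invented):

use std::ops::{Add, Sub};

#[derive(Clone, Copy, Debug, PartialEq)]
struct Pos(u32);

impl Add for Pos {
    type Output = Pos;
    #[inline(always)]
    fn add(self, rhs: Pos) -> Pos {
        Pos(self.0.wrapping_add(rhs.0))
    }
}

impl Sub for Pos {
    type Output = Pos;
    #[inline(always)]
    fn sub(self, rhs: Pos) -> Pos {
        Pos(self.0.wrapping_sub(rhs.0))
    }
}

fn main() {
    assert_eq!(Pos(10) - Pos(4), Pos(6));
}
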
compiler/rustc_span/src/span_encoding.rs (2 additions, 2 deletions):

@@ -166,7 +166,7 @@ impl Span {
             debug_assert!(len <= MAX_LEN);
             SpanData {
                 lo: BytePos(self.lo_or_index),
-                hi: BytePos(self.lo_or_index + len),
+                hi: BytePos(self.lo_or_index.wrapping_add(len)),
                 ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
                 parent: None,
             }
@@ -179,7 +179,7 @@ impl Span {
             };
             SpanData {
                 lo: BytePos(self.lo_or_index),
-                hi: BytePos(self.lo_or_index + len),
+                hi: BytePos(self.lo_or_index.wrapping_add(len)),
                 ctxt: SyntaxContext::root(),
                 parent: Some(parent),
             }
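
In the inline span formats only `lo` and a length are stored; `hi` is reconstructed by addition, and the `debug_assert!(len <= MAX_LEN)` above is what keeps that addition from ever wrapping. The recoverable rule as a one-liner (plain `u32`, invented name):

fn decode_hi(lo: u32, len: u32) -> u32 {
    lo.wrapping_add(len) // len <= MAX_LEN by invariant, so no real wraparound
}
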
src/bootstrap/src/core/builder.rs (1 addition, 1 deletion):

@@ -1760,7 +1760,7 @@ impl<'a> Builder<'a> {
             if mode == Mode::Std {
                 self.config.rust_overflow_checks_std.to_string()
             } else {
-                self.config.rust_overflow_checks.to_string()
+                "true".into()
             },
         );
 
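
This is the experiment's actual lever: instead of honoring the configured `rust_overflow_checks` value, every non-std crate is hard-wired to build with overflow checks on, the equivalent of passing `-C overflow-checks=on`. A sketch of the behavior difference that enables (not part of the PR):

fn main() {
    // The explicit forms behave identically whether checks are on or off:
    assert_eq!(i32::MAX.wrapping_add(1), i32::MIN);
    assert_eq!(i32::MAX.checked_add(1), None);
    // With -C overflow-checks=on, plain `i32::MAX + 1` panics instead of wrapping.
}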