//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
22
3+ use rustc_serialize:: int_overflow:: { DebugStrictAdd , DebugStrictSub } ;
34use std:: hash:: Hasher ;
45use std:: mem:: { self , MaybeUninit } ;
56use std:: ptr;
@@ -103,19 +104,19 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
103104 }
104105
105106 let mut i = 0 ;
106- if i + 3 < count {
107+ if i. debug_strict_add ( 3 ) < count {
107108 ptr:: copy_nonoverlapping ( src. add ( i) , dst. add ( i) , 4 ) ;
108- i += 4 ;
109+ i = i . debug_strict_add ( 4 ) ;
109110 }
110111
111- if i + 1 < count {
112+ if i. debug_strict_add ( 1 ) < count {
112113 ptr:: copy_nonoverlapping ( src. add ( i) , dst. add ( i) , 2 ) ;
113- i += 2
114+ i = i . debug_strict_add ( 2 )
114115 }
115116
116117 if i < count {
117118 * dst. add ( i) = * src. add ( i) ;
118- i += 1 ;
119+ i = i . debug_strict_add ( 1 ) ;
119120 }
120121
121122 debug_assert_eq ! ( i, count) ;
@@ -211,14 +212,14 @@ impl SipHasher128 {
211212 debug_assert ! ( nbuf < BUFFER_SIZE ) ;
212213 debug_assert ! ( nbuf + LEN < BUFFER_WITH_SPILL_SIZE ) ;
213214
214- if nbuf + LEN < BUFFER_SIZE {
215+ if nbuf. debug_strict_add ( LEN ) < BUFFER_SIZE {
215216 unsafe {
216217 // The memcpy call is optimized away because the size is known.
217218 let dst = ( self . buf . as_mut_ptr ( ) as * mut u8 ) . add ( nbuf) ;
218219 ptr:: copy_nonoverlapping ( bytes. as_ptr ( ) , dst, LEN ) ;
219220 }
220221
221- self . nbuf = nbuf + LEN ;
222+ self . nbuf = nbuf. debug_strict_add ( LEN ) ;
222223
223224 return ;
224225 }
@@ -265,8 +266,9 @@ impl SipHasher128 {
265266 // This function should only be called when the write fills the buffer.
266267 // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
267268 // LEN is statically known, so the branch is optimized away.
268- self . nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE } ;
269- self . processed += BUFFER_SIZE ;
269+ self . nbuf =
270+ if LEN == 1 { 0 } else { nbuf. debug_strict_add ( LEN ) . debug_strict_sub ( BUFFER_SIZE ) } ;
271+ self . processed = self . processed . debug_strict_add ( BUFFER_SIZE ) ;
270272 }
271273 }
272274
@@ -277,7 +279,7 @@ impl SipHasher128 {
277279 let nbuf = self . nbuf ;
278280 debug_assert ! ( nbuf < BUFFER_SIZE ) ;
279281
280- if nbuf + length < BUFFER_SIZE {
282+ if nbuf. debug_strict_add ( length) < BUFFER_SIZE {
281283 unsafe {
282284 let dst = ( self . buf . as_mut_ptr ( ) as * mut u8 ) . add ( nbuf) ;
283285
@@ -289,7 +291,7 @@ impl SipHasher128 {
289291 }
290292 }
291293
292- self . nbuf = nbuf + length;
294+ self . nbuf = nbuf. debug_strict_add ( length) ;
293295
294296 return ;
295297 }
@@ -315,7 +317,7 @@ impl SipHasher128 {
315317 // This function should only be called when the write fills the buffer,
316318 // so we know that there is enough input to fill the current element.
317319 let valid_in_elem = nbuf % ELEM_SIZE ;
318- let needed_in_elem = ELEM_SIZE - valid_in_elem;
320+ let needed_in_elem = ELEM_SIZE . debug_strict_sub ( valid_in_elem) ;
319321
320322 let src = msg. as_ptr ( ) ;
321323 let dst = ( self . buf . as_mut_ptr ( ) as * mut u8 ) . add ( nbuf) ;
@@ -327,7 +329,7 @@ impl SipHasher128 {
327329 // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
328330 // We know that is true, because last step ensured we have a full
329331 // element in the buffer.
330- let last = nbuf / ELEM_SIZE + 1 ;
332+ let last = ( nbuf / ELEM_SIZE ) . debug_strict_add ( 1 ) ;
331333
332334 for i in 0 ..last {
333335 let elem = self . buf . get_unchecked ( i) . assume_init ( ) . to_le ( ) ;
@@ -338,7 +340,7 @@ impl SipHasher128 {
338340
339341 // Process the remaining element-sized chunks of input.
340342 let mut processed = needed_in_elem;
341- let input_left = length - processed;
343+ let input_left = length. debug_strict_sub ( processed) ;
342344 let elems_left = input_left / ELEM_SIZE ;
343345 let extra_bytes_left = input_left % ELEM_SIZE ;
344346
@@ -347,7 +349,7 @@ impl SipHasher128 {
347349 self . state . v3 ^= elem;
348350 Sip13Rounds :: c_rounds ( & mut self . state ) ;
349351 self . state . v0 ^= elem;
350- processed += ELEM_SIZE ;
352+ processed = processed . debug_strict_add ( ELEM_SIZE ) ;
351353 }
352354
353355 // Copy remaining input into start of buffer.
@@ -356,7 +358,7 @@ impl SipHasher128 {
356358 copy_nonoverlapping_small ( src, dst, extra_bytes_left) ;
357359
358360 self . nbuf = extra_bytes_left;
359- self . processed += nbuf + processed;
361+ self . processed = self . processed . debug_strict_add ( nbuf. debug_strict_add ( processed) ) ;
360362 }
361363 }
362364
@@ -394,7 +396,7 @@ impl SipHasher128 {
394396 } ;
395397
396398 // Finalize the hash.
397- let length = self . processed + self . nbuf ;
399+ let length = self . processed . debug_strict_add ( self . nbuf ) ;
398400 let b: u64 = ( ( length as u64 & 0xff ) << 56 ) | elem;
399401
400402 state. v3 ^= b;
0 commit comments