@@ -202,28 +202,26 @@ impl SipHasher128 {
         hasher
     }

-    // A specialized write function for values with size <= 8.
     #[inline]
-    fn short_write<T>(&mut self, x: T) {
-        let size = mem::size_of::<T>();
+    pub fn short_write<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
         let nbuf = self.nbuf;
-        debug_assert!(size <= 8);
+        debug_assert!(LEN <= 8);
         debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);
+        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

-        if nbuf + size < BUFFER_SIZE {
+        if nbuf + LEN < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-                ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
+                ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }

-            self.nbuf = nbuf + size;
+            self.nbuf = nbuf + LEN;

             return;
         }

-        unsafe { self.short_write_process_buffer(x) }
+        unsafe { self.short_write_process_buffer(bytes) }
     }

     // A specialized write function for values with size <= 8 that should only
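The new `short_write` takes its operand as a native-endian byte array whose length is a const generic parameter, so the copy length is a compile-time constant at every call site instead of being derived from `mem::size_of::<T>()`. A minimal self-contained sketch of the same pattern (the `Sink` type, its 16-byte buffer, and the safe bounds check are illustrative stand-ins, not the real hasher internals):

```rust
// Hypothetical stand-in for a byte-buffering sink; not the real SipHasher128.
struct Sink {
    buf: [u8; 16],
    nbuf: usize,
}

impl Sink {
    #[inline]
    fn short_write<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
        // LEN is a compile-time constant, so the bounds check and the copy
        // below are resolved per monomorphized call site.
        assert!(self.nbuf + LEN <= self.buf.len());
        self.buf[self.nbuf..self.nbuf + LEN].copy_from_slice(&bytes);
        self.nbuf += LEN;
    }
}

fn main() {
    let mut s = Sink { buf: [0; 16], nbuf: 0 };
    s.short_write(0x12u8.to_ne_bytes());         // LEN = 1
    s.short_write(0x3456u16.to_ne_bytes());      // LEN = 2
    s.short_write(0x789a_bcdeu32.to_ne_bytes()); // LEN = 4
    assert_eq!(s.nbuf, 7);
}
```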
@@ -233,18 +231,17 @@ impl SipHasher128 {
     // `self.nbuf` must cause `self.buf` to become fully initialized (and not
     // overflow) if it wasn't already.
     #[inline(never)]
-    unsafe fn short_write_process_buffer<T>(&mut self, x: T) {
-        let size = mem::size_of::<T>();
+    unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
         let nbuf = self.nbuf;
-        debug_assert!(size <= 8);
+        debug_assert!(LEN <= 8);
         debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + size >= BUFFER_SIZE);
-        debug_assert!(nbuf + size < BUFFER_WITH_SPILL_SIZE);
+        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

         // Copy first part of input into end of buffer, possibly into spill
         // element. The memcpy call is optimized away because the size is known.
         let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        ptr::copy_nonoverlapping(&x as *const _ as *const u8, dst, size);
+        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);

         // Process buffer.
         for i in 0..BUFFER_CAPACITY {
@@ -254,17 +251,17 @@ impl SipHasher128 {
             self.state.v0 ^= elem;
         }

-        // Copy remaining input into start of buffer by copying size - 1
-        // elements from spill (at most size - 1 bytes could have overflowed
+        // Copy remaining input into start of buffer by copying LEN - 1
+        // elements from spill (at most LEN - 1 bytes could have overflowed
         // into the spill). The memcpy call is optimized away because the size
-        // is known. And the whole copy is optimized away for size == 1.
+        // is known. And the whole copy is optimized away for LEN == 1.
         let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
-        ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, size - 1);
+        ptr::copy_nonoverlapping(src, self.buf.as_mut_ptr() as *mut u8, LEN - 1);

         // This function should only be called when the write fills the buffer.
-        // Therefore, when size == 1, the new `self.nbuf` must be zero. The size
-        // is statically known, so the branch is optimized away.
-        self.nbuf = if size == 1 { 0 } else { nbuf + size - BUFFER_SIZE };
+        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+        // LEN is statically known, so the branch is optimized away.
+        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };

         self.processed += BUFFER_SIZE;
     }
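When a write does not fit, `short_write_process_buffer` copies the head of the input into the end of the buffer (possibly into the spill element), hashes the full buffer, moves the spilled tail back to the start, and sets `nbuf` to the number of bytes that overflowed. A small worked example of that wrap arithmetic, assuming a hypothetical `BUFFER_SIZE` of 8 bytes (the real buffer in the hasher is larger):

```rust
// Hypothetical, smaller buffer size used only to illustrate the arithmetic.
const BUFFER_SIZE: usize = 8;

fn new_nbuf<const LEN: usize>(nbuf: usize) -> usize {
    // Precondition mirrored from the hasher: this path runs only when the
    // write fills the buffer.
    debug_assert!(nbuf + LEN >= BUFFER_SIZE);
    if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE }
}

fn main() {
    // 6 bytes already buffered, a 4-byte write arrives: the buffer is filled,
    // 2 bytes spill over and become the new contents, so nbuf becomes 2.
    assert_eq!(new_nbuf::<4>(6), 2);
    // A 1-byte write that exactly fills the buffer leaves it empty again.
    assert_eq!(new_nbuf::<1>(7), 0);
}
```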
@@ -412,52 +409,52 @@ impl SipHasher128 {
 impl Hasher for SipHasher128 {
     #[inline]
     fn write_u8(&mut self, i: u8) {
-        self.short_write(i);
+        self.short_write(i.to_ne_bytes());
     }

     #[inline]
     fn write_u16(&mut self, i: u16) {
-        self.short_write(i);
+        self.short_write(i.to_ne_bytes());
     }

     #[inline]
     fn write_u32(&mut self, i: u32) {
-        self.short_write(i);
+        self.short_write(i.to_ne_bytes());
     }

     #[inline]
     fn write_u64(&mut self, i: u64) {
-        self.short_write(i);
+        self.short_write(i.to_ne_bytes());
     }

     #[inline]
     fn write_usize(&mut self, i: usize) {
-        self.short_write(i);
+        self.short_write(i.to_ne_bytes());
     }

     #[inline]
     fn write_i8(&mut self, i: i8) {
-        self.short_write(i as u8);
+        self.short_write((i as u8).to_ne_bytes());
     }

     #[inline]
     fn write_i16(&mut self, i: i16) {
-        self.short_write(i as u16);
+        self.short_write((i as u16).to_ne_bytes());
     }

     #[inline]
     fn write_i32(&mut self, i: i32) {
-        self.short_write(i as u32);
+        self.short_write((i as u32).to_ne_bytes());
     }

     #[inline]
     fn write_i64(&mut self, i: i64) {
-        self.short_write(i as u64);
+        self.short_write((i as u64).to_ne_bytes());
     }

     #[inline]
     fn write_isize(&mut self, i: isize) {
-        self.short_write(i as usize);
+        self.short_write((i as usize).to_ne_bytes());
     }

     #[inline]
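Each integer write now converts its argument to a fixed-size native-endian byte array with `to_ne_bytes`, so every call site monomorphizes `short_write` with `LEN` known at compile time. A rough usage sketch through the standard `Hasher` trait; `DefaultHasher` stands in here only because `SipHasher128` is internal to the compiler:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// With the change above, each of these calls hands the hasher a fixed-size
// native-endian byte array: [u8; 1], [u8; 2], [u8; 8], and [u8; 4] respectively.
fn feed<H: Hasher>(h: &mut H) {
    h.write_u8(0xAB);
    h.write_u16(0x1234);
    h.write_u64(0xDEAD_BEEF_CAFE_F00D);
    h.write_i32(-7); // cast to u32 first, then converted to bytes
}

fn main() {
    let mut h = DefaultHasher::new();
    feed(&mut h);
    println!("{:x}", h.finish());
}
```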