@@ -13,10 +13,11 @@
 // XXX: Not sure how this should be structured
 // XXX: Iteration should probably be considered separately
 
+use container::Container;
 use iter::Iterator;
 use option::Option;
 use io::Reader;
-use vec::OwnedVector;
+use vec::{OwnedVector, ImmutableVector};
 
 /// An iterator that reads a single byte on each iteration,
 /// until `.read_byte()` returns `None`.
@@ -117,16 +118,23 @@ pub fn u64_from_be_bytes(data: &[u8],
                          start: uint,
                          size: uint)
                       -> u64 {
-    let mut sz = size;
-    assert!((sz <= 8u));
-    let mut val = 0_u64;
-    let mut pos = start;
-    while sz > 0u {
-        sz -= 1u;
-        val += (data[pos] as u64) << ((sz * 8u) as u64);
-        pos += 1u;
-    }
-    return val;
+    use ptr::{copy_nonoverlapping_memory, offset, mut_offset};
+    use unstable::intrinsics::from_be64;
+    use vec::MutableVector;
+
+    assert!(size <= 8u);
+
+    if data.len() - start < size {
+        fail!("index out of bounds");
+    }
+
+    let mut buf = [0u8, ..8];
+    unsafe {
+        let ptr = offset(data.as_ptr(), start as int);
+        let out = buf.as_mut_ptr();
+        copy_nonoverlapping_memory(mut_offset(out, (8 - size) as int), ptr, size);
+        from_be64(*(out as *i64)) as u64
+    }
 }
 
 #[cfg(test)]
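The new body replaces the per-byte shift-and-accumulate loop with one bulk copy: the `size` requested bytes are copied into the tail of a zero-filled 8-byte buffer, which is then read as a single big-endian 64-bit value via the `from_be64` intrinsic (the untouched leading zeros are the most significant bytes, so the result is unchanged). A minimal standalone sketch of the same idea in present-day safe Rust, using `u64::from_be_bytes` and slice copies instead of the raw-pointer intrinsics used in the patch:

```rust
// Sketch of the technique in modern safe Rust (not the patched std function):
// copy the requested bytes into the tail of a zero-filled 8-byte buffer,
// then decode the whole buffer as one big-endian u64.
fn u64_from_be_bytes(data: &[u8], start: usize, size: usize) -> u64 {
    assert!(size <= 8);
    let src = &data[start..start + size]; // panics if the range is out of bounds
    let mut buf = [0u8; 8];
    // The leading zero bytes are the most significant in big-endian order,
    // so the decoded value equals the `size`-byte big-endian integer.
    buf[8 - size..].copy_from_slice(src);
    u64::from_be_bytes(buf)
}

fn main() {
    let data = [0x01, 0x02, 0x03, 0x04];
    assert_eq!(u64_from_be_bytes(&data, 1, 3), 0x020304);
}
```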
|
@@ -465,4 +473,86 @@ mod test {
         assert!(reader.read_le_f32() == 8.1250);
     }
 
+    #[test]
+    fn test_u64_from_be_bytes() {
+        use super::u64_from_be_bytes;
+
+        let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09];
+
+        // Aligned access
+        assert_eq!(u64_from_be_bytes(buf, 0, 0), 0);
+        assert_eq!(u64_from_be_bytes(buf, 0, 1), 0x01);
+        assert_eq!(u64_from_be_bytes(buf, 0, 2), 0x0102);
+        assert_eq!(u64_from_be_bytes(buf, 0, 3), 0x010203);
+        assert_eq!(u64_from_be_bytes(buf, 0, 4), 0x01020304);
+        assert_eq!(u64_from_be_bytes(buf, 0, 5), 0x0102030405);
+        assert_eq!(u64_from_be_bytes(buf, 0, 6), 0x010203040506);
+        assert_eq!(u64_from_be_bytes(buf, 0, 7), 0x01020304050607);
+        assert_eq!(u64_from_be_bytes(buf, 0, 8), 0x0102030405060708);
+
+        // Unaligned access
+        assert_eq!(u64_from_be_bytes(buf, 1, 0), 0);
+        assert_eq!(u64_from_be_bytes(buf, 1, 1), 0x02);
+        assert_eq!(u64_from_be_bytes(buf, 1, 2), 0x0203);
+        assert_eq!(u64_from_be_bytes(buf, 1, 3), 0x020304);
+        assert_eq!(u64_from_be_bytes(buf, 1, 4), 0x02030405);
+        assert_eq!(u64_from_be_bytes(buf, 1, 5), 0x0203040506);
+        assert_eq!(u64_from_be_bytes(buf, 1, 6), 0x020304050607);
+        assert_eq!(u64_from_be_bytes(buf, 1, 7), 0x02030405060708);
+        assert_eq!(u64_from_be_bytes(buf, 1, 8), 0x0203040506070809);
+    }
+}
+
+#[cfg(test)]
+mod bench {
+    use extra::test::BenchHarness;
+    use container::Container;
+
+    macro_rules! u64_from_be_bytes_bench_impl(
+        ($size:expr, $stride:expr, $start_index:expr) =>
+        ({
+            use vec;
+            use super::u64_from_be_bytes;
+
+            let data = vec::from_fn($stride*100+$start_index, |i| i as u8);
+            let mut sum = 0u64;
+            bh.iter(|| {
+                let mut i = $start_index;
+                while (i < data.len()) {
+                    sum += u64_from_be_bytes(data, i, $size);
+                    i += $stride;
+                }
+            });
+        })
+    )
+
+    #[bench]
+    fn u64_from_be_bytes_4_aligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(4, 4, 0);
+    }
+
+    #[bench]
+    fn u64_from_be_bytes_4_unaligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(4, 4, 1);
+    }
+
+    #[bench]
+    fn u64_from_be_bytes_7_aligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(7, 8, 0);
+    }
+
+    #[bench]
+    fn u64_from_be_bytes_7_unaligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(7, 8, 1);
+    }
+
+    #[bench]
+    fn u64_from_be_bytes_8_aligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(8, 8, 0);
+    }
+
+    #[bench]
+    fn u64_from_be_bytes_8_unaligned(bh: &mut BenchHarness) {
+        u64_from_be_bytes_bench_impl!(8, 8, 1);
+    }
 }
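The added `bench` module exercises the function over a strided loop for aligned and unaligned start offsets at sizes 4, 7, and 8, with the shared body generated by `u64_from_be_bytes_bench_impl!` so that the `bh` harness of each `#[bench]` function is used at the expansion site. A rough present-day sketch of the same benchmarks, assuming a nightly toolchain and the unstable `test::Bencher` API in place of `extra::test::BenchHarness`, would pass the harness explicitly to a plain helper:

```rust
// Sketch only: modern-Rust analogue of the benchmark bodies above.
// Assumes a nightly compiler with #![feature(test)] at the crate root.
#![feature(test)]
extern crate test;

use test::Bencher;

// Same technique as the patched function, in safe modern Rust.
fn u64_from_be_bytes(data: &[u8], start: usize, size: usize) -> u64 {
    let mut buf = [0u8; 8];
    buf[8 - size..].copy_from_slice(&data[start..start + size]);
    u64::from_be_bytes(buf)
}

// Shared body: walk the buffer with the given stride and decode `size`
// bytes at each step, matching the loop generated by the macro above.
fn bench_impl(bh: &mut Bencher, size: usize, stride: usize, start_index: usize) {
    let data: Vec<u8> = (0..stride * 100 + start_index).map(|i| i as u8).collect();
    bh.iter(|| {
        let mut sum = 0u64;
        let mut i = start_index;
        while i < data.len() {
            sum += u64_from_be_bytes(&data, i, size);
            i += stride;
        }
        sum // returned so the work is not optimized away
    });
}

#[bench]
fn u64_from_be_bytes_4_aligned(bh: &mut Bencher) {
    bench_impl(bh, 4, 4, 0);
}
```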
|