diff --git a/rand_core/src/impls.rs b/rand_core/src/impls.rs
new file mode 100644
index 00000000000..fb45a3ecee2
--- /dev/null
+++ b/rand_core/src/impls.rs
@@ -0,0 +1,78 @@
+// Copyright 2013-2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Helper functions for implementing `Rng` functions.
+//!
+//! For cross-platform reproducibility, these functions all use Little Endian:
+//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
+//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
+//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
+//!
+//! Byte-swapping (like the std `to_le` functions) is only needed to convert
+//! to/from byte sequences, and since its purpose is reproducibility,
+//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
+
+use core::intrinsics::transmute;
+use {Rng, Result};
+
+/// Implement `next_u64` via `next_u32`, little-endian order.
+pub fn next_u64_via_u32<R: Rng+?Sized>(rng: &mut R) -> u64 {
+    // Use LE; we explicitly generate one value before the next.
+    let x = rng.next_u32() as u64;
+    let y = rng.next_u32() as u64;
+    (y << 32) | x
+}
+
+/// Implement `next_u128` via `next_u64`, little-endian order.
+#[cfg(feature = "i128_support")]
+pub fn next_u128_via_u64<R: Rng+?Sized>(rng: &mut R) -> u128 {
+    // Use LE; we explicitly generate one value before the next.
+    let x = rng.next_u64() as u128;
+    let y = rng.next_u64() as u128;
+    (y << 64) | x
+}
+
+macro_rules! try_fill_via {
+    ($rng:ident, $next_u:ident, $BYTES:expr, $dest:ident) => {{
+        let mut left = $dest;
+        while left.len() >= $BYTES {
+            let (l, r) = {left}.split_at_mut($BYTES);
+            left = r;
+            let chunk: [u8; $BYTES] = unsafe {
+                transmute($rng.$next_u().to_le())
+            };
+            l.copy_from_slice(&chunk);
+        }
+        let n = left.len();
+        if n > 0 {
+            let chunk: [u8; $BYTES] = unsafe {
+                transmute($rng.$next_u().to_le())
+            };
+            left.copy_from_slice(&chunk[..n]);
+        }
+        Ok(())
+    }}
+}
+
+/// Implement `try_fill` via `next_u32`, little-endian order.
+pub fn try_fill_via_u32<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
+    try_fill_via!(rng, next_u32, 4, dest)
+}
+
+/// Implement `try_fill` via `next_u64`, little-endian order.
+pub fn try_fill_via_u64<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
+    try_fill_via!(rng, next_u64, 8, dest)
+}
+
+/// Implement `try_fill` via `next_u128`, little-endian order.
+#[cfg(feature = "i128_support")]
+pub fn try_fill_via_u128<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
+    try_fill_via!(rng, next_u128, 16, dest)
+}
diff --git a/rand_core/src/lib.rs b/rand_core/src/lib.rs
index afa681641b3..7094b8ebe5e 100644
--- a/rand_core/src/lib.rs
+++ b/rand_core/src/lib.rs
@@ -35,6 +35,7 @@
 
 #[cfg(feature="std")] extern crate core;
 
+pub mod impls;
 pub mod mock;
 
 
@@ -75,63 +76,21 @@ pub trait Rng {
     fn next_u32(&mut self) -> u32;
 
     /// Return the next random u64.
-    ///
-    /// By default this is implemented in terms of `next_u32` in LE order. An
-    /// implementation of this trait must provide at least one of
-    /// these two methods. Similarly to `next_u32`, this rarely needs
-    /// to be called directly, prefer `r.gen()` to `r.next_u64()`.
-    fn next_u64(&mut self) -> u64 {
-        // Use LE; we explicitly generate one value before the next.
-        let x = self.next_u32() as u64;
-        let y = self.next_u32() as u64;
-        (y << 32) | x
-    }
+    fn next_u64(&mut self) -> u64;
 
-    #[cfg(feature = "i128_support")]
     /// Return the next random u128.
-    ///
-    /// By default this is implemented in terms of `next_u64` in LE order.
-    fn next_u128(&mut self) -> u128 {
-        // Use LE; we explicitly generate one value before the next.
-        let x = self.next_u64() as u128;
-        let y = self.next_u64() as u128;
-        (y << 64) | x
-    }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128;
 
     /// Fill `dest` entirely with random data.
     ///
-    /// This has a default implementation in terms of `next_u64` in LE order,
-    /// but should be overridden by implementations that
-    /// offer a more efficient solution than just calling `next_u64`
-    /// repeatedly.
-    ///
     /// This method does *not* have any requirement on how much of the
-    /// generated random number stream is consumed; e.g. the default
+    /// generated random number stream is consumed; e.g. the `try_fill_via_u64`
     /// implementation uses `next_u64` thus consuming 8 bytes even when only
     /// 1 is required. A different implementation might use `next_u32` and
     /// only consume 4 bytes; *however* any change affecting *reproducibility*
     /// of output must be considered a breaking change.
-    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
-        use core::intrinsics::transmute;
-
-        let mut left = dest;
-        while left.len() >= 8 {
-            let (l, r) = {left}.split_at_mut(8);
-            left = r;
-            let chunk: [u8; 8] = unsafe {
-                transmute(self.next_u64().to_le())
-            };
-            l.copy_from_slice(&chunk);
-        }
-        let n = left.len();
-        if n > 0 {
-            let chunk: [u8; 8] = unsafe {
-                transmute(self.next_u64().to_le())
-            };
-            left.copy_from_slice(&chunk[..n]);
-        }
-        Ok(())
-    }
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()>;
 }
 
 #[cfg(feature="std")]
diff --git a/rand_core/src/mock.rs b/rand_core/src/mock.rs
index a4fead00e4a..c32e9310d84 100644
--- a/rand_core/src/mock.rs
+++ b/rand_core/src/mock.rs
@@ -16,7 +16,7 @@
 //! Instead maybe this should be yet another crate? Or just leave it here?
 
 use core::num::Wrapping as w;
-use {Rng, SeedableRng};
+use {Rng, SeedableRng, impls, Result};
 
 /// A simple implementation of `Rng`, purely for testing.
 /// Returns an arithmetic sequence (i.e. adds a constant each step).
@@ -49,6 +49,17 @@ impl Rng for MockAddRng<u32> {
         self.v += self.a;
         result
     }
+    fn next_u64(&mut self) -> u64 {
+        impls::next_u64_via_u32(self)
+    }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        impls::next_u128_via_u64(self)
+    }
+
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+        impls::try_fill_via_u32(self, dest)
+    }
 }
 
 impl Rng for MockAddRng<u64> {
@@ -60,6 +71,14 @@ impl Rng for MockAddRng<u64> {
         self.v += self.a;
         result
     }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        impls::next_u128_via_u64(self)
+    }
+
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+        impls::try_fill_via_u32(self, dest)
+    }
 }
 
 impl<T> SeedableRng<T> for MockAddRng<T> where
diff --git a/src/lib.rs b/src/lib.rs
index 91c829ae0a1..53b2216a45e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -404,15 +404,19 @@ pub struct StdRng {
 }
 
 impl Rng for StdRng {
-    #[inline]
     fn next_u32(&mut self) -> u32 {
         self.rng.next_u32()
     }
-
-    #[inline]
     fn next_u64(&mut self) -> u64 {
         self.rng.next_u64()
     }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        self.rng.next_u128()
+    }
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+        self.rng.try_fill(dest)
+    }
 }
 
 impl SeedFromRng for StdRng {
@@ -424,7 +428,7 @@ impl SeedFromRng for StdRng {
 
 #[cfg(test)]
 mod test {
-    use {Rng, thread_rng, Sample};
+    use {Rng, thread_rng, Sample, Result};
     use rand_core::mock::MockAddRng;
     use distributions::{uniform};
     use distributions::{Uniform, Range, Exp};
@@ -436,10 +440,17 @@ mod test {
 
     impl<R: Rng> Rng for MyRng<R> {
         fn next_u32(&mut self) -> u32 {
-            fn next<T: Rng>(t: &mut T) -> u32 {
-                t.next_u32()
-            }
-            next(&mut self.inner)
+            self.inner.next_u32()
         }
+        fn next_u64(&mut self) -> u64 {
+            self.inner.next_u64()
+        }
+        #[cfg(feature = "i128_support")]
+        fn next_u128(&mut self) -> u128 {
+            self.inner.next_u128()
+        }
+        fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+            self.inner.try_fill(dest)
+        }
     }
 
diff --git a/src/os.rs b/src/os.rs
index 9e46560b9c9..eeb4c13d1eb 100644
--- a/src/os.rs
+++ b/src/os.rs
@@ -55,6 +55,12 @@ impl Rng for OsRng {
         self.try_fill(&mut buf).unwrap_or_else(|e| panic!("try_fill failed: {:?}", e));
         unsafe{ *(buf.as_ptr() as *const u64) }
     }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        let mut buf: [u8; 16] = [0; 16];
+        self.try_fill(&mut buf).unwrap_or_else(|e| panic!("try_fill failed: {:?}", e));
+        unsafe{ *(buf.as_ptr() as *const u128) }
+    }
     fn try_fill(&mut self, v: &mut [u8]) -> Result<()> {
         self.0.try_fill(v)
     }
diff --git a/src/prng/chacha.rs b/src/prng/chacha.rs
index 0b0a83b6c1f..2f0a5f78794 100644
--- a/src/prng/chacha.rs
+++ b/src/prng/chacha.rs
@@ -198,6 +198,14 @@ impl Rng for ChaChaRng {
         value.0
     }
 
+    fn next_u64(&mut self) -> u64 {
+        ::rand_core::impls::next_u64_via_u32(self)
+    }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        ::rand_core::impls::next_u128_via_u64(self)
+    }
+
     // Custom implementation allowing larger reads from buffer is about 8%
     // faster than default implementation in my tests
     fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
diff --git a/src/prng/isaac.rs b/src/prng/isaac.rs
index f7d206c629f..2e866028552 100644
--- a/src/prng/isaac.rs
+++ b/src/prng/isaac.rs
@@ -222,27 +222,16 @@ impl Rng for IsaacRng {
         self.rsl[(self.cnt % RAND_SIZE) as usize].0
     }
 
-    // Default impl adjusted for native byte size; approx 18% faster in tests
+    fn next_u64(&mut self) -> u64 {
+        ::rand_core::impls::next_u64_via_u32(self)
+    }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        ::rand_core::impls::next_u128_via_u64(self)
+    }
+
     fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
-        use core::intrinsics::transmute;
-
-        let mut left = dest;
-        while left.len() >= 4 {
-            let (l, r) = {left}.split_at_mut(4);
-            left = r;
-            let chunk: [u8; 4] = unsafe {
-                transmute(self.next_u32().to_le())
-            };
-            l.copy_from_slice(&chunk);
-        }
-        let n = left.len();
-        if n > 0 {
-            let chunk: [u8; 4] = unsafe {
-                transmute(self.next_u32().to_le())
-            };
-            left.copy_from_slice(&chunk[..n]);
-        }
-        Ok(())
+        ::rand_core::impls::try_fill_via_u32(self, dest)
     }
 }
 
@@ -492,6 +481,14 @@ impl Rng for Isaac64Rng {
         debug_assert!(self.cnt < RAND_SIZE_64);
         self.rsl[(self.cnt % RAND_SIZE_64) as usize].0
     }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        ::rand_core::impls::next_u128_via_u64(self)
+    }
+
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+        ::rand_core::impls::try_fill_via_u64(self, dest)
+    }
 }
 
 impl SeedFromRng for Isaac64Rng {
diff --git a/src/prng/xorshift.rs b/src/prng/xorshift.rs
index dbfa24a26a0..1e2d6cca252 100644
--- a/src/prng/xorshift.rs
+++ b/src/prng/xorshift.rs
@@ -75,6 +75,18 @@ impl Rng for XorShiftRng {
         self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
         self.w.0
     }
+
+    fn next_u64(&mut self) -> u64 {
+        ::rand_core::impls::next_u64_via_u32(self)
+    }
+    #[cfg(feature = "i128_support")]
+    fn next_u128(&mut self) -> u128 {
+        ::rand_core::impls::next_u128_via_u64(self)
+    }
+
+    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
+        ::rand_core::impls::try_fill_via_u32(self, dest)
+    }
 }
 
 impl SeedableRng<[u32; 4]> for XorShiftRng {
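
Usage sketch (not part of the patch above): a downstream generator implements whichever method is native to it and delegates the rest of the `Rng` trait to the new `impls` helpers, mirroring what `MockAddRng`, `XorShiftRng` and `IsaacRng` do in the hunks above. `CountingRng` below is a hypothetical example invented for illustration; it assumes only the `rand_core` items introduced or exposed by this patch (`Rng`, `Result`, and the `impls` module).

// A minimal, hypothetical example: a plain counter masquerading as an RNG.
// Only `next_u32` is implemented natively; everything else is delegated to
// `rand_core::impls`, which composes values in little-endian order.
extern crate rand_core;

use rand_core::{impls, Rng, Result};

struct CountingRng {
    state: u32,
}

impl Rng for CountingRng {
    fn next_u32(&mut self) -> u32 {
        self.state = self.state.wrapping_add(1);
        self.state
    }

    // Returns (y << 32) | x, where x is the u32 generated first.
    fn next_u64(&mut self) -> u64 {
        impls::next_u64_via_u32(self)
    }

    #[cfg(feature = "i128_support")]
    fn next_u128(&mut self) -> u128 {
        impls::next_u128_via_u64(self)
    }

    // Fills `dest` in 4-byte little-endian chunks drawn from `next_u32`.
    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
        impls::try_fill_via_u32(self, dest)
    }
}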