Skip to content

Commit

Permalink
Add rand_core::impls module; remove all default impls of Rng functions
Browse files Browse the repository at this point in the history
This is a controversial change. The argument *for* is that it prevents RNG
wrappers from accidentally implementing methods incorrectly via the default
impls (since it is valid to throw away bits, e.g. "self.next_u64() as u32",
this affects output, not just performance). The argument *against* is that it
complicates Rng implementations.
  • Loading branch information
dhardy committed Sep 12, 2017
1 parent 0ee54b1 commit dd1241a
Show file tree
Hide file tree
Showing 8 changed files with 166 additions and 76 deletions.
78 changes: 78 additions & 0 deletions rand_core/src/impls.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
// Copyright 2013-2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Helper functions for implementing `Rng` functions.
//!
//! For cross-platform reproducibility, these functions all use Little Endian:
//! least-significant part first. For example, `next_u64_via_u32` takes `u32`
//! values `x, y`, then outputs `(y << 32) | x`. To implement `next_u32`
//! from `next_u64` in little-endian order, one should use `next_u64() as u32`.
//!
//! Byte-swapping (like the std `to_le` functions) is only needed to convert
//! to/from byte sequences, and since its purpose is reproducibility,
//! non-reproducible sources (e.g. `OsRng`) need not bother with it.

use core::intrinsics::transmute;
use {Rng, Result};

/// Implement `next_u64` via `next_u32`, little-endian order.
///
/// The first generated `u32` becomes the low half of the result, the
/// second the high half, so output is reproducible across platforms.
pub fn next_u64_via_u32<R: Rng+?Sized>(rng: &mut R) -> u64 {
    // Generation order matters for reproducibility: low word first (LE).
    let low = u64::from(rng.next_u32());
    let high = u64::from(rng.next_u32());
    (high << 32) | low
}

/// Implement `next_u128` via `next_u64`, little-endian order.
///
/// The first generated `u64` becomes the low half of the result, the
/// second the high half, so output is reproducible across platforms.
#[cfg(feature = "i128_support")]
pub fn next_u128_via_u64<R: Rng+?Sized>(rng: &mut R) -> u128 {
    // Generation order matters for reproducibility: low word first (LE).
    let low = rng.next_u64() as u128;
    let high = rng.next_u64() as u128;
    (high << 64) | low
}

// Implement `try_fill` by filling `$dest` with consecutive outputs of
// `$rng.$next_u()`, where each output is `$BYTES` bytes wide, converted
// to little-endian byte order. A trailing partial chunk still consumes
// one whole RNG output and discards its unused tail bytes.
macro_rules! try_fill_via {
    ($rng:ident, $next_u:ident, $BYTES:expr, $dest:ident) => {{
        let mut left = $dest;
        // Copy whole $BYTES-sized chunks while they fit.
        while left.len() >= $BYTES {
            // `{left}` moves the slice out of the variable so it can be
            // split and the remainder reassigned without a borrow conflict.
            let (l, r) = {left}.split_at_mut($BYTES);
            left = r;
            // SAFETY: transmuting an unsigned integer to a byte array of
            // the same size ($BYTES) is sound; `to_le` fixes byte order.
            let chunk: [u8; $BYTES] = unsafe {
                transmute($rng.$next_u().to_le())
            };
            l.copy_from_slice(&chunk);
        }
        let n = left.len();
        if n > 0 {
            // Partial tail: generate one more value and use its first
            // n bytes (the low-order bytes in LE).
            let chunk: [u8; $BYTES] = unsafe {
                transmute($rng.$next_u().to_le())
            };
            left.copy_from_slice(&chunk[..n]);
        }
        Ok(())
    }}
}

/// Implement `try_fill` via `next_u32`, little-endian order.
///
/// Consumes one `u32` (4 bytes) per chunk; a trailing partial chunk
/// still consumes a whole `next_u32` output.
pub fn try_fill_via_u32<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
    try_fill_via!(rng, next_u32, 4, dest)
}

/// Implement `try_fill` via `next_u64`, little-endian order.
///
/// Consumes one `u64` (8 bytes) per chunk; a trailing partial chunk
/// still consumes a whole `next_u64` output.
pub fn try_fill_via_u64<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
    try_fill_via!(rng, next_u64, 8, dest)
}

/// Implement `try_fill` via `next_u128`, little-endian order.
///
/// Consumes one `u128` (16 bytes) per chunk; a trailing partial chunk
/// still consumes a whole `next_u128` output.
#[cfg(feature = "i128_support")]
pub fn try_fill_via_u128<R: Rng+?Sized>(rng: &mut R, dest: &mut [u8]) -> Result<()> {
    try_fill_via!(rng, next_u128, 16, dest)
}
53 changes: 6 additions & 47 deletions rand_core/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@
#[cfg(feature="std")]
extern crate core;

pub mod impls;
pub mod mock;


Expand Down Expand Up @@ -75,63 +76,21 @@ pub trait Rng {
fn next_u32(&mut self) -> u32;

/// Return the next random u64.
///
/// By default this is implemented in terms of `next_u32` in LE order. An
/// implementation of this trait must provide at least one of
/// these two methods. Similarly to `next_u32`, this rarely needs
/// to be called directly, prefer `r.gen()` to `r.next_u64()`.
fn next_u64(&mut self) -> u64 {
// Use LE; we explicitly generate one value before the next.
let x = self.next_u32() as u64;
let y = self.next_u32() as u64;
(y << 32) | x
}
fn next_u64(&mut self) -> u64;

#[cfg(feature = "i128_support")]
/// Return the next random u128.
///
/// By default this is implemented in terms of `next_u64` in LE order.
fn next_u128(&mut self) -> u128 {
// Use LE; we explicitly generate one value before the next.
let x = self.next_u64() as u128;
let y = self.next_u64() as u128;
(y << 64) | x
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128;

/// Fill `dest` entirely with random data.
///
/// This has a default implementation in terms of `next_u64` in LE order,
/// but should be overridden by implementations that
/// offer a more efficient solution than just calling `next_u64`
/// repeatedly.
///
/// This method does *not* have any requirement on how much of the
/// generated random number stream is consumed; e.g. the default
/// generated random number stream is consumed; e.g. `try_fill_via_u64`
/// implementation uses `next_u64` thus consuming 8 bytes even when only
/// 1 is required. A different implementation might use `next_u32` and
/// only consume 4 bytes; *however* any change affecting *reproducibility*
/// of output must be considered a breaking change.
fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
use core::intrinsics::transmute;

let mut left = dest;
while left.len() >= 8 {
let (l, r) = {left}.split_at_mut(8);
left = r;
let chunk: [u8; 8] = unsafe {
transmute(self.next_u64().to_le())
};
l.copy_from_slice(&chunk);
}
let n = left.len();
if n > 0 {
let chunk: [u8; 8] = unsafe {
transmute(self.next_u64().to_le())
};
left.copy_from_slice(&chunk[..n]);
}
Ok(())
}
fn try_fill(&mut self, dest: &mut [u8]) -> Result<()>;
}

#[cfg(feature="std")]
Expand Down
21 changes: 20 additions & 1 deletion rand_core/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
//! Instead maybe this should be yet another crate? Or just leave it here?

use core::num::Wrapping as w;
use {Rng, SeedableRng};
use {Rng, SeedableRng, impls, Result};

/// A simple implementation of `Rng`, purely for testing.
/// Returns an arithmetic sequence (i.e. adds a constant each step).
Expand Down Expand Up @@ -49,6 +49,17 @@ impl Rng for MockAddRng<u32> {
self.v += self.a;
result
}
fn next_u64(&mut self) -> u64 {
    // Native output is u32: combine two outputs in little-endian order.
    impls::next_u64_via_u32(self)
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Combine two u64 outputs (each built from two u32s) in LE order.
    impls::next_u128_via_u64(self)
}

fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
    // Fill 4 bytes at a time from the generator's native u32 output.
    impls::try_fill_via_u32(self, dest)
}
}

impl Rng for MockAddRng<u64> {
Expand All @@ -60,6 +71,14 @@ impl Rng for MockAddRng<u64> {
self.v += self.a;
result
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Combine two native u64 outputs in little-endian order.
    impls::next_u128_via_u64(self)
}

fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
    // This generator's native output is u64; fill 8 bytes per output.
    // Using `try_fill_via_u32` here would route through `next_u32`
    // (discarding half of each generated u64) and make `try_fill`
    // inconsistent with `next_u64` — exactly the reproducibility hazard
    // this commit's message warns about.
    impls::try_fill_via_u64(self, dest)
}
}

impl<T> SeedableRng<T> for MockAddRng<T> where
Expand Down
27 changes: 19 additions & 8 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -404,15 +404,19 @@ pub struct StdRng {
}

impl Rng for StdRng {
    // StdRng is a thin wrapper: every method forwards to the wrapped
    // platform-selected RNG, so its output stream is identical to the
    // inner generator's.
    #[inline]
    fn next_u32(&mut self) -> u32 {
        self.rng.next_u32()
    }

    #[inline]
    fn next_u64(&mut self) -> u64 {
        self.rng.next_u64()
    }
    #[cfg(feature = "i128_support")]
    fn next_u128(&mut self) -> u128 {
        self.rng.next_u128()
    }
    fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
        self.rng.try_fill(dest)
    }
}

impl SeedFromRng for StdRng {
Expand All @@ -424,7 +428,7 @@ impl SeedFromRng for StdRng {

#[cfg(test)]
mod test {
use {Rng, thread_rng, Sample};
use {Rng, thread_rng, Sample, Result};
use rand_core::mock::MockAddRng;
use distributions::{uniform};
use distributions::{Uniform, Range, Exp};
Expand All @@ -436,10 +440,17 @@ mod test {

impl<R: Rng+?Sized> Rng for MyRng<R> {
fn next_u32(&mut self) -> u32 {
fn next<T: Rng+?Sized>(t: &mut T) -> u32 {
t.next_u32()
}
next(&mut self.inner)
self.inner.next_u32()
}
fn next_u64(&mut self) -> u64 {
    // Forward to the wrapped RNG unchanged.
    self.inner.next_u64()
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Forward to the wrapped RNG unchanged.
    self.inner.next_u128()
}
fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
    // Forward to the wrapped RNG unchanged.
    self.inner.try_fill(dest)
}
}

Expand Down
6 changes: 6 additions & 0 deletions src/os.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,12 @@ impl Rng for OsRng {
self.try_fill(&mut buf).unwrap_or_else(|e| panic!("try_fill failed: {:?}", e));
unsafe{ *(buf.as_ptr() as *const u64) }
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Fill a 16-byte buffer from the OS source, then reinterpret it.
    let mut buf: [u8; 16] = [0; 16];
    self.try_fill(&mut buf).unwrap_or_else(|e| panic!("try_fill failed: {:?}", e));
    // SAFETY: `buf` is exactly 16 initialized bytes. A `[u8; 16]` has
    // alignment 1, so dereferencing `buf.as_ptr() as *const u128`
    // (alignment 16) would be undefined behavior; `read_unaligned`
    // performs the same byte-for-byte read without the alignment
    // requirement. OsRng is non-reproducible, so native byte order is
    // fine (see rand_core::impls module docs).
    unsafe { ::std::ptr::read_unaligned(buf.as_ptr() as *const u128) }
}
fn try_fill(&mut self, v: &mut [u8]) -> Result<()> {
self.0.try_fill(v)
}
Expand Down
8 changes: 8 additions & 0 deletions src/prng/chacha.rs
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,14 @@ impl Rng for ChaChaRng {
value.0
}

fn next_u64(&mut self) -> u64 {
    // ChaCha natively emits u32 words; combine two in LE order.
    ::rand_core::impls::next_u64_via_u32(self)
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Combine two u64 outputs (each built from two u32s) in LE order.
    ::rand_core::impls::next_u128_via_u64(self)
}

// Custom implementation allowing larger reads from buffer is about 8%
// faster than default implementation in my tests
fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
Expand Down
37 changes: 17 additions & 20 deletions src/prng/isaac.rs
Original file line number Diff line number Diff line change
Expand Up @@ -222,27 +222,16 @@ impl Rng for IsaacRng {
self.rsl[(self.cnt % RAND_SIZE) as usize].0
}

// Default impl adjusted for native byte size; approx 18% faster in tests
fn next_u64(&mut self) -> u64 {
::rand_core::impls::next_u64_via_u32(self)
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
::rand_core::impls::next_u128_via_u64(self)
}

fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
use core::intrinsics::transmute;

let mut left = dest;
while left.len() >= 4 {
let (l, r) = {left}.split_at_mut(4);
left = r;
let chunk: [u8; 4] = unsafe {
transmute(self.next_u32().to_le())
};
l.copy_from_slice(&chunk);
}
let n = left.len();
if n > 0 {
let chunk: [u8; 4] = unsafe {
transmute(self.next_u32().to_le())
};
left.copy_from_slice(&chunk[..n]);
}
Ok(())
::rand_core::impls::try_fill_via_u32(self, dest)
}
}

Expand Down Expand Up @@ -492,6 +481,14 @@ impl Rng for Isaac64Rng {
debug_assert!(self.cnt < RAND_SIZE_64);
self.rsl[(self.cnt % RAND_SIZE_64) as usize].0
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Combine two native u64 outputs in little-endian order.
    ::rand_core::impls::next_u128_via_u64(self)
}

fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
    // Fill 8 bytes at a time from the generator's native u64 output.
    ::rand_core::impls::try_fill_via_u64(self, dest)
}
}

impl SeedFromRng for Isaac64Rng {
Expand Down
12 changes: 12 additions & 0 deletions src/prng/xorshift.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,18 @@ impl Rng for XorShiftRng {
self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
self.w.0
}

fn next_u64(&mut self) -> u64 {
    // XorShift natively emits u32 words; combine two in LE order.
    ::rand_core::impls::next_u64_via_u32(self)
}
#[cfg(feature = "i128_support")]
fn next_u128(&mut self) -> u128 {
    // Combine two u64 outputs (each built from two u32s) in LE order.
    ::rand_core::impls::next_u128_via_u64(self)
}

fn try_fill(&mut self, dest: &mut [u8]) -> Result<()> {
    // Fill 4 bytes at a time from the generator's native u32 output.
    ::rand_core::impls::try_fill_via_u32(self, dest)
}
}

impl SeedableRng<[u32; 4]> for XorShiftRng {
Expand Down

0 comments on commit dd1241a

Please sign in to comment.