Implement BlockRngCore for ISAAC and ISAAC-64 #325
@@ -27,6 +27,8 @@ use core::cmp::min;
use core::mem::size_of;
use {RngCore, BlockRngCore, CryptoRng, SeedableRng, Error};

#[cfg(feature="serde-1")] use serde::{Serialize, Deserialize};

/// Implement `next_u64` via `next_u32`, little-endian order.
pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    // Use LE; we explicitly generate one value before the next.
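The rest of `next_u64_via_u32` lies outside this hunk; per the comment above it composes two `u32` draws in little-endian order. A minimal sketch of that composition (the helper name is illustrative, not part of the diff):

```rust
// Sketch of the little-endian composition; the first draw becomes the low half.
fn compose_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
    let x = u64::from(rng.next_u32()); // generated first, low 32 bits
    let y = u64::from(rng.next_u32()); // generated second, high 32 bits
    (y << 32) | x
}
```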
@@ -184,7 +186,11 @@ pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
/// [`RngCore`]: ../RngCore.t.html
/// [`SeedableRng`]: ../SeedableRng.t.html
#[derive(Clone)]
#[cfg_attr(feature="serde-1", derive(Serialize, Deserialize))]
pub struct BlockRng<R: BlockRngCore + ?Sized> {
    #[cfg_attr(feature="serde-1", serde(bound(
        serialize = "R::Results: Serialize",
        deserialize = "R::Results: Deserialize<'de>")))]
    pub results: R::Results,
    pub index: usize,
    pub core: R,
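The field-level `serde(bound(...))` is there because the derived impls would otherwise infer bounds on the type parameter `R` itself rather than on the associated type `R::Results`. A small self-contained sketch of the same pattern, with illustrative names (`Core`, `Buffered`) that are not part of this diff:

```rust
use serde::{Serialize, Deserialize};

trait Core {
    type Results;
}

#[derive(Serialize, Deserialize)]
struct Buffered<C: Core> {
    // Override the inferred bounds: only `C::Results` needs to be
    // serializable, not `C` itself.
    #[serde(bound(
        serialize = "C::Results: Serialize",
        deserialize = "C::Results: Deserialize<'de>"))]
    results: C::Results,
    index: usize,
}
```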
@@ -335,6 +341,171 @@ impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng<R> {
    }
}


/// Wrapper around PRNGs that implement [`BlockRngCore`] to keep a results
/// buffer and offer the methods from [`RngCore`].
///
/// This is similar to [`BlockRng`], but specialized for algorithms that operate
/// on `u64` values.
///
/// [`BlockRngCore`]: ../BlockRngCore.t.html
/// [`RngCore`]: ../RngCore.t.html
/// [`BlockRng`]: struct.BlockRng.html
#[derive(Clone)]
#[cfg_attr(feature="serde-1", derive(Serialize, Deserialize))]
pub struct BlockRng64<R: BlockRngCore + ?Sized> {
    #[cfg_attr(feature="serde-1", serde(bound(
        serialize = "R::Results: Serialize",
        deserialize = "R::Results: Deserialize<'de>")))]
    pub results: R::Results,
    pub index: usize,
Review comment: Comment that 0 is never stored?
    pub half_used: bool, // true if only half of the previous result is used
Review comment: I think …
Reply: Good catch. I think it only happened to pass the tests because it then generates a new set of results in between, which resets … I don't like to change the logic at the moment, t.b.h.
Reply: I don't think tests could catch this anyway; there's no memory unsafety, just the possibility of skipping one word of output and using another twice.
Reply: Oops, I thought we tested for it in …
    pub core: R,
}
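To make the fields concrete, here is a minimal sketch of a core that `BlockRng64` could wrap. `CountingCore64` and its counter-based output are purely illustrative (not part of this PR); the sketch only assumes the `BlockRngCore` shape used in this diff: an `Item` type, a `Results` buffer implementing `AsRef` and `Default`, and a `generate` method.

```rust
// Hypothetical example core: fills its buffer with an incrementing counter,
// producing 8 u64 values per generate() call.
#[derive(Clone, Debug)]
struct CountingCore64 {
    counter: u64,
}

impl BlockRngCore for CountingCore64 {
    type Item = u64;
    type Results = [u64; 8];

    fn generate(&mut self, results: &mut Self::Results) {
        for r in results.iter_mut() {
            self.counter = self.counter.wrapping_add(1);
            *r = self.counter;
        }
    }
}
```

A `BlockRng64` wrapping such a core hands the buffered words out through `next_u32`/`next_u64`/`fill_bytes`, calling `generate` again whenever `index` reaches the end of `results`.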

// Custom Debug implementation that does not expose the contents of `results`.
impl<R: BlockRngCore + fmt::Debug> fmt::Debug for BlockRng64<R> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BlockRng64")
           .field("core", &self.core)
           .field("result_len", &self.results.as_ref().len())
           .field("index", &self.index)
           .field("half_used", &self.half_used)
           .finish()
    }
}

impl<R: BlockRngCore<Item=u64>> RngCore for BlockRng64<R>
where <R as BlockRngCore>::Results: AsRef<[u64]>
{
    #[inline(always)]
    fn next_u32(&mut self) -> u32 {
        let mut index = self.index * 2 - self.half_used as usize;
        if index >= self.results.as_ref().len() * 2 {
            self.core.generate(&mut self.results);
            self.index = 0;
            // `self.half_used` is by definition `false`
            self.half_used = false;
            index = 0;
        }

        self.half_used = !self.half_used;
        self.index += self.half_used as usize;

        // Index as if this is a u32 slice.
        unsafe {
            let results =
                &*(self.results.as_ref() as *const [u64] as *const [u32]);
            if cfg!(target_endian = "little") {
                *results.get_unchecked(index)
            } else {
                *results.get_unchecked(index ^ 1)
            }
        }
    }
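The `index`/`half_used` bookkeeping above may be easier to follow with a short trace. Assuming a two-element results buffer on a little-endian target (the values are made up for illustration):

```rust
// results = [0x1111_1111_2222_2222, 0x3333_3333_4444_4444], index = 0, half_used = false
//
// next_u32() #1: local index = 0*2 - 0 = 0; half_used -> true,  self.index -> 1
//                returns 0x2222_2222 (low half of results[0] on LE)
// next_u32() #2: local index = 1*2 - 1 = 1; half_used -> false, self.index stays 1
//                returns 0x1111_1111 (high half of results[0])
// next_u32() #3: local index = 1*2 - 0 = 2; half_used -> true,  self.index -> 2
//                returns 0x4444_4444 (low half of results[1])
// next_u32() #4: local index = 2*2 - 1 = 3; half_used -> false, self.index stays 2
//                returns 0x3333_3333 (high half of results[1])
// next_u32() #5: local index = 2*2 - 0 = 4 >= len()*2, so generate() refills the buffer.
```

Note that a `next_u64` call made while `half_used` is true moves on to the next whole `u64` and clears `half_used`, so the unread high half of the current word is skipped; this is the behaviour discussed in the review comments above.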

    #[inline(always)]
    fn next_u64(&mut self) -> u64 {
        if self.index >= self.results.as_ref().len() {
            self.core.generate(&mut self.results);
            self.index = 0;
        }

        let value = self.results.as_ref()[self.index];
        self.index += 1;
        self.half_used = false;
        value
    }

    // As an optimization we try to write directly into the output buffer.
    // This is only enabled for little-endian platforms where unaligned writes
    // are known to be safe and fast.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let mut filled = 0;

        // Continue filling from the current set of results
        if self.index < self.results.as_ref().len() {
            let (consumed_u64, filled_u8) =
                fill_via_u64_chunks(&self.results.as_ref()[self.index..],
                                    dest);

            self.index += consumed_u64;
            filled += filled_u8;
        }

        let len_remainder =
            (dest.len() - filled) % (self.results.as_ref().len() * 8);
        let end_direct = dest.len() - len_remainder;

        while filled < end_direct {
            let dest_u64: &mut R::Results = unsafe {
                ::core::mem::transmute(dest[filled..].as_mut_ptr())
            };
            self.core.generate(dest_u64);
            filled += self.results.as_ref().len() * 8;
        }
        self.index = self.results.as_ref().len();

        if len_remainder > 0 {
            self.core.generate(&mut self.results);
            let (consumed_u64, _) =
                fill_via_u64_chunks(&mut self.results.as_ref(),
                                    &mut dest[filled..]);

            self.index = consumed_u64;
        }
    }
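As the variable names suggest, `fill_via_u64_chunks` reports the number of source `u64` words it consumed and the number of destination bytes it wrote. A short accounting example for the direct-write path above, with made-up sizes (an 8-word results buffer, i.e. 64 bytes, and a 150-byte `dest`, with the buffered words already exhausted):

```rust
// filled        = 0                   // nothing left in `results` to copy
// len_remainder = 150 % 64 = 22       // tail that cannot take a whole block
// end_direct    = 150 - 22 = 128      // bytes generated straight into `dest`
//
// The while loop runs twice (filled: 0 -> 64 -> 128), treating `dest` itself
// as the Results buffer via the transmute; the final 22 bytes are then copied
// out of a freshly generated `results`, and `index` records how many of its
// words were consumed so later calls continue from there.
```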

    #[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
    fn fill_bytes(&mut self, dest: &mut [u8]) {
        let mut read_len = 0;
        while read_len < dest.len() {
            if self.index as usize >= self.results.as_ref().len() {
                self.core.generate(&mut self.results);
                self.index = 0;
                self.half_used = false;
            }

            let (consumed_u64, filled_u8) =
                fill_via_u64_chunks(&self.results.as_ref()[self.index as usize..],
                                    &mut dest[read_len..]);

            self.index += consumed_u64;
            read_len += filled_u8;
        }
    }

    fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
        Ok(self.fill_bytes(dest))
    }
}

impl<R: BlockRngCore + SeedableRng> SeedableRng for BlockRng64<R> {
    type Seed = R::Seed;

    fn from_seed(seed: Self::Seed) -> Self {
        let results_empty = R::Results::default();
        Self {
            core: R::from_seed(seed),
            index: results_empty.as_ref().len(), // generate on first use
            half_used: false,
            results: results_empty,
        }
    }

    fn from_rng<S: RngCore>(rng: S) -> Result<Self, Error> {
        let results_empty = R::Results::default();
        Ok(Self {
            core: R::from_rng(rng)?,
            index: results_empty.as_ref().len(), // generate on first use
            half_used: false,
            results: results_empty,
        })
    }
}

impl<R: BlockRngCore + CryptoRng> CryptoRng for BlockRng<R> {}

// TODO: implement tests for the above
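Regarding the TODO: a rough sketch of what a first test could look like, using a throwaway counting core in the spirit of the `CountingCore64` sketch above. None of this is in the PR; it only exercises the code in this diff under the stated trait assumptions.

```rust
#[cfg(test)]
mod block_rng64_tests {
    use super::BlockRng64;
    use {RngCore, BlockRngCore};

    // Throwaway test core: fills its 4-word buffer with 1, 2, 3, ...
    #[derive(Clone)]
    struct TestCore {
        counter: u64,
    }

    impl BlockRngCore for TestCore {
        type Item = u64;
        type Results = [u64; 4];

        fn generate(&mut self, results: &mut Self::Results) {
            for r in results.iter_mut() {
                self.counter += 1;
                *r = self.counter;
            }
        }
    }

    #[test]
    fn block_rng64_consistency() {
        let mut rng = BlockRng64 {
            core: TestCore { counter: 0 },
            results: [0u64; 4],
            index: 4, // force a generate() on first use
            half_used: false,
        };

        // First u64 comes from the freshly generated block.
        assert_eq!(rng.next_u64(), 1);

        if cfg!(target_endian = "little") {
            // next_u32 hands out the low half of the next buffered word.
            assert_eq!(rng.next_u32(), 2);
        }

        // fill_bytes continues from wherever the buffer left off.
        let mut buf = [0u8; 16];
        rng.fill_bytes(&mut buf);
    }
}
```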
@@ -44,6 +44,8 @@

#[cfg(feature="std")] extern crate core;
#[cfg(all(feature = "alloc", not(feature="std")))] extern crate alloc;
#[cfg(feature="serde-1")] extern crate serde;
Review comment: I have a weak preference for …
Reply: From what I see I don't like … For … @dhardy What do you think? Does it make sense to use …
Reply: Well if …
Reply: I don't think we did? https://github.com/rust-lang-nursery/rand/blob/0.4/Cargo.toml
Reply: Okay, then we aren't constrained and can use …
#[cfg(feature="serde-1")] #[macro_use] extern crate serde_derive;


use core::default::Default;
Review comment: Public fields should be documented, but I think perhaps we should not make these public (just add necessary accessor fns and a "reset" function).
Reply: But then I have to do things properly... But it really cleans things up, so good idea.
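For illustration, accessors and a reset on `BlockRng64` might look roughly like this if the fields became private; the method names are hypothetical, not something this PR adds:

```rust
impl<R: BlockRngCore<Item=u64>> BlockRng64<R>
where <R as BlockRngCore>::Results: AsRef<[u64]>
{
    /// Read-only access to the wrapped core.
    pub fn core(&self) -> &R {
        &self.core
    }

    /// Forget any buffered results, so the next call to `next_u32`,
    /// `next_u64` or `fill_bytes` generates a fresh block.
    pub fn reset(&mut self) {
        self.index = self.results.as_ref().len();
        self.half_used = false;
    }
}
```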