From 30c92d9a37ca9ba1af8dd64b3702314ba355bfcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 18:47:15 +0300 Subject: [PATCH 01/34] Replace block module with helper functions --- Cargo.toml | 15 -- src/block.rs | 563 --------------------------------------------------- src/le.rs | 243 ---------------------- src/lib.rs | 7 +- src/utils.rs | 496 +++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 499 insertions(+), 825 deletions(-) delete mode 100644 src/block.rs delete mode 100644 src/le.rs create mode 100644 src/utils.rs diff --git a/Cargo.toml b/Cargo.toml index 3f979cc7..2e623bf9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,18 +15,3 @@ categories = ["algorithms", "no-std"] edition = "2024" rust-version = "1.85" exclude = ["/.github"] - -[package.metadata.docs.rs] -# To build locally: -# RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc --all-features --no-deps --open -all-features = true -rustdoc-args = ["--generate-link-to-definition"] - -[package.metadata.playground] -all-features = true - -[features] -serde = ["dep:serde"] # enables serde for BlockRng wrapper - -[dependencies] -serde = { version = "1.0.103", features = ["derive"], optional = true } diff --git a/src/block.rs b/src/block.rs deleted file mode 100644 index 16fc4545..00000000 --- a/src/block.rs +++ /dev/null @@ -1,563 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The `BlockRngCore` trait and implementation helpers -//! -//! The [`BlockRngCore`] trait exists to assist in the implementation of RNGs -//! which generate a block of data in a cache instead of returning generated -//! values directly. -//! -//! 
Usage of this trait is optional, but provides two advantages: -//! implementations only need to concern themselves with generation of the -//! block, not the various [`RngCore`] methods (especially [`fill_bytes`], where -//! the optimal implementations are not trivial), and this allows -//! `ReseedingRng` (see [`rand`](https://docs.rs/rand) crate) perform periodic -//! reseeding with very low overhead. -//! -//! # Example -//! -//! ```no_run -//! use rand_core::{RngCore, SeedableRng}; -//! use rand_core::block::{BlockRngCore, BlockRng}; -//! -//! struct MyRngCore; -//! -//! impl BlockRngCore for MyRngCore { -//! type Item = u32; -//! type Results = [u32; 16]; -//! -//! fn generate(&mut self, results: &mut Self::Results) { -//! unimplemented!() -//! } -//! } -//! -//! impl SeedableRng for MyRngCore { -//! type Seed = [u8; 32]; -//! fn from_seed(seed: Self::Seed) -> Self { -//! unimplemented!() -//! } -//! } -//! -//! // optionally, also implement CryptoBlockRng for MyRngCore -//! -//! // Final RNG. -//! let mut rng = BlockRng::::seed_from_u64(0); -//! println!("First value: {}", rng.next_u32()); -//! ``` -//! -//! [`BlockRngCore`]: crate::block::BlockRngCore -//! [`fill_bytes`]: RngCore::fill_bytes - -use crate::le::fill_via_chunks; -use crate::{CryptoRng, RngCore, SeedableRng, TryRngCore}; -use core::fmt; -#[cfg(feature = "serde")] -use serde::{Deserialize, Serialize}; - -/// A trait for RNGs which do not generate random numbers individually, but in -/// blocks (typically `[u32; N]`). This technique is commonly used by -/// cryptographic RNGs to improve performance. -/// -/// See the [module][crate::block] documentation for details. -pub trait BlockRngCore { - /// Results element type, e.g. `u32`. - type Item; - - /// Results type. This is the 'block' an RNG implementing `BlockRngCore` - /// generates, which will usually be an array like `[u32; 16]`. - type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default; - - /// Generate a new block of results. 
- fn generate(&mut self, results: &mut Self::Results); -} - -/// A marker trait used to indicate that an [`RngCore`] implementation is -/// supposed to be cryptographically secure. -/// -/// See [`CryptoRng`] docs for more information. -pub trait CryptoBlockRng: BlockRngCore {} - -/// A wrapper type implementing [`RngCore`] for some type implementing -/// [`BlockRngCore`] with `u32` array buffer; i.e. this can be used to implement -/// a full RNG from just a `generate` function. -/// -/// The `core` field may be accessed directly but the results buffer may not. -/// PRNG implementations can simply use a type alias -/// (`pub type MyRng = BlockRng;`) but might prefer to use a -/// wrapper type (`pub struct MyRng(BlockRng);`); the latter must -/// re-implement `RngCore` but hides the implementation details and allows -/// extra functionality to be defined on the RNG -/// (e.g. `impl MyRng { fn set_stream(...){...} }`). -/// -/// `BlockRng` has heavily optimized implementations of the [`RngCore`] methods -/// reading values from the results buffer, as well as -/// calling [`BlockRngCore::generate`] directly on the output array when -/// [`fill_bytes`] is called on a large array. These methods also handle -/// the bookkeeping of when to generate a new batch of values. -/// -/// No whole generated `u32` values are thrown away and all values are consumed -/// in-order. [`next_u32`] simply takes the next available `u32` value. -/// [`next_u64`] is implemented by combining two `u32` values, least -/// significant first. [`fill_bytes`] consume a whole number of `u32` values, -/// converting each `u32` to a byte slice in little-endian order. If the requested byte -/// length is not a multiple of 4, some bytes will be discarded. -/// -/// See also [`BlockRng64`] which uses `u64` array buffers. Currently there is -/// no direct support for other buffer types. -/// -/// For easy initialization `BlockRng` also implements [`SeedableRng`]. 
-/// -/// [`next_u32`]: RngCore::next_u32 -/// [`next_u64`]: RngCore::next_u64 -/// [`fill_bytes`]: RngCore::fill_bytes -#[derive(Clone)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -#[cfg_attr( - feature = "serde", - serde( - bound = "for<'x> R: Serialize + Deserialize<'x>, for<'x> R::Results: Serialize + Deserialize<'x>" - ) -)] -pub struct BlockRng { - results: R::Results, - index: usize, - /// The *core* part of the RNG, implementing the `generate` function. - pub core: R, -} - -// Custom Debug implementation that does not expose the contents of `results`. -impl fmt::Debug for BlockRng { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("BlockRng") - .field("core", &self.core) - .field("result_len", &self.results.as_ref().len()) - .field("index", &self.index) - .finish() - } -} - -impl BlockRng { - /// Create a new `BlockRng` from an existing RNG implementing - /// `BlockRngCore`. Results will be generated on first use. - #[inline] - pub fn new(core: R) -> BlockRng { - let results_empty = R::Results::default(); - BlockRng { - core, - index: results_empty.as_ref().len(), - results: results_empty, - } - } - - /// Get the index into the result buffer. - /// - /// If this is equal to or larger than the size of the result buffer then - /// the buffer is "empty" and `generate()` must be called to produce new - /// results. - #[inline(always)] - pub fn index(&self) -> usize { - self.index - } - - /// Reset the number of available results. - /// This will force a new set of results to be generated on next use. - #[inline] - pub fn reset(&mut self) { - self.index = self.results.as_ref().len(); - } - - /// Generate a new set of results immediately, setting the index to the - /// given value. 
- #[inline] - pub fn generate_and_set(&mut self, index: usize) { - assert!(index < self.results.as_ref().len()); - self.core.generate(&mut self.results); - self.index = index; - } -} - -impl> RngCore for BlockRng { - #[inline] - fn next_u32(&mut self) -> u32 { - if self.index >= self.results.as_ref().len() { - self.generate_and_set(0); - } - - let value = self.results.as_ref()[self.index]; - self.index += 1; - value - } - - #[inline] - fn next_u64(&mut self) -> u64 { - let read_u64 = |results: &[u32], index| { - let data = &results[index..=index + 1]; - (u64::from(data[1]) << 32) | u64::from(data[0]) - }; - - let len = self.results.as_ref().len(); - - let index = self.index; - if index < len - 1 { - self.index += 2; - // Read an u64 from the current index - read_u64(self.results.as_ref(), index) - } else if index >= len { - self.generate_and_set(2); - read_u64(self.results.as_ref(), 0) - } else { - let x = u64::from(self.results.as_ref()[len - 1]); - self.generate_and_set(1); - let y = u64::from(self.results.as_ref()[0]); - (y << 32) | x - } - } - - #[inline] - fn fill_bytes(&mut self, dest: &mut [u8]) { - let mut read_len = 0; - while read_len < dest.len() { - if self.index >= self.results.as_ref().len() { - self.generate_and_set(0); - } - let (consumed_u32, filled_u8) = - fill_via_chunks(&self.results.as_mut()[self.index..], &mut dest[read_len..]); - - self.index += consumed_u32; - read_len += filled_u8; - } - } -} - -impl SeedableRng for BlockRng { - type Seed = R::Seed; - - #[inline(always)] - fn from_seed(seed: Self::Seed) -> Self { - Self::new(R::from_seed(seed)) - } - - #[inline(always)] - fn seed_from_u64(seed: u64) -> Self { - Self::new(R::seed_from_u64(seed)) - } - - #[inline(always)] - fn from_rng(rng: &mut S) -> Self { - Self::new(R::from_rng(rng)) - } - - #[inline(always)] - fn try_from_rng(rng: &mut S) -> Result { - R::try_from_rng(rng).map(Self::new) - } -} - -impl> CryptoRng for BlockRng {} - -/// A wrapper type implementing [`RngCore`] for some 
type implementing -/// [`BlockRngCore`] with `u64` array buffer; i.e. this can be used to implement -/// a full RNG from just a `generate` function. -/// -/// This is similar to [`BlockRng`], but specialized for algorithms that operate -/// on `u64` values. -/// -/// No whole generated `u64` values are thrown away and all values are consumed -/// in-order. [`next_u64`] simply takes the next available `u64` value. -/// [`next_u32`] is however a bit special: half of a `u64` is consumed, leaving -/// the other half in the buffer. If the next function called is [`next_u32`] -/// then the other half is then consumed, however both [`next_u64`] and -/// [`fill_bytes`] discard the rest of any half-consumed `u64`s when called. -/// -/// [`fill_bytes`] consumes a whole number of `u64` values. If the requested length -/// is not a multiple of 8, some bytes will be discarded. -/// -/// [`next_u32`]: RngCore::next_u32 -/// [`next_u64`]: RngCore::next_u64 -/// [`fill_bytes`]: RngCore::fill_bytes -#[derive(Clone)] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct BlockRng64 { - results: R::Results, - index: usize, - half_used: bool, // true if only half of the previous result is used - /// The *core* part of the RNG, implementing the `generate` function. - pub core: R, -} - -// Custom Debug implementation that does not expose the contents of `results`. -impl fmt::Debug for BlockRng64 { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt.debug_struct("BlockRng64") - .field("core", &self.core) - .field("result_len", &self.results.as_ref().len()) - .field("index", &self.index) - .field("half_used", &self.half_used) - .finish() - } -} - -impl BlockRng64 { - /// Create a new `BlockRng` from an existing RNG implementing - /// `BlockRngCore`. Results will be generated on first use. 
- #[inline] - pub fn new(core: R) -> BlockRng64 { - let results_empty = R::Results::default(); - BlockRng64 { - core, - index: results_empty.as_ref().len(), - half_used: false, - results: results_empty, - } - } - - /// Get the index into the result buffer. - /// - /// If this is equal to or larger than the size of the result buffer then - /// the buffer is "empty" and `generate()` must be called to produce new - /// results. - #[inline(always)] - pub fn index(&self) -> usize { - self.index - } - - /// Reset the number of available results. - /// This will force a new set of results to be generated on next use. - #[inline] - pub fn reset(&mut self) { - self.index = self.results.as_ref().len(); - self.half_used = false; - } - - /// Generate a new set of results immediately, setting the index to the - /// given value. - #[inline] - pub fn generate_and_set(&mut self, index: usize) { - assert!(index < self.results.as_ref().len()); - self.core.generate(&mut self.results); - self.index = index; - self.half_used = false; - } -} - -impl> RngCore for BlockRng64 { - #[inline] - fn next_u32(&mut self) -> u32 { - let mut index = self.index - self.half_used as usize; - if index >= self.results.as_ref().len() { - self.core.generate(&mut self.results); - self.index = 0; - index = 0; - // `self.half_used` is by definition `false` - self.half_used = false; - } - - let shift = 32 * (self.half_used as usize); - - self.half_used = !self.half_used; - self.index += self.half_used as usize; - - (self.results.as_ref()[index] >> shift) as u32 - } - - #[inline] - fn next_u64(&mut self) -> u64 { - if self.index >= self.results.as_ref().len() { - self.core.generate(&mut self.results); - self.index = 0; - } - - let value = self.results.as_ref()[self.index]; - self.index += 1; - self.half_used = false; - value - } - - #[inline] - fn fill_bytes(&mut self, dest: &mut [u8]) { - let mut read_len = 0; - self.half_used = false; - while read_len < dest.len() { - if self.index >= 
self.results.as_ref().len() { - self.core.generate(&mut self.results); - self.index = 0; - } - - let (consumed_u64, filled_u8) = - fill_via_chunks(&self.results.as_mut()[self.index..], &mut dest[read_len..]); - - self.index += consumed_u64; - read_len += filled_u8; - } - } -} - -impl SeedableRng for BlockRng64 { - type Seed = R::Seed; - - #[inline(always)] - fn from_seed(seed: Self::Seed) -> Self { - Self::new(R::from_seed(seed)) - } - - #[inline(always)] - fn seed_from_u64(seed: u64) -> Self { - Self::new(R::seed_from_u64(seed)) - } - - #[inline(always)] - fn from_rng(rng: &mut S) -> Self { - Self::new(R::from_rng(rng)) - } - - #[inline(always)] - fn try_from_rng(rng: &mut S) -> Result { - R::try_from_rng(rng).map(Self::new) - } -} - -impl> CryptoRng for BlockRng64 {} - -#[cfg(test)] -mod test { - use crate::block::{BlockRng, BlockRng64, BlockRngCore}; - use crate::{RngCore, SeedableRng}; - - #[derive(Debug, Clone)] - struct DummyRng { - counter: u32, - } - - impl BlockRngCore for DummyRng { - type Item = u32; - type Results = [u32; 16]; - - fn generate(&mut self, results: &mut Self::Results) { - for r in results { - *r = self.counter; - self.counter = self.counter.wrapping_add(3511615421); - } - } - } - - impl SeedableRng for DummyRng { - type Seed = [u8; 4]; - - fn from_seed(seed: Self::Seed) -> Self { - DummyRng { - counter: u32::from_le_bytes(seed), - } - } - } - - #[test] - fn blockrng_next_u32_vs_next_u64() { - let mut rng1 = BlockRng::::from_seed([1, 2, 3, 4]); - let mut rng2 = rng1.clone(); - let mut rng3 = rng1.clone(); - - let mut a = [0; 16]; - a[..4].copy_from_slice(&rng1.next_u32().to_le_bytes()); - a[4..12].copy_from_slice(&rng1.next_u64().to_le_bytes()); - a[12..].copy_from_slice(&rng1.next_u32().to_le_bytes()); - - let mut b = [0; 16]; - b[..4].copy_from_slice(&rng2.next_u32().to_le_bytes()); - b[4..8].copy_from_slice(&rng2.next_u32().to_le_bytes()); - b[8..].copy_from_slice(&rng2.next_u64().to_le_bytes()); - assert_eq!(a, b); - - let mut c = [0; 
16]; - c[..8].copy_from_slice(&rng3.next_u64().to_le_bytes()); - c[8..12].copy_from_slice(&rng3.next_u32().to_le_bytes()); - c[12..].copy_from_slice(&rng3.next_u32().to_le_bytes()); - assert_eq!(a, c); - } - - #[derive(Debug, Clone)] - struct DummyRng64 { - counter: u64, - } - - impl BlockRngCore for DummyRng64 { - type Item = u64; - type Results = [u64; 8]; - - fn generate(&mut self, results: &mut Self::Results) { - for r in results { - *r = self.counter; - self.counter = self.counter.wrapping_add(2781463553396133981); - } - } - } - - impl SeedableRng for DummyRng64 { - type Seed = [u8; 8]; - - fn from_seed(seed: Self::Seed) -> Self { - DummyRng64 { - counter: u64::from_le_bytes(seed), - } - } - } - - #[test] - fn blockrng64_next_u32_vs_next_u64() { - let mut rng1 = BlockRng64::::from_seed([1, 2, 3, 4, 5, 6, 7, 8]); - let mut rng2 = rng1.clone(); - let mut rng3 = rng1.clone(); - - let mut a = [0; 16]; - a[..4].copy_from_slice(&rng1.next_u32().to_le_bytes()); - a[4..12].copy_from_slice(&rng1.next_u64().to_le_bytes()); - a[12..].copy_from_slice(&rng1.next_u32().to_le_bytes()); - - let mut b = [0; 16]; - b[..4].copy_from_slice(&rng2.next_u32().to_le_bytes()); - b[4..8].copy_from_slice(&rng2.next_u32().to_le_bytes()); - b[8..].copy_from_slice(&rng2.next_u64().to_le_bytes()); - assert_ne!(a, b); - assert_eq!(&a[..4], &b[..4]); - assert_eq!(&a[4..12], &b[8..]); - - let mut c = [0; 16]; - c[..8].copy_from_slice(&rng3.next_u64().to_le_bytes()); - c[8..12].copy_from_slice(&rng3.next_u32().to_le_bytes()); - c[12..].copy_from_slice(&rng3.next_u32().to_le_bytes()); - assert_eq!(b, c); - } - - #[test] - fn blockrng64_generate_and_set() { - let mut rng = BlockRng64::::from_seed([1, 2, 3, 4, 5, 6, 7, 8]); - assert_eq!(rng.index(), rng.results.as_ref().len()); - - rng.generate_and_set(5); - assert_eq!(rng.index(), 5); - } - - #[test] - #[should_panic(expected = "index < self.results.as_ref().len()")] - fn blockrng64_generate_and_set_panic() { - let mut rng = 
BlockRng64::::from_seed([1, 2, 3, 4, 5, 6, 7, 8]); - rng.generate_and_set(rng.results.as_ref().len()); - } - - #[test] - fn blockrng_next_u64() { - let mut rng = BlockRng::::from_seed([1, 2, 3, 4]); - let result_size = rng.results.as_ref().len(); - for _i in 0..result_size / 2 - 1 { - rng.next_u64(); - } - rng.next_u32(); - - let _ = rng.next_u64(); - assert_eq!(rng.index(), 1); - } -} diff --git a/src/le.rs b/src/le.rs deleted file mode 100644 index fae444cf..00000000 --- a/src/le.rs +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2018 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Little-Endian utilities -//! -//! For cross-platform reproducibility, Little-Endian order (least-significant -//! part first) has been chosen as the standard for inter-type conversion. -//! For example, ``next_u64_via_u32`] takes `u32` -//! values `x, y`, then outputs `(y << 32) | x`. -//! -//! Byte-swapping (like the std `to_le` functions) is only needed to convert -//! to/from byte sequences, and since its purpose is reproducibility, -//! non-reproducible sources (e.g. `OsRng`) need not bother with it. -//! -//! ### Implementing [`RngCore`] -//! -//! Usually an implementation of [`RngCore`] will implement one of the three -//! methods over its internal source. The following helpers are provided for -//! the remaining implementations. -//! -//! **`fn next_u32`:** -//! - `self.next_u64() as u32` -//! - `(self.next_u64() >> 32) as u32` -//! - [next_u32_via_fill][](self) -//! -//! **`fn next_u64`:** -//! - [next_u64_via_u32][](self) -//! - [next_u64_via_fill][](self) -//! -//! **`fn fill_bytes`:** -//! - [fill_bytes_via_next][](self, dest) -//! -//! ### Implementing [`SeedableRng`] -//! -//! In many cases, [`SeedableRng::Seed`] must be converted to `[u32]` or -//! `[u64]`. 
The following helpers are provided: -//! -//! - [`read_u32_into`] -//! - [`read_u64_into`] - -use crate::RngCore; -#[allow(unused)] -use crate::SeedableRng; - -/// Implement `next_u64` via `next_u32`, little-endian order. -pub fn next_u64_via_u32(rng: &mut R) -> u64 { - // Use LE; we explicitly generate one value before the next. - let x = u64::from(rng.next_u32()); - let y = u64::from(rng.next_u32()); - (y << 32) | x -} - -/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order. -/// -/// The fastest way to fill a slice is usually to work as long as possible with -/// integers. That is why this method mostly uses `next_u64`, and only when -/// there are 4 or less bytes remaining at the end of the slice it uses -/// `next_u32` once. -pub fn fill_bytes_via_next(rng: &mut R, dest: &mut [u8]) { - let mut left = dest; - while left.len() >= 8 { - let (l, r) = { left }.split_at_mut(8); - left = r; - let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); - l.copy_from_slice(&chunk); - } - let n = left.len(); - if n > 4 { - let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); - left.copy_from_slice(&chunk[..n]); - } else if n > 0 { - let chunk: [u8; 4] = rng.next_u32().to_le_bytes(); - left.copy_from_slice(&chunk[..n]); - } -} - -pub(crate) trait Observable: Copy { - type Bytes: Sized + AsRef<[u8]>; - fn to_le_bytes(self) -> Self::Bytes; -} -impl Observable for u32 { - type Bytes = [u8; 4]; - - fn to_le_bytes(self) -> Self::Bytes { - Self::to_le_bytes(self) - } -} -impl Observable for u64 { - type Bytes = [u8; 8]; - - fn to_le_bytes(self) -> Self::Bytes { - Self::to_le_bytes(self) - } -} - -/// Fill dest from src -/// -/// Returns `(n, byte_len)`. `src[..n]` is consumed, -/// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left -/// unaltered. -pub(crate) fn fill_via_chunks(src: &[T], dest: &mut [u8]) -> (usize, usize) { - let size = core::mem::size_of::(); - - // Always use little endian for portability of results. 
- - let mut dest = dest.chunks_exact_mut(size); - let mut src = src.iter(); - - let zipped = dest.by_ref().zip(src.by_ref()); - let num_chunks = zipped.len(); - zipped.for_each(|(dest, src)| dest.copy_from_slice(src.to_le_bytes().as_ref())); - - let byte_len = num_chunks * size; - if let Some(src) = src.next() { - // We have consumed all full chunks of dest, but not src. - let dest = dest.into_remainder(); - let n = dest.len(); - if n > 0 { - dest.copy_from_slice(&src.to_le_bytes().as_ref()[..n]); - return (num_chunks + 1, byte_len + n); - } - } - (num_chunks, byte_len) -} - -/// Implement `next_u32` via `fill_bytes`, little-endian order. -pub fn next_u32_via_fill(rng: &mut R) -> u32 { - let mut buf = [0; 4]; - rng.fill_bytes(&mut buf); - u32::from_le_bytes(buf) -} - -/// Implement `next_u64` via `fill_bytes`, little-endian order. -pub fn next_u64_via_fill(rng: &mut R) -> u64 { - let mut buf = [0; 8]; - rng.fill_bytes(&mut buf); - u64::from_le_bytes(buf) -} - -/// Fills `dst: &mut [u32]` from `src` -/// -/// Reads use Little-Endian byte order, allowing portable reproduction of `dst` -/// from a byte slice. -/// -/// # Panics -/// -/// If `src` has insufficient length (if `src.len() < 4*dst.len()`). -#[inline] -#[track_caller] -pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { - assert!(src.len() >= 4 * dst.len()); - for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(4)) { - *out = u32::from_le_bytes(chunk.try_into().unwrap()); - } -} - -/// Fills `dst: &mut [u64]` from `src` -/// -/// # Panics -/// -/// If `src` has insufficient length (if `src.len() < 8*dst.len()`). 
-#[inline] -#[track_caller] -pub fn read_u64_into(src: &[u8], dst: &mut [u64]) { - assert!(src.len() >= 8 * dst.len()); - for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(8)) { - *out = u64::from_le_bytes(chunk.try_into().unwrap()); - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_fill_via_u32_chunks() { - let src_orig = [1u32, 2, 3]; - - let src = src_orig; - let mut dst = [0u8; 11]; - assert_eq!(fill_via_chunks(&src, &mut dst), (3, 11)); - assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 13]; - assert_eq!(fill_via_chunks(&src, &mut dst), (3, 12)); - assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 5]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 5)); - assert_eq!(dst, [1, 0, 0, 0, 2]); - } - - #[test] - fn test_fill_via_u64_chunks() { - let src_orig = [1u64, 2]; - - let src = src_orig; - let mut dst = [0u8; 11]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 11)); - assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 17]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 16)); - assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 5]; - assert_eq!(fill_via_chunks(&src, &mut dst), (1, 5)); - assert_eq!(dst, [1, 0, 0, 0, 0]); - } - - #[test] - fn test_read() { - let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - - let mut buf = [0u32; 4]; - read_u32_into(&bytes, &mut buf); - assert_eq!(buf[0], 0x04030201); - assert_eq!(buf[3], 0x100F0E0D); - - let mut buf = [0u32; 3]; - read_u32_into(&bytes[1..13], &mut buf); // unaligned - assert_eq!(buf[0], 0x05040302); - assert_eq!(buf[2], 0x0D0C0B0A); - - let mut buf = [0u64; 2]; - read_u64_into(&bytes, &mut buf); - assert_eq!(buf[0], 0x0807060504030201); - assert_eq!(buf[1], 0x100F0E0D0C0B0A09); - - let mut buf = [0u64; 1]; - read_u64_into(&bytes[7..15], &mut 
buf); // unaligned - assert_eq!(buf[0], 0x0F0E0D0C0B0A0908); - } -} diff --git a/src/lib.rs b/src/lib.rs index 13d53628..8aea80ad 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,7 +19,7 @@ //! [`SeedableRng`] is an extension trait for construction from fixed seeds and //! other random number generators. //! -//! The [`le`] sub-module includes a few small functions to assist +//! The [`utils`] sub-module includes a few small functions to assist //! implementation of [`RngCore`] and [`SeedableRng`]. //! //! [`rand`]: https://docs.rs/rand @@ -38,8 +38,7 @@ use core::{fmt, ops::DerefMut}; -pub mod block; -pub mod le; +pub mod utils; /// Implementation-level interface for RNGs /// @@ -530,7 +529,7 @@ mod test { fn from_seed(seed: Self::Seed) -> Self { let mut x = [0u64; 1]; - le::read_u64_into(&seed, &mut x); + utils::read_u64_into(&seed, &mut x); SeedableNum(x[0]) } } diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 00000000..166eceda --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,496 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Helper utilities. +//! +//! For cross-platform reproducibility, Little-Endian order (least-significant +//! part first) has been chosen as the standard for inter-type conversion. +//! For example, [`next_u64_via_u32`] generates two `u32` values `x, y`, +//! then outputs `(y << 32) | x`. +//! +//! Byte-swapping (like the std `to_le` functions) is only needed to convert +//! to/from byte sequences, and since its purpose is reproducibility, +//! non-reproducible sources (e.g. `OsRng`) need not bother with it. +//! +//! # Implementing [`SeedableRng`] +//! +//! In many cases, [`SeedableRng::Seed`] must be converted to `[u32]` or `[u64]`. +//! We provide [`read_u32_into`] and [`read_u64_into`] helpers for this. +//! +//! 
[`SeedableRng`]: crate::SeedableRng +//! [`SeedableRng::Seed`]: crate::SeedableRng::Seed +//! +//! # Implementing [`RngCore`] +//! +//! Usually an implementation of [`RngCore`] will implement one of the three methods +//! over its internal source, while remaining methods are implemented on top of it. +//! +//! Additionally, some RNGs generate blocks of data. In that case the implementations have to +//! handle buffering of the generated block. If an implementation supports SIMD-based optimizations, +//! i.e. if optimal block size depends on available target features, we reccomend to always +//! generate the biggest supported block size. +//! +//! See the examples below which demonstrate how functions in this module can be used to implement +//! `RngCore` for common RNG algorithm classes. +//! +//! WARNING: the RNG implementations below are provided for demonstation purposes only and +//! should not be used in practice! +//! +//! ## Fill-based RNG +//! +//! ``` +//! use rand_core::{RngCore, utils}; +//! +//! pub struct FillRng(u8); +//! +//! impl RngCore for FillRng { +//! fn next_u32(&mut self) -> u32 { +//! utils::next_u32_via_fill(self) +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! utils::next_u64_via_fill(self) +//! } +//! +//! fn fill_bytes(&mut self, dst: &mut [u8]) { +//! for byte in dst { +//! self.0 += 1; +//! *byte = self.0; +//! } +//! } +//! } +//! +//! let mut rng = FillRng(0); +//! +//! assert_eq!(rng.next_u32(), 0x0403_0201); +//! assert_eq!(rng.next_u64(), 0x0c0b_0a09_0807_0605); +//! let mut buf = [0u8; 4]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [0x0d, 0x0e, 0x0f, 0x10]); +//! ``` +//! +//! ## Single 32-bit value RNG +//! +//! ``` +//! use rand_core::{RngCore, utils}; +//! +//! pub struct Step32Rng(u32); +//! +//! impl RngCore for Step32Rng { +//! fn next_u32(&mut self) -> u32 { +//! self.0 += 1; +//! self.0 +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! utils::next_u64_via_u32(self) +//! } +//! +//! 
fn fill_bytes(&mut self, dst: &mut [u8]) { +//! utils::fill_bytes_via_next(self, dst); +//! } +//! } +//! +//! let mut rng = Step32Rng(0); +//! +//! assert_eq!(rng.next_u32(), 1); +//! assert_eq!(rng.next_u64(), 0x0000_0003_0000_0002); +//! let mut buf = [0u8; 4]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [4, 0, 0, 0]); +//! ``` +//! +//! ## Single 64-bit value RNG +//! +//! ``` +//! use rand_core::{RngCore, utils}; +//! +//! pub struct Step64Rng(u64); +//! +//! impl RngCore for Step64Rng { +//! fn next_u32(&mut self) -> u32 { +//! self.next_u64() as u32 +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! self.0 += 1; +//! self.0 +//! } +//! +//! fn fill_bytes(&mut self, dst: &mut [u8]) { +//! utils::fill_bytes_via_next(self, dst); +//! } +//! } +//! +//! let mut rng = Step64Rng(0); +//! +//! assert_eq!(rng.next_u32(), 1); +//! assert_eq!(rng.next_u64(), 2); +//! let mut buf = [0u8; 4]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [3, 0, 0, 0]); +//! ``` +//! +//! ## 32-bit block RNG +//! +//! ``` +//! use rand_core::{RngCore, SeedableRng, utils}; +//! +//! struct Block32RngCore([u32; 8]); +//! +//! impl Block32RngCore { +//! fn next_block(&mut self) -> [u32; 8] { +//! self.0.iter_mut().for_each(|v| *v += 1); +//! self.0 +//! } +//! } +//! +//! pub struct Block32Rng { +//! core: Block32RngCore, +//! buffer: [u32; 8], +//! } +//! +//! impl SeedableRng for Block32Rng { +//! type Seed = [u8; 32]; +//! +//! fn from_seed(seed: Self::Seed) -> Self { +//! let mut core_state = [0u32; 8]; +//! utils::read_u32_into(&seed, &mut core_state); +//! Self { +//! core: Block32RngCore(core_state), +//! buffer: utils::new_u32_buffer(), +//! } +//! } +//! } +//! +//! impl RngCore for Block32Rng { +//! fn next_u32(&mut self) -> u32 { +//! utils::next_u32_from_block(&mut self.buffer, || self.core.next_block()) +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! utils::next_u64_via_u32(self) +//! } +//! +//! fn fill_bytes(&mut self, dst: &mut [u8]) { +//! 
utils::fill_bytes_via_next(self, dst); +//! } +//! } +//! +//! let mut rng = Block32Rng::seed_from_u64(42); +//! +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa5); +//! assert_eq!(rng.next_u64(), 0xcca1_b8eb_0a3d_3259); +//! let mut buf = [0u8; 4]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [0x6a, 0x01, 0x14, 0xb8]); +//! ``` +//! +//! ## 64-bit block RNG +//! +//! ``` +//! use rand_core::{RngCore, SeedableRng, utils}; +//! +//! struct Block64RngCore([u64; 4]); +//! +//! impl Block64RngCore { +//! fn next_block(&mut self) -> [u64; 4] { +//! self.0.iter_mut().for_each(|v| *v += 1); +//! self.0 +//! } +//! } +//! +//! pub struct Block64Rng { +//! core: Block64RngCore, +//! buffer: [u64; 4], +//! } +//! +//! impl SeedableRng for Block64Rng { +//! type Seed = [u8; 32]; +//! +//! fn from_seed(seed: Self::Seed) -> Self { +//! let mut core_state = [0u64; 4]; +//! utils::read_u64_into(&seed, &mut core_state); +//! Self { +//! core: Block64RngCore(core_state), +//! buffer: utils::new_u64_buffer(), +//! } +//! } +//! } +//! +//! impl RngCore for Block64Rng { +//! fn next_u32(&mut self) -> u32 { +//! self.next_u64() as u32 +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! utils::next_u64_from_block(&mut self.buffer, || self.core.next_block()) +//! } +//! +//! fn fill_bytes(&mut self, dst: &mut [u8]) { +//! utils::fill_bytes_via_next(self, dst); +//! } +//! } +//! +//! let mut rng = Block64Rng::seed_from_u64(42); +//! +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa5); +//! assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8eb); +//! let mut buf = [0u8; 4]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [0x2c, 0x8c, 0xc8, 0x75]); +//! ``` + +use crate::RngCore; + +/// Implement `next_u64` via `next_u32`, little-endian order. +pub fn next_u64_via_u32(rng: &mut R) -> u64 { + // Use LE; we explicitly generate one value before the next. 
+ let x = u64::from(rng.next_u32()); + let y = u64::from(rng.next_u32()); + (y << 32) | x +} + +/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order. +/// +/// The fastest way to fill a slice is usually to work as long as possible with +/// integers. That is why this method mostly uses `next_u64`, and only when +/// there are 4 or less bytes remaining at the end of the slice it uses +/// `next_u32` once. +pub fn fill_bytes_via_next(rng: &mut R, dest: &mut [u8]) { + let mut left = dest; + while left.len() >= 8 { + let (l, r) = { left }.split_at_mut(8); + left = r; + let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); + l.copy_from_slice(&chunk); + } + let n = left.len(); + if n > 4 { + let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); + left.copy_from_slice(&chunk[..n]); + } else if n > 0 { + let chunk: [u8; 4] = rng.next_u32().to_le_bytes(); + left.copy_from_slice(&chunk[..n]); + } +} + +/// Implement `next_u32` via `fill_bytes`, little-endian order. +pub fn next_u32_via_fill(rng: &mut R) -> u32 { + let mut buf = [0; 4]; + rng.fill_bytes(&mut buf); + u32::from_le_bytes(buf) +} + +/// Implement `next_u64` via `fill_bytes`, little-endian order. +pub fn next_u64_via_fill(rng: &mut R) -> u64 { + let mut buf = [0; 8]; + rng.fill_bytes(&mut buf); + u64::from_le_bytes(buf) +} + +/// Fills `dst: &mut [u32]` from `src`. +/// +/// Reads use Little-Endian byte order, allowing portable reproduction of `dst` +/// from a byte slice. +/// +/// # Panics +/// +/// If `src.len() != 4 * dst.len()`. +#[inline] +#[track_caller] +pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { + assert!(src.len() == 4 * dst.len()); + for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(4)) { + *out = u32::from_le_bytes(chunk.try_into().unwrap()); + } +} + +/// Fills `dst: &mut [u64]` from `src`. +/// +/// # Panics +/// +/// If `src.len() != 8 * dst.len()`. 
+#[inline] +#[track_caller] +pub fn read_u64_into(src: &[u8], dst: &mut [u64]) { + assert!(src.len() == 8 * dst.len()); + for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(8)) { + *out = u64::from_le_bytes(chunk.try_into().unwrap()); + } +} + +/// Create new 32-bit block buffer. +pub fn new_u32_buffer() -> [u32; N] { + assert!(N > 1); + let mut res = [0u32; N]; + res[0] = N.try_into().unwrap(); + res +} + +/// Generate `u32` from block. +pub fn next_u32_from_block( + buf: &mut [u32; N], + mut generate_block: impl FnMut() -> [u32; N], +) -> u32 { + let pos = buf[0] as usize; + match buf.get(pos) { + Some(&val) => { + buf[0] += 1; + val + } + None => { + *buf = generate_block(); + core::mem::replace(&mut buf[0], 1) + } + } +} + +/// Create new 32-bit block buffer. +pub fn new_u64_buffer() -> [u64; N] { + assert!(N > 1); + let mut res = [0u64; N]; + res[0] = N.try_into().unwrap(); + res +} + +/// Generate `u64` from block. +pub fn next_u64_from_block( + buf: &mut [u64; N], + mut generate_block: impl FnMut() -> [u64; N], +) -> u64 { + let pos = buf[0] as usize; + match buf.get(pos) { + Some(&val) => { + buf[0] += 1; + val + } + None => { + *buf = generate_block(); + core::mem::replace(&mut buf[0], 1) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + pub(crate) trait Observable: Copy { + type Bytes: Sized + AsRef<[u8]>; + fn to_le_bytes(self) -> Self::Bytes; + } + impl Observable for u32 { + type Bytes = [u8; 4]; + + fn to_le_bytes(self) -> Self::Bytes { + Self::to_le_bytes(self) + } + } + impl Observable for u64 { + type Bytes = [u8; 8]; + + fn to_le_bytes(self) -> Self::Bytes { + Self::to_le_bytes(self) + } + } + + /// Fill dest from src + /// + /// Returns `(n, byte_len)`. `src[..n]` is consumed, + /// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left + /// unaltered. 
+ pub(crate) fn fill_via_chunks(src: &[T], dest: &mut [u8]) -> (usize, usize) { + let size = core::mem::size_of::(); + + // Always use little endian for portability of results. + + let mut dest = dest.chunks_exact_mut(size); + let mut src = src.iter(); + + let zipped = dest.by_ref().zip(src.by_ref()); + let num_chunks = zipped.len(); + zipped.for_each(|(dest, src)| dest.copy_from_slice(src.to_le_bytes().as_ref())); + + let byte_len = num_chunks * size; + if let Some(src) = src.next() { + // We have consumed all full chunks of dest, but not src. + let dest = dest.into_remainder(); + let n = dest.len(); + if n > 0 { + dest.copy_from_slice(&src.to_le_bytes().as_ref()[..n]); + return (num_chunks + 1, byte_len + n); + } + } + (num_chunks, byte_len) + } + + #[test] + fn test_fill_via_u32_chunks() { + let src_orig = [1u32, 2, 3]; + + let src = src_orig; + let mut dst = [0u8; 11]; + assert_eq!(fill_via_chunks(&src, &mut dst), (3, 11)); + assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]); + + let src = src_orig; + let mut dst = [0u8; 13]; + assert_eq!(fill_via_chunks(&src, &mut dst), (3, 12)); + assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]); + + let src = src_orig; + let mut dst = [0u8; 5]; + assert_eq!(fill_via_chunks(&src, &mut dst), (2, 5)); + assert_eq!(dst, [1, 0, 0, 0, 2]); + } + + #[test] + fn test_fill_via_u64_chunks() { + let src_orig = [1u64, 2]; + + let src = src_orig; + let mut dst = [0u8; 11]; + assert_eq!(fill_via_chunks(&src, &mut dst), (2, 11)); + assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]); + + let src = src_orig; + let mut dst = [0u8; 17]; + assert_eq!(fill_via_chunks(&src, &mut dst), (2, 16)); + assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]); + + let src = src_orig; + let mut dst = [0u8; 5]; + assert_eq!(fill_via_chunks(&src, &mut dst), (1, 5)); + assert_eq!(dst, [1, 0, 0, 0, 0]); + } + + #[test] + fn test_read() { + let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + + let mut buf = [0u32; 4]; + 
read_u32_into(&bytes, &mut buf); + assert_eq!(buf[0], 0x04030201); + assert_eq!(buf[3], 0x100F0E0D); + + let mut buf = [0u32; 3]; + read_u32_into(&bytes[1..13], &mut buf); // unaligned + assert_eq!(buf[0], 0x05040302); + assert_eq!(buf[2], 0x0D0C0B0A); + + let mut buf = [0u64; 2]; + read_u64_into(&bytes, &mut buf); + assert_eq!(buf[0], 0x0807060504030201); + assert_eq!(buf[1], 0x100F0E0D0C0B0A09); + + let mut buf = [0u64; 1]; + read_u64_into(&bytes[7..15], &mut buf); // unaligned + assert_eq!(buf[0], 0x0F0E0D0C0B0A0908); + } +} From 4a6425bcad5e151702957c3886013c0e5cf93afe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 18:53:07 +0300 Subject: [PATCH 02/34] fix doc --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 8aea80ad..8773719e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -72,7 +72,7 @@ pub mod utils; /// /// Typically an RNG will implement only one of the methods available /// in this trait directly, then use the helper functions from the -/// [`le` module](crate::le) to implement the other methods. +/// [`utils`] module to implement the other methods. 
/// /// Note that implementors of [`RngCore`] also automatically implement /// the [`TryRngCore`] trait with the `Error` associated type being From 73d7ab355c20717616d22323b6fa57046c5e3165 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 18:55:13 +0300 Subject: [PATCH 03/34] Fix CI --- .github/workflows/test.yml | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e4549e6b..650e69c2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -44,7 +44,7 @@ jobs: with: toolchain: nightly - name: rand_core - run: cargo doc --all-features --no-deps + run: cargo doc --no-deps test: runs-on: ${{ matrix.os }} @@ -92,12 +92,9 @@ jobs: - run: ${{ matrix.deps }} - name: Maybe minimal versions if: ${{ matrix.variant == 'minimal_versions' }} - run: | - cargo generate-lockfile -Z minimal-versions + run: cargo generate-lockfile -Z minimal-versions - name: Test rand_core - run: | - cargo test --target ${{ matrix.target }} --no-default-features - cargo test --target ${{ matrix.target }} --features serde + run: cargo test --target ${{ matrix.target }} test-cross: runs-on: ${{ matrix.os }} @@ -124,9 +121,7 @@ jobs: - name: Install cross run: cargo install cross || true - name: Test - run: | - # all stable features: - cross test --no-fail-fast --target ${{ matrix.target }} + run: cross test --no-fail-fast --target ${{ matrix.target }} test-miri: runs-on: ubuntu-latest @@ -138,10 +133,7 @@ jobs: rustup override set nightly cargo miri setup - name: Test rand - run: | - cargo miri test - cargo miri test --features=serde - cargo miri test --no-default-features + run: cargo miri test test-no-std: runs-on: ubuntu-latest @@ -152,7 +144,7 @@ jobs: with: target: thumbv6m-none-eabi - name: Build top-level only - run: cargo build --target=thumbv6m-none-eabi 
--no-default-features + run: cargo build --target=thumbv6m-none-eabi test-ios: runs-on: macos-latest From 699271e619e30e420285e3b87c6bbcf19d4b88d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 19:03:02 +0300 Subject: [PATCH 04/34] Main tests -> Tests --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 650e69c2..129ff0b1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,4 +1,4 @@ -name: Main tests +name: Tests on: push: From 127603146d5c486073caff56eced9f55c63c6405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 19:09:50 +0300 Subject: [PATCH 05/34] tweak examples --- src/utils.rs | 65 ++++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 166eceda..c67cfbf0 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -59,19 +59,20 @@ //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { //! for byte in dst { -//! self.0 += 1; -//! *byte = self.0; +//! let val = self.0; +//! self.0 = val + 1; +//! *byte = val; //! } //! } //! } //! //! let mut rng = FillRng(0); //! -//! assert_eq!(rng.next_u32(), 0x0403_0201); -//! assert_eq!(rng.next_u64(), 0x0c0b_0a09_0807_0605); -//! let mut buf = [0u8; 4]; +//! assert_eq!(rng.next_u32(), 0x03_020100); +//! assert_eq!(rng.next_u64(), 0x0b0a_0908_0706_0504); +//! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x0d, 0x0e, 0x0f, 0x10]); +//! assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); //! ``` //! //! ## Single 32-bit value RNG @@ -83,8 +84,9 @@ //! //! impl RngCore for Step32Rng { //! fn next_u32(&mut self) -> u32 { -//! self.0 += 1; -//! 
self.0 +//! let val = self.0; +//! self.0 = val + 1; +//! val //! } //! //! fn next_u64(&mut self) -> u64 { @@ -98,11 +100,11 @@ //! //! let mut rng = Step32Rng(0); //! -//! assert_eq!(rng.next_u32(), 1); -//! assert_eq!(rng.next_u64(), 0x0000_0003_0000_0002); -//! let mut buf = [0u8; 4]; +//! assert_eq!(rng.next_u32(), 0); +//! assert_eq!(rng.next_u64(), 0x0000_0002_0000_0001); +//! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [4, 0, 0, 0]); +//! assert_eq!(buf, [3, 0, 0, 0, 4]); //! ``` //! //! ## Single 64-bit value RNG @@ -118,8 +120,9 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! self.0 += 1; -//! self.0 +//! let val = self.0; +//! self.0 = val + 1; +//! val //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { @@ -129,11 +132,11 @@ //! //! let mut rng = Step64Rng(0); //! -//! assert_eq!(rng.next_u32(), 1); -//! assert_eq!(rng.next_u64(), 2); -//! let mut buf = [0u8; 4]; +//! assert_eq!(rng.next_u32(), 0); +//! assert_eq!(rng.next_u64(), 1); +//! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [3, 0, 0, 0]); +//! assert_eq!(buf, [2, 0, 0, 0, 0]); //! ``` //! //! ## 32-bit block RNG @@ -145,8 +148,9 @@ //! //! impl Block32RngCore { //! fn next_block(&mut self) -> [u32; 8] { -//! self.0.iter_mut().for_each(|v| *v += 1); -//! self.0 +//! let val = self.0; +//! self.0 = val.map(|v| v + 1); +//! val //! } //! } //! @@ -184,11 +188,11 @@ //! //! let mut rng = Block32Rng::seed_from_u64(42); //! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa5); -//! assert_eq!(rng.next_u64(), 0xcca1_b8eb_0a3d_3259); -//! let mut buf = [0u8; 4]; +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! assert_eq!(rng.next_u64(), 0xcca1_b8ea_0a3d_3258); +//! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x6a, 0x01, 0x14, 0xb8]); +//! assert_eq!(buf, [0x69, 0x01, 0x14, 0xb8, 0x2b]); //! ``` //! //! ## 64-bit block RNG @@ -200,8 +204,9 @@ //! //! impl Block64RngCore { //! 
fn next_block(&mut self) -> [u64; 4] { -//! self.0.iter_mut().for_each(|v| *v += 1); -//! self.0 +//! let val = self.0; +//! self.0 = val.map(|v| v + 1); +//! val //! } //! } //! @@ -239,11 +244,11 @@ //! //! let mut rng = Block64Rng::seed_from_u64(42); //! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa5); -//! assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8eb); -//! let mut buf = [0u8; 4]; +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8ea); +//! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x2c, 0x8c, 0xc8, 0x75]); +//! assert_eq!(buf, [0x2b, 0x8c, 0xc8, 0x75, 0x18]); //! ``` use crate::RngCore; From 0dc24404bcfb2323065ef361d672f975fcd44280 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 19:22:29 +0300 Subject: [PATCH 06/34] tweak `read_u*_into` functions --- src/utils.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index c67cfbf0..d94cbff6 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -310,7 +310,7 @@ pub fn next_u64_via_fill(rng: &mut R) -> u64 { #[inline] #[track_caller] pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { - assert!(src.len() == 4 * dst.len()); + assert!(size_of_val(src) == size_of_val(dst)); for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(4)) { *out = u32::from_le_bytes(chunk.try_into().unwrap()); } @@ -324,7 +324,7 @@ pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { #[inline] #[track_caller] pub fn read_u64_into(src: &[u8], dst: &mut [u64]) { - assert!(src.len() == 8 * dst.len()); + assert!(size_of_val(src) == size_of_val(dst)); for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(8)) { *out = u64::from_le_bytes(chunk.try_into().unwrap()); } @@ -356,7 +356,7 @@ pub fn next_u32_from_block( } } -/// Create new 32-bit block buffer. +/// Create new 64-bit block buffer. 
pub fn new_u64_buffer() -> [u64; N] { assert!(N > 1); let mut res = [0u64; N]; From 44a48d4a1ce8c596d2c4460c61f770aafb225cb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Mon, 10 Nov 2025 19:23:03 +0300 Subject: [PATCH 07/34] tweak asserts --- src/utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index d94cbff6..2205907e 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -310,7 +310,7 @@ pub fn next_u64_via_fill(rng: &mut R) -> u64 { #[inline] #[track_caller] pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { - assert!(size_of_val(src) == size_of_val(dst)); + assert_eq!(size_of_val(src), size_of_val(dst)); for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(4)) { *out = u32::from_le_bytes(chunk.try_into().unwrap()); } @@ -324,7 +324,7 @@ pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { #[inline] #[track_caller] pub fn read_u64_into(src: &[u8], dst: &mut [u64]) { - assert!(size_of_val(src) == size_of_val(dst)); + assert_eq!(size_of_val(src), size_of_val(dst)); for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(8)) { *out = u64::from_le_bytes(chunk.try_into().unwrap()); } From 869ae969918c542debc04d41cfe3500ec6bcbb7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Wed, 12 Nov 2025 14:46:17 +0300 Subject: [PATCH 08/34] Remove `next_u*_via_fill` methods --- src/utils.rs | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index e73801e9..9ccff38d 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -36,17 +36,21 @@ //! ## Fill-based RNG //! //! ``` -//! use rand_core::{RngCore, utils}; +//! use rand_core::RngCore; //! //! pub struct FillRng(u8); //! //! impl RngCore for FillRng { //! fn next_u32(&mut self) -> u32 { -//! 
utils::next_u32_via_fill(self) +//! let mut buf = [0; 4]; +//! self.fill_bytes(&mut buf); +//! u32::from_le_bytes(buf) //! } //! //! fn next_u64(&mut self) -> u64 { -//! utils::next_u64_via_fill(self) +//! let mut buf = [0; 8]; +//! self.fill_bytes(&mut buf); +//! u64::from_le_bytes(buf) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { @@ -67,6 +71,9 @@ //! assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); //! ``` //! +//! Note that you can use `from_ne_bytes` instead of `from_le_bytes` +//! if your `fill_bytes` implementation is not reproducible. +//! //! ## Single 32-bit value RNG //! //! ``` @@ -277,20 +284,6 @@ pub fn fill_bytes_via_next(rng: &mut R, dest: &mut [u8]) { } } -/// Implement `next_u32` via `fill_bytes`, little-endian order. -pub fn next_u32_via_fill(rng: &mut R) -> u32 { - let mut buf = [0; 4]; - rng.fill_bytes(&mut buf); - u32::from_le_bytes(buf) -} - -/// Implement `next_u64` via `fill_bytes`, little-endian order. -pub fn next_u64_via_fill(rng: &mut R) -> u64 { - let mut buf = [0; 8]; - rng.fill_bytes(&mut buf); - u64::from_le_bytes(buf) -} - /// Fills `dst: &mut [u32]` from `src`. /// /// Reads use Little-Endian byte order, allowing portable reproduction of `dst` From 76959c1c562553fb67ab0d99c47aa8d07e4676ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Wed, 12 Nov 2025 15:00:54 +0300 Subject: [PATCH 09/34] split `fill_bytes_via_next` --- src/utils.rs | 136 +++++++++++++++++++++++++++------------------------ 1 file changed, 71 insertions(+), 65 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 9ccff38d..0d4d68d7 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -33,47 +33,6 @@ //! WARNING: the RNG implementations below are provided for demonstation purposes only and //! should not be used in practice! //! -//! ## Fill-based RNG -//! -//! ``` -//! use rand_core::RngCore; -//! -//! pub struct FillRng(u8); -//! -//! 
impl RngCore for FillRng { -//! fn next_u32(&mut self) -> u32 { -//! let mut buf = [0; 4]; -//! self.fill_bytes(&mut buf); -//! u32::from_le_bytes(buf) -//! } -//! -//! fn next_u64(&mut self) -> u64 { -//! let mut buf = [0; 8]; -//! self.fill_bytes(&mut buf); -//! u64::from_le_bytes(buf) -//! } -//! -//! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! for byte in dst { -//! let val = self.0; -//! self.0 = val + 1; -//! *byte = val; -//! } -//! } -//! } -//! -//! let mut rng = FillRng(0); -//! -//! assert_eq!(rng.next_u32(), 0x03_020100); -//! assert_eq!(rng.next_u64(), 0x0b0a_0908_0706_0504); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); -//! ``` -//! -//! Note that you can use `from_ne_bytes` instead of `from_le_bytes` -//! if your `fill_bytes` implementation is not reproducible. -//! //! ## Single 32-bit value RNG //! //! ``` @@ -93,7 +52,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next(self, dst); +//! utils::fill_bytes_via_next_u32(self, dst); //! } //! } //! @@ -125,7 +84,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next(self, dst); +//! utils::fill_bytes_via_next_u64(self, dst); //! } //! } //! @@ -181,7 +140,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next(self, dst); +//! utils::fill_bytes_via_next_u32(self, dst); //! } //! } //! @@ -237,7 +196,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next(self, dst); +//! utils::fill_bytes_via_next_u64(self, dst); //! } //! } //! @@ -249,6 +208,47 @@ //! rng.fill_bytes(&mut buf); //! assert_eq!(buf, [0x2b, 0x8c, 0xc8, 0x75, 0x18]); //! ``` +//! +//! ## Fill-based RNG +//! +//! ``` +//! use rand_core::RngCore; +//! +//! pub struct FillRng(u8); +//! +//! impl RngCore for FillRng { +//! fn next_u32(&mut self) -> u32 { +//! let mut buf = [0; 4]; +//! self.fill_bytes(&mut buf); +//! 
u32::from_le_bytes(buf) +//! } +//! +//! fn next_u64(&mut self) -> u64 { +//! let mut buf = [0; 8]; +//! self.fill_bytes(&mut buf); +//! u64::from_le_bytes(buf) +//! } +//! +//! fn fill_bytes(&mut self, dst: &mut [u8]) { +//! for byte in dst { +//! let val = self.0; +//! self.0 = val + 1; +//! *byte = val; +//! } +//! } +//! } +//! +//! let mut rng = FillRng(0); +//! +//! assert_eq!(rng.next_u32(), 0x03_020100); +//! assert_eq!(rng.next_u64(), 0x0b0a_0908_0706_0504); +//! let mut buf = [0u8; 5]; +//! rng.fill_bytes(&mut buf); +//! assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); +//! ``` +//! +//! Note that you can use `from_ne_bytes` instead of `from_le_bytes` +//! if your `fill_bytes` implementation is not reproducible. use crate::RngCore; @@ -260,27 +260,33 @@ pub fn next_u64_via_u32(rng: &mut R) -> u64 { (y << 32) | x } -/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order. -/// -/// The fastest way to fill a slice is usually to work as long as possible with -/// integers. That is why this method mostly uses `next_u64`, and only when -/// there are 4 or less bytes remaining at the end of the slice it uses -/// `next_u32` once. -pub fn fill_bytes_via_next(rng: &mut R, dest: &mut [u8]) { - let mut left = dest; - while left.len() >= 8 { - let (l, r) = { left }.split_at_mut(8); - left = r; - let chunk: [u8; 8] = rng.next_u64().to_le_bytes(); - l.copy_from_slice(&chunk); +/// Implement `fill_bytes` via `next_u64` using little-endian order. +pub fn fill_bytes_via_next_u32(rng: &mut R, dest: &mut [u8]) { + let mut chunks = dest.chunks_exact_mut(size_of::()); + for chunk in &mut chunks { + let val = rng.next_u32(); + chunk.copy_from_slice(&val.to_le_bytes()); + } + let rem = chunks.into_remainder(); + if !rem.is_empty() { + let val = rng.next_u32(); + let rem_src = &val.to_le_bytes()[..rem.len()]; + rem.copy_from_slice(rem_src); + } +} + +/// Implement `fill_bytes` via `next_u64` using little-endian order. 
+pub fn fill_bytes_via_next_u64(rng: &mut R, dest: &mut [u8]) {
+    let mut chunks = dest.chunks_exact_mut(size_of::());
+    for chunk in &mut chunks {
+        let val = rng.next_u64();
+        chunk.copy_from_slice(&val.to_le_bytes());
     }
-    let n = left.len();
-    if n > 4 {
-        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
-        left.copy_from_slice(&chunk[..n]);
-    } else if n > 0 {
-        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
-        left.copy_from_slice(&chunk[..n]);
+    let rem = chunks.into_remainder();
+    if !rem.is_empty() {
+        let val = rng.next_u64();
+        let rem_src = &val.to_le_bytes()[..rem.len()];
+        rem.copy_from_slice(rem_src);
     }
 }

From 4da0ef729706664c95a0f12e43995da583f6f96d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?=
Date: Wed, 12 Nov 2025 17:18:52 +0300
Subject: [PATCH 10/34] Introduce `Word` trait, add `fill_bytes_via_gen_block`

---
 src/lib.rs    |   3 +-
 src/sealed.rs |  50 +++++++++
 src/utils.rs  | 293 +++++++++++++++++++------------------------
 3 files changed, 160 insertions(+), 186 deletions(-)
 create mode 100644 src/sealed.rs

diff --git a/src/lib.rs b/src/lib.rs
index f1bd0b5e..95c4d8eb 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -29,6 +29,7 @@

 use core::{fmt, ops::DerefMut};

+mod sealed;
 pub mod utils;

 /// Implementation-level interface for RNGs
@@ -520,7 +521,7 @@ mod test {

         fn from_seed(seed: Self::Seed) -> Self {
             let mut x = [0u64; 1];
-            utils::read_u64_into(&seed, &mut x);
+            utils::read_words_into(&seed, &mut x);
             SeedableNum(x[0])
         }
     }
diff --git a/src/sealed.rs b/src/sealed.rs
new file mode 100644
index 00000000..4f037314
--- /dev/null
+++ b/src/sealed.rs
@@ -0,0 +1,50 @@
+/// Sealed trait implemented for `u32` and `u64`.
+pub trait Sealed: Default + Copy + TryFrom { + type Bytes: Sized + AsRef<[u8]> + for<'a> TryFrom<&'a [u8]>; + + fn from_usize(val: usize) -> Self; + fn into_usize(self) -> usize; + fn to_le_bytes(self) -> Self::Bytes; + fn from_le_bytes(bytes: Self::Bytes) -> Self; + fn increment(&mut self); +} + +impl Sealed for u32 { + type Bytes = [u8; 4]; + + fn from_usize(val: usize) -> Self { + val.try_into().unwrap() + } + fn into_usize(self) -> usize { + self.try_into().unwrap() + } + fn to_le_bytes(self) -> Self::Bytes { + u32::to_le_bytes(self) + } + fn from_le_bytes(bytes: Self::Bytes) -> Self { + u32::from_le_bytes(bytes) + } + fn increment(&mut self) { + *self += 1; + } +} + +impl Sealed for u64 { + type Bytes = [u8; 8]; + + fn from_usize(val: usize) -> Self { + val.try_into().unwrap() + } + fn into_usize(self) -> usize { + self.try_into().unwrap() + } + fn to_le_bytes(self) -> Self::Bytes { + u64::to_le_bytes(self) + } + fn from_le_bytes(bytes: Self::Bytes) -> Self { + u64::from_le_bytes(bytes) + } + fn increment(&mut self) { + *self += 1; + } +} diff --git a/src/utils.rs b/src/utils.rs index 0d4d68d7..03cc08c1 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -52,7 +52,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_u32(self, dst); +//! utils::fill_bytes_via_next_word(dst, || self.next_u32()); //! } //! } //! @@ -84,7 +84,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_u64(self, dst); +//! utils::fill_bytes_via_next_word(dst, || self.next_u64()); //! } //! } //! @@ -105,10 +105,9 @@ //! struct Block32RngCore([u32; 8]); //! //! impl Block32RngCore { -//! fn next_block(&mut self) -> [u32; 8] { -//! let val = self.0; -//! self.0 = val.map(|v| v + 1); -//! val +//! fn next_block(&mut self, block: &mut [u32; 8]) { +//! *block = self.0; +//! self.0.iter_mut().for_each(|v| *v += 1); //! } //! } //! @@ -122,17 +121,17 @@ //! //! fn from_seed(seed: Self::Seed) -> Self { //! 
let mut core_state = [0u32; 8]; -//! utils::read_u32_into(&seed, &mut core_state); +//! utils::read_words_into(&seed, &mut core_state); //! Self { //! core: Block32RngCore(core_state), -//! buffer: utils::new_u32_buffer(), +//! buffer: utils::new_buffer(), //! } //! } //! } //! //! impl RngCore for Block32Rng { //! fn next_u32(&mut self) -> u32 { -//! utils::next_u32_from_block(&mut self.buffer, || self.core.next_block()) +//! utils::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) //! } //! //! fn next_u64(&mut self) -> u64 { @@ -140,7 +139,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_u32(self, dst); +//! utils::fill_bytes_via_next_word(dst, || self.next_u32()); //! } //! } //! @@ -161,10 +160,9 @@ //! struct Block64RngCore([u64; 4]); //! //! impl Block64RngCore { -//! fn next_block(&mut self) -> [u64; 4] { -//! let val = self.0; -//! self.0 = val.map(|v| v + 1); -//! val +//! fn next_block(&mut self, block: &mut [u64; 4]) { +//! *block = self.0; +//! self.0.iter_mut().for_each(|v| *v += 1); //! } //! } //! @@ -178,10 +176,10 @@ //! //! fn from_seed(seed: Self::Seed) -> Self { //! let mut core_state = [0u64; 4]; -//! utils::read_u64_into(&seed, &mut core_state); +//! utils::read_words_into(&seed, &mut core_state); //! Self { //! core: Block64RngCore(core_state), -//! buffer: utils::new_u64_buffer(), +//! buffer: utils::new_buffer(), //! } //! } //! } @@ -192,11 +190,11 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! utils::next_u64_from_block(&mut self.buffer, || self.core.next_block()) +//! utils::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_u64(self, dst); +//! utils::fill_bytes_via_next_word(dst, || self.next_u64()); //! } //! } //! @@ -252,7 +250,7 @@ use crate::RngCore; -/// Implement `next_u64` via `next_u32`, little-endian order. 
+/// Implement `next_u64` via `next_u32` using little-endian order. pub fn next_u64_via_u32(rng: &mut R) -> u64 { // Use LE; we explicitly generate one value before the next. let x = u64::from(rng.next_u32()); @@ -261,232 +259,157 @@ pub fn next_u64_via_u32(rng: &mut R) -> u64 { } /// Implement `fill_bytes` via `next_u64` using little-endian order. -pub fn fill_bytes_via_next_u32(rng: &mut R, dest: &mut [u8]) { - let mut chunks = dest.chunks_exact_mut(size_of::()); - for chunk in &mut chunks { - let val = rng.next_u32(); - chunk.copy_from_slice(&val.to_le_bytes()); - } - let rem = chunks.into_remainder(); - if !rem.is_empty() { - let val = rng.next_u32(); - let rem_src = &val.to_le_bytes()[..rem.len()]; - rem.copy_from_slice(rem_src); - } -} - -/// Implement `fill_bytes` via `next_u64` using little-endian order. -pub fn fill_bytes_via_next_u64(rng: &mut R, dest: &mut [u8]) { - let mut chunks = dest.chunks_exact_mut(size_of::()); +pub fn fill_bytes_via_next_word(dest: &mut [u8], mut next_word: impl FnMut() -> W) { + let mut chunks = dest.chunks_exact_mut(size_of::()); for chunk in &mut chunks { - let val = rng.next_u64(); - chunk.copy_from_slice(&val.to_le_bytes()); + let val = next_word(); + chunk.copy_from_slice(val.to_le_bytes().as_ref()); } let rem = chunks.into_remainder(); if !rem.is_empty() { - let val = rng.next_u64(); - let rem_src = &val.to_le_bytes()[..rem.len()]; - rem.copy_from_slice(rem_src); + let val = next_word().to_le_bytes(); + rem.copy_from_slice(&val.as_ref()[..rem.len()]); } } -/// Fills `dst: &mut [u32]` from `src`. -/// -/// Reads use Little-Endian byte order, allowing portable reproduction of `dst` -/// from a byte slice. +/// Fills slice of words `dst` from byte slice `src` using little endian order. /// /// # Panics /// -/// If `src.len() != 4 * dst.len()`. +/// If `size_of_val(src) != size_of_val(dst)`. 
#[inline] -#[track_caller] -pub fn read_u32_into(src: &[u8], dst: &mut [u32]) { +pub fn read_words_into(src: &[u8], dst: &mut [W]) { assert_eq!(size_of_val(src), size_of_val(dst)); - for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(4)) { - *out = u32::from_le_bytes(chunk.try_into().unwrap()); + let chunks = src.chunks_exact(size_of::()); + for (out, chunk) in dst.iter_mut().zip(chunks) { + let Ok(bytes) = chunk.try_into() else { + unreachable!() + }; + *out = W::from_le_bytes(bytes); } } -/// Fills `dst: &mut [u64]` from `src`. -/// -/// # Panics -/// -/// If `src.len() != 8 * dst.len()`. -#[inline] -#[track_caller] -pub fn read_u64_into(src: &[u8], dst: &mut [u64]) { - assert_eq!(size_of_val(src), size_of_val(dst)); - for (out, chunk) in dst.iter_mut().zip(src.chunks_exact(8)) { - *out = u64::from_le_bytes(chunk.try_into().unwrap()); - } -} - -/// Create new 32-bit block buffer. -pub fn new_u32_buffer() -> [u32; N] { - assert!(N > 1); - let mut res = [0u32; N]; - res[0] = N.try_into().unwrap(); +/// Create new block buffer. +pub fn new_buffer() -> [W; N] { + let mut res = [W::from_usize(0); N]; + res[0] = W::from_usize(N); res } -/// Generate `u32` from block. -pub fn next_u32_from_block( - buf: &mut [u32; N], - mut generate_block: impl FnMut() -> [u32; N], -) -> u32 { - let pos = buf[0] as usize; +/// Implement `next_u32/u64` function using buffer and block generation closure. +pub fn next_word_via_gen_block( + buf: &mut [W; N], + mut generate_block: impl FnMut(&mut [W; N]), +) -> W { + let pos = buf[0].into_usize(); match buf.get(pos) { Some(&val) => { - buf[0] += 1; + buf[0].increment(); val } None => { - *buf = generate_block(); - core::mem::replace(&mut buf[0], 1) + generate_block(buf); + core::mem::replace(&mut buf[0], W::from_usize(1)) } } } -/// Create new 64-bit block buffer. 
-pub fn new_u64_buffer() -> [u64; N] { - assert!(N > 1); - let mut res = [0u64; N]; - res[0] = N.try_into().unwrap(); - res -} +/// Implement `fill_bytes` using 32-bit block buffer and block generation function. +pub fn fill_bytes_via_gen_block( + mut dst: &mut [u8], + buf: &mut [W; N], + mut generate_block: impl FnMut(&mut [W; N]), +) { + let word_size = size_of::(); + + let pos = buf[0].into_usize(); + if pos < buf.len() { + let buf_tail = &buf[pos..]; + let buf_rem = size_of_val(buf_tail); + + if buf_rem >= dst.len() { + let chunks = dst.chunks_mut(word_size); + let mut pos = buf[0]; + + for (src, dst) in buf_tail.iter().zip(chunks) { + let val = src.to_le_bytes(); + dst.copy_from_slice(&val.as_ref()[..dst.len()]); + pos.increment(); + } -/// Generate `u64` from block. -pub fn next_u64_from_block( - buf: &mut [u64; N], - mut generate_block: impl FnMut() -> [u64; N], -) -> u64 { - let pos = buf[0] as usize; - match buf.get(pos) { - Some(&val) => { - buf[0] += 1; - val + buf[0] = pos; + return; } - None => { - *buf = generate_block(); - core::mem::replace(&mut buf[0], 1) - } - } -} - -#[cfg(test)] -mod test { - use super::*; - pub(crate) trait Observable: Copy { - type Bytes: Sized + AsRef<[u8]>; - fn to_le_bytes(self) -> Self::Bytes; - } - impl Observable for u32 { - type Bytes = [u8; 4]; + let (l, r) = dst.split_at_mut(buf_rem); + dst = r; - fn to_le_bytes(self) -> Self::Bytes { - Self::to_le_bytes(self) + let chunks = l.chunks_exact_mut(word_size); + for (src, dst) in buf_tail.iter().zip(chunks) { + let val = src.to_le_bytes(); + dst.copy_from_slice(&val.as_ref()[..dst.len()]); } } - impl Observable for u64 { - type Bytes = [u8; 8]; - fn to_le_bytes(self) -> Self::Bytes { - Self::to_le_bytes(self) + let mut blocks = dst.chunks_exact_mut(N * word_size); + let mut temp_buf = [W::from_usize(0); N]; + for block in &mut blocks { + generate_block(&mut temp_buf); + for (chunk, word) in block.chunks_exact_mut(word_size).zip(temp_buf.iter()) { + 
chunk.copy_from_slice(word.to_le_bytes().as_ref()); } } - /// Fill dest from src - /// - /// Returns `(n, byte_len)`. `src[..n]` is consumed, - /// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left - /// unaltered. - pub(crate) fn fill_via_chunks(src: &[T], dest: &mut [u8]) -> (usize, usize) { - let size = core::mem::size_of::(); - - // Always use little endian for portability of results. - - let mut dest = dest.chunks_exact_mut(size); - let mut src = src.iter(); - - let zipped = dest.by_ref().zip(src.by_ref()); - let num_chunks = zipped.len(); - zipped.for_each(|(dest, src)| dest.copy_from_slice(src.to_le_bytes().as_ref())); - - let byte_len = num_chunks * size; - if let Some(src) = src.next() { - // We have consumed all full chunks of dest, but not src. - let dest = dest.into_remainder(); - let n = dest.len(); - if n > 0 { - dest.copy_from_slice(&src.to_le_bytes().as_ref()[..n]); - return (num_chunks + 1, byte_len + n); - } + let rem = blocks.into_remainder(); + let new_pos = if rem.is_empty() { + W::from_usize(N) + } else { + generate_block(buf); + let chunks = rem.chunks_mut(word_size); + let mut pos = W::from_usize(0); + + for (src, dst) in buf.iter().zip(chunks) { + let val = src.to_le_bytes(); + dst.copy_from_slice(&val.as_ref()[..dst.len()]); + pos.increment(); } - (num_chunks, byte_len) - } - - #[test] - fn test_fill_via_u32_chunks() { - let src_orig = [1u32, 2, 3]; - - let src = src_orig; - let mut dst = [0u8; 11]; - assert_eq!(fill_via_chunks(&src, &mut dst), (3, 11)); - assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 13]; - assert_eq!(fill_via_chunks(&src, &mut dst), (3, 12)); - assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]); - - let src = src_orig; - let mut dst = [0u8; 5]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 5)); - assert_eq!(dst, [1, 0, 0, 0, 2]); - } - #[test] - fn test_fill_via_u64_chunks() { - let src_orig = [1u64, 2]; + pos + }; + buf[0] = new_pos; +} 
- let src = src_orig; - let mut dst = [0u8; 11]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 11)); - assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]); +/// Sealed trait implemented for `u32` and `u64`. +pub trait Word: crate::sealed::Sealed {} - let src = src_orig; - let mut dst = [0u8; 17]; - assert_eq!(fill_via_chunks(&src, &mut dst), (2, 16)); - assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]); +impl Word for u32 {} +impl Word for u64 {} - let src = src_orig; - let mut dst = [0u8; 5]; - assert_eq!(fill_via_chunks(&src, &mut dst), (1, 5)); - assert_eq!(dst, [1, 0, 0, 0, 0]); - } +#[cfg(test)] +mod test { + use super::*; #[test] fn test_read() { let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; let mut buf = [0u32; 4]; - read_u32_into(&bytes, &mut buf); + read_words_into(&bytes, &mut buf); assert_eq!(buf[0], 0x04030201); assert_eq!(buf[3], 0x100F0E0D); let mut buf = [0u32; 3]; - read_u32_into(&bytes[1..13], &mut buf); // unaligned + read_words_into(&bytes[1..13], &mut buf); // unaligned assert_eq!(buf[0], 0x05040302); assert_eq!(buf[2], 0x0D0C0B0A); let mut buf = [0u64; 2]; - read_u64_into(&bytes, &mut buf); + read_words_into(&bytes, &mut buf); assert_eq!(buf[0], 0x0807060504030201); assert_eq!(buf[1], 0x100F0E0D0C0B0A09); let mut buf = [0u64; 1]; - read_u64_into(&bytes[7..15], &mut buf); // unaligned + read_words_into(&bytes[7..15], &mut buf); // unaligned assert_eq!(buf[0], 0x0F0E0D0C0B0A0908); } } From b486e603a2b09cc8b58cca4135d8be40b7a4e667 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Wed, 12 Nov 2025 17:30:43 +0300 Subject: [PATCH 11/34] tweak docs --- src/utils.rs | 43 +++++++++++++++++++++++-------------------- 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 03cc08c1..d792b429 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -12,7 +12,8 @@ //! 
# Implementing [`SeedableRng`] //! //! In many cases, [`SeedableRng::Seed`] must be converted to `[u32]` or `[u64]`. -//! We provide [`read_u32_into`] and [`read_u64_into`] helpers for this. +//! We provide the [`read_words_into`] helper function for this. The examples below +//! demonstrate how it can be used in practice. //! //! [`SeedableRng`]: crate::SeedableRng //! [`SeedableRng::Seed`]: crate::SeedableRng::Seed @@ -27,10 +28,12 @@ //! i.e. if optimal block size depends on available target features, we reccomend to always //! generate the biggest supported block size. //! -//! See the examples below which demonstrate how functions in this module can be used to implement -//! `RngCore` for common RNG algorithm classes. +//! # Examples //! -//! WARNING: the RNG implementations below are provided for demonstation purposes only and +//! The examples below demonstrate how functions in this module can be used to implement +//! [`RngCore`] and [`SeedableRng`] for common RNG algorithm classes. +//! +//! WARNING: the step RNG implementations below are provided for demonstation purposes only and //! should not be used in practice! //! //! ## Single 32-bit value RNG @@ -102,34 +105,34 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, utils}; //! -//! struct Block32RngCore([u32; 8]); +//! struct Step8x32RngCore([u32; 8]); //! -//! impl Block32RngCore { +//! impl Step8x32RngCore { //! fn next_block(&mut self, block: &mut [u32; 8]) { //! *block = self.0; //! self.0.iter_mut().for_each(|v| *v += 1); //! } //! } //! -//! pub struct Block32Rng { -//! core: Block32RngCore, +//! pub struct Step8x32Rng { +//! core: Step8x32RngCore, //! buffer: [u32; 8], //! } //! -//! impl SeedableRng for Block32Rng { +//! impl SeedableRng for Step8x32Rng { //! type Seed = [u8; 32]; //! //! fn from_seed(seed: Self::Seed) -> Self { //! let mut core_state = [0u32; 8]; //! utils::read_words_into(&seed, &mut core_state); //! Self { -//! core: Block32RngCore(core_state), +//! 
core: Step8x32RngCore(core_state), //! buffer: utils::new_buffer(), //! } //! } //! } //! -//! impl RngCore for Block32Rng { +//! impl RngCore for Step8x32Rng { //! fn next_u32(&mut self) -> u32 { //! utils::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) //! } @@ -143,7 +146,7 @@ //! } //! } //! -//! let mut rng = Block32Rng::seed_from_u64(42); +//! let mut rng = Step8x32Rng::seed_from_u64(42); //! //! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); //! assert_eq!(rng.next_u64(), 0xcca1_b8ea_0a3d_3258); @@ -157,34 +160,34 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, utils}; //! -//! struct Block64RngCore([u64; 4]); +//! struct Step4x64RngCore([u64; 4]); //! -//! impl Block64RngCore { +//! impl Step4x64RngCore { //! fn next_block(&mut self, block: &mut [u64; 4]) { //! *block = self.0; //! self.0.iter_mut().for_each(|v| *v += 1); //! } //! } //! -//! pub struct Block64Rng { -//! core: Block64RngCore, +//! pub struct Step4x64Rng { +//! core: Step4x64RngCore, //! buffer: [u64; 4], //! } //! -//! impl SeedableRng for Block64Rng { +//! impl SeedableRng for Step4x64Rng { //! type Seed = [u8; 32]; //! //! fn from_seed(seed: Self::Seed) -> Self { //! let mut core_state = [0u64; 4]; //! utils::read_words_into(&seed, &mut core_state); //! Self { -//! core: Block64RngCore(core_state), +//! core: Step4x64RngCore(core_state), //! buffer: utils::new_buffer(), //! } //! } //! } //! -//! impl RngCore for Block64Rng { +//! impl RngCore for Step4x64Rng { //! fn next_u32(&mut self) -> u32 { //! self.next_u64() as u32 //! } @@ -198,7 +201,7 @@ //! } //! } //! -//! let mut rng = Block64Rng::seed_from_u64(42); +//! let mut rng = Step4x64Rng::seed_from_u64(42); //! //! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); //! 
assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8ea); From b50f3e3f5580eeba9f2aec1d7c7641f87ec425b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Wed, 12 Nov 2025 18:37:36 +0300 Subject: [PATCH 12/34] Add `SeedableRng` impl to utils examples --- src/utils.rs | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index d792b429..b2f63bbc 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -39,10 +39,18 @@ //! ## Single 32-bit value RNG //! //! ``` -//! use rand_core::{RngCore, utils}; +//! use rand_core::{RngCore, SeedableRng, utils}; //! //! pub struct Step32Rng(u32); //! +//! impl SeedableRng for Step32Rng { +//! type Seed = [u8; 4]; +//! +//! fn from_seed(seed: Self::Seed) -> Self { +//! Self(u32::from_le_bytes(seed)) +//! } +//! } +//! //! impl RngCore for Step32Rng { //! fn next_u32(&mut self) -> u32 { //! let val = self.0; @@ -59,22 +67,30 @@ //! } //! } //! -//! let mut rng = Step32Rng(0); +//! let mut rng = Step32Rng::seed_from_u64(42); //! -//! assert_eq!(rng.next_u32(), 0); -//! assert_eq!(rng.next_u64(), 0x0000_0002_0000_0001); +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! assert_eq!(rng.next_u64(), 0x7ba1_8fa6_7ba1_8fa5); //! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [3, 0, 0, 0, 4]); +//! assert_eq!(buf, [0xa7, 0x8f, 0xa1, 0x7b, 0xa8]); //! ``` //! //! ## Single 64-bit value RNG //! //! ``` -//! use rand_core::{RngCore, utils}; +//! use rand_core::{RngCore, SeedableRng, utils}; //! //! pub struct Step64Rng(u64); //! +//! impl SeedableRng for Step64Rng { +//! type Seed = [u8; 8]; +//! +//! fn from_seed(seed: Self::Seed) -> Self { +//! Self(u64::from_le_bytes(seed)) +//! } +//! } +//! //! impl RngCore for Step64Rng { //! fn next_u32(&mut self) -> u32 { //! self.next_u64() as u32 @@ -91,13 +107,13 @@ //! } //! } //! -//! 
let mut rng = Step64Rng(0); +//! let mut rng = Step64Rng::seed_from_u64(42); //! -//! assert_eq!(rng.next_u32(), 0); -//! assert_eq!(rng.next_u64(), 1); +//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! assert_eq!(rng.next_u64(), 0x0a3d_3258_7ba1_8fa5); //! let mut buf = [0u8; 5]; //! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [2, 0, 0, 0, 0]); +//! assert_eq!(buf, [0xa6, 0x8f, 0xa1, 0x7b, 0x58]); //! ``` //! //! ## 32-bit block RNG @@ -317,7 +333,7 @@ pub fn next_word_via_gen_block( } } -/// Implement `fill_bytes` using 32-bit block buffer and block generation function. +/// Implement `fill_bytes` using buffer and block generation closure. pub fn fill_bytes_via_gen_block( mut dst: &mut [u8], buf: &mut [W; N], From b020090f1d619f193baa9cb30c691ba645697c82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Wed, 12 Nov 2025 18:43:39 +0300 Subject: [PATCH 13/34] Add asserts to `new_buffer` --- src/utils.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/utils.rs b/src/utils.rs index b2f63bbc..baf9696b 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -309,7 +309,12 @@ pub fn read_words_into(src: &[u8], dst: &mut [W]) { } /// Create new block buffer. +/// +/// # Panics +/// If `N` is smaller than 2 or bigger than 2^32 - 1. 
pub fn new_buffer() -> [W; N] { + assert!(N > 2); + assert!(N < 1 << 32); let mut res = [W::from_usize(0); N]; res[0] = W::from_usize(N); res From f2cad627b8e6e2dcfcff1292509b378ef426e80d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 05:26:37 +0300 Subject: [PATCH 14/34] Fix assert --- src/sealed.rs | 3 +++ src/utils.rs | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/sealed.rs b/src/sealed.rs index 4f037314..480151f2 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -1,6 +1,7 @@ /// Sealed trait implemented for `u32` and `u64`. pub trait Sealed: Default + Copy + TryFrom { type Bytes: Sized + AsRef<[u8]> + for<'a> TryFrom<&'a [u8]>; + const MAX: Self; fn from_usize(val: usize) -> Self; fn into_usize(self) -> usize; @@ -11,6 +12,7 @@ pub trait Sealed: Default + Copy + TryFrom { impl Sealed for u32 { type Bytes = [u8; 4]; + const MAX: Self = u32::MAX; fn from_usize(val: usize) -> Self { val.try_into().unwrap() @@ -31,6 +33,7 @@ impl Sealed for u32 { impl Sealed for u64 { type Bytes = [u8; 8]; + const MAX: Self = u64::MAX; fn from_usize(val: usize) -> Self { val.try_into().unwrap() diff --git a/src/utils.rs b/src/utils.rs index baf9696b..e5fa1137 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -311,10 +311,10 @@ pub fn read_words_into(src: &[u8], dst: &mut [W]) { /// Create new block buffer. /// /// # Panics -/// If `N` is smaller than 2 or bigger than 2^32 - 1. +/// If `N` is smaller than 2 or bigger than `W::MAX`. 
pub fn new_buffer() -> [W; N] { assert!(N > 2); - assert!(N < 1 << 32); + assert!(N < W::MAX.into_usize()); let mut res = [W::from_usize(0); N]; res[0] = W::from_usize(N); res From 605f6537570f31737e45e5f8137040c05394a648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 05:40:58 +0300 Subject: [PATCH 15/34] Fix assert on 32 bit targets --- src/sealed.rs | 3 --- src/utils.rs | 5 +++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/sealed.rs b/src/sealed.rs index 480151f2..4f037314 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -1,7 +1,6 @@ /// Sealed trait implemented for `u32` and `u64`. pub trait Sealed: Default + Copy + TryFrom { type Bytes: Sized + AsRef<[u8]> + for<'a> TryFrom<&'a [u8]>; - const MAX: Self; fn from_usize(val: usize) -> Self; fn into_usize(self) -> usize; @@ -12,7 +11,6 @@ pub trait Sealed: Default + Copy + TryFrom { impl Sealed for u32 { type Bytes = [u8; 4]; - const MAX: Self = u32::MAX; fn from_usize(val: usize) -> Self { val.try_into().unwrap() @@ -33,7 +31,6 @@ impl Sealed for u32 { impl Sealed for u64 { type Bytes = [u8; 8]; - const MAX: Self = u64::MAX; fn from_usize(val: usize) -> Self { val.try_into().unwrap() diff --git a/src/utils.rs b/src/utils.rs index e5fa1137..e57bc9f3 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -311,10 +311,11 @@ pub fn read_words_into(src: &[u8], dst: &mut [W]) { /// Create new block buffer. /// /// # Panics -/// If `N` is smaller than 2 or bigger than `W::MAX`. +/// If `N` is smaller than 2 or can not be represented as `W`. pub fn new_buffer() -> [W; N] { assert!(N > 2); - assert!(N < W::MAX.into_usize()); + // Check that `N` can be converted into `W`. 
+ let _ = W::from_usize(N); let mut res = [W::from_usize(0); N]; res[0] = W::from_usize(N); res From 26874956113ec472e2d6e4e6962d3b6eabe2af26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 07:34:15 +0300 Subject: [PATCH 16/34] Improve codegen for `fill_bytes_via_gen_block` --- src/utils.rs | 75 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 43 insertions(+), 32 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index e57bc9f3..fe322ff9 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -347,42 +347,29 @@ pub fn fill_bytes_via_gen_block( ) { let word_size = size_of::(); - let pos = buf[0].into_usize(); - if pos < buf.len() { - let buf_tail = &buf[pos..]; + let pos = buf[0]; + let pos_usize = pos.into_usize(); + if pos_usize < buf.len() { + let buf_tail = &buf[pos_usize..]; let buf_rem = size_of_val(buf_tail); if buf_rem >= dst.len() { - let chunks = dst.chunks_mut(word_size); - let mut pos = buf[0]; - - for (src, dst) in buf_tail.iter().zip(chunks) { - let val = src.to_le_bytes(); - dst.copy_from_slice(&val.as_ref()[..dst.len()]); - pos.increment(); - } - - buf[0] = pos; + let new_pos = read_bytes(&buf, dst, pos); + buf[0] = new_pos; return; } let (l, r) = dst.split_at_mut(buf_rem); + read_bytes(&buf, l, pos); dst = r; - - let chunks = l.chunks_exact_mut(word_size); - for (src, dst) in buf_tail.iter().zip(chunks) { - let val = src.to_le_bytes(); - dst.copy_from_slice(&val.as_ref()[..dst.len()]); - } } let mut blocks = dst.chunks_exact_mut(N * word_size); - let mut temp_buf = [W::from_usize(0); N]; + let zero = W::from_usize(0); + let mut temp_buf = [zero; N]; for block in &mut blocks { generate_block(&mut temp_buf); - for (chunk, word) in block.chunks_exact_mut(word_size).zip(temp_buf.iter()) { - chunk.copy_from_slice(word.to_le_bytes().as_ref()); - } + read_bytes(&temp_buf, block, zero); } let rem = 
blocks.into_remainder(); @@ -390,18 +377,42 @@ pub fn fill_bytes_via_gen_block( W::from_usize(N) } else { generate_block(buf); - let chunks = rem.chunks_mut(word_size); - let mut pos = W::from_usize(0); + read_bytes::(&buf, rem, zero) + }; + buf[0] = new_pos; +} - for (src, dst) in buf.iter().zip(chunks) { - let val = src.to_le_bytes(); - dst.copy_from_slice(&val.as_ref()[..dst.len()]); - pos.increment(); - } +/// Reads bytes from `&block[pos..new_pos]` to `dst` using little endian byte order +/// ignoring the tail bytes if necessary and returns `new_pos`. +/// +/// This function is written in a way which helps the compiler to compile it down +/// to one `memcpy`. The temporary buffer gets eliminated by the compiler, see: +/// https://rust.godbolt.org/z/Kaq7zbsT3 +fn read_bytes(block: &[W; N], dst: &mut [u8], pos: W) -> W { + let word_size = size_of::(); + let pos = pos.into_usize(); + assert!(size_of_val(&block[pos..]) >= size_of_val(dst)); - pos + // TODO: replace with `[0u8; { size_of::() * N }]` on + // stabilization of `generic_const_exprs` + let mut buf = [W::from_usize(0); N]; + // SAFETY: it's safe to reference `[u32/u64; N]` as `&mut [u8]` + // with length equal to `size_of::() * N` + let buf: &mut [u8] = unsafe { + let p: *mut u8 = buf.as_mut_ptr().cast(); + let len = word_size * N; + core::slice::from_raw_parts_mut(p, len) }; - buf[0] = new_pos; + + for (src, dst) in block.iter().zip(buf.chunks_exact_mut(4)) { + let val = src.to_le_bytes(); + dst.copy_from_slice(val.as_ref()) + } + + let offset = pos * word_size; + dst.copy_from_slice(&buf[offset..][..dst.len()]); + let read_words = dst.len().div_ceil(word_size); + W::from_usize(pos + read_words) } /// Sealed trait implemented for `u32` and `u64`. 
From 30eacd92fb2e78cc3959a1cdc9481e6a662d3958 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 07:35:15 +0300 Subject: [PATCH 17/34] Add inline attributes --- src/sealed.rs | 10 ++++++++++ src/utils.rs | 8 +++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/sealed.rs b/src/sealed.rs index 4f037314..aa1746b1 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -12,18 +12,23 @@ pub trait Sealed: Default + Copy + TryFrom { impl Sealed for u32 { type Bytes = [u8; 4]; + #[inline(always)] fn from_usize(val: usize) -> Self { val.try_into().unwrap() } + #[inline(always)] fn into_usize(self) -> usize { self.try_into().unwrap() } + #[inline(always)] fn to_le_bytes(self) -> Self::Bytes { u32::to_le_bytes(self) } + #[inline(always)] fn from_le_bytes(bytes: Self::Bytes) -> Self { u32::from_le_bytes(bytes) } + #[inline(always)] fn increment(&mut self) { *self += 1; } @@ -32,18 +37,23 @@ impl Sealed for u32 { impl Sealed for u64 { type Bytes = [u8; 8]; + #[inline(always)] fn from_usize(val: usize) -> Self { val.try_into().unwrap() } + #[inline(always)] fn into_usize(self) -> usize { self.try_into().unwrap() } + #[inline(always)] fn to_le_bytes(self) -> Self::Bytes { u64::to_le_bytes(self) } + #[inline(always)] fn from_le_bytes(bytes: Self::Bytes) -> Self { u64::from_le_bytes(bytes) } + #[inline(always)] fn increment(&mut self) { *self += 1; } diff --git a/src/utils.rs b/src/utils.rs index fe322ff9..703cc6b9 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -270,6 +270,7 @@ use crate::RngCore; /// Implement `next_u64` via `next_u32` using little-endian order. +#[inline] pub fn next_u64_via_u32(rng: &mut R) -> u64 { // Use LE; we explicitly generate one value before the next. 
let x = u64::from(rng.next_u32()); @@ -278,6 +279,7 @@ pub fn next_u64_via_u32(rng: &mut R) -> u64 { } /// Implement `fill_bytes` via `next_u64` using little-endian order. +#[inline] pub fn fill_bytes_via_next_word(dest: &mut [u8], mut next_word: impl FnMut() -> W) { let mut chunks = dest.chunks_exact_mut(size_of::()); for chunk in &mut chunks { @@ -312,6 +314,7 @@ pub fn read_words_into(src: &[u8], dst: &mut [W]) { /// /// # Panics /// If `N` is smaller than 2 or can not be represented as `W`. +#[inline] pub fn new_buffer() -> [W; N] { assert!(N > 2); // Check that `N` can be converted into `W`. @@ -322,6 +325,7 @@ pub fn new_buffer() -> [W; N] { } /// Implement `next_u32/u64` function using buffer and block generation closure. +#[inline] pub fn next_word_via_gen_block( buf: &mut [W; N], mut generate_block: impl FnMut(&mut [W; N]), @@ -340,6 +344,7 @@ pub fn next_word_via_gen_block( } /// Implement `fill_bytes` using buffer and block generation closure. +#[inline] pub fn fill_bytes_via_gen_block( mut dst: &mut [u8], buf: &mut [W; N], @@ -387,7 +392,8 @@ pub fn fill_bytes_via_gen_block( /// /// This function is written in a way which helps the compiler to compile it down /// to one `memcpy`. 
The temporary buffer gets eliminated by the compiler, see: -/// https://rust.godbolt.org/z/Kaq7zbsT3 +/// https://rust.godbolt.org/z/xbo88cbsn +#[inline] fn read_bytes(block: &[W; N], dst: &mut [u8], pos: W) -> W { let word_size = size_of::(); let pos = pos.into_usize(); From bd9e3c204f67f601177ee57b96d804f7017756aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 07:42:08 +0300 Subject: [PATCH 18/34] Fix Clippy lints --- src/utils.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 703cc6b9..56c319ea 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -359,22 +359,25 @@ pub fn fill_bytes_via_gen_block( let buf_rem = size_of_val(buf_tail); if buf_rem >= dst.len() { - let new_pos = read_bytes(&buf, dst, pos); + let new_pos = read_bytes(buf, dst, pos); buf[0] = new_pos; return; } let (l, r) = dst.split_at_mut(buf_rem); - read_bytes(&buf, l, pos); + read_bytes(buf, l, pos); dst = r; } let mut blocks = dst.chunks_exact_mut(N * word_size); let zero = W::from_usize(0); - let mut temp_buf = [zero; N]; for block in &mut blocks { - generate_block(&mut temp_buf); - read_bytes(&temp_buf, block, zero); + // We intentionally use the temporary buffer to prevent unnecessary writes + // to the original `buf` and to enable potential optimization of writing + // generated data directly into `block`. 
+ let mut buf = [zero; N]; + generate_block(&mut buf); + read_bytes(&buf, block, zero); } let rem = blocks.into_remainder(); @@ -382,7 +385,7 @@ pub fn fill_bytes_via_gen_block( W::from_usize(N) } else { generate_block(buf); - read_bytes::(&buf, rem, zero) + read_bytes::(buf, rem, zero) }; buf[0] = new_pos; } From fc7a0210008e5f5c5e4095e207b3d5eca7b513ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 07:45:55 +0300 Subject: [PATCH 19/34] Fix loop in `read_bytes` --- src/utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 56c319ea..f8064b2e 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -395,7 +395,7 @@ pub fn fill_bytes_via_gen_block( /// /// This function is written in a way which helps the compiler to compile it down /// to one `memcpy`. The temporary buffer gets eliminated by the compiler, see: -/// https://rust.godbolt.org/z/xbo88cbsn +/// https://rust.godbolt.org/z/T8f77KjGc #[inline] fn read_bytes(block: &[W; N], dst: &mut [u8], pos: W) -> W { let word_size = size_of::(); @@ -413,7 +413,7 @@ fn read_bytes(block: &[W; N], dst: &mut [u8], pos: W) - core::slice::from_raw_parts_mut(p, len) }; - for (src, dst) in block.iter().zip(buf.chunks_exact_mut(4)) { + for (src, dst) in block.iter().zip(buf.chunks_exact_mut(word_size)) { let val = src.to_le_bytes(); dst.copy_from_slice(val.as_ref()) } From 1f1846063c968d7a6655e050bbc96378bd71a655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 12:24:16 +0300 Subject: [PATCH 20/34] rename `utils` to `le` --- src/{utils.rs => le.rs} | 32 ++++++++++++++++---------------- src/lib.rs | 8 ++++---- 2 files changed, 20 insertions(+), 20 deletions(-) rename src/{utils.rs => le.rs} (92%) diff --git a/src/utils.rs b/src/le.rs 
similarity index 92% rename from src/utils.rs rename to src/le.rs index f8064b2e..175aba61 100644 --- a/src/utils.rs +++ b/src/le.rs @@ -39,7 +39,7 @@ //! ## Single 32-bit value RNG //! //! ``` -//! use rand_core::{RngCore, SeedableRng, utils}; +//! use rand_core::{RngCore, SeedableRng, le}; //! //! pub struct Step32Rng(u32); //! @@ -59,11 +59,11 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! utils::next_u64_via_u32(self) +//! le::next_u64_via_u32(self) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_word(dst, || self.next_u32()); +//! le::fill_bytes_via_next_word(dst, || self.next_u32()); //! } //! } //! @@ -79,7 +79,7 @@ //! ## Single 64-bit value RNG //! //! ``` -//! use rand_core::{RngCore, SeedableRng, utils}; +//! use rand_core::{RngCore, SeedableRng, le}; //! //! pub struct Step64Rng(u64); //! @@ -103,7 +103,7 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_word(dst, || self.next_u64()); +//! le::fill_bytes_via_next_word(dst, || self.next_u64()); //! } //! } //! @@ -119,7 +119,7 @@ //! ## 32-bit block RNG //! //! ``` -//! use rand_core::{RngCore, SeedableRng, utils}; +//! use rand_core::{RngCore, SeedableRng, le}; //! //! struct Step8x32RngCore([u32; 8]); //! @@ -140,25 +140,25 @@ //! //! fn from_seed(seed: Self::Seed) -> Self { //! let mut core_state = [0u32; 8]; -//! utils::read_words_into(&seed, &mut core_state); +//! le::read_words_into(&seed, &mut core_state); //! Self { //! core: Step8x32RngCore(core_state), -//! buffer: utils::new_buffer(), +//! buffer: le::new_buffer(), //! } //! } //! } //! //! impl RngCore for Step8x32Rng { //! fn next_u32(&mut self) -> u32 { -//! utils::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) +//! le::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) //! } //! //! fn next_u64(&mut self) -> u64 { -//! utils::next_u64_via_u32(self) +//! le::next_u64_via_u32(self) //! } //! 
//! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_word(dst, || self.next_u32()); +//! le::fill_bytes_via_next_word(dst, || self.next_u32()); //! } //! } //! @@ -174,7 +174,7 @@ //! ## 64-bit block RNG //! //! ``` -//! use rand_core::{RngCore, SeedableRng, utils}; +//! use rand_core::{RngCore, SeedableRng, le}; //! //! struct Step4x64RngCore([u64; 4]); //! @@ -195,10 +195,10 @@ //! //! fn from_seed(seed: Self::Seed) -> Self { //! let mut core_state = [0u64; 4]; -//! utils::read_words_into(&seed, &mut core_state); +//! le::read_words_into(&seed, &mut core_state); //! Self { //! core: Step4x64RngCore(core_state), -//! buffer: utils::new_buffer(), +//! buffer: le::new_buffer(), //! } //! } //! } @@ -209,11 +209,11 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! utils::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) +//! le::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! utils::fill_bytes_via_next_word(dst, || self.next_u64()); +//! le::fill_bytes_via_next_word(dst, || self.next_u64()); //! } //! } //! diff --git a/src/lib.rs b/src/lib.rs index 95c4d8eb..41e52638 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ //! [`SeedableRng`] is an extension trait for construction from fixed seeds and //! other random number generators. //! -//! The [`utils`] sub-module includes a few small functions to assist +//! The [`le`] sub-module includes a few small functions to assist //! implementation of [`RngCore`] and [`SeedableRng`]. //! //! 
[`rand`]: https://docs.rs/rand @@ -30,7 +30,7 @@ use core::{fmt, ops::DerefMut}; mod sealed; -pub mod utils; +pub mod le; /// Implementation-level interface for RNGs /// @@ -64,7 +64,7 @@ pub mod utils; /// /// Typically an RNG will implement only one of the methods available /// in this trait directly, then use the helper functions from the -/// [`utils`] module to implement the other methods. +/// [`le`] module to implement the other methods. /// /// Note that implementors of [`RngCore`] also automatically implement /// the [`TryRngCore`] trait with the `Error` associated type being @@ -521,7 +521,7 @@ mod test { fn from_seed(seed: Self::Seed) -> Self { let mut x = [0u64; 1]; - utils::read_words_into(&seed, &mut x); + le::read_words_into(&seed, &mut x); SeedableNum(x[0]) } } From 0eb5d8614ea3f2a89b8b9e4055e8be2c34fdb5be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 12:25:48 +0300 Subject: [PATCH 21/34] fmt --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 41e52638..c41b3899 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -29,8 +29,8 @@ use core::{fmt, ops::DerefMut}; -mod sealed; pub mod le; +mod sealed; /// Implementation-level interface for RNGs /// From f5fcfcbfd777600343b9df2505cf76c90534594e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 12:29:59 +0300 Subject: [PATCH 22/34] add debug asserts --- src/le.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/le.rs b/src/le.rs index 175aba61..b6c4d5ad 100644 --- a/src/le.rs +++ b/src/le.rs @@ -331,6 +331,7 @@ pub fn next_word_via_gen_block( mut generate_block: impl FnMut(&mut [W; N]), ) -> W { let pos = buf[0].into_usize(); + debug_assert_ne!(pos, 0, "cursor position should not be zero"); match 
buf.get(pos) { Some(&val) => { buf[0].increment(); @@ -354,6 +355,7 @@ pub fn fill_bytes_via_gen_block( let pos = buf[0]; let pos_usize = pos.into_usize(); + debug_assert_ne!(pos_usize, 0, "cursor position should not be zero"); if pos_usize < buf.len() { let buf_tail = &buf[pos_usize..]; let buf_rem = size_of_val(buf_tail); From afcdb6f1e9e50ca22a311d31173f67105c5b2394 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 12:44:50 +0300 Subject: [PATCH 23/34] Hide some lines from doc examples --- src/le.rs | 73 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 31 deletions(-) diff --git a/src/le.rs b/src/le.rs index b6c4d5ad..5c579cd4 100644 --- a/src/le.rs +++ b/src/le.rs @@ -176,34 +176,43 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, le}; //! -//! struct Step4x64RngCore([u64; 4]); +//! struct Block64RngCore { +//! // ... +//! # state: [u64; 4], +//! } +//! +//! impl Block64RngCore { +//! fn new(seed: [u64; 4]) -> Self { +//! // ... +//! # Self { state: seed } +//! } //! -//! impl Step4x64RngCore { //! fn next_block(&mut self, block: &mut [u64; 4]) { -//! *block = self.0; -//! self.0.iter_mut().for_each(|v| *v += 1); +//! // ... +//! # *block = self.state; +//! # self.state.iter_mut().for_each(|v| *v += 1); //! } //! } //! -//! pub struct Step4x64Rng { -//! core: Step4x64RngCore, +//! pub struct Block64Rng { +//! core: Block64RngCore, //! buffer: [u64; 4], //! } //! -//! impl SeedableRng for Step4x64Rng { +//! impl SeedableRng for Block64Rng { //! type Seed = [u8; 32]; //! //! fn from_seed(seed: Self::Seed) -> Self { -//! let mut core_state = [0u64; 4]; -//! le::read_words_into(&seed, &mut core_state); +//! let mut seed_u64 = [0u64; 4]; +//! le::read_words_into(&seed, &mut seed_u64); //! Self { -//! core: Step4x64RngCore(core_state), +//! core: Block64RngCore::new(seed_u64), //! 
buffer: le::new_buffer(), //! } //! } //! } //! -//! impl RngCore for Step4x64Rng { +//! impl RngCore for Block64Rng { //! fn next_u32(&mut self) -> u32 { //! self.next_u64() as u32 //! } @@ -217,13 +226,12 @@ //! } //! } //! -//! let mut rng = Step4x64Rng::seed_from_u64(42); -//! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); -//! assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8ea); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x2b, 0x8c, 0xc8, 0x75, 0x18]); +//! # let mut rng = Block64Rng::seed_from_u64(42); +//! # assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! # assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8ea); +//! # let mut buf = [0u8; 5]; +//! # rng.fill_bytes(&mut buf); +//! # assert_eq!(buf, [0x2b, 0x8c, 0xc8, 0x75, 0x18]); //! ``` //! //! ## Fill-based RNG @@ -231,7 +239,10 @@ //! ``` //! use rand_core::RngCore; //! -//! pub struct FillRng(u8); +//! pub struct FillRng { +//! // ... +//! # state: u8, +//! } //! //! impl RngCore for FillRng { //! fn next_u32(&mut self) -> u32 { @@ -247,21 +258,21 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! for byte in dst { -//! let val = self.0; -//! self.0 = val + 1; -//! *byte = val; -//! } +//! // ... +//! # for byte in dst { +//! # let val = self.state; +//! # self.state = val + 1; +//! # *byte = val; +//! # } //! } //! } //! -//! let mut rng = FillRng(0); -//! -//! assert_eq!(rng.next_u32(), 0x03_020100); -//! assert_eq!(rng.next_u64(), 0x0b0a_0908_0706_0504); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); +//! # let mut rng = FillRng { state: 0 }; +//! # assert_eq!(rng.next_u32(), 0x03_020100); +//! # assert_eq!(rng.next_u64(), 0x0b0a_0908_0706_0504); +//! # let mut buf = [0u8; 5]; +//! # rng.fill_bytes(&mut buf); +//! # assert_eq!(buf, [0x0c, 0x0d, 0x0e, 0x0f, 0x10]); //! ``` //! //! 
Note that you can use `from_ne_bytes` instead of `from_le_bytes` From fd8b982fed294bff54bc105bcbd372ae4f436ebe Mon Sep 17 00:00:00 2001 From: Artyom Pavlov Date: Fri, 14 Nov 2025 14:54:54 +0300 Subject: [PATCH 24/34] Add comment for `Self(u32::from_le_bytes(seed))` Co-authored-by: Diggory Hardy --- src/le.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/le.rs b/src/le.rs index 5c579cd4..c54da865 100644 --- a/src/le.rs +++ b/src/le.rs @@ -47,6 +47,7 @@ //! type Seed = [u8; 4]; //! //! fn from_seed(seed: Self::Seed) -> Self { +//! // Always use Little-Endian conversion to ensure portable results //! Self(u32::from_le_bytes(seed)) //! } //! } From 4643695ec952f5db2f9e11a27ed8467bb7d5622a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 15:20:22 +0300 Subject: [PATCH 25/34] Twak docs --- src/le.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/le.rs b/src/le.rs index c54da865..672e3343 100644 --- a/src/le.rs +++ b/src/le.rs @@ -23,10 +23,13 @@ //! Usually an implementation of [`RngCore`] will implement one of the three methods //! over its internal source, while remaining methods are implemented on top of it. //! -//! Additionally, some RNGs generate blocks of data. In that case the implementations have to -//! handle buffering of the generated block. If an implementation supports SIMD-based optimizations, -//! i.e. if optimal block size depends on available target features, we reccomend to always -//! generate the biggest supported block size. +//! Some RNGs instead generate fixed-size blocks of data. In this case the implementations must +//! handle buffering of the generated blocks. +//! +//! If an implementation can generate several blocks simultaneously (e.g. using SIMD), we recommend +//! to treat multiple generated blocks as one big block (i.e. you should treat `[[u32; N]; M]` +//! 
as `[u32; N * M]`). If number of simultaneously generated blocks depends on target features, +//! we recommend to use the largest supported number of blocks for all target features. //! //! # Examples //! From 69d992aa4c58ea67672545335382a6e86527a869 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 15:23:42 +0300 Subject: [PATCH 26/34] Use `fill_bytes_via_gen_block` in examples --- src/le.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/le.rs b/src/le.rs index 672e3343..0208ea30 100644 --- a/src/le.rs +++ b/src/le.rs @@ -154,7 +154,8 @@ //! //! impl RngCore for Step8x32Rng { //! fn next_u32(&mut self) -> u32 { -//! le::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) +//! let Self { buffer, core } = self; +//! le::next_word_via_gen_block(buffer, |block| core.next_block(block)) //! } //! //! fn next_u64(&mut self) -> u64 { @@ -162,7 +163,8 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! le::fill_bytes_via_next_word(dst, || self.next_u32()); +//! let Self { buffer, core } = self; +//! le::fill_bytes_via_gen_block(dst, buffer, |block| core.next_block(block)); //! } //! } //! @@ -222,11 +224,13 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! le::next_word_via_gen_block(&mut self.buffer, |block| self.core.next_block(block)) +//! let Self { buffer, core } = self; +//! le::next_word_via_gen_block(buffer, |block| core.next_block(block)) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! le::fill_bytes_via_next_word(dst, || self.next_u64()); +//! let Self { buffer, core } = self; +//! le::fill_bytes_via_gen_block(dst, buffer, |block| core.next_block(block)); //! } //! } //! 
From ee92dd8940eaddf109c01d7d76dc709f211e9514 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 15:36:05 +0300 Subject: [PATCH 27/34] Tweak examples and change `read_words_into` --- src/le.rs | 100 ++++++++++++++++++++++++++++------------------------- src/lib.rs | 3 +- 2 files changed, 53 insertions(+), 50 deletions(-) diff --git a/src/le.rs b/src/le.rs index 0208ea30..12ad3910 100644 --- a/src/le.rs +++ b/src/le.rs @@ -125,37 +125,45 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, le}; //! -//! struct Step8x32RngCore([u32; 8]); +//! struct Block8x32RngInner { +//! // ... +//! # state: [u32; 8] +//! } +//! +//! impl Block8x32RngInner { +//! fn new(seed: [u32; 8]) -> Self { +//! // ... +//! # Self { state: seed } +//! } //! -//! impl Step8x32RngCore { //! fn next_block(&mut self, block: &mut [u32; 8]) { -//! *block = self.0; -//! self.0.iter_mut().for_each(|v| *v += 1); +//! // ... +//! # *block = self.state; +//! # self.state.iter_mut().for_each(|v| *v += 1); //! } //! } //! -//! pub struct Step8x32Rng { -//! core: Step8x32RngCore, +//! pub struct Block8x32Rng { +//! inner: Block8x32RngInner, //! buffer: [u32; 8], //! } //! -//! impl SeedableRng for Step8x32Rng { +//! impl SeedableRng for Block8x32Rng { //! type Seed = [u8; 32]; //! //! fn from_seed(seed: Self::Seed) -> Self { -//! let mut core_state = [0u32; 8]; -//! le::read_words_into(&seed, &mut core_state); +//! let seed: [u32; 8] = le::read_words(&seed); //! Self { -//! core: Step8x32RngCore(core_state), +//! inner: Block8x32RngInner::new(seed), //! buffer: le::new_buffer(), //! } //! } //! } //! -//! impl RngCore for Step8x32Rng { +//! impl RngCore for Block8x32Rng { //! fn next_u32(&mut self) -> u32 { -//! let Self { buffer, core } = self; -//! le::next_word_via_gen_block(buffer, |block| core.next_block(block)) +//! let Self { inner, buffer } = self; +//! 
le::next_word_via_gen_block(buffer, |block| inner.next_block(block)) //! } //! //! fn next_u64(&mut self) -> u64 { @@ -163,18 +171,17 @@ //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! let Self { buffer, core } = self; -//! le::fill_bytes_via_gen_block(dst, buffer, |block| core.next_block(block)); +//! let Self { inner, buffer } = self; +//! le::fill_bytes_via_gen_block(dst, buffer, |block| inner.next_block(block)); //! } //! } //! -//! let mut rng = Step8x32Rng::seed_from_u64(42); -//! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); -//! assert_eq!(rng.next_u64(), 0xcca1_b8ea_0a3d_3258); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0x69, 0x01, 0x14, 0xb8, 0x2b]); +//! # let mut rng = Block8x32Rng::seed_from_u64(42); +//! # assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! # assert_eq!(rng.next_u64(), 0xcca1_b8ea_0a3d_3258); +//! # let mut buf = [0u8; 5]; +//! # rng.fill_bytes(&mut buf); +//! # assert_eq!(buf, [0x69, 0x01, 0x14, 0xb8, 0x2b]); //! ``` //! //! ## 64-bit block RNG @@ -182,12 +189,12 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, le}; //! -//! struct Block64RngCore { +//! struct Block4x64RngInner { //! // ... //! # state: [u64; 4], //! } //! -//! impl Block64RngCore { +//! impl Block4x64RngInner { //! fn new(seed: [u64; 4]) -> Self { //! // ... //! # Self { state: seed } @@ -200,41 +207,40 @@ //! } //! } //! -//! pub struct Block64Rng { -//! core: Block64RngCore, +//! pub struct Block4x64Rng { +//! inner: Block4x64RngInner, //! buffer: [u64; 4], //! } //! -//! impl SeedableRng for Block64Rng { +//! impl SeedableRng for Block4x64Rng { //! type Seed = [u8; 32]; //! //! fn from_seed(seed: Self::Seed) -> Self { -//! let mut seed_u64 = [0u64; 4]; -//! le::read_words_into(&seed, &mut seed_u64); +//! let seed: [u64; 4] = le::read_words(&seed); //! Self { -//! core: Block64RngCore::new(seed_u64), +//! inner: Block4x64RngInner::new(seed), //! buffer: le::new_buffer(), //! } //! } //! } //! -//! 
impl RngCore for Block64Rng { +//! impl RngCore for Block4x64Rng { //! fn next_u32(&mut self) -> u32 { //! self.next_u64() as u32 //! } //! //! fn next_u64(&mut self) -> u64 { -//! let Self { buffer, core } = self; -//! le::next_word_via_gen_block(buffer, |block| core.next_block(block)) +//! let Self { inner, buffer } = self; +//! le::next_word_via_gen_block(buffer, |block| inner.next_block(block)) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { -//! let Self { buffer, core } = self; -//! le::fill_bytes_via_gen_block(dst, buffer, |block| core.next_block(block)); +//! let Self { inner, buffer } = self; +//! le::fill_bytes_via_gen_block(dst, buffer, |block| inner.next_block(block)); //! } //! } //! -//! # let mut rng = Block64Rng::seed_from_u64(42); +//! # let mut rng = Block4x64Rng::seed_from_u64(42); //! # assert_eq!(rng.next_u32(), 0x7ba1_8fa4); //! # assert_eq!(rng.next_u64(), 0xb814_0169_cca1_b8ea); //! # let mut buf = [0u8; 5]; @@ -312,14 +318,15 @@ pub fn fill_bytes_via_next_word(dest: &mut [u8], mut next_word: impl Fn } } -/// Fills slice of words `dst` from byte slice `src` using little endian order. +/// Reads array of words from byte slice `src` using little endian order. /// /// # Panics /// -/// If `size_of_val(src) != size_of_val(dst)`. +/// If `size_of_val(src) != size_of::<[W; N]>()`. #[inline] -pub fn read_words_into(src: &[u8], dst: &mut [W]) { - assert_eq!(size_of_val(src), size_of_val(dst)); +pub fn read_words(src: &[u8]) -> [W; N] { + assert_eq!(size_of_val(src), size_of::<[W; N]>()); + let mut dst = [W::from_usize(0); N]; let chunks = src.chunks_exact(size_of::()); for (out, chunk) in dst.iter_mut().zip(chunks) { let Ok(bytes) = chunk.try_into() else { @@ -327,6 +334,7 @@ pub fn read_words_into(src: &[u8], dst: &mut [W]) { }; *out = W::from_le_bytes(bytes); } + dst } /// Create new block buffer. 
@@ -459,23 +467,19 @@ mod test { fn test_read() { let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let mut buf = [0u32; 4]; - read_words_into(&bytes, &mut buf); + let buf: [u32; 4] = read_words(&bytes); assert_eq!(buf[0], 0x04030201); assert_eq!(buf[3], 0x100F0E0D); - let mut buf = [0u32; 3]; - read_words_into(&bytes[1..13], &mut buf); // unaligned + let buf: [u32; 3] = read_words(&bytes[1..13]); // unaligned assert_eq!(buf[0], 0x05040302); assert_eq!(buf[2], 0x0D0C0B0A); - let mut buf = [0u64; 2]; - read_words_into(&bytes, &mut buf); + let buf: [u64; 2] = read_words(&bytes); assert_eq!(buf[0], 0x0807060504030201); assert_eq!(buf[1], 0x100F0E0D0C0B0A09); - let mut buf = [0u64; 1]; - read_words_into(&bytes[7..15], &mut buf); // unaligned + let buf: [u64; 1] = read_words(&bytes[7..15]); // unaligned assert_eq!(buf[0], 0x0F0E0D0C0B0A0908); } } diff --git a/src/lib.rs b/src/lib.rs index c41b3899..05690a05 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -520,8 +520,7 @@ mod test { type Seed = [u8; 8]; fn from_seed(seed: Self::Seed) -> Self { - let mut x = [0u64; 1]; - le::read_words_into(&seed, &mut x); + let x: [u64; 1] = le::read_words(&seed); SeedableNum(x[0]) } } From 13cce6190ccea596c06724daf25b1795c7009153 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 15:45:31 +0300 Subject: [PATCH 28/34] Tweak docs --- src/le.rs | 70 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 37 insertions(+), 33 deletions(-) diff --git a/src/le.rs b/src/le.rs index 12ad3910..033772f4 100644 --- a/src/le.rs +++ b/src/le.rs @@ -12,7 +12,7 @@ //! # Implementing [`SeedableRng`] //! //! In many cases, [`SeedableRng::Seed`] must be converted to `[u32]` or `[u64]`. -//! We provide the [`read_words_into`] helper function for this. The examples below +//! We provide the [`read_words`] helper function for this. The examples below //! 
demonstrate how it can be used in practice. //! //! [`SeedableRng`]: crate::SeedableRng @@ -27,39 +27,41 @@ //! handle buffering of the generated blocks. //! //! If an implementation can generate several blocks simultaneously (e.g. using SIMD), we recommend -//! to treat multiple generated blocks as one big block (i.e. you should treat `[[u32; N]; M]` -//! as `[u32; N * M]`). If number of simultaneously generated blocks depends on target features, -//! we recommend to use the largest supported number of blocks for all target features. +//! to treat multiple generated blocks as a single large block (i.e. you should treat +//! `[[u32; N]; M]` as `[u32; N * M]`). If the number of simultaneously generated blocks depends +//! on CPU target features, we recommend to use the largest supported number of blocks +//! for all target features. //! //! # Examples //! //! The examples below demonstrate how functions in this module can be used to implement //! [`RngCore`] and [`SeedableRng`] for common RNG algorithm classes. //! -//! WARNING: the step RNG implementations below are provided for demonstation purposes only and -//! should not be used in practice! -//! //! ## Single 32-bit value RNG //! //! ``` //! use rand_core::{RngCore, SeedableRng, le}; //! -//! pub struct Step32Rng(u32); +//! pub struct Step32Rng { +//! state: u32 +//! } //! //! impl SeedableRng for Step32Rng { //! type Seed = [u8; 4]; //! //! fn from_seed(seed: Self::Seed) -> Self { -//! // Always use Little-Endian conversion to ensure portable results -//! Self(u32::from_le_bytes(seed)) +//! // Always use little-endian byte order to ensure portable results +//! let state = u32::from_le_bytes(seed); +//! Self { state } //! } //! } //! //! impl RngCore for Step32Rng { //! fn next_u32(&mut self) -> u32 { -//! let val = self.0; -//! self.0 = val + 1; -//! val +//! // ... +//! # let val = self.state; +//! # self.state = val + 1; +//! # val //! } //! //! fn next_u64(&mut self) -> u64 { @@ -71,13 +73,12 @@ //! } //! 
} //! -//! let mut rng = Step32Rng::seed_from_u64(42); -//! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); -//! assert_eq!(rng.next_u64(), 0x7ba1_8fa6_7ba1_8fa5); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0xa7, 0x8f, 0xa1, 0x7b, 0xa8]); +//! # let mut rng = Step32Rng::seed_from_u64(42); +//! # assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! # assert_eq!(rng.next_u64(), 0x7ba1_8fa6_7ba1_8fa5); +//! # let mut buf = [0u8; 5]; +//! # rng.fill_bytes(&mut buf); +//! # assert_eq!(buf, [0xa7, 0x8f, 0xa1, 0x7b, 0xa8]); //! ``` //! //! ## Single 64-bit value RNG @@ -85,13 +86,17 @@ //! ``` //! use rand_core::{RngCore, SeedableRng, le}; //! -//! pub struct Step64Rng(u64); +//! pub struct Step64Rng { +//! state: u64 +//! } //! //! impl SeedableRng for Step64Rng { //! type Seed = [u8; 8]; //! //! fn from_seed(seed: Self::Seed) -> Self { -//! Self(u64::from_le_bytes(seed)) +//! // Always use little-endian byte order to ensure portable results +//! let state = u64::from_le_bytes(seed); +//! Self { state } //! } //! } //! @@ -101,9 +106,10 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! let val = self.0; -//! self.0 = val + 1; -//! val +//! // ... +//! # let val = self.state; +//! # self.state = val + 1; +//! # val //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { @@ -111,13 +117,12 @@ //! } //! } //! -//! let mut rng = Step64Rng::seed_from_u64(42); -//! -//! assert_eq!(rng.next_u32(), 0x7ba1_8fa4); -//! assert_eq!(rng.next_u64(), 0x0a3d_3258_7ba1_8fa5); -//! let mut buf = [0u8; 5]; -//! rng.fill_bytes(&mut buf); -//! assert_eq!(buf, [0xa6, 0x8f, 0xa1, 0x7b, 0x58]); +//! # let mut rng = Step64Rng::seed_from_u64(42); +//! # assert_eq!(rng.next_u32(), 0x7ba1_8fa4); +//! # assert_eq!(rng.next_u64(), 0x0a3d_3258_7ba1_8fa5); +//! # let mut buf = [0u8; 5]; +//! # rng.fill_bytes(&mut buf); +//! # assert_eq!(buf, [0xa6, 0x8f, 0xa1, 0x7b, 0x58]); //! ``` //! //! 
## 32-bit block RNG @@ -321,7 +326,6 @@ pub fn fill_bytes_via_next_word(dest: &mut [u8], mut next_word: impl Fn /// Reads array of words from byte slice `src` using little endian order. /// /// # Panics -/// /// If `size_of_val(src) != size_of::<[W; N]>()`. #[inline] pub fn read_words(src: &[u8]) -> [W; N] { From 794aa2c19210b39f569589dc1e3ce8498602aca3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 16:33:12 +0300 Subject: [PATCH 29/34] fix `new_buffer` docs --- src/le.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/le.rs b/src/le.rs index 033772f4..fa44854d 100644 --- a/src/le.rs +++ b/src/le.rs @@ -344,7 +344,7 @@ pub fn read_words(src: &[u8]) -> [W; N] { /// Create new block buffer. /// /// # Panics -/// If `N` is smaller than 2 or can not be represented as `W`. +/// If `N` is smaller than 3 or can not be represented as `W`. #[inline] pub fn new_buffer() -> [W; N] { assert!(N > 2); From 3a1e1999627712c62fc9456247042605b75756fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 17:26:28 +0300 Subject: [PATCH 30/34] Add `next_u64_via_gen_block` --- src/le.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/le.rs b/src/le.rs index fa44854d..c4bae7a9 100644 --- a/src/le.rs +++ b/src/le.rs @@ -172,7 +172,8 @@ //! } //! //! fn next_u64(&mut self) -> u64 { -//! le::next_u64_via_u32(self) +//! let Self { inner, buffer } = self; +//! le::next_u64_via_gen_block(buffer, |block| inner.next_block(block)) //! } //! //! fn fill_bytes(&mut self, dst: &mut [u8]) { @@ -375,6 +376,34 @@ pub fn next_word_via_gen_block( } } +/// Implement `next_u64` function using buffer and block generation closure. 
+#[inline] +pub fn next_u64_via_gen_block( + buf: &mut [u32; N], + mut generate_block: impl FnMut(&mut [u32; N]), +) -> u64 { + use core::mem::replace; + let pos = usize::try_from(buf[0]).unwrap(); + + let (x, y) = if pos < N - 1 { + let xy = (buf[pos], buf[pos + 1]); + buf[0] += 2; + xy + } else if pos == N - 1 { + let x = buf[pos]; + generate_block(buf); + let y = replace(&mut buf[0], 1); + (x, y) + } else { + generate_block(buf); + let x = replace(&mut buf[0], 2); + let y = buf[1]; + (x, y) + }; + + u64::from(y) << 32 | u64::from(x) +} + /// Implement `fill_bytes` using buffer and block generation closure. #[inline] pub fn fill_bytes_via_gen_block( From 9bfbb13f8bdcfac49f67adbb6157be8878c860c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 17:31:28 +0300 Subject: [PATCH 31/34] Rename `dest` to `dst` --- src/le.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/le.rs b/src/le.rs index c4bae7a9..12588c0d 100644 --- a/src/le.rs +++ b/src/le.rs @@ -311,8 +311,8 @@ pub fn next_u64_via_u32(rng: &mut R) -> u64 { /// Implement `fill_bytes` via `next_u64` using little-endian order. 
#[inline] -pub fn fill_bytes_via_next_word(dest: &mut [u8], mut next_word: impl FnMut() -> W) { - let mut chunks = dest.chunks_exact_mut(size_of::()); +pub fn fill_bytes_via_next_word(dst: &mut [u8], mut next_word: impl FnMut() -> W) { + let mut chunks = dst.chunks_exact_mut(size_of::()); for chunk in &mut chunks { let val = next_word(); chunk.copy_from_slice(val.to_le_bytes().as_ref()); From 8432c8dc3329e00292985439dd1711eaa1e17d3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 17:33:26 +0300 Subject: [PATCH 32/34] Remove redundant `W::from_usize(N)` --- src/le.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/le.rs b/src/le.rs index 12588c0d..a2071007 100644 --- a/src/le.rs +++ b/src/le.rs @@ -349,8 +349,6 @@ pub fn read_words(src: &[u8]) -> [W; N] { #[inline] pub fn new_buffer() -> [W; N] { assert!(N > 2); - // Check that `N` can be converted into `W`. - let _ = W::from_usize(N); let mut res = [W::from_usize(0); N]; res[0] = W::from_usize(N); res From 66714561f070319e55bec7514d88a26fef8bcb72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 17:34:54 +0300 Subject: [PATCH 33/34] Tweak `new_buffer` panic conditions --- src/le.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/le.rs b/src/le.rs index a2071007..801204ad 100644 --- a/src/le.rs +++ b/src/le.rs @@ -345,10 +345,9 @@ pub fn read_words(src: &[u8]) -> [W; N] { /// Create new block buffer. /// /// # Panics -/// If `N` is smaller than 3 or can not be represented as `W`. +/// If `N` is equal to 0 or can not be represented as `W`. 
#[inline] pub fn new_buffer() -> [W; N] { - assert!(N > 2); let mut res = [W::from_usize(0); N]; res[0] = W::from_usize(N); res From a78264a3d805e651c92d97310af0ce98129a1deb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?= =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?= Date: Fri, 14 Nov 2025 18:52:16 +0300 Subject: [PATCH 34/34] tweak example names --- src/le.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/le.rs b/src/le.rs index 801204ad..b0e116ea 100644 --- a/src/le.rs +++ b/src/le.rs @@ -37,7 +37,7 @@ //! The examples below demonstrate how functions in this module can be used to implement //! [`RngCore`] and [`SeedableRng`] for common RNG algorithm classes. //! -//! ## Single 32-bit value RNG +//! ## RNG outputs `u32` //! //! ``` //! use rand_core::{RngCore, SeedableRng, le}; @@ -81,7 +81,7 @@ //! # assert_eq!(buf, [0xa7, 0x8f, 0xa1, 0x7b, 0xa8]); //! ``` //! -//! ## Single 64-bit value RNG +//! ## RNG outputs `u64` //! //! ``` //! use rand_core::{RngCore, SeedableRng, le}; @@ -125,7 +125,7 @@ //! # assert_eq!(buf, [0xa6, 0x8f, 0xa1, 0x7b, 0x58]); //! ``` //! -//! ## 32-bit block RNG +//! ## RNG outputs `[u32; N]` //! //! ``` //! use rand_core::{RngCore, SeedableRng, le}; @@ -190,7 +190,7 @@ //! # assert_eq!(buf, [0x69, 0x01, 0x14, 0xb8, 0x2b]); //! ``` //! -//! ## 64-bit block RNG +//! ## RNG outputs `[u64; N]` //! //! ``` //! use rand_core::{RngCore, SeedableRng, le}; @@ -254,7 +254,7 @@ //! # assert_eq!(buf, [0x2b, 0x8c, 0xc8, 0x75, 0x18]); //! ``` //! -//! ## Fill-based RNG +//! ## RNG outputs bytes //! //! ``` //! use rand_core::RngCore;