rand_core/
le.rs

1// Copyright 2018 Developers of the Rand project.
2//
3// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
4// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
5// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
6// option. This file may not be copied, modified, or distributed
7// except according to those terms.
8
9//! # Little-Endian utilities
10//!
11//! For cross-platform reproducibility, Little-Endian order (least-significant
12//! part first) has been chosen as the standard for inter-type conversion.
//! For example, [`next_u64_via_u32`] takes `u32`
14//! values `x, y`, then outputs `(y << 32) | x`.
15//!
16//! Byte-swapping (like the std `to_le` functions) is only needed to convert
17//! to/from byte sequences, and since its purpose is reproducibility,
18//! non-reproducible sources (e.g. `OsRng`) need not bother with it.
19//!
20//! ### Implementing [`RngCore`]
21//!
22//! Usually an implementation of [`RngCore`] will implement one of the three
23//! methods over its internal source. The following helpers are provided for
24//! the remaining implementations.
25//!
26//! **`fn next_u32`:**
27//! -   `self.next_u64() as u32`
28//! -   `(self.next_u64() >> 32) as u32`
29//! -   <code>[next_u32_via_fill][](self)</code>
30//!
31//! **`fn next_u64`:**
32//! -   <code>[next_u64_via_u32][](self)</code>
33//! -   <code>[next_u64_via_fill][](self)</code>
34//!
35//! **`fn fill_bytes`:**
36//! -   <code>[fill_bytes_via_next][](self, dest)</code>
37//!
38//! ### Implementing [`SeedableRng`]
39//!
40//! In many cases, [`SeedableRng::Seed`] must be converted to `[u32]` or
41//! `[u64]`. The following helpers are provided:
42//!
43//! - [`read_u32_into`]
44//! - [`read_u64_into`]
45
46use crate::RngCore;
47#[allow(unused)]
48use crate::SeedableRng;
49
50/// Implement `next_u64` via `next_u32`, little-endian order.
51pub fn next_u64_via_u32<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
52    // Use LE; we explicitly generate one value before the next.
53    let x = u64::from(rng.next_u32());
54    let y = u64::from(rng.next_u32());
55    (y << 32) | x
56}
57
58/// Implement `fill_bytes` via `next_u64` and `next_u32`, little-endian order.
59///
60/// The fastest way to fill a slice is usually to work as long as possible with
61/// integers. That is why this method mostly uses `next_u64`, and only when
62/// there are 4 or less bytes remaining at the end of the slice it uses
63/// `next_u32` once.
64pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
65    let mut left = dest;
66    while left.len() >= 8 {
67        let (l, r) = { left }.split_at_mut(8);
68        left = r;
69        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
70        l.copy_from_slice(&chunk);
71    }
72    let n = left.len();
73    if n > 4 {
74        let chunk: [u8; 8] = rng.next_u64().to_le_bytes();
75        left.copy_from_slice(&chunk[..n]);
76    } else if n > 0 {
77        let chunk: [u8; 4] = rng.next_u32().to_le_bytes();
78        left.copy_from_slice(&chunk[..n]);
79    }
80}
81
/// Helper trait: word types that can be serialized to little-endian bytes.
pub(crate) trait Observable: Copy {
    /// The fixed-size byte array produced by serialization.
    type Bytes: Sized + AsRef<[u8]>;
    /// Serialize `self` in little-endian byte order.
    fn to_le_bytes(self) -> Self::Bytes;
}

impl Observable for u32 {
    type Bytes = [u8; 4];

    fn to_le_bytes(self) -> [u8; 4] {
        // Delegate to the inherent `u32::to_le_bytes`.
        u32::to_le_bytes(self)
    }
}

impl Observable for u64 {
    type Bytes = [u8; 8];

    fn to_le_bytes(self) -> [u8; 8] {
        // Delegate to the inherent `u64::to_le_bytes`.
        u64::to_le_bytes(self)
    }
}
100
101/// Fill dest from src
102///
103/// Returns `(n, byte_len)`. `src[..n]` is consumed,
104/// `dest[..byte_len]` is filled. `src[n..]` and `dest[byte_len..]` are left
105/// unaltered.
106pub(crate) fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
107    let size = core::mem::size_of::<T>();
108
109    // Always use little endian for portability of results.
110
111    let mut dest = dest.chunks_exact_mut(size);
112    let mut src = src.iter();
113
114    let zipped = dest.by_ref().zip(src.by_ref());
115    let num_chunks = zipped.len();
116    zipped.for_each(|(dest, src)| dest.copy_from_slice(src.to_le_bytes().as_ref()));
117
118    let byte_len = num_chunks * size;
119    if let Some(src) = src.next() {
120        // We have consumed all full chunks of dest, but not src.
121        let dest = dest.into_remainder();
122        let n = dest.len();
123        if n > 0 {
124            dest.copy_from_slice(&src.to_le_bytes().as_ref()[..n]);
125            return (num_chunks + 1, byte_len + n);
126        }
127    }
128    (num_chunks, byte_len)
129}
130
131/// Implement `next_u32` via `fill_bytes`, little-endian order.
132pub fn next_u32_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u32 {
133    let mut buf = [0; 4];
134    rng.fill_bytes(&mut buf);
135    u32::from_le_bytes(buf)
136}
137
138/// Implement `next_u64` via `fill_bytes`, little-endian order.
139pub fn next_u64_via_fill<R: RngCore + ?Sized>(rng: &mut R) -> u64 {
140    let mut buf = [0; 8];
141    rng.fill_bytes(&mut buf);
142    u64::from_le_bytes(buf)
143}
144
/// Fills `dst: &mut [u32]` from `src`
///
/// Reads use Little-Endian byte order, allowing portable reproduction of `dst`
/// from a byte slice.
///
/// # Panics
///
/// If `src` has insufficient length (if `src.len() < 4*dst.len()`).
#[inline]
#[track_caller]
pub fn read_u32_into(src: &[u8], dst: &mut [u32]) {
    assert!(src.len() >= 4 * dst.len());
    // Decode each 4-byte LE chunk; the assert guarantees enough chunks exist.
    let words = src
        .chunks_exact(4)
        .map(|chunk| u32::from_le_bytes(chunk.try_into().unwrap()));
    for (out, word) in dst.iter_mut().zip(words) {
        *out = word;
    }
}
161
/// Fills `dst: &mut [u64]` from `src`
///
/// # Panics
///
/// If `src` has insufficient length (if `src.len() < 8*dst.len()`).
#[inline]
#[track_caller]
pub fn read_u64_into(src: &[u8], dst: &mut [u64]) {
    assert!(src.len() >= 8 * dst.len());
    // Decode each 8-byte LE chunk; the assert guarantees enough chunks exist.
    let words = src
        .chunks_exact(8)
        .map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()));
    for (out, word) in dst.iter_mut().zip(words) {
        *out = word;
    }
}
175
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_fill_via_u32_chunks() {
        let src = [1u32, 2, 3];

        // dest ends mid-chunk: the last src element is partially written.
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_chunks(&src, &mut dst), (3, 11));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0]);

        // src runs out first: trailing dest bytes stay untouched.
        let mut dst = [0u8; 13];
        assert_eq!(fill_via_chunks(&src, &mut dst), (3, 12));
        assert_eq!(dst, [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0]);

        // dest runs out first: only part of src is consumed.
        let mut dst = [0u8; 5];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 5));
        assert_eq!(dst, [1, 0, 0, 0, 2]);
    }

    #[test]
    fn test_fill_via_u64_chunks() {
        let src = [1u64, 2];

        // dest ends mid-chunk: the last src element is partially written.
        let mut dst = [0u8; 11];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 11));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0]);

        // src runs out first: trailing dest bytes stay untouched.
        let mut dst = [0u8; 17];
        assert_eq!(fill_via_chunks(&src, &mut dst), (2, 16));
        assert_eq!(dst, [1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0]);

        // dest shorter than a single chunk: one src element, partial write.
        let mut dst = [0u8; 5];
        assert_eq!(fill_via_chunks(&src, &mut dst), (1, 5));
        assert_eq!(dst, [1, 0, 0, 0, 0]);
    }

    #[test]
    fn test_read() {
        let bytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];

        let mut buf = [0u32; 4];
        read_u32_into(&bytes, &mut buf);
        assert_eq!(buf[0], 0x04030201);
        assert_eq!(buf[3], 0x100F0E0D);

        let mut buf = [0u32; 3];
        read_u32_into(&bytes[1..13], &mut buf); // unaligned
        assert_eq!(buf[0], 0x05040302);
        assert_eq!(buf[2], 0x0D0C0B0A);

        let mut buf = [0u64; 2];
        read_u64_into(&bytes, &mut buf);
        assert_eq!(buf[0], 0x0807060504030201);
        assert_eq!(buf[1], 0x100F0E0D0C0B0A09);

        let mut buf = [0u64; 1];
        read_u64_into(&bytes[7..15], &mut buf); // unaligned
        assert_eq!(buf[0], 0x0F0E0D0C0B0A0908);
    }
}