//! The engines provided here should be initialized from an external source.
//! For a thread-local cryptographically secure pseudo random number generator,
//! use `std.crypto.random`.
//! Be sure to use a CSPRNG when required, otherwise using a normal PRNG will
//! be faster and use substantially less stack space.
//!
//! TODO(tiehuis): Benchmark these against other reference implementations.
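//!
//! A minimal usage sketch (an editorial illustration; it assumes the engines
//! imported below expose `init` plus a `random()` accessor built on
//! `Random.init`, and a real program would seed from an entropy source
//! rather than a constant):
//!
//!     var prng = DefaultPrng.init(0x1234);
//!     const random = prng.random();
//!     const roll = random.intRangeAtMost(u8, 1, 6);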

const std = @import("std.zig");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const math = std.math;
const ziggurat = @import("rand/ziggurat.zig");
const maxInt = std.math.maxInt;

/// Fast unbiased random numbers.
pub const DefaultPrng = Xoshiro256;

/// Cryptographically secure random numbers.
pub const DefaultCsprng = Gimli;

pub const Isaac64 = @import("rand/Isaac64.zig");
pub const Gimli = @import("rand/Gimli.zig");
pub const Pcg = @import("rand/Pcg.zig");
pub const Xoroshiro128 = @import("rand/Xoroshiro128.zig");
pub const Xoshiro256 = @import("rand/Xoshiro256.zig");
pub const Sfc64 = @import("rand/Sfc64.zig");
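
// An editorial smoke test for `DefaultCsprng` (a sketch: it assumes `Gimli`
// exposes `secret_seed_length` and an `init` taking a seed of that length,
// plus the `random()` accessor). Real code must obtain the seed from a
// secure source such as `std.crypto.random.bytes`, never a constant.
test "DefaultCsprng smoke test" {
    const seed = [_]u8{0x01} ** DefaultCsprng.secret_seed_length;
    var csprng = DefaultCsprng.init(seed);
    const random = csprng.random();
    var buf: [16]u8 = undefined;
    random.bytes(&buf);
}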

pub const Random = struct {
    ptr: *anyopaque,
    fillFn: if (builtin.zig_backend == .stage1)
        fn (ptr: *anyopaque, buf: []u8) void
    else
        *const fn (ptr: *anyopaque, buf: []u8) void,

    pub fn init(pointer: anytype, comptime fillFn: fn (ptr: @TypeOf(pointer), buf: []u8) void) Random {
        const Ptr = @TypeOf(pointer);
        assert(@typeInfo(Ptr) == .Pointer); // Must be a pointer
        assert(@typeInfo(Ptr).Pointer.size == .One); // Must be a single-item pointer
        assert(@typeInfo(@typeInfo(Ptr).Pointer.child) == .Struct); // Must point to a struct
        const gen = struct {
            fn fill(ptr: *anyopaque, buf: []u8) void {
                const alignment = @typeInfo(Ptr).Pointer.alignment;
                const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
                fillFn(self, buf);
            }
        };

        return .{
            .ptr = pointer,
            .fillFn = gen.fill,
        };
    }

    /// Read random bytes into the specified buffer until full.
    pub fn bytes(r: Random, buf: []u8) void {
        r.fillFn(r.ptr, buf);
    }

    pub fn boolean(r: Random) bool {
        return r.int(u1) != 0;
    }

    /// Returns a random value from an enum, evenly distributed.
    pub fn enumValue(r: Random, comptime EnumType: type) EnumType {
        comptime assert(@typeInfo(EnumType) == .Enum);

        // We won't use int -> enum casting because enum elements can have
        // arbitrary values. Instead we'll randomly pick one of the type's values.
        const values = std.enums.values(EnumType);
        const index = r.uintLessThan(usize, values.len);
        return values[index];
    }

    /// Returns a random int `i` such that `minInt(T) <= i <= maxInt(T)`.
    /// `i` is evenly distributed.
    pub fn int(r: Random, comptime T: type) T {
        const bits = @typeInfo(T).Int.bits;
        const UnsignedT = std.meta.Int(.unsigned, bits);
        const ByteAlignedT = std.meta.Int(.unsigned, @divTrunc(bits + 7, 8) * 8);

        var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
        r.bytes(rand_bytes[0..]);

        // use LE instead of native endian for better portability maybe?
        // TODO: endian portability is pointless if the underlying prng isn't endian portable.
        // TODO: document the endian portability of this library.
        const byte_aligned_result = mem.readIntSliceLittle(ByteAlignedT, &rand_bytes);
        const unsigned_result = @truncate(UnsignedT, byte_aligned_result);
        return @bitCast(T, unsigned_result);
    }

    /// Constant-time implementation of `uintLessThan`.
    /// The results of this function may be biased.
    pub fn uintLessThanBiased(r: Random, comptime T: type, less_than: T) T {
        comptime assert(@typeInfo(T).Int.signedness == .unsigned);
        const bits = @typeInfo(T).Int.bits;
        comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
        assert(0 < less_than);
        if (bits <= 32) {
            return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
        } else {
            return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
        }
    }

    /// Returns an evenly distributed random unsigned integer `0 <= i < less_than`.
    /// This function assumes that the underlying `fillFn` produces evenly distributed values.
    /// Within this assumption, the runtime of this function is exponentially distributed.
    /// If `fillFn` were backed by a true random generator,
    /// the runtime of this function would technically be unbounded.
    /// However, if `fillFn` is backed by any evenly distributed pseudo random number generator,
    /// this function is guaranteed to return.
    /// If you need deterministic runtime bounds, use `uintLessThanBiased`.
    pub fn uintLessThan(r: Random, comptime T: type, less_than: T) T {
        comptime assert(@typeInfo(T).Int.signedness == .unsigned);
        const bits = @typeInfo(T).Int.bits;
        comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
        assert(0 < less_than);
        // Small is typically u32
        const small_bits = @divTrunc(bits + 31, 32) * 32;
        const Small = std.meta.Int(.unsigned, small_bits);
        // Large is typically u64
        const Large = std.meta.Int(.unsigned, small_bits * 2);

        // adapted from:
        // http://www.pcg-random.org/posts/bounded-rands.html
        // "Lemire's (with an extra tweak from me)"
        var x: Small = r.int(Small);
        var m: Large = @as(Large, x) * @as(Large, less_than);
        var l: Small = @truncate(Small, m);
        if (l < less_than) {
            var t: Small = -%less_than;

            if (t >= less_than) {
                t -= less_than;
                if (t >= less_than) {
                    t %= less_than;
                }
            }
            while (l < t) {
                x = r.int(Small);
                m = @as(Large, x) * @as(Large, less_than);
                l = @truncate(Small, m);
            }
        }
        return @intCast(T, m >> small_bits);
    }

    /// Constant-time implementation of `uintAtMost`.
    /// The results of this function may be biased.
    pub fn uintAtMostBiased(r: Random, comptime T: type, at_most: T) T {
        assert(@typeInfo(T).Int.signedness == .unsigned);
        if (at_most == maxInt(T)) {
            // have the full range
            return r.int(T);
        }
        return r.uintLessThanBiased(T, at_most + 1);
    }

    /// Returns an evenly distributed random unsigned integer `0 <= i <= at_most`.
    /// See `uintLessThan`, which this function uses in most cases,
    /// for commentary on the runtime of this function.
    pub fn uintAtMost(r: Random, comptime T: type, at_most: T) T {
        assert(@typeInfo(T).Int.signedness == .unsigned);
        if (at_most == maxInt(T)) {
            // have the full range
            return r.int(T);
        }
        return r.uintLessThan(T, at_most + 1);
    }

    /// Constant-time implementation of `intRangeLessThan`.
    /// The results of this function may be biased.
    pub fn intRangeLessThanBiased(r: Random, comptime T: type, at_least: T, less_than: T) T {
        assert(at_least < less_than);
        const info = @typeInfo(T).Int;
        if (info.signedness == .signed) {
            // Two's complement makes this math pretty easy.
            const UnsignedT = std.meta.Int(.unsigned, info.bits);
            const lo = @bitCast(UnsignedT, at_least);
            const hi = @bitCast(UnsignedT, less_than);
            const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
            return @bitCast(T, result);
        } else {
            // The signed implementation would work fine, but we can use stricter arithmetic operators here.
            return at_least + r.uintLessThanBiased(T, less_than - at_least);
        }
    }

    /// Returns an evenly distributed random integer `at_least <= i < less_than`.
    /// See `uintLessThan`, which this function uses in most cases,
    /// for commentary on the runtime of this function.
    pub fn intRangeLessThan(r: Random, comptime T: type, at_least: T, less_than: T) T {
        assert(at_least < less_than);
        const info = @typeInfo(T).Int;
        if (info.signedness == .signed) {
            // Two's complement makes this math pretty easy.
            const UnsignedT = std.meta.Int(.unsigned, info.bits);
            const lo = @bitCast(UnsignedT, at_least);
            const hi = @bitCast(UnsignedT, less_than);
            const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
            return @bitCast(T, result);
        } else {
            // The signed implementation would work fine, but we can use stricter arithmetic operators here.
            return at_least + r.uintLessThan(T, less_than - at_least);
        }
    }

    /// Constant-time implementation of `intRangeAtMost`.
    /// The results of this function may be biased.
    pub fn intRangeAtMostBiased(r: Random, comptime T: type, at_least: T, at_most: T) T {
        assert(at_least <= at_most);
        const info = @typeInfo(T).Int;
        if (info.signedness == .signed) {
            // Two's complement makes this math pretty easy.
            const UnsignedT = std.meta.Int(.unsigned, info.bits);
            const lo = @bitCast(UnsignedT, at_least);
            const hi = @bitCast(UnsignedT, at_most);
            const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
            return @bitCast(T, result);
        } else {
            // The signed implementation would work fine, but we can use stricter arithmetic operators here.
            return at_least + r.uintAtMostBiased(T, at_most - at_least);
        }
    }

    /// Returns an evenly distributed random integer `at_least <= i <= at_most`.
    /// See `uintLessThan`, which this function uses in most cases,
    /// for commentary on the runtime of this function.
    pub fn intRangeAtMost(r: Random, comptime T: type, at_least: T, at_most: T) T {
        assert(at_least <= at_most);
        const info = @typeInfo(T).Int;
        if (info.signedness == .signed) {
            // Two's complement makes this math pretty easy.
            const UnsignedT = std.meta.Int(.unsigned, info.bits);
            const lo = @bitCast(UnsignedT, at_least);
            const hi = @bitCast(UnsignedT, at_most);
            const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
            return @bitCast(T, result);
        } else {
            // The signed implementation would work fine, but we can use stricter arithmetic operators here.
            return at_least + r.uintAtMost(T, at_most - at_least);
        }
    }

    /// Return a floating point value evenly distributed in the range [0, 1).
    pub fn float(r: Random, comptime T: type) T {
        // Generate a uniformly random value for the mantissa.
        // Then generate an exponentially biased random value for the exponent.
        // This covers every possible value in the range.
        switch (T) {
            f32 => {
                // Use 23 random bits for the mantissa, and the rest for the exponent.
                // If all 41 bits are zero, generate additional random bits, until a
                // set bit is found, or 126 bits have been generated.
                const rand = r.int(u64);
                var rand_lz = @clz(u64, rand | 0x7FFFFF);
                if (rand_lz == 41) {
                    // TODO: when #5177 or #489 is implemented,
                    // tell the compiler it is unlikely (1/2^41) to reach this point.
                    // (Same for the if branch and the f64 calculations below.)
                    rand_lz += @clz(u64, r.int(u64));
                    if (rand_lz == 41 + 64) {
                        // It is astronomically unlikely to reach this point.
                        rand_lz += @clz(u32, r.int(u32) | 0x7FF);
                    }
                }
                const mantissa = @truncate(u23, rand);
                const exponent = @as(u32, 126 - rand_lz) << 23;
                return @bitCast(f32, exponent | mantissa);
            },
            f64 => {
                // Use 52 random bits for the mantissa, and the rest for the exponent.
                // If all 12 bits are zero, generate additional random bits, until a
                // set bit is found, or 1022 bits have been generated.
                const rand = r.int(u64);
                var rand_lz: u64 = @clz(u64, rand | 0xFFFFFFFFFFFFF);
                if (rand_lz == 12) {
                    while (true) {
                        // It is astronomically unlikely for this loop to execute more than once.
                        const addl_rand_lz = @clz(u64, r.int(u64));
                        rand_lz += addl_rand_lz;
                        if (addl_rand_lz != 64) {
                            break;
                        }
                        if (rand_lz >= 1022) {
                            rand_lz = 1022;
                            break;
                        }
                    }
                }
                const mantissa = rand & 0xFFFFFFFFFFFFF;
                const exponent = (1022 - rand_lz) << 52;
                return @bitCast(f64, exponent | mantissa);
            },
            else => @compileError("unknown floating point type"),
        }
    }

    /// Return a floating point value normally distributed with mean = 0, stddev = 1.
    ///
    /// To use different parameters, use: floatNorm(...) * desiredStddev + desiredMean.
    pub fn floatNorm(r: Random, comptime T: type) T {
        const value = ziggurat.next_f64(r, ziggurat.NormDist);
        switch (T) {
            f32 => return @floatCast(f32, value),
            f64 => return value,
            else => @compileError("unknown floating point type"),
        }
    }

    /// Return an exponentially distributed float with a rate parameter of 1.
    ///
    /// To use a different rate parameter, use: floatExp(...) / desiredRate.
    pub fn floatExp(r: Random, comptime T: type) T {
        const value = ziggurat.next_f64(r, ziggurat.ExpDist);
        switch (T) {
            f32 => return @floatCast(f32, value),
            f64 => return value,
            else => @compileError("unknown floating point type"),
        }
    }

    /// Shuffle a slice into a random order.
    pub fn shuffle(r: Random, comptime T: type, buf: []T) void {
        if (buf.len < 2) {
            return;
        }

        var i: usize = 0;
        while (i < buf.len - 1) : (i += 1) {
            const j = r.intRangeLessThan(usize, i, buf.len);
            mem.swap(T, &buf[i], &buf[j]);
        }
    }
};
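
// The tests below are editorial sketches; the upstream suite lives in
// rand/test.zig. This one wires a hypothetical constant-output engine
// through `Random.init` to illustrate the type-erased interface pattern
// the engines above follow.
test "Random.init type-erasure sketch" {
    const ConstantEngine = struct {
        value: u8,

        fn fill(self: *@This(), buf: []u8) void {
            mem.set(u8, buf, self.value);
        }

        pub fn random(self: *@This()) Random {
            return Random.init(self, fill);
        }
    };

    var engine = ConstantEngine{ .value = 0xAA };
    const r = engine.random();
    var buf: [4]u8 = undefined;
    r.bytes(&buf);
    for (buf) |b| {
        try std.testing.expectEqual(@as(u8, 0xAA), b);
    }
}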
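
// A sketch checking the documented bounds of the ranged-integer helpers.
// It assumes `DefaultPrng.init` takes a `u64` seed and that the engine
// provides the `random()` accessor.
test "ranged integers stay in bounds" {
    var prng = DefaultPrng.init(0);
    const r = prng.random();
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        try std.testing.expect(r.uintLessThan(u8, 10) < 10);
        const v = r.intRangeAtMost(i8, -3, 7);
        try std.testing.expect(v >= -3 and v <= 7);
    }
}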
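
// A sketch for `float`, under the same assumptions as above: the doc
// comment promises values evenly distributed in [0, 1).
test "float is in [0, 1)" {
    var prng = DefaultPrng.init(42);
    const r = prng.random();
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        const f32_val = r.float(f32);
        const f64_val = r.float(f64);
        try std.testing.expect(f32_val >= 0.0 and f32_val < 1.0);
        try std.testing.expect(f64_val >= 0.0 and f64_val < 1.0);
    }
}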

/// Convert a random integer 0 <= random_int <= maxInt(T),
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
    comptime assert(@typeInfo(T).Int.signedness == .unsigned);
    const bits = @typeInfo(T).Int.bits;
    const T2 = std.meta.Int(.unsigned, bits * 2);

    // adapted from:
    // http://www.pcg-random.org/posts/bounded-rands.html
    // "Integer Multiplication (Biased)"
    var m: T2 = @as(T2, random_int) * @as(T2, less_than);
    return @intCast(T, m >> bits);
}
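
// An editorial worked example for `limitRangeBiased`: the mapping is
// `(random_int * less_than) >> bits`, so the extremes of the input range
// land on the extremes of the output range.
test "limitRangeBiased worked example" {
    // 0 * 10 = 0; 0 >> 8 = 0
    try std.testing.expectEqual(@as(u8, 0), limitRangeBiased(u8, 0, 10));
    // 255 * 10 = 2550; 2550 >> 8 = 9, the largest value below 10
    try std.testing.expectEqual(@as(u8, 9), limitRangeBiased(u8, 255, 10));
}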

// Generator to extend 64-bit seed values into longer sequences.
//
// The number of cycles is thus limited to 64-bits regardless of the engine, but this
// is still plenty for practical purposes.
pub const SplitMix64 = struct {
    s: u64,

    pub fn init(seed: u64) SplitMix64 {
        return SplitMix64{ .s = seed };
    }

    pub fn next(self: *SplitMix64) u64 {
        self.s +%= 0x9e3779b97f4a7c15;

        var z = self.s;
        z = (z ^ (z >> 30)) *% 0xbf58476d1ce4e5b9;
        z = (z ^ (z >> 27)) *% 0x94d049bb133111eb;
        return z ^ (z >> 31);
    }
};
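
// A sketch for `SplitMix64`: equal seeds must yield equal sequences, which
// is the property the engines rely on when extending a 64-bit seed.
test "SplitMix64 is deterministic" {
    var a = SplitMix64.init(0x1234_5678_9abc_def0);
    var b = SplitMix64.init(0x1234_5678_9abc_def0);
    var i: usize = 0;
    while (i < 8) : (i += 1) {
        try std.testing.expectEqual(a.next(), b.next());
    }
}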

test {
    std.testing.refAllDecls(@This());
    _ = @import("rand/test.zig");
}