2017-05-28 14:54:53 +01:00
|
|
|
const debug = @import("debug.zig");
|
|
|
|
const assert = debug.assert;
|
2017-06-08 03:56:57 +01:00
|
|
|
const math = @import("math/index.zig");
|
2017-12-04 15:35:55 +00:00
|
|
|
const builtin = @import("builtin");
|
2016-05-17 21:32:43 +01:00
|
|
|
|
2016-08-12 06:25:13 +01:00
|
|
|
pub const Cmp = math.Cmp;
|
|
|
|
|
2016-12-19 00:40:26 +00:00
|
|
|
/// Polymorphic allocator interface: implementations embed this struct,
/// fill in the three function pointers, and the typed helper methods
/// below dispatch through them.
pub const Allocator = struct {
    /// Allocate byte_count bytes and return them in a slice, with the
    /// slice's pointer aligned at least to alignment bytes.
    /// The returned newly allocated memory is undefined.
    allocFn: fn (self: &Allocator, byte_count: usize, alignment: u29) -> %[]u8,

    /// If `new_byte_count > old_mem.len`:
    /// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
    /// * alignment >= alignment of old_mem.ptr
    ///
    /// If `new_byte_count <= old_mem.len`:
    /// * this function must return successfully.
    /// * alignment <= alignment of old_mem.ptr
    ///
    /// The returned newly allocated memory is undefined.
    reallocFn: fn (self: &Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) -> %[]u8,

    /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
    freeFn: fn (self: &Allocator, old_mem: []u8),

    /// Allocates a single T and returns a pointer to it.
    /// Free with `destroy`.
    fn create(self: &Allocator, comptime T: type) -> %&T {
        const slice = %return self.alloc(T, 1);
        return &slice[0];
    }

    /// Frees a pointer previously returned by `create`.
    fn destroy(self: &Allocator, ptr: var) {
        // Rebuild the one-element slice that `create` originally allocated.
        self.free(ptr[0..1]);
    }

    /// Allocates a slice of `n` items of type T at T's natural alignment.
    fn alloc(self: &Allocator, comptime T: type, n: usize) -> %[]T {
        return self.alignedAlloc(T, @alignOf(T), n);
    }

    /// Allocates a slice of `n` items of type T, aligned to `alignment` bytes.
    fn alignedAlloc(self: &Allocator, comptime T: type, comptime alignment: u29,
        n: usize) -> %[]align(alignment) T
    {
        // math.mul returns an error on overflow instead of wrapping.
        const byte_count = %return math.mul(usize, @sizeOf(T), n);
        const byte_slice = %return self.allocFn(self, byte_count, alignment);
        // This loop should get optimized out in ReleaseFast mode
        for (byte_slice) |*byte| {
            *byte = undefined;
        }
        return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
    }

    /// Resizes a slice previously returned by `alloc` or `realloc`
    /// to hold `n` items, at T's natural alignment.
    fn realloc(self: &Allocator, comptime T: type, old_mem: []T, n: usize) -> %[]T {
        return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
    }

    fn alignedRealloc(self: &Allocator, comptime T: type, comptime alignment: u29,
        old_mem: []align(alignment) T, n: usize) -> %[]align(alignment) T
    {
        if (old_mem.len == 0) {
            // Nothing to preserve; a plain allocation suffices.
            return self.alloc(T, n);
        }

        const old_byte_slice = ([]u8)(old_mem);
        const byte_count = %return math.mul(usize, @sizeOf(T), n);
        const byte_slice = %return self.reallocFn(self, old_byte_slice, byte_count, alignment);
        // This loop should get optimized out in ReleaseFast mode
        for (byte_slice[old_byte_slice.len..]) |*byte| {
            *byte = undefined;
        }
        return ([]T)(@alignCast(alignment, byte_slice));
    }

    /// Reallocate, but `n` must be less than or equal to `old_mem.len`.
    /// Unlike `realloc`, this function cannot fail.
    /// Shrinking to 0 is the same as calling `free`.
    fn shrink(self: &Allocator, comptime T: type, old_mem: []T, n: usize) -> []T {
        return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
    }

    fn alignedShrink(self: &Allocator, comptime T: type, comptime alignment: u29,
        old_mem: []align(alignment) T, n: usize) -> []align(alignment) T
    {
        if (n == 0) {
            self.free(old_mem);
            return old_mem[0..0];
        }

        assert(n <= old_mem.len);

        // Here we skip the overflow checking on the multiplication because
        // n <= old_mem.len and the multiplication didn't overflow for that operation.
        const byte_count = @sizeOf(T) * n;

        // %% asserts success: the reallocFn contract above says a shrinking
        // reallocation must return successfully.
        const byte_slice = %%self.reallocFn(self, ([]u8)(old_mem), byte_count, alignment);
        return ([]align(alignment) T)(@alignCast(alignment, byte_slice));
    }

    /// Frees a slice previously returned by alloc/realloc. Accepts const
    /// slices as well; a zero-length slice is a no-op.
    fn free(self: &Allocator, memory: var) {
        const bytes = ([]const u8)(memory);
        if (bytes.len == 0)
            return;
        // Cast away const so freeFn's []u8 parameter can be satisfied.
        const non_const_ptr = @intToPtr(&u8, @ptrToInt(bytes.ptr));
        self.freeFn(self, non_const_ptr[0..bytes.len]);
    }
};
|
2016-05-08 09:34:00 +01:00
|
|
|
|
2017-12-11 22:27:31 +00:00
|
|
|
/// An allocator that hands out slices of a caller-provided fixed buffer.
/// Individual frees are no-ops; memory is only reclaimed by discarding
/// the whole allocator.
pub const FixedBufferAllocator = struct {
    allocator: Allocator, // embedded vtable; pass &self.allocator around
    end_index: usize,     // number of bytes of `buffer` already handed out
    buffer: []u8,

    /// Wraps `buffer`; the allocator does not take ownership of it.
    pub fn init(buffer: []u8) -> FixedBufferAllocator {
        return FixedBufferAllocator {
            .allocator = Allocator {
                .allocFn = alloc,
                .reallocFn = realloc,
                .freeFn = free,
            },
            .buffer = buffer,
            .end_index = 0,
        };
    }

    fn alloc(allocator: &Allocator, n: usize, alignment: u29) -> %[]u8 {
        const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
        // Compute the current address arithmetically instead of via
        // `&self.buffer[self.end_index]`: when the buffer is exactly full,
        // that element access is out of bounds and trips a safety check
        // instead of cleanly returning error.OutOfMemory.
        const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
        const rem = @rem(addr, alignment);
        const march_forward_bytes = if (rem == 0) 0 else (alignment - rem);
        const adjusted_index = self.end_index + march_forward_bytes;
        const new_end_index = adjusted_index + n;
        if (new_end_index > self.buffer.len) {
            return error.OutOfMemory;
        }
        const result = self.buffer[adjusted_index .. new_end_index];
        self.end_index = new_end_index;
        return result;
    }

    fn realloc(allocator: &Allocator, old_mem: []u8, new_size: usize, alignment: u29) -> %[]u8 {
        if (new_size <= old_mem.len) {
            // Shrinking never moves the data; just narrow the slice.
            return old_mem[0..new_size];
        } else {
            // Growing always relocates: allocate fresh space and copy.
            // The old region is not reclaimed (free is a no-op).
            const result = %return alloc(allocator, new_size, alignment);
            copy(u8, result, old_mem);
            return result;
        }
    }

    /// Individual frees are not supported by this allocator.
    fn free(allocator: &Allocator, bytes: []u8) { }
};
|
|
|
|
|
2017-09-28 03:04:38 +01:00
|
|
|
|
2016-05-17 21:32:43 +01:00
|
|
|
/// Copy all of source into dest at position 0.
/// dest.len must be >= source.len.
pub fn copy(comptime T: type, dest: []T, source: []const T) {
    // TODO instead of manually doing this check for the whole array
    // and turning off debug safety, the compiler should detect loops like
    // this and automatically omit safety checks for loops
    @setDebugSafety(this, false);
    assert(dest.len >= source.len);
    var i: usize = 0;
    while (i < source.len) : (i += 1) {
        dest[i] = source[i];
    }
}
|
|
|
|
|
2017-01-23 00:51:37 +00:00
|
|
|
/// Overwrite every element of `dest` with `value`.
pub fn set(comptime T: type, dest: []T, value: T) {
    var i: usize = 0;
    while (i < dest.len) : (i += 1) {
        dest[i] = value;
    }
}
|
2016-08-12 06:25:13 +01:00
|
|
|
|
|
|
|
/// Return < 0, == 0, or > 0 if memory a is less than, equal to, or greater than,
/// memory b, respectively.
/// Elements are compared lexicographically; on an equal prefix the shorter
/// slice compares less.
pub fn cmp(comptime T: type, a: []const T, b: []const T) -> Cmp {
    const shortest = math.min(a.len, b.len);
    var index: usize = 0;
    while (index < shortest) : (index += 1) {
        if (a[index] < b[index]) return Cmp.Less;
        if (a[index] > b[index]) return Cmp.Greater;
    }
    // The common prefix is equal; the lengths decide the ordering.
    return if (a.len < b.len) Cmp.Less else if (a.len > b.len) Cmp.Greater else Cmp.Equal;
}
|
2016-09-23 07:00:23 +01:00
|
|
|
|
2017-02-12 22:22:35 +00:00
|
|
|
/// Compares two slices and returns whether they are equal.
/// Equal means: same length and element-wise equal.
pub fn eql(comptime T: type, a: []const T, b: []const T) -> bool {
    if (a.len != b.len) return false;
    var i: usize = 0;
    while (i < a.len) : (i += 1) {
        if (a[i] != b[i]) return false;
    }
    return true;
}
|
|
|
|
|
2017-04-19 06:13:15 +01:00
|
|
|
/// Copies ::m to newly allocated memory. Caller is responsible to free it.
pub fn dupe(allocator: &Allocator, comptime T: type, m: []const T) -> %[]T {
    const copy_buf = %return allocator.alloc(T, m.len);
    copy(T, copy_buf, m);
    return copy_buf;
}
|
|
|
|
|
2017-04-05 22:55:50 +01:00
|
|
|
/// Linear search for the index of a scalar value inside a slice.
/// Returns the index of the first occurrence of `value`, or null when
/// `value` does not appear in `slice`.
pub fn indexOfScalar(comptime T: type, slice: []const T, value: T) -> ?usize {
    // Delegate to the positional variant, starting the scan at index 0.
    return indexOfScalarPos(T, slice, 0, value);
}
|
|
|
|
|
|
|
|
/// Linear search for `value`, starting at `start_index`.
/// Returns the index of the first match at or after `start_index`,
/// or null when there is none.
pub fn indexOfScalarPos(comptime T: type, slice: []const T, start_index: usize, value: T) -> ?usize {
    var pos = start_index;
    while (pos < slice.len) : (pos += 1) {
        if (slice[pos] == value) return pos;
    }
    return null;
}
|
|
|
|
|
2017-10-13 14:31:03 +01:00
|
|
|
/// Linear search for the first element of `slice` that equals any element
/// of `values`. Returns its index, or null when no such element exists.
pub fn indexOfAny(comptime T: type, slice: []const T, values: []const T) -> ?usize {
    // Delegate to the positional variant, starting the scan at index 0.
    return indexOfAnyPos(T, slice, 0, values);
}
|
|
|
|
|
|
|
|
/// Like `indexOfAny` but starts the scan at `start_index`.
/// Each candidate element is compared against every entry of `values`.
pub fn indexOfAnyPos(comptime T: type, slice: []const T, start_index: usize, values: []const T) -> ?usize {
    var pos = start_index;
    while (pos < slice.len) : (pos += 1) {
        const candidate = slice[pos];
        for (values) |value| {
            if (candidate == value) return pos;
        }
    }
    return null;
}
|
|
|
|
|
2017-04-19 06:13:15 +01:00
|
|
|
/// Finds the first occurrence of the subsequence `needle` inside `haystack`.
/// Returns the starting index, or null when `needle` is not present.
pub fn indexOf(comptime T: type, haystack: []const T, needle: []const T) -> ?usize {
    // Delegate to the positional variant, starting the scan at index 0.
    return indexOfPos(T, haystack, 0, needle);
}
|
|
|
|
|
|
|
|
// TODO boyer-moore algorithm
/// Naive substring search starting at `start_index`.
/// Returns the index of the first occurrence of `needle` at or after
/// `start_index`, or null when there is none.
pub fn indexOfPos(comptime T: type, haystack: []const T, start_index: usize, needle: []const T) -> ?usize {
    if (haystack.len < needle.len)
        return null;

    // Last index at which a full match could still begin.
    const last_start = haystack.len - needle.len;
    var pos = start_index;
    while (pos <= last_start) : (pos += 1) {
        if (eql(T, haystack[pos .. pos + needle.len], needle))
            return pos;
    }
    return null;
}
|
|
|
|
|
|
|
|
test "mem.indexOf" {
    // match at the very end of the haystack
    assert(??indexOf(u8, "one two three four", "four") == 14);
    // no match anywhere
    assert(indexOf(u8, "one two three four", "gour") == null);
    // needle equal to the whole haystack
    assert(??indexOf(u8, "foo", "foo") == 0);
    // needle longer than haystack
    assert(indexOf(u8, "foo", "fool") == null);
}
|
|
|
|
|
2017-02-12 22:22:35 +00:00
|
|
|
/// Reads an integer from memory with size equal to bytes.len.
/// T specifies the return type, which must be large enough to store
/// the result.
/// See also ::readIntBE or ::readIntLE.
pub fn readInt(bytes: []const u8, comptime T: type, endian: builtin.Endian) -> T {
    // A single-byte integer needs no shifting (and a u8 cannot be
    // shifted by 8 under the log2-width shift rule), so return it directly.
    if (T.bit_count == 8) {
        return bytes[0];
    }
    var result: T = 0;
    switch (endian) {
        builtin.Endian.Big => {
            // Most significant byte first: shift the accumulator left
            // and merge each successive byte.
            for (bytes) |b| {
                result = (result << 8) | b;
            }
        },
        builtin.Endian.Little => {
            // Least significant byte first: byte `index` contributes at
            // bit offset index * 8. The shift amount must have the
            // log2-bit-width type of T, hence the ShiftType cast.
            const ShiftType = math.Log2Int(T);
            for (bytes) |b, index| {
                result = result | (T(b) << ShiftType(index * 8));
            }
        },
    }
    return result;
}
|
|
|
|
|
2017-11-30 18:20:39 +00:00
|
|
|
/// Reads a big-endian int of type T from bytes.
/// bytes.len must be exactly @sizeOf(T).
pub fn readIntBE(comptime T: type, bytes: []const u8) -> T {
    // For signed targets, read as the same-width unsigned type and
    // reinterpret the bits, keeping the shift logic unsigned-only.
    if (T.is_signed) {
        return @bitCast(T, readIntBE(@IntType(false, T.bit_count), bytes));
    }
    assert(bytes.len == @sizeOf(T));
    var result: T = 0;
    // Compile-time unrolled loop: fold bytes in, most significant first.
    {comptime var i = 0; inline while (i < @sizeOf(T)) : (i += 1) {
        result = (result << 8) | T(bytes[i]);
    }}
    return result;
}
|
|
|
|
|
|
|
|
/// Reads a little-endian int of type T from bytes.
/// bytes.len must be exactly @sizeOf(T).
pub fn readIntLE(comptime T: type, bytes: []const u8) -> T {
    // For signed targets, read as the same-width unsigned type and
    // reinterpret the bits, keeping the shift logic unsigned-only.
    if (T.is_signed) {
        return @bitCast(T, readIntLE(@IntType(false, T.bit_count), bytes));
    }
    assert(bytes.len == @sizeOf(T));
    var result: T = 0;
    // Compile-time unrolled loop: byte i lands at bit offset i * 8.
    {comptime var i = 0; inline while (i < @sizeOf(T)) : (i += 1) {
        result |= T(bytes[i]) << i * 8;
    }}
    return result;
}
|
|
|
|
|
2017-02-12 22:22:35 +00:00
|
|
|
/// Writes an integer to memory with size equal to bytes.len. Pads with zeroes
/// to fill the entire buffer provided.
/// value must be an integer.
pub fn writeInt(buf: []u8, value: var, endian: builtin.Endian) {
    // Work on the same-width unsigned type so the right shifts below are
    // logical shifts regardless of the signedness of `value`.
    const uint = @IntType(false, @typeOf(value).bit_count);
    var bits = @truncate(uint, value);
    switch (endian) {
        builtin.Endian.Big => {
            // Fill from the end of the buffer backwards so the most
            // significant byte ends up at buf[0].
            var index: usize = buf.len;
            while (index != 0) {
                index -= 1;

                buf[index] = @truncate(u8, bits);
                bits >>= 8;
            }
        },
        builtin.Endian.Little => {
            // Least significant byte first.
            for (buf) |*b| {
                *b = @truncate(u8, bits);
                bits >>= 8;
            }
        },
    }
    // Any bits left over means the value did not fit in buf.
    assert(bits == 0);
}
|
|
|
|
|
2017-04-04 06:52:20 +01:00
|
|
|
|
|
|
|
/// FNV-1a 32-bit hash of a byte slice (xor the byte in, then multiply
/// by the FNV prime). Offset basis 2166136261, prime 16777619.
pub fn hash_slice_u8(k: []const u8) -> u32 {
    var hash: u32 = 2166136261;
    for (k) |byte| {
        hash = (hash ^ byte) *% 16777619;
    }
    return hash;
}
|
|
|
|
|
|
|
|
/// Byte-slice equality wrapper around `eql`.
/// NOTE(review): presumably paired with `hash_slice_u8` as hash-map
/// callbacks — confirm with callers.
pub fn eql_slice_u8(a: []const u8, b: []const u8) -> bool {
    return eql(u8, a, b);
}
|
|
|
|
|
2017-10-06 05:27:15 +01:00
|
|
|
/// Returns an iterator that iterates over the slices of `buffer` that are not
/// any of the bytes in `split_bytes`.
/// split(" abc def ghi ", " ")
/// Will return slices for "abc", "def", "ghi", null, in that order.
pub fn split(buffer: []const u8, split_bytes: []const u8) -> SplitIterator {
    return SplitIterator {
        .buffer = buffer,
        .split_bytes = split_bytes,
        .index = 0,
    };
}
|
|
|
|
|
2017-04-30 00:23:33 +01:00
|
|
|
test "mem.split" {
    var it = split("   abc def    ghi  ", " ");
    // Leading, trailing, and repeated separators yield no empty tokens.
    assert(eql(u8, ??it.next(), "abc"));
    assert(eql(u8, ??it.next(), "def"));
    assert(eql(u8, ??it.next(), "ghi"));
    assert(it.next() == null);
}
|
|
|
|
|
2017-04-06 10:34:04 +01:00
|
|
|
/// Returns whether `haystack` begins with the elements of `needle`.
/// An over-long needle can never be a prefix.
pub fn startsWith(comptime T: type, haystack: []const T, needle: []const T) -> bool {
    if (needle.len > haystack.len) return false;
    return eql(T, haystack[0..needle.len], needle);
}
|
|
|
|
|
2017-04-04 06:52:20 +01:00
|
|
|
/// Iterator produced by `split`; yields the runs of `buffer` that contain
/// no separator bytes, one token per call to `next`.
const SplitIterator = struct {
    buffer: []const u8,
    split_bytes: []const u8,
    index: usize,

    /// Returns the next token, or null when the buffer is exhausted.
    /// Consecutive split bytes are treated as a single separator.
    pub fn next(self: &SplitIterator) -> ?[]const u8 {
        // Skip any leading split bytes to find the start of the token.
        while (self.index < self.buffer.len) {
            if (!self.isSplitByte(self.buffer[self.index])) break;
            self.index += 1;
        }
        if (self.index == self.buffer.len) {
            return null;
        }
        const token_start = self.index;

        // Advance until the next split byte or the end of the buffer.
        while (self.index < self.buffer.len) {
            if (self.isSplitByte(self.buffer[self.index])) break;
            self.index += 1;
        }
        return self.buffer[token_start..self.index];
    }

    /// Returns a slice of the remaining bytes. Does not affect iterator state.
    pub fn rest(self: &const SplitIterator) -> []const u8 {
        // Peek past any split bytes without mutating self.index.
        var peek_index: usize = self.index;
        while (peek_index < self.buffer.len) {
            if (!self.isSplitByte(self.buffer[peek_index])) break;
            peek_index += 1;
        }
        return self.buffer[peek_index..];
    }

    /// True when `byte` is one of the configured separator bytes.
    fn isSplitByte(self: &const SplitIterator, byte: u8) -> bool {
        for (self.split_bytes) |split_byte| {
            if (split_byte == byte) {
                return true;
            }
        }
        return false;
    }
};
|
|
|
|
|
2017-05-28 14:54:53 +01:00
|
|
|
/// Naively combines a series of strings with a separator.
/// Allocates memory for the result, which must be freed by the caller.
/// A separator is skipped when the output already ends with one.
pub fn join(allocator: &Allocator, sep: u8, strings: ...) -> %[]u8 {
    comptime assert(strings.len >= 1);
    // Upper bound on the result size: every string plus one separator each.
    var total_strings_len: usize = strings.len; // 1 sep per string
    {
        comptime var string_i = 0;
        inline while (string_i < strings.len) : (string_i += 1) {
            const arg = ([]const u8)(strings[string_i]);
            total_strings_len += arg.len;
        }
    }

    const buf = %return allocator.alloc(u8, total_strings_len);
    %defer allocator.free(buf);

    var buf_index: usize = 0;
    comptime var string_i = 0;
    inline while (true) {
        const arg = ([]const u8)(strings[string_i]);
        string_i += 1;
        copy(u8, buf[buf_index..], arg);
        buf_index += arg.len;
        if (string_i >= strings.len) break;
        // Guard buf_index == 0 (possible when the leading strings are
        // empty); the previous code computed buf_index - 1 unconditionally,
        // underflowing usize in that case.
        if (buf_index == 0 or buf[buf_index - 1] != sep) {
            buf[buf_index] = sep;
            buf_index += 1;
        }
    }

    // The returned slice may be shorter than the underlying allocation.
    return buf[0..buf_index];
}
|
|
|
|
|
|
|
|
test "mem.join" {
    // Separator is inserted between each pair of strings.
    assert(eql(u8, %%join(debug.global_allocator, ',', "a", "b", "c"), "a,b,c"));
    // A single string comes back unchanged (no trailing separator).
    assert(eql(u8, %%join(debug.global_allocator, ',', "a"), "a"));
}
|
|
|
|
|
2017-03-16 20:02:35 +00:00
|
|
|
test "testStringEquality" {
    // eql compares element-wise and by length.
    assert(eql(u8, "abcd", "abcd"));
    assert(!eql(u8, "abcdef", "abZdef"));
    assert(!eql(u8, "abcdefg", "abcdef"));
}
|
|
|
|
|
2017-03-16 20:02:35 +00:00
|
|
|
test "testReadInt" {
    // Run the same assertions at runtime and at compile time.
    testReadIntImpl();
    comptime testReadIntImpl();
}
|
|
|
|
// Shared body for the runtime and comptime readInt tests.
fn testReadIntImpl() {
    {
        // Both endiannesses, via the generic and the specialized readers.
        const bytes = []u8{ 0x12, 0x34, 0x56, 0x78 };
        assert(readInt(bytes, u32, builtin.Endian.Big) == 0x12345678);
        assert(readIntBE(u32, bytes) == 0x12345678);
        assert(readIntBE(i32, bytes) == 0x12345678);
        assert(readInt(bytes, u32, builtin.Endian.Little) == 0x78563412);
        assert(readIntLE(u32, bytes) == 0x78563412);
        assert(readIntLE(i32, bytes) == 0x78563412);
    }
    {
        // readInt into a wider type: leading zero bytes, big-endian.
        const buf = []u8{0x00, 0x00, 0x12, 0x34};
        const answer = readInt(buf, u64, builtin.Endian.Big);
        assert(answer == 0x00001234);
    }
    {
        // readInt into a wider type: trailing zero bytes, little-endian.
        const buf = []u8{0x12, 0x34, 0x00, 0x00};
        const answer = readInt(buf, u64, builtin.Endian.Little);
        assert(answer == 0x00003412);
    }
    {
        // Signed results: high bit set reinterprets as negative.
        const bytes = []u8{0xff, 0xfe};
        assert(readIntBE(u16, bytes) == 0xfffe);
        assert(readIntBE(i16, bytes) == -0x0002);
        assert(readIntLE(u16, bytes) == 0xfeff);
        assert(readIntLE(i16, bytes) == -0x0101);
    }
}
|
2017-02-12 22:22:35 +00:00
|
|
|
|
2017-03-16 20:02:35 +00:00
|
|
|
test "testWriteInt" {
    // Run the same assertions at runtime and at compile time.
    testWriteIntImpl();
    comptime testWriteIntImpl();
}
|
|
|
|
// Shared body for the runtime and comptime writeInt tests.
fn testWriteIntImpl() {
    var bytes: [4]u8 = undefined;

    // u32 fills the buffer exactly, big-endian.
    writeInt(bytes[0..], u32(0x12345678), builtin.Endian.Big);
    assert(eql(u8, bytes, []u8{ 0x12, 0x34, 0x56, 0x78 }));

    // Same byte pattern results from the byte-swapped value, little-endian.
    writeInt(bytes[0..], u32(0x78563412), builtin.Endian.Little);
    assert(eql(u8, bytes, []u8{ 0x12, 0x34, 0x56, 0x78 }));

    // u16 into a 4-byte buffer: the unused bytes are zero-padded.
    writeInt(bytes[0..], u16(0x1234), builtin.Endian.Big);
    assert(eql(u8, bytes, []u8{ 0x00, 0x00, 0x12, 0x34 }));

    writeInt(bytes[0..], u16(0x1234), builtin.Endian.Little);
    assert(eql(u8, bytes, []u8{ 0x34, 0x12, 0x00, 0x00 }));
}
|
2017-04-04 06:52:20 +01:00
|
|
|
|
2017-05-07 04:13:12 +01:00
|
|
|
|
|
|
|
/// Returns the smallest element of `slice`.
/// `slice` must be non-empty (slice[0] is safety-checked).
pub fn min(comptime T: type, slice: []const T) -> T {
    var result = slice[0];
    for (slice[1..]) |item| {
        result = math.min(result, item);
    }
    return result;
}
|
|
|
|
|
|
|
|
test "mem.min" {
    // 'a' is the smallest byte of the string.
    assert(min(u8, "abcdefg") == 'a');
}
|
|
|
|
|
|
|
|
/// Returns the largest element of `slice`.
/// `slice` must be non-empty (slice[0] is safety-checked).
pub fn max(comptime T: type, slice: []const T) -> T {
    var result = slice[0];
    for (slice[1..]) |item| {
        result = math.max(result, item);
    }
    return result;
}
|
|
|
|
|
|
|
|
test "mem.max" {
    // 'g' is the largest byte of the string.
    assert(max(u8, "abcdefg") == 'g');
}
|
2017-12-15 00:41:35 +00:00
|
|
|
|
|
|
|
/// Exchanges the values pointed to by `a` and `b`.
pub fn swap(comptime T: type, a: &T, b: &T) {
    const a_value = *a;
    *a = *b;
    *b = a_value;
}
|
|
|
|
|
|
|
|
/// In-place order reversal of a slice
/// Swaps symmetric pairs from the outside in; the middle element of an
/// odd-length slice stays put.
pub fn reverse(comptime T: type, items: []T) {
    var left: usize = 0;
    while (left < items.len / 2) : (left += 1) {
        swap(T, &items[left], &items[items.len - 1 - left]);
    }
}
|
|
|
|
|
|
|
|
test "std.mem.reverse" {
    var arr = []i32{ 5, 3, 1, 2, 4 };
    reverse(i32, arr[0..]);

    // The array is reversed in place. (Added the missing statement
    // terminator on this assert.)
    assert(eql(i32, arr, []i32{ 4, 2, 1, 3, 5 }));
}
|
|
|
|
|
|
|
|
/// In-place rotation of the values in an array ([0 1 2 3] becomes [1 2 3 0] if we rotate by 1)
/// Assumes 0 <= amount <= items.len
pub fn rotate(comptime T: type, items: []T, amount: usize) {
    // Classic three-reversal rotation: reverse each part, then the whole
    // slice. This moves the first `amount` items to the end while keeping
    // the relative order within each part.
    reverse(T, items[0..amount]);
    reverse(T, items[amount..]);
    reverse(T, items);
}
|
|
|
|
|
|
|
|
test "std.mem.rotate" {
    var arr = []i32{ 5, 3, 1, 2, 4 };
    rotate(i32, arr[0..], 2);

    // Rotating by 2 moves the first two items to the end. (Added the
    // missing statement terminator on this assert.)
    assert(eql(i32, arr, []i32{ 1, 2, 4, 5, 3 }));
}
|