//! This ring buffer stores read and write indices while being able to utilise
//! the full backing slice by incrementing the indices modulo twice the slice's
//! length and reducing indices modulo the slice's length on slice access. This
//! means that whether the ring buffer is full or empty can be distinguished by
//! looking at the difference between the read and write indices without adding
//! an extra boolean flag or having to reserve a slot in the buffer.
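//!
//! As a worked sketch of that trick, assuming a backing slice of length 4:
//! indices advance modulo 8, so after four unread writes `read_index == 0`
//! and `write_index == 4`. Their difference equals the slice length, marking
//! the buffer full, while equal indices mark it empty.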
//!
//! This ring buffer has not been implemented with thread safety in mind, and
//! therefore should not be assumed to be suitable for use cases involving
//! separate reader and writer threads.

const Allocator = @import("std").mem.Allocator;
const assert = @import("std").debug.assert;
const copyForwards = @import("std").mem.copyForwards;

const RingBuffer = @This();

data: []u8,
read_index: usize,
write_index: usize,

pub const Error = error{ Full, ReadLengthInvalid };

/// Allocate a new `RingBuffer`; `deinit()` should be called to free the buffer.
pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
    const bytes = try allocator.alloc(u8, capacity);
    return RingBuffer{
        .data = bytes,
        .write_index = 0,
        .read_index = 0,
    };
}

/// Free the data backing a `RingBuffer`; must be passed the same `Allocator` as
/// `init()`.
pub fn deinit(self: *RingBuffer, allocator: Allocator) void {
    allocator.free(self.data);
    self.* = undefined;
}

/// Returns `index` modulo the length of the backing slice.
pub fn mask(self: RingBuffer, index: usize) usize {
    return index % self.data.len;
}

/// Returns `index` modulo twice the length of the backing slice.
pub fn mask2(self: RingBuffer, index: usize) usize {
    return index % (2 * self.data.len);
}
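
// A minimal sketch of the index arithmetic, assuming a backing slice of
// length 4: unmasked indices live modulo 8, and `mask` folds them back into
// the slice's bounds.
test "RingBuffer: mask and mask2 arithmetic" {
    var storage: [4]u8 = undefined;
    const ring = RingBuffer{ .data = &storage, .read_index = 0, .write_index = 0 };
    try @import("std").testing.expectEqual(@as(usize, 2), ring.mask(6));
    try @import("std").testing.expectEqual(@as(usize, 1), ring.mask2(9));
}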

/// Write `byte` into the ring buffer. Returns `error.Full` if the ring
/// buffer is full.
pub fn write(self: *RingBuffer, byte: u8) Error!void {
    if (self.isFull()) return error.Full;
    self.writeAssumeCapacity(byte);
}

/// Write `byte` into the ring buffer. If the ring buffer is full, the
/// oldest byte is overwritten.
pub fn writeAssumeCapacity(self: *RingBuffer, byte: u8) void {
    self.data[self.mask(self.write_index)] = byte;
    self.write_index = self.mask2(self.write_index + 1);
}

/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
/// buffer does not have enough space, without writing any data.
/// Uses `@memcpy`, so `bytes` must not overlap the ring buffer's data.
pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
    if (self.len() + bytes.len > self.data.len) return error.Full;
    self.writeSliceAssumeCapacity(bytes);
}

/// Write `bytes` into the ring buffer. If there is not enough space, older
/// bytes will be overwritten; asserts that `bytes` is not longer than the
/// buffer's capacity.
/// Uses `@memcpy`, so `bytes` must not overlap the ring buffer's data.
pub fn writeSliceAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
    assert(bytes.len <= self.data.len);
    // Copy the first part, from the current write position up to at most the
    // end of the backing slice.
    const data_start = self.mask(self.write_index);
    const part1_data_end = @min(data_start + bytes.len, self.data.len);
    const part1_len = part1_data_end - data_start;
    @memcpy(self.data[data_start..part1_data_end], bytes[0..part1_len]);

    // Copy whatever wrapped past the end of the backing slice to its start.
    const remaining = bytes.len - part1_len;
    const to_write = @min(remaining, remaining % self.data.len + self.data.len);
    const part2_bytes_start = bytes.len - to_write;
    const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
    const part2_len = part2_bytes_end - part2_bytes_start;
    @memcpy(self.data[0..part2_len], bytes[part2_bytes_start..part2_bytes_end]);
    if (part2_bytes_end != bytes.len) {
        const part3_len = bytes.len - part2_bytes_end;
        @memcpy(self.data[0..part3_len], bytes[part2_bytes_end..bytes.len]);
    }
    self.write_index = self.mask2(self.write_index + bytes.len);
}
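
// An illustrative test, not part of the original API surface: a write that
// wraps past the end of the backing slice exercises the two-part copy above.
test "RingBuffer: writeSlice wraps around the backing slice" {
    const testing = @import("std").testing;
    var ring = try RingBuffer.init(testing.allocator, 4);
    defer ring.deinit(testing.allocator);

    try ring.writeSlice("ab");
    try testing.expectEqual(@as(?u8, 'a'), ring.read());
    try testing.expectEqual(@as(?u8, 'b'), ring.read());
    // read_index == write_index == 2, so this write wraps.
    try ring.writeSlice("cdef");
    var dest: [4]u8 = undefined;
    try ring.readFirst(&dest, 4);
    try testing.expectEqualSlices(u8, "cdef", &dest);
}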

/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
/// buffer does not have enough space, without writing any data.
/// Uses `copyForwards`, so it can write slices from this `RingBuffer` into
/// itself.
pub fn writeSliceForwards(self: *RingBuffer, bytes: []const u8) Error!void {
    if (self.len() + bytes.len > self.data.len) return error.Full;
    self.writeSliceForwardsAssumeCapacity(bytes);
}

/// Write `bytes` into the ring buffer. If there is not enough space, older
/// bytes will be overwritten; asserts that `bytes` is not longer than the
/// buffer's capacity.
/// Uses `copyForwards`, so it can write slices from this `RingBuffer` into
/// itself.
pub fn writeSliceForwardsAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
    assert(bytes.len <= self.data.len);
    // Copy the first part, from the current write position up to at most the
    // end of the backing slice.
    const data_start = self.mask(self.write_index);
    const part1_data_end = @min(data_start + bytes.len, self.data.len);
    const part1_len = part1_data_end - data_start;
    copyForwards(u8, self.data[data_start..], bytes[0..part1_len]);

    // Copy whatever wrapped past the end of the backing slice to its start.
    const remaining = bytes.len - part1_len;
    const to_write = @min(remaining, remaining % self.data.len + self.data.len);
    const part2_bytes_start = bytes.len - to_write;
    const part2_bytes_end = @min(part2_bytes_start + self.data.len, bytes.len);
    copyForwards(u8, self.data[0..], bytes[part2_bytes_start..part2_bytes_end]);
    if (part2_bytes_end != bytes.len)
        copyForwards(u8, self.data[0..], bytes[part2_bytes_end..bytes.len]);
    self.write_index = self.mask2(self.write_index + bytes.len);
}

/// Consume a byte from the ring buffer and return it. Returns `null` if the
/// ring buffer is empty.
pub fn read(self: *RingBuffer) ?u8 {
    if (self.isEmpty()) return null;
    return self.readAssumeLength();
}

/// Consume a byte from the ring buffer and return it; asserts that the buffer
/// is not empty.
pub fn readAssumeLength(self: *RingBuffer) u8 {
    assert(!self.isEmpty());
    const byte = self.data[self.mask(self.read_index)];
    self.read_index = self.mask2(self.read_index + 1);
    return byte;
}

/// Read the first `length` bytes written to the ring buffer into `dest`.
/// Returns `error.ReadLengthInvalid` if `length` is greater than the ring
/// buffer's or `dest`'s length.
/// Uses `@memcpy`, so `dest` must not overlap the ring buffer's data.
pub fn readFirst(self: *RingBuffer, dest: []u8, length: usize) Error!void {
    if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
    self.readFirstAssumeLength(dest, length);
}

/// Read the first `length` bytes written to the ring buffer into `dest`;
/// asserts that `length` is not greater than the ring buffer's or `dest`'s
/// length.
/// Uses `@memcpy`, so `dest` must not overlap the ring buffer's data.
/// Advances the read index by `length`.
pub fn readFirstAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
    assert(length <= self.len() and length <= dest.len);
    const slice = self.sliceAt(self.read_index, length);
    slice.copyTo(dest);
    self.read_index = self.mask2(self.read_index + length);
}

/// Read the last `length` bytes written to the ring buffer into `dest`.
/// Returns `error.ReadLengthInvalid` if `length` is greater than the ring
/// buffer's or `dest`'s length.
/// Uses `@memcpy`, so `dest` must not overlap the ring buffer's data.
/// Reduces the write index by `length`.
pub fn readLast(self: *RingBuffer, dest: []u8, length: usize) Error!void {
    if (length > self.len() or length > dest.len) return error.ReadLengthInvalid;
    self.readLastAssumeLength(dest, length);
}

/// Read the last `length` bytes written to the ring buffer into `dest`;
/// asserts that `length` is not greater than the ring buffer's or `dest`'s
/// length.
/// Uses `@memcpy`, so `dest` must not overlap the ring buffer's data.
/// Reduces the write index by `length`.
pub fn readLastAssumeLength(self: *RingBuffer, dest: []u8, length: usize) void {
    assert(length <= self.len() and length <= dest.len);
    const slice = self.sliceLast(length);
    slice.copyTo(dest);
    // Move the write index back by `length` without underflowing the
    // unmasked index.
    self.write_index = if (self.write_index >= self.data.len)
        self.write_index - length
    else
        self.mask(self.write_index + self.data.len - length);
}

/// Returns `true` if the ring buffer is empty and `false` otherwise.
pub fn isEmpty(self: RingBuffer) bool {
    return self.write_index == self.read_index;
}

/// Returns `true` if the ring buffer is full and `false` otherwise.
pub fn isFull(self: RingBuffer) bool {
    return self.mask2(self.write_index + self.data.len) == self.read_index;
}

/// Returns the number of bytes available for reading.
pub fn len(self: RingBuffer) usize {
    // When the write index has wrapped around the doubled index space but the
    // read index has not, un-wrap it before subtracting.
    const wrap_offset = 2 * self.data.len * @intFromBool(self.write_index < self.read_index);
    const adjusted_write_index = self.write_index + wrap_offset;
    return adjusted_write_index - self.read_index;
}
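
// A small sketch of `len` across writes and reads, assuming a capacity of 4.
test "RingBuffer: len counts unread bytes" {
    const testing = @import("std").testing;
    var ring = try RingBuffer.init(testing.allocator, 4);
    defer ring.deinit(testing.allocator);

    try ring.writeSlice("abcd");
    try testing.expectEqual(@as(usize, 4), ring.len());
    _ = ring.read();
    try testing.expectEqual(@as(usize, 3), ring.len());
}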

/// A `Slice` represents a region of a ring buffer. The region is split into two
/// sections as the ring buffer data will not be contiguous if the desired
/// region wraps to the start of the backing slice.
pub const Slice = struct {
    first: []u8,
    second: []u8,

    /// Copy data from `self` into `dest`.
    pub fn copyTo(self: Slice, dest: []u8) void {
        @memcpy(dest[0..self.first.len], self.first);
        @memcpy(dest[self.first.len..][0..self.second.len], self.second);
    }
};

/// Returns a `Slice` for the region of the ring buffer starting at
/// `self.mask(start_unmasked)` with the specified length.
pub fn sliceAt(self: RingBuffer, start_unmasked: usize, length: usize) Slice {
    assert(length <= self.data.len);
    const slice1_start = self.mask(start_unmasked);
    const slice1_end = @min(self.data.len, slice1_start + length);
    const slice1 = self.data[slice1_start..slice1_end];
    const slice2 = self.data[0 .. length - slice1.len];
    return Slice{
        .first = slice1,
        .second = slice2,
    };
}

/// Returns a `Slice` for the last `length` bytes written to the ring buffer.
/// Does not check that any bytes have been written into the region.
pub fn sliceLast(self: RingBuffer, length: usize) Slice {
    return self.sliceAt(self.write_index + self.data.len - length, length);
}
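
// An end-to-end usage sketch; the capacity and byte values are illustrative.
test "RingBuffer: basic write and read round trip" {
    const testing = @import("std").testing;
    var ring = try RingBuffer.init(testing.allocator, 2);
    defer ring.deinit(testing.allocator);

    try testing.expect(ring.isEmpty());
    try ring.write(1);
    try ring.write(2);
    try testing.expect(ring.isFull());
    try testing.expectError(error.Full, ring.write(3));
    try testing.expectEqual(@as(?u8, 1), ring.read());
    try testing.expectEqual(@as(?u8, 2), ring.read());
    try testing.expect(ring.isEmpty());
}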