std.mem.Allocator: allow shrink to fail

closes #13535
Andrew Kelley 2022-11-27 01:07:35 -07:00
parent deda6b5146
commit ceb0a632cf
57 changed files with 950 additions and 1279 deletions
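To summarize the caller-facing effect before the per-file diffs: `Allocator.shrink` is removed and `Allocator.resize` now returns `bool`, so callers re-slice on success and keep the old length on refusal. A minimal sketch of that migration, mirroring the updated `testAllocator` further down (the `gpa`/`buf` names are illustrative, not from the diff):

```zig
const std = @import("std");

// Sketch only, following the pattern used in the updated tests below.
fn shrinkInPlace(gpa: std.mem.Allocator) !void {
    var buf = try gpa.alloc(u8, 100);
    defer gpa.free(buf);

    // Before this commit: buf = gpa.shrink(buf, 50); // could never fail
    // After: resize reports whether the allocator accepted the new length;
    // on refusal the original slice simply stays valid.
    if (gpa.resize(buf, 50)) {
        buf = buf[0..50];
    }
}
```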


@ -471,7 +471,7 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
},
Token.Id.Separator => {},
Token.Id.BracketClose => {
try nodes.append(Node{ .SeeAlso = list.toOwnedSlice() });
try nodes.append(Node{ .SeeAlso = try list.toOwnedSlice() });
break;
},
else => return parseError(tokenizer, see_also_tok, "invalid see_also token", .{}),
@ -610,7 +610,7 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
.source_token = source_token,
.just_check_syntax = just_check_syntax,
.mode = mode,
.link_objects = link_objects.toOwnedSlice(),
.link_objects = try link_objects.toOwnedSlice(),
.target_str = target_str,
.link_libc = link_libc,
.backend_stage1 = backend_stage1,
@ -707,8 +707,8 @@ fn genToc(allocator: Allocator, tokenizer: *Tokenizer) !Toc {
}
return Toc{
.nodes = nodes.toOwnedSlice(),
.toc = toc_buf.toOwnedSlice(),
.nodes = try nodes.toOwnedSlice(),
.toc = try toc_buf.toOwnedSlice(),
.urls = urls,
};
}
@ -729,7 +729,7 @@ fn urlize(allocator: Allocator, input: []const u8) ![]u8 {
else => {},
}
}
return buf.toOwnedSlice();
return try buf.toOwnedSlice();
}
fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
@ -738,7 +738,7 @@ fn escapeHtml(allocator: Allocator, input: []const u8) ![]u8 {
const out = buf.writer();
try writeEscaped(out, input);
return buf.toOwnedSlice();
return try buf.toOwnedSlice();
}
fn writeEscaped(out: anytype, input: []const u8) !void {
@ -854,7 +854,7 @@ fn termColor(allocator: Allocator, input: []const u8) ![]u8 {
},
}
}
return buf.toOwnedSlice();
return try buf.toOwnedSlice();
}
const builtin_types = [_][]const u8{


@ -1872,7 +1872,7 @@ const IndexHeader = struct {
const len = @as(usize, 1) << @intCast(math.Log2Int(usize), new_bit_index);
const index_size = hash_map.capacityIndexSize(new_bit_index);
const nbytes = @sizeOf(IndexHeader) + index_size * len;
const bytes = try allocator.allocAdvanced(u8, @alignOf(IndexHeader), nbytes, .exact);
const bytes = try allocator.alignedAlloc(u8, @alignOf(IndexHeader), nbytes);
@memset(bytes.ptr + @sizeOf(IndexHeader), 0xff, bytes.len - @sizeOf(IndexHeader));
const result = @ptrCast(*IndexHeader, bytes.ptr);
result.* = .{


@ -47,6 +47,10 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: Allocator) Self {
return Self{
@ -92,18 +96,31 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return result;
}
/// The caller owns the returned memory. Empties this ArrayList.
pub fn toOwnedSlice(self: *Self) Slice {
/// The caller owns the returned memory. Empties this ArrayList,
/// however its capacity may or may not be cleared and deinit() is
/// still required to clean up its memory.
pub fn toOwnedSlice(self: *Self) Allocator.Error!Slice {
const allocator = self.allocator;
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = init(allocator);
return result;
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
self.* = init(allocator);
return result;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
mem.copy(T, new_memory, self.items);
@memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
self.items.len = 0;
return new_memory;
}
/// The caller owns the returned memory. Empties this ArrayList.
pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: T) Allocator.Error![:sentinel]T {
try self.append(sentinel);
const result = self.toOwnedSlice();
pub fn toOwnedSliceSentinel(self: *Self, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
try self.ensureTotalCapacityPrecise(self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice();
return result[0 .. result.len - 1 :sentinel];
}
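A hedged usage sketch of the now-failable `toOwnedSlice` (the test name and `std.testing.allocator` are illustrative, not part of this diff):

```zig
const std = @import("std");

test "toOwnedSlice is now failable" {
    const gpa = std.testing.allocator;
    var list = std.ArrayList(u8).init(gpa);
    // Per the new doc comment, deinit() is still required: on the
    // fallback path the list keeps its old capacity.
    defer list.deinit();

    try list.appendSlice("hello");

    // Before: const owned = list.toOwnedSlice();
    const owned = try list.toOwnedSlice();
    defer gpa.free(owned);
    try std.testing.expectEqualStrings("hello", owned);
}
```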
@ -299,17 +316,30 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
pub fn shrinkAndFree(self: *Self, new_len: usize) void {
assert(new_len <= self.items.len);
if (@sizeOf(T) > 0) {
self.items = self.allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
error.OutOfMemory => { // no problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
self.capacity = new_len;
} else {
if (@sizeOf(T) == 0) {
self.items.len = new_len;
return;
}
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_len)) {
self.capacity = new_len;
self.items.len = new_len;
return;
}
const new_memory = self.allocator.alignedAlloc(T, alignment, new_len) catch |e| switch (e) {
error.OutOfMemory => {
// No problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
mem.copy(T, new_memory, self.items);
self.allocator.free(old_memory);
self.items = new_memory;
self.capacity = new_memory.len;
}
/// Reduce length to `new_len`.
@ -334,19 +364,20 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Modify the array so that it can hold at least `new_capacity` items.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacity(self: *Self, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) > 0) {
if (self.capacity >= new_capacity) return;
var better_capacity = self.capacity;
while (true) {
better_capacity +|= better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
return self.ensureTotalCapacityPrecise(better_capacity);
} else {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
var better_capacity = self.capacity;
while (true) {
better_capacity +|= better_capacity / 2 + 8;
if (better_capacity >= new_capacity) break;
}
return self.ensureTotalCapacityPrecise(better_capacity);
}
/// Modify the array so that it can hold at least `new_capacity` items.
@ -354,15 +385,27 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) > 0) {
if (self.capacity >= new_capacity) return;
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
// TODO This can be optimized to avoid needlessly copying undefined memory.
const new_memory = try self.allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
if (self.capacity >= new_capacity) return;
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (self.allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
} else {
const new_memory = try self.allocator.alignedAlloc(T, alignment, new_capacity);
mem.copy(T, new_memory, self.items);
self.allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
} else {
self.capacity = math.maxInt(usize);
}
}
@ -381,8 +424,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Increase length by 1, returning pointer to the new item.
/// The returned pointer becomes invalid when the list resized.
pub fn addOne(self: *Self) Allocator.Error!*T {
const newlen = self.items.len + 1;
try self.ensureTotalCapacity(newlen);
try self.ensureTotalCapacity(self.items.len + 1);
return self.addOneAssumeCapacity();
}
@ -392,7 +434,6 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// **Does not** invalidate element pointers.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
self.items.len += 1;
return &self.items[self.items.len - 1];
}
@ -490,6 +531,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub fn SentinelSlice(comptime s: T) type {
return if (alignment) |a| ([:s]align(a) T) else [:s]T;
}
/// Initialize with capacity to hold at least num elements.
/// The resulting capacity is likely to be equal to `num`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
@ -511,17 +556,29 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return .{ .items = self.items, .capacity = self.capacity, .allocator = allocator };
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Slice {
const result = allocator.shrink(self.allocatedSlice(), self.items.len);
self.* = Self{};
return result;
/// The caller owns the returned memory. Empties this ArrayList,
/// however its capacity may or may not be cleared and deinit() is
/// still required to clean up its memory.
pub fn toOwnedSlice(self: *Self, allocator: Allocator) Allocator.Error!Slice {
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, self.items.len)) {
const result = self.items;
self.* = .{};
return result;
}
const new_memory = try allocator.alignedAlloc(T, alignment, self.items.len);
mem.copy(T, new_memory, self.items);
@memset(@ptrCast([*]u8, self.items.ptr), undefined, self.items.len * @sizeOf(T));
self.items.len = 0;
return new_memory;
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) Allocator.Error![:sentinel]T {
try self.append(allocator, sentinel);
const result = self.toOwnedSlice(allocator);
pub fn toOwnedSliceSentinel(self: *Self, allocator: Allocator, comptime sentinel: T) Allocator.Error!SentinelSlice(sentinel) {
try self.ensureTotalCapacityPrecise(allocator, self.items.len + 1);
self.appendAssumeCapacity(sentinel);
const result = try self.toOwnedSlice(allocator);
return result[0 .. result.len - 1 :sentinel];
}
@ -701,16 +758,34 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
}
/// Reduce allocated capacity to `new_len`.
/// May invalidate element pointers.
pub fn shrinkAndFree(self: *Self, allocator: Allocator, new_len: usize) void {
assert(new_len <= self.items.len);
self.items = allocator.realloc(self.allocatedSlice(), new_len) catch |e| switch (e) {
error.OutOfMemory => { // no problem, capacity is still correct then.
if (@sizeOf(T) == 0) {
self.items.len = new_len;
return;
}
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_len)) {
self.capacity = new_len;
self.items.len = new_len;
return;
}
const new_memory = allocator.alignedAlloc(T, alignment, new_len) catch |e| switch (e) {
error.OutOfMemory => {
// No problem, capacity is still correct then.
self.items.len = new_len;
return;
},
};
self.capacity = new_len;
mem.copy(T, new_memory, self.items);
allocator.free(old_memory);
self.items = new_memory;
self.capacity = new_memory.len;
}
/// Reduce length to `new_len`.
@ -752,11 +827,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// (but not guaranteed) to be equal to `new_capacity`.
/// Invalidates pointers if additional memory is needed.
pub fn ensureTotalCapacityPrecise(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
if (@sizeOf(T) == 0) {
self.capacity = math.maxInt(usize);
return;
}
if (self.capacity >= new_capacity) return;
const new_memory = try allocator.reallocAtLeast(self.allocatedSlice(), new_capacity);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
// Here we avoid copying allocated but unused bytes by
// attempting a resize in place, and falling back to allocating
// a new buffer and doing our own copy. With a realloc() call,
// the allocator implementation would pointlessly copy our
// extra capacity.
const old_memory = self.allocatedSlice();
if (allocator.resize(old_memory, new_capacity)) {
self.capacity = new_capacity;
} else {
const new_memory = try allocator.alignedAlloc(T, alignment, new_capacity);
mem.copy(T, new_memory, self.items);
allocator.free(old_memory);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
}
}
/// Modify the array so that it can hold at least `additional_count` **more** items.


@ -2934,7 +2934,7 @@ pub const LibExeObjStep = struct {
}
}
try zig_args.append(mcpu_buffer.toOwnedSlice());
try zig_args.append(try mcpu_buffer.toOwnedSlice());
}
if (self.target.dynamic_linker.get()) |dynamic_linker| {


@ -421,8 +421,8 @@ pub const ChildProcess = struct {
return ExecResult{
.term = try child.wait(),
.stdout = stdout.toOwnedSlice(),
.stderr = stderr.toOwnedSlice(),
.stdout = try stdout.toOwnedSlice(),
.stderr = try stderr.toOwnedSlice(),
};
}
@ -1270,7 +1270,7 @@ pub fn createWindowsEnvBlock(allocator: mem.Allocator, env_map: *const EnvMap) !
i += 1;
result[i] = 0;
i += 1;
return allocator.shrink(result, i);
return try allocator.realloc(result, i);
}
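Where a caller needs the exact smaller length, as in `createWindowsEnvBlock` above, the commit swaps `shrink` for `realloc`, which may move the buffer and can fail. A small illustrative sketch of that pattern, assuming `gpa` is any `std.mem.Allocator`:

```zig
const std = @import("std");

// Sketch: give back the unused tail and get exactly `used` bytes,
// propagating OutOfMemory if the allocator cannot oblige.
fn trimToExact(gpa: std.mem.Allocator, buf: []u8, used: usize) ![]u8 {
    // Before this commit: return gpa.shrink(buf, used);
    return gpa.realloc(buf, used);
}
```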
pub fn createNullDelimitedEnvMap(arena: mem.Allocator, env_map: *const EnvMap) ![:null]?[*:0]u8 {


@ -1112,7 +1112,7 @@ fn readMachODebugInfo(allocator: mem.Allocator, macho_file: File) !ModuleDebugIn
}
assert(state == .oso_close);
const symbols = allocator.shrink(symbols_buf, symbol_index);
const symbols = try allocator.realloc(symbols_buf, symbol_index);
// Even though lld emits symbols in ascending order, this debug code
// should work for programs linked in any valid way.


@ -954,11 +954,9 @@ pub const File = struct {
};
if (optional_sentinel) |sentinel| {
try array_list.append(sentinel);
const buf = array_list.toOwnedSlice();
return buf[0 .. buf.len - 1 :sentinel];
return try array_list.toOwnedSliceSentinel(sentinel);
} else {
return array_list.toOwnedSlice();
return try array_list.toOwnedSlice();
}
}


@ -1155,7 +1155,7 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]
}
if (to_rest.len == 0) {
// shave off the trailing slash
return allocator.shrink(result, result_index - 1);
return allocator.realloc(result, result_index - 1);
}
mem.copy(u8, result[result_index..], to_rest);


@ -160,7 +160,7 @@ pub const PreopenList = struct {
if (cwd_root) |root| assert(fs.path.isAbsolute(root));
// Clear contents if we're being called again
for (self.toOwnedSlice()) |preopen| {
for (try self.toOwnedSlice()) |preopen| {
switch (preopen.type) {
PreopenType.Dir => |path| self.buffer.allocator.free(path),
}
@ -263,8 +263,8 @@ pub const PreopenList = struct {
}
/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: *Self) []Preopen {
return self.buffer.toOwnedSlice();
pub fn toOwnedSlice(self: *Self) ![]Preopen {
return try self.buffer.toOwnedSlice();
}
};


@ -52,11 +52,12 @@ const CAllocator = struct {
return @intToPtr(*[*]u8, @ptrToInt(ptr) - @sizeOf(usize));
}
fn alignedAlloc(len: usize, alignment: usize) ?[*]u8 {
fn alignedAlloc(len: usize, log2_align: u8) ?[*]u8 {
const alignment = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_align);
if (supports_posix_memalign) {
// The posix_memalign only accepts alignment values that are a
// multiple of the pointer size
const eff_alignment = std.math.max(alignment, @sizeOf(usize));
const eff_alignment = @max(alignment, @sizeOf(usize));
var aligned_ptr: ?*anyopaque = undefined;
if (c.posix_memalign(&aligned_ptr, eff_alignment, len) != 0)
@ -99,58 +100,42 @@ const CAllocator = struct {
fn alloc(
_: *anyopaque,
len: usize,
alignment: u29,
len_align: u29,
log2_align: u8,
return_address: usize,
) error{OutOfMemory}![]u8 {
) ?[*]u8 {
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));
var ptr = alignedAlloc(len, alignment) orelse return error.OutOfMemory;
if (len_align == 0) {
return ptr[0..len];
}
const full_len = init: {
if (CAllocator.supports_malloc_size) {
const s = alignedAllocSize(ptr);
assert(s >= len);
break :init s;
}
break :init len;
};
return ptr[0..mem.alignBackwardAnyAlign(full_len, len_align)];
return alignedAlloc(len, log2_align);
}
fn resize(
_: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
) bool {
_ = log2_buf_align;
_ = return_address;
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
return true;
}
if (CAllocator.supports_malloc_size) {
const full_len = alignedAllocSize(buf.ptr);
if (new_len <= full_len) {
return mem.alignAllocLen(full_len, new_len, len_align);
return true;
}
}
return null;
return false;
}
fn free(
_: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
return_address: usize,
) void {
_ = buf_align;
_ = log2_buf_align;
_ = return_address;
alignedFree(buf.ptr);
}
@ -187,40 +172,35 @@ const raw_c_allocator_vtable = Allocator.VTable{
fn rawCAlloc(
_: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
ret_addr: usize,
) Allocator.Error![]u8 {
_ = len_align;
) ?[*]u8 {
_ = ret_addr;
assert(ptr_align <= @alignOf(std.c.max_align_t));
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
return ptr[0..len];
assert(log2_ptr_align <= comptime std.math.log2_int(usize, @alignOf(std.c.max_align_t)));
// TODO: change the language to make @ptrCast also do alignment cast
const ptr = @alignCast(@alignOf(std.c.max_align_t), c.malloc(len));
return @ptrCast(?[*]align(@alignOf(std.c.max_align_t)) u8, ptr);
}
fn rawCResize(
_: *anyopaque,
buf: []u8,
old_align: u29,
log2_old_align: u8,
new_len: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
_ = old_align;
) bool {
_ = log2_old_align;
_ = ret_addr;
if (new_len <= buf.len) {
return mem.alignAllocLen(buf.len, new_len, len_align);
}
return null;
return new_len <= buf.len;
}
fn rawCFree(
_: *anyopaque,
buf: []u8,
old_align: u29,
log2_old_align: u8,
ret_addr: usize,
) void {
_ = old_align;
_ = log2_old_align;
_ = ret_addr;
c.free(buf.ptr);
}
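The new vtable shape used by every allocator in this file: `alloc` returns an optional many-item pointer (null signals out-of-memory), `resize` returns `bool`, and alignment is passed as a log2 value. A minimal, purely illustrative allocator that refuses every request:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

fn failAlloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
    _ = ctx;
    _ = len;
    // Byte alignment, if needed: @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align)
    _ = log2_ptr_align;
    _ = ret_addr;
    return null; // null replaces error.OutOfMemory at the vtable level
}

fn failResize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
    _ = ctx;
    _ = buf;
    _ = log2_buf_align;
    _ = new_len;
    _ = ret_addr;
    return false; // even shrinking may now be refused
}

fn failFree(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
    _ = ctx;
    _ = buf;
    _ = log2_buf_align;
    _ = ret_addr;
}

pub fn failingAllocator() Allocator {
    return .{
        .ptr = undefined,
        .vtable = &.{ .alloc = failAlloc, .resize = failResize, .free = failFree },
    };
}
```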
@ -241,8 +221,8 @@ else
};
/// Verifies that the adjusted length will still map to the full length
pub fn alignPageAllocLen(full_len: usize, len: usize, len_align: u29) usize {
const aligned_len = mem.alignAllocLen(full_len, len, len_align);
pub fn alignPageAllocLen(full_len: usize, len: usize) usize {
const aligned_len = mem.alignAllocLen(full_len, len);
assert(mem.alignForward(aligned_len, mem.page_size) == full_len);
return aligned_len;
}
@ -257,115 +237,47 @@ const PageAllocator = struct {
.free = free,
};
fn alloc(_: *anyopaque, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = ra;
_ = log2_align;
assert(n > 0);
if (n > maxInt(usize) - (mem.page_size - 1)) {
return error.OutOfMemory;
}
if (n > maxInt(usize) - (mem.page_size - 1)) return null;
const aligned_len = mem.alignForward(n, mem.page_size);
if (builtin.os.tag == .windows) {
const w = os.windows;
// Although officially it's at least aligned to page boundary,
// Windows is known to reserve pages on a 64K boundary. It's
// even more likely that the requested alignment is <= 64K than
// 4K, so we're just allocating blindly and hoping for the best.
// see https://devblogs.microsoft.com/oldnewthing/?p=42223
const addr = w.VirtualAlloc(
null,
aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch return error.OutOfMemory;
// If the allocation is sufficiently aligned, use it.
if (mem.isAligned(@ptrToInt(addr), alignment)) {
return @ptrCast([*]u8, addr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
// If it wasn't, actually do an explicitly aligned allocation.
w.VirtualFree(addr, 0, w.MEM_RELEASE);
const alloc_size = n + alignment - mem.page_size;
while (true) {
// Reserve a range of memory large enough to find a sufficiently
// aligned address.
const reserved_addr = w.VirtualAlloc(
null,
alloc_size,
w.MEM_RESERVE,
w.PAGE_NOACCESS,
) catch return error.OutOfMemory;
const aligned_addr = mem.alignForward(@ptrToInt(reserved_addr), alignment);
// Release the reserved pages (not actually used).
w.VirtualFree(reserved_addr, 0, w.MEM_RELEASE);
// At this point, it is possible that another thread has
// obtained some memory space that will cause the next
// VirtualAlloc call to fail. To handle this, we will retry
// until it succeeds.
const ptr = w.VirtualAlloc(
@intToPtr(*anyopaque, aligned_addr),
aligned_len,
w.MEM_COMMIT | w.MEM_RESERVE,
w.PAGE_READWRITE,
) catch continue;
return @ptrCast([*]u8, ptr)[0..alignPageAllocLen(aligned_len, n, len_align)];
}
) catch return null;
return @ptrCast([*]align(mem.page_size) u8, @alignCast(mem.page_size, addr));
}
const max_drop_len = alignment - @min(alignment, mem.page_size);
const alloc_len = if (max_drop_len <= aligned_len - n)
aligned_len
else
mem.alignForward(aligned_len + max_drop_len, mem.page_size);
const hint = @atomicLoad(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, .Unordered);
const slice = os.mmap(
hint,
alloc_len,
aligned_len,
os.PROT.READ | os.PROT.WRITE,
os.MAP.PRIVATE | os.MAP.ANONYMOUS,
-1,
0,
) catch return error.OutOfMemory;
) catch return null;
assert(mem.isAligned(@ptrToInt(slice.ptr), mem.page_size));
const result_ptr = mem.alignPointer(slice.ptr, alignment) orelse
return error.OutOfMemory;
// Unmap the extra bytes that were only requested in order to guarantee
// that the range of memory we were provided had a proper alignment in
// it somewhere. The extra bytes could be at the beginning, or end, or both.
const drop_len = @ptrToInt(result_ptr) - @ptrToInt(slice.ptr);
if (drop_len != 0) {
os.munmap(slice[0..drop_len]);
}
// Unmap extra pages
const aligned_buffer_len = alloc_len - drop_len;
if (aligned_buffer_len > aligned_len) {
os.munmap(@alignCast(mem.page_size, result_ptr[aligned_len..aligned_buffer_len]));
}
const new_hint = @alignCast(mem.page_size, result_ptr + aligned_len);
const new_hint = @alignCast(mem.page_size, slice.ptr + aligned_len);
_ = @cmpxchgStrong(@TypeOf(next_mmap_addr_hint), &next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
return result_ptr[0..alignPageAllocLen(aligned_len, n, len_align)];
return slice.ptr;
}
fn resize(
_: *anyopaque,
buf_unaligned: []u8,
buf_align: u29,
log2_buf_align: u8,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
) bool {
_ = log2_buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
@ -384,40 +296,40 @@ const PageAllocator = struct {
w.MEM_DECOMMIT,
);
}
return alignPageAllocLen(new_size_aligned, new_size, len_align);
return true;
}
const old_size_aligned = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned <= old_size_aligned) {
return alignPageAllocLen(new_size_aligned, new_size, len_align);
return true;
}
return null;
return false;
}
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
if (new_size_aligned == buf_aligned_len)
return alignPageAllocLen(new_size_aligned, new_size, len_align);
return true;
if (new_size_aligned < buf_aligned_len) {
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr + new_size_aligned);
// TODO: if the next_mmap_addr_hint is within the unmapped range, update it
os.munmap(ptr[0 .. buf_aligned_len - new_size_aligned]);
return alignPageAllocLen(new_size_aligned, new_size, len_align);
return true;
}
// TODO: call mremap
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
return null;
return false;
}
fn free(_: *anyopaque, buf_unaligned: []u8, buf_align: u29, return_address: usize) void {
_ = buf_align;
fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) void {
_ = log2_buf_align;
_ = return_address;
if (builtin.os.tag == .windows) {
os.windows.VirtualFree(buf_unaligned.ptr, 0, os.windows.MEM_RELEASE);
os.windows.VirtualFree(slice.ptr, 0, os.windows.MEM_RELEASE);
} else {
const buf_aligned_len = mem.alignForward(buf_unaligned.len, mem.page_size);
const ptr = @alignCast(mem.page_size, buf_unaligned.ptr);
const buf_aligned_len = mem.alignForward(slice.len, mem.page_size);
const ptr = @alignCast(mem.page_size, slice.ptr);
os.munmap(ptr[0..buf_aligned_len]);
}
}
@ -478,7 +390,7 @@ const WasmPageAllocator = struct {
// Revisit if this is settled: https://github.com/ziglang/zig/issues/3806
const not_found = std.math.maxInt(usize);
fn useRecycled(self: FreeBlock, num_pages: usize, alignment: u29) usize {
fn useRecycled(self: FreeBlock, num_pages: usize, log2_align: u8) usize {
@setCold(true);
for (self.data) |segment, i| {
const spills_into_next = @bitCast(i128, segment) < 0;
@ -492,7 +404,7 @@ const WasmPageAllocator = struct {
while (j + count < self.totalPages() and self.getBit(j + count) == .free) {
count += 1;
const addr = j * mem.page_size;
if (count >= num_pages and mem.isAligned(addr, alignment)) {
if (count >= num_pages and mem.isAlignedLog2(addr, log2_align)) {
self.setBits(j, num_pages, .used);
return j;
}
@ -521,31 +433,30 @@ const WasmPageAllocator = struct {
return mem.alignForward(memsize, mem.page_size) / mem.page_size;
}
fn alloc(_: *anyopaque, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
fn alloc(_: *anyopaque, len: usize, log2_align: u8, ra: usize) ?[*]u8 {
_ = ra;
if (len > maxInt(usize) - (mem.page_size - 1)) {
return error.OutOfMemory;
}
if (len > maxInt(usize) - (mem.page_size - 1)) return null;
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
const page_idx = allocPages(page_count, log2_align) catch return null;
return @intToPtr([*]u8, page_idx * mem.page_size);
}
fn allocPages(page_count: usize, alignment: u29) !usize {
fn allocPages(page_count: usize, log2_align: u8) !usize {
{
const idx = conventional.useRecycled(page_count, alignment);
const idx = conventional.useRecycled(page_count, log2_align);
if (idx != FreeBlock.not_found) {
return idx;
}
}
const idx = extended.useRecycled(page_count, alignment);
const idx = extended.useRecycled(page_count, log2_align);
if (idx != FreeBlock.not_found) {
return idx + extendedOffset();
}
const next_page_idx = @wasmMemorySize(0);
const next_page_addr = next_page_idx * mem.page_size;
const aligned_addr = mem.alignForward(next_page_addr, alignment);
const aligned_addr = mem.alignForwardLog2(next_page_addr, log2_align);
const drop_page_count = @divExact(aligned_addr - next_page_addr, mem.page_size);
const result = @wasmMemoryGrow(0, @intCast(u32, drop_page_count + page_count));
if (result <= 0)
@ -573,7 +484,7 @@ const WasmPageAllocator = struct {
// Since this is the first page being freed and we consume it, assume *nothing* is free.
mem.set(u128, extended.data, PageStatus.none_free);
}
const clamped_start = std.math.max(extendedOffset(), start);
const clamped_start = @max(extendedOffset(), start);
extended.recycle(clamped_start - extendedOffset(), new_end - clamped_start);
}
}
@ -581,31 +492,30 @@ const WasmPageAllocator = struct {
fn resize(
_: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
) bool {
_ = log2_buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
if (new_len > aligned_len) return null;
if (new_len > aligned_len) return false;
const current_n = nPages(aligned_len);
const new_n = nPages(new_len);
if (new_n != current_n) {
const base = nPages(@ptrToInt(buf.ptr));
freePages(base + new_n, base + current_n);
}
return alignPageAllocLen(new_n * mem.page_size, new_len, len_align);
return true;
}
fn free(
_: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
return_address: usize,
) void {
_ = buf_align;
_ = log2_buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
const current_n = nPages(aligned_len);
@ -627,7 +537,14 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
pub fn allocator(self: *HeapAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
pub fn deinit(self: *HeapAllocator) void {
@ -641,48 +558,42 @@ pub const HeapAllocator = switch (builtin.os.tag) {
}
fn alloc(
self: *HeapAllocator,
ctx: *anyopaque,
n: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
return_address: usize,
) error{OutOfMemory}![]u8 {
) ?[*]u8 {
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const amt = n + ptr_align - 1 + @sizeOf(usize);
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
const heap_handle = optional_heap_handle orelse blk: {
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return error.OutOfMemory;
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
os.windows.HeapDestroy(hh);
break :blk other_hh.?; // can't be null because of the cmpxchg
};
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return error.OutOfMemory;
const ptr = os.windows.kernel32.HeapAlloc(heap_handle, 0, amt) orelse return null;
const root_addr = @ptrToInt(ptr);
const aligned_addr = mem.alignForward(root_addr, ptr_align);
const return_len = init: {
if (len_align == 0) break :init n;
const full_len = os.windows.kernel32.HeapSize(heap_handle, 0, ptr);
assert(full_len != std.math.maxInt(usize));
assert(full_len >= amt);
break :init mem.alignBackwardAnyAlign(full_len - (aligned_addr - root_addr) - @sizeOf(usize), len_align);
};
const buf = @intToPtr([*]u8, aligned_addr)[0..return_len];
const buf = @intToPtr([*]u8, aligned_addr)[0..n];
getRecordPtr(buf).* = root_addr;
return buf;
return buf.ptr;
}
fn resize(
self: *HeapAllocator,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
) bool {
_ = log2_buf_align;
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
const root_addr = getRecordPtr(buf).*;
const align_offset = @ptrToInt(buf.ptr) - root_addr;
@ -692,27 +603,21 @@ pub const HeapAllocator = switch (builtin.os.tag) {
os.windows.HEAP_REALLOC_IN_PLACE_ONLY,
@intToPtr(*anyopaque, root_addr),
amt,
) orelse return null;
) orelse return false;
assert(new_ptr == @intToPtr(*anyopaque, root_addr));
const return_len = init: {
if (len_align == 0) break :init new_size;
const full_len = os.windows.kernel32.HeapSize(self.heap_handle.?, 0, new_ptr);
assert(full_len != std.math.maxInt(usize));
assert(full_len >= amt);
break :init mem.alignBackwardAnyAlign(full_len - align_offset, len_align);
};
getRecordPtr(buf.ptr[0..return_len]).* = root_addr;
return return_len;
getRecordPtr(buf.ptr[0..new_size]).* = root_addr;
return true;
}
fn free(
self: *HeapAllocator,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
return_address: usize,
) void {
_ = buf_align;
_ = log2_buf_align;
_ = return_address;
const self = @ptrCast(*HeapAllocator, @alignCast(@alignOf(HeapAllocator), ctx));
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*anyopaque, getRecordPtr(buf).*));
}
},
@ -742,18 +647,27 @@ pub const FixedBufferAllocator = struct {
/// *WARNING* using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe
pub fn allocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
/// *WARNING* using this at the same time as the interface returned by `allocator` is not thread safe
pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
return Allocator.init(
self,
threadSafeAlloc,
Allocator.NoResize(FixedBufferAllocator).noResize,
Allocator.NoOpFree(FixedBufferAllocator).noOpFree,
);
return .{
.ptr = self,
.vtable = &.{
.alloc = threadSafeAlloc,
.resize = Allocator.noResize,
.free = Allocator.noFree,
},
};
}
pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
@ -771,59 +685,56 @@ pub const FixedBufferAllocator = struct {
return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
}
fn alloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = ra;
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
const adjusted_index = self.end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
const result = self.buffer[adjusted_index..new_end_index];
if (new_end_index > self.buffer.len) return null;
self.end_index = new_end_index;
return result;
return self.buffer.ptr + adjusted_index;
}
fn resize(
self: *FixedBufferAllocator,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_size: usize,
len_align: u29,
return_address: usize,
) ?usize {
_ = buf_align;
) bool {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
if (!self.isLastAllocation(buf)) {
if (new_size > buf.len) return null;
return mem.alignAllocLen(buf.len, new_size, len_align);
if (new_size > buf.len) return false;
return true;
}
if (new_size <= buf.len) {
const sub = buf.len - new_size;
self.end_index -= sub;
return mem.alignAllocLen(buf.len - sub, new_size, len_align);
return true;
}
const add = new_size - buf.len;
if (add + self.end_index > self.buffer.len) return null;
if (add + self.end_index > self.buffer.len) return false;
self.end_index += add;
return new_size;
return true;
}
fn free(
self: *FixedBufferAllocator,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
return_address: usize,
) void {
_ = buf_align;
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = log2_buf_align;
_ = return_address;
assert(self.ownsSlice(buf)); // sanity check
@ -832,19 +743,18 @@ pub const FixedBufferAllocator = struct {
}
}
fn threadSafeAlloc(self: *FixedBufferAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
const self = @ptrCast(*FixedBufferAllocator, @alignCast(@alignOf(FixedBufferAllocator), ctx));
_ = ra;
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
while (true) {
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse
return error.OutOfMemory;
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
const adjusted_index = end_index + adjust_off;
const new_end_index = adjusted_index + n;
if (new_end_index > self.buffer.len) {
return error.OutOfMemory;
}
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse return self.buffer[adjusted_index..new_end_index];
if (new_end_index > self.buffer.len) return null;
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse
return self.buffer[adjusted_index..new_end_index].ptr;
}
}
@ -878,48 +788,57 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
fallback_allocator: Allocator,
fixed_buffer_allocator: FixedBufferAllocator,
/// WARNING: This functions both fetches a `std.mem.Allocator` interface to this allocator *and* resets the internal buffer allocator
/// This function both fetches a `Allocator` interface to this
/// allocator *and* resets the internal buffer allocator.
pub fn get(self: *Self) Allocator {
self.fixed_buffer_allocator = FixedBufferAllocator.init(self.buffer[0..]);
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn alloc(
self: *Self,
ctx: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, ptr_align, len_align, return_address) catch
return self.fallback_allocator.rawAlloc(len, ptr_align, len_align, return_address);
log2_ptr_align: u8,
ra: usize,
) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator, len, log2_ptr_align, ra) orelse
return self.fallback_allocator.rawAlloc(len, log2_ptr_align, ra);
}
fn resize(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
return_address: usize,
) ?usize {
ra: usize,
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, buf_align, new_len, len_align, return_address);
return FixedBufferAllocator.resize(&self.fixed_buffer_allocator, buf, log2_buf_align, new_len, ra);
} else {
return self.fallback_allocator.rawResize(buf, buf_align, new_len, len_align, return_address);
return self.fallback_allocator.rawResize(buf, log2_buf_align, new_len, ra);
}
}
fn free(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
return_address: usize,
log2_buf_align: u8,
ra: usize,
) void {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (self.fixed_buffer_allocator.ownsPtr(buf.ptr)) {
return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, buf_align, return_address);
return FixedBufferAllocator.free(&self.fixed_buffer_allocator, buf, log2_buf_align, ra);
} else {
return self.fallback_allocator.rawFree(buf, buf_align, return_address);
return self.fallback_allocator.rawFree(buf, log2_buf_align, ra);
}
}
};
@ -987,11 +906,7 @@ test "PageAllocator" {
}
if (builtin.os.tag == .windows) {
// Trying really large alignment. As mentioned in the implementation,
// VirtualAlloc returns 64K aligned addresses. We want to make sure
// PageAllocator works beyond that, as it's not tested by
// `testAllocatorLargeAlignment`.
const slice = try allocator.alignedAlloc(u8, 1 << 20, 128);
const slice = try allocator.alignedAlloc(u8, mem.page_size, 128);
slice[0] = 0x12;
slice[127] = 0x34;
allocator.free(slice);
@ -1132,15 +1047,16 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
allocator.destroy(item);
}
slice = allocator.shrink(slice, 50);
try testing.expect(slice.len == 50);
slice = allocator.shrink(slice, 25);
try testing.expect(slice.len == 25);
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
slice = try allocator.realloc(slice, 10);
try testing.expect(slice.len == 10);
if (allocator.resize(slice, 50)) {
slice = slice[0..50];
if (allocator.resize(slice, 25)) {
slice = slice[0..25];
try testing.expect(allocator.resize(slice, 0));
slice = slice[0..0];
slice = try allocator.realloc(slice, 10);
try testing.expect(slice.len == 10);
}
}
allocator.free(slice);
// Zero-length allocation
@ -1151,7 +1067,7 @@ pub fn testAllocator(base_allocator: mem.Allocator) !void {
zero_bit_ptr.* = 0;
allocator.destroy(zero_bit_ptr);
const oversize = try allocator.allocAdvanced(u32, null, 5, .at_least);
const oversize = try allocator.alignedAlloc(u32, null, 5);
try testing.expect(oversize.len >= 5);
for (oversize) |*item| {
item.* = 0xDEADBEEF;
@ -1171,21 +1087,18 @@ pub fn testAllocatorAligned(base_allocator: mem.Allocator) !void {
// grow
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
// shrink
slice = allocator.shrink(slice, 10);
try testing.expect(slice.len == 10);
// go to zero
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
try testing.expect(allocator.resize(slice, 0));
slice = slice[0..0];
// realloc from zero
slice = try allocator.realloc(slice, 100);
try testing.expect(slice.len == 100);
// shrink with shrink
slice = allocator.shrink(slice, 10);
try testing.expect(slice.len == 10);
// shrink to zero
slice = allocator.shrink(slice, 0);
try testing.expect(slice.len == 0);
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
try testing.expect(allocator.resize(slice, 0));
}
}
@ -1193,27 +1106,24 @@ pub fn testAllocatorLargeAlignment(base_allocator: mem.Allocator) !void {
var validationAllocator = mem.validationWrap(base_allocator);
const allocator = validationAllocator.allocator();
//Maybe a platform's page_size is actually the same as or
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
const USizeShift = std.meta.Int(.unsigned, std.math.log2(@bitSizeOf(usize)));
const large_align = @as(u29, mem.page_size << 2);
const large_align: usize = mem.page_size / 2;
var align_mask: usize = undefined;
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(USizeShift, @ctz(large_align)), &align_mask);
_ = @shlWithOverflow(usize, ~@as(usize, 0), @as(Allocator.Log2Align, @ctz(large_align)), &align_mask);
var slice = try allocator.alignedAlloc(u8, large_align, 500);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = allocator.shrink(slice, 100);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
if (allocator.resize(slice, 100)) {
slice = slice[0..100];
}
slice = try allocator.realloc(slice, 5000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
slice = allocator.shrink(slice, 10);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
if (allocator.resize(slice, 10)) {
slice = slice[0..10];
}
slice = try allocator.realloc(slice, 20000);
try testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));
@ -1248,8 +1158,7 @@ pub fn testAllocatorAlignedShrink(base_allocator: mem.Allocator) !void {
slice[0] = 0x12;
slice[60] = 0x34;
// realloc to a smaller size but with a larger alignment
slice = try allocator.reallocAdvanced(slice, mem.page_size * 32, alloc_size / 2, .exact);
slice = try allocator.reallocAdvanced(slice, alloc_size / 2, 0);
try testing.expect(slice[0] == 0x12);
try testing.expect(slice[60] == 0x34);
}


@ -24,7 +24,14 @@ pub const ArenaAllocator = struct {
};
pub fn allocator(self: *ArenaAllocator) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
const BufNode = std.SinglyLinkedList([]u8).Node;
@ -43,14 +50,16 @@ pub const ArenaAllocator = struct {
}
}
fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) ?*BufNode {
const actual_min_size = minimum_size + (@sizeOf(BufNode) + 16);
const big_enough_len = prev_len + actual_min_size;
const len = big_enough_len + big_enough_len / 2;
const buf = try self.child_allocator.rawAlloc(len, @alignOf(BufNode), 1, @returnAddress());
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), buf.ptr));
const log2_align = comptime std.math.log2_int(usize, @alignOf(BufNode));
const ptr = self.child_allocator.rawAlloc(len, log2_align, @returnAddress()) orelse
return null;
const buf_node = @ptrCast(*BufNode, @alignCast(@alignOf(BufNode), ptr));
buf_node.* = BufNode{
.data = buf,
.data = ptr[0..len],
.next = null,
};
self.state.buffer_list.prepend(buf_node);
@ -58,11 +67,15 @@ pub const ArenaAllocator = struct {
return buf_node;
}
fn alloc(self: *ArenaAllocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
_ = ra;
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
const ptr_align = @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align);
var cur_node = if (self.state.buffer_list.first) |first_node|
first_node
else
(self.createNode(0, n + ptr_align) orelse return null);
while (true) {
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
@ -73,46 +86,48 @@ pub const ArenaAllocator = struct {
if (new_end_index <= cur_buf.len) {
const result = cur_buf[adjusted_index..new_end_index];
self.state.end_index = new_end_index;
return result;
return result.ptr;
}
const bigger_buf_size = @sizeOf(BufNode) + new_end_index;
// Try to grow the buffer in-place
cur_node.data = self.child_allocator.resize(cur_node.data, bigger_buf_size) orelse {
if (self.child_allocator.resize(cur_node.data, bigger_buf_size)) {
cur_node.data.len = bigger_buf_size;
} else {
// Allocate a new node if that's not possible
cur_node = try self.createNode(cur_buf.len, n + ptr_align);
continue;
};
cur_node = self.createNode(cur_buf.len, n + ptr_align) orelse return null;
}
}
}
fn resize(self: *ArenaAllocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
_ = buf_align;
_ = len_align;
fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
_ = log2_buf_align;
_ = ret_addr;
const cur_node = self.state.buffer_list.first orelse return null;
const cur_node = self.state.buffer_list.first orelse return false;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
if (@ptrToInt(cur_buf.ptr) + self.state.end_index != @ptrToInt(buf.ptr) + buf.len) {
if (new_len > buf.len) return null;
return new_len;
if (new_len > buf.len) return false;
return true;
}
if (buf.len >= new_len) {
self.state.end_index -= buf.len - new_len;
return new_len;
return true;
} else if (cur_buf.len - self.state.end_index >= new_len - buf.len) {
self.state.end_index += new_len - buf.len;
return new_len;
return true;
} else {
return null;
return false;
}
}
fn free(self: *ArenaAllocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
_ = buf_align;
fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
_ = log2_buf_align;
_ = ret_addr;
const self = @ptrCast(*ArenaAllocator, @alignCast(@alignOf(ArenaAllocator), ctx));
const cur_node = self.state.buffer_list.first orelse return;
const cur_buf = cur_node.data[@sizeOf(BufNode)..];
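The grow-or-copy idiom that this commit applies in `ArrayList` and `ArenaAllocator`, distilled into a standalone helper; a sketch only, assuming `gpa` is any `std.mem.Allocator`:

```zig
const std = @import("std");

// Illustrative helper mirroring the pattern in this diff: try resize first
// so the allocator never copies bytes itself, then fall back to
// alloc + copy + free when it refuses.
fn growBuffer(gpa: std.mem.Allocator, old: []u8, new_len: usize) ![]u8 {
    if (gpa.resize(old, new_len)) return old.ptr[0..new_len];
    const new = try gpa.alloc(u8, new_len);
    std.mem.copy(u8, new, old);
    gpa.free(old);
    return new;
}
```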


@ -199,7 +199,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
requested_size: if (config.enable_memory_limit) usize else void,
stack_addresses: [trace_n][stack_n]usize,
freed: if (config.retain_metadata) bool else void,
ptr_align: if (config.never_unmap and config.retain_metadata) u29 else void,
log2_ptr_align: if (config.never_unmap and config.retain_metadata) u8 else void,
const trace_n = if (config.retain_metadata) traces_per_slot else 1;
@ -271,7 +271,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
};
pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn bucketStackTrace(
@ -379,7 +386,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
var it = self.large_allocations.iterator();
while (it.next()) |large| {
if (large.value_ptr.freed) {
self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.ptr_align, @returnAddress());
self.backing_allocator.rawFree(large.value_ptr.bytes, large.value_ptr.log2_ptr_align, @returnAddress());
}
}
}
@ -504,11 +511,10 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn resizeLarge(
self: *Self,
old_mem: []u8,
old_align: u29,
log2_old_align: u8,
new_size: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
) bool {
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
if (config.safety) {
@panic("Invalid free");
@ -541,24 +547,26 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
});
}
// Do memory limit accounting with requested sizes rather than what backing_allocator returns
// because if we want to return error.OutOfMemory, we have to leave allocation untouched, and
// that is impossible to guarantee after calling backing_allocator.rawResize.
// Do memory limit accounting with requested sizes rather than what
// backing_allocator returns because if we want to return
// error.OutOfMemory, we have to leave allocation untouched, and
// that is impossible to guarantee after calling
// backing_allocator.rawResize.
const prev_req_bytes = self.total_requested_bytes;
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - entry.value_ptr.requested_size;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
return null;
return false;
}
self.total_requested_bytes = new_req_bytes;
}
const result_len = self.backing_allocator.rawResize(old_mem, old_align, new_size, len_align, ret_addr) orelse {
if (!self.backing_allocator.rawResize(old_mem, log2_old_align, new_size, ret_addr)) {
if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
}
return null;
};
return false;
}
if (config.enable_memory_limit) {
entry.value_ptr.requested_size = new_size;
@ -569,9 +577,9 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_mem.len, old_mem.ptr, new_size,
});
}
entry.value_ptr.bytes = old_mem.ptr[0..result_len];
entry.value_ptr.bytes = old_mem.ptr[0..new_size];
entry.value_ptr.captureStackTrace(ret_addr, .alloc);
return result_len;
return true;
}
/// This function assumes the object is in the large object storage regardless
@ -579,7 +587,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
fn freeLarge(
self: *Self,
old_mem: []u8,
old_align: u29,
log2_old_align: u8,
ret_addr: usize,
) void {
const entry = self.large_allocations.getEntry(@ptrToInt(old_mem.ptr)) orelse {
@ -615,7 +623,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
if (!config.never_unmap) {
self.backing_allocator.rawFree(old_mem, old_align, ret_addr);
self.backing_allocator.rawFree(old_mem, log2_old_align, ret_addr);
}
if (config.enable_memory_limit) {
@ -639,21 +647,22 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
fn resize(
self: *Self,
ctx: *anyopaque,
old_mem: []u8,
old_align: u29,
log2_old_align_u8: u8,
new_size: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
self.mutex.lock();
defer self.mutex.unlock();
assert(old_mem.len != 0);
const aligned_size = math.max(old_mem.len, old_align);
const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
if (aligned_size > largest_bucket_object_size) {
return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
}
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
@ -678,7 +687,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
}
return self.resizeLarge(old_mem, old_align, new_size, len_align, ret_addr);
return self.resizeLarge(old_mem, log2_old_align, new_size, ret_addr);
};
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
const slot_index = @intCast(SlotIndex, byte_offset / size_class);
@ -700,12 +709,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.enable_memory_limit) {
const new_req_bytes = prev_req_bytes + new_size - old_mem.len;
if (new_req_bytes > prev_req_bytes and new_req_bytes > self.requested_memory_limit) {
return null;
return false;
}
self.total_requested_bytes = new_req_bytes;
}
const new_aligned_size = math.max(new_size, old_align);
const new_aligned_size = @max(new_size, @as(usize, 1) << log2_old_align);
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
if (new_size_class <= size_class) {
if (old_mem.len > new_size) {
@ -716,29 +725,31 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
old_mem.len, old_mem.ptr, new_size,
});
}
return new_size;
return true;
}
if (config.enable_memory_limit) {
self.total_requested_bytes = prev_req_bytes;
}
return null;
return false;
}
fn free(
self: *Self,
ctx: *anyopaque,
old_mem: []u8,
old_align: u29,
log2_old_align_u8: u8,
ret_addr: usize,
) void {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
const log2_old_align = @intCast(Allocator.Log2Align, log2_old_align_u8);
self.mutex.lock();
defer self.mutex.unlock();
assert(old_mem.len != 0);
const aligned_size = math.max(old_mem.len, old_align);
const aligned_size = @max(old_mem.len, @as(usize, 1) << log2_old_align);
if (aligned_size > largest_bucket_object_size) {
self.freeLarge(old_mem, old_align, ret_addr);
self.freeLarge(old_mem, log2_old_align, ret_addr);
return;
}
const size_class_hint = math.ceilPowerOfTwoAssert(usize, aligned_size);
@ -764,7 +775,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
}
}
}
self.freeLarge(old_mem, old_align, ret_addr);
self.freeLarge(old_mem, log2_old_align, ret_addr);
return;
};
const byte_offset = @ptrToInt(old_mem.ptr) - @ptrToInt(bucket.page);
@ -846,18 +857,26 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
return true;
}
fn alloc(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
self.mutex.lock();
defer self.mutex.unlock();
if (!self.isAllocationAllowed(len)) return null;
return allocInner(self, len, @intCast(Allocator.Log2Align, log2_ptr_align), ret_addr) catch return null;
}
if (!self.isAllocationAllowed(len)) {
return error.OutOfMemory;
}
const new_aligned_size = math.max(len, ptr_align);
fn allocInner(
self: *Self,
len: usize,
log2_ptr_align: Allocator.Log2Align,
ret_addr: usize,
) Allocator.Error![*]u8 {
const new_aligned_size = @max(len, @as(usize, 1) << @intCast(Allocator.Log2Align, log2_ptr_align));
if (new_aligned_size > largest_bucket_object_size) {
try self.large_allocations.ensureUnusedCapacity(self.backing_allocator, 1);
const slice = try self.backing_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
const ptr = self.backing_allocator.rawAlloc(len, log2_ptr_align, ret_addr) orelse
return error.OutOfMemory;
const slice = ptr[0..len];
const gop = self.large_allocations.getOrPutAssumeCapacity(@ptrToInt(slice.ptr));
if (config.retain_metadata and !config.never_unmap) {
@ -873,14 +892,14 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.retain_metadata) {
gop.value_ptr.freed = false;
if (config.never_unmap) {
gop.value_ptr.ptr_align = ptr_align;
gop.value_ptr.log2_ptr_align = log2_ptr_align;
}
}
if (config.verbose_log) {
log.info("large alloc {d} bytes at {*}", .{ slice.len, slice.ptr });
}
return slice;
return slice.ptr;
}
const new_size_class = math.ceilPowerOfTwoAssert(usize, new_aligned_size);
@ -888,15 +907,15 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
if (config.verbose_log) {
log.info("small alloc {d} bytes at {*}", .{ len, ptr });
}
return ptr[0..len];
return ptr;
}
fn createBucket(self: *Self, size_class: usize, bucket_index: usize) Error!*BucketHeader {
const page = try self.backing_allocator.allocAdvanced(u8, page_size, page_size, .exact);
const page = try self.backing_allocator.alignedAlloc(u8, page_size, page_size);
errdefer self.backing_allocator.free(page);
const bucket_size = bucketSize(size_class);
const bucket_bytes = try self.backing_allocator.allocAdvanced(u8, @alignOf(BucketHeader), bucket_size, .exact);
const bucket_bytes = try self.backing_allocator.alignedAlloc(u8, @alignOf(BucketHeader), bucket_size);
const ptr = @ptrCast(*BucketHeader, bucket_bytes.ptr);
ptr.* = BucketHeader{
.prev = ptr,
@@ -1011,13 +1030,15 @@ test "shrink" {
mem.set(u8, slice, 0x11);
slice = allocator.shrink(slice, 17);
try std.testing.expect(allocator.resize(slice, 17));
slice = slice[0..17];
for (slice) |b| {
try std.testing.expect(b == 0x11);
}
slice = allocator.shrink(slice, 16);
try std.testing.expect(allocator.resize(slice, 16));
slice = slice[0..16];
for (slice) |b| {
try std.testing.expect(b == 0x11);
@@ -1069,11 +1090,13 @@ test "shrink large object to large object" {
slice[0] = 0x12;
slice[60] = 0x34;
slice = allocator.resize(slice, page_size * 2 + 1) orelse return;
if (!allocator.resize(slice, page_size * 2 + 1)) return;
slice = slice.ptr[0 .. page_size * 2 + 1];
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
slice = allocator.shrink(slice, page_size * 2 + 1);
try std.testing.expect(allocator.resize(slice, page_size * 2 + 1));
slice = slice[0 .. page_size * 2 + 1];
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
@@ -1113,7 +1136,7 @@ test "shrink large object to large object with larger alignment" {
slice[0] = 0x12;
slice[60] = 0x34;
slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2, .exact);
slice = try allocator.reallocAdvanced(slice, big_alignment, alloc_size / 2);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[60] == 0x34);
}
@@ -1182,15 +1205,15 @@ test "realloc large object to larger alignment" {
slice[0] = 0x12;
slice[16] = 0x34;
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100, .exact);
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 100);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25, .exact);
slice = try allocator.reallocAdvanced(slice, 32, page_size * 2 + 25);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100, .exact);
slice = try allocator.reallocAdvanced(slice, big_alignment, page_size * 2 + 100);
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[16] == 0x34);
}
@@ -1208,7 +1231,8 @@ test "large object shrinks to small but allocation fails during shrink" {
// Next allocation will fail in the backing allocator of the GeneralPurposeAllocator
slice = allocator.shrink(slice, 4);
try std.testing.expect(allocator.resize(slice, 4));
slice = slice[0..4];
try std.testing.expect(slice[0] == 0x12);
try std.testing.expect(slice[3] == 0x34);
}
@@ -1296,10 +1320,10 @@ test "bug 9995 fix, large allocs count requested size not backing size" {
var gpa = GeneralPurposeAllocator(.{ .enable_memory_limit = true }){};
const allocator = gpa.allocator();
var buf = try allocator.allocAdvanced(u8, 1, page_size + 1, .at_least);
var buf = try allocator.alignedAlloc(u8, 1, page_size + 1);
try std.testing.expect(gpa.total_requested_bytes == page_size + 1);
buf = try allocator.reallocAtLeast(buf, 1);
buf = try allocator.realloc(buf, 1);
try std.testing.expect(gpa.total_requested_bytes == 1);
buf = try allocator.reallocAtLeast(buf, 2);
buf = try allocator.realloc(buf, 2);
try std.testing.expect(gpa.total_requested_bytes == 2);
}
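The test updates above show the caller-side migration in its simplest form: `shrink` is gone, `resize` reports success as a `bool`, and on success the caller re-slices to the new length itself. A minimal sketch of that idiom, assuming an `allocator: std.mem.Allocator` in scope:

// Sketch: attempt an in-place shrink; keep the original length if it is refused.
var buf = try allocator.alloc(u8, 100);
defer allocator.free(buf);
if (allocator.resize(buf, 10)) {
    // Same address; the caller applies the new length itself.
    buf = buf[0..10];
}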

View File

@@ -18,60 +18,68 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
}
pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn alloc(
self: *Self,
ctx: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
ra: usize,
) error{OutOfMemory}![]u8 {
) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
self.writer.print("alloc : {}", .{len}) catch {};
const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
if (result) |_| {
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
if (result != null) {
self.writer.print(" success!\n", .{}) catch {};
} else |_| {
} else {
self.writer.print(" failure!\n", .{}) catch {};
}
return result;
}
fn resize(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
ra: usize,
) ?usize {
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (new_len <= buf.len) {
self.writer.print("shrink: {} to {}\n", .{ buf.len, new_len }) catch {};
} else {
self.writer.print("expand: {} to {}", .{ buf.len, new_len }) catch {};
}
if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
if (new_len > buf.len) {
self.writer.print(" success!\n", .{}) catch {};
}
return resized_len;
return true;
}
std.debug.assert(new_len > buf.len);
self.writer.print(" failure!\n", .{}) catch {};
return null;
return false;
}
fn free(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
ra: usize,
) void {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
self.writer.print("free : {}\n", .{buf.len}) catch {};
self.parent_allocator.rawFree(buf, buf_align, ra);
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
}
};
}
@@ -95,9 +103,9 @@ test "LogToWriterAllocator" {
const allocator = allocator_state.allocator();
var a = try allocator.alloc(u8, 10);
a = allocator.shrink(a, 5);
try std.testing.expect(a.len == 5);
try std.testing.expect(allocator.resize(a, 20) == null);
try std.testing.expect(allocator.resize(a, 5));
a = a[0..5];
try std.testing.expect(!allocator.resize(a, 20));
allocator.free(a);
try std.testing.expectEqualSlices(u8,

View File

@@ -33,7 +33,14 @@ pub fn ScopedLoggingAllocator(
}
pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
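For context, a hypothetical use of the logging wrapper under the new interface. This assumes the `std.heap.loggingAllocator` convenience wrapper around this type; the variable names are illustrative only:

// Sketch: wrap another allocator so every alloc/resize/free goes through std.log.
var logging = std.heap.loggingAllocator(std.heap.page_allocator);
const allocator = logging.allocator();
const data = try allocator.alloc(u32, 4);
defer allocator.free(data);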
// This function is required as the `std.log.log` function is not public
@@ -47,71 +54,72 @@ pub fn ScopedLoggingAllocator(
}
fn alloc(
self: *Self,
ctx: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
ra: usize,
) error{OutOfMemory}![]u8 {
const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ra);
if (result) |_| {
) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
const result = self.parent_allocator.rawAlloc(len, log2_ptr_align, ra);
if (result != null) {
logHelper(
success_log_level,
"alloc - success - len: {}, ptr_align: {}, len_align: {}",
.{ len, ptr_align, len_align },
"alloc - success - len: {}, ptr_align: {}",
.{ len, log2_ptr_align },
);
} else |err| {
} else {
logHelper(
failure_log_level,
"alloc - failure: {s} - len: {}, ptr_align: {}, len_align: {}",
.{ @errorName(err), len, ptr_align, len_align },
"alloc - failure: OutOfMemory - len: {}, ptr_align: {}",
.{ len, log2_ptr_align },
);
}
return result;
}
fn resize(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
ra: usize,
) ?usize {
if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ra)) |resized_len| {
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
if (self.parent_allocator.rawResize(buf, log2_buf_align, new_len, ra)) {
if (new_len <= buf.len) {
logHelper(
success_log_level,
"shrink - success - {} to {}, len_align: {}, buf_align: {}",
.{ buf.len, new_len, len_align, buf_align },
"shrink - success - {} to {}, buf_align: {}",
.{ buf.len, new_len, log2_buf_align },
);
} else {
logHelper(
success_log_level,
"expand - success - {} to {}, len_align: {}, buf_align: {}",
.{ buf.len, new_len, len_align, buf_align },
"expand - success - {} to {}, buf_align: {}",
.{ buf.len, new_len, log2_buf_align },
);
}
return resized_len;
return true;
}
std.debug.assert(new_len > buf.len);
logHelper(
failure_log_level,
"expand - failure - {} to {}, len_align: {}, buf_align: {}",
.{ buf.len, new_len, len_align, buf_align },
"expand - failure - {} to {}, buf_align: {}",
.{ buf.len, new_len, log2_buf_align },
);
return null;
return false;
}
fn free(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
ra: usize,
) void {
self.parent_allocator.rawFree(buf, buf_align, ra);
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
self.parent_allocator.rawFree(buf, log2_buf_align, ra);
logHelper(success_log_level, "free - len: {}", .{buf.len});
}
};

View File

@@ -176,11 +176,11 @@ pub fn Reader(
error.EndOfStream => if (array_list.items.len == 0) {
return null;
} else {
return array_list.toOwnedSlice();
return try array_list.toOwnedSlice();
},
else => |e| return e,
};
return array_list.toOwnedSlice();
return try array_list.toOwnedSlice();
}
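`toOwnedSlice` is now fallible under the reworked allocator interface, so call sites like the one above gain a `try`. A caller-side sketch, assuming an `allocator` in scope and hypothetical list contents:

var list = std.ArrayList(u8).init(allocator);
defer list.deinit(); // safe even after a successful toOwnedSlice
try list.appendSlice("hello");
const owned = try list.toOwnedSlice(); // may now return error.OutOfMemory
defer allocator.free(owned);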
/// Reads from the stream until specified byte is found. If the buffer is not

View File

@@ -1668,12 +1668,10 @@ fn parseInternal(
if (ptrInfo.sentinel) |some| {
const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*;
try arraylist.append(sentinel_value);
const output = arraylist.toOwnedSlice();
return output[0 .. output.len - 1 :sentinel_value];
return try arraylist.toOwnedSliceSentinel(sentinel_value);
}
return arraylist.toOwnedSlice();
return try arraylist.toOwnedSlice();
},
.String => |stringToken| {
if (ptrInfo.child != u8) return error.UnexpectedToken;

View File

@@ -2148,7 +2148,7 @@ pub const Const = struct {
const limbs = try allocator.alloc(Limb, calcToStringLimbsBufferLen(self.limbs.len, base));
defer allocator.free(limbs);
return allocator.shrink(string, self.toString(string, base, case, limbs));
return allocator.realloc(string, self.toString(string, base, case, limbs));
}
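The big-int call site above now relies on `realloc` to trim the string buffer to its final length; unlike the old `shrink`, this can fail, and the error simply propagates. The general trim-with-realloc pattern, as a sketch with an assumed `allocator` in scope:

var s = try allocator.alloc(u8, 64);
defer allocator.free(s);
// If realloc fails, the original 64-byte allocation is left intact and the
// error propagates; the defer above still frees it.
s = try allocator.realloc(s, 16);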
/// Converts self to a string in the requested base.

View File

@@ -47,7 +47,14 @@ pub fn ValidationAllocator(comptime T: type) type {
}
pub fn allocator(self: *Self) Allocator {
return Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn getUnderlyingAllocatorPtr(self: *Self) Allocator {
@@ -56,72 +63,48 @@ pub fn ValidationAllocator(comptime T: type) type {
}
pub fn alloc(
self: *Self,
ctx: *anyopaque,
n: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
ret_addr: usize,
) Allocator.Error![]u8 {
) ?[*]u8 {
assert(n > 0);
assert(mem.isValidAlign(ptr_align));
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(n, len_align));
assert(n >= len_align);
}
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
const underlying = self.getUnderlyingAllocatorPtr();
const result = try underlying.rawAlloc(n, ptr_align, len_align, ret_addr);
assert(mem.isAligned(@ptrToInt(result.ptr), ptr_align));
if (len_align == 0) {
assert(result.len == n);
} else {
assert(result.len >= n);
assert(mem.isAlignedAnyAlign(result.len, len_align));
}
const result = underlying.rawAlloc(n, log2_ptr_align, ret_addr) orelse
return null;
assert(mem.isAlignedLog2(@ptrToInt(result), log2_ptr_align));
return result;
}
pub fn resize(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
new_len: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ctx));
assert(buf.len > 0);
if (len_align != 0) {
assert(mem.isAlignedAnyAlign(new_len, len_align));
assert(new_len >= len_align);
}
const underlying = self.getUnderlyingAllocatorPtr();
const result = underlying.rawResize(buf, buf_align, new_len, len_align, ret_addr) orelse return null;
if (len_align == 0) {
assert(result == new_len);
} else {
assert(result >= new_len);
assert(mem.isAlignedAnyAlign(result, len_align));
}
return result;
return underlying.rawResize(buf, log2_buf_align, new_len, ret_addr);
}
pub fn free(
self: *Self,
ctx: *anyopaque,
buf: []u8,
buf_align: u29,
log2_buf_align: u8,
ret_addr: usize,
) void {
_ = self;
_ = buf_align;
_ = ctx;
_ = log2_buf_align;
_ = ret_addr;
assert(buf.len > 0);
}
pub usingnamespace if (T == Allocator or !@hasDecl(T, "reset")) struct {} else struct {
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
};
pub fn reset(self: *Self) void {
self.underlying_allocator.reset();
}
};
}
@@ -151,16 +134,15 @@ const fail_allocator = Allocator{
const failAllocator_vtable = Allocator.VTable{
.alloc = failAllocatorAlloc,
.resize = Allocator.NoResize(anyopaque).noResize,
.free = Allocator.NoOpFree(anyopaque).noOpFree,
.resize = Allocator.noResize,
.free = Allocator.noFree,
};
fn failAllocatorAlloc(_: *anyopaque, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
fn failAllocatorAlloc(_: *anyopaque, n: usize, log2_alignment: u8, ra: usize) ?[*]u8 {
_ = n;
_ = alignment;
_ = len_align;
_ = log2_alignment;
_ = ra;
return error.OutOfMemory;
return null;
}
test "Allocator basics" {
@@ -188,7 +170,8 @@ test "Allocator.resize" {
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intCast(T, i);
values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
}
@@ -203,7 +186,8 @@ test "Allocator.resize" {
defer testing.allocator.free(values);
for (values) |*v, i| v.* = @intToFloat(T, i);
values = testing.allocator.resize(values, values.len + 10) orelse return error.OutOfMemory;
if (!testing.allocator.resize(values, values.len + 10)) return error.OutOfMemory;
values = values.ptr[0 .. values.len + 10];
try testing.expect(values.len == 110);
}
}
@@ -3108,7 +3092,7 @@ pub fn nativeToBig(comptime T: type, x: T) T {
/// - The aligned pointer would not fit the address space,
/// - The delta required to align the pointer is not a multiple of the pointee's
/// type.
pub fn alignPointerOffset(ptr: anytype, align_to: u29) ?usize {
pub fn alignPointerOffset(ptr: anytype, align_to: usize) ?usize {
assert(align_to != 0 and @popCount(align_to) == 1);
const T = @TypeOf(ptr);
@@ -3140,7 +3124,7 @@ pub fn alignPointerOffset(ptr: anytype, align_to: u29) ?usize {
/// - The aligned pointer would not fit the address space,
/// - The delta required to align the pointer is not a multiple of the pointee's
/// type.
pub fn alignPointer(ptr: anytype, align_to: u29) ?@TypeOf(ptr) {
pub fn alignPointer(ptr: anytype, align_to: usize) ?@TypeOf(ptr) {
const adjust_off = alignPointerOffset(ptr, align_to) orelse return null;
const T = @TypeOf(ptr);
// Avoid the use of intToPtr to avoid losing the pointer provenance info.
@@ -3149,7 +3133,7 @@ pub fn alignPointer(ptr: anytype, align_to: u29) ?@TypeOf(ptr) {
test "alignPointer" {
const S = struct {
fn checkAlign(comptime T: type, base: usize, align_to: u29, expected: usize) !void {
fn checkAlign(comptime T: type, base: usize, align_to: usize, expected: usize) !void {
var ptr = @intToPtr(T, base);
var aligned = alignPointer(ptr, align_to);
try testing.expectEqual(expected, @ptrToInt(aligned));
@@ -3566,6 +3550,11 @@ pub fn alignForward(addr: usize, alignment: usize) usize {
return alignForwardGeneric(usize, addr, alignment);
}
pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
const alignment = @as(usize, 1) << @intCast(math.Log2Int(usize), log2_alignment);
return alignForward(addr, alignment);
}
/// Round an address up to the next (or current) aligned address.
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
@@ -3626,7 +3615,7 @@ pub fn alignBackwardGeneric(comptime T: type, addr: T, alignment: T) T {
/// Returns whether `alignment` is a valid alignment, meaning it is
/// a positive power of 2.
pub fn isValidAlign(alignment: u29) bool {
pub fn isValidAlign(alignment: usize) bool {
return @popCount(alignment) == 1;
}
@@ -3637,6 +3626,10 @@ pub fn isAlignedAnyAlign(i: usize, alignment: usize) bool {
return 0 == @mod(i, alignment);
}
pub fn isAlignedLog2(addr: usize, log2_alignment: u8) bool {
return @ctz(addr) >= log2_alignment;
}
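Both new helpers work on the log2-encoded alignment that the reworked allocator interface passes around (an 8-byte alignment travels as `3`). A small sketch of what they compute:

test "log2 alignment helpers (sketch)" {
    // alignment 1 << 3 == 8; rounding 9 up to that alignment gives 16.
    try std.testing.expectEqual(@as(usize, 16), std.mem.alignForwardLog2(9, 3));
    try std.testing.expect(std.mem.isAlignedLog2(16, 3));
    try std.testing.expect(!std.mem.isAlignedLog2(12, 3));
}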
/// Given an address and an alignment, return true if the address is a multiple of the alignment
/// The alignment must be a power of 2 and greater than 0.
pub fn isAligned(addr: usize, alignment: usize) bool {
@@ -3670,7 +3663,7 @@ test "freeing empty string with null-terminated sentinel" {
/// Returns a slice with the given new alignment,
/// all other pointer attributes copied from `AttributeSource`.
fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: u29) type {
fn AlignedSlice(comptime AttributeSource: type, comptime new_alignment: usize) type {
const info = @typeInfo(AttributeSource).Pointer;
return @Type(.{
.Pointer = .{

View File

@@ -8,167 +8,101 @@ const Allocator = @This();
const builtin = @import("builtin");
pub const Error = error{OutOfMemory};
pub const Log2Align = math.Log2Int(usize);
// The type erased pointer to the allocator implementation
ptr: *anyopaque,
vtable: *const VTable,
pub const VTable = struct {
/// Attempt to allocate at least `len` bytes aligned to `ptr_align`.
/// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`.
///
/// `len` must be greater than or equal to `len_align` and must be aligned by `len_align`.
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
alloc: std.meta.FnPtr(fn (ptr: *anyopaque, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8),
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
alloc: std.meta.FnPtr(fn (ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8),
/// Attempt to expand or shrink memory in place. `buf.len` must equal the most recent
/// length returned by `alloc` or `resize`. `buf_align` must equal the same value
/// that was passed as the `ptr_align` parameter to the original `alloc` call.
/// Attempt to expand or shrink memory in place. `buf.len` must equal the
/// length requested from the most recent successful call to `alloc` or
/// `resize`. `buf_align` must equal the same value that was passed as the
/// `ptr_align` parameter to the original `alloc` call.
///
/// `null` can only be returned if `new_len` is greater than `buf.len`.
/// If `buf` cannot be expanded to accomodate `new_len`, then the allocation MUST be
/// unmodified and `null` MUST be returned.
/// A result of `true` indicates the resize was successful and the
/// allocation now has the same address but a size of `new_len`. `false`
/// indicates the resize could not be completed without moving the
/// allocation to a different address.
///
/// If `len_align` is `0`, then the length returned MUST be exactly `len` bytes,
/// otherwise, the length must be aligned to `len_align`. Note that `len_align` does *not*
/// provide a way to modify the alignment of a pointer. Rather it provides an API for
/// accepting more bytes of memory from the allocator than requested.
/// `new_len` must be greater than zero.
///
/// `new_len` must be greater than zero, greater than or equal to `len_align` and must be aligned by `len_align`.
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
resize: std.meta.FnPtr(fn (ptr: *anyopaque, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize),
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
resize: std.meta.FnPtr(fn (ctx: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool),
/// Free and invalidate a buffer. `buf.len` must equal the most recent length returned by `alloc` or `resize`.
/// `buf_align` must equal the same value that was passed as the `ptr_align` parameter to the original `alloc` call.
/// Free and invalidate a buffer.
///
/// `ret_addr` is optionally provided as the first return address of the allocation call stack.
/// If the value is `0` it means no return address has been provided.
free: std.meta.FnPtr(fn (ptr: *anyopaque, buf: []u8, buf_align: u29, ret_addr: usize) void),
/// `buf.len` must equal the most recent length returned by `alloc` or
/// given to a successful `resize` call.
///
/// `buf_align` must equal the same value that was passed as the
/// `ptr_align` parameter to the original `alloc` call.
///
/// `ret_addr` is optionally provided as the first return address of the
/// allocation call stack. If the value is `0` it means no return address
/// has been provided.
free: std.meta.FnPtr(fn (ctx: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void),
};
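A minimal sketch of an implementation written against the new vtable, illustrative only and not part of std: a bump allocator over a caller-provided buffer that reports failure with `null`, never resizes in place, and never frees.

const SimpleBump = struct {
    buffer: []u8,
    end: usize = 0,

    pub fn allocator(self: *SimpleBump) Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = Allocator.noResize,
                .free = Allocator.noFree,
            },
        };
    }

    fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
        _ = ret_addr;
        const self = @ptrCast(*SimpleBump, @alignCast(@alignOf(SimpleBump), ctx));
        const base = @ptrToInt(self.buffer.ptr);
        // Round the bump pointer up to the requested (log2-encoded) alignment.
        const start = mem.alignForwardLog2(base + self.end, log2_ptr_align) - base;
        if (start > self.buffer.len or self.buffer.len - start < len) return null;
        self.end = start + len;
        return self.buffer.ptr + start;
    }
};

A caller would then do something like `var state = SimpleBump{ .buffer = &some_buffer };` followed by `const a = state.allocator();`.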
pub fn init(
pointer: anytype,
comptime allocFn: fn (ptr: @TypeOf(pointer), len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8,
comptime resizeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize,
comptime freeFn: fn (ptr: @TypeOf(pointer), buf: []u8, buf_align: u29, ret_addr: usize) void,
) Allocator {
const Ptr = @TypeOf(pointer);
const ptr_info = @typeInfo(Ptr);
assert(ptr_info == .Pointer); // Must be a pointer
assert(ptr_info.Pointer.size == .One); // Must be a single-item pointer
const alignment = ptr_info.Pointer.alignment;
const gen = struct {
fn allocImpl(ptr: *anyopaque, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return @call(.{ .modifier = .always_inline }, allocFn, .{ self, len, ptr_align, len_align, ret_addr });
}
fn resizeImpl(ptr: *anyopaque, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
assert(new_len != 0);
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
return @call(.{ .modifier = .always_inline }, resizeFn, .{ self, buf, buf_align, new_len, len_align, ret_addr });
}
fn freeImpl(ptr: *anyopaque, buf: []u8, buf_align: u29, ret_addr: usize) void {
const self = @ptrCast(Ptr, @alignCast(alignment, ptr));
@call(.{ .modifier = .always_inline }, freeFn, .{ self, buf, buf_align, ret_addr });
}
const vtable = VTable{
.alloc = allocImpl,
.resize = resizeImpl,
.free = freeImpl,
};
};
return .{
.ptr = pointer,
.vtable = &gen.vtable,
};
pub fn noResize(
self: *anyopaque,
buf: []u8,
log2_buf_align: u8,
new_len: usize,
ret_addr: usize,
) bool {
_ = self;
_ = buf;
_ = log2_buf_align;
_ = new_len;
_ = ret_addr;
return false;
}
/// Set resizeFn to `NoResize(AllocatorType).noResize` if in-place resize is not supported.
pub fn NoResize(comptime AllocatorType: type) type {
return struct {
pub fn noResize(
self: *AllocatorType,
buf: []u8,
buf_align: u29,
new_len: usize,
len_align: u29,
ret_addr: usize,
) ?usize {
_ = self;
_ = buf_align;
_ = len_align;
_ = ret_addr;
return if (new_len > buf.len) null else new_len;
}
};
pub fn noFree(
self: *anyopaque,
buf: []u8,
log2_buf_align: u8,
ret_addr: usize,
) void {
_ = self;
_ = buf;
_ = log2_buf_align;
_ = ret_addr;
}
/// Set freeFn to `NoOpFree(AllocatorType).noOpFree` if free is a no-op.
pub fn NoOpFree(comptime AllocatorType: type) type {
return struct {
pub fn noOpFree(
self: *AllocatorType,
buf: []u8,
buf_align: u29,
ret_addr: usize,
) void {
_ = self;
_ = buf;
_ = buf_align;
_ = ret_addr;
}
};
/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
return self.vtable.alloc(self.ptr, len, ptr_align, ret_addr);
}
/// Set freeFn to `PanicFree(AllocatorType).panicFree` if free is not a supported operation.
pub fn PanicFree(comptime AllocatorType: type) type {
return struct {
pub fn panicFree(
self: *AllocatorType,
buf: []u8,
buf_align: u29,
ret_addr: usize,
) void {
_ = self;
_ = buf;
_ = buf_align;
_ = ret_addr;
@panic("free is not a supported operation for the allocator: " ++ @typeName(AllocatorType));
}
};
/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawResize(self: Allocator, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
return self.vtable.resize(self.ptr, buf, log2_buf_align, new_len, ret_addr);
}
/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawAlloc(self: Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) Error![]u8 {
return self.vtable.alloc(self.ptr, len, ptr_align, len_align, ret_addr);
}
/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawResize(self: Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
return self.vtable.resize(self.ptr, buf, buf_align, new_len, len_align, ret_addr);
}
/// This function is not intended to be called except from within the implementation of an Allocator
pub inline fn rawFree(self: Allocator, buf: []u8, buf_align: u29, ret_addr: usize) void {
return self.vtable.free(self.ptr, buf, buf_align, ret_addr);
/// This function is not intended to be called except from within the
/// implementation of an Allocator
pub inline fn rawFree(self: Allocator, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
return self.vtable.free(self.ptr, buf, log2_buf_align, ret_addr);
}
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return @intToPtr(*T, std.math.maxInt(usize));
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
if (@sizeOf(T) == 0) return @intToPtr(*T, math.maxInt(usize));
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, @returnAddress());
return &slice[0];
}
@@ -179,7 +113,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
self.rawFree(non_const_ptr[0..@sizeOf(T)], info.alignment, @returnAddress());
self.rawFree(non_const_ptr[0..@sizeOf(T)], math.log2(info.alignment), @returnAddress());
}
/// Allocates an array of `n` items of type `T` and sets all the
@@ -191,7 +125,7 @@ pub fn destroy(self: Allocator, ptr: anytype) void {
///
/// For allocating a single item, see `create`.
pub fn alloc(self: Allocator, comptime T: type, n: usize) Error![]T {
return self.allocAdvancedWithRetAddr(T, null, n, .exact, @returnAddress());
return self.allocAdvancedWithRetAddr(T, null, n, @returnAddress());
}
pub fn allocWithOptions(
@@ -215,11 +149,11 @@ pub fn allocWithOptionsRetAddr(
return_address: usize,
) Error!AllocWithOptionsPayload(Elem, optional_alignment, optional_sentinel) {
if (optional_sentinel) |sentinel| {
const ptr = try self.allocAdvancedWithRetAddr(Elem, optional_alignment, n + 1, .exact, return_address);
const ptr = try self.allocAdvancedWithRetAddr(Elem, optional_alignment, n + 1, return_address);
ptr[n] = sentinel;
return ptr[0..n :sentinel];
} else {
return self.allocAdvancedWithRetAddr(Elem, optional_alignment, n, .exact, return_address);
return self.allocAdvancedWithRetAddr(Elem, optional_alignment, n, return_address);
}
}
@@ -255,231 +189,108 @@ pub fn alignedAlloc(
comptime alignment: ?u29,
n: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
return self.allocAdvancedWithRetAddr(T, alignment, n, .exact, @returnAddress());
return self.allocAdvancedWithRetAddr(T, alignment, n, @returnAddress());
}
pub fn allocAdvanced(
self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
exact: Exact,
) Error![]align(alignment orelse @alignOf(T)) T {
return self.allocAdvancedWithRetAddr(T, alignment, n, exact, @returnAddress());
}
pub const Exact = enum { exact, at_least };
pub fn allocAdvancedWithRetAddr(
self: Allocator,
comptime T: type,
/// null means naturally aligned
comptime alignment: ?u29,
n: usize,
exact: Exact,
return_address: usize,
) Error![]align(alignment orelse @alignOf(T)) T {
const a = if (alignment) |a| blk: {
if (a == @alignOf(T)) return allocAdvancedWithRetAddr(self, T, null, n, exact, return_address);
if (a == @alignOf(T)) return allocAdvancedWithRetAddr(self, T, null, n, return_address);
break :blk a;
} else @alignOf(T);
// The Zig Allocator interface is not intended to solve allocations beyond
// the minimum OS page size. For these use cases, the caller must use OS
// APIs directly.
comptime assert(a <= mem.page_size);
if (n == 0) {
const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), a);
const ptr = comptime std.mem.alignBackward(math.maxInt(usize), a);
return @intToPtr([*]align(a) T, ptr)[0..0];
}
const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
// TODO The `if (alignment == null)` blocks are workarounds for zig not being able to
// access certain type information about T without creating a circular dependency in async
// functions that heap-allocate their own frame with @Frame(func).
const size_of_T: usize = if (alignment == null) @divExact(byte_count, n) else @sizeOf(T);
const len_align: u29 = switch (exact) {
.exact => 0,
.at_least => math.cast(u29, size_of_T) orelse 0,
};
const byte_slice = try self.rawAlloc(byte_count, a, len_align, return_address);
switch (exact) {
.exact => assert(byte_slice.len == byte_count),
.at_least => assert(byte_slice.len >= byte_count),
}
const byte_ptr = self.rawAlloc(byte_count, log2a(a), return_address) orelse return Error.OutOfMemory;
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(byte_slice.ptr, undefined, byte_slice.len);
if (alignment == null) {
// This if block is a workaround (see comment above)
return @intToPtr([*]T, @ptrToInt(byte_slice.ptr))[0..@divExact(byte_slice.len, @sizeOf(T))];
} else {
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
@memset(byte_ptr, undefined, byte_count);
const byte_slice = byte_ptr[0..byte_count];
return mem.bytesAsSlice(T, @alignCast(a, byte_slice));
}
/// Increases or decreases the size of an allocation. It is guaranteed to not move the pointer.
pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) ?@TypeOf(old_mem) {
/// Requests to modify the size of an allocation. It is guaranteed to not move
/// the pointer, however the allocator implementation may refuse the resize
/// request by returning `false`.
pub fn resize(self: Allocator, old_mem: anytype, new_n: usize) bool {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == 0) {
self.free(old_mem);
return &[0]T{};
return true;
}
if (old_mem.len == 0) {
return false;
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return null;
const rc = self.rawResize(old_byte_slice, Slice.alignment, new_byte_count, 0, @returnAddress()) orelse return null;
assert(rc == new_byte_count);
const new_byte_slice = old_byte_slice.ptr[0..new_byte_count];
return mem.bytesAsSlice(T, new_byte_slice);
// I would like to use saturating multiplication here, but LLVM cannot lower it
// on WebAssembly: https://github.com/ziglang/zig/issues/9660
//const new_byte_count = new_n *| @sizeOf(T);
const new_byte_count = math.mul(usize, @sizeOf(T), new_n) catch return false;
return self.rawResize(old_byte_slice, log2a(Slice.alignment), new_byte_count, @returnAddress());
}
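From the caller's side, `resize` is now a cheap "try it in place" operation; growth that cannot happen in place falls back to `realloc`, which is allowed to move the memory. A sketch, assuming an `allocator` in scope:

var buf = try allocator.alloc(u8, 32);
defer allocator.free(buf);
if (allocator.resize(buf, 64)) {
    buf = buf.ptr[0..64]; // grown in place; the caller applies the new length
} else {
    buf = try allocator.realloc(buf, 64); // may move the allocation
}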
/// This function requests a new byte size for an existing allocation,
/// which can be larger, smaller, or the same size as the old memory
/// allocation.
/// This function is preferred over `shrink`, because it can fail, even
/// when shrinking. This gives the allocator a chance to perform a
/// cheap shrink operation if possible, or otherwise return OutOfMemory,
/// indicating that the caller should keep their capacity, for example
/// in `std.ArrayList.shrink`.
/// If you need guaranteed success, call `shrink`.
/// This function requests a new byte size for an existing allocation, which
/// can be larger, smaller, or the same size as the old memory allocation.
/// If `new_n` is 0, this is the same as `free` and it always succeeds.
pub fn realloc(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .exact, @returnAddress());
return self.reallocAdvanced(old_mem, new_n, @returnAddress());
}
pub fn reallocAtLeast(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.reallocAdvancedWithRetAddr(old_mem, old_alignment, new_n, .at_least, @returnAddress());
}
/// This is the same as `realloc`, except caller may additionally request
/// a new alignment, which can be larger, smaller, or the same as the old
/// allocation.
pub fn reallocAdvanced(
self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
return self.reallocAdvancedWithRetAddr(old_mem, new_alignment, new_n, exact, @returnAddress());
}
pub fn reallocAdvancedWithRetAddr(
self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
exact: Exact,
return_address: usize,
) Error![]align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t Error![]align(Slice.alignment) Slice.child;
} {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (old_mem.len == 0) {
return self.allocAdvancedWithRetAddr(T, new_alignment, new_n, exact, return_address);
return self.allocAdvancedWithRetAddr(T, Slice.alignment, new_n, return_address);
}
if (new_n == 0) {
self.free(old_mem);
const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), new_alignment);
return @intToPtr([*]align(new_alignment) T, ptr)[0..0];
const ptr = comptime std.mem.alignBackward(math.maxInt(usize), Slice.alignment);
return @intToPtr([*]align(Slice.alignment) T, ptr)[0..0];
}
const old_byte_slice = mem.sliceAsBytes(old_mem);
const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
// Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure
const len_align: u29 = switch (exact) {
.exact => 0,
.at_least => math.cast(u29, @as(usize, @sizeOf(T))) orelse 0,
};
if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), new_alignment)) {
if (byte_count <= old_byte_slice.len) {
const shrunk_len = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, len_align, return_address);
return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..shrunk_len]));
}
if (self.rawResize(old_byte_slice, Slice.alignment, byte_count, len_align, return_address)) |resized_len| {
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice.ptr + byte_count, undefined, resized_len - byte_count);
return mem.bytesAsSlice(T, @alignCast(new_alignment, old_byte_slice.ptr[0..resized_len]));
if (mem.isAligned(@ptrToInt(old_byte_slice.ptr), Slice.alignment)) {
if (self.rawResize(old_byte_slice, log2a(Slice.alignment), byte_count, return_address)) {
return mem.bytesAsSlice(T, @alignCast(Slice.alignment, old_byte_slice.ptr[0..byte_count]));
}
}
if (byte_count <= old_byte_slice.len and new_alignment <= Slice.alignment) {
const new_mem = self.rawAlloc(byte_count, log2a(Slice.alignment), return_address) orelse
return error.OutOfMemory;
}
const new_mem = try self.rawAlloc(byte_count, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_byte_slice.ptr, math.min(byte_count, old_byte_slice.len));
@memcpy(new_mem, old_byte_slice.ptr, @min(byte_count, old_byte_slice.len));
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice.ptr, undefined, old_byte_slice.len);
self.rawFree(old_byte_slice, Slice.alignment, return_address);
self.rawFree(old_byte_slice, log2a(Slice.alignment), return_address);
return mem.bytesAsSlice(T, @alignCast(new_alignment, new_mem));
}
/// Prefer calling realloc to shrink if you can tolerate failure, such as
/// in an ArrayList data structure with a storage capacity.
/// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
/// Returned slice has same alignment as old_mem.
/// Shrinking to 0 is the same as calling `free`.
pub fn shrink(self: Allocator, old_mem: anytype, new_n: usize) t: {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
break :t []align(Slice.alignment) Slice.child;
} {
const old_alignment = @typeInfo(@TypeOf(old_mem)).Pointer.alignment;
return self.alignedShrinkWithRetAddr(old_mem, old_alignment, new_n, @returnAddress());
}
/// This is the same as `shrink`, except caller may additionally request
/// a new alignment, which must be smaller or the same as the old
/// allocation.
pub fn alignedShrink(
self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
return self.alignedShrinkWithRetAddr(old_mem, new_alignment, new_n, @returnAddress());
}
/// This is the same as `alignedShrink`, except caller may additionally pass
/// the return address of the first stack frame, which may be relevant for
/// allocators which collect stack traces.
pub fn alignedShrinkWithRetAddr(
self: Allocator,
old_mem: anytype,
comptime new_alignment: u29,
new_n: usize,
return_address: usize,
) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
const T = Slice.child;
if (new_n == old_mem.len)
return old_mem;
if (new_n == 0) {
self.free(old_mem);
const ptr = comptime std.mem.alignBackward(std.math.maxInt(usize), new_alignment);
return @intToPtr([*]align(new_alignment) T, ptr)[0..0];
}
assert(new_n < old_mem.len);
assert(new_alignment <= Slice.alignment);
// Here we skip the overflow checking on the multiplication because
// new_n <= old_mem.len and the multiplication didn't overflow for that operation.
const byte_count = @sizeOf(T) * new_n;
const old_byte_slice = mem.sliceAsBytes(old_mem);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
_ = self.shrinkBytes(old_byte_slice, Slice.alignment, byte_count, 0, return_address);
return old_mem[0..new_n];
return mem.bytesAsSlice(T, @alignCast(Slice.alignment, new_mem[0..byte_count]));
}
/// Free an array allocated with `alloc`. To free a single item,
@@ -492,7 +303,7 @@ pub fn free(self: Allocator, memory: anytype) void {
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(non_const_ptr, undefined, bytes_len);
self.rawFree(non_const_ptr[0..bytes_len], Slice.alignment, @returnAddress());
self.rawFree(non_const_ptr[0..bytes_len], log2a(Slice.alignment), @returnAddress());
}
/// Copies `m` to newly allocated memory. Caller owns the memory.
@@ -510,226 +321,16 @@ pub fn dupeZ(allocator: Allocator, comptime T: type, m: []const T) ![:0]T {
return new_buf[0..m.len :0];
}
/// This function allows a runtime `alignment` value. Callers should generally prefer
/// to call the `alloc*` functions.
pub fn allocBytes(
self: Allocator,
/// Must be >= 1.
/// Must be a power of 2.
/// Returned slice's pointer will have this alignment.
alignment: u29,
byte_count: usize,
/// 0 indicates the length of the slice returned MUST match `byte_count` exactly
/// non-zero means the length of the returned slice must be aligned by `len_align`
/// `byte_count` must be aligned by `len_align`
len_align: u29,
return_address: usize,
) Error![]u8 {
const new_mem = try self.rawAlloc(byte_count, alignment, len_align, return_address);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(new_mem.ptr, undefined, new_mem.len);
return new_mem;
}
test "allocBytes" {
const number_of_bytes: usize = 10;
var runtime_alignment: u29 = 2;
{
const new_mem = try std.testing.allocator.allocBytes(runtime_alignment, number_of_bytes, 0, @returnAddress());
defer std.testing.allocator.free(new_mem);
try std.testing.expectEqual(number_of_bytes, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
runtime_alignment = 8;
{
const new_mem = try std.testing.allocator.allocBytes(runtime_alignment, number_of_bytes, 0, @returnAddress());
defer std.testing.allocator.free(new_mem);
try std.testing.expectEqual(number_of_bytes, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
/// TODO replace callsites with `@log2` after this proposal is implemented:
/// https://github.com/ziglang/zig/issues/13642
inline fn log2a(x: anytype) switch (@typeInfo(@TypeOf(x))) {
.Int => math.Log2Int(@TypeOf(x)),
.ComptimeInt => comptime_int,
else => @compileError("int please"),
} {
switch (@typeInfo(@TypeOf(x))) {
.Int => return math.log2_int(@TypeOf(x), x),
.ComptimeInt => return math.log2(x),
else => @compileError("bad"),
}
}
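`log2a` just converts a power-of-two alignment into the exponent form that `rawAlloc`/`rawResize`/`rawFree` now take. A sketch of the mapping, as it could be tested inside this file:

test "log2a (sketch)" {
    try std.testing.expectEqual(@as(math.Log2Int(u29), 0), log2a(@as(u29, 1)));
    try std.testing.expectEqual(@as(math.Log2Int(u29), 4), log2a(@as(u29, 16)));
}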
test "allocBytes non-zero len_align" {
const number_of_bytes: usize = 10;
var runtime_alignment: u29 = 1;
var len_align: u29 = 2;
{
const new_mem = try std.testing.allocator.allocBytes(runtime_alignment, number_of_bytes, len_align, @returnAddress());
defer std.testing.allocator.free(new_mem);
try std.testing.expect(new_mem.len >= number_of_bytes);
try std.testing.expect(new_mem.len % len_align == 0);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
runtime_alignment = 16;
len_align = 5;
{
const new_mem = try std.testing.allocator.allocBytes(runtime_alignment, number_of_bytes, len_align, @returnAddress());
defer std.testing.allocator.free(new_mem);
try std.testing.expect(new_mem.len >= number_of_bytes);
try std.testing.expect(new_mem.len % len_align == 0);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
}
/// Realloc is used to modify the size or alignment of an existing allocation,
/// as well as to provide the allocator with an opportunity to move an allocation
/// to a better location.
/// The returned slice will have its pointer aligned at least to `new_alignment` bytes.
///
/// This function allows a runtime `alignment` value. Callers should generally prefer
/// to call the `realloc*` functions.
///
/// If the size/alignment is greater than the previous allocation, and the requested new
/// allocation could not be granted this function returns `error.OutOfMemory`.
/// When the size/alignment is less than or equal to the previous allocation,
/// this function returns `error.OutOfMemory` when the allocator decides the client
/// would be better off keeping the extra alignment/size.
/// Clients will call `resizeFn` when they require the allocator to track a new alignment/size,
/// and so this function should only return success when the allocator considers
/// the reallocation desirable from the allocator's perspective.
///
/// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
/// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
/// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
/// is less than or equal to the old allocation, because it cannot reclaim the memory,
/// and thus the `std.ArrayList` would be better off retaining its capacity.
pub fn reallocBytes(
self: Allocator,
/// Must be the same as what was returned from most recent call to `allocFn` or `resizeFn`.
/// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` must be >= 1.
old_mem: []u8,
/// If `old_mem.len == 0` then this is `undefined`, otherwise:
/// Must be the same as what was passed to `allocFn`.
/// Must be >= 1.
/// Must be a power of 2.
old_alignment: u29,
/// If `new_byte_count` is 0 then this is a free and it is required that `old_mem.len != 0`.
new_byte_count: usize,
/// Must be >= 1.
/// Must be a power of 2.
/// Returned slice's pointer will have this alignment.
new_alignment: u29,
/// 0 indicates the length of the slice returned MUST match `new_byte_count` exactly
/// non-zero means the length of the returned slice must be aligned by `len_align`
/// `new_byte_count` must be aligned by `len_align`
len_align: u29,
return_address: usize,
) Error![]u8 {
if (old_mem.len == 0) {
return self.allocBytes(new_alignment, new_byte_count, len_align, return_address);
}
if (new_byte_count == 0) {
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr, undefined, old_mem.len);
self.rawFree(old_mem, old_alignment, return_address);
return &[0]u8{};
}
if (mem.isAligned(@ptrToInt(old_mem.ptr), new_alignment)) {
if (new_byte_count <= old_mem.len) {
const shrunk_len = self.shrinkBytes(old_mem, old_alignment, new_byte_count, len_align, return_address);
return old_mem.ptr[0..shrunk_len];
}
if (self.rawResize(old_mem, old_alignment, new_byte_count, len_align, return_address)) |resized_len| {
assert(resized_len >= new_byte_count);
// TODO: https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr + new_byte_count, undefined, resized_len - new_byte_count);
return old_mem.ptr[0..resized_len];
}
}
if (new_byte_count <= old_mem.len and new_alignment <= old_alignment) {
return error.OutOfMemory;
}
const new_mem = try self.rawAlloc(new_byte_count, new_alignment, len_align, return_address);
@memcpy(new_mem.ptr, old_mem.ptr, math.min(new_byte_count, old_mem.len));
// TODO https://github.com/ziglang/zig/issues/4298
@memset(old_mem.ptr, undefined, old_mem.len);
self.rawFree(old_mem, old_alignment, return_address);
return new_mem;
}
test "reallocBytes" {
var new_mem: []u8 = &.{};
var new_byte_count: usize = 16;
var runtime_alignment: u29 = 4;
// `new_mem.len == 0`, this is a new allocation
{
new_mem = try std.testing.allocator.reallocBytes(new_mem, undefined, new_byte_count, runtime_alignment, 0, @returnAddress());
try std.testing.expectEqual(new_byte_count, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
// `new_byte_count < new_mem.len`, this is a shrink, alignment is unmodified
new_byte_count = 14;
{
new_mem = try std.testing.allocator.reallocBytes(new_mem, runtime_alignment, new_byte_count, runtime_alignment, 0, @returnAddress());
try std.testing.expectEqual(new_byte_count, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
// `new_byte_count < new_mem.len`, this is a shrink, alignment is decreased from 4 to 2
runtime_alignment = 2;
new_byte_count = 12;
{
new_mem = try std.testing.allocator.reallocBytes(new_mem, 4, new_byte_count, runtime_alignment, 0, @returnAddress());
try std.testing.expectEqual(new_byte_count, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
// `new_byte_count > new_mem.len`, this is a growth, alignment is increased from 2 to 8
runtime_alignment = 8;
new_byte_count = 32;
{
new_mem = try std.testing.allocator.reallocBytes(new_mem, 2, new_byte_count, runtime_alignment, 0, @returnAddress());
try std.testing.expectEqual(new_byte_count, new_mem.len);
try std.testing.expect(mem.isAligned(@ptrToInt(new_mem.ptr), runtime_alignment));
}
// `new_byte_count == 0`, this is a free
new_byte_count = 0;
{
new_mem = try std.testing.allocator.reallocBytes(new_mem, runtime_alignment, new_byte_count, runtime_alignment, 0, @returnAddress());
try std.testing.expectEqual(new_byte_count, new_mem.len);
}
}
/// Call `vtable.resize`, but caller guarantees that `new_len` <= `buf.len` meaning
/// than a `null` return value should be impossible.
/// This function allows a runtime `buf_align` value. Callers should generally prefer
/// to call `shrink`.
pub fn shrinkBytes(
self: Allocator,
/// Must be the same as what was returned from most recent call to `allocFn` or `resizeFn`.
buf: []u8,
/// Must be the same as what was passed to `allocFn`.
/// Must be >= 1.
/// Must be a power of 2.
buf_align: u29,
/// Must be >= 1.
new_len: usize,
/// 0 indicates the length of the slice returned MUST match `new_len` exactly
/// non-zero means the length of the returned slice must be aligned by `len_align`
/// `new_len` must be aligned by `len_align`
len_align: u29,
return_address: usize,
) usize {
assert(new_len <= buf.len);
return self.rawResize(buf, buf_align, new_len, len_align, return_address) orelse unreachable;
}

View File

@@ -144,7 +144,7 @@ test "TrailerFlags" {
.b = true,
.c = true,
});
const slice = try testing.allocator.allocAdvanced(u8, 8, flags.sizeInBytes(), .exact);
const slice = try testing.allocator.alignedAlloc(u8, 8, flags.sizeInBytes());
defer testing.allocator.free(slice);
flags.set(slice.ptr, .b, false);

View File

@@ -288,11 +288,10 @@ pub fn MultiArrayList(comptime S: type) type {
assert(new_len <= self.capacity);
assert(new_len <= self.len);
const other_bytes = gpa.allocAdvanced(
const other_bytes = gpa.alignedAlloc(
u8,
@alignOf(S),
capacityInBytes(new_len),
.exact,
) catch {
const self_slice = self.slice();
inline for (fields) |field_info, i| {
@@ -360,11 +359,10 @@ pub fn MultiArrayList(comptime S: type) type {
/// `new_capacity` must be greater or equal to `len`.
pub fn setCapacity(self: *Self, gpa: Allocator, new_capacity: usize) !void {
assert(new_capacity >= self.len);
const new_bytes = try gpa.allocAdvanced(
const new_bytes = try gpa.alignedAlloc(
u8,
@alignOf(S),
capacityInBytes(new_capacity),
.exact,
);
if (self.len == 0) {
gpa.free(self.allocatedBytes());

View File

@@ -825,7 +825,7 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) !*A
result.addrs = try arena.alloc(Address, lookup_addrs.items.len);
if (canon.items.len != 0) {
result.canon_name = canon.toOwnedSlice();
result.canon_name = try canon.toOwnedSlice();
}
for (lookup_addrs.items) |lookup_addr, i| {

View File

@@ -478,7 +478,7 @@ fn readSparseBitVector(stream: anytype, allocator: mem.Allocator) ![]u32 {
if (bit_i == std.math.maxInt(u5)) break;
}
}
return list.toOwnedSlice();
return try list.toOwnedSlice();
}
pub const Pdb = struct {
@@ -615,8 +615,8 @@ pub const Pdb = struct {
return error.InvalidDebugInfo;
}
self.modules = modules.toOwnedSlice();
self.sect_contribs = sect_contribs.toOwnedSlice();
self.modules = try modules.toOwnedSlice();
self.sect_contribs = try sect_contribs.toOwnedSlice();
}
pub fn parseInfoStream(self: *Pdb) !void {

View File

@@ -1,6 +1,7 @@
const std = @import("std.zig");
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const Allocator = std.mem.Allocator;
// Imagine that `fn at(self: *Self, index: usize) &T` is a customer asking for a box
@@ -177,24 +178,32 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
return self.growCapacity(allocator, new_capacity);
}
/// Only grows capacity, or retains current capacity
/// Only grows capacity, or retains current capacity.
pub fn growCapacity(self: *Self, allocator: Allocator, new_capacity: usize) Allocator.Error!void {
const new_cap_shelf_count = shelfCount(new_capacity);
const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
if (new_cap_shelf_count > old_shelf_count) {
self.dynamic_segments = try allocator.realloc(self.dynamic_segments, new_cap_shelf_count);
var i = old_shelf_count;
errdefer {
self.freeShelves(allocator, i, old_shelf_count);
self.dynamic_segments = allocator.shrink(self.dynamic_segments, old_shelf_count);
}
while (i < new_cap_shelf_count) : (i += 1) {
self.dynamic_segments[i] = (try allocator.alloc(T, shelfSize(i))).ptr;
}
if (new_cap_shelf_count <= old_shelf_count) return;
const new_dynamic_segments = try allocator.alloc([*]T, new_cap_shelf_count);
errdefer allocator.free(new_dynamic_segments);
var i: ShelfIndex = 0;
while (i < old_shelf_count) : (i += 1) {
new_dynamic_segments[i] = self.dynamic_segments[i];
}
errdefer while (i > old_shelf_count) : (i -= 1) {
allocator.free(new_dynamic_segments[i][0..shelfSize(i)]);
};
while (i < new_cap_shelf_count) : (i += 1) {
new_dynamic_segments[i] = (try allocator.alloc(T, shelfSize(i))).ptr;
}
allocator.free(self.dynamic_segments);
self.dynamic_segments = new_dynamic_segments;
}
/// Only shrinks capacity or retains current capacity
/// Only shrinks capacity or retains current capacity.
/// It may fail to reduce the capacity in which case the capacity will remain unchanged.
pub fn shrinkCapacity(self: *Self, allocator: Allocator, new_capacity: usize) void {
if (new_capacity <= prealloc_item_count) {
const len = @intCast(ShelfIndex, self.dynamic_segments.len);
@@ -207,12 +216,24 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
const new_cap_shelf_count = shelfCount(new_capacity);
const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
assert(new_cap_shelf_count <= old_shelf_count);
if (new_cap_shelf_count == old_shelf_count) {
return;
}
if (new_cap_shelf_count == old_shelf_count) return;
// freeShelves() must be called before resizing the dynamic
// segments, but we don't know if resizing the dynamic segments
// will work until we try it. So we must allocate a fresh memory
// buffer in order to reduce capacity.
const new_dynamic_segments = allocator.alloc([*]T, new_cap_shelf_count) catch return;
self.freeShelves(allocator, old_shelf_count, new_cap_shelf_count);
self.dynamic_segments = allocator.shrink(self.dynamic_segments, new_cap_shelf_count);
if (allocator.resize(self.dynamic_segments, new_cap_shelf_count)) {
// We didn't need the new memory allocation after all.
self.dynamic_segments = self.dynamic_segments[0..new_cap_shelf_count];
allocator.free(new_dynamic_segments);
} else {
// Good thing we allocated that new memory slice.
mem.copy([*]T, new_dynamic_segments, self.dynamic_segments[0..new_cap_shelf_count]);
allocator.free(self.dynamic_segments);
self.dynamic_segments = new_dynamic_segments;
}
}
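A usage sketch of the reworked capacity management, with hypothetical sizes: `shrinkCapacity` is best-effort, so an allocation failure inside it simply leaves the capacity where it was.

var list = std.SegmentedList(i32, 0){};
defer list.deinit(std.testing.allocator);
try list.growCapacity(std.testing.allocator, 100);
list.shrinkCapacity(std.testing.allocator, 10); // cannot fail from the caller's view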
pub fn shrink(self: *Self, new_len: usize) void {
@@ -227,10 +248,10 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
var i = start;
if (end <= prealloc_item_count) {
std.mem.copy(T, dest[i - start ..], self.prealloc_segment[i..end]);
mem.copy(T, dest[i - start ..], self.prealloc_segment[i..end]);
return;
} else if (i < prealloc_item_count) {
std.mem.copy(T, dest[i - start ..], self.prealloc_segment[i..]);
mem.copy(T, dest[i - start ..], self.prealloc_segment[i..]);
i = prealloc_item_count;
}
@@ -239,7 +260,7 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
const copy_start = boxIndex(i, shelf_index);
const copy_end = std.math.min(shelfSize(shelf_index), copy_start + end - i);
std.mem.copy(
mem.copy(
T,
dest[i - start ..],
self.dynamic_segments[shelf_index][copy_start..copy_end],
@@ -480,13 +501,13 @@ fn testSegmentedList(comptime prealloc: usize) !void {
control[@intCast(usize, i)] = i + 1;
}
std.mem.set(i32, dest[0..], 0);
mem.set(i32, dest[0..], 0);
list.writeToSlice(dest[0..], 0);
try testing.expect(std.mem.eql(i32, control[0..], dest[0..]));
try testing.expect(mem.eql(i32, control[0..], dest[0..]));
std.mem.set(i32, dest[0..], 0);
mem.set(i32, dest[0..], 0);
list.writeToSlice(dest[50..], 50);
try testing.expect(std.mem.eql(i32, control[50..], dest[50..]));
try testing.expect(mem.eql(i32, control[50..], dest[50..]));
}
try list.setCapacity(testing.allocator, 0);

View File

@@ -47,16 +47,23 @@ pub const FailingAllocator = struct {
}
pub fn allocator(self: *FailingAllocator) mem.Allocator {
return mem.Allocator.init(self, alloc, resize, free);
return .{
.ptr = self,
.vtable = &.{
.alloc = alloc,
.resize = resize,
.free = free,
},
};
}
fn alloc(
self: *FailingAllocator,
ctx: *anyopaque,
len: usize,
ptr_align: u29,
len_align: u29,
log2_ptr_align: u8,
return_address: usize,
) error{OutOfMemory}![]u8 {
) ?[*]u8 {
const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
if (self.index == self.fail_index) {
if (!self.has_induced_failure) {
mem.set(usize, &self.stack_addresses, 0);
@@ -67,39 +74,42 @@ pub const FailingAllocator = struct {
std.debug.captureStackTrace(return_address, &stack_trace);
self.has_induced_failure = true;
}
return error.OutOfMemory;
return null;
}
const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);
self.allocated_bytes += result.len;
const result = self.internal_allocator.rawAlloc(len, log2_ptr_align, return_address) orelse
return null;
self.allocated_bytes += len;
self.allocations += 1;
self.index += 1;
return result;
}
fn resize(
self: *FailingAllocator,
ctx: *anyopaque,
old_mem: []u8,
old_align: u29,
log2_old_align: u8,
new_len: usize,
len_align: u29,
ra: usize,
) ?usize {
const r = self.internal_allocator.rawResize(old_mem, old_align, new_len, len_align, ra) orelse return null;
if (r < old_mem.len) {
self.freed_bytes += old_mem.len - r;
) bool {
const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
if (!self.internal_allocator.rawResize(old_mem, log2_old_align, new_len, ra))
return false;
if (new_len < old_mem.len) {
self.freed_bytes += old_mem.len - new_len;
} else {
self.allocated_bytes += r - old_mem.len;
self.allocated_bytes += new_len - old_mem.len;
}
return r;
return true;
}
fn free(
self: *FailingAllocator,
ctx: *anyopaque,
old_mem: []u8,
old_align: u29,
log2_old_align: u8,
ra: usize,
) void {
self.internal_allocator.rawFree(old_mem, old_align, ra);
const self = @ptrCast(*FailingAllocator, @alignCast(@alignOf(FailingAllocator), ctx));
self.internal_allocator.rawFree(old_mem, log2_old_align, ra);
self.deallocations += 1;
self.freed_bytes += old_mem.len;
}
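FailingAllocator now fills in the Allocator ptr/vtable pair directly and implements the new callback signatures: alloc returns ?[*]u8, resize returns bool, and alignments arrive as log2 values. A hedged sketch of the same wiring for a hypothetical pass-through wrapper (the wrapper type and its counter are illustrative, not from the diff):

const std = @import("std");

// Counts live allocations while forwarding every call to the wrapped allocator.
const CountingAllocator = struct {
    child: std.mem.Allocator,
    live_allocations: usize = 0,

    pub fn allocator(self: *CountingAllocator) std.mem.Allocator {
        return .{
            .ptr = self,
            .vtable = &.{
                .alloc = alloc,
                .resize = resize,
                .free = free,
            },
        };
    }

    fn alloc(ctx: *anyopaque, len: usize, log2_ptr_align: u8, ret_addr: usize) ?[*]u8 {
        const self = @ptrCast(*CountingAllocator, @alignCast(@alignOf(CountingAllocator), ctx));
        const result = self.child.rawAlloc(len, log2_ptr_align, ret_addr) orelse return null;
        self.live_allocations += 1;
        return result;
    }

    fn resize(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, new_len: usize, ret_addr: usize) bool {
        const self = @ptrCast(*CountingAllocator, @alignCast(@alignOf(CountingAllocator), ctx));
        return self.child.rawResize(buf, log2_buf_align, new_len, ret_addr);
    }

    fn free(ctx: *anyopaque, buf: []u8, log2_buf_align: u8, ret_addr: usize) void {
        const self = @ptrCast(*CountingAllocator, @alignCast(@alignOf(CountingAllocator), ctx));
        self.child.rawFree(buf, log2_buf_align, ret_addr);
        self.live_allocations -= 1;
    }
};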

View File

@ -611,12 +611,7 @@ pub fn utf16leToUtf8AllocZ(allocator: mem.Allocator, utf16le: []const u16) ![:0]
assert((utf8Encode(codepoint, result.items[out_index..]) catch unreachable) == utf8_len);
out_index += utf8_len;
}
const len = result.items.len;
try result.append(0);
return result.toOwnedSlice()[0..len :0];
return result.toOwnedSliceSentinel(0);
}
/// Asserts that the output buffer is big enough.
@ -714,9 +709,7 @@ pub fn utf8ToUtf16LeWithNull(allocator: mem.Allocator, utf8: []const u8) ![:0]u1
}
}
const len = result.items.len;
try result.append(0);
return result.toOwnedSlice()[0..len :0];
return result.toOwnedSliceSentinel(0);
}
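Both UTF conversion helpers now lean on ArrayList.toOwnedSliceSentinel, which appends the sentinel and transfers ownership in one fallible step instead of the manual append-0-then-reslice dance. A minimal sketch of the pattern under an illustrative helper name:

const std = @import("std");

// Copies bytes into a freshly owned, 0-terminated slice.
fn dupeWithSentinel(gpa: std.mem.Allocator, bytes: []const u8) ![:0]u8 {
    var list = std.ArrayList(u8).init(gpa);
    // deinit is still required; it frees any buffer the list still owns,
    // while the returned slice belongs to the caller.
    defer list.deinit();
    try list.appendSlice(bytes);
    return list.toOwnedSliceSentinel(0);
}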
/// Returns index of next character. If exact fit, returned index equals output slice length.

View File

@ -72,8 +72,8 @@ pub fn parse(gpa: Allocator, source: [:0]const u8) Allocator.Error!Ast {
.source = source,
.tokens = tokens.toOwnedSlice(),
.nodes = parser.nodes.toOwnedSlice(),
.extra_data = parser.extra_data.toOwnedSlice(gpa),
.errors = parser.errors.toOwnedSlice(gpa),
.extra_data = try parser.extra_data.toOwnedSlice(gpa),
.errors = try parser.errors.toOwnedSlice(gpa),
};
}
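Since toOwnedSlice can now allocate a tightly sized buffer, and therefore fail, call sites inside error-returning functions such as parse simply gain a try. A hedged sketch of the unmanaged variant with illustrative names:

const std = @import("std");

// Builds a list incrementally, then hands the caller an exactly sized slice.
fn collectSquares(gpa: std.mem.Allocator, n: u32) ![]u32 {
    var list: std.ArrayListUnmanaged(u32) = .{};
    errdefer list.deinit(gpa);
    var i: u32 = 0;
    while (i < n) : (i += 1) {
        try list.append(gpa, i * i);
    }
    // May allocate when the existing buffer cannot be resized in place, hence the try.
    return try list.toOwnedSlice(gpa);
}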

View File

@ -199,8 +199,8 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
return Zir{
.instructions = astgen.instructions.toOwnedSlice(),
.string_bytes = astgen.string_bytes.toOwnedSlice(gpa),
.extra = astgen.extra.toOwnedSlice(gpa),
.string_bytes = try astgen.string_bytes.toOwnedSlice(gpa),
.extra = try astgen.extra.toOwnedSlice(gpa),
};
}

View File

@ -146,46 +146,46 @@ pub fn generateZirData(self: *Autodoc) !void {
.c_ulonglong_type,
.c_longdouble_type,
=> .{
.Int = .{ .name = tmpbuf.toOwnedSlice() },
.Int = .{ .name = try tmpbuf.toOwnedSlice() },
},
.f16_type,
.f32_type,
.f64_type,
.f128_type,
=> .{
.Float = .{ .name = tmpbuf.toOwnedSlice() },
.Float = .{ .name = try tmpbuf.toOwnedSlice() },
},
.comptime_int_type => .{
.ComptimeInt = .{ .name = tmpbuf.toOwnedSlice() },
.ComptimeInt = .{ .name = try tmpbuf.toOwnedSlice() },
},
.comptime_float_type => .{
.ComptimeFloat = .{ .name = tmpbuf.toOwnedSlice() },
.ComptimeFloat = .{ .name = try tmpbuf.toOwnedSlice() },
},
.anyopaque_type => .{
.ComptimeExpr = .{ .name = tmpbuf.toOwnedSlice() },
.ComptimeExpr = .{ .name = try tmpbuf.toOwnedSlice() },
},
.bool_type => .{
.Bool = .{ .name = tmpbuf.toOwnedSlice() },
.Bool = .{ .name = try tmpbuf.toOwnedSlice() },
},
.noreturn_type => .{
.NoReturn = .{ .name = tmpbuf.toOwnedSlice() },
.NoReturn = .{ .name = try tmpbuf.toOwnedSlice() },
},
.void_type => .{
.Void = .{ .name = tmpbuf.toOwnedSlice() },
.Void = .{ .name = try tmpbuf.toOwnedSlice() },
},
.type_info_type => .{
.ComptimeExpr = .{ .name = tmpbuf.toOwnedSlice() },
.ComptimeExpr = .{ .name = try tmpbuf.toOwnedSlice() },
},
.type_type => .{
.Type = .{ .name = tmpbuf.toOwnedSlice() },
.Type = .{ .name = try tmpbuf.toOwnedSlice() },
},
.anyerror_type => .{
.ErrorSet = .{ .name = tmpbuf.toOwnedSlice() },
.ErrorSet = .{ .name = try tmpbuf.toOwnedSlice() },
},
.calling_convention_inline, .calling_convention_c, .calling_convention_type => .{
.EnumLiteral = .{ .name = tmpbuf.toOwnedSlice() },
.EnumLiteral = .{ .name = try tmpbuf.toOwnedSlice() },
},
},
);

View File

@ -5052,7 +5052,7 @@ fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []con
while (lines.next()) |line| {
if (mem.startsWith(u8, line, prefix ++ ":")) {
if (current_err) |err| {
err.context_lines = context_lines.toOwnedSlice();
err.context_lines = try context_lines.toOwnedSlice();
}
var split = std.mem.split(u8, line, "error: ");
@ -5078,7 +5078,7 @@ fn parseLldStderr(comp: *Compilation, comptime prefix: []const u8, stderr: []con
}
if (current_err) |err| {
err.context_lines = context_lines.toOwnedSlice();
err.context_lines = try context_lines.toOwnedSlice();
}
}

View File

@ -79,7 +79,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
return Liveness{
.tomb_bits = a.tomb_bits,
.special = a.special,
.extra = a.extra.toOwnedSlice(gpa),
.extra = try a.extra.toOwnedSlice(gpa),
};
}
@ -594,7 +594,7 @@ pub fn getSwitchBr(l: Liveness, gpa: Allocator, inst: Air.Inst.Index, cases_len:
deaths.appendAssumeCapacity(else_deaths);
}
return SwitchBrTable{
.deaths = deaths.toOwnedSlice(),
.deaths = try deaths.toOwnedSlice(),
};
}

View File

@ -53,12 +53,12 @@ local_zir_cache: Compilation.Directory,
/// map of Decl indexes to details about them being exported.
/// The Export memory is owned by the `export_owners` table; the slice itself
/// is owned by this table. The slice is guaranteed to not be empty.
decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{},
decl_exports: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{},
/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
/// is performing the export of another Decl.
/// This table owns the Export memory.
export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, []*Export) = .{},
export_owners: std.AutoArrayHashMapUnmanaged(Decl.Index, ArrayListUnmanaged(*Export)) = .{},
/// The set of all the Zig source files in the Module. We keep track of this in order
/// to iterate over it and check which source files have been modified on the file system when
/// an update is requested, as well as to cache `@import` results.
@ -80,7 +80,7 @@ embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
/// This table uses an optional index so that when a Decl is destroyed, the string literal
/// is still reclaimable by a future Decl.
string_literal_table: std.HashMapUnmanaged(StringLiteralContext.Key, Decl.OptionalIndex, StringLiteralContext, std.hash_map.default_max_load_percentage) = .{},
string_literal_bytes: std.ArrayListUnmanaged(u8) = .{},
string_literal_bytes: ArrayListUnmanaged(u8) = .{},
/// The set of all the generic function instantiations. This is used so that when a generic
/// function is called twice with the same comptime parameter arguments, both calls dispatch
@ -163,7 +163,7 @@ test_functions: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
/// multi-threaded contention on an atomic counter.
allocated_decls: std.SegmentedList(Decl, 0) = .{},
/// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},
decls_free_list: ArrayListUnmanaged(Decl.Index) = .{},
global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},
@ -173,7 +173,7 @@ reference_table: std.AutoHashMapUnmanaged(Decl.Index, struct {
}) = .{},
pub const StringLiteralContext = struct {
bytes: *std.ArrayListUnmanaged(u8),
bytes: *ArrayListUnmanaged(u8),
pub const Key = struct {
index: u32,
@ -192,7 +192,7 @@ pub const StringLiteralContext = struct {
};
pub const StringLiteralAdapter = struct {
bytes: *std.ArrayListUnmanaged(u8),
bytes: *ArrayListUnmanaged(u8),
pub fn eql(self: @This(), a_slice: []const u8, b: StringLiteralContext.Key) bool {
const b_slice = self.bytes.items[b.index..][0..b.len];
@ -1896,11 +1896,11 @@ pub const File = struct {
/// Used by change detection algorithm, after astgen, contains the
/// set of decls that existed in the previous ZIR but not in the new one.
deleted_decls: std.ArrayListUnmanaged(Decl.Index) = .{},
deleted_decls: ArrayListUnmanaged(Decl.Index) = .{},
/// Used by change detection algorithm, after astgen, contains the
/// set of decls that existed both in the previous ZIR and in the new one,
/// but their source code has been modified.
outdated_decls: std.ArrayListUnmanaged(Decl.Index) = .{},
outdated_decls: ArrayListUnmanaged(Decl.Index) = .{},
/// The most recent successful ZIR for this file, with no errors.
/// This is only populated when a previously successful ZIR
@ -3438,12 +3438,12 @@ pub fn deinit(mod: *Module) void {
mod.compile_log_decls.deinit(gpa);
for (mod.decl_exports.values()) |export_list| {
gpa.free(export_list);
for (mod.decl_exports.values()) |*export_list| {
export_list.deinit(gpa);
}
mod.decl_exports.deinit(gpa);
for (mod.export_owners.values()) |value| {
for (mod.export_owners.values()) |*value| {
freeExportList(gpa, value);
}
mod.export_owners.deinit(gpa);
@ -3533,13 +3533,13 @@ pub fn declIsRoot(mod: *Module, decl_index: Decl.Index) bool {
return decl_index == decl.src_namespace.getDeclIndex();
}
fn freeExportList(gpa: Allocator, export_list: []*Export) void {
for (export_list) |exp| {
fn freeExportList(gpa: Allocator, export_list: *ArrayListUnmanaged(*Export)) void {
for (export_list.items) |exp| {
gpa.free(exp.options.name);
if (exp.options.section) |s| gpa.free(s);
gpa.destroy(exp);
}
gpa.free(export_list);
export_list.deinit(gpa);
}
const data_has_safety_tag = @sizeOf(Zir.Inst.Data) != 8;
@ -3822,7 +3822,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
.byte_abs = token_starts[parse_err.token] + extra_offset,
},
},
.msg = msg.toOwnedSlice(),
.msg = try msg.toOwnedSlice(),
};
if (token_tags[parse_err.token + @boolToInt(parse_err.token_is_prev)] == .invalid) {
const bad_off = @intCast(u32, file.tree.tokenSlice(parse_err.token + @boolToInt(parse_err.token_is_prev)).len);
@ -3845,7 +3845,7 @@ pub fn astGenFile(mod: *Module, file: *File) !void {
.parent_decl_node = 0,
.lazy = .{ .token_abs = note.token },
},
.msg = msg.toOwnedSlice(),
.msg = try msg.toOwnedSlice(),
};
}
@ -3981,7 +3981,7 @@ fn updateZirRefs(mod: *Module, file: *File, old_zir: Zir) !void {
// Walk the Decl graph, updating ZIR indexes, strings, and populating
// the deleted and outdated lists.
var decl_stack: std.ArrayListUnmanaged(Decl.Index) = .{};
var decl_stack: ArrayListUnmanaged(Decl.Index) = .{};
defer decl_stack.deinit(gpa);
const root_decl = file.root_decl.unwrap().?;
@ -4146,7 +4146,7 @@ pub fn mapOldZirToNew(
old_inst: Zir.Inst.Index,
new_inst: Zir.Inst.Index,
};
var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{};
var match_stack: ArrayListUnmanaged(MatchedZirDecl) = .{};
defer match_stack.deinit(gpa);
// Main struct inst is always the same
@ -5488,12 +5488,12 @@ pub fn abortAnonDecl(mod: *Module, decl_index: Decl.Index) void {
/// Delete all the Export objects that are caused by this Decl. Re-analysis of
/// this Decl will cause them to be re-created (or not).
fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
const kv = mod.export_owners.fetchSwapRemove(decl_index) orelse return;
var export_owners = (mod.export_owners.fetchSwapRemove(decl_index) orelse return).value;
for (kv.value) |exp| {
for (export_owners.items) |exp| {
if (mod.decl_exports.getPtr(exp.exported_decl)) |value_ptr| {
// Remove exports with owner_decl matching the regenerating decl.
const list = value_ptr.*;
const list = value_ptr.items;
var i: usize = 0;
var new_len = list.len;
while (i < new_len) {
@ -5504,7 +5504,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
i += 1;
}
}
value_ptr.* = mod.gpa.shrink(list, new_len);
value_ptr.shrinkAndFree(mod.gpa, new_len);
if (new_len == 0) {
assert(mod.decl_exports.swapRemove(exp.exported_decl));
}
@ -5527,7 +5527,7 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
mod.gpa.free(exp.options.name);
mod.gpa.destroy(exp);
}
mod.gpa.free(kv.value);
export_owners.deinit(mod.gpa);
}
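deleteDeclExports still filters the export list in place, but it now finishes with shrinkAndFree on the unmanaged list instead of gpa.shrink on a raw slice. A hedged, standalone sketch of an in-place filter that ends the same way (the element type and removal criterion are illustrative):

const std = @import("std");

// Removes every occurrence of needle by swapping in the current last element,
// then releases the unused capacity in one step.
fn removeMatching(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u32), needle: u32) void {
    var i: usize = 0;
    var new_len = list.items.len;
    while (i < new_len) {
        if (list.items[i] == needle) {
            new_len -= 1;
            list.items[i] = list.items[new_len];
        } else {
            i += 1;
        }
    }
    list.shrinkAndFree(gpa, new_len);
}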
pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
@ -5745,8 +5745,8 @@ pub fn analyzeFnBody(mod: *Module, func: *Fn, arena: Allocator) SemaError!Air {
return Air{
.instructions = sema.air_instructions.toOwnedSlice(),
.extra = sema.air_extra.toOwnedSlice(gpa),
.values = sema.air_values.toOwnedSlice(gpa),
.extra = try sema.air_extra.toOwnedSlice(gpa),
.values = try sema.air_values.toOwnedSlice(gpa),
};
}
@ -6415,7 +6415,7 @@ pub fn processExports(mod: *Module) !void {
var it = mod.decl_exports.iterator();
while (it.next()) |entry| {
const exported_decl = entry.key_ptr.*;
const exports = entry.value_ptr.*;
const exports = entry.value_ptr.items;
for (exports) |new_export| {
const gop = try symbol_exports.getOrPut(gpa, new_export.options.name);
if (gop.found_existing) {
@ -6695,3 +6695,11 @@ pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u
pub fn wantDllExports(mod: Module) bool {
return mod.comp.bin_file.options.dll_export_fns and mod.getTarget().os.tag == .windows;
}
pub fn getDeclExports(mod: Module, decl_index: Decl.Index) []const *Export {
if (mod.decl_exports.get(decl_index)) |l| {
return l.items;
} else {
return &[0]*Export{};
}
}
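With decl_exports and export_owners now storing ArrayListUnmanaged(*Export) values, each entry owns its own buffer and getDeclExports flattens a lookup back into a plain slice for callers. A small hedged sketch of the same shape with stand-in types (not the compiler's real ones):

const std = @import("std");

// Stand-ins for Decl.Index and *Export.
const NameList = std.ArrayListUnmanaged([]const u8);
const ExportMap = std.AutoArrayHashMapUnmanaged(u32, NameList);

// Mirrors getDeclExports: callers always iterate a slice, empty or not.
fn namesFor(map: ExportMap, decl: u32) []const []const u8 {
    if (map.get(decl)) |list| return list.items;
    return &[0][]const u8{};
}

// Mirrors the deinit loop: free every per-decl list before the map itself.
fn deinitMap(gpa: std.mem.Allocator, map: *ExportMap) void {
    for (map.values()) |*list| list.deinit(gpa);
    map.deinit(gpa);
}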

View File

@ -2244,7 +2244,7 @@ fn failWithOwnedErrorMsg(sema: *Sema, err_msg: *Module.ErrorMsg) CompileError {
.hidden = cur_reference_trace - max_references,
});
}
err_msg.reference_trace = reference_stack.toOwnedSlice();
err_msg.reference_trace = try reference_stack.toOwnedSlice();
}
if (sema.owner_func) |func| {
func.state = .sema_failure;
@ -5500,20 +5500,18 @@ pub fn analyzeExport(
// Add to export_owners table.
const eo_gop = mod.export_owners.getOrPutAssumeCapacity(sema.owner_decl_index);
if (!eo_gop.found_existing) {
eo_gop.value_ptr.* = &[0]*Export{};
eo_gop.value_ptr.* = .{};
}
eo_gop.value_ptr.* = try gpa.realloc(eo_gop.value_ptr.*, eo_gop.value_ptr.len + 1);
eo_gop.value_ptr.*[eo_gop.value_ptr.len - 1] = new_export;
errdefer eo_gop.value_ptr.* = gpa.shrink(eo_gop.value_ptr.*, eo_gop.value_ptr.len - 1);
try eo_gop.value_ptr.append(gpa, new_export);
errdefer _ = eo_gop.value_ptr.pop();
// Add to exported_decl table.
const de_gop = mod.decl_exports.getOrPutAssumeCapacity(exported_decl_index);
if (!de_gop.found_existing) {
de_gop.value_ptr.* = &[0]*Export{};
de_gop.value_ptr.* = .{};
}
de_gop.value_ptr.* = try gpa.realloc(de_gop.value_ptr.*, de_gop.value_ptr.len + 1);
de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export;
errdefer de_gop.value_ptr.* = gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
try de_gop.value_ptr.append(gpa, new_export);
errdefer _ = de_gop.value_ptr.pop();
}
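analyzeExport now appends onto the per-decl list and can undo a single append with pop if a later step fails, instead of reallocating a whole slice each time. A hedged sketch of that getOrPut/append/errdefer shape with illustrative types:

const std = @import("std");

const Lists = std.AutoArrayHashMapUnmanaged(u32, std.ArrayListUnmanaged(u32));

fn recordValue(gpa: std.mem.Allocator, map: *Lists, seen: *std.AutoHashMapUnmanaged(u32, void), key: u32, value: u32) !void {
    const gop = try map.getOrPut(gpa, key);
    if (!gop.found_existing) gop.value_ptr.* = .{};
    try gop.value_ptr.append(gpa, value);
    // If the next step fails, drop only the entry we just appended.
    errdefer _ = gop.value_ptr.pop();
    try seen.put(gpa, value, {});
}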
fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void {
@ -10762,7 +10760,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.payload = undefined,
},
} });
var cond_body = case_block.instructions.toOwnedSlice(gpa);
var cond_body = try case_block.instructions.toOwnedSlice(gpa);
defer gpa.free(cond_body);
var wip_captures = try WipCaptureScope.init(gpa, sema.perm_arena, child_block.wip_capture_scope);
@ -10800,7 +10798,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
sema.air_extra.appendSliceAssumeCapacity(cond_body);
}
gpa.free(prev_then_body);
prev_then_body = case_block.instructions.toOwnedSlice(gpa);
prev_then_body = try case_block.instructions.toOwnedSlice(gpa);
prev_cond_br = new_cond_br;
}
}
@ -16318,7 +16316,7 @@ fn zirCondbr(
defer sub_block.instructions.deinit(gpa);
try sema.analyzeBodyRuntimeBreak(&sub_block, then_body);
const true_instructions = sub_block.instructions.toOwnedSlice(gpa);
const true_instructions = try sub_block.instructions.toOwnedSlice(gpa);
defer gpa.free(true_instructions);
const err_cond = blk: {

View File

@ -531,7 +531,7 @@ pub fn generate(
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
.extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
};
defer mir.deinit(bin_file.allocator);

View File

@ -328,7 +328,7 @@ pub fn generate(
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
.extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
};
defer mir.deinit(bin_file.allocator);

View File

@ -291,7 +291,7 @@ pub fn generate(
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
.extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
};
defer mir.deinit(bin_file.allocator);

View File

@ -330,7 +330,7 @@ pub fn generate(
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
.extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
};
defer mir.deinit(bin_file.allocator);

View File

@ -1064,8 +1064,8 @@ fn genFunctype(gpa: Allocator, cc: std.builtin.CallingConvention, params: []cons
}
return wasm.Type{
.params = temp_params.toOwnedSlice(),
.returns = returns.toOwnedSlice(),
.params = try temp_params.toOwnedSlice(),
.returns = try returns.toOwnedSlice(),
};
}
@ -1176,7 +1176,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
var mir: Mir = .{
.instructions = func.mir_instructions.toOwnedSlice(),
.extra = func.mir_extra.toOwnedSlice(func.gpa),
.extra = try func.mir_extra.toOwnedSlice(func.gpa),
};
defer mir.deinit(func.gpa);
@ -1258,7 +1258,7 @@ fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWV
},
else => return func.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}),
}
result.args = args.toOwnedSlice();
result.args = try args.toOwnedSlice();
return result;
}

View File

@ -331,7 +331,7 @@ pub fn generate(
var mir = Mir{
.instructions = function.mir_instructions.toOwnedSlice(),
.extra = function.mir_extra.toOwnedSlice(bin_file.allocator),
.extra = try function.mir_extra.toOwnedSlice(bin_file.allocator),
};
defer mir.deinit(bin_file.allocator);

View File

@ -1286,7 +1286,7 @@ pub const DeclGen = struct {
}
try bw.writeAll(");\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1326,7 +1326,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len;
try bw.writeAll(";\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1369,7 +1369,7 @@ pub const DeclGen = struct {
buffer.appendSliceAssumeCapacity(buffer.items[name_begin..name_end]);
buffer.appendSliceAssumeCapacity(";\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1413,7 +1413,7 @@ pub const DeclGen = struct {
}
try buffer.appendSlice("};\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
try dg.typedefs.ensureUnusedCapacity(1);
@ -1448,7 +1448,7 @@ pub const DeclGen = struct {
try buffer.writer().print("}} zig_T_{};\n", .{typeToCIdentifier(t, dg.module)});
const name_end = buffer.items.len - ";\n".len;
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1510,7 +1510,7 @@ pub const DeclGen = struct {
if (t.unionTagTypeSafety()) |_| try buffer.appendSlice(" } payload;\n");
try buffer.appendSlice("};\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
try dg.typedefs.ensureUnusedCapacity(1);
@ -1553,7 +1553,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len;
try bw.writeAll(";\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1586,7 +1586,7 @@ pub const DeclGen = struct {
const c_len_val = Value.initPayload(&c_len_pl.base);
try bw.print("[{}];\n", .{try dg.fmtIntLiteral(Type.usize, c_len_val)});
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1614,7 +1614,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len;
try bw.writeAll(";\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -1643,7 +1643,7 @@ pub const DeclGen = struct {
const name_end = buffer.items.len;
try buffer.appendSlice(";\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -2006,7 +2006,7 @@ pub const DeclGen = struct {
_ = try airBreakpoint(bw);
try buffer.appendSlice("}\n");
const rendered = buffer.toOwnedSlice();
const rendered = try buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
const name = rendered[name_begin..name_end];
@ -2108,7 +2108,7 @@ pub const DeclGen = struct {
dg.module.markDeclAlive(decl);
if (dg.module.decl_exports.get(decl_index)) |exports| {
return writer.writeAll(exports[0].options.name);
return writer.writeAll(exports.items[0].options.name);
} else if (decl.isExtern()) {
return writer.writeAll(mem.sliceTo(decl.name, 0));
} else {

View File

@ -693,7 +693,7 @@ pub const Object = struct {
for (mod.decl_exports.values()) |export_list, i| {
const decl_index = export_keys[i];
const llvm_global = object.decl_map.get(decl_index) orelse continue;
for (export_list) |exp| {
for (export_list.items) |exp| {
// Detect if the LLVM global has already been created as an extern. In such
// case, we need to replace all uses of it with this exported global.
// TODO update std.builtin.ExportOptions to have the name be a
@ -1215,8 +1215,7 @@ pub const Object = struct {
else => |e| return e,
};
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try o.updateDeclExports(module, decl_index, decl_exports);
try o.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
pub fn updateDecl(self: *Object, module: *Module, decl_index: Module.Decl.Index) !void {
@ -1239,8 +1238,7 @@ pub const Object = struct {
},
else => |e| return e,
};
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
/// TODO replace this with a call to `Module::getNamedValue`. This will require adding

View File

@ -387,7 +387,7 @@ pub const LibCInstallation = struct {
else => return error.FileSystem,
};
self.include_dir = result_buf.toOwnedSlice();
self.include_dir = try result_buf.toOwnedSlice();
return;
}
@ -434,7 +434,7 @@ pub const LibCInstallation = struct {
else => return error.FileSystem,
};
self.crt_dir = result_buf.toOwnedSlice();
self.crt_dir = try result_buf.toOwnedSlice();
return;
}
return error.LibCRuntimeNotFound;
@ -499,7 +499,7 @@ pub const LibCInstallation = struct {
else => return error.FileSystem,
};
self.kernel32_lib_dir = result_buf.toOwnedSlice();
self.kernel32_lib_dir = try result_buf.toOwnedSlice();
return;
}
return error.LibCKernel32LibNotFound;

View File

@ -938,9 +938,9 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
try self.updateDeclCode(decl_index, code, .FUNCTION);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
@ -1053,9 +1053,9 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
try self.updateDeclCode(decl_index, code, .NULL);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
fn getDeclOutputSection(self: *Coff, decl: *Module.Decl) u16 {

View File

@ -2450,9 +2450,9 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
);
}
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !void {
@ -2527,9 +2527,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
);
}
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl_index, decl_exports);
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {

View File

@ -2225,8 +2225,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
@ -2377,8 +2376,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
const decl_exports = module.decl_exports.get(decl_index) orelse &[0]*Module.Export{};
try self.updateDeclExports(module, decl_index, decl_exports);
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}
fn getDeclOutputSection(self: *MachO, decl: *Module.Decl) u8 {

View File

@ -165,7 +165,7 @@ pub const Node = struct {
break;
try label_buf.append(next);
}
break :blk label_buf.toOwnedSlice();
break :blk try label_buf.toOwnedSlice();
};
const seek_to = try leb.readULEB128(u64, reader);

View File

@ -230,7 +230,7 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
// null terminate
try a.append(0);
const final = a.toOwnedSlice();
const final = try a.toOwnedSlice();
self.syms.items[fn_map_res.value_ptr.sym_index - 1] = .{
.type = .z,
.value = 1,
@ -296,7 +296,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
},
);
const code = switch (res) {
.appended => code_buffer.toOwnedSlice(),
.appended => try code_buffer.toOwnedSlice(),
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
@ -305,7 +305,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
};
const out: FnDeclOutput = .{
.code = code,
.lineinfo = dbg_line_buffer.toOwnedSlice(),
.lineinfo = try dbg_line_buffer.toOwnedSlice(),
.start_line = start_line.?,
.end_line = end_line,
};
@ -574,7 +574,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports);
try self.addDeclExports(mod, decl, exports.items);
}
}
}
@ -611,7 +611,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
}
self.syms.items[decl.link.plan9.sym_index.?].value = off;
if (mod.decl_exports.get(decl_index)) |exports| {
try self.addDeclExports(mod, decl, exports);
try self.addDeclExports(mod, decl, exports.items);
}
}
// write the unnamed constants after the other data decls
@ -641,7 +641,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
self.syms.items[1].value = self.getAddr(0x0, .b);
var sym_buf = std.ArrayList(u8).init(self.base.allocator);
try self.writeSyms(&sym_buf);
const syms = sym_buf.toOwnedSlice();
const syms = try sym_buf.toOwnedSlice();
defer self.base.allocator.free(syms);
assert(2 + self.atomCount() == iovecs_i); // we didn't write all the decls
iovecs[iovecs_i] = .{ .iov_base = syms.ptr, .iov_len = syms.len };
@ -914,7 +914,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[decl.link.plan9.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports) |e| {
for (exports.items) |e| {
try self.writeSym(writer, self.syms.items[e.link.plan9.?]);
}
}
@ -939,7 +939,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[decl.link.plan9.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports) |e| {
for (exports.items) |e| {
const s = self.syms.items[e.link.plan9.?];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;

View File

@ -3206,7 +3206,7 @@ fn linkWithLLD(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
const skip_export_non_fn = target.os.tag == .wasi and
wasm.base.options.wasi_exec_model == .command;
for (mod.decl_exports.values()) |exports| {
for (exports) |exprt| {
for (exports.items) |exprt| {
const exported_decl = mod.declPtr(exprt.exported_decl);
if (skip_export_non_fn and exported_decl.ty.zigTypeTag() != .Fn) {
// skip exporting symbols when we're building a WASI command

View File

@ -557,7 +557,7 @@ fn Parser(comptime ReaderType: type) type {
error.EndOfStream => {}, // finished parsing the file
else => |e| return e,
}
parser.object.relocatable_data = relocatable_data.toOwnedSlice();
parser.object.relocatable_data = try relocatable_data.toOwnedSlice();
}
/// Based on the "features" custom section, parses it into a list of
@ -742,7 +742,7 @@ fn Parser(comptime ReaderType: type) type {
log.debug("Found legacy indirect function table. Created symbol", .{});
}
parser.object.symtable = symbols.toOwnedSlice();
parser.object.symtable = try symbols.toOwnedSlice();
},
}
}

View File

@ -262,7 +262,7 @@ pub const Tree = struct {
}
self.source = source;
self.tokens = tokens.toOwnedSlice();
self.tokens = try tokens.toOwnedSlice();
var it = TokenIterator{ .buffer = self.tokens };
var parser = Parser{

View File

@ -193,7 +193,7 @@ pub const Value = union(ValueType) {
}
}
return Value{ .list = out_list.toOwnedSlice() };
return Value{ .list = try out_list.toOwnedSlice() };
} else if (node.cast(Node.Value)) |value| {
const start = tree.tokens[value.start.?];
const end = tree.tokens[value.end.?];

View File

@ -4803,7 +4803,7 @@ pub const ClangArgIterator = struct {
};
self.root_args = args;
}
const resp_arg_slice = resp_arg_list.toOwnedSlice();
const resp_arg_slice = try resp_arg_list.toOwnedSlice();
self.next_index = 0;
self.argv = resp_arg_slice;

View File

@ -338,7 +338,7 @@ const TestManifest = struct {
while (try it.next()) |item| {
try out.append(item);
}
return out.toOwnedSlice();
return try out.toOwnedSlice();
}
fn getConfigForKeyAssertSingle(self: TestManifest, key: []const u8, comptime T: type) !T {
@ -361,7 +361,7 @@ const TestManifest = struct {
while (it.next()) |line| {
try out.append(line);
}
return out.toOwnedSlice();
return try out.toOwnedSlice();
}
fn ParseFn(comptime T: type) type {
@ -1179,7 +1179,7 @@ pub const TestContext = struct {
if (output.items.len > 0) {
try output.resize(output.items.len - 1);
}
case.addCompareOutput(src, output.toOwnedSlice());
case.addCompareOutput(src, try output.toOwnedSlice());
},
.cli => @panic("TODO cli tests"),
}

View File

@ -788,7 +788,7 @@ pub fn render(gpa: Allocator, zig_is_stage1: bool, nodes: []const Node) !std.zig
.source = try ctx.buf.toOwnedSliceSentinel(0),
.tokens = ctx.tokens.toOwnedSlice(),
.nodes = ctx.nodes.toOwnedSlice(),
.extra_data = ctx.extra_data.toOwnedSlice(gpa),
.extra_data = try ctx.extra_data.toOwnedSlice(gpa),
.errors = &.{},
};
}

View File

@ -47,9 +47,9 @@ pub export fn entry() void {
// :11:22: error: comparison of 'void' with null
// :25:51: error: values of type 'anyopaque' must be comptime-known, but operand value is runtime-known
// :25:51: note: opaque type 'anyopaque' has undefined size
// :25:51: error: values of type 'fn(*anyopaque, usize, u29, u29, usize) error{OutOfMemory}![]u8' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, usize, u29, u29, usize) error{OutOfMemory}![]u8' for a function pointer type
// :25:51: error: values of type 'fn(*anyopaque, []u8, u29, usize, u29, usize) ?usize' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, []u8, u29, usize, u29, usize) ?usize' for a function pointer type
// :25:51: error: values of type 'fn(*anyopaque, []u8, u29, usize) void' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, []u8, u29, usize) void' for a function pointer type
// :25:51: error: values of type 'fn(*anyopaque, usize, u8, usize) ?[*]u8' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, usize, u8, usize) ?[*]u8' for a function pointer type
// :25:51: error: values of type 'fn(*anyopaque, []u8, u8, usize, usize) bool' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, []u8, u8, usize, usize) bool' for a function pointer type
// :25:51: error: values of type 'fn(*anyopaque, []u8, u8, usize) void' must be comptime-known, but operand value is runtime-known
// :25:51: note: use '*const fn(*anyopaque, []u8, u8, usize) void' for a function pointer type

View File

@ -504,9 +504,10 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ const allocator = logging_allocator.allocator();
\\
\\ var a = try allocator.alloc(u8, 10);
\\ a = allocator.shrink(a, 5);
\\ try std.testing.expect(allocator.resize(a, 5));
\\ a = a[0..5];
\\ try std.testing.expect(a.len == 5);
\\ try std.testing.expect(allocator.resize(a, 20) == null);
\\ try std.testing.expect(!allocator.resize(a, 20));
\\ allocator.free(a);
\\}
\\
@ -522,9 +523,9 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
\\ nosuspend stdout.print(level_txt ++ prefix2 ++ format ++ "\n", args) catch return;
\\}
,
\\debug: alloc - success - len: 10, ptr_align: 1, len_align: 0
\\debug: shrink - success - 10 to 5, len_align: 0, buf_align: 1
\\error: expand - failure - 5 to 20, len_align: 0, buf_align: 1
\\debug: alloc - success - len: 10, ptr_align: 0
\\debug: shrink - success - 10 to 5, buf_align: 0
\\error: expand - failure - 5 to 20, buf_align: 0
\\debug: free - len: 5
\\
);
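The expected log output changes because alignments now travel through the allocator interface as log2 values, so a plain u8 allocation reports ptr_align: 0 rather than 1. A tiny hedged check of that encoding:

const std = @import("std");

test "alignment is passed as a log2 value" {
    // An alignment of 1 byte is encoded as 0, an alignment of 8 bytes as 3.
    try std.testing.expectEqual(@as(u8, 0), std.math.log2_int(u29, 1));
    try std.testing.expectEqual(@as(u8, 3), std.math.log2_int(u29, 8));
}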

View File

@ -992,7 +992,7 @@ pub const StackTracesContext = struct {
}
try buf.appendSlice("\n");
}
break :got_result buf.toOwnedSlice();
break :got_result try buf.toOwnedSlice();
};
if (!mem.eql(u8, self.expect_output, got)) {