zig/lib/std/heap/arena_allocator.zig
Andrew Kelley a32d3a85d2 rework self-hosted compiler for incremental builds
 * Introduce std.ArrayListUnmanaged for when you have the allocator
   stored elsewhere
 * Move std.heap.ArenaAllocator implementation to its own file. Extract
   the main state into std.heap.ArenaAllocator.State, which can be
   stored as an alternative to storing the entire ArenaAllocator, saving
   24 bytes per ArenaAllocator on 64-bit targets (see the sketch after
   this list).
 * The std.LinkedList.Node pointer field now defaults to null.
 * Rework self-hosted compiler Package API
 * Delete almost all the bitrotted self-hosted compiler code. The only
   bitrotted code left is in main.zig and compilation.zig.
 * Add call instruction to ZIR
 * The self-hosted compiler ir API and link API are reworked to support
   a long-running compiler that incrementally updates declarations.
 * Introduce the concept of scopes to ZIR semantic analysis
 * ZIR text format supports referencing named decls that are declared
   later in the file
 * Figure out how memory management works for the long-running compiler
   and incremental compilation. The main roots are top-level
   declarations. There is a table of decls. The key is a cryptographic
   hash of the fully qualified decl name. Each decl has an arena
   allocator where all of the memory related to that decl is stored.
   Each code block has its own arena allocator for the lifetime of
   the block. Values that want to survive when going out of scope in
   a block must get copied into the outer block. Finally, values must
   get copied into the Decl arena to be long-lived. (A rough sketch of
   this decl table appears below.)
 * Delete the unused MemoryCell struct. Instead, comptime pointers are
   based on references to Decl structs.
 * Figure out how caching works. Each Decl will store a set of other
   Decls which must be recompiled when it changes (included in the
   decl-table sketch below).
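
To illustrate the State optimization mentioned above, here is a minimal
sketch. The Decl struct and helper function are hypothetical; promote() is
the real API from the file below. On 64-bit targets a full ArenaAllocator
is 40 bytes (two function pointers for the Allocator interface, the
child_allocator pointer, and the 16-byte State), so storing only the State
saves the 24 bytes cited above:

    const std = @import("std");

    // Hypothetical decl that embeds only the 16-byte arena state.
    const Decl = struct {
        arena: std.heap.ArenaAllocator.State,
    };

    fn allocInDecl(gpa: *std.mem.Allocator, decl: *Decl) ![]u8 {
        // promote() rebuilds a full ArenaAllocator around the stored
        // state; copy the state back afterwards so mutations survive.
        var arena = decl.arena.promote(gpa);
        defer decl.arena = arena.state;
        return arena.allocator.alloc(u8, 100);
    }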

This branch is still work-in-progress; this commit breaks the build.
2020-05-10 02:05:54 -04:00
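
The decl table and per-decl arenas described in the list above might look
roughly like this; every name here is invented for illustration, and only
ArenaAllocator.State is the real API:

    const std = @import("std");

    // Key: cryptographic hash of the fully qualified decl name.
    const Hash = [16]u8;

    const Decl = struct {
        name: []const u8,
        // Everything owned by this decl lives in its own arena, so the
        // decl's memory can be freed in one step when it is updated.
        arena: std.heap.ArenaAllocator.State,
        // Caching: the set of decls that must be recompiled when this
        // one changes.
        dependants: std.AutoHashMap(*Decl, void),
    };

    const DeclTable = std.AutoHashMap(Hash, *Decl);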


const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const Allocator = std.mem.Allocator;

/// This allocator takes an existing allocator, wraps it, and provides an interface
/// where you can allocate without freeing, and then free it all together.
pub const ArenaAllocator = struct {
    allocator: Allocator,

    child_allocator: *Allocator,
    state: State,

    /// Inner state of ArenaAllocator. Can be stored rather than the entire ArenaAllocator
    /// as a memory-saving optimization.
    pub const State = struct {
        buffer_list: std.SinglyLinkedList([]u8) = @as(std.SinglyLinkedList([]u8), .{}),
        end_index: usize = 0,

        pub fn promote(self: State, child_allocator: *Allocator) ArenaAllocator {
            return .{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                },
                .child_allocator = child_allocator,
                .state = self,
            };
        }
    };

    const BufNode = std.SinglyLinkedList([]u8).Node;

    pub fn init(child_allocator: *Allocator) ArenaAllocator {
        return (State{}).promote(child_allocator);
    }

    pub fn deinit(self: ArenaAllocator) void {
        var it = self.state.buffer_list.first;
        while (it) |node| {
            // this has to occur before the free because the free frees node
            const next_it = node.next;
            self.child_allocator.free(node.data);
            it = next_it;
        }
    }

    fn createNode(self: *ArenaAllocator, prev_len: usize, minimum_size: usize) !*BufNode {
        // The BufNode header is stored at the start of the buffer it
        // describes, so leave room for it.
        const actual_min_size = minimum_size + @sizeOf(BufNode);
        // Grow geometrically (1.5x the previous buffer length), rounded
        // up to a multiple of the page size, until the request fits.
        var len = prev_len;
        while (true) {
            len += len / 2;
            len += mem.page_size - @rem(len, mem.page_size);
            if (len >= actual_min_size) break;
        }
        const buf = try self.child_allocator.alignedAlloc(u8, @alignOf(BufNode), len);
        const buf_node_slice = mem.bytesAsSlice(BufNode, buf[0..@sizeOf(BufNode)]);
        const buf_node = &buf_node_slice[0];
        buf_node.* = BufNode{
            .data = buf,
            .next = null,
        };
        self.state.buffer_list.prepend(buf_node);
        self.state.end_index = 0;
        return buf_node;
    }

    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);

        var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + alignment);
        while (true) {
            // Skip the BufNode header embedded at the start of the buffer.
            const cur_buf = cur_node.data[@sizeOf(BufNode)..];
            const addr = @ptrToInt(cur_buf.ptr) + self.state.end_index;
            const adjusted_addr = mem.alignForward(addr, alignment);
            const adjusted_index = self.state.end_index + (adjusted_addr - addr);
            const new_end_index = adjusted_index + n;
            if (new_end_index > cur_buf.len) {
                // Not enough room in the current buffer; allocate a
                // fresh one and retry.
                cur_node = try self.createNode(cur_buf.len, n + alignment);
                continue;
            }
            const result = cur_buf[adjusted_index..new_end_index];
            self.state.end_index = new_end_index;
            return result;
        }
    }

    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
        if (new_size <= old_mem.len and new_align <= new_size) {
            // We can't do anything with the memory, so tell the client to keep it.
            return error.OutOfMemory;
        } else {
            // The arena never frees, so growing always means a new
            // allocation followed by a copy.
            const result = try alloc(allocator, new_size, new_align);
            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
            return result;
        }
    }

    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
        // Shrinking in place always succeeds; the tail is simply wasted
        // until the arena is deinitialized.
        return old_mem[0..new_size];
    }
};
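
For reference, a minimal usage sketch (not part of the file above), assuming
std.heap.page_allocator as the backing allocator:

    const std = @import("std");

    test "arena allocator round trip" {
        var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
        // One deinit call frees every buffer the arena acquired.
        defer arena.deinit();

        // Allocate through the wrapped Allocator interface; individual
        // frees are unnecessary.
        const bytes = try arena.allocator.alloc(u8, 100);
        std.debug.assert(bytes.len == 100);
    }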