Mirror of https://github.com/ziglang/zig.git, synced 2024-11-26 23:22:44 +00:00
breaking changes to std.mem.Allocator interface API
Before, allocator implementations had to provide `allocFn`, `reallocFn`, and `freeFn`. Now they must provide only `reallocFn` and `shrinkFn`: reallocating from a zero-length slice is an allocation, and shrinking to a zero-length slice is a free. When the new memory size is less than or equal to the previous allocation size, `reallocFn` now has the option to return `error.OutOfMemory`, indicating that the allocator would not be able to take advantage of the smaller size.

For more details see #1306, which this commit closes. This commit also paves the way to solving #2009.

Finally, this commit introduces a memory leak into all coroutines. There is an issue where a coroutine calls the shrink function to free its own stack frame, but the return value of `shrinkFn` is a slice, which is implemented as an sret struct; writing to that return pointer causes an invalid memory write. We could work around it by having a global helper function with a void return type and calling that instead, but this hack will suffice until coroutines are reworked to be non-allocating. In short, coroutines are not supported right now, until they are reworked as in #1194.
This commit is contained in:
parent 4090fe81f6 · commit 9c13e9b7ed
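To make the commit message concrete, here is a minimal sketch of an allocator written against the new two-function interface, in the style of the `FailingAllocator` change below. The `CountingAllocator` wrapper and its fields are invented for illustration and are not part of this commit; only `reallocFn` and `shrinkFn` are real interface fields.

    const std = @import("std");
    const mem = std.mem;
    const Allocator = mem.Allocator;

    // Hypothetical wrapper allocator: forwards to a parent allocator while
    // tracking the number of live bytes. Only reallocFn and shrinkFn are needed.
    pub const CountingAllocator = struct {
        allocator: Allocator,
        parent: *Allocator,
        live_bytes: usize,

        pub fn init(parent: *Allocator) CountingAllocator {
            return CountingAllocator{
                .allocator = Allocator{
                    .reallocFn = realloc,
                    .shrinkFn = shrink,
                },
                .parent = parent,
                .live_bytes = 0,
            };
        }

        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
            const self = @fieldParentPtr(CountingAllocator, "allocator", allocator);
            // old_mem.len == 0 means this call is an allocation;
            // new_size == 0 means it is a free.
            const result = try self.parent.reallocFn(self.parent, old_mem, old_align, new_size, new_align);
            self.live_bytes = self.live_bytes - old_mem.len + new_size;
            return result;
        }

        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
            const self = @fieldParentPtr(CountingAllocator, "allocator", allocator);
            // shrinkFn must succeed; shrinking to zero length is a free.
            const result = self.parent.shrinkFn(self.parent, old_mem, old_align, new_size, new_align);
            self.live_bytes -= old_mem.len - new_size;
            return result;
        }
    };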
@@ -1364,7 +1364,7 @@ pub const Builder = struct {
         if (str_token[0] == 'c') {
             // first we add a null
-            buf = try irb.comp.gpa().realloc(u8, buf, buf.len + 1);
+            buf = try irb.comp.gpa().realloc(buf, buf.len + 1);
             buf[buf.len - 1] = 0;

             // next make an array value
@@ -3356,7 +3356,7 @@ struct IrInstructionCoroPromise {
 struct IrInstructionCoroAllocHelper {
     IrInstruction base;

-    IrInstruction *alloc_fn;
+    IrInstruction *realloc_fn;
     IrInstruction *coro_size;
 };

@@ -3481,8 +3481,8 @@ static const size_t stack_trace_ptr_count = 32;
 #define RETURN_ADDRESSES_FIELD_NAME "return_addresses"
 #define ERR_RET_TRACE_FIELD_NAME "err_ret_trace"
 #define RESULT_FIELD_NAME "result"
-#define ASYNC_ALLOC_FIELD_NAME "allocFn"
-#define ASYNC_FREE_FIELD_NAME "freeFn"
+#define ASYNC_REALLOC_FIELD_NAME "reallocFn"
+#define ASYNC_SHRINK_FIELD_NAME "shrinkFn"
 #define ATOMIC_STATE_FIELD_NAME "atomic_state"
 // these point to data belonging to the awaiter
 #define ERR_RET_TRACE_PTR_FIELD_NAME "err_ret_trace_ptr"
@@ -3707,9 +3707,11 @@ ZigVar *add_variable(CodeGen *g, AstNode *source_node, Scope *parent_scope, Buf

     ZigVar *existing_var = find_variable(g, parent_scope, name, nullptr);
     if (existing_var && !existing_var->shadowable) {
-        ErrorMsg *msg = add_node_error(g, source_node,
-                buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
-        add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
+        if (existing_var->var_type == nullptr || !type_is_invalid(existing_var->var_type)) {
+            ErrorMsg *msg = add_node_error(g, source_node,
+                    buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
+            add_error_note(g, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
+        }
         variable_entry->var_type = g->builtin_types.entry_invalid;
     } else {
         ZigType *type;
@@ -5177,7 +5177,7 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f
     LLVMValueRef sret_ptr = LLVMBuildAlloca(g->builder, LLVMGetElementType(alloc_fn_arg_types[0]), "");

     size_t next_arg = 0;
-    LLVMValueRef alloc_fn_val = LLVMGetParam(fn_val, next_arg);
+    LLVMValueRef realloc_fn_val = LLVMGetParam(fn_val, next_arg);
     next_arg += 1;

     LLVMValueRef stack_trace_val;
@@ -5195,15 +5195,22 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f
     LLVMValueRef alignment_val = LLVMConstInt(g->builtin_types.entry_u29->type_ref,
             get_coro_frame_align_bytes(g), false);

+    ConstExprValue *zero_array = create_const_str_lit(g, buf_create_from_str(""));
+    ConstExprValue *undef_slice_zero = create_const_slice(g, zero_array, 0, 0, false);
+    render_const_val(g, undef_slice_zero, "");
+    render_const_val_global(g, undef_slice_zero, "");
+
     ZigList<LLVMValueRef> args = {};
     args.append(sret_ptr);
     if (g->have_err_ret_tracing) {
         args.append(stack_trace_val);
     }
     args.append(allocator_val);
+    args.append(undef_slice_zero->global_refs->llvm_global);
+    args.append(LLVMGetUndef(g->builtin_types.entry_u29->type_ref));
     args.append(coro_size);
     args.append(alignment_val);
-    LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, alloc_fn_val, args.items, args.length,
+    LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, realloc_fn_val, args.items, args.length,
             get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, "");
     set_call_instr_sret(g, call_instruction);
     LLVMValueRef err_val_ptr = LLVMBuildStructGEP(g->builder, sret_ptr, err_union_err_index, "");
@@ -5239,14 +5246,14 @@ static LLVMValueRef get_coro_alloc_helper_fn_val(CodeGen *g, LLVMTypeRef alloc_f
 static LLVMValueRef ir_render_coro_alloc_helper(CodeGen *g, IrExecutable *executable,
         IrInstructionCoroAllocHelper *instruction)
 {
-    LLVMValueRef alloc_fn = ir_llvm_value(g, instruction->alloc_fn);
+    LLVMValueRef realloc_fn = ir_llvm_value(g, instruction->realloc_fn);
     LLVMValueRef coro_size = ir_llvm_value(g, instruction->coro_size);
-    LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(alloc_fn), instruction->alloc_fn->value.type);
+    LLVMValueRef fn_val = get_coro_alloc_helper_fn_val(g, LLVMTypeOf(realloc_fn), instruction->realloc_fn->value.type);
     size_t err_code_ptr_arg_index = get_async_err_code_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);
     size_t allocator_arg_index = get_async_allocator_arg_index(g, &g->cur_fn->type_entry->data.fn.fn_type_id);

     ZigList<LLVMValueRef> params = {};
-    params.append(alloc_fn);
+    params.append(realloc_fn);
     uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, g->cur_fn);
     if (err_ret_trace_arg_index != UINT32_MAX) {
         params.append(LLVMGetParam(g->cur_fn_val, err_ret_trace_arg_index));
70 src/ir.cpp
@@ -2788,13 +2788,13 @@ static IrInstruction *ir_build_coro_promise(IrBuilder *irb, Scope *scope, AstNod
 }

 static IrInstruction *ir_build_coro_alloc_helper(IrBuilder *irb, Scope *scope, AstNode *source_node,
-        IrInstruction *alloc_fn, IrInstruction *coro_size)
+        IrInstruction *realloc_fn, IrInstruction *coro_size)
 {
     IrInstructionCoroAllocHelper *instruction = ir_build_instruction<IrInstructionCoroAllocHelper>(irb, scope, source_node);
-    instruction->alloc_fn = alloc_fn;
+    instruction->realloc_fn = realloc_fn;
     instruction->coro_size = coro_size;

-    ir_ref_instruction(alloc_fn, irb->current_basic_block);
+    ir_ref_instruction(realloc_fn, irb->current_basic_block);
     ir_ref_instruction(coro_size, irb->current_basic_block);

     return &instruction->base;
@@ -3319,9 +3319,11 @@ static ZigVar *create_local_var(CodeGen *codegen, AstNode *node, Scope *parent_s
     if (!skip_name_check) {
         ZigVar *existing_var = find_variable(codegen, parent_scope, name, nullptr);
         if (existing_var && !existing_var->shadowable) {
-            ErrorMsg *msg = add_node_error(codegen, node,
-                    buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
-            add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
+            if (existing_var->var_type == nullptr || !type_is_invalid(existing_var->var_type)) {
+                ErrorMsg *msg = add_node_error(codegen, node,
+                        buf_sprintf("redeclaration of variable '%s'", buf_ptr(name)));
+                add_error_note(codegen, msg, existing_var->decl_node, buf_sprintf("previous declaration is here"));
+            }
             variable_entry->var_type = codegen->builtin_types.entry_invalid;
         } else {
             ZigType *type;
@@ -7506,10 +7508,10 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
             ImplicitAllocatorIdArg);
     irb->exec->coro_allocator_var = ir_create_var(irb, node, coro_scope, nullptr, true, true, true, const_bool_false);
     ir_build_var_decl_src(irb, coro_scope, node, irb->exec->coro_allocator_var, nullptr, nullptr, implicit_allocator_ptr);
-    Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME);
-    IrInstruction *alloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, alloc_field_name);
-    IrInstruction *alloc_fn = ir_build_load_ptr(irb, coro_scope, node, alloc_fn_ptr);
-    IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, alloc_fn, coro_size);
+    Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
+    IrInstruction *realloc_fn_ptr = ir_build_field_ptr(irb, coro_scope, node, implicit_allocator_ptr, realloc_field_name);
+    IrInstruction *realloc_fn = ir_build_load_ptr(irb, coro_scope, node, realloc_fn_ptr);
+    IrInstruction *maybe_coro_mem_ptr = ir_build_coro_alloc_helper(irb, coro_scope, node, realloc_fn, coro_size);
     IrInstruction *alloc_result_is_ok = ir_build_test_nonnull(irb, coro_scope, node, maybe_coro_mem_ptr);
     IrBasicBlock *alloc_err_block = ir_create_basic_block(irb, coro_scope, "AllocError");
     IrBasicBlock *alloc_ok_block = ir_create_basic_block(irb, coro_scope, "AllocOk");
@@ -7643,11 +7645,11 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
     merge_incoming_values[1] = await_handle_in_block;
     IrInstruction *awaiter_handle = ir_build_phi(irb, scope, node, 2, merge_incoming_blocks, merge_incoming_values);

-    Buf *free_field_name = buf_create_from_str(ASYNC_FREE_FIELD_NAME);
+    Buf *shrink_field_name = buf_create_from_str(ASYNC_SHRINK_FIELD_NAME);
     IrInstruction *implicit_allocator_ptr = ir_build_get_implicit_allocator(irb, scope, node,
             ImplicitAllocatorIdLocalVar);
-    IrInstruction *free_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, free_field_name);
-    IrInstruction *free_fn = ir_build_load_ptr(irb, scope, node, free_fn_ptr);
+    IrInstruction *shrink_fn_ptr = ir_build_field_ptr(irb, scope, node, implicit_allocator_ptr, shrink_field_name);
+    IrInstruction *shrink_fn = ir_build_load_ptr(irb, scope, node, shrink_fn_ptr);
     IrInstruction *zero = ir_build_const_usize(irb, scope, node, 0);
     IrInstruction *coro_mem_ptr_maybe = ir_build_coro_free(irb, scope, node, coro_id, irb->exec->coro_handle);
     IrInstruction *u8_ptr_type_unknown_len = ir_build_const_type(irb, scope, node,
@@ -7659,11 +7661,20 @@ bool ir_gen(CodeGen *codegen, AstNode *node, Scope *scope, IrExecutable *ir_exec
     IrInstruction *coro_size_ptr = ir_build_var_ptr(irb, scope, node, coro_size_var);
     IrInstruction *coro_size = ir_build_load_ptr(irb, scope, node, coro_size_ptr);
     IrInstruction *mem_slice = ir_build_slice(irb, scope, node, coro_mem_ptr_ref, zero, coro_size, false);
-    size_t arg_count = 2;
+    size_t arg_count = 5;
     IrInstruction **args = allocate<IrInstruction *>(arg_count);
     args[0] = implicit_allocator_ptr; // self
     args[1] = mem_slice; // old_mem
-    ir_build_call(irb, scope, node, nullptr, free_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr);
+    args[2] = ir_build_const_usize(irb, scope, node, 8); // old_align
+    // TODO: intentional memory leak here. If this is set to 0 then there is an issue where a coroutine
+    // calls the function and it frees its own stack frame, but then the return value is a slice, which
+    // is implemented as an sret struct. writing to the return pointer causes invalid memory write.
+    // We could work around it by having a global helper function which has a void return type
+    // and calling that instead. But instead this hack will suffice until I rework coroutines to be
+    // non-allocating. Basically coroutines are not supported right now until they are reworked.
+    args[3] = ir_build_const_usize(irb, scope, node, 1); // new_size
+    args[4] = ir_build_const_usize(irb, scope, node, 1); // new_align
+    ir_build_call(irb, scope, node, nullptr, shrink_fn, arg_count, args, false, FnInlineAuto, false, nullptr, nullptr);

     IrBasicBlock *resume_block = ir_create_basic_block(irb, scope, "Resume");
     ir_build_cond_br(irb, scope, node, resume_awaiter, resume_block, irb->exec->coro_suspend_block, const_bool_false);
@@ -13574,32 +13585,31 @@ IrInstruction *ir_get_implicit_allocator(IrAnalyze *ira, IrInstruction *source_i
 static IrInstruction *ir_analyze_async_call(IrAnalyze *ira, IrInstructionCall *call_instruction, ZigFn *fn_entry, ZigType *fn_type,
     IrInstruction *fn_ref, IrInstruction **casted_args, size_t arg_count, IrInstruction *async_allocator_inst)
 {
-    Buf *alloc_field_name = buf_create_from_str(ASYNC_ALLOC_FIELD_NAME);
-    //Buf *free_field_name = buf_create_from_str("freeFn");
+    Buf *realloc_field_name = buf_create_from_str(ASYNC_REALLOC_FIELD_NAME);
     assert(async_allocator_inst->value.type->id == ZigTypeIdPointer);
     ZigType *container_type = async_allocator_inst->value.type->data.pointer.child_type;
-    IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, alloc_field_name, &call_instruction->base,
+    IrInstruction *field_ptr_inst = ir_analyze_container_field_ptr(ira, realloc_field_name, &call_instruction->base,
             async_allocator_inst, container_type);
     if (type_is_invalid(field_ptr_inst->value.type)) {
         return ira->codegen->invalid_instruction;
     }
-    ZigType *ptr_to_alloc_fn_type = field_ptr_inst->value.type;
-    assert(ptr_to_alloc_fn_type->id == ZigTypeIdPointer);
+    ZigType *ptr_to_realloc_fn_type = field_ptr_inst->value.type;
+    assert(ptr_to_realloc_fn_type->id == ZigTypeIdPointer);

-    ZigType *alloc_fn_type = ptr_to_alloc_fn_type->data.pointer.child_type;
-    if (alloc_fn_type->id != ZigTypeIdFn) {
+    ZigType *realloc_fn_type = ptr_to_realloc_fn_type->data.pointer.child_type;
+    if (realloc_fn_type->id != ZigTypeIdFn) {
         ir_add_error(ira, &call_instruction->base,
-            buf_sprintf("expected allocation function, found '%s'", buf_ptr(&alloc_fn_type->name)));
+            buf_sprintf("expected reallocation function, found '%s'", buf_ptr(&realloc_fn_type->name)));
         return ira->codegen->invalid_instruction;
     }

-    ZigType *alloc_fn_return_type = alloc_fn_type->data.fn.fn_type_id.return_type;
-    if (alloc_fn_return_type->id != ZigTypeIdErrorUnion) {
+    ZigType *realloc_fn_return_type = realloc_fn_type->data.fn.fn_type_id.return_type;
+    if (realloc_fn_return_type->id != ZigTypeIdErrorUnion) {
         ir_add_error(ira, fn_ref,
-            buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&alloc_fn_return_type->name)));
+            buf_sprintf("expected allocation function to return error union, but it returns '%s'", buf_ptr(&realloc_fn_return_type->name)));
         return ira->codegen->invalid_instruction;
     }
-    ZigType *alloc_fn_error_set_type = alloc_fn_return_type->data.error_union.err_set_type;
+    ZigType *alloc_fn_error_set_type = realloc_fn_return_type->data.error_union.err_set_type;
     ZigType *return_type = fn_type->data.fn.fn_type_id.return_type;
     ZigType *promise_type = get_promise_type(ira->codegen, return_type);
     ZigType *async_return_type = get_error_union_type(ira->codegen, alloc_fn_error_set_type, promise_type);
@@ -22033,8 +22043,8 @@ static IrInstruction *ir_analyze_instruction_coro_promise(IrAnalyze *ira, IrInst
 }

 static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, IrInstructionCoroAllocHelper *instruction) {
-    IrInstruction *alloc_fn = instruction->alloc_fn->child;
-    if (type_is_invalid(alloc_fn->value.type))
+    IrInstruction *realloc_fn = instruction->realloc_fn->child;
+    if (type_is_invalid(realloc_fn->value.type))
         return ira->codegen->invalid_instruction;

     IrInstruction *coro_size = instruction->coro_size->child;
@@ -22042,7 +22052,7 @@ static IrInstruction *ir_analyze_instruction_coro_alloc_helper(IrAnalyze *ira, I
         return ira->codegen->invalid_instruction;

     IrInstruction *result = ir_build_coro_alloc_helper(&ira->new_irb, instruction->base.scope,
-        instruction->base.source_node, alloc_fn, coro_size);
+        instruction->base.source_node, realloc_fn, coro_size);
     ZigType *u8_ptr_type = get_pointer_to_type(ira->codegen, ira->codegen->builtin_types.entry_u8, false);
     result->value.type = get_optional_type(ira->codegen, u8_ptr_type);
     return result;
@@ -1286,7 +1286,7 @@ static void ir_print_promise_result_type(IrPrint *irp, IrInstructionPromiseResul

 static void ir_print_coro_alloc_helper(IrPrint *irp, IrInstructionCoroAllocHelper *instruction) {
     fprintf(irp->f, "@coroAllocHelper(");
-    ir_print_other_instruction(irp, instruction->alloc_fn);
+    ir_print_other_instruction(irp, instruction->realloc_fn);
     fprintf(irp->f, ",");
     ir_print_other_instruction(irp, instruction->coro_size);
     fprintf(irp->f, ")");
@@ -80,7 +80,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
         /// The caller owns the returned memory. ArrayList becomes empty.
         pub fn toOwnedSlice(self: *Self) []align(A) T {
             const allocator = self.allocator;
-            const result = allocator.alignedShrink(T, A, self.items, self.len);
+            const result = allocator.shrink(self.items, self.len);
             self.* = init(allocator);
             return result;
         }
@@ -144,6 +144,9 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
         pub fn shrink(self: *Self, new_len: usize) void {
             assert(new_len <= self.len);
             self.len = new_len;
+            self.items = self.allocator.realloc(self.items, new_len) catch |e| switch (e) {
+                error.OutOfMemory => return, // no problem, capacity is still correct then.
+            };
         }

         pub fn ensureCapacity(self: *Self, new_capacity: usize) !void {
@@ -153,7 +156,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
                 better_capacity += better_capacity / 2 + 8;
                 if (better_capacity >= new_capacity) break;
             }
-            self.items = try self.allocator.alignedRealloc(T, A, self.items, better_capacity);
+            self.items = try self.allocator.realloc(self.items, better_capacity);
         }

         pub fn addOne(self: *Self) !*T {
@@ -50,7 +50,7 @@ pub const Buffer = struct {
     /// is safe to `deinit`.
     pub fn toOwnedSlice(self: *Buffer) []u8 {
        const allocator = self.list.allocator;
-        const result = allocator.shrink(u8, self.list.items, self.len());
+        const result = allocator.shrink(self.list.items, self.len());
        self.* = initNull(allocator);
        return result;
    }
@@ -53,7 +53,7 @@ pub extern "c" fn rmdir(path: [*]const u8) c_int;

 pub extern "c" fn aligned_alloc(alignment: usize, size: usize) ?*c_void;
 pub extern "c" fn malloc(usize) ?*c_void;
-pub extern "c" fn realloc(*c_void, usize) ?*c_void;
+pub extern "c" fn realloc(?*c_void, usize) ?*c_void;
 pub extern "c" fn free(*c_void) void;
 pub extern "c" fn posix_memalign(memptr: **c_void, alignment: usize, size: usize) c_int;

@@ -1072,7 +1072,7 @@ fn openSelfDebugInfoMacOs(allocator: *mem.Allocator) !DebugInfo {
             .n_value = symbols_buf[symbol_index - 1].nlist.n_value + last_len,
         };

-        const symbols = allocator.shrink(MachoSymbol, symbols_buf, symbol_index);
+        const symbols = allocator.shrink(symbols_buf, symbol_index);

         // Even though lld emits symbols in ascending order, this debug code
         // should work for programs linked in any valid way.
@@ -21,44 +21,37 @@ pub const FailingAllocator = struct {
             .freed_bytes = 0,
             .deallocations = 0,
             .allocator = mem.Allocator{
-                .allocFn = alloc,
                 .reallocFn = realloc,
-                .freeFn = free,
+                .shrinkFn = shrink,
             },
         };
     }

-    fn alloc(allocator: *mem.Allocator, n: usize, alignment: u29) ![]u8 {
+    fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
         const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
         if (self.index == self.fail_index) {
             return error.OutOfMemory;
         }
-        const result = try self.internal_allocator.allocFn(self.internal_allocator, n, alignment);
-        self.allocated_bytes += result.len;
-        self.index += 1;
-        return result;
-    }
-
-    fn realloc(allocator: *mem.Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        if (self.index == self.fail_index) {
-            return error.OutOfMemory;
-        }
-        const result = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, new_size, alignment);
-        self.allocated_bytes += new_size - old_mem.len;
+        const result = try self.internal_allocator.reallocFn(
+            self.internal_allocator,
+            old_mem,
+            old_align,
+            new_size,
+            new_align,
+        );
+        if (new_size <= old_mem.len) {
+            self.freed_bytes += old_mem.len - new_size;
+        } else {
+            self.allocated_bytes += new_size - old_mem.len;
+        }
+        self.deallocations += 1;
         self.index += 1;
         return result;
     }

-    fn free(allocator: *mem.Allocator, bytes: []u8) void {
+    fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
         const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        self.freed_bytes += bytes.len;
+        self.freed_bytes += old_mem.len - new_size;
         self.deallocations += 1;
-        return self.internal_allocator.freeFn(self.internal_allocator, bytes);
+        return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
     }
 };
254 std/heap.zig
@@ -13,30 +13,21 @@ const Allocator = mem.Allocator;
 pub const c_allocator = &c_allocator_state;
 var c_allocator_state = Allocator{
-    .allocFn = cAlloc,
     .reallocFn = cRealloc,
-    .freeFn = cFree,
+    .shrinkFn = cShrink,
 };

-fn cAlloc(self: *Allocator, n: usize, alignment: u29) ![]u8 {
-    assert(alignment <= @alignOf(c_longdouble));
-    return if (c.malloc(n)) |buf| @ptrCast([*]u8, buf)[0..n] else error.OutOfMemory;
-}
-
-fn cRealloc(self: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
-    if (c.realloc(old_ptr, new_size)) |buf| {
-        return @ptrCast([*]u8, buf)[0..new_size];
-    } else if (new_size <= old_mem.len) {
-        return old_mem[0..new_size];
-    } else {
-        return error.OutOfMemory;
-    }
+fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+    assert(new_align <= @alignOf(c_longdouble));
+    const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr);
+    const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory;
+    return @ptrCast([*]u8, buf)[0..new_size];
 }

-fn cFree(self: *Allocator, old_mem: []u8) void {
-    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
-    c.free(old_ptr);
+fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+    const old_ptr = @ptrCast(*c_void, old_mem.ptr);
+    const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size];
+    return @ptrCast([*]u8, buf)[0..new_size];
 }

 /// This allocator makes a syscall directly for every allocation and free.
@@ -50,9 +41,8 @@ pub const DirectAllocator = struct {
     pub fn init() DirectAllocator {
         return DirectAllocator{
             .allocator = Allocator{
-                .allocFn = alloc,
                 .reallocFn = realloc,
-                .freeFn = free,
+                .shrinkFn = shrink,
             },
             .heap_handle = if (builtin.os == Os.windows) null else {},
         };
@@ -116,62 +106,63 @@ pub const DirectAllocator = struct {
         }
     }

-    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
-
+    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
         switch (builtin.os) {
             Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
-                if (new_size <= old_mem.len) {
-                    const base_addr = @ptrToInt(old_mem.ptr);
-                    const old_addr_end = base_addr + old_mem.len;
-                    const new_addr_end = base_addr + new_size;
-                    const new_addr_end_rounded = mem.alignForward(new_addr_end, os.page_size);
-                    if (old_addr_end > new_addr_end_rounded) {
-                        _ = os.posix.munmap(new_addr_end_rounded, old_addr_end - new_addr_end_rounded);
-                    }
-                    return old_mem[0..new_size];
+                const base_addr = @ptrToInt(old_mem.ptr);
+                const old_addr_end = base_addr + old_mem.len;
+                const new_addr_end = base_addr + new_size;
+                const new_addr_end_rounded = mem.alignForward(new_addr_end, os.page_size);
+                if (old_addr_end > new_addr_end_rounded) {
+                    _ = os.posix.munmap(new_addr_end_rounded, old_addr_end - new_addr_end_rounded);
                 }
-
-                const result = try alloc(allocator, new_size, alignment);
-                mem.copy(u8, result, old_mem);
-                return result;
+                return old_mem[0..new_size];
             },
-            Os.windows => {
+            Os.windows => return realloc(allocator, old_mem, old_align, new_size, new_align) catch {
                 const old_adjusted_addr = @ptrToInt(old_mem.ptr);
                 const old_record_addr = old_adjusted_addr + old_mem.len;
                 const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
                 const old_ptr = @intToPtr(*c_void, root_addr);
-                const amt = new_size + alignment + @sizeOf(usize);
-                const new_ptr = os.windows.HeapReAlloc(self.heap_handle.?, 0, old_ptr, amt) orelse blk: {
-                    if (new_size > old_mem.len) return error.OutOfMemory;
-                    const new_record_addr = old_record_addr - new_size + old_mem.len;
-                    @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
-                    return old_mem[0..new_size];
-                };
-                const offset = old_adjusted_addr - root_addr;
-                const new_root_addr = @ptrToInt(new_ptr);
-                const new_adjusted_addr = new_root_addr + offset;
-                assert(new_adjusted_addr % alignment == 0);
-                const new_record_addr = new_adjusted_addr + new_size;
-                @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
-                return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
+                const new_record_addr = old_record_addr - new_size + old_mem.len;
+                @intToPtr(*align(1) usize, new_record_addr).* = root_addr;
+                return old_mem[0..new_size];
             },
             else => @compileError("Unsupported OS"),
         }
     }

-    fn free(allocator: *Allocator, bytes: []u8) void {
-        const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
-
+    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
         switch (builtin.os) {
             Os.linux, Os.macosx, Os.ios, Os.freebsd, Os.netbsd => {
-                _ = os.posix.munmap(@ptrToInt(bytes.ptr), bytes.len);
+                if (new_size <= old_mem.len and new_align <= old_align) {
+                    return shrink(allocator, old_mem, old_align, new_size, new_align);
+                }
+                const result = try alloc(allocator, new_size, new_align);
+                mem.copy(u8, result, old_mem);
+                _ = os.posix.munmap(@ptrToInt(old_mem.ptr), old_mem.len);
+                return result;
             },
             Os.windows => {
-                const record_addr = @ptrToInt(bytes.ptr) + bytes.len;
-                const root_addr = @intToPtr(*align(1) usize, record_addr).*;
-                const ptr = @intToPtr(*c_void, root_addr);
-                _ = os.windows.HeapFree(self.heap_handle.?, 0, ptr);
+                const self = @fieldParentPtr(DirectAllocator, "allocator", allocator);
+
+                const old_adjusted_addr = @ptrToInt(old_mem.ptr);
+                const old_record_addr = old_adjusted_addr + old_mem.len;
+                const root_addr = @intToPtr(*align(1) usize, old_record_addr).*;
+                const old_ptr = @intToPtr(*c_void, root_addr);
+                const amt = new_size + new_align + @sizeOf(usize);
+                const new_ptr = os.windows.HeapReAlloc(
+                    self.heap_handle.?,
+                    0,
+                    old_ptr,
+                    amt,
+                ) orelse return error.OutOfMemory;
+                const offset = old_adjusted_addr - root_addr;
+                const new_root_addr = @ptrToInt(new_ptr);
+                const new_adjusted_addr = new_root_addr + offset;
+                assert(new_adjusted_addr % new_align == 0);
+                const new_record_addr = new_adjusted_addr + new_size;
+                @intToPtr(*align(1) usize, new_record_addr).* = new_root_addr;
+                return @intToPtr([*]u8, new_adjusted_addr)[0..new_size];
             },
             else => @compileError("Unsupported OS"),
         }
@@ -192,9 +183,8 @@ pub const ArenaAllocator = struct {
     pub fn init(child_allocator: *Allocator) ArenaAllocator {
         return ArenaAllocator{
             .allocator = Allocator{
-                .allocFn = alloc,
                 .reallocFn = realloc,
-                .freeFn = free,
+                .shrinkFn = shrink,
             },
             .child_allocator = child_allocator,
             .buffer_list = std.LinkedList([]u8).init(),
@@ -253,17 +243,20 @@ pub const ArenaAllocator = struct {
         }
     }

-    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        if (new_size <= old_mem.len) {
-            return old_mem[0..new_size];
+    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+        if (new_size <= old_mem.len and new_align <= new_size) {
+            // We can't do anything with the memory, so tell the client to keep it.
+            return error.OutOfMemory;
         } else {
-            const result = try alloc(allocator, new_size, alignment);
+            const result = try alloc(allocator, new_size, new_align);
             mem.copy(u8, result, old_mem);
             return result;
         }
     }

-    fn free(allocator: *Allocator, bytes: []u8) void {}
+    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        return old_mem[0..new_size];
+    }
 };

 pub const FixedBufferAllocator = struct {
@@ -274,9 +267,8 @@ pub const FixedBufferAllocator = struct {
     pub fn init(buffer: []u8) FixedBufferAllocator {
         return FixedBufferAllocator{
             .allocator = Allocator{
-                .allocFn = alloc,
                 .reallocFn = realloc,
-                .freeFn = free,
+                .shrinkFn = shrink,
             },
             .buffer = buffer,
             .end_index = 0,
@@ -298,26 +290,31 @@ pub const FixedBufferAllocator = struct {
         return result;
     }

-    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         assert(old_mem.len <= self.end_index);
-        if (new_size <= old_mem.len) {
-            return old_mem[0..new_size];
-        } else if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len) {
+        if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and
+            mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr))
+        {
             const start_index = self.end_index - old_mem.len;
             const new_end_index = start_index + new_size;
             if (new_end_index > self.buffer.len) return error.OutOfMemory;
             const result = self.buffer[start_index..new_end_index];
             self.end_index = new_end_index;
             return result;
+        } else if (new_size <= old_mem.len and new_align <= old_align) {
+            // We can't do anything with the memory, so tell the client to keep it.
+            return error.OutOfMemory;
         } else {
-            const result = try alloc(allocator, new_size, alignment);
+            const result = try alloc(allocator, new_size, new_align);
             mem.copy(u8, result, old_mem);
             return result;
         }
     }

-    fn free(allocator: *Allocator, bytes: []u8) void {}
+    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        return old_mem[0..new_size];
+    }
 };

 pub const ThreadSafeFixedBufferAllocator = blk: {
@@ -333,9 +330,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
     pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
         return ThreadSafeFixedBufferAllocator{
             .allocator = Allocator{
-                .allocFn = alloc,
                 .reallocFn = realloc,
-                .freeFn = free,
+                .shrinkFn = shrink,
             },
             .buffer = buffer,
             .end_index = 0,
@@ -357,17 +353,20 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
         }
     }

-    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
-        if (new_size <= old_mem.len) {
-            return old_mem[0..new_size];
+    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+        if (new_size <= old_mem.len and new_align <= old_align) {
+            // We can't do anything useful with the memory, tell the client to keep it.
+            return error.OutOfMemory;
         } else {
-            const result = try alloc(allocator, new_size, alignment);
+            const result = try alloc(allocator, new_size, new_align);
             mem.copy(u8, result, old_mem);
             return result;
         }
     }

-    fn free(allocator: *Allocator, bytes: []u8) void {}
+    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        return old_mem[0..new_size];
+    }
 };
 }
 };
@@ -378,9 +377,8 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack
         .fallback_allocator = fallback_allocator,
         .fixed_buffer_allocator = undefined,
         .allocator = Allocator{
-            .allocFn = StackFallbackAllocator(size).alloc,
             .reallocFn = StackFallbackAllocator(size).realloc,
-            .freeFn = StackFallbackAllocator(size).free,
+            .shrinkFn = StackFallbackAllocator(size).shrink,
         },
     };
 }
@@ -399,13 +397,7 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
         return &self.allocator;
     }

-    fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
-        const self = @fieldParentPtr(Self, "allocator", allocator);
-        return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, n, alignment) catch
-            self.fallback_allocator.allocFn(self.fallback_allocator, n, alignment);
-    }
-
-    fn realloc(allocator: *Allocator, old_mem: []u8, new_size: usize, alignment: u29) ![]u8 {
+    fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
         const self = @fieldParentPtr(Self, "allocator", allocator);
         const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
             @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
@@ -413,37 +405,59 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             return FixedBufferAllocator.realloc(
                 &self.fixed_buffer_allocator.allocator,
                 old_mem,
+                old_align,
                 new_size,
-                alignment,
+                new_align,
             ) catch {
-                const result = try self.fallback_allocator.allocFn(
+                const result = try self.fallback_allocator.reallocFn(
                     self.fallback_allocator,
+                    ([*]u8)(undefined)[0..0],
+                    undefined,
                     new_size,
-                    alignment,
+                    new_align,
                 );
                 mem.copy(u8, result, old_mem);
                 return result;
             };
         }
-        return self.fallback_allocator.reallocFn(self.fallback_allocator, old_mem, new_size, alignment);
+        return self.fallback_allocator.reallocFn(
+            self.fallback_allocator,
+            old_mem,
+            old_align,
+            new_size,
+            new_align,
+        );
     }

-    fn free(allocator: *Allocator, bytes: []u8) void {
+    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
         const self = @fieldParentPtr(Self, "allocator", allocator);
-        const in_buffer = @ptrToInt(bytes.ptr) >= @ptrToInt(&self.buffer) and
-            @ptrToInt(bytes.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
-        if (!in_buffer) {
-            return self.fallback_allocator.freeFn(self.fallback_allocator, bytes);
+        const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
+            @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
+        if (in_buffer) {
+            return FixedBufferAllocator.shrink(
+                &self.fixed_buffer_allocator.allocator,
+                old_mem,
+                old_align,
+                new_size,
+                new_align,
+            );
         }
+        return self.fallback_allocator.shrinkFn(
+            self.fallback_allocator,
+            old_mem,
+            old_align,
+            new_size,
+            new_align,
+        );
     }
 };
 }

 test "c_allocator" {
     if (builtin.link_libc) {
-        var slice = c_allocator.alloc(u8, 50) catch return;
+        var slice = try c_allocator.alloc(u8, 50);
         defer c_allocator.free(slice);
-        slice = c_allocator.realloc(u8, slice, 100) catch return;
+        slice = try c_allocator.realloc(slice, 100);
     }
 }
@@ -486,10 +500,10 @@ test "FixedBufferAllocator Reuse memory on realloc" {

         var slice0 = try fixed_buffer_allocator.allocator.alloc(u8, 5);
         testing.expect(slice0.len == 5);
-        var slice1 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 10);
+        var slice1 = try fixed_buffer_allocator.allocator.realloc(slice0, 10);
         testing.expect(slice1.ptr == slice0.ptr);
         testing.expect(slice1.len == 10);
-        testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(u8, slice1, 11));
+        testing.expectError(error.OutOfMemory, fixed_buffer_allocator.allocator.realloc(slice1, 11));
     }
     // check that we don't re-use the memory if it's not the most recent block
     {
@@ -499,7 +513,7 @@ test "FixedBufferAllocator Reuse memory on realloc" {
         slice0[0] = 1;
         slice0[1] = 2;
         var slice1 = try fixed_buffer_allocator.allocator.alloc(u8, 2);
-        var slice2 = try fixed_buffer_allocator.allocator.realloc(u8, slice0, 4);
+        var slice2 = try fixed_buffer_allocator.allocator.realloc(slice0, 4);
         testing.expect(slice0.ptr != slice2.ptr);
         testing.expect(slice1.ptr != slice2.ptr);
         testing.expect(slice2[0] == 1);
@@ -523,7 +537,7 @@ fn testAllocator(allocator: *mem.Allocator) !void {
         item.*.* = @intCast(i32, i);
     }

-    slice = try allocator.realloc(*i32, slice, 20000);
+    slice = try allocator.realloc(slice, 20000);
     testing.expect(slice.len == 20000);

     for (slice[0..100]) |item, i| {
|
||||
allocator.destroy(item);
|
||||
}
|
||||
|
||||
slice = try allocator.realloc(*i32, slice, 50);
|
||||
slice = allocator.shrink(slice, 50);
|
||||
testing.expect(slice.len == 50);
|
||||
slice = try allocator.realloc(*i32, slice, 25);
|
||||
slice = allocator.shrink(slice, 25);
|
||||
testing.expect(slice.len == 25);
|
||||
slice = try allocator.realloc(*i32, slice, 0);
|
||||
slice = allocator.shrink(slice, 0);
|
||||
testing.expect(slice.len == 0);
|
||||
slice = try allocator.realloc(*i32, slice, 10);
|
||||
slice = try allocator.realloc(slice, 10);
|
||||
testing.expect(slice.len == 10);
|
||||
|
||||
allocator.free(slice);
|
||||
@ -548,22 +562,22 @@ fn testAllocatorAligned(allocator: *mem.Allocator, comptime alignment: u29) !voi
|
||||
var slice = try allocator.alignedAlloc(u8, alignment, 10);
|
||||
testing.expect(slice.len == 10);
|
||||
// grow
|
||||
slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
|
||||
slice = try allocator.realloc(slice, 100);
|
||||
testing.expect(slice.len == 100);
|
||||
// shrink
|
||||
slice = try allocator.alignedRealloc(u8, alignment, slice, 10);
|
||||
slice = allocator.shrink(slice, 10);
|
||||
testing.expect(slice.len == 10);
|
||||
// go to zero
|
||||
slice = try allocator.alignedRealloc(u8, alignment, slice, 0);
|
||||
slice = allocator.shrink(slice, 0);
|
||||
testing.expect(slice.len == 0);
|
||||
// realloc from zero
|
||||
slice = try allocator.alignedRealloc(u8, alignment, slice, 100);
|
||||
slice = try allocator.realloc(slice, 100);
|
||||
testing.expect(slice.len == 100);
|
||||
// shrink with shrink
|
||||
slice = allocator.alignedShrink(u8, alignment, slice, 10);
|
||||
slice = allocator.shrink(slice, 10);
|
||||
testing.expect(slice.len == 10);
|
||||
// shrink to zero
|
||||
slice = allocator.alignedShrink(u8, alignment, slice, 0);
|
||||
slice = allocator.shrink(slice, 0);
|
||||
testing.expect(slice.len == 0);
|
||||
}
|
||||
|
||||
@@ -578,19 +592,19 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
     var align_mask: usize = undefined;
     _ = @shlWithOverflow(usize, ~usize(0), USizeShift(@ctz(large_align)), &align_mask);

-    var slice = try allocator.allocFn(allocator, 500, large_align);
+    var slice = try allocator.alignedAlloc(u8, large_align, 500);
     testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

-    slice = try allocator.reallocFn(allocator, slice, 100, large_align);
+    slice = allocator.shrink(slice, 100);
     testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

-    slice = try allocator.reallocFn(allocator, slice, 5000, large_align);
+    slice = try allocator.realloc(slice, 5000);
     testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

-    slice = try allocator.reallocFn(allocator, slice, 10, large_align);
+    slice = allocator.shrink(slice, 10);
     testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

-    slice = try allocator.reallocFn(allocator, slice, 20000, large_align);
+    slice = try allocator.realloc(slice, 20000);
     testing.expect(@ptrToInt(slice.ptr) & align_mask == @ptrToInt(slice.ptr));

     allocator.free(slice);
@@ -60,7 +60,7 @@ pub const Int = struct {
             return;
         }

-        self.limbs = try self.allocator.realloc(Limb, self.limbs, capacity);
+        self.limbs = try self.allocator.realloc(self.limbs, capacity);
     }

     pub fn deinit(self: *Int) void {
176 std/mem.zig
@@ -11,31 +11,64 @@ const testing = std.testing;
 pub const Allocator = struct {
     pub const Error = error{OutOfMemory};

-    /// Allocate byte_count bytes and return them in a slice, with the
-    /// slice's pointer aligned at least to alignment bytes.
-    /// The returned newly allocated memory is undefined.
-    /// `alignment` is guaranteed to be >= 1
-    /// `alignment` is guaranteed to be a power of 2
-    allocFn: fn (self: *Allocator, byte_count: usize, alignment: u29) Error![]u8,
-
-    /// If `new_byte_count > old_mem.len`:
-    /// * `old_mem.len` is the same as what was returned from allocFn or reallocFn.
-    /// * alignment >= alignment of old_mem.ptr
-    ///
-    /// If `new_byte_count <= old_mem.len`:
-    /// * this function must return successfully.
-    /// * alignment <= alignment of old_mem.ptr
-    ///
+    /// Realloc is used to modify the size or alignment of an existing allocation,
+    /// as well as to provide the allocator with an opportunity to move an allocation
+    /// to a better location.
+    /// When the size/alignment is greater than the previous allocation, this function
+    /// returns `error.OutOfMemory` when the requested new allocation could not be granted.
+    /// When the size/alignment is less than or equal to the previous allocation,
+    /// this function returns `error.OutOfMemory` when the allocator decides the client
+    /// would be better off keeping the extra alignment/size. Clients will call
+    /// `shrinkFn` when they require the allocator to track a new alignment/size,
+    /// and so this function should only return success when the allocator considers
+    /// the reallocation desirable from the allocator's perspective.
+    /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle
+    /// reallocation failure, even when `new_n` <= `old_mem.len`. A `FixedBufferAllocator`
+    /// would always return `error.OutOfMemory` for `reallocFn` when the size/alignment
+    /// is less than or equal to the old allocation, because it cannot reclaim the memory,
+    /// and thus the `std.ArrayList` would be better off retaining its capacity.
     /// When `reallocFn` returns,
     /// `return_value[0..min(old_mem.len, new_byte_count)]` must be the same
     /// as `old_mem` was when `reallocFn` is called. The bytes of
     /// `return_value[old_mem.len..]` have undefined values.
-    /// `alignment` is guaranteed to be >= 1
-    /// `alignment` is guaranteed to be a power of 2
-    reallocFn: fn (self: *Allocator, old_mem: []u8, new_byte_count: usize, alignment: u29) Error![]u8,
+    /// The returned slice must have its pointer aligned at least to `new_alignment` bytes.
+    reallocFn: fn (
+        self: *Allocator,
+        // Guaranteed to be the same as what was returned from most recent call to
+        // `reallocFn` or `shrinkFn`.
+        // If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
+        // is guaranteed to be >= 1.
+        old_mem: []u8,
+        // If `old_mem.len == 0` then this is `undefined`, otherwise:
+        // Guaranteed to be the same as what was returned from most recent call to
+        // `reallocFn` or `shrinkFn`.
+        // Guaranteed to be >= 1.
+        // Guaranteed to be a power of 2.
+        old_alignment: u29,
+        // If `new_byte_count` is 0 then this is a free and it is guaranteed that
+        // `old_mem.len != 0`.
+        new_byte_count: usize,
+        // Guaranteed to be >= 1.
+        // Guaranteed to be a power of 2.
+        // Returned slice's pointer must have this alignment.
+        new_alignment: u29,
+    ) Error![]u8,

-    /// Guaranteed: `old_mem.len` is the same as what was returned from `allocFn` or `reallocFn`
-    freeFn: fn (self: *Allocator, old_mem: []u8) void,
+    /// This function deallocates memory. It must succeed.
+    shrinkFn: fn (
+        self: *Allocator,
+        // Guaranteed to be the same as what was returned from most recent call to
+        // `reallocFn` or `shrinkFn`.
+        old_mem: []u8,
+        // Guaranteed to be the same as what was returned from most recent call to
+        // `reallocFn` or `shrinkFn`.
+        old_alignment: u29,
+        // Guaranteed to be less than or equal to `old_mem.len`.
+        new_byte_count: usize,
+        // If `new_byte_count == 0` then this is `undefined`, otherwise:
+        // Guaranteed to be less than or equal to `old_alignment`.
+        new_alignment: u29,
+    ) []u8,

     /// Call `destroy` with the result.
     /// Returns undefined memory.
@@ -47,20 +80,29 @@ pub const Allocator = struct {

     /// `ptr` should be the return value of `create`
     pub fn destroy(self: *Allocator, ptr: var) void {
+        const T = @typeOf(ptr).Child;
+        if (@sizeOf(T) == 0) return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
-        self.freeFn(self, non_const_ptr[0..@sizeOf(@typeOf(ptr).Child)]);
+        const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
+        assert(shrink_result.len == 0);
     }

     pub fn alloc(self: *Allocator, comptime T: type, n: usize) ![]T {
         return self.alignedAlloc(T, @alignOf(T), n);
     }

-    pub fn alignedAlloc(self: *Allocator, comptime T: type, comptime alignment: u29, n: usize) ![]align(alignment) T {
+    pub fn alignedAlloc(
+        self: *Allocator,
+        comptime T: type,
+        comptime alignment: u29,
+        n: usize,
+    ) ![]align(alignment) T {
+        if (n == 0) {
+            return ([*]align(alignment) T)(undefined)[0..0];
+        }
         const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
-        const byte_slice = try self.allocFn(self, byte_count, alignment);
+        const byte_slice = try self.reallocFn(self, ([*]u8)(undefined)[0..0], undefined, byte_count, alignment);
         assert(byte_slice.len == byte_count);
         // This loop gets optimized out in ReleaseFast mode
         for (byte_slice) |*byte| {
@@ -69,62 +111,106 @@ pub const Allocator = struct {
         return @bytesToSlice(T, @alignCast(alignment, byte_slice));
     }

-    pub fn realloc(self: *Allocator, comptime T: type, old_mem: []T, n: usize) ![]T {
-        return self.alignedRealloc(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
+    /// This function requests a new byte size for an existing allocation,
+    /// which can be larger, smaller, or the same size as the old memory
+    /// allocation.
+    /// This function is preferred over `shrink`, because it can fail, even
+    /// when shrinking. This gives the allocator a chance to perform a
+    /// cheap shrink operation if possible, or otherwise return OutOfMemory,
+    /// indicating that the caller should keep their capacity, for example
+    /// in `std.ArrayList.shrink`.
+    /// If you need guaranteed success, call `shrink`.
+    /// If `new_n` is 0, this is the same as `free` and it always succeeds.
+    pub fn realloc(self: *Allocator, old_mem: var, new_n: usize) t: {
+        const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
+        break :t Error![]align(Slice.alignment) Slice.child;
+    } {
+        const old_alignment = @typeInfo(@typeOf(old_mem)).Pointer.alignment;
+        return self.alignedRealloc(old_mem, old_alignment, new_n);
     }

-    pub fn alignedRealloc(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) ![]align(alignment) T {
+    /// This is the same as `realloc`, except caller may additionally request
+    /// a new alignment, which can be larger, smaller, or the same as the old
+    /// allocation.
+    pub fn alignedRealloc(
+        self: *Allocator,
+        old_mem: var,
+        comptime new_alignment: u29,
+        new_n: usize,
+    ) Error![]align(new_alignment) @typeInfo(@typeOf(old_mem)).Pointer.child {
+        const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
+        const T = Slice.child;
         if (old_mem.len == 0) {
-            return self.alignedAlloc(T, alignment, n);
+            return self.alignedAlloc(T, new_alignment, new_n);
         }
-        if (n == 0) {
+        if (new_n == 0) {
             self.free(old_mem);
-            return ([*]align(alignment) T)(undefined)[0..0];
+            return ([*]align(new_alignment) T)(undefined)[0..0];
         }

         const old_byte_slice = @sliceToBytes(old_mem);
-        const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory;
-        const byte_slice = try self.reallocFn(self, old_byte_slice, byte_count, alignment);
+        const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
+        const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
         assert(byte_slice.len == byte_count);
-        if (n > old_mem.len) {
+        if (new_n > old_mem.len) {
             // This loop gets optimized out in ReleaseFast mode
             for (byte_slice[old_byte_slice.len..]) |*byte| {
                 byte.* = undefined;
             }
         }
-        return @bytesToSlice(T, @alignCast(alignment, byte_slice));
+        return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
     }

-    /// Reallocate, but `n` must be less than or equal to `old_mem.len`.
-    /// Unlike `realloc`, this function cannot fail.
     /// Prefer calling realloc to shrink if you can tolerate failure, such as
     /// in an ArrayList data structure with a storage capacity.
+    /// Shrink always succeeds, and `new_n` must be <= `old_mem.len`.
     /// Returned slice has same alignment as old_mem.
     /// Shrinking to 0 is the same as calling `free`.
-    pub fn shrink(self: *Allocator, comptime T: type, old_mem: []T, n: usize) []T {
-        return self.alignedShrink(T, @alignOf(T), @alignCast(@alignOf(T), old_mem), n);
+    pub fn shrink(self: *Allocator, old_mem: var, new_n: usize) t: {
+        const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
+        break :t []align(Slice.alignment) Slice.child;
+    } {
+        const old_alignment = @typeInfo(@typeOf(old_mem)).Pointer.alignment;
+        return self.alignedShrink(old_mem, old_alignment, new_n);
     }

-    pub fn alignedShrink(self: *Allocator, comptime T: type, comptime alignment: u29, old_mem: []align(alignment) T, n: usize) []align(alignment) T {
-        if (n == 0) {
+    /// This is the same as `shrink`, except caller may additionally request
+    /// a new alignment, which must be smaller or the same as the old
+    /// allocation.
+    pub fn alignedShrink(
+        self: *Allocator,
+        old_mem: var,
+        comptime new_alignment: u29,
+        new_n: usize,
+    ) []align(new_alignment) @typeInfo(@typeOf(old_mem)).Pointer.child {
+        const Slice = @typeInfo(@typeOf(old_mem)).Pointer;
+        const T = Slice.child;
+
+        if (new_n == 0) {
             self.free(old_mem);
             return old_mem[0..0];
         }

-        assert(n <= old_mem.len);
+        assert(new_n <= old_mem.len);
+        assert(new_alignment <= Slice.alignment);

         // Here we skip the overflow checking on the multiplication because
-        // n <= old_mem.len and the multiplication didn't overflow for that operation.
-        const byte_count = @sizeOf(T) * n;
+        // new_n <= old_mem.len and the multiplication didn't overflow for that operation.
+        const byte_count = @sizeOf(T) * new_n;

         const old_byte_slice = @sliceToBytes(old_mem);
-        const byte_slice = self.reallocFn(self, old_byte_slice, byte_count, alignment) catch unreachable;
+        const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
         assert(byte_slice.len == byte_count);
-        return @bytesToSlice(T, @alignCast(alignment, byte_slice));
+        return @bytesToSlice(T, @alignCast(new_alignment, byte_slice));
     }

     pub fn free(self: *Allocator, memory: var) void {
+        const Slice = @typeInfo(@typeOf(memory)).Pointer;
         const bytes = @sliceToBytes(memory);
         if (bytes.len == 0) return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
-        self.freeFn(self, non_const_ptr[0..bytes.len]);
+        const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes.len], Slice.alignment, 0, 1);
+        assert(shrink_result.len == 0);
     }
 };
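Taken together, the doc comments above imply the following caller-side pattern. This is a hedged sketch, not part of the commit: the `demo` function is invented for illustration, but every call in it is one of the `std.mem.Allocator` methods defined above.

    const std = @import("std");

    fn demo(allocator: *std.mem.Allocator) !void {
        // Allocation is a reallocFn call from a zero-length slice under the hood.
        var buf = try allocator.alloc(u8, 100);

        // Growing may fail.
        buf = try allocator.realloc(buf, 200);

        // Shrinking via realloc may also fail: the allocator can return
        // error.OutOfMemory to tell a capacity-tracking caller (such as
        // std.ArrayList) to keep the extra memory.
        buf = allocator.realloc(buf, 50) catch buf[0..50];

        // shrink cannot fail, and shrinking to 0 is the same as free.
        buf = allocator.shrink(buf, 10);
        allocator.free(buf);
    }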
10 std/os.zig
@@ -814,7 +814,7 @@ pub fn getEnvVarOwned(allocator: *mem.Allocator, key: []const u8) GetEnvVarOwned
     }

     if (result > buf.len) {
-        buf = try allocator.realloc(u16, buf, result);
+        buf = try allocator.realloc(buf, result);
         continue;
     }

@@ -1648,7 +1648,7 @@ pub const Dir = struct {
             switch (err) {
                 posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
                 posix.EINVAL => {
-                    self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
+                    self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
                     continue;
                 },
                 else => return unexpectedErrorPosix(err),
@@ -1730,7 +1730,7 @@ pub const Dir = struct {
             switch (err) {
                 posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
                 posix.EINVAL => {
-                    self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
+                    self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
                     continue;
                 },
                 else => return unexpectedErrorPosix(err),
@@ -1784,7 +1784,7 @@ pub const Dir = struct {
             switch (err) {
                 posix.EBADF, posix.EFAULT, posix.ENOTDIR => unreachable,
                 posix.EINVAL => {
-                    self.handle.buf = try self.allocator.realloc(u8, self.handle.buf, self.handle.buf.len * 2);
+                    self.handle.buf = try self.allocator.realloc(self.handle.buf, self.handle.buf.len * 2);
                    continue;
                },
                else => return unexpectedErrorPosix(err),
@@ -3279,7 +3279,7 @@ pub fn cpuCount(fallback_allocator: *mem.Allocator) CpuCountError!usize {
            }
            return sum;
        } else {
-            set = try allocator.realloc(usize, set, set.len * 2);
+            set = try allocator.realloc(set, set.len * 2);
            continue;
        }
    },

|
||||
result_index += 1;
|
||||
}
|
||||
|
||||
return allocator.shrink(u8, result, result_index);
|
||||
return allocator.shrink(result, result_index);
|
||||
}
|
||||
|
||||
/// This function is like a series of `cd` statements executed one after another.
|
||||
@ -634,7 +634,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
|
||||
result_index += 1;
|
||||
}
|
||||
|
||||
return allocator.shrink(u8, result, result_index);
|
||||
return allocator.shrink(result, result_index);
|
||||
}
|
||||
|
||||
test "os.path.resolve" {
|
||||
|
@@ -141,7 +141,7 @@ pub fn PriorityQueue(comptime T: type) type {
             better_capacity += better_capacity / 2 + 8;
             if (better_capacity >= new_capacity) break;
         }
         self.items = try self.allocator.realloc(T, self.items, better_capacity);
-        self.items = try self.allocator.realloc(T, self.items, better_capacity);
+        self.items = try self.allocator.realloc(self.items, better_capacity);
     }

     pub fn resize(self: *Self, new_len: usize) !void {
@@ -150,6 +150,7 @@ pub fn PriorityQueue(comptime T: type) type {
     }

     pub fn shrink(self: *Self, new_len: usize) void {
+        // TODO take advantage of the new realloc semantics
         assert(new_len <= self.len);
         self.len = new_len;
     }
@@ -169,11 +169,11 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         const new_cap_shelf_count = shelfCount(new_capacity);
         const old_shelf_count = @intCast(ShelfIndex, self.dynamic_segments.len);
         if (new_cap_shelf_count > old_shelf_count) {
-            self.dynamic_segments = try self.allocator.realloc([*]T, self.dynamic_segments, new_cap_shelf_count);
+            self.dynamic_segments = try self.allocator.realloc(self.dynamic_segments, new_cap_shelf_count);
             var i = old_shelf_count;
             errdefer {
                 self.freeShelves(i, old_shelf_count);
-                self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, old_shelf_count);
+                self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, old_shelf_count);
             }
             while (i < new_cap_shelf_count) : (i += 1) {
                 self.dynamic_segments[i] = (try self.allocator.alloc(T, shelfSize(i))).ptr;
@@ -199,11 +199,12 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
         }

         self.freeShelves(old_shelf_count, new_cap_shelf_count);
-        self.dynamic_segments = self.allocator.shrink([*]T, self.dynamic_segments, new_cap_shelf_count);
+        self.dynamic_segments = self.allocator.shrink(self.dynamic_segments, new_cap_shelf_count);
     }

     pub fn shrink(self: *Self, new_len: usize) void {
         assert(new_len <= self.len);
+        // TODO take advantage of the new realloc semantics
         self.len = new_len;
     }

@@ -316,11 +316,13 @@ pub const CompareOutputContext = struct {
             Term.Exited => |code| {
                 if (code != 0) {
                     warn("Process {} exited with error code {}\n", full_exe_path, code);
+                    printInvocation(args.toSliceConst());
                     return error.TestFailed;
                 }
             },
             else => {
                 warn("Process {} terminated unexpectedly\n", full_exe_path);
+                printInvocation(args.toSliceConst());
                 return error.TestFailed;
             },
         }
@@ -681,11 +683,13 @@ pub const CompileErrorContext = struct {
         switch (term) {
             Term.Exited => |code| {
                 if (code == 0) {
+                    printInvocation(zig_args.toSliceConst());
                     return error.CompilationIncorrectlySucceeded;
                 }
             },
             else => {
                 warn("Process {} terminated unexpectedly\n", b.zig_exe);
+                printInvocation(zig_args.toSliceConst());
                 return error.TestFailed;
             },
         }
@@ -752,13 +756,6 @@ pub const CompileErrorContext = struct {
         }
     };

-    fn printInvocation(args: []const []const u8) void {
-        for (args) |arg| {
-            warn("{} ", arg);
-        }
-        warn("\n");
-    }
-
     pub fn create(self: *CompileErrorContext, name: []const u8, source: []const u8, expected_lines: ...) *TestCase {
         const tc = self.b.allocator.create(TestCase) catch unreachable;
         tc.* = TestCase{
@@ -1240,3 +1237,10 @@ pub const GenHContext = struct {
         self.step.dependOn(&cmp_h.step);
     }
 };
+
+fn printInvocation(args: []const []const u8) void {
+    for (args) |arg| {
+        warn("{} ", arg);
+    }
+    warn("\n");
+}