mirror of
https://github.com/ziglang/zig.git
synced 2024-11-26 23:22:44 +00:00
Merge pull request #21063 from mlugg/incremental
Incremental compilation progress
This commit is contained in:
commit
4929cf23ce
@ -72,6 +72,11 @@ pub fn BoundedArrayAligned(
|
||||
self.len = @intCast(len);
|
||||
}
|
||||
|
||||
/// Remove all elements from the slice.
|
||||
pub fn clear(self: *Self) void {
|
||||
self.len = 0;
|
||||
}
|
||||
|
||||
/// Copy the content of an existing slice.
|
||||
pub fn fromSlice(m: []const T) error{Overflow}!Self {
|
||||
var list = try init(m.len);
|
||||
|
@ -603,7 +603,7 @@ pub const Inst = struct {
|
||||
/// Uses the `un_node` field.
|
||||
typeof,
|
||||
/// Implements `@TypeOf` for one operand.
|
||||
/// Uses the `pl_node` field.
|
||||
/// Uses the `pl_node` field. Payload is `Block`.
|
||||
typeof_builtin,
|
||||
/// Given a value, look at the type of it, which must be an integer type.
|
||||
/// Returns the integer type for the RHS of a shift operation.
|
||||
@ -2727,6 +2727,9 @@ pub const Inst = struct {
|
||||
field_name_start: NullTerminatedString,
|
||||
};
|
||||
|
||||
/// There is a body of instructions at `extra[body_index..][0..body_len]`.
|
||||
/// Trailing:
|
||||
/// 0. operand: Ref // for each `operands_len`
|
||||
pub const TypeOfPeer = struct {
|
||||
src_node: i32,
|
||||
body_len: u32,
|
||||
@ -2844,6 +2847,40 @@ pub const Inst = struct {
|
||||
src_line: u32,
|
||||
};
|
||||
|
||||
/// Trailing:
|
||||
/// 0. multi_cases_len: u32 // if `has_multi_cases`
|
||||
/// 1. err_capture_inst: u32 // if `any_uses_err_capture`
|
||||
/// 2. non_err_body {
|
||||
/// info: ProngInfo,
|
||||
/// inst: Index // for every `info.body_len`
|
||||
/// }
|
||||
/// 3. else_body { // if `has_else`
|
||||
/// info: ProngInfo,
|
||||
/// inst: Index // for every `info.body_len`
|
||||
/// }
|
||||
/// 4. scalar_cases: { // for every `scalar_cases_len`
|
||||
/// item: Ref,
|
||||
/// info: ProngInfo,
|
||||
/// inst: Index // for every `info.body_len`
|
||||
/// }
|
||||
/// 5. multi_cases: { // for every `multi_cases_len`
|
||||
/// items_len: u32,
|
||||
/// ranges_len: u32,
|
||||
/// info: ProngInfo,
|
||||
/// item: Ref // for every `items_len`
|
||||
/// ranges: { // for every `ranges_len`
|
||||
/// item_first: Ref,
|
||||
/// item_last: Ref,
|
||||
/// }
|
||||
/// inst: Index // for every `info.body_len`
|
||||
/// }
|
||||
///
|
||||
/// When analyzing a case body, the switch instruction itself refers to the
|
||||
/// captured error, or to the success value in `non_err_body`. Whether this
|
||||
/// is captured by reference or by value depends on whether the `byref` bit
|
||||
/// is set for the corresponding body. `err_capture_inst` refers to the error
|
||||
/// capture outside of the `switch`, i.e. `err` in
|
||||
/// `x catch |err| switch (err) { ... }`.
|
||||
pub const SwitchBlockErrUnion = struct {
|
||||
operand: Ref,
|
||||
bits: Bits,
|
||||
@ -3153,7 +3190,7 @@ pub const Inst = struct {
|
||||
/// 1. captures_len: u32 // if has_captures_len
|
||||
/// 2. body_len: u32, // if has_body_len
|
||||
/// 3. fields_len: u32, // if has_fields_len
|
||||
/// 4. decls_len: u37, // if has_decls_len
|
||||
/// 4. decls_len: u32, // if has_decls_len
|
||||
/// 5. capture: Capture // for every captures_len
|
||||
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
|
||||
/// 7. inst: Index // for every body_len
|
||||
@ -3624,33 +3661,492 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
|
||||
}
|
||||
}
|
||||
|
||||
/// The iterator would have to allocate memory anyway to iterate. So here we populate
|
||||
/// an ArrayList as the result.
|
||||
pub fn findDecls(zir: Zir, list: *std.ArrayList(Inst.Index), decl_inst: Zir.Inst.Index) !void {
|
||||
/// Find all type declarations, recursively, within a `declaration` instruction. Does not recurse through
|
||||
/// said type declarations' declarations; to find all declarations, call this function on the declarations
|
||||
/// of the discovered types recursively.
|
||||
/// The iterator would have to allocate memory anyway to iterate, so an `ArrayList` is populated as the result.
|
||||
pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.Index), decl_inst: Zir.Inst.Index) !void {
|
||||
list.clearRetainingCapacity();
|
||||
const declaration, const extra_end = zir.getDeclaration(decl_inst);
|
||||
const bodies = declaration.getBodies(extra_end, zir);
|
||||
|
||||
try zir.findDeclsBody(list, bodies.value_body);
|
||||
if (bodies.align_body) |b| try zir.findDeclsBody(list, b);
|
||||
if (bodies.linksection_body) |b| try zir.findDeclsBody(list, b);
|
||||
if (bodies.addrspace_body) |b| try zir.findDeclsBody(list, b);
|
||||
// `defer` instructions duplicate the same body arbitrarily many times, but we only want to traverse
|
||||
// their contents once per defer. So, we store the extra index of the body here to deduplicate.
|
||||
var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{};
|
||||
defer found_defers.deinit(gpa);
|
||||
|
||||
try zir.findDeclsBody(gpa, list, &found_defers, bodies.value_body);
|
||||
if (bodies.align_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
|
||||
if (bodies.linksection_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
|
||||
if (bodies.addrspace_body) |b| try zir.findDeclsBody(gpa, list, &found_defers, b);
|
||||
}
|
||||
|
||||
fn findDeclsInner(
|
||||
zir: Zir,
|
||||
list: *std.ArrayList(Inst.Index),
|
||||
gpa: Allocator,
|
||||
list: *std.ArrayListUnmanaged(Inst.Index),
|
||||
defers: *std.AutoHashMapUnmanaged(u32, void),
|
||||
inst: Inst.Index,
|
||||
) Allocator.Error!void {
|
||||
const tags = zir.instructions.items(.tag);
|
||||
const datas = zir.instructions.items(.data);
|
||||
|
||||
switch (tags[@intFromEnum(inst)]) {
|
||||
.declaration => unreachable,
|
||||
|
||||
// Boring instruction tags first. These have no body and are not declarations or type declarations.
|
||||
.add,
|
||||
.addwrap,
|
||||
.add_sat,
|
||||
.add_unsafe,
|
||||
.sub,
|
||||
.subwrap,
|
||||
.sub_sat,
|
||||
.mul,
|
||||
.mulwrap,
|
||||
.mul_sat,
|
||||
.div_exact,
|
||||
.div_floor,
|
||||
.div_trunc,
|
||||
.mod,
|
||||
.rem,
|
||||
.mod_rem,
|
||||
.shl,
|
||||
.shl_exact,
|
||||
.shl_sat,
|
||||
.shr,
|
||||
.shr_exact,
|
||||
.param_anytype,
|
||||
.param_anytype_comptime,
|
||||
.array_cat,
|
||||
.array_mul,
|
||||
.array_type,
|
||||
.array_type_sentinel,
|
||||
.vector_type,
|
||||
.elem_type,
|
||||
.indexable_ptr_elem_type,
|
||||
.vector_elem_type,
|
||||
.indexable_ptr_len,
|
||||
.anyframe_type,
|
||||
.as_node,
|
||||
.as_shift_operand,
|
||||
.bit_and,
|
||||
.bitcast,
|
||||
.bit_not,
|
||||
.bit_or,
|
||||
.bool_not,
|
||||
.bool_br_and,
|
||||
.bool_br_or,
|
||||
.@"break",
|
||||
.break_inline,
|
||||
.check_comptime_control_flow,
|
||||
.builtin_call,
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_eq,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
.cmp_neq,
|
||||
.error_set_decl,
|
||||
.dbg_stmt,
|
||||
.dbg_var_ptr,
|
||||
.dbg_var_val,
|
||||
.decl_ref,
|
||||
.decl_val,
|
||||
.load,
|
||||
.div,
|
||||
.elem_ptr_node,
|
||||
.elem_ptr,
|
||||
.elem_val_node,
|
||||
.elem_val,
|
||||
.elem_val_imm,
|
||||
.ensure_result_used,
|
||||
.ensure_result_non_error,
|
||||
.ensure_err_union_payload_void,
|
||||
.error_union_type,
|
||||
.error_value,
|
||||
.@"export",
|
||||
.export_value,
|
||||
.field_ptr,
|
||||
.field_val,
|
||||
.field_ptr_named,
|
||||
.field_val_named,
|
||||
.import,
|
||||
.int,
|
||||
.int_big,
|
||||
.float,
|
||||
.float128,
|
||||
.int_type,
|
||||
.is_non_null,
|
||||
.is_non_null_ptr,
|
||||
.is_non_err,
|
||||
.is_non_err_ptr,
|
||||
.ret_is_non_err,
|
||||
.repeat,
|
||||
.repeat_inline,
|
||||
.for_len,
|
||||
.merge_error_sets,
|
||||
.ref,
|
||||
.ret_node,
|
||||
.ret_load,
|
||||
.ret_implicit,
|
||||
.ret_err_value,
|
||||
.ret_err_value_code,
|
||||
.ret_ptr,
|
||||
.ret_type,
|
||||
.ptr_type,
|
||||
.slice_start,
|
||||
.slice_end,
|
||||
.slice_sentinel,
|
||||
.slice_length,
|
||||
.store_node,
|
||||
.store_to_inferred_ptr,
|
||||
.str,
|
||||
.negate,
|
||||
.negate_wrap,
|
||||
.typeof,
|
||||
.typeof_log2_int_type,
|
||||
.@"unreachable",
|
||||
.xor,
|
||||
.optional_type,
|
||||
.optional_payload_safe,
|
||||
.optional_payload_unsafe,
|
||||
.optional_payload_safe_ptr,
|
||||
.optional_payload_unsafe_ptr,
|
||||
.err_union_payload_unsafe,
|
||||
.err_union_payload_unsafe_ptr,
|
||||
.err_union_code,
|
||||
.err_union_code_ptr,
|
||||
.enum_literal,
|
||||
.validate_deref,
|
||||
.validate_destructure,
|
||||
.field_type_ref,
|
||||
.opt_eu_base_ptr_init,
|
||||
.coerce_ptr_elem_ty,
|
||||
.validate_ref_ty,
|
||||
.struct_init_empty,
|
||||
.struct_init_empty_result,
|
||||
.struct_init_empty_ref_result,
|
||||
.struct_init_anon,
|
||||
.struct_init,
|
||||
.struct_init_ref,
|
||||
.validate_struct_init_ty,
|
||||
.validate_struct_init_result_ty,
|
||||
.validate_ptr_struct_init,
|
||||
.struct_init_field_type,
|
||||
.struct_init_field_ptr,
|
||||
.array_init_anon,
|
||||
.array_init,
|
||||
.array_init_ref,
|
||||
.validate_array_init_ty,
|
||||
.validate_array_init_result_ty,
|
||||
.validate_array_init_ref_ty,
|
||||
.validate_ptr_array_init,
|
||||
.array_init_elem_type,
|
||||
.array_init_elem_ptr,
|
||||
.union_init,
|
||||
.type_info,
|
||||
.size_of,
|
||||
.bit_size_of,
|
||||
.int_from_ptr,
|
||||
.compile_error,
|
||||
.set_eval_branch_quota,
|
||||
.int_from_enum,
|
||||
.align_of,
|
||||
.int_from_bool,
|
||||
.embed_file,
|
||||
.error_name,
|
||||
.panic,
|
||||
.trap,
|
||||
.set_runtime_safety,
|
||||
.sqrt,
|
||||
.sin,
|
||||
.cos,
|
||||
.tan,
|
||||
.exp,
|
||||
.exp2,
|
||||
.log,
|
||||
.log2,
|
||||
.log10,
|
||||
.abs,
|
||||
.floor,
|
||||
.ceil,
|
||||
.trunc,
|
||||
.round,
|
||||
.tag_name,
|
||||
.type_name,
|
||||
.frame_type,
|
||||
.frame_size,
|
||||
.int_from_float,
|
||||
.float_from_int,
|
||||
.ptr_from_int,
|
||||
.enum_from_int,
|
||||
.float_cast,
|
||||
.int_cast,
|
||||
.ptr_cast,
|
||||
.truncate,
|
||||
.has_decl,
|
||||
.has_field,
|
||||
.clz,
|
||||
.ctz,
|
||||
.pop_count,
|
||||
.byte_swap,
|
||||
.bit_reverse,
|
||||
.bit_offset_of,
|
||||
.offset_of,
|
||||
.splat,
|
||||
.reduce,
|
||||
.shuffle,
|
||||
.atomic_load,
|
||||
.atomic_rmw,
|
||||
.atomic_store,
|
||||
.mul_add,
|
||||
.memcpy,
|
||||
.memset,
|
||||
.min,
|
||||
.max,
|
||||
.alloc,
|
||||
.alloc_mut,
|
||||
.alloc_comptime_mut,
|
||||
.alloc_inferred,
|
||||
.alloc_inferred_mut,
|
||||
.alloc_inferred_comptime,
|
||||
.alloc_inferred_comptime_mut,
|
||||
.resolve_inferred_alloc,
|
||||
.make_ptr_const,
|
||||
.@"resume",
|
||||
.@"await",
|
||||
.save_err_ret_index,
|
||||
.restore_err_ret_index_unconditional,
|
||||
.restore_err_ret_index_fn_entry,
|
||||
=> return,
|
||||
|
||||
.extended => {
|
||||
const extended = datas[@intFromEnum(inst)].extended;
|
||||
switch (extended.opcode) {
|
||||
.value_placeholder => unreachable,
|
||||
|
||||
// Once again, we start with the boring tags.
|
||||
.variable,
|
||||
.this,
|
||||
.ret_addr,
|
||||
.builtin_src,
|
||||
.error_return_trace,
|
||||
.frame,
|
||||
.frame_address,
|
||||
.alloc,
|
||||
.builtin_extern,
|
||||
.@"asm",
|
||||
.asm_expr,
|
||||
.compile_log,
|
||||
.min_multi,
|
||||
.max_multi,
|
||||
.add_with_overflow,
|
||||
.sub_with_overflow,
|
||||
.mul_with_overflow,
|
||||
.shl_with_overflow,
|
||||
.c_undef,
|
||||
.c_include,
|
||||
.c_define,
|
||||
.wasm_memory_size,
|
||||
.wasm_memory_grow,
|
||||
.prefetch,
|
||||
.fence,
|
||||
.set_float_mode,
|
||||
.set_align_stack,
|
||||
.set_cold,
|
||||
.error_cast,
|
||||
.await_nosuspend,
|
||||
.breakpoint,
|
||||
.disable_instrumentation,
|
||||
.select,
|
||||
.int_from_error,
|
||||
.error_from_int,
|
||||
.builtin_async_call,
|
||||
.cmpxchg,
|
||||
.c_va_arg,
|
||||
.c_va_copy,
|
||||
.c_va_end,
|
||||
.c_va_start,
|
||||
.ptr_cast_full,
|
||||
.ptr_cast_no_dest,
|
||||
.work_item_id,
|
||||
.work_group_size,
|
||||
.work_group_id,
|
||||
.in_comptime,
|
||||
.restore_err_ret_index,
|
||||
.closure_get,
|
||||
.field_parent_ptr,
|
||||
=> return,
|
||||
|
||||
// `@TypeOf` has a body.
|
||||
.typeof_peer => {
|
||||
const extra = zir.extraData(Zir.Inst.TypeOfPeer, extended.operand);
|
||||
const body = zir.bodySlice(extra.data.body_index, extra.data.body_len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
|
||||
// Reifications and opaque declarations need tracking, but have no body.
|
||||
.reify, .opaque_decl => return list.append(gpa, inst),
|
||||
|
||||
// Struct declarations need tracking and have bodies.
|
||||
.struct_decl => {
|
||||
try list.append(gpa, inst);
|
||||
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
|
||||
var extra_index = extra.end;
|
||||
const captures_len = if (small.has_captures_len) blk: {
|
||||
const captures_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk captures_len;
|
||||
} else 0;
|
||||
const fields_len = if (small.has_fields_len) blk: {
|
||||
const fields_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk fields_len;
|
||||
} else 0;
|
||||
const decls_len = if (small.has_decls_len) blk: {
|
||||
const decls_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk decls_len;
|
||||
} else 0;
|
||||
extra_index += captures_len;
|
||||
if (small.has_backing_int) {
|
||||
const backing_int_body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
if (backing_int_body_len == 0) {
|
||||
extra_index += 1; // backing_int_ref
|
||||
} else {
|
||||
const body = zir.bodySlice(extra_index, backing_int_body_len);
|
||||
extra_index += backing_int_body_len;
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
}
|
||||
extra_index += decls_len;
|
||||
|
||||
// This ZIR is structured in a slightly awkward way, so we have to split up the iteration.
|
||||
// `extra_index` iterates `flags` (bags of bits).
|
||||
// `fields_extra_index` iterates `fields`.
|
||||
// We accumulate the total length of bodies into `total_bodies_len`. This is sufficient because
|
||||
// the bodies are packed together in `extra` and we only need to traverse their instructions (we
|
||||
// don't really care about the structure).
|
||||
|
||||
const bits_per_field = 4;
|
||||
const fields_per_u32 = 32 / bits_per_field;
|
||||
const bit_bags_count = std.math.divCeil(usize, fields_len, fields_per_u32) catch unreachable;
|
||||
var cur_bit_bag: u32 = undefined;
|
||||
|
||||
var fields_extra_index = extra_index + bit_bags_count;
|
||||
var total_bodies_len: u32 = 0;
|
||||
|
||||
for (0..fields_len) |field_i| {
|
||||
if (field_i % fields_per_u32 == 0) {
|
||||
cur_bit_bag = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
}
|
||||
|
||||
const has_align = @as(u1, @truncate(cur_bit_bag)) != 0;
|
||||
cur_bit_bag >>= 1;
|
||||
const has_init = @as(u1, @truncate(cur_bit_bag)) != 0;
|
||||
cur_bit_bag >>= 2; // also skip `is_comptime`; we don't care
|
||||
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
|
||||
cur_bit_bag >>= 1;
|
||||
|
||||
fields_extra_index += @intFromBool(!small.is_tuple); // field_name
|
||||
fields_extra_index += 1; // doc_comment
|
||||
|
||||
if (has_type_body) {
|
||||
const field_type_body_len = zir.extra[fields_extra_index];
|
||||
total_bodies_len += field_type_body_len;
|
||||
}
|
||||
fields_extra_index += 1; // field_type or field_type_body_len
|
||||
|
||||
if (has_align) {
|
||||
const align_body_len = zir.extra[fields_extra_index];
|
||||
fields_extra_index += 1;
|
||||
total_bodies_len += align_body_len;
|
||||
}
|
||||
|
||||
if (has_init) {
|
||||
const init_body_len = zir.extra[fields_extra_index];
|
||||
fields_extra_index += 1;
|
||||
total_bodies_len += init_body_len;
|
||||
}
|
||||
}
|
||||
|
||||
// Now, `fields_extra_index` points to `bodies`. Let's treat this as one big body.
|
||||
const merged_bodies = zir.bodySlice(fields_extra_index, total_bodies_len);
|
||||
try zir.findDeclsBody(gpa, list, defers, merged_bodies);
|
||||
},
|
||||
|
||||
// Union declarations need tracking and have a body.
|
||||
.union_decl => {
|
||||
try list.append(gpa, inst);
|
||||
|
||||
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
|
||||
const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
|
||||
var extra_index = extra.end;
|
||||
extra_index += @intFromBool(small.has_tag_type);
|
||||
const captures_len = if (small.has_captures_len) blk: {
|
||||
const captures_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk captures_len;
|
||||
} else 0;
|
||||
const body_len = if (small.has_body_len) blk: {
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk body_len;
|
||||
} else 0;
|
||||
extra_index += @intFromBool(small.has_fields_len);
|
||||
const decls_len = if (small.has_decls_len) blk: {
|
||||
const decls_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk decls_len;
|
||||
} else 0;
|
||||
extra_index += captures_len;
|
||||
extra_index += decls_len;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
|
||||
// Enum declarations need tracking and have a body.
|
||||
.enum_decl => {
|
||||
try list.append(gpa, inst);
|
||||
|
||||
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
|
||||
const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
|
||||
var extra_index = extra.end;
|
||||
extra_index += @intFromBool(small.has_tag_type);
|
||||
const captures_len = if (small.has_captures_len) blk: {
|
||||
const captures_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk captures_len;
|
||||
} else 0;
|
||||
const body_len = if (small.has_body_len) blk: {
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk body_len;
|
||||
} else 0;
|
||||
extra_index += @intFromBool(small.has_fields_len);
|
||||
const decls_len = if (small.has_decls_len) blk: {
|
||||
const decls_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
break :blk decls_len;
|
||||
} else 0;
|
||||
extra_index += captures_len;
|
||||
extra_index += decls_len;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
// Functions instructions are interesting and have a body.
|
||||
.func,
|
||||
.func_inferred,
|
||||
=> {
|
||||
try list.append(inst);
|
||||
try list.append(gpa, inst);
|
||||
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.Func, inst_data.payload_index);
|
||||
@ -3661,14 +4157,14 @@ fn findDeclsInner(
|
||||
else => {
|
||||
const body = zir.bodySlice(extra_index, extra.data.ret_body_len);
|
||||
extra_index += body.len;
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
}
|
||||
const body = zir.bodySlice(extra_index, extra.data.body_len);
|
||||
return zir.findDeclsBody(list, body);
|
||||
return zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
.func_fancy => {
|
||||
try list.append(inst);
|
||||
try list.append(gpa, inst);
|
||||
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.FuncFancy, inst_data.payload_index);
|
||||
@ -3679,7 +4175,7 @@ fn findDeclsInner(
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
extra_index += body.len;
|
||||
} else if (extra.data.bits.has_align_ref) {
|
||||
extra_index += 1;
|
||||
@ -3689,7 +4185,7 @@ fn findDeclsInner(
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
extra_index += body.len;
|
||||
} else if (extra.data.bits.has_addrspace_ref) {
|
||||
extra_index += 1;
|
||||
@ -3699,7 +4195,7 @@ fn findDeclsInner(
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
extra_index += body.len;
|
||||
} else if (extra.data.bits.has_section_ref) {
|
||||
extra_index += 1;
|
||||
@ -3709,7 +4205,7 @@ fn findDeclsInner(
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
extra_index += body.len;
|
||||
} else if (extra.data.bits.has_cc_ref) {
|
||||
extra_index += 1;
|
||||
@ -3719,7 +4215,7 @@ fn findDeclsInner(
|
||||
const body_len = zir.extra[extra_index];
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
extra_index += body.len;
|
||||
} else if (extra.data.bits.has_ret_ty_ref) {
|
||||
extra_index += 1;
|
||||
@ -3728,62 +4224,99 @@ fn findDeclsInner(
|
||||
extra_index += @intFromBool(extra.data.bits.has_any_noalias);
|
||||
|
||||
const body = zir.bodySlice(extra_index, extra.data.body_len);
|
||||
return zir.findDeclsBody(list, body);
|
||||
},
|
||||
.extended => {
|
||||
const extended = datas[@intFromEnum(inst)].extended;
|
||||
switch (extended.opcode) {
|
||||
|
||||
// Decl instructions are interesting but have no body.
|
||||
// TODO yes they do have a body actually. recurse over them just like block instructions.
|
||||
.struct_decl,
|
||||
.union_decl,
|
||||
.enum_decl,
|
||||
.opaque_decl,
|
||||
.reify,
|
||||
=> return list.append(inst),
|
||||
|
||||
else => return,
|
||||
}
|
||||
return zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
|
||||
// Block instructions, recurse over the bodies.
|
||||
|
||||
.block, .block_comptime, .block_inline => {
|
||||
.block,
|
||||
.block_comptime,
|
||||
.block_inline,
|
||||
.c_import,
|
||||
.typeof_builtin,
|
||||
.loop,
|
||||
=> {
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.Block, inst_data.payload_index);
|
||||
const body = zir.bodySlice(extra.end, extra.data.body_len);
|
||||
return zir.findDeclsBody(list, body);
|
||||
return zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
.condbr, .condbr_inline => {
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.CondBr, inst_data.payload_index);
|
||||
const then_body = zir.bodySlice(extra.end, extra.data.then_body_len);
|
||||
const else_body = zir.bodySlice(extra.end + then_body.len, extra.data.else_body_len);
|
||||
try zir.findDeclsBody(list, then_body);
|
||||
try zir.findDeclsBody(list, else_body);
|
||||
try zir.findDeclsBody(gpa, list, defers, then_body);
|
||||
try zir.findDeclsBody(gpa, list, defers, else_body);
|
||||
},
|
||||
.@"try", .try_ptr => {
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.Try, inst_data.payload_index);
|
||||
const body = zir.bodySlice(extra.end, extra.data.body_len);
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
.switch_block => return findDeclsSwitch(zir, list, inst),
|
||||
.switch_block, .switch_block_ref => return zir.findDeclsSwitch(gpa, list, defers, inst, .normal),
|
||||
.switch_block_err_union => return zir.findDeclsSwitch(gpa, list, defers, inst, .err_union),
|
||||
|
||||
.suspend_block => @panic("TODO iterate suspend block"),
|
||||
|
||||
else => return, // Regular instruction, not interesting.
|
||||
.param, .param_comptime => {
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_tok;
|
||||
const extra = zir.extraData(Inst.Param, inst_data.payload_index);
|
||||
const body = zir.bodySlice(extra.end, extra.data.body_len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
},
|
||||
|
||||
inline .call, .field_call => |tag| {
|
||||
const inst_data = datas[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(switch (tag) {
|
||||
.call => Inst.Call,
|
||||
.field_call => Inst.FieldCall,
|
||||
else => unreachable,
|
||||
}, inst_data.payload_index);
|
||||
// It's easiest to just combine all the arg bodies into one body, like we do above for `struct_decl`.
|
||||
const args_len = extra.data.flags.args_len;
|
||||
if (args_len > 0) {
|
||||
const first_arg_start_off = args_len;
|
||||
const final_arg_end_off = zir.extra[extra.end + args_len - 1];
|
||||
const args_body = zir.bodySlice(extra.end + first_arg_start_off, final_arg_end_off - first_arg_start_off);
|
||||
try zir.findDeclsBody(gpa, list, defers, args_body);
|
||||
}
|
||||
},
|
||||
.@"defer" => {
|
||||
const inst_data = datas[@intFromEnum(inst)].@"defer";
|
||||
const gop = try defers.getOrPut(gpa, inst_data.index);
|
||||
if (!gop.found_existing) {
|
||||
const body = zir.bodySlice(inst_data.index, inst_data.len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
},
|
||||
.defer_err_code => {
|
||||
const inst_data = datas[@intFromEnum(inst)].defer_err_code;
|
||||
const extra = zir.extraData(Inst.DeferErrCode, inst_data.payload_index).data;
|
||||
const gop = try defers.getOrPut(gpa, extra.index);
|
||||
if (!gop.found_existing) {
|
||||
const body = zir.bodySlice(extra.index, extra.len);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn findDeclsSwitch(
|
||||
zir: Zir,
|
||||
list: *std.ArrayList(Inst.Index),
|
||||
gpa: Allocator,
|
||||
list: *std.ArrayListUnmanaged(Inst.Index),
|
||||
defers: *std.AutoHashMapUnmanaged(u32, void),
|
||||
inst: Inst.Index,
|
||||
/// Distinguishes between `switch_block[_ref]` and `switch_block_err_union`.
|
||||
comptime kind: enum { normal, err_union },
|
||||
) Allocator.Error!void {
|
||||
const inst_data = zir.instructions.items(.data)[@intFromEnum(inst)].pl_node;
|
||||
const extra = zir.extraData(Inst.SwitchBlock, inst_data.payload_index);
|
||||
const extra = zir.extraData(switch (kind) {
|
||||
.normal => Inst.SwitchBlock,
|
||||
.err_union => Inst.SwitchBlockErrUnion,
|
||||
}, inst_data.payload_index);
|
||||
|
||||
var extra_index: usize = extra.end;
|
||||
|
||||
@ -3793,18 +4326,35 @@ fn findDeclsSwitch(
|
||||
break :blk multi_cases_len;
|
||||
} else 0;
|
||||
|
||||
if (extra.data.bits.any_has_tag_capture) {
|
||||
if (switch (kind) {
|
||||
.normal => extra.data.bits.any_has_tag_capture,
|
||||
.err_union => extra.data.bits.any_uses_err_capture,
|
||||
}) {
|
||||
extra_index += 1;
|
||||
}
|
||||
|
||||
const special_prong = extra.data.bits.specialProng();
|
||||
if (special_prong != .none) {
|
||||
const has_special = switch (kind) {
|
||||
.normal => extra.data.bits.specialProng() != .none,
|
||||
.err_union => has_special: {
|
||||
// Handle `non_err_body` first.
|
||||
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, prong_info.body_len);
|
||||
extra_index += body.len;
|
||||
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
|
||||
break :has_special extra.data.bits.has_else;
|
||||
},
|
||||
};
|
||||
|
||||
if (has_special) {
|
||||
const prong_info: Inst.SwitchBlock.ProngInfo = @bitCast(zir.extra[extra_index]);
|
||||
extra_index += 1;
|
||||
const body = zir.bodySlice(extra_index, prong_info.body_len);
|
||||
extra_index += body.len;
|
||||
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
|
||||
{
|
||||
@ -3816,7 +4366,7 @@ fn findDeclsSwitch(
|
||||
const body = zir.bodySlice(extra_index, prong_info.body_len);
|
||||
extra_index += body.len;
|
||||
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
}
|
||||
{
|
||||
@ -3833,18 +4383,20 @@ fn findDeclsSwitch(
|
||||
const body = zir.bodySlice(extra_index, prong_info.body_len);
|
||||
extra_index += body.len;
|
||||
|
||||
try zir.findDeclsBody(list, body);
|
||||
try zir.findDeclsBody(gpa, list, defers, body);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn findDeclsBody(
|
||||
zir: Zir,
|
||||
list: *std.ArrayList(Inst.Index),
|
||||
gpa: Allocator,
|
||||
list: *std.ArrayListUnmanaged(Inst.Index),
|
||||
defers: *std.AutoHashMapUnmanaged(u32, void),
|
||||
body: []const Inst.Index,
|
||||
) Allocator.Error!void {
|
||||
for (body) |member| {
|
||||
try zir.findDeclsInner(list, member);
|
||||
try zir.findDeclsInner(gpa, list, defers, member);
|
||||
}
|
||||
}
|
||||
|
||||
@ -4042,7 +4594,7 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
|
||||
return null;
|
||||
}
|
||||
const extra_index = extra.end +
|
||||
1 +
|
||||
extra.data.ret_body_len +
|
||||
extra.data.body_len +
|
||||
@typeInfo(Inst.Func.SrcLocs).Struct.fields.len;
|
||||
return @bitCast([4]u32{
|
||||
|
@ -2264,13 +2264,19 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
}
|
||||
}
|
||||
|
||||
zcu.analysis_roots.clear();
|
||||
|
||||
try comp.queueJob(.{ .analyze_mod = std_mod });
|
||||
if (comp.config.is_test) {
|
||||
zcu.analysis_roots.appendAssumeCapacity(std_mod);
|
||||
|
||||
if (comp.config.is_test and zcu.main_mod != std_mod) {
|
||||
try comp.queueJob(.{ .analyze_mod = zcu.main_mod });
|
||||
zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
|
||||
}
|
||||
|
||||
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
|
||||
try comp.queueJob(.{ .analyze_mod = compiler_rt_mod });
|
||||
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2294,7 +2300,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
zcu.intern_pool.dumpGenericInstances(gpa);
|
||||
}
|
||||
|
||||
if (comp.config.is_test and comp.totalErrorCount() == 0) {
|
||||
if (comp.config.is_test) {
|
||||
// The `test_functions` decl has been intentionally postponed until now,
|
||||
// at which point we must populate it with the list of test functions that
|
||||
// have been discovered and not filtered out.
|
||||
@ -2304,7 +2310,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
try pt.processExports();
|
||||
}
|
||||
|
||||
if (comp.totalErrorCount() != 0) {
|
||||
if (try comp.totalErrorCount() != 0) {
|
||||
// Skip flushing and keep source files loaded for error reporting.
|
||||
comp.link_error_flags = .{};
|
||||
return;
|
||||
@ -2388,7 +2394,8 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
}
|
||||
|
||||
try flush(comp, arena, .main, main_progress_node);
|
||||
if (comp.totalErrorCount() != 0) return;
|
||||
|
||||
if (try comp.totalErrorCount() != 0) return;
|
||||
|
||||
// Failure here only means an unnecessary cache miss.
|
||||
man.writeManifest() catch |err| {
|
||||
@ -2405,7 +2412,6 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
|
||||
},
|
||||
.incremental => {
|
||||
try flush(comp, arena, .main, main_progress_node);
|
||||
if (comp.totalErrorCount() != 0) return;
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -3041,82 +3047,6 @@ fn addBuf(list: *std.ArrayList(std.posix.iovec_const), buf: []const u8) void {
|
||||
list.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
|
||||
}
|
||||
|
||||
/// This function is temporally single-threaded.
|
||||
pub fn totalErrorCount(comp: *Compilation) u32 {
|
||||
var total: usize =
|
||||
comp.misc_failures.count() +
|
||||
@intFromBool(comp.alloc_failure_occurred) +
|
||||
comp.lld_errors.items.len;
|
||||
|
||||
for (comp.failed_c_objects.values()) |bundle| {
|
||||
total += bundle.diags.len;
|
||||
}
|
||||
|
||||
for (comp.failed_win32_resources.values()) |errs| {
|
||||
total += errs.errorMessageCount();
|
||||
}
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
total += zcu.failed_exports.count();
|
||||
total += zcu.failed_embed_files.count();
|
||||
|
||||
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
|
||||
if (error_msg) |_| {
|
||||
total += 1;
|
||||
} else {
|
||||
assert(file.zir_loaded);
|
||||
const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)];
|
||||
assert(payload_index != 0);
|
||||
const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
|
||||
total += header.data.items_len;
|
||||
}
|
||||
}
|
||||
|
||||
// Skip errors for Decls within files that failed parsing.
|
||||
// When a parse error is introduced, we keep all the semantic analysis for
|
||||
// the previous parse success, including compile errors, but we cannot
|
||||
// emit them until the file succeeds parsing.
|
||||
for (zcu.failed_analysis.keys()) |anal_unit| {
|
||||
const file_index = switch (anal_unit.unwrap()) {
|
||||
.cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
|
||||
.func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file,
|
||||
};
|
||||
if (zcu.fileByIndex(file_index).okToReportErrors()) {
|
||||
total += 1;
|
||||
if (zcu.cimport_errors.get(anal_unit)) |errors| {
|
||||
total += errors.errorMessageCount();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (zcu.intern_pool.global_error_set.getNamesFromMainThread().len > zcu.error_limit) {
|
||||
total += 1;
|
||||
}
|
||||
|
||||
for (zcu.failed_codegen.keys()) |_| {
|
||||
total += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// The "no entry point found" error only counts if there are no semantic analysis errors.
|
||||
if (total == 0) {
|
||||
total += @intFromBool(comp.link_error_flags.no_entry_point_found);
|
||||
}
|
||||
total += @intFromBool(comp.link_error_flags.missing_libc);
|
||||
total += comp.link_errors.items.len;
|
||||
|
||||
// Compile log errors only count if there are no other errors.
|
||||
if (total == 0) {
|
||||
if (comp.module) |zcu| {
|
||||
total += @intFromBool(zcu.compile_log_sources.count() != 0);
|
||||
}
|
||||
}
|
||||
|
||||
return @as(u32, @intCast(total));
|
||||
}
|
||||
|
||||
/// This function is temporally single-threaded.
|
||||
pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
const gpa = comp.gpa;
|
||||
@ -3159,12 +3089,13 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
.msg = try bundle.addString("memory allocation failure"),
|
||||
});
|
||||
}
|
||||
|
||||
var all_references: ?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference) = null;
|
||||
defer if (all_references) |*a| a.deinit(gpa);
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
var all_references = try zcu.resolveReferences();
|
||||
defer all_references.deinit(gpa);
|
||||
|
||||
for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
|
||||
if (error_msg) |msg| {
|
||||
try addModuleErrorMsg(zcu, &bundle, msg.*, &all_references);
|
||||
@ -3190,8 +3121,14 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
|
||||
if (ctx.err.*) |_| return lhs_index < rhs_index;
|
||||
const errors = ctx.zcu.failed_analysis.values();
|
||||
const lhs_src_loc = errors[lhs_index].src_loc.upgrade(ctx.zcu);
|
||||
const rhs_src_loc = errors[rhs_index].src_loc.upgrade(ctx.zcu);
|
||||
const lhs_src_loc = errors[lhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
|
||||
// LHS source location lost, so should never be referenced. Just sort it to the end.
|
||||
return false;
|
||||
};
|
||||
const rhs_src_loc = errors[rhs_index].src_loc.upgradeOrLost(ctx.zcu) orelse {
|
||||
// RHS source location lost, so should never be referenced. Just sort it to the end.
|
||||
return true;
|
||||
};
|
||||
return if (lhs_src_loc.file_scope != rhs_src_loc.file_scope) std.mem.order(
|
||||
u8,
|
||||
lhs_src_loc.file_scope.sub_file_path,
|
||||
@ -3212,9 +3149,16 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
if (err) |e| return e;
|
||||
}
|
||||
for (zcu.failed_analysis.keys(), zcu.failed_analysis.values()) |anal_unit, error_msg| {
|
||||
if (comp.incremental) {
|
||||
if (all_references == null) {
|
||||
all_references = try zcu.resolveReferences();
|
||||
}
|
||||
if (!all_references.?.contains(anal_unit)) continue;
|
||||
}
|
||||
|
||||
const file_index = switch (anal_unit.unwrap()) {
|
||||
.cau => |cau| zcu.namespacePtr(ip.getCau(cau).namespace).file_scope,
|
||||
.func => |ip_index| zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip).file,
|
||||
.func => |ip_index| (zcu.funcInfo(ip_index).zir_body_inst.resolveFull(ip) orelse continue).file,
|
||||
};
|
||||
|
||||
// Skip errors for AnalUnits within files that had a parse failure.
|
||||
@ -3243,7 +3187,8 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
}
|
||||
}
|
||||
}
|
||||
for (zcu.failed_codegen.values()) |error_msg| {
|
||||
for (zcu.failed_codegen.keys(), zcu.failed_codegen.values()) |nav, error_msg| {
|
||||
if (!zcu.navFileScope(nav).okToReportErrors()) continue;
|
||||
try addModuleErrorMsg(zcu, &bundle, error_msg.*, &all_references);
|
||||
}
|
||||
for (zcu.failed_exports.values()) |value| {
|
||||
@ -3304,9 +3249,6 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
|
||||
if (comp.module) |zcu| {
|
||||
if (bundle.root_list.items.len == 0 and zcu.compile_log_sources.count() != 0) {
|
||||
var all_references = try zcu.resolveReferences();
|
||||
defer all_references.deinit(gpa);
|
||||
|
||||
const values = zcu.compile_log_sources.values();
|
||||
// First one will be the error; subsequent ones will be notes.
|
||||
const src_loc = values[0].src();
|
||||
@ -3328,12 +3270,30 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
|
||||
}
|
||||
}
|
||||
|
||||
assert(comp.totalErrorCount() == bundle.root_list.items.len);
|
||||
if (comp.module) |zcu| {
|
||||
if (comp.incremental and bundle.root_list.items.len == 0) {
|
||||
const should_have_error = for (zcu.transitive_failed_analysis.keys()) |failed_unit| {
|
||||
if (all_references == null) {
|
||||
all_references = try zcu.resolveReferences();
|
||||
}
|
||||
if (all_references.?.contains(failed_unit)) break true;
|
||||
} else false;
|
||||
if (should_have_error) {
|
||||
@panic("referenced transitive analysis errors, but none actually emitted");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const compile_log_text = if (comp.module) |m| m.compile_log_text.items else "";
|
||||
return bundle.toOwnedBundle(compile_log_text);
|
||||
}
|
||||
|
||||
fn totalErrorCount(comp: *Compilation) !u32 {
|
||||
var errors = try comp.getAllErrorsAlloc();
|
||||
defer errors.deinit(comp.gpa);
|
||||
return errors.errorMessageCount();
|
||||
}
|
||||
|
||||
pub const ErrorNoteHashContext = struct {
|
||||
eb: *const ErrorBundle.Wip,
|
||||
|
||||
@ -3384,7 +3344,7 @@ pub fn addModuleErrorMsg(
|
||||
mod: *Zcu,
|
||||
eb: *ErrorBundle.Wip,
|
||||
module_err_msg: Zcu.ErrorMsg,
|
||||
all_references: *const std.AutoHashMapUnmanaged(InternPool.AnalUnit, Zcu.ResolvedReference),
|
||||
all_references: *?std.AutoHashMapUnmanaged(InternPool.AnalUnit, ?Zcu.ResolvedReference),
|
||||
) !void {
|
||||
const gpa = eb.gpa;
|
||||
const ip = &mod.intern_pool;
|
||||
@ -3408,13 +3368,18 @@ pub fn addModuleErrorMsg(
|
||||
defer ref_traces.deinit(gpa);
|
||||
|
||||
if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
|
||||
if (all_references.* == null) {
|
||||
all_references.* = try mod.resolveReferences();
|
||||
}
|
||||
|
||||
var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{};
|
||||
defer seen.deinit(gpa);
|
||||
|
||||
const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
|
||||
|
||||
var referenced_by = rt_root;
|
||||
while (all_references.get(referenced_by)) |ref| {
|
||||
while (all_references.*.?.get(referenced_by)) |maybe_ref| {
|
||||
const ref = maybe_ref orelse break;
|
||||
const gop = try seen.getOrPut(gpa, ref.referencer);
|
||||
if (gop.found_existing) break;
|
||||
if (ref_traces.items.len < max_references) {
|
||||
@ -3423,6 +3388,7 @@ pub fn addModuleErrorMsg(
|
||||
const span = try src.span(gpa);
|
||||
const loc = std.zig.findLineColumn(source.bytes, span.main);
|
||||
const rt_file_path = try src.file_scope.fullPath(gpa);
|
||||
defer gpa.free(rt_file_path);
|
||||
const name = switch (ref.referencer.unwrap()) {
|
||||
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
|
||||
.nav => |nav| ip.getNav(nav).name.toSlice(ip),
|
||||
@ -3537,6 +3503,8 @@ pub fn performAllTheWork(
|
||||
mod.sema_prog_node = std.Progress.Node.none;
|
||||
mod.codegen_prog_node.end();
|
||||
mod.codegen_prog_node = std.Progress.Node.none;
|
||||
|
||||
mod.generation += 1;
|
||||
};
|
||||
try comp.performAllTheWorkInner(main_progress_node);
|
||||
if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;
|
||||
@ -3608,10 +3576,9 @@ fn performAllTheWorkInner(
|
||||
// Pre-load these things from our single-threaded context since they
|
||||
// will be needed by the worker threads.
|
||||
const path_digest = zcu.filePathDigest(file_index);
|
||||
const old_root_type = zcu.fileRootType(file_index);
|
||||
const file = zcu.fileByIndex(file_index);
|
||||
comp.thread_pool.spawnWgId(&astgen_wait_group, workerAstGenFile, .{
|
||||
comp, file, file_index, path_digest, old_root_type, zir_prog_node, &astgen_wait_group, .root,
|
||||
comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root,
|
||||
});
|
||||
}
|
||||
}
|
||||
@ -3649,11 +3616,15 @@ fn performAllTheWorkInner(
|
||||
}
|
||||
try reportMultiModuleErrors(pt);
|
||||
try zcu.flushRetryableFailures();
|
||||
|
||||
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
|
||||
zcu.codegen_prog_node = main_progress_node.start("Code Generation", 0);
|
||||
}
|
||||
|
||||
if (!InternPool.single_threaded) comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
|
||||
if (!InternPool.single_threaded) {
|
||||
comp.codegen_work.done = false; // may be `true` from a prior update
|
||||
comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
|
||||
}
|
||||
defer if (!InternPool.single_threaded) {
|
||||
{
|
||||
comp.codegen_work.mutex.lock();
|
||||
@ -4283,7 +4254,6 @@ fn workerAstGenFile(
|
||||
file: *Zcu.File,
|
||||
file_index: Zcu.File.Index,
|
||||
path_digest: Cache.BinDigest,
|
||||
old_root_type: InternPool.Index,
|
||||
prog_node: std.Progress.Node,
|
||||
wg: *WaitGroup,
|
||||
src: Zcu.AstGenSrc,
|
||||
@ -4292,7 +4262,7 @@ fn workerAstGenFile(
|
||||
defer child_prog_node.end();
|
||||
|
||||
const pt: Zcu.PerThread = .{ .zcu = comp.module.?, .tid = @enumFromInt(tid) };
|
||||
pt.astGenFile(file, path_digest, old_root_type) catch |err| switch (err) {
|
||||
pt.astGenFile(file, path_digest) catch |err| switch (err) {
|
||||
error.AnalysisFail => return,
|
||||
else => {
|
||||
file.status = .retryable_failure;
|
||||
@ -4323,7 +4293,7 @@ fn workerAstGenFile(
|
||||
// `@import("builtin")` is handled specially.
|
||||
if (mem.eql(u8, import_path, "builtin")) continue;
|
||||
|
||||
const import_result, const imported_path_digest, const imported_root_type = blk: {
|
||||
const import_result, const imported_path_digest = blk: {
|
||||
comp.mutex.lock();
|
||||
defer comp.mutex.unlock();
|
||||
|
||||
@ -4338,8 +4308,7 @@ fn workerAstGenFile(
|
||||
comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
|
||||
};
|
||||
const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
|
||||
const imported_root_type = pt.zcu.fileRootType(res.file_index);
|
||||
break :blk .{ res, imported_path_digest, imported_root_type };
|
||||
break :blk .{ res, imported_path_digest };
|
||||
};
|
||||
if (import_result.is_new) {
|
||||
log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
|
||||
@ -4350,7 +4319,7 @@ fn workerAstGenFile(
|
||||
.import_tok = item.data.token,
|
||||
} };
|
||||
comp.thread_pool.spawnWgId(wg, workerAstGenFile, .{
|
||||
comp, import_result.file, import_result.file_index, imported_path_digest, imported_root_type, prog_node, wg, sub_src,
|
||||
comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src,
|
||||
});
|
||||
}
|
||||
}
|
||||
@ -6443,7 +6412,8 @@ fn buildOutputFromZig(
|
||||
|
||||
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
|
||||
|
||||
assert(out.* == null);
|
||||
// Under incremental compilation, `out` may already be populated from a prior update.
|
||||
assert(out.* == null or comp.incremental);
|
||||
out.* = try sub_compilation.toCrtFile();
|
||||
}
|
||||
|
||||
|
@ -62,22 +62,60 @@ const want_multi_threaded = true;
|
||||
/// Whether a single-threaded intern pool impl is in use.
|
||||
pub const single_threaded = builtin.single_threaded or !want_multi_threaded;
|
||||
|
||||
/// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole
|
||||
/// compilation. From this index, you can acquire a `TrackedInst`, which containss a reference to both
|
||||
/// the file which the instruction lives in, and the instruction index itself, which is updated on
|
||||
/// incremental updates by `Zcu.updateZirRefs`.
|
||||
pub const TrackedInst = extern struct {
|
||||
file: FileIndex,
|
||||
inst: Zir.Inst.Index,
|
||||
comptime {
|
||||
// The fields should be tightly packed. See also serialiation logic in `Compilation.saveState`.
|
||||
assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(Zir.Inst.Index));
|
||||
}
|
||||
|
||||
/// It is possible on an incremental update that we "lose" a ZIR instruction: some tracked `%x` in
|
||||
/// the old ZIR failed to map to any `%y` in the new ZIR. For this reason, we actually store values
|
||||
/// of type `MaybeLost`, which uses `ZirIndex.lost` to represent this case. `Index.resolve` etc
|
||||
/// return `null` when the `TrackedInst` being resolved has been lost.
|
||||
pub const MaybeLost = extern struct {
|
||||
file: FileIndex,
|
||||
inst: ZirIndex,
|
||||
pub const ZirIndex = enum(u32) {
|
||||
/// Tracking failed for this ZIR instruction. Uses of it should fail.
|
||||
lost = std.math.maxInt(u32),
|
||||
_,
|
||||
pub fn unwrap(inst: ZirIndex) ?Zir.Inst.Index {
|
||||
return switch (inst) {
|
||||
.lost => null,
|
||||
_ => @enumFromInt(@intFromEnum(inst)),
|
||||
};
|
||||
}
|
||||
pub fn wrap(inst: Zir.Inst.Index) ZirIndex {
|
||||
return @enumFromInt(@intFromEnum(inst));
|
||||
}
|
||||
};
|
||||
comptime {
|
||||
// The fields should be tightly packed. See also serialiation logic in `Compilation.saveState`.
|
||||
assert(@sizeOf(@This()) == @sizeOf(FileIndex) + @sizeOf(ZirIndex));
|
||||
}
|
||||
};
|
||||
|
||||
pub const Index = enum(u32) {
|
||||
_,
|
||||
pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) TrackedInst {
|
||||
pub fn resolveFull(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) ?TrackedInst {
|
||||
const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
|
||||
const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
|
||||
return tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
|
||||
const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
|
||||
return .{
|
||||
.file = maybe_lost.file,
|
||||
.inst = maybe_lost.inst.unwrap() orelse return null,
|
||||
};
|
||||
}
|
||||
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) Zir.Inst.Index {
|
||||
return i.resolveFull(ip).inst;
|
||||
pub fn resolveFile(tracked_inst_index: TrackedInst.Index, ip: *const InternPool) FileIndex {
|
||||
const tracked_inst_unwrapped = tracked_inst_index.unwrap(ip);
|
||||
const tracked_insts = ip.getLocalShared(tracked_inst_unwrapped.tid).tracked_insts.acquire();
|
||||
const maybe_lost = tracked_insts.view().items(.@"0")[tracked_inst_unwrapped.index];
|
||||
return maybe_lost.file;
|
||||
}
|
||||
pub fn resolve(i: TrackedInst.Index, ip: *const InternPool) ?Zir.Inst.Index {
|
||||
return (i.resolveFull(ip) orelse return null).inst;
|
||||
}
|
||||
|
||||
pub fn toOptional(i: TrackedInst.Index) Optional {
|
||||
@ -120,7 +158,11 @@ pub fn trackZir(
|
||||
tid: Zcu.PerThread.Id,
|
||||
key: TrackedInst,
|
||||
) Allocator.Error!TrackedInst.Index {
|
||||
const full_hash = Hash.hash(0, std.mem.asBytes(&key));
|
||||
const maybe_lost_key: TrackedInst.MaybeLost = .{
|
||||
.file = key.file,
|
||||
.inst = TrackedInst.MaybeLost.ZirIndex.wrap(key.inst),
|
||||
};
|
||||
const full_hash = Hash.hash(0, std.mem.asBytes(&maybe_lost_key));
|
||||
const hash: u32 = @truncate(full_hash >> 32);
|
||||
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
|
||||
var map = shard.shared.tracked_inst_map.acquire();
|
||||
@ -132,12 +174,11 @@ pub fn trackZir(
|
||||
const entry = &map.entries[map_index];
|
||||
const index = entry.acquire().unwrap() orelse break;
|
||||
if (entry.hash != hash) continue;
|
||||
if (std.meta.eql(index.resolveFull(ip), key)) return index;
|
||||
if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
|
||||
}
|
||||
shard.mutate.tracked_inst_map.mutex.lock();
|
||||
defer shard.mutate.tracked_inst_map.mutex.unlock();
|
||||
if (map.entries != shard.shared.tracked_inst_map.entries) {
|
||||
shard.mutate.tracked_inst_map.len += 1;
|
||||
map = shard.shared.tracked_inst_map;
|
||||
map_mask = map.header().mask();
|
||||
map_index = hash;
|
||||
@ -147,7 +188,7 @@ pub fn trackZir(
|
||||
const entry = &map.entries[map_index];
|
||||
const index = entry.acquire().unwrap() orelse break;
|
||||
if (entry.hash != hash) continue;
|
||||
if (std.meta.eql(index.resolveFull(ip), key)) return index;
|
||||
if (std.meta.eql(index.resolveFull(ip) orelse continue, key)) return index;
|
||||
}
|
||||
defer shard.mutate.tracked_inst_map.len += 1;
|
||||
const local = ip.getLocal(tid);
|
||||
@ -161,7 +202,7 @@ pub fn trackZir(
|
||||
.tid = tid,
|
||||
.index = list.mutate.len,
|
||||
}).wrap(ip);
|
||||
list.appendAssumeCapacity(.{key});
|
||||
list.appendAssumeCapacity(.{maybe_lost_key});
|
||||
entry.release(index.toOptional());
|
||||
return index;
|
||||
}
|
||||
@ -205,12 +246,94 @@ pub fn trackZir(
|
||||
.tid = tid,
|
||||
.index = list.mutate.len,
|
||||
}).wrap(ip);
|
||||
list.appendAssumeCapacity(.{key});
|
||||
list.appendAssumeCapacity(.{maybe_lost_key});
|
||||
map.entries[map_index] = .{ .value = index.toOptional(), .hash = hash };
|
||||
shard.shared.tracked_inst_map.release(new_map);
|
||||
return index;
|
||||
}
|
||||
|
||||
/// At the start of an incremental update, we update every entry in `tracked_insts` to include
|
||||
/// the new ZIR index. Once this is done, we must update the hashmap metadata so that lookups
|
||||
/// return correct entries where they already exist.
|
||||
pub fn rehashTrackedInsts(
|
||||
ip: *InternPool,
|
||||
gpa: Allocator,
|
||||
tid: Zcu.PerThread.Id,
|
||||
) Allocator.Error!void {
|
||||
assert(tid == .main); // we shouldn't have any other threads active right now
|
||||
|
||||
// TODO: this function doesn't handle OOM well. What should it do?
|
||||
|
||||
// We don't lock anything, as this function assumes that no other thread is
|
||||
// accessing `tracked_insts`. This is necessary because we're going to be
|
||||
// iterating the `TrackedInst`s in each `Local`, so we have to know that
|
||||
// none will be added as we work.
|
||||
|
||||
// Figure out how big each shard need to be and store it in its mutate `len`.
|
||||
for (ip.shards) |*shard| shard.mutate.tracked_inst_map.len = 0;
|
||||
for (ip.locals) |*local| {
|
||||
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
|
||||
// We need the `mutate` for the len.
|
||||
for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0")) |tracked_inst| {
|
||||
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
|
||||
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
|
||||
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
|
||||
shard.mutate.tracked_inst_map.len += 1;
|
||||
}
|
||||
}
|
||||
|
||||
const Map = Shard.Map(TrackedInst.Index.Optional);
|
||||
|
||||
const arena_state = &ip.getLocal(tid).mutate.arena;
|
||||
|
||||
// We know how big each shard must be, so ensure we have the capacity we need.
|
||||
for (ip.shards) |*shard| {
|
||||
const want_capacity = std.math.ceilPowerOfTwo(u32, shard.mutate.tracked_inst_map.len * 5 / 3) catch unreachable;
|
||||
const have_capacity = shard.shared.tracked_inst_map.header().capacity; // no acquire because we hold the mutex
|
||||
if (have_capacity >= want_capacity) {
|
||||
@memset(shard.shared.tracked_inst_map.entries[0..have_capacity], .{ .value = .none, .hash = undefined });
|
||||
continue;
|
||||
}
|
||||
var arena = arena_state.promote(gpa);
|
||||
defer arena_state.* = arena.state;
|
||||
const new_map_buf = try arena.allocator().alignedAlloc(
|
||||
u8,
|
||||
Map.alignment,
|
||||
Map.entries_offset + want_capacity * @sizeOf(Map.Entry),
|
||||
);
|
||||
const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
|
||||
new_map.header().* = .{ .capacity = want_capacity };
|
||||
@memset(new_map.entries[0..want_capacity], .{ .value = .none, .hash = undefined });
|
||||
shard.shared.tracked_inst_map.release(new_map);
|
||||
}
|
||||
|
||||
// Now, actually insert the items.
|
||||
for (ip.locals, 0..) |*local, local_tid| {
|
||||
// `getMutableTrackedInsts` is okay only because no other thread is currently active.
|
||||
// We need the `mutate` for the len.
|
||||
for (local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0"), 0..) |tracked_inst, local_inst_index| {
|
||||
if (tracked_inst.inst == .lost) continue; // we can ignore this one!
|
||||
const full_hash = Hash.hash(0, std.mem.asBytes(&tracked_inst));
|
||||
const hash: u32 = @truncate(full_hash >> 32);
|
||||
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
|
||||
const map = shard.shared.tracked_inst_map; // no acquire because we hold the mutex
|
||||
const map_mask = map.header().mask();
|
||||
var map_index = hash;
|
||||
const entry = while (true) : (map_index += 1) {
|
||||
map_index &= map_mask;
|
||||
const entry = &map.entries[map_index];
|
||||
if (entry.acquire() == .none) break entry;
|
||||
};
|
||||
const index = TrackedInst.Index.Unwrapped.wrap(.{
|
||||
.tid = @enumFromInt(local_tid),
|
||||
.index = @intCast(local_inst_index),
|
||||
}, ip);
|
||||
entry.hash = hash;
|
||||
entry.release(index.toOptional());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Analysis Unit. Represents a single entity which undergoes semantic analysis.
|
||||
/// This is either a `Cau` or a runtime function.
|
||||
/// The LSB is used as a tag bit.
|
||||
@ -572,10 +695,6 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI
|
||||
.ip = ip,
|
||||
.next_entry = .none,
|
||||
};
|
||||
if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{
|
||||
.ip = ip,
|
||||
.next_entry = .none,
|
||||
};
|
||||
return .{
|
||||
.ip = ip,
|
||||
.next_entry = first_entry.toOptional(),
|
||||
@ -612,7 +731,6 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
|
||||
|
||||
if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
|
||||
// Dummy entry, so we can reuse it rather than allocating a new one!
|
||||
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none;
|
||||
break :new_index gop.value_ptr.*;
|
||||
}
|
||||
|
||||
@ -620,7 +738,12 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
|
||||
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: {
|
||||
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
|
||||
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
|
||||
ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none;
|
||||
if (gop.found_existing) {
|
||||
ptr.next = gop.value_ptr.*.toOptional();
|
||||
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional();
|
||||
} else {
|
||||
ptr.next = .none;
|
||||
}
|
||||
gop.value_ptr.* = new_index;
|
||||
break :new_index new_index;
|
||||
},
|
||||
@ -642,10 +765,9 @@ pub const NamespaceNameKey = struct {
|
||||
};
|
||||
|
||||
pub const DepEntry = extern struct {
|
||||
/// If null, this is a dummy entry - all other fields are `undefined`. It is
|
||||
/// the first and only entry in one of `intern_pool.*_deps`, and does not
|
||||
/// appear in any list by `first_dependency`, but is not in
|
||||
/// `free_dep_entries` since `*_deps` stores a reference to it.
|
||||
/// If null, this is a dummy entry. `next_dependee` is undefined. This is the first
|
||||
/// entry in one of `*_deps`, and does not appear in any list by `first_dependency`,
|
||||
/// but is not in `free_dep_entries` since `*_deps` stores a reference to it.
|
||||
depender: AnalUnit.Optional,
|
||||
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
|
||||
/// Used to iterate all dependers for a given dependee during an update.
|
||||
@ -684,6 +806,14 @@ const Local = struct {
|
||||
/// This state is fully local to the owning thread and does not require any
|
||||
/// atomic access.
|
||||
mutate: struct {
|
||||
/// When we need to allocate any long-lived buffer for mutating the `InternPool`, it is
|
||||
/// allocated into this `arena` (for the `Id` of the thread performing the mutation). An
|
||||
/// arena is used to avoid contention on the GPA, and to ensure that any code which retains
|
||||
/// references to old state remains valid. For instance, when reallocing hashmap metadata,
|
||||
/// a racing lookup on another thread may still retain a handle to the old metadata pointer,
|
||||
/// so it must remain valid.
|
||||
/// This arena's lifetime is tied to that of `Compilation`, although it can be cleared on
|
||||
/// garbage collection (currently vaporware).
|
||||
arena: std.heap.ArenaAllocator.State,
|
||||
|
||||
items: ListMutate,
|
||||
@ -728,7 +858,7 @@ const Local = struct {
|
||||
else => @compileError("unsupported host"),
|
||||
};
|
||||
const Strings = List(struct { u8 });
|
||||
const TrackedInsts = List(struct { TrackedInst });
|
||||
const TrackedInsts = List(struct { TrackedInst.MaybeLost });
|
||||
const Maps = List(struct { FieldMap });
|
||||
const Caus = List(struct { Cau });
|
||||
const Navs = List(Nav.Repr);
|
||||
@ -959,6 +1089,14 @@ const Local = struct {
|
||||
mutable.list.release(new_list);
|
||||
}
|
||||
|
||||
pub fn viewAllowEmpty(mutable: Mutable) View {
const capacity = mutable.list.header().capacity;
return .{
.bytes = mutable.list.bytes,
.len = mutable.mutate.len,
.capacity = capacity,
};
}
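// Illustrative note, not from this commit: `viewAllowEmpty` is the forgiving variant of
// `view` below, which asserts `capacity > 0` as a `MultiArrayList.Slice.items`
// optimization. The rehash loop earlier in this change relies on that, e.g.:
//
//     // never trips the assert, even for a thread-local list that was never allocated:
//     const tracked = local.getMutableTrackedInsts(gpa).viewAllowEmpty().items(.@"0");
//
// whereas `view()` remains for callers that already know the list is non-empty.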
pub fn view(mutable: Mutable) View {
|
||||
const capacity = mutable.list.header().capacity;
|
||||
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
|
||||
@ -996,7 +1134,6 @@ const Local = struct {
|
||||
fn header(list: ListSelf) *Header {
|
||||
return @ptrFromInt(@intFromPtr(list.bytes) - bytes_offset);
|
||||
}
|
||||
|
||||
pub fn view(list: ListSelf) View {
|
||||
const capacity = list.header().capacity;
|
||||
assert(capacity > 0); // optimizes `MultiArrayList.Slice.items`
|
||||
@ -2570,7 +2707,12 @@ pub const Key = union(enum) {
|
||||
|
||||
.variable => |a_info| {
|
||||
const b_info = b.variable;
|
||||
return a_info.owner_nav == b_info.owner_nav;
|
||||
return a_info.owner_nav == b_info.owner_nav and
|
||||
a_info.ty == b_info.ty and
|
||||
a_info.init == b_info.init and
|
||||
a_info.lib_name == b_info.lib_name and
|
||||
a_info.is_threadlocal == b_info.is_threadlocal and
|
||||
a_info.is_weak_linkage == b_info.is_weak_linkage;
|
||||
},
|
||||
.@"extern" => |a_info| {
|
||||
const b_info = b.@"extern";
|
||||
@ -6958,6 +7100,7 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
|
||||
const index = entry.acquire();
|
||||
if (index == .none) break;
|
||||
if (entry.hash != hash) continue;
|
||||
if (ip.isRemoved(index)) continue;
|
||||
if (ip.indexToKey(index).eql(key, ip)) return .{ .existing = index };
|
||||
}
|
||||
shard.mutate.map.mutex.lock();
|
||||
@ -7032,6 +7175,43 @@ fn getOrPutKeyEnsuringAdditionalCapacity(
|
||||
.map_index = map_index,
|
||||
} };
|
||||
}
/// Like `getOrPutKey`, but asserts that the key already exists, and prepares to replace
/// its shard entry with a new `Index` anyway. After finalizing this, the old index remains
/// valid (in that `indexToKey` and similar queries will behave as before), but it will
/// never be returned from a lookup (`getOrPutKey` etc).
/// This is used by incremental compilation when an existing container type is outdated. In
/// this case, the type must be recreated at a new `InternPool.Index`, but the old index must
/// remain valid since now-unreferenced `AnalUnit`s may retain references to it. The old index
/// will be cleaned up when the `Zcu` undergoes garbage collection.
fn putKeyReplace(
ip: *InternPool,
tid: Zcu.PerThread.Id,
key: Key,
) GetOrPutKey {
const full_hash = key.hash64(ip);
const hash: u32 = @truncate(full_hash >> 32);
const shard = &ip.shards[@intCast(full_hash & (ip.shards.len - 1))];
shard.mutate.map.mutex.lock();
errdefer shard.mutate.map.mutex.unlock();
const map = shard.shared.map;
const map_mask = map.header().mask();
var map_index = hash;
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.value;
assert(index != .none); // key not present
if (entry.hash == hash and ip.indexToKey(index).eql(key, ip)) {
break; // we found the entry to replace
}
}
return .{ .new = .{
.ip = ip,
.tid = tid,
.shard = shard,
.map_index = map_index,
} };
}
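// Illustrative sketch, not from this commit's text at this point: the calling pattern
// used by `getUnionType`, `getStructType`, and `getEnumType` further down looks like:
//
//     var gop = if (replace_existing)
//         ip.putKeyReplace(tid, key) // old Index stays resolvable, but lookups now miss it
//     else
//         try ip.getOrPutKey(gpa, tid, key);
//     defer gop.deinit();
//     if (gop == .existing) return .{ .existing = gop.existing };
//
// Only the shard entry is rewritten; `indexToKey` on the displaced Index keeps working
// until the Zcu's (future) garbage collection reclaims it.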
|
||||
|
||||
pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) Allocator.Error!Index {
|
||||
var gop = try ip.getOrPutKey(gpa, tid, key);
|
||||
@ -7859,6 +8039,10 @@ pub const UnionTypeInit = struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
declared_owned_captures: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: CaptureValue.Slice,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
type_hash: u64,
|
||||
@ -7871,17 +8055,28 @@ pub fn getUnionType(
|
||||
gpa: Allocator,
|
||||
tid: Zcu.PerThread.Id,
|
||||
ini: UnionTypeInit,
|
||||
/// If it is known that there is an existing type with this key which is outdated,
|
||||
/// this is passed as `true`, and the type is replaced with one at a fresh index.
|
||||
replace_existing: bool,
|
||||
) Allocator.Error!WipNamespaceType.Result {
|
||||
var gop = try ip.getOrPutKey(gpa, tid, .{ .union_type = switch (ini.key) {
|
||||
const key: Key = .{ .union_type = switch (ini.key) {
|
||||
.declared => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.declared_owned_captures => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .owned = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = r.type_hash,
|
||||
} },
|
||||
} });
|
||||
} };
|
||||
var gop = if (replace_existing)
|
||||
ip.putKeyReplace(tid, key)
|
||||
else
|
||||
try ip.getOrPutKey(gpa, tid, key);
|
||||
defer gop.deinit();
|
||||
if (gop == .existing) return .{ .existing = gop.existing };
|
||||
|
||||
@ -7896,7 +8091,7 @@ pub fn getUnionType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -7905,7 +8100,10 @@ pub fn getUnionType(
|
||||
|
||||
const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{
|
||||
.flags = .{
|
||||
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
|
||||
.any_captures = switch (ini.key) {
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
|
||||
.reified => false,
|
||||
},
|
||||
.runtime_tag = ini.flags.runtime_tag,
|
||||
.any_aligned_fields = ini.flags.any_aligned_fields,
|
||||
.layout = ini.flags.layout,
|
||||
@ -7914,7 +8112,10 @@ pub fn getUnionType(
|
||||
.assumed_runtime_bits = ini.flags.assumed_runtime_bits,
|
||||
.assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
|
||||
.alignment = ini.flags.alignment,
|
||||
.is_reified = ini.key == .reified,
|
||||
.is_reified = switch (ini.key) {
|
||||
.declared, .declared_owned_captures => false,
|
||||
.reified => true,
|
||||
},
|
||||
},
|
||||
.fields_len = ini.fields_len,
|
||||
.size = std.math.maxInt(u32),
|
||||
@ -7938,6 +8139,10 @@ pub fn getUnionType(
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
|
||||
},
|
||||
.declared_owned_captures => |d| if (d.captures.len != 0) {
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
|
||||
},
|
||||
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
|
||||
}
|
||||
|
||||
@ -8035,6 +8240,10 @@ pub const StructTypeInit = struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
declared_owned_captures: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: CaptureValue.Slice,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
type_hash: u64,
|
||||
@ -8047,17 +8256,28 @@ pub fn getStructType(
|
||||
gpa: Allocator,
|
||||
tid: Zcu.PerThread.Id,
|
||||
ini: StructTypeInit,
|
||||
/// If it is known that there is an existing type with this key which is outdated,
|
||||
/// this is passed as `true`, and the type is replaced with one at a fresh index.
|
||||
replace_existing: bool,
|
||||
) Allocator.Error!WipNamespaceType.Result {
|
||||
var gop = try ip.getOrPutKey(gpa, tid, .{ .struct_type = switch (ini.key) {
|
||||
const key: Key = .{ .struct_type = switch (ini.key) {
|
||||
.declared => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.declared_owned_captures => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .owned = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = r.type_hash,
|
||||
} },
|
||||
} });
|
||||
} };
|
||||
var gop = if (replace_existing)
|
||||
ip.putKeyReplace(tid, key)
|
||||
else
|
||||
try ip.getOrPutKey(gpa, tid, key);
|
||||
defer gop.deinit();
|
||||
if (gop == .existing) return .{ .existing = gop.existing };
|
||||
|
||||
@ -8080,7 +8300,7 @@ pub fn getStructType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -8096,10 +8316,16 @@ pub fn getStructType(
|
||||
.backing_int_ty = .none,
|
||||
.names_map = names_map,
|
||||
.flags = .{
|
||||
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
|
||||
.any_captures = switch (ini.key) {
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
|
||||
.reified => false,
|
||||
},
|
||||
.field_inits_wip = false,
|
||||
.inits_resolved = ini.inits_resolved,
|
||||
.is_reified = ini.key == .reified,
|
||||
.is_reified = switch (ini.key) {
|
||||
.declared, .declared_owned_captures => false,
|
||||
.reified => true,
|
||||
},
|
||||
},
|
||||
});
|
||||
try items.append(.{
|
||||
@ -8111,6 +8337,10 @@ pub fn getStructType(
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
|
||||
},
|
||||
.declared_owned_captures => |d| if (d.captures.len != 0) {
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
|
||||
},
|
||||
.reified => |r| {
|
||||
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
|
||||
},
|
||||
@ -8138,7 +8368,7 @@ pub fn getStructType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -8153,7 +8383,10 @@ pub fn getStructType(
|
||||
.fields_len = ini.fields_len,
|
||||
.size = std.math.maxInt(u32),
|
||||
.flags = .{
|
||||
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
|
||||
.any_captures = switch (ini.key) {
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
|
||||
.reified => false,
|
||||
},
|
||||
.is_extern = is_extern,
|
||||
.known_non_opv = ini.known_non_opv,
|
||||
.requires_comptime = ini.requires_comptime,
|
||||
@ -8171,7 +8404,10 @@ pub fn getStructType(
|
||||
.field_inits_wip = false,
|
||||
.inits_resolved = ini.inits_resolved,
|
||||
.fully_resolved = false,
|
||||
.is_reified = ini.key == .reified,
|
||||
.is_reified = switch (ini.key) {
|
||||
.declared, .declared_owned_captures => false,
|
||||
.reified => true,
|
||||
},
|
||||
},
|
||||
});
|
||||
try items.append(.{
|
||||
@ -8183,6 +8419,10 @@ pub fn getStructType(
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
|
||||
},
|
||||
.declared_owned_captures => |d| if (d.captures.len != 0) {
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
|
||||
},
|
||||
.reified => |r| {
|
||||
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
|
||||
},
|
||||
@ -8986,6 +9226,10 @@ pub const EnumTypeInit = struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
declared_owned_captures: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: CaptureValue.Slice,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
type_hash: u64,
|
||||
@ -9081,17 +9325,28 @@ pub fn getEnumType(
|
||||
gpa: Allocator,
|
||||
tid: Zcu.PerThread.Id,
|
||||
ini: EnumTypeInit,
|
||||
/// If it is known that there is an existing type with this key which is outdated,
|
||||
/// this is passed as `true`, and the type is replaced with one at a fresh index.
|
||||
replace_existing: bool,
|
||||
) Allocator.Error!WipEnumType.Result {
|
||||
var gop = try ip.getOrPutKey(gpa, tid, .{ .enum_type = switch (ini.key) {
|
||||
const key: Key = .{ .enum_type = switch (ini.key) {
|
||||
.declared => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.declared_owned_captures => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .owned = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = r.type_hash,
|
||||
} },
|
||||
} });
|
||||
} };
|
||||
var gop = if (replace_existing)
|
||||
ip.putKeyReplace(tid, key)
|
||||
else
|
||||
try ip.getOrPutKey(gpa, tid, key);
|
||||
defer gop.deinit();
|
||||
if (gop == .existing) return .{ .existing = gop.existing };
|
||||
|
||||
@ -9110,7 +9365,7 @@ pub fn getEnumType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -9120,7 +9375,7 @@ pub fn getEnumType(
|
||||
const extra_index = addExtraAssumeCapacity(extra, EnumAuto{
|
||||
.name = undefined, // set by `prepare`
|
||||
.captures_len = switch (ini.key) {
|
||||
.declared => |d| @intCast(d.captures.len),
|
||||
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
|
||||
.reified => std.math.maxInt(u32),
|
||||
},
|
||||
.namespace = undefined, // set by `prepare`
|
||||
@ -9139,6 +9394,7 @@ pub fn getEnumType(
|
||||
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
|
||||
switch (ini.key) {
|
||||
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
|
||||
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
|
||||
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
|
||||
}
|
||||
const names_start = extra.mutate.len;
|
||||
@ -9169,7 +9425,7 @@ pub fn getEnumType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -9180,7 +9436,7 @@ pub fn getEnumType(
|
||||
const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{
|
||||
.name = undefined, // set by `prepare`
|
||||
.captures_len = switch (ini.key) {
|
||||
.declared => |d| @intCast(d.captures.len),
|
||||
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
|
||||
.reified => std.math.maxInt(u32),
|
||||
},
|
||||
.namespace = undefined, // set by `prepare`
|
||||
@ -9204,6 +9460,7 @@ pub fn getEnumType(
|
||||
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
|
||||
switch (ini.key) {
|
||||
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
|
||||
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
|
||||
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
|
||||
}
|
||||
const names_start = extra.mutate.len;
|
||||
@ -9267,10 +9524,12 @@ pub fn getGeneratedTagEnumType(
|
||||
.tid = tid,
|
||||
.index = items.mutate.len,
|
||||
}, ip);
|
||||
const parent_namespace = ip.namespacePtr(ini.parent_namespace);
|
||||
const namespace = try ip.createNamespace(gpa, tid, .{
|
||||
.parent = ini.parent_namespace.toOptional(),
|
||||
.owner_type = enum_index,
|
||||
.file_scope = ip.namespacePtr(ini.parent_namespace).file_scope,
|
||||
.file_scope = parent_namespace.file_scope,
|
||||
.generation = parent_namespace.generation,
|
||||
});
|
||||
errdefer ip.destroyNamespace(tid, namespace);
|
||||
|
||||
@ -10866,6 +11125,7 @@ pub fn destroyNamespace(
|
||||
.parent = undefined,
|
||||
.file_scope = undefined,
|
||||
.owner_type = undefined,
|
||||
.generation = undefined,
|
||||
};
|
||||
@field(namespace, Local.namespace_next_free_field) =
|
||||
@enumFromInt(local.mutate.namespaces.free_list);
|
||||
@ -11000,7 +11260,6 @@ pub fn getOrPutTrailingString(
|
||||
shard.mutate.string_map.mutex.lock();
|
||||
defer shard.mutate.string_map.mutex.unlock();
|
||||
if (map.entries != shard.shared.string_map.entries) {
|
||||
shard.mutate.string_map.len += 1;
|
||||
map = shard.shared.string_map;
|
||||
map_mask = map.header().mask();
|
||||
map_index = hash;
|
||||
src/Sema.zig (641 changed lines)
File diff suppressed because it is too large
@ -3437,7 +3437,7 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
|
||||
},
|
||||
else => return null,
|
||||
};
|
||||
const info = tracked.resolveFull(&zcu.intern_pool);
|
||||
const info = tracked.resolveFull(&zcu.intern_pool) orelse return null;
|
||||
const file = zcu.fileByIndex(info.file);
|
||||
assert(file.zir_loaded);
|
||||
const zir = file.zir;
|
||||
|
src/Zcu.zig (620 changed lines)
@ -10,7 +10,7 @@ const builtin = @import("builtin");
|
||||
const mem = std.mem;
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
const log = std.log.scoped(.module);
|
||||
const log = std.log.scoped(.zcu);
|
||||
const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const Target = std.Target;
|
||||
@ -153,27 +153,27 @@ cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .
|
||||
/// Maximum amount of distinct error values, set by --error-limit
|
||||
error_limit: ErrorInt,
|
||||
|
||||
/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
|
||||
/// Value is the number of PO dependencies of this AnalUnit.
|
||||
/// This value will decrease as we perform semantic analysis to learn what is outdated.
|
||||
/// If any of these PO deps is outdated, this value will be moved to `outdated`.
|
||||
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
|
||||
/// Value is the number of PO or outdated Decls which this AnalUnit depends on.
|
||||
/// Value is the number of PO dependencies of this AnalUnit.
|
||||
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
|
||||
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
|
||||
/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
|
||||
/// Such `AnalUnit`s are ready for immediate re-analysis.
|
||||
/// See `findOutdatedToAnalyze` for details.
|
||||
outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
|
||||
/// This contains a set of struct types whose corresponding `Cau` may not be in
|
||||
/// `outdated`, but are the root types of files which have updated source and
|
||||
/// thus must be re-analyzed. If such a type is only in this set, the struct type
|
||||
/// index may be preserved (only the namespace might change). If its owned `Cau`
|
||||
/// is also outdated, the struct type index must be recreated.
|
||||
outdated_file_root: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
|
||||
/// This contains a list of AnalUnit whose analysis or codegen failed, but the
|
||||
/// failure was something like running out of disk space, and trying again may
|
||||
/// succeed. On the next update, we will flush this list, marking all members of
|
||||
/// it as outdated.
|
||||
retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{},
|
||||
|
||||
/// These are the modules which we initially queue for analysis in `Compilation.update`.
|
||||
/// `resolveReferences` will use these as the root of its reachability traversal.
|
||||
analysis_roots: std.BoundedArray(*Package.Module, 3) = .{},
|
||||
|
||||
stage1_flags: packed struct {
|
||||
have_winmain: bool = false,
|
||||
have_wwinmain: bool = false,
|
||||
@ -192,7 +192,7 @@ global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{}
|
||||
|
||||
/// Key is the `AnalUnit` *performing* the reference. This representation allows
|
||||
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
|
||||
/// Value is index into `all_reference` of the first reference triggered by the unit.
|
||||
/// Value is index into `all_references` of the first reference triggered by the unit.
|
||||
/// The `next` field on the `Reference` forms a linked list of all references
|
||||
/// triggered by the key `AnalUnit`.
|
||||
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
|
||||
@ -200,11 +200,23 @@ all_references: std.ArrayListUnmanaged(Reference) = .{},
|
||||
/// Freelist of indices in `all_references`.
|
||||
free_references: std.ArrayListUnmanaged(u32) = .{},
|
||||
|
||||
/// Key is the `AnalUnit` *performing* the reference. This representation allows
|
||||
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
|
||||
/// Value is index into `all_type_reference` of the first reference triggered by the unit.
|
||||
/// The `next` field on the `TypeReference` forms a linked list of all type references
|
||||
/// triggered by the key `AnalUnit`.
|
||||
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
|
||||
all_type_references: std.ArrayListUnmanaged(TypeReference) = .{},
|
||||
/// Freelist of indices in `all_type_references`.
|
||||
free_type_references: std.ArrayListUnmanaged(u32) = .{},
|
||||
|
||||
panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len,
|
||||
/// The panic function body.
|
||||
panic_func_index: InternPool.Index = .none,
|
||||
null_stack_trace: InternPool.Index = .none,
|
||||
|
||||
generation: u32 = 0,
|
||||
|
||||
pub const PerThread = @import("Zcu/PerThread.zig");
|
||||
|
||||
pub const PanicId = enum {
|
||||
@ -308,10 +320,21 @@ pub const Reference = struct {
|
||||
src: LazySrcLoc,
|
||||
};
|
||||
|
||||
pub const TypeReference = struct {
/// The container type which was referenced.
referenced: InternPool.Index,
/// Index into `all_type_references` of the next `TypeReference` triggered by the same `AnalUnit`.
/// `std.math.maxInt(u32)` is the sentinel.
next: u32,
/// The source location of the reference.
src: LazySrcLoc,
};
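// Illustrative sketch, not from this commit: `next` forms an intrusive singly linked
// list per `AnalUnit`, headed by `type_reference_table` and terminated by
// `std.math.maxInt(u32)`, mirroring the existing `Reference` list. A hypothetical
// traversal in the style of `deleteUnitReferences`/`resolveReferences` below:
fn countTypeReferences(zcu: *Zcu, unit: AnalUnit) u32 {
    var n: u32 = 0;
    var idx = zcu.type_reference_table.get(unit) orelse return 0;
    while (idx != std.math.maxInt(u32)) : (idx = zcu.all_type_references.items[idx].next) {
        n += 1;
    }
    return n;
}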
|
||||
|
||||
/// The container that structs, enums, unions, and opaques have.
|
||||
pub const Namespace = struct {
|
||||
parent: OptionalIndex,
|
||||
file_scope: File.Index,
|
||||
generation: u32,
|
||||
/// Will be a struct, enum, union, or opaque.
|
||||
owner_type: InternPool.Index,
|
||||
/// Members of the namespace which are marked `pub`.
|
||||
@ -2022,10 +2045,11 @@ pub const LazySrcLoc = struct {
|
||||
.offset = .unneeded,
|
||||
};
|
||||
|
||||
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) struct { *File, Ast.Node.Index } {
|
||||
/// Returns `null` if the ZIR instruction has been lost across incremental updates.
|
||||
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
|
||||
const ip = &zcu.intern_pool;
|
||||
const file_index, const zir_inst = inst: {
|
||||
const info = base_node_inst.resolveFull(ip);
|
||||
const info = base_node_inst.resolveFull(ip) orelse return null;
|
||||
break :inst .{ info.file, info.inst };
|
||||
};
|
||||
const file = zcu.fileByIndex(file_index);
|
||||
@ -2051,7 +2075,15 @@ pub const LazySrcLoc = struct {
|
||||
/// Resolve the file and AST node of `base_node_inst` to get a resolved `SrcLoc`.
|
||||
/// The resulting `SrcLoc` should only be used ephemerally, as it is not correct across incremental updates.
|
||||
pub fn upgrade(lazy: LazySrcLoc, zcu: *Zcu) SrcLoc {
|
||||
const file, const base_node = resolveBaseNode(lazy.base_node_inst, zcu);
|
||||
return lazy.upgradeOrLost(zcu).?;
|
||||
}
|
||||
|
||||
/// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
|
||||
pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
|
||||
const file, const base_node: Ast.Node.Index = if (lazy.offset == .entire_file) .{
|
||||
zcu.fileByIndex(lazy.base_node_inst.resolveFile(&zcu.intern_pool)),
|
||||
0,
|
||||
} else resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
|
||||
return .{
|
||||
.file_scope = file,
|
||||
.base_node = base_node,
|
||||
@ -2148,7 +2180,6 @@ pub fn deinit(zcu: *Zcu) void {
|
||||
zcu.potentially_outdated.deinit(gpa);
|
||||
zcu.outdated.deinit(gpa);
|
||||
zcu.outdated_ready.deinit(gpa);
|
||||
zcu.outdated_file_root.deinit(gpa);
|
||||
zcu.retryable_failures.deinit(gpa);
|
||||
|
||||
zcu.test_functions.deinit(gpa);
|
||||
@ -2162,6 +2193,10 @@ pub fn deinit(zcu: *Zcu) void {
|
||||
zcu.all_references.deinit(gpa);
|
||||
zcu.free_references.deinit(gpa);
|
||||
|
||||
zcu.type_reference_table.deinit(gpa);
|
||||
zcu.all_type_references.deinit(gpa);
|
||||
zcu.free_type_references.deinit(gpa);
|
||||
|
||||
zcu.intern_pool.deinit(gpa);
|
||||
}
|
||||
|
||||
@ -2255,55 +2290,89 @@ pub fn loadZirCacheBody(gpa: Allocator, header: Zir.Header, cache_file: std.fs.F
|
||||
return zir;
|
||||
}
|
||||
|
||||
pub fn markDependeeOutdated(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
log.debug("outdated dependee: {}", .{dependee});
|
||||
pub fn markDependeeOutdated(
|
||||
zcu: *Zcu,
|
||||
/// When we are diffing ZIR and marking things as outdated, we won't yet have marked the dependencies as PO.
|
||||
/// However, when we discover during analysis that something was outdated, the `Dependee` was already
|
||||
/// marked as PO, so we need to decrement the PO dep count for each depender.
|
||||
marked_po: enum { not_marked_po, marked_po },
|
||||
dependee: InternPool.Dependee,
|
||||
) !void {
|
||||
log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = zcu.intern_pool.dependencyIterator(dependee);
|
||||
while (it.next()) |depender| {
|
||||
if (zcu.outdated.contains(depender)) {
|
||||
// We do not need to increment the PO dep count, as if the outdated
|
||||
// dependee is a Decl, we had already marked this as PO.
|
||||
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
|
||||
switch (marked_po) {
|
||||
.not_marked_po => {},
|
||||
.marked_po => {
|
||||
po_dep_count.* -= 1;
|
||||
log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
|
||||
if (po_dep_count.* == 0) {
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
},
|
||||
}
|
||||
continue;
|
||||
}
|
||||
const opt_po_entry = zcu.potentially_outdated.fetchSwapRemove(depender);
|
||||
const new_po_dep_count = switch (marked_po) {
|
||||
.not_marked_po => if (opt_po_entry) |e| e.value else 0,
|
||||
.marked_po => if (opt_po_entry) |e| e.value - 1 else {
|
||||
// This `AnalUnit` has already been re-analyzed this update, and registered a dependency
|
||||
// on this thing, but already has sufficiently up-to-date information. Nothing to do.
|
||||
continue;
|
||||
},
|
||||
};
|
||||
try zcu.outdated.putNoClobber(
|
||||
zcu.gpa,
|
||||
depender,
|
||||
// We do not need to increment this count for the same reason as above.
|
||||
if (opt_po_entry) |e| e.value else 0,
|
||||
new_po_dep_count,
|
||||
);
|
||||
log.debug("outdated: {}", .{depender});
|
||||
if (opt_po_entry == null) {
|
||||
// This is a new entry with no PO dependencies.
|
||||
log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
|
||||
if (new_po_dep_count == 0) {
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
// If this is a Decl and was not previously PO, we must recursively
|
||||
// mark dependencies on its tyval as PO.
|
||||
if (opt_po_entry == null) {
|
||||
assert(marked_po == .not_marked_po);
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(depender);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = zcu.intern_pool.dependencyIterator(dependee);
|
||||
while (it.next()) |depender| {
|
||||
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
|
||||
// This depender is already outdated, but it now has one
|
||||
// less PO dependency!
|
||||
po_dep_count.* -= 1;
|
||||
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
|
||||
if (po_dep_count.* == 0) {
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// This depender is definitely at least PO, because this Decl was just analyzed
|
||||
// due to being outdated.
|
||||
const ptr = zcu.potentially_outdated.getPtr(depender).?;
|
||||
const ptr = zcu.potentially_outdated.getPtr(depender) orelse {
|
||||
// This dependency has been registered during in-progress analysis, but the unit is
|
||||
// not in `potentially_outdated` because analysis is in-progress. Nothing to do.
|
||||
continue;
|
||||
};
|
||||
if (ptr.* > 1) {
|
||||
ptr.* -= 1;
|
||||
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
|
||||
continue;
|
||||
}
|
||||
|
||||
log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
|
||||
|
||||
// This dependency is no longer PO, i.e. is known to be up-to-date.
|
||||
assert(zcu.potentially_outdated.swapRemove(depender));
|
||||
// If this is a Decl, we must recursively mark dependencies on its tyval
|
||||
@ -2323,14 +2392,16 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
/// in turn be PO, due to a dependency on the original AnalUnit's tyval or IES.
|
||||
fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUnit) !void {
|
||||
const ip = &zcu.intern_pool;
|
||||
var it = ip.dependencyIterator(switch (maybe_outdated.unwrap()) {
|
||||
const dependee: InternPool.Dependee = switch (maybe_outdated.unwrap()) {
|
||||
.cau => |cau| switch (ip.getCau(cau).owner.unwrap()) {
|
||||
.nav => |nav| .{ .nav_val = nav }, // TODO: also `nav_ref` deps when introduced
|
||||
.none, .type => return, // analysis of this `Cau` can't outdate any dependencies
|
||||
.type => |ty| .{ .interned = ty },
|
||||
.none => return, // analysis of this `Cau` can't outdate any dependencies
|
||||
},
|
||||
.func => |func_index| .{ .interned = func_index }, // IES
|
||||
});
|
||||
|
||||
};
|
||||
log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = ip.dependencyIterator(dependee);
|
||||
while (it.next()) |po| {
|
||||
if (zcu.outdated.getPtr(po)) |po_dep_count| {
|
||||
// This dependency is already outdated, but it now has one more PO
|
||||
@ -2339,14 +2410,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
|
||||
_ = zcu.outdated_ready.swapRemove(po);
|
||||
}
|
||||
po_dep_count.* += 1;
|
||||
log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
|
||||
continue;
|
||||
}
|
||||
if (zcu.potentially_outdated.getPtr(po)) |n| {
|
||||
// There is now one more PO dependency.
|
||||
n.* += 1;
|
||||
log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
|
||||
continue;
|
||||
}
|
||||
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
|
||||
log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
|
||||
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(po);
|
||||
}
|
||||
@ -2355,9 +2429,11 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
|
||||
pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
|
||||
if (!zcu.comp.incremental) return null;
|
||||
|
||||
if (true) @panic("TODO: findOutdatedToAnalyze");
|
||||
|
||||
if (zcu.outdated.count() == 0 and zcu.potentially_outdated.count() == 0) {
|
||||
if (zcu.outdated.count() == 0) {
|
||||
// Any units in `potentially_outdated` must just be stuck in loops with one another: none of those
|
||||
// units have had any outdated dependencies so far, and all of their remaining PO deps are triggered
|
||||
// by other units in `potentially_outdated`. So, we can safety assume those units up-to-date.
|
||||
zcu.potentially_outdated.clearRetainingCapacity();
|
||||
log.debug("findOutdatedToAnalyze: no outdated depender", .{});
|
||||
return null;
|
||||
}
|
||||
@ -2372,96 +2448,75 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
|
||||
// In this case, we must defer to more complex logic below.
|
||||
|
||||
if (zcu.outdated_ready.count() > 0) {
|
||||
log.debug("findOutdatedToAnalyze: trivial '{s} {d}'", .{
|
||||
@tagName(zcu.outdated_ready.keys()[0].unwrap()),
|
||||
switch (zcu.outdated_ready.keys()[0].unwrap()) {
|
||||
inline else => |x| @intFromEnum(x),
|
||||
},
|
||||
});
|
||||
return zcu.outdated_ready.keys()[0];
|
||||
const unit = zcu.outdated_ready.keys()[0];
|
||||
log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
|
||||
return unit;
|
||||
}
|
||||
|
||||
// Next, we will see if there is any outdated file root which was not in
|
||||
// `outdated`. This set will be small (number of files changed in this
|
||||
// update), so it's alright for us to just iterate here.
|
||||
for (zcu.outdated_file_root.keys()) |file_decl| {
|
||||
const decl_depender = AnalUnit.wrap(.{ .decl = file_decl });
|
||||
if (zcu.outdated.contains(decl_depender)) {
|
||||
// Since we didn't hit this in the first loop, this Decl must have
|
||||
// pending dependencies, so is ineligible.
|
||||
continue;
|
||||
}
|
||||
if (zcu.potentially_outdated.contains(decl_depender)) {
|
||||
// This Decl's struct may or may not need to be recreated depending
|
||||
// on whether it is outdated. If we analyzed it now, we would have
|
||||
// to assume it was outdated and recreate it!
|
||||
continue;
|
||||
}
|
||||
log.debug("findOutdatedToAnalyze: outdated file root decl '{d}'", .{file_decl});
|
||||
return decl_depender;
|
||||
}
|
||||
// There is no single AnalUnit which is ready for re-analysis. Instead, we must assume that some
|
||||
// Cau with PO dependencies is outdated -- e.g. in the above example we arbitrarily pick one of
|
||||
// A or B. We should select a Cau, since a Cau is definitely responsible for the loop in the
|
||||
// dependency graph (since IES dependencies can't have loops). We should also, of course, not
|
||||
// select a Cau owned by a `comptime` declaration, since you can't depend on those!
|
||||
|
||||
// There is no single AnalUnit which is ready for re-analysis. Instead, we
|
||||
// must assume that some Decl with PO dependencies is outdated - e.g. in the
|
||||
// above example we arbitrarily pick one of A or B. We should select a Decl,
|
||||
// since a Decl is definitely responsible for the loop in the dependency
|
||||
// graph (since you can't depend on a runtime function analysis!).
|
||||
|
||||
// The choice of this Decl could have a big impact on how much total
|
||||
// analysis we perform, since if analysis concludes its tyval is unchanged,
|
||||
// then other PO AnalUnit may be resolved as up-to-date. To hopefully avoid
|
||||
// doing too much work, let's find a Decl which the most things depend on -
|
||||
// the idea is that this will resolve a lot of loops (but this is only a
|
||||
// heuristic).
|
||||
// The choice of this Cau could have a big impact on how much total analysis we perform, since
|
||||
// if analysis concludes any dependencies on its result are up-to-date, then other PO AnalUnit
|
||||
// may be resolved as up-to-date. To hopefully avoid doing too much work, let's find a Decl
|
||||
// which the most things depend on - the idea is that this will resolve a lot of loops (but this
|
||||
// is only a heuristic).
|
||||
|
||||
log.debug("findOutdatedToAnalyze: no trivial ready, using heuristic; {d} outdated, {d} PO", .{
|
||||
zcu.outdated.count(),
|
||||
zcu.potentially_outdated.count(),
|
||||
});
|
||||
|
||||
const Decl = {};
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
var chosen_decl_idx: ?Decl.Index = null;
|
||||
var chosen_decl_dependers: u32 = undefined;
|
||||
var chosen_cau: ?InternPool.Cau.Index = null;
|
||||
var chosen_cau_dependers: u32 = undefined;
|
||||
|
||||
for (zcu.outdated.keys()) |depender| {
|
||||
const decl_index = switch (depender.unwrap()) {
|
||||
.decl => |d| d,
|
||||
.func => continue,
|
||||
};
|
||||
inline for (.{ zcu.outdated.keys(), zcu.potentially_outdated.keys() }) |outdated_units| {
|
||||
for (outdated_units) |unit| {
|
||||
const cau = switch (unit.unwrap()) {
|
||||
.cau => |cau| cau,
|
||||
.func => continue, // a `func` definitely can't be causing the loop so it is a bad choice
|
||||
};
|
||||
const cau_owner = ip.getCau(cau).owner;
|
||||
|
||||
var n: u32 = 0;
|
||||
var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
|
||||
while (it.next()) |_| n += 1;
|
||||
var n: u32 = 0;
|
||||
var it = ip.dependencyIterator(switch (cau_owner.unwrap()) {
|
||||
.none => continue, // there can be no dependencies on this `Cau` so it is a terrible choice
|
||||
.type => |ty| .{ .interned = ty },
|
||||
.nav => |nav| .{ .nav_val = nav },
|
||||
});
|
||||
while (it.next()) |_| n += 1;
|
||||
|
||||
if (chosen_decl_idx == null or n > chosen_decl_dependers) {
|
||||
chosen_decl_idx = decl_index;
|
||||
chosen_decl_dependers = n;
|
||||
if (chosen_cau == null or n > chosen_cau_dependers) {
|
||||
chosen_cau = cau;
|
||||
chosen_cau_dependers = n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (zcu.potentially_outdated.keys()) |depender| {
|
||||
const decl_index = switch (depender.unwrap()) {
|
||||
.decl => |d| d,
|
||||
.func => continue,
|
||||
};
|
||||
|
||||
var n: u32 = 0;
|
||||
var it = zcu.intern_pool.dependencyIterator(.{ .decl_val = decl_index });
|
||||
while (it.next()) |_| n += 1;
|
||||
|
||||
if (chosen_decl_idx == null or n > chosen_decl_dependers) {
|
||||
chosen_decl_idx = decl_index;
|
||||
chosen_decl_dependers = n;
|
||||
if (chosen_cau == null) {
|
||||
for (zcu.outdated.keys(), zcu.outdated.values()) |o, opod| {
|
||||
const func = o.unwrap().func;
|
||||
const nav = zcu.funcInfo(func).owner_nav;
|
||||
std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
|
||||
}
|
||||
for (zcu.potentially_outdated.keys(), zcu.potentially_outdated.values()) |o, opod| {
|
||||
const func = o.unwrap().func;
|
||||
const nav = zcu.funcInfo(func).owner_nav;
|
||||
std.io.getStdErr().writer().print("po: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("findOutdatedToAnalyze: heuristic returned Decl {d} ({d} dependers)", .{
|
||||
chosen_decl_idx.?,
|
||||
chosen_decl_dependers,
|
||||
log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
|
||||
zcu.fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? })),
|
||||
chosen_cau_dependers,
|
||||
});
|
||||
|
||||
return AnalUnit.wrap(.{ .decl = chosen_decl_idx.? });
return AnalUnit.wrap(.{ .cau = chosen_cau.? });
}
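// Illustrative note, not from this commit: a small example of why the heuristic above
// is needed. With mutually recursive declarations such as
//
//     pub const A = struct { b: ?*const B, x: u32 };
//     pub const B = struct { a: ?*const A };
//
// an update can leave A and B each waiting on the other as potentially outdated, so
// nothing reaches `outdated_ready`. The loop above then counts, for every candidate
// `Cau`, how many units depend on its owner (`nav_val` or interned type) and returns
// the most-depended-on one, assuming it outdated so the cycle gets broken.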
|
||||
|
||||
/// During an incremental update, before semantic analysis, call this to flush all values from
|
||||
@ -2506,10 +2561,10 @@ pub fn mapOldZirToNew(
|
||||
});
|
||||
|
||||
// Used as temporary buffers for namespace declaration instructions
|
||||
var old_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
|
||||
defer old_decls.deinit();
|
||||
var new_decls = std.ArrayList(Zir.Inst.Index).init(gpa);
|
||||
defer new_decls.deinit();
|
||||
var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
|
||||
defer old_decls.deinit(gpa);
|
||||
var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
|
||||
defer new_decls.deinit(gpa);
|
||||
|
||||
while (match_stack.popOrNull()) |match_item| {
|
||||
// Match the namespace declaration itself
|
||||
@ -2583,7 +2638,7 @@ pub fn mapOldZirToNew(
|
||||
break :inst unnamed_tests.items[unnamed_test_idx];
|
||||
},
|
||||
_ => inst: {
|
||||
const name_nts = new_decl.name.toString(old_zir).?;
|
||||
const name_nts = new_decl.name.toString(new_zir).?;
|
||||
const name = new_zir.nullTerminatedString(name_nts);
|
||||
if (new_decl.name.isNamedTest(new_zir)) {
|
||||
break :inst named_tests.get(name) orelse continue;
|
||||
@ -2596,11 +2651,11 @@ pub fn mapOldZirToNew(
|
||||
// Match the `declaration` instruction
|
||||
try inst_map.put(gpa, old_decl_inst, new_decl_inst);
|
||||
|
||||
// Find namespace declarations within this declaration
|
||||
try old_zir.findDecls(&old_decls, old_decl_inst);
|
||||
try new_zir.findDecls(&new_decls, new_decl_inst);
|
||||
// Find container type declarations within this declaration
|
||||
try old_zir.findDecls(gpa, &old_decls, old_decl_inst);
|
||||
try new_zir.findDecls(gpa, &new_decls, new_decl_inst);
|
||||
|
||||
// We don't have any smart way of matching up these namespace declarations, so we always
|
||||
// We don't have any smart way of matching up these type declarations, so we always
|
||||
// correlate them based on source order.
|
||||
const n = @min(old_decls.items.len, new_decls.items.len);
|
||||
try match_stack.ensureUnusedCapacity(gpa, n);
|
||||
@ -2699,16 +2754,32 @@ pub fn deleteUnitExports(zcu: *Zcu, anal_unit: AnalUnit) void {
|
||||
pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
|
||||
const gpa = zcu.gpa;
|
||||
|
||||
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
|
||||
var idx = kv.value;
|
||||
unit_refs: {
|
||||
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
|
||||
var idx = kv.value;
|
||||
|
||||
while (idx != std.math.maxInt(u32)) {
|
||||
zcu.free_references.append(gpa, idx) catch {
|
||||
// This space will be reused eventually, so we need not propagate this error.
|
||||
// Just leak it for now, and let GC reclaim it later on.
|
||||
return;
|
||||
};
|
||||
idx = zcu.all_references.items[idx].next;
|
||||
while (idx != std.math.maxInt(u32)) {
|
||||
zcu.free_references.append(gpa, idx) catch {
|
||||
// This space will be reused eventually, so we need not propagate this error.
|
||||
// Just leak it for now, and let GC reclaim it later on.
|
||||
break :unit_refs;
|
||||
};
|
||||
idx = zcu.all_references.items[idx].next;
|
||||
}
|
||||
}
|
||||
|
||||
type_refs: {
|
||||
const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs;
|
||||
var idx = kv.value;
|
||||
|
||||
while (idx != std.math.maxInt(u32)) {
|
||||
zcu.free_type_references.append(gpa, idx) catch {
|
||||
// This space will be reused eventually, so we need not propagate this error.
|
||||
// Just leak it for now, and let GC reclaim it later on.
|
||||
break :type_refs;
|
||||
};
|
||||
idx = zcu.all_type_references.items[idx].next;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2735,6 +2806,29 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
|
||||
gop.value_ptr.* = @intCast(ref_idx);
|
||||
}
|
||||
|
||||
pub fn addTypeReference(zcu: *Zcu, src_unit: AnalUnit, referenced_type: InternPool.Index, ref_src: LazySrcLoc) Allocator.Error!void {
const gpa = zcu.gpa;

try zcu.type_reference_table.ensureUnusedCapacity(gpa, 1);

const ref_idx = zcu.free_type_references.popOrNull() orelse idx: {
_ = try zcu.all_type_references.addOne(gpa);
break :idx zcu.all_type_references.items.len - 1;
};

errdefer comptime unreachable;

const gop = zcu.type_reference_table.getOrPutAssumeCapacity(src_unit);

zcu.all_type_references.items[ref_idx] = .{
.referenced = referenced_type,
.next = if (gop.found_existing) gop.value_ptr.* else std.math.maxInt(u32),
.src = ref_src,
};

gop.value_ptr.* = @intCast(ref_idx);
}
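// Illustrative note, not from this commit: a hypothetical call site in Sema would record
// an edge whenever analysis of `unit` mentions a container type `ty`, e.g.:
//
//     try zcu.addTypeReference(unit, ty, src_loc);
//
// On the next re-analysis of `unit`, `deleteUnitReferences` above walks that unit's list
// and pushes the entry indices onto `free_type_references`, so they can be reused here
// via `popOrNull` instead of growing `all_type_references`.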
|
||||
|
||||
pub fn errorSetBits(mod: *Zcu) u16 {
|
||||
if (mod.error_limit == 0) return 0;
|
||||
return @as(u16, std.math.log2_int(ErrorInt, mod.error_limit)) + 1;
|
||||
@ -3029,28 +3123,215 @@ pub const ResolvedReference = struct {
|
||||
};
|
||||
|
||||
/// Returns a mapping from an `AnalUnit` to where it is referenced.
|
||||
/// TODO: in future, this must be adapted to traverse from roots of analysis. That way, we can
|
||||
/// use the returned map to determine which units have become unreferenced in an incremental update.
|
||||
pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) {
|
||||
/// If the value is `null`, the `AnalUnit` is a root of analysis.
|
||||
/// If an `AnalUnit` is not in the returned map, it is unreferenced.
|
||||
pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) {
|
||||
const gpa = zcu.gpa;
|
||||
const comp = zcu.comp;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
var result: std.AutoHashMapUnmanaged(AnalUnit, ResolvedReference) = .{};
|
||||
var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
|
||||
errdefer result.deinit(gpa);
|
||||
|
||||
var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
|
||||
var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .{};
|
||||
var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
|
||||
defer {
|
||||
checked_types.deinit(gpa);
|
||||
type_queue.deinit(gpa);
|
||||
unit_queue.deinit(gpa);
|
||||
}
|
||||
|
||||
// This is not a sufficient size, but a lower bound.
|
||||
try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
|
||||
|
||||
for (zcu.reference_table.keys(), zcu.reference_table.values()) |referencer, first_ref_idx| {
|
||||
assert(first_ref_idx != std.math.maxInt(u32));
|
||||
var ref_idx = first_ref_idx;
|
||||
while (ref_idx != std.math.maxInt(u32)) {
|
||||
const ref = zcu.all_references.items[ref_idx];
|
||||
const gop = try result.getOrPut(gpa, ref.referenced);
|
||||
if (!gop.found_existing) {
|
||||
gop.value_ptr.* = .{ .referencer = referencer, .src = ref.src };
|
||||
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
|
||||
for (zcu.analysis_roots.slice()) |mod| {
|
||||
// Logic ripped from `Zcu.PerThread.importPkg`.
|
||||
// TODO: this is silly, `Module` should just store a reference to its root `File`.
|
||||
const resolved_path = try std.fs.path.resolve(gpa, &.{
|
||||
mod.root.root_dir.path orelse ".",
|
||||
mod.root.sub_path,
|
||||
mod.root_src_path,
|
||||
});
|
||||
defer gpa.free(resolved_path);
|
||||
const file = zcu.import_table.get(resolved_path).?;
|
||||
const root_ty = zcu.fileRootType(file);
|
||||
if (root_ty == .none) continue;
|
||||
type_queue.putAssumeCapacityNoClobber(root_ty, null);
|
||||
}
|
||||
|
||||
while (true) {
|
||||
if (type_queue.popOrNull()) |kv| {
|
||||
const ty = kv.key;
|
||||
const referencer = kv.value;
|
||||
try checked_types.putNoClobber(gpa, ty, {});
|
||||
|
||||
log.debug("handle type '{}'", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)});
|
||||
|
||||
// If this type has a `Cau` for resolution, it's automatically referenced.
|
||||
const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) {
|
||||
.struct_type => ip.loadStructType(ty).cau,
|
||||
.union_type => ip.loadUnionType(ty).cau.toOptional(),
|
||||
.enum_type => ip.loadEnumType(ty).cau,
|
||||
.opaque_type => .none,
|
||||
else => unreachable,
|
||||
};
|
||||
if (resolution_cau.unwrap()) |cau| {
|
||||
// this should only be referenced by the type
|
||||
const unit = AnalUnit.wrap(.{ .cau = cau });
|
||||
assert(!result.contains(unit));
|
||||
try unit_queue.putNoClobber(gpa, unit, referencer);
|
||||
}
|
||||
ref_idx = ref.next;
|
||||
|
||||
// If this is a union with a generated tag, its tag type is automatically referenced.
|
||||
// We don't add this reference for non-generated tags, as those will already be referenced via the union's `Cau`, with a better source location.
|
||||
if (zcu.typeToUnion(Type.fromInterned(ty))) |union_obj| {
|
||||
const tag_ty = union_obj.enum_tag_ty;
|
||||
if (tag_ty != .none) {
|
||||
if (ip.indexToKey(tag_ty).enum_type == .generated_tag) {
|
||||
if (!checked_types.contains(tag_ty)) {
|
||||
try type_queue.put(gpa, tag_ty, referencer);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Queue any decls within this type which would be automatically analyzed.
|
||||
// Keep in sync with analysis queueing logic in `Zcu.PerThread.ScanDeclIter.scanDecl`.
|
||||
const ns = Type.fromInterned(ty).getNamespace(zcu).unwrap().?;
|
||||
for (zcu.namespacePtr(ns).other_decls.items) |cau| {
|
||||
// These are `comptime` and `test` declarations.
|
||||
// `comptime` decls are always analyzed; `test` declarations are analyzed depending on the test filter.
|
||||
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
|
||||
const file = zcu.fileByIndex(inst_info.file);
|
||||
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
|
||||
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
|
||||
const declaration = zir.getDeclaration(inst_info.inst)[0];
|
||||
const want_analysis = switch (declaration.name) {
|
||||
.@"usingnamespace" => unreachable,
|
||||
.@"comptime" => true,
|
||||
else => a: {
if (!comp.config.is_test) break :a false;
if (file.mod != zcu.main_mod) break :a false;
if (declaration.name.isNamedTest(zir) or declaration.name == .decltest) {
const nav = ip.getCau(cau).owner.unwrap().nav;
const fqn_slice = ip.getNav(nav).fqn.toSlice(ip);
for (comp.test_filters) |test_filter| {
if (std.mem.indexOf(u8, fqn_slice, test_filter) != null) break;
} else break :a false;
}
break :a true;
},
};
if (want_analysis) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
for (zcu.namespacePtr(ns).pub_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
for (zcu.namespacePtr(ns).priv_decls.keys()) |nav| {
// These are named declarations. They are analyzed only if marked `export`.
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const inst_info = ip.getCau(cau).zir_index.resolveFull(ip) orelse continue;
const file = zcu.fileByIndex(inst_info.file);
// If the file failed AstGen, the TrackedInst refers to the old ZIR.
const zir = if (file.status == .success_zir) file.zir else file.prev_zir.?.*;
const declaration = zir.getDeclaration(inst_info.inst)[0];
if (declaration.flags.is_export) {
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) {
log.debug("type '{}': ref cau %{}", .{
Type.fromInterned(ty).containerTypeName(ip).fmt(ip),
@intFromEnum(inst_info.inst),
});
try unit_queue.put(gpa, unit, referencer);
}
}
}
// Incremental compilation does not support `usingnamespace`.
// These are only included to keep good reference traces in non-incremental updates.
for (zcu.namespacePtr(ns).pub_usingnamespace.items) |nav| {
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
}
for (zcu.namespacePtr(ns).priv_usingnamespace.items) |nav| {
const cau = ip.getNav(nav).analysis_owner.unwrap().?;
const unit = AnalUnit.wrap(.{ .cau = cau });
if (!result.contains(unit)) try unit_queue.put(gpa, unit, referencer);
}
continue;
}
if (unit_queue.popOrNull()) |kv| {
const unit = kv.key;
try result.putNoClobber(gpa, unit, kv.value);

log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});

if (zcu.reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_references.items[ref_idx];
if (!result.contains(ref.referenced)) {
log.debug("unit '{}': ref unit '{}'", .{
zcu.fmtAnalUnit(unit),
zcu.fmtAnalUnit(ref.referenced),
});
try unit_queue.put(gpa, ref.referenced, .{
.referencer = unit,
.src = ref.src,
});
}
ref_idx = ref.next;
}
}
if (zcu.type_reference_table.get(unit)) |first_ref_idx| {
assert(first_ref_idx != std.math.maxInt(u32));
var ref_idx = first_ref_idx;
while (ref_idx != std.math.maxInt(u32)) {
const ref = zcu.all_type_references.items[ref_idx];
if (!checked_types.contains(ref.referenced)) {
log.debug("unit '{}': ref type '{}'", .{
zcu.fmtAnalUnit(unit),
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
});
try type_queue.put(gpa, ref.referenced, .{
.referencer = unit,
.src = ref.src,
});
}
ref_idx = ref.next;
}
}
continue;
}
break;
}

return result;
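The loop above is a worklist traversal of the reference graph: each popped type or analysis unit is recorded together with the reference that reached it, and everything it references is queued until a fixpoint is reached. A minimal, self-contained sketch of that pattern, using hypothetical u32 unit ids and a plain edge map rather than the compiler's actual types:

const std = @import("std");

/// Hypothetical sketch: collects every unit reachable from `roots` by
/// repeatedly popping a unit and enqueueing its references, until the
/// queue is empty.
fn transitiveClosure(
    gpa: std.mem.Allocator,
    roots: []const u32,
    edges: std.AutoHashMap(u32, []const u32),
) !std.AutoArrayHashMap(u32, void) {
    var visited = std.AutoArrayHashMap(u32, void).init(gpa);
    var queue = std.ArrayList(u32).init(gpa);
    defer queue.deinit();
    try queue.appendSlice(roots);
    while (queue.popOrNull()) |unit| {
        const gop = try visited.getOrPut(unit);
        if (gop.found_existing) continue; // already handled
        const referenced = edges.get(unit) orelse continue;
        try queue.appendSlice(referenced); // duplicates are filtered above
    }
    return visited;
}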
@ -3093,7 +3374,7 @@ pub fn navSrcLoc(zcu: *const Zcu, nav_index: InternPool.Nav.Index) LazySrcLoc {

pub fn navSrcLine(zcu: *Zcu, nav_index: InternPool.Nav.Index) u32 {
const ip = &zcu.intern_pool;
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip);
const inst_info = ip.getNav(nav_index).srcInst(ip).resolveFull(ip).?;
const zir = zcu.fileByIndex(inst_info.file).zir;
const inst = zir.instructions.get(@intFromEnum(inst_info.inst));
assert(inst.tag == .declaration);
@ -3106,7 +3387,7 @@ pub fn navValue(zcu: *const Zcu, nav_index: InternPool.Nav.Index) Value {

pub fn navFileScopeIndex(zcu: *Zcu, nav: InternPool.Nav.Index) File.Index {
const ip = &zcu.intern_pool;
return ip.getNav(nav).srcInst(ip).resolveFull(ip).file;
return ip.getNav(nav).srcInst(ip).resolveFile(ip);
}

pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
@ -3115,6 +3396,75 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {

pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File {
const ip = &zcu.intern_pool;
const file_index = ip.getCau(cau).zir_index.resolveFull(ip).file;
const file_index = ip.getCau(cau).zir_index.resolveFile(ip);
return zcu.fileByIndex(file_index);
}

pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) {
return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) {
return .{ .data = .{ .dependee = d, .zcu = zcu } };
}

fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.unit.unwrap()) {
.cau => |cau_index| {
const cau = ip.getCau(cau_index);
switch (cau.owner.unwrap()) {
.nav => |nav| return writer.print("cau(decl='{}')", .{ip.getNav(nav).fqn.fmt(ip)}),
.type => |ty| return writer.print("cau(ty='{}')", .{Type.fromInterned(ty).containerTypeName(ip).fmt(ip)}),
.none => if (cau.zir_index.resolveFull(ip)) |resolved| {
const file_path = zcu.fileByIndex(resolved.file).sub_file_path;
return writer.print("cau(inst=('{s}', %{}))", .{ file_path, @intFromEnum(resolved.inst) });
} else {
return writer.writeAll("cau(inst=<lost>)");
},
}
},
.func => |func| {
const nav = zcu.funcInfo(func).owner_nav;
return writer.print("func('{}')", .{ip.getNav(nav).fqn.fmt(ip)});
},
}
}
fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.dependee) {
.src_hash => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("inst(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).sub_file_path;
return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
},
.nav_val => |nav| {
const fqn = ip.getNav(nav).fqn;
return writer.print("nav('{}')", .{fqn.fmt(ip)});
},
.interned => |ip_index| switch (ip.indexToKey(ip_index)) {
.struct_type, .union_type, .enum_type => return writer.print("type('{}')", .{Type.fromInterned(ip_index).containerTypeName(ip).fmt(ip)}),
.func => |f| return writer.print("ies('{}')", .{ip.getNav(f.owner_nav).fqn.fmt(ip)}),
else => unreachable,
},
.namespace => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("namespace(<lost>)");
};
const file_path = zcu.fileByIndex(info.file).sub_file_path;
return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
},
.namespace_name => |k| {
const info = k.namespace.resolveFull(ip) orelse {
return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
};
const file_path = zcu.fileByIndex(info.file).sub_file_path;
return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) });
},
}
}
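For illustration, these formatters produce compact debug strings: analysis units render as cau(decl='foo.bar'), cau(ty='foo.S'), cau(inst=('main.zig', %12)), or func('foo.main'), and dependees render as inst('main.zig', %42), nav('foo.bar'), type('foo.S'), ies('foo.main'), or namespace('main.zig', %3, 'bar'). The names and indices here are made up; the shapes follow the print calls above.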
File diff suppressed because it is too large
@ -98,7 +98,7 @@ pub fn generateLazyFunction(
debug_output: DebugInfoOutput,
) CodeGenError!Result {
const zcu = pt.zcu;
const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(&zcu.intern_pool).file;
const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool);
const target = zcu.fileByIndex(file).mod.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
@ -2585,7 +2585,7 @@ pub fn genTypeDecl(
const ty = Type.fromInterned(index);
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(';');
const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file;
const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip);
if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{
ty.containerTypeName(ip).fmt(ip),
});
@ -1959,7 +1959,7 @@ pub const Object = struct {
);
}

const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
@ -2137,7 +2137,7 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);

const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
@ -2772,7 +2772,7 @@ pub const Object = struct {
fn makeEmptyNamespaceDebugType(o: *Object, ty: Type) !Builder.Metadata {
const zcu = o.pt.zcu;
const ip = &zcu.intern_pool;
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFull(ip).file);
const file = try o.getDebugFile(ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip));
const scope = if (ty.getParentNamespace(zcu).unwrap()) |parent_namespace|
try o.namespaceToDebugScope(parent_namespace)
else
@ -78,7 +78,13 @@ fn dumpStatusReport() !void {
const block: *Sema.Block = anal.block;
const zcu = anal.sema.pt.zcu;

const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu);
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.writeAll("Analyzing lost instruction in file '");
try writeFilePath(file, stderr);
try stderr.writeAll("'. This should not happen!\n\n");
return;
};

try stderr.writeAll("Analyzing ");
try writeFilePath(file, stderr);
@ -104,7 +110,13 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
try stderr.writeAll(" in ");
const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu);
const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
try writeFilePath(cur_block_file, stderr);
try stderr.writeAll("\n > [lost instruction; this should not happen]\n");
parent = curr.parent;
continue;
};
try writeFilePath(cur_block_file, stderr);
try stderr.writeAll("\n > ");
print_zir.renderSingleInstruction(
@ -786,7 +786,7 @@ const Entry = struct {
const ip = &zcu.intern_pool;
for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| {
const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index|
dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod) catch unreachable
dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod) catch unreachable
else
.main;
if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry)
@ -796,7 +796,7 @@ const Entry = struct {
});
}
for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| {
const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFull(ip).file).mod) catch unreachable;
const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod) catch unreachable;
if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry)
log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
}
@ -1201,7 +1201,7 @@ pub const WipNav = struct {
const ip = &zcu.intern_pool;
const maybe_inst_index = ty.typeDeclInst(zcu);
const unit = if (maybe_inst_index) |inst_index|
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFull(ip).file).mod)
try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod)
else
.main;
const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern());
@ -1539,7 +1539,7 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
const nav = ip.getNav(nav_index);
log.debug("initWipNav({})", .{nav.fqn.fmt(ip)});

const inst_info = nav.srcInst(ip).resolveFull(ip);
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);

const unit = try dwarf.getUnit(file.mod);
@ -1874,7 +1874,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
const nav = ip.getNav(nav_index);
log.debug("updateComptimeNav({})", .{nav.fqn.fmt(ip)});

const inst_info = nav.srcInst(ip).resolveFull(ip);
const inst_info = nav.srcInst(ip).resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
assert(file.zir_loaded);
const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
@ -1937,7 +1937,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip);
const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_struct;

const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@ -2053,7 +2053,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip);
const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_enum;

const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@ -2127,7 +2127,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_union.zir_index.resolveFull(ip);
const type_inst_info = loaded_union.zir_index.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_union;

const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@ -2240,7 +2240,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
};
break :value_inst value_inst;
};
const type_inst_info = loaded_opaque.zir_index.resolveFull(ip);
const type_inst_info = loaded_opaque.zir_index.resolveFull(ip).?;
if (type_inst_info.inst != value_inst) break :decl_opaque;

const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern());
@ -2704,7 +2704,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const ty = Type.fromInterned(type_index);
log.debug("updateContainerType({}({d}))", .{ ty.fmt(pt), @intFromEnum(type_index) });

const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip);
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
if (inst_info.inst == .main_struct_inst) {
const unit = try dwarf.getUnit(file.mod);
@ -2922,7 +2922,7 @@ pub fn updateNavLineNumber(dwarf: *Dwarf, zcu: *Zcu, nav_index: InternPool.Nav.I
const ip = &zcu.intern_pool;

const zir_index = ip.getCau(ip.getNav(nav_index).analysis_owner.unwrap() orelse return).zir_index;
const inst_info = zir_index.resolveFull(ip);
const inst_info = zir_index.resolveFull(ip).?;
assert(inst_info.inst != .main_struct_inst);
const file = zcu.fileByIndex(inst_info.file);
@ -3257,9 +3257,12 @@ fn buildOutputType(
else => false,
};

const incremental = opt_incremental orelse false;

const disable_lld_caching = !output_to_cache;

const cache_mode: Compilation.CacheMode = b: {
if (incremental) break :b .incremental;
if (disable_lld_caching) break :b .incremental;
if (!create_module.resolved_options.have_zcu) break :b .whole;
@ -3272,8 +3275,6 @@ fn buildOutputType(
break :b .incremental;
};

const incremental = opt_incremental orelse false;

process.raiseFileDescriptorLimit();

var file_system_inputs: std.ArrayListUnmanaged(u8) = .{};
test/incremental/add_decl (new file)
@ -0,0 +1,59 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(foo);
}
const foo = "good morning\n";
#expect_stdout="good morning\n"

#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good morning\n"

#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good evening\n"

#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_error=ignored

#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
#expect_stdout="good night\n"

#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(qux);
}
const qux = "good night\n";
#expect_stdout="good night\n"
test/incremental/add_decl_namespaced (new file)
@ -0,0 +1,59 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().foo);
}
const foo = "good morning\n";
#expect_stdout="good morning\n"

#update=add new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().foo);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good morning\n"

#update=reference new declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().bar);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_stdout="good evening\n"

#update=reference missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
#expect_error=ignored

#update=add missing declaration
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const foo = "good morning\n";
const bar = "good evening\n";
const qux = "good night\n";
#expect_stdout="good night\n"

#update=remove unused declarations
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(@This().qux);
}
const qux = "good night\n";
#expect_stdout="good night\n"
test/incremental/delete_comptime_decls (new file)
@ -0,0 +1,38 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
pub fn main() void {}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const src_slice: [:0]u8 = &array;
const slice = src_slice[2..6];
_ = slice;
}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const slice = array[2..6];
_ = slice;
}
comptime {
var array = [_]u8{ 1, 2, 3, 4 };
const slice = array[2..5];
_ = slice;
}
comptime {
var array = [_:0]u8{ 1, 2, 3, 4 };
const slice = array[3..2];
_ = slice;
}
#expect_error=ignored

#update=delete and modify comptime decls
#file=main.zig
pub fn main() void {}
comptime {
const x: [*c]u8 = null;
var runtime_len: usize = undefined;
runtime_len = 0;
const y = x[0..runtime_len];
_ = y;
}
#expect_error=ignored
test/incremental/unreferenced_error (new file)
@ -0,0 +1,38 @@
#target=x86_64-linux
#update=initial version
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = "Hello, World!\n";
#expect_stdout="Hello, World!\n"

#update=introduce compile error
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = @compileError("bad a");
#expect_error=ignored

#update=remove error reference
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(b);
}
const a = @compileError("bad a");
const b = "Hi there!\n";
#expect_stdout="Hi there!\n"

#update=introduce and remove reference to error
#file=main.zig
const std = @import("std");
pub fn main() !void {
try std.io.getStdOut().writeAll(a);
}
const a = "Back to a\n";
const b = @compileError("bad b");
#expect_stdout="Back to a\n"
@ -2,14 +2,55 @@ const std = @import("std");
const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;

const usage = "usage: incr-check <zig binary path> <input file> [--zig-lib-dir lib] [--debug-zcu] [--emit none|bin|c] [--zig-cc-binary /path/to/zig]";

const EmitMode = enum {
none,
bin,
c,
};
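Putting the usage string and EmitMode together, a typical invocation might look like the following (the zig and lib paths are illustrative, not part of this change):

incr-check ./zig-out/bin/zig test/incremental/add_decl --emit c --zig-lib-dir lib --debug-zcu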
pub fn main() !void {
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();

const args = try std.process.argsAlloc(arena);
const zig_exe = args[1];
const input_file_name = args[2];
var opt_zig_exe: ?[]const u8 = null;
var opt_input_file_name: ?[]const u8 = null;
var opt_lib_dir: ?[]const u8 = null;
var opt_cc_zig: ?[]const u8 = null;
var emit: EmitMode = .bin;
var debug_zcu = false;

var arg_it = try std.process.argsWithAllocator(arena);
_ = arg_it.skip();
while (arg_it.next()) |arg| {
if (arg.len > 0 and arg[0] == '-') {
if (std.mem.eql(u8, arg, "--emit")) {
const emit_str = arg_it.next() orelse fatal("expected arg after '--emit'\n{s}", .{usage});
emit = std.meta.stringToEnum(EmitMode, emit_str) orelse
fatal("invalid emit mode '{s}'\n{s}", .{ emit_str, usage });
} else if (std.mem.eql(u8, arg, "--zig-lib-dir")) {
opt_lib_dir = arg_it.next() orelse fatal("expected arg after '--zig-lib-dir'\n{s}", .{usage});
} else if (std.mem.eql(u8, arg, "--debug-zcu")) {
debug_zcu = true;
} else if (std.mem.eql(u8, arg, "--zig-cc-binary")) {
opt_cc_zig = arg_it.next() orelse fatal("expect arg after '--zig-cc-binary'\n{s}", .{usage});
} else {
fatal("unknown option '{s}'\n{s}", .{ arg, usage });
}
continue;
}
if (opt_zig_exe == null) {
opt_zig_exe = arg;
} else if (opt_input_file_name == null) {
opt_input_file_name = arg;
} else {
fatal("unknown argument '{s}'\n{s}", .{ arg, usage });
}
}
const zig_exe = opt_zig_exe orelse fatal("missing path to zig\n{s}", .{usage});
const input_file_name = opt_input_file_name orelse fatal("missing input file\n{s}", .{usage});

const input_file_bytes = try std.fs.cwd().readFileAlloc(arena, input_file_name, std.math.maxInt(u32));
const case = try Case.parse(arena, input_file_bytes);
@ -24,13 +65,18 @@ pub fn main() !void {
const child_prog_node = prog_node.start("zig build-exe", 0);
defer child_prog_node.end();

var child = std.process.Child.init(&.{
// Convert incr-check-relative path to subprocess-relative path.
try std.fs.path.relative(arena, tmp_dir_path, zig_exe),
// Convert paths to be relative to the cwd of the subprocess.
const resolved_zig_exe = try std.fs.path.relative(arena, tmp_dir_path, zig_exe);
const opt_resolved_lib_dir = if (opt_lib_dir) |lib_dir|
try std.fs.path.relative(arena, tmp_dir_path, lib_dir)
else
null;

var child_args: std.ArrayListUnmanaged([]const u8) = .{};
try child_args.appendSlice(arena, &.{
resolved_zig_exe,
"build-exe",
case.root_source_file,
"-fno-llvm",
"-fno-lld",
"-fincremental",
"-target",
case.target_query,
@ -39,8 +85,20 @@ pub fn main() !void {
"--global-cache-dir",
".global_cache",
"--listen=-",
}, arena);
});
if (opt_resolved_lib_dir) |resolved_lib_dir| {
try child_args.appendSlice(arena, &.{ "--zig-lib-dir", resolved_lib_dir });
}
switch (emit) {
.bin => try child_args.appendSlice(arena, &.{ "-fno-llvm", "-fno-lld" }),
.none => try child_args.append(arena, "-fno-emit-bin"),
.c => try child_args.appendSlice(arena, &.{ "-ofmt=c", "-lc" }),
}
if (debug_zcu) {
try child_args.appendSlice(arena, &.{ "--debug-log", "zcu" });
}

var child = std.process.Child.init(child_args.items, arena);
child.stdin_behavior = .Pipe;
child.stdout_behavior = .Pipe;
child.stderr_behavior = .Pipe;
@ -48,12 +106,33 @@ pub fn main() !void {
child.cwd_dir = tmp_dir;
child.cwd = tmp_dir_path;

var cc_child_args: std.ArrayListUnmanaged([]const u8) = .{};
if (emit == .c) {
const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe|
try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe)
else
resolved_zig_exe;

try cc_child_args.appendSlice(arena, &.{
resolved_cc_zig_exe,
"cc",
"-target",
case.target_query,
"-I",
opt_resolved_lib_dir orelse fatal("'--zig-lib-dir' required when using '--emit c'", .{}),
"-o",
});
}

var eval: Eval = .{
.arena = arena,
.case = case,
.tmp_dir = tmp_dir,
.tmp_dir_path = tmp_dir_path,
.child = &child,
.allow_stderr = debug_zcu,
.emit = emit,
.cc_child_args = &cc_child_args,
};

try child.spawn();
@ -65,9 +144,16 @@ pub fn main() !void {
defer poller.deinit();

for (case.updates) |update| {
var update_node = prog_node.start(update.name, 0);
defer update_node.end();

if (debug_zcu) {
std.log.info("=== START UPDATE '{s}' ===", .{update.name});
}

eval.write(update);
try eval.requestUpdate();
try eval.check(&poller, update);
try eval.check(&poller, update, update_node);
}

try eval.end(&poller);
@ -81,6 +167,11 @@ const Eval = struct {
tmp_dir: std.fs.Dir,
tmp_dir_path: []const u8,
child: *std.process.Child,
allow_stderr: bool,
emit: EmitMode,
/// When `emit == .c`, this contains the first few arguments to `zig cc` to build the generated binary.
/// The arguments `out.c in.c` must be appended before spawning the subprocess.
cc_child_args: *std.ArrayListUnmanaged([]const u8),

const StreamEnum = enum { stdout, stderr };
const Poller = std.io.Poller(StreamEnum);
@ -102,7 +193,7 @@ const Eval = struct {
}
}

fn check(eval: *Eval, poller: *Poller, update: Case.Update) !void {
fn check(eval: *Eval, poller: *Poller, update: Case.Update, prog_node: std.Progress.Node) !void {
const arena = eval.arena;
const Header = std.zig.Server.Message.Header;
const stdout = poller.fifo(.stdout);
@ -136,9 +227,18 @@ const Eval = struct {
};
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
if (eval.allow_stderr) {
std.log.info("error_bundle included stderr:\n{s}", .{stderr_data});
} else {
fatal("error_bundle included unexpected stderr:\n{s}", .{stderr_data});
}
}
if (result_error_bundle.errorMessageCount() == 0) {
// Empty bundle indicates successful update in a `-fno-emit-bin` build.
try eval.checkSuccessOutcome(update, null, prog_node);
} else {
try eval.checkErrorOutcome(update, result_error_bundle);
}
try eval.checkErrorOutcome(update, result_error_bundle);
// This message indicates the end of the update.
stdout.discard(body.len);
return;
@ -150,9 +250,13 @@ const Eval = struct {
const result_binary = try arena.dupe(u8, body[@sizeOf(EbpHdr)..]);
if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data});
if (eval.allow_stderr) {
std.log.info("emit_bin_path included stderr:\n{s}", .{stderr_data});
} else {
fatal("emit_bin_path included unexpected stderr:\n{s}", .{stderr_data});
}
}
try eval.checkSuccessOutcome(update, result_binary);
try eval.checkSuccessOutcome(update, result_binary, prog_node);
// This message indicates the end of the update.
stdout.discard(body.len);
return;
@ -166,7 +270,11 @@ const Eval = struct {

if (stderr.readableLength() > 0) {
const stderr_data = try stderr.toOwnedSlice();
fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
if (eval.allow_stderr) {
std.log.info("update '{s}' included stderr:\n{s}", .{ update.name, stderr_data });
} else {
fatal("update '{s}' failed:\n{s}", .{ update.name, stderr_data });
}
}

waitChild(eval.child);
@ -191,12 +299,28 @@ const Eval = struct {
}
}

fn checkSuccessOutcome(eval: *Eval, update: Case.Update, binary_path: []const u8) !void {
fn checkSuccessOutcome(eval: *Eval, update: Case.Update, opt_emitted_path: ?[]const u8, prog_node: std.Progress.Node) !void {
switch (update.outcome) {
.unknown => return,
.compile_errors => fatal("expected compile errors but compilation incorrectly succeeded", .{}),
.stdout, .exit_code => {},
}
const emitted_path = opt_emitted_path orelse {
std.debug.assert(eval.emit == .none);
return;
};

const binary_path = switch (eval.emit) {
.none => unreachable,
.bin => emitted_path,
.c => bin: {
const rand_int = std.crypto.random.int(u64);
const out_bin_name = "./out_" ++ std.fmt.hex(rand_int);
try eval.buildCOutput(update, emitted_path, out_bin_name, prog_node);
break :bin out_bin_name;
},
};

const result = std.process.Child.run(.{
.allocator = eval.arena,
.argv = &.{binary_path},
@ -266,6 +390,50 @@ const Eval = struct {
fatal("unexpected stderr:\n{s}", .{stderr_data});
}
}

fn buildCOutput(eval: *Eval, update: Case.Update, c_path: []const u8, out_path: []const u8, prog_node: std.Progress.Node) !void {
std.debug.assert(eval.cc_child_args.items.len > 0);

const child_prog_node = prog_node.start("build cbe output", 0);
defer child_prog_node.end();

try eval.cc_child_args.appendSlice(eval.arena, &.{ out_path, c_path });
defer eval.cc_child_args.items.len -= 2;

const result = std.process.Child.run(.{
.allocator = eval.arena,
.argv = eval.cc_child_args.items,
.cwd_dir = eval.tmp_dir,
.cwd = eval.tmp_dir_path,
.progress_node = child_prog_node,
}) catch |err| {
fatal("update '{s}': failed to spawn zig cc for '{s}': {s}", .{
update.name, c_path, @errorName(err),
});
};
switch (result.term) {
.Exited => |code| if (code != 0) {
if (result.stderr.len != 0) {
std.log.err("update '{s}': zig cc stderr:\n{s}", .{
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' failed with code {d}", .{
update.name, c_path, code,
});
},
.Signal, .Stopped, .Unknown => {
if (result.stderr.len != 0) {
std.log.err("update '{s}': zig cc stderr:\n{s}", .{
update.name, result.stderr,
});
}
fatal("update '{s}': zig cc for '{s}' terminated unexpectedly", .{
update.name, c_path,
});
},
}
}
};

const Case = struct {
@ -357,6 +525,11 @@ const Case = struct {
fatal("line {d}: bad string literal: {s}", .{ line_n, @errorName(err) });
},
};
} else if (std.mem.eql(u8, key, "expect_error")) {
if (updates.items.len == 0) fatal("line {d}: expect directive before update", .{line_n});
const last_update = &updates.items[updates.items.len - 1];
if (last_update.outcome != .unknown) fatal("line {d}: conflicting expect directive", .{line_n});
last_update.outcome = .{ .compile_errors = &.{} };
} else {
fatal("line {d}: unrecognized key '{s}'", .{ line_n, key });
}
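The case manifests above are driven by plain #key=value directives (#target, #update, #file, #expect_stdout, #expect_error). A minimal sketch of splitting one such line, as a hypothetical stand-alone helper rather than the parser in this file:

const std = @import("std");

/// Hypothetical helper: splits a manifest directive such as "#expect_stdout=\"hi\""
/// into its key ("expect_stdout") and raw value ("\"hi\""); returns null for
/// lines that are not directives.
fn splitDirective(line: []const u8) ?struct { key: []const u8, value: []const u8 } {
    if (line.len == 0 or line[0] != '#') return null;
    const eq = std.mem.indexOfScalar(u8, line, '=') orelse return null;
    return .{ .key = line[1..eq], .value = line[eq + 1 ..] };
}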