Mirror of https://github.com/ziglang/zig.git, synced 2024-11-27 07:32:44 +00:00
frontend: incremental compilation progress
Another big commit, sorry! This commit makes all fixes necessary for incremental updates of the compiler itself (specifically, adding a breakpoint to `zirCompileLog`) to succeed, at least on the frontend.

The biggest change here is a reform to how types are handled. It works like this:

* When a type is first created in `zirStructDecl` etc., its namespace is scanned. If the type requires resolution, an `interned` dependency is declared for the containing `AnalUnit`.
* `zirThis` also declares an `interned` dependency for its `AnalUnit` on the namespace's owner type.
* If the type's namespace changes, the surrounding source declaration changes hash, so `zirStructDecl` etc. will be hit again. We check whether the namespace has been scanned this generation, and re-scan it if not.
* Namespace lookups also check whether the namespace in question requires a re-scan based on the generation, because there is no guarantee that the `zirStructDecl` is re-analyzed before the namespace lookup is re-analyzed.
* If a type's structure (essentially its fields) changes, the type's `Cau` is considered outdated. When the type is re-analyzed due to being outdated, or the `zirStructDecl` is re-analyzed by being transitively outdated, or a corresponding `zirThis` is re-analyzed by being transitively outdated, the struct type is recreated at a new `InternPool` index. The namespace's owner is updated (but not re-scanned, since that is handled by the mechanisms above), and the old type, while remaining a valid `Index`, is removed from the map metadata so it will never be found by lookups. `zirStructDecl` and `zirThis` store an `interned` dependency on the *new* type.
This commit is contained in:
parent 65cbdefe4d
commit 84c2ebd6c6
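The generation-based re-scan described in the commit message is easier to see in a small sketch. The following is illustrative only: the types, fields, and signatures here are simplified stand-ins, not the compiler's real data structures; only the name `ensureNamespaceUpToDate` mirrors a function that appears in the diff below, and the real one has a very different signature.

```zig
// Illustrative sketch only: `Zcu`, `Namespace`, and the fields below are simplified
// stand-ins for the real compiler structures in src/Zcu.zig.
const std = @import("std");

const Namespace = struct {
    /// The update ("generation") on which this namespace was last scanned.
    generation: u32,
    decl_count: u32,
};

const Zcu = struct {
    /// Incremented once per incremental update.
    generation: u32 = 0,
    namespaces: std.ArrayListUnmanaged(Namespace) = .{},

    /// Both the type declaration (`zirStructDecl` etc.) and namespace lookups would call
    /// something like this, since neither is guaranteed to be re-analyzed first on an update.
    fn ensureNamespaceUpToDate(zcu: *Zcu, index: usize) void {
        const ns = &zcu.namespaces.items[index];
        if (ns.generation == zcu.generation) return; // already re-scanned this update
        // ... re-scan the namespace's declarations here (the real code calls `scanNamespace`) ...
        ns.generation = zcu.generation;
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var zcu: Zcu = .{};
    defer zcu.namespaces.deinit(gpa);
    try zcu.namespaces.append(gpa, .{ .generation = 0, .decl_count = 3 });

    zcu.generation += 1; // a new incremental update begins
    zcu.ensureNamespaceUpToDate(0); // first caller this update performs the re-scan
    zcu.ensureNamespaceUpToDate(0); // later callers see it is already up to date
    std.debug.print("namespace last scanned on generation {d}\n", .{zcu.namespaces.items[0].generation});
}
```

Comparing a per-namespace generation against a global update counter keeps re-scans to at most one per namespace per update, regardless of which analysis path reaches the namespace first.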
@@ -3569,6 +3569,8 @@ pub fn performAllTheWork(
mod.sema_prog_node = std.Progress.Node.none;
mod.codegen_prog_node.end();
mod.codegen_prog_node = std.Progress.Node.none;

mod.generation += 1;
};
try comp.performAllTheWorkInner(main_progress_node);
if (!InternPool.single_threaded) if (comp.codegen_work.job_error) |job_error| return job_error;
@@ -684,10 +684,6 @@ pub fn dependencyIterator(ip: *const InternPool, dependee: Dependee) DependencyI
.ip = ip,
.next_entry = .none,
};
if (ip.dep_entries.items[@intFromEnum(first_entry)].depender == .none) return .{
.ip = ip,
.next_entry = .none,
};
return .{
.ip = ip,
.next_entry = first_entry.toOptional(),
@@ -724,7 +720,6 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend

if (gop.found_existing and ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].depender == .none) {
// Dummy entry, so we can reuse it rather than allocating a new one!
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].next = .none;
break :new_index gop.value_ptr.*;
}
@@ -732,7 +727,12 @@ pub fn addDependency(ip: *InternPool, gpa: Allocator, depender: AnalUnit, depend
const new_index: DepEntry.Index, const ptr = if (ip.free_dep_entries.popOrNull()) |new_index| new: {
break :new .{ new_index, &ip.dep_entries.items[@intFromEnum(new_index)] };
} else .{ @enumFromInt(ip.dep_entries.items.len), ip.dep_entries.addOneAssumeCapacity() };
ptr.next = if (gop.found_existing) gop.value_ptr.*.toOptional() else .none;
if (gop.found_existing) {
ptr.next = gop.value_ptr.*.toOptional();
ip.dep_entries.items[@intFromEnum(gop.value_ptr.*)].prev = new_index.toOptional();
} else {
ptr.next = .none;
}
gop.value_ptr.* = new_index;
break :new_index new_index;
},
@@ -754,10 +754,9 @@ pub const NamespaceNameKey = struct {
};

pub const DepEntry = extern struct {
/// If null, this is a dummy entry - all other fields are `undefined`. It is
/// the first and only entry in one of `intern_pool.*_deps`, and does not
/// appear in any list by `first_dependency`, but is not in
/// `free_dep_entries` since `*_deps` stores a reference to it.
/// If null, this is a dummy entry. `next_dependee` is undefined. This is the first
/// entry in one of `*_deps`, and does not appear in any list by `first_dependency`,
/// but is not in `free_dep_entries` since `*_deps` stores a reference to it.
depender: AnalUnit.Optional,
/// Index into `dep_entries` forming a doubly linked list of all dependencies on this dependee.
/// Used to iterate all dependers for a given dependee during an update.
@@ -2689,7 +2688,12 @@ pub const Key = union(enum) {

.variable => |a_info| {
const b_info = b.variable;
return a_info.owner_nav == b_info.owner_nav;
return a_info.owner_nav == b_info.owner_nav and
a_info.ty == b_info.ty and
a_info.init == b_info.init and
a_info.lib_name == b_info.lib_name and
a_info.is_threadlocal == b_info.is_threadlocal and
a_info.is_weak_linkage == b_info.is_weak_linkage;
},
.@"extern" => |a_info| {
const b_info = b.@"extern";
@@ -8016,6 +8020,10 @@ pub const UnionTypeInit = struct {
zir_index: TrackedInst.Index,
captures: []const CaptureValue,
},
declared_owned_captures: struct {
zir_index: TrackedInst.Index,
captures: CaptureValue.Slice,
},
reified: struct {
zir_index: TrackedInst.Index,
type_hash: u64,
@@ -8037,6 +8045,10 @@ pub fn getUnionType(
.zir_index = d.zir_index,
.captures = .{ .external = d.captures },
} },
.declared_owned_captures => |d| .{ .declared = .{
.zir_index = d.zir_index,
.captures = .{ .owned = d.captures },
} },
.reified => |r| .{ .reified = .{
.zir_index = r.zir_index,
.type_hash = r.type_hash,
@@ -8060,7 +8072,7 @@ pub fn getUnionType(
// TODO: fmt bug
// zig fmt: off
switch (ini.key) {
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
.reified => 2, // type_hash: PackedU64
} +
// zig fmt: on
@@ -8069,7 +8081,10 @@ pub fn getUnionType(

const extra_index = addExtraAssumeCapacity(extra, Tag.TypeUnion{
.flags = .{
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
.any_captures = switch (ini.key) {
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
.reified => false,
},
.runtime_tag = ini.flags.runtime_tag,
.any_aligned_fields = ini.flags.any_aligned_fields,
.layout = ini.flags.layout,
@@ -8078,7 +8093,10 @@ pub fn getUnionType(
.assumed_runtime_bits = ini.flags.assumed_runtime_bits,
.assumed_pointer_aligned = ini.flags.assumed_pointer_aligned,
.alignment = ini.flags.alignment,
.is_reified = ini.key == .reified,
.is_reified = switch (ini.key) {
.declared, .declared_owned_captures => false,
.reified => true,
},
},
.fields_len = ini.fields_len,
.size = std.math.maxInt(u32),
@@ -8102,6 +8120,10 @@ pub fn getUnionType(
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
},
.declared_owned_captures => |d| if (d.captures.len != 0) {
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
},
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
}
@ -8199,6 +8221,10 @@ pub const StructTypeInit = struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
declared_owned_captures: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: CaptureValue.Slice,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
type_hash: u64,
|
||||
@ -8220,6 +8246,10 @@ pub fn getStructType(
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.declared_owned_captures => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .owned = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = r.type_hash,
|
||||
@ -8251,7 +8281,7 @@ pub fn getStructType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -8267,10 +8297,16 @@ pub fn getStructType(
|
||||
.backing_int_ty = .none,
|
||||
.names_map = names_map,
|
||||
.flags = .{
|
||||
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
|
||||
.any_captures = switch (ini.key) {
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
|
||||
.reified => false,
|
||||
},
|
||||
.field_inits_wip = false,
|
||||
.inits_resolved = ini.inits_resolved,
|
||||
.is_reified = ini.key == .reified,
|
||||
.is_reified = switch (ini.key) {
|
||||
.declared, .declared_owned_captures => false,
|
||||
.reified => true,
|
||||
},
|
||||
},
|
||||
});
|
||||
try items.append(.{
|
||||
@ -8282,6 +8318,10 @@ pub fn getStructType(
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
|
||||
},
|
||||
.declared_owned_captures => |d| if (d.captures.len != 0) {
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
|
||||
},
|
||||
.reified => |r| {
|
||||
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
|
||||
},
|
||||
@ -8309,7 +8349,7 @@ pub fn getStructType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| @intFromBool(d.captures.len != 0) + d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -8324,7 +8364,10 @@ pub fn getStructType(
|
||||
.fields_len = ini.fields_len,
|
||||
.size = std.math.maxInt(u32),
|
||||
.flags = .{
|
||||
.any_captures = ini.key == .declared and ini.key.declared.captures.len != 0,
|
||||
.any_captures = switch (ini.key) {
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len != 0,
|
||||
.reified => false,
|
||||
},
|
||||
.is_extern = is_extern,
|
||||
.known_non_opv = ini.known_non_opv,
|
||||
.requires_comptime = ini.requires_comptime,
|
||||
@ -8342,7 +8385,10 @@ pub fn getStructType(
|
||||
.field_inits_wip = false,
|
||||
.inits_resolved = ini.inits_resolved,
|
||||
.fully_resolved = false,
|
||||
.is_reified = ini.key == .reified,
|
||||
.is_reified = switch (ini.key) {
|
||||
.declared, .declared_owned_captures => false,
|
||||
.reified => true,
|
||||
},
|
||||
},
|
||||
});
|
||||
try items.append(.{
|
||||
@ -8354,6 +8400,10 @@ pub fn getStructType(
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)});
|
||||
},
|
||||
.declared_owned_captures => |d| if (d.captures.len != 0) {
|
||||
extra.appendAssumeCapacity(.{@intCast(d.captures.len)});
|
||||
extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))});
|
||||
},
|
||||
.reified => |r| {
|
||||
_ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash));
|
||||
},
|
||||
@ -9157,6 +9207,10 @@ pub const EnumTypeInit = struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: []const CaptureValue,
|
||||
},
|
||||
declared_owned_captures: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
captures: CaptureValue.Slice,
|
||||
},
|
||||
reified: struct {
|
||||
zir_index: TrackedInst.Index,
|
||||
type_hash: u64,
|
||||
@ -9261,6 +9315,10 @@ pub fn getEnumType(
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .external = d.captures },
|
||||
} },
|
||||
.declared_owned_captures => |d| .{ .declared = .{
|
||||
.zir_index = d.zir_index,
|
||||
.captures = .{ .owned = d.captures },
|
||||
} },
|
||||
.reified => |r| .{ .reified = .{
|
||||
.zir_index = r.zir_index,
|
||||
.type_hash = r.type_hash,
|
||||
@ -9288,7 +9346,7 @@ pub fn getEnumType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -9298,7 +9356,7 @@ pub fn getEnumType(
|
||||
const extra_index = addExtraAssumeCapacity(extra, EnumAuto{
|
||||
.name = undefined, // set by `prepare`
|
||||
.captures_len = switch (ini.key) {
|
||||
.declared => |d| @intCast(d.captures.len),
|
||||
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
|
||||
.reified => std.math.maxInt(u32),
|
||||
},
|
||||
.namespace = undefined, // set by `prepare`
|
||||
@ -9317,6 +9375,7 @@ pub fn getEnumType(
|
||||
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
|
||||
switch (ini.key) {
|
||||
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
|
||||
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
|
||||
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
|
||||
}
|
||||
const names_start = extra.mutate.len;
|
||||
@ -9347,7 +9406,7 @@ pub fn getEnumType(
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
switch (ini.key) {
|
||||
.declared => |d| d.captures.len,
|
||||
inline .declared, .declared_owned_captures => |d| d.captures.len,
|
||||
.reified => 2, // type_hash: PackedU64
|
||||
} +
|
||||
// zig fmt: on
|
||||
@ -9358,7 +9417,7 @@ pub fn getEnumType(
|
||||
const extra_index = addExtraAssumeCapacity(extra, EnumExplicit{
|
||||
.name = undefined, // set by `prepare`
|
||||
.captures_len = switch (ini.key) {
|
||||
.declared => |d| @intCast(d.captures.len),
|
||||
inline .declared, .declared_owned_captures => |d| @intCast(d.captures.len),
|
||||
.reified => std.math.maxInt(u32),
|
||||
},
|
||||
.namespace = undefined, // set by `prepare`
|
||||
@ -9382,6 +9441,7 @@ pub fn getEnumType(
|
||||
extra.appendAssumeCapacity(undefined); // `cau` will be set by `finish`
|
||||
switch (ini.key) {
|
||||
.declared => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures)}),
|
||||
.declared_owned_captures => |d| extra.appendSliceAssumeCapacity(.{@ptrCast(d.captures.get(ip))}),
|
||||
.reified => |r| _ = addExtraAssumeCapacity(extra, PackedU64.init(r.type_hash)),
|
||||
}
|
||||
const names_start = extra.mutate.len;
|
||||
@ -9445,10 +9505,12 @@ pub fn getGeneratedTagEnumType(
|
||||
.tid = tid,
|
||||
.index = items.mutate.len,
|
||||
}, ip);
|
||||
const parent_namespace = ip.namespacePtr(ini.parent_namespace);
|
||||
const namespace = try ip.createNamespace(gpa, tid, .{
|
||||
.parent = ini.parent_namespace.toOptional(),
|
||||
.owner_type = enum_index,
|
||||
.file_scope = ip.namespacePtr(ini.parent_namespace).file_scope,
|
||||
.file_scope = parent_namespace.file_scope,
|
||||
.generation = parent_namespace.generation,
|
||||
});
|
||||
errdefer ip.destroyNamespace(tid, namespace);
|
||||
|
||||
@ -11044,6 +11106,7 @@ pub fn destroyNamespace(
|
||||
.parent = undefined,
|
||||
.file_scope = undefined,
|
||||
.owner_type = undefined,
|
||||
.generation = undefined,
|
||||
};
|
||||
@field(namespace, Local.namespace_next_free_field) =
|
||||
@enumFromInt(local.mutate.namespaces.free_list);
|
||||
|
452
src/Sema.zig
@ -2723,32 +2723,6 @@ fn wrapWipTy(sema: *Sema, wip_ty: anytype) @TypeOf(wip_ty) {
|
||||
return new;
|
||||
}
|
||||
|
||||
/// Given a type just looked up in the `InternPool`, check whether it is
|
||||
/// considered outdated on this update. If so, returns `true`, and the
|
||||
/// caller must replace the outdated type with a fresh one.
|
||||
fn checkOutdatedType(sema: *Sema, ty: InternPool.Index) !bool {
|
||||
const pt = sema.pt;
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
if (!zcu.comp.incremental) return false;
|
||||
|
||||
const cau_index = switch (ip.indexToKey(ty)) {
|
||||
.struct_type => ip.loadStructType(ty).cau.unwrap().?,
|
||||
.union_type => ip.loadUnionType(ty).cau,
|
||||
.enum_type => ip.loadEnumType(ty).cau.unwrap().?,
|
||||
else => unreachable,
|
||||
};
|
||||
const cau_unit = AnalUnit.wrap(.{ .cau = cau_index });
|
||||
const was_outdated = zcu.outdated.swapRemove(cau_unit) or
|
||||
zcu.potentially_outdated.swapRemove(cau_unit);
|
||||
if (!was_outdated) return false;
|
||||
_ = zcu.outdated_ready.swapRemove(cau_unit);
|
||||
zcu.intern_pool.removeDependenciesForDepender(zcu.gpa, cau_unit);
|
||||
try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
|
||||
return true;
|
||||
}
|
||||
|
||||
fn zirStructDecl(
|
||||
sema: *Sema,
|
||||
block: *Block,
|
||||
@ -2815,13 +2789,16 @@ fn zirStructDecl(
|
||||
} },
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getStructType(gpa, pt.tid, struct_init, false)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.checkOutdatedType(ty)) {
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
}
|
||||
break :wip (try ip.getStructType(gpa, pt.tid, struct_init, true)).wip;
|
||||
.existing => |ty| {
|
||||
const new_ty = try pt.ensureTypeUpToDate(ty, false);
|
||||
|
||||
// Make sure we update the namespace if the declaration is re-analyzed, to pick
|
||||
// up on e.g. changed comptime decls.
|
||||
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
|
||||
|
||||
try sema.declareDependency(.{ .interned = new_ty });
|
||||
try sema.addTypeReferenceEntry(src, new_ty);
|
||||
return Air.internedToRef(new_ty);
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
@ -2839,6 +2816,7 @@ fn zirStructDecl(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
errdefer pt.destroyNamespace(new_namespace_index);
|
||||
|
||||
@ -2977,7 +2955,6 @@ fn zirEnumDecl(
|
||||
|
||||
const tracked_inst = try block.trackZir(inst);
|
||||
const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) };
|
||||
const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } };
|
||||
|
||||
const tag_type_ref = if (small.has_tag_type) blk: {
|
||||
const tag_type_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
|
||||
@ -3041,13 +3018,16 @@ fn zirEnumDecl(
|
||||
} },
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getEnumType(gpa, pt.tid, enum_init, false)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.checkOutdatedType(ty)) {
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
}
|
||||
break :wip (try ip.getEnumType(gpa, pt.tid, enum_init, true)).wip;
|
||||
.existing => |ty| {
|
||||
const new_ty = try pt.ensureTypeUpToDate(ty, false);
|
||||
|
||||
// Make sure we update the namespace if the declaration is re-analyzed, to pick
|
||||
// up on e.g. changed comptime decls.
|
||||
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
|
||||
|
||||
try sema.declareDependency(.{ .interned = new_ty });
|
||||
try sema.addTypeReferenceEntry(src, new_ty);
|
||||
return Air.internedToRef(new_ty);
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
@ -3071,19 +3051,12 @@ fn zirEnumDecl(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
errdefer if (!done) pt.destroyNamespace(new_namespace_index);
|
||||
|
||||
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
|
||||
|
||||
if (pt.zcu.comp.incremental) {
|
||||
try mod.intern_pool.addDependency(
|
||||
gpa,
|
||||
AnalUnit.wrap(.{ .cau = new_cau_index }),
|
||||
.{ .src_hash = try block.trackZir(inst) },
|
||||
);
|
||||
}
|
||||
|
||||
try pt.scanNamespace(new_namespace_index, decls);
|
||||
|
||||
try sema.declareDependency(.{ .interned = wip_ty.index });
|
||||
@ -3094,144 +3067,22 @@ fn zirEnumDecl(
|
||||
wip_ty.prepare(ip, new_cau_index, new_namespace_index);
|
||||
done = true;
|
||||
|
||||
const int_tag_ty = ty: {
|
||||
// We create a block for the field type instructions because they
|
||||
// may need to reference Decls from inside the enum namespace.
|
||||
// Within the field type, default value, and alignment expressions, the owner should be the enum's `Cau`.
|
||||
|
||||
const prev_owner = sema.owner;
|
||||
sema.owner = AnalUnit.wrap(.{ .cau = new_cau_index });
|
||||
defer sema.owner = prev_owner;
|
||||
|
||||
const prev_func_index = sema.func_index;
|
||||
sema.func_index = .none;
|
||||
defer sema.func_index = prev_func_index;
|
||||
|
||||
var enum_block: Block = .{
|
||||
.parent = null,
|
||||
.sema = sema,
|
||||
.namespace = new_namespace_index,
|
||||
.instructions = .{},
|
||||
.inlining = null,
|
||||
.is_comptime = true,
|
||||
.src_base_inst = tracked_inst,
|
||||
.type_name_ctx = type_name,
|
||||
};
|
||||
defer enum_block.instructions.deinit(sema.gpa);
|
||||
|
||||
if (body.len != 0) {
|
||||
_ = try sema.analyzeInlineBody(&enum_block, body, inst);
|
||||
}
|
||||
|
||||
if (tag_type_ref != .none) {
|
||||
const ty = try sema.resolveType(&enum_block, tag_ty_src, tag_type_ref);
|
||||
if (ty.zigTypeTag(mod) != .Int and ty.zigTypeTag(mod) != .ComptimeInt) {
|
||||
return sema.fail(&enum_block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
|
||||
}
|
||||
break :ty ty;
|
||||
} else if (fields_len == 0) {
|
||||
break :ty try pt.intType(.unsigned, 0);
|
||||
} else {
|
||||
const bits = std.math.log2_int_ceil(usize, fields_len);
|
||||
break :ty try pt.intType(.unsigned, bits);
|
||||
}
|
||||
};
|
||||
|
||||
wip_ty.setTagTy(ip, int_tag_ty.toIntern());
|
||||
|
||||
if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
|
||||
if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) {
|
||||
return sema.fail(block, src, "non-exhaustive enum specifies every value", .{});
|
||||
}
|
||||
}
|
||||
|
||||
var bit_bag_index: usize = body_end;
|
||||
var cur_bit_bag: u32 = undefined;
|
||||
var field_i: u32 = 0;
|
||||
var last_tag_val: ?Value = null;
|
||||
while (field_i < fields_len) : (field_i += 1) {
|
||||
if (field_i % 32 == 0) {
|
||||
cur_bit_bag = sema.code.extra[bit_bag_index];
|
||||
bit_bag_index += 1;
|
||||
}
|
||||
const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
|
||||
cur_bit_bag >>= 1;
|
||||
|
||||
const field_name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
|
||||
const field_name_zir = sema.code.nullTerminatedString(field_name_index);
|
||||
extra_index += 2; // field name, doc comment
|
||||
|
||||
const field_name = try mod.intern_pool.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
|
||||
|
||||
const value_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = field_i },
|
||||
};
|
||||
|
||||
const tag_overflow = if (has_tag_value) overflow: {
|
||||
const tag_val_ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_index]);
|
||||
extra_index += 1;
|
||||
const tag_inst = try sema.resolveInst(tag_val_ref);
|
||||
last_tag_val = try sema.resolveConstDefinedValue(block, .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_name = field_i },
|
||||
}, tag_inst, .{
|
||||
.needed_comptime_reason = "enum tag value must be comptime-known",
|
||||
});
|
||||
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
|
||||
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
|
||||
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
|
||||
assert(conflict.kind == .value); // AstGen validated names are unique
|
||||
const other_field_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = conflict.prev_field_idx },
|
||||
};
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)});
|
||||
errdefer msg.destroy(gpa);
|
||||
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(block, msg);
|
||||
}
|
||||
break :overflow false;
|
||||
} else if (any_values) overflow: {
|
||||
var overflow: ?usize = null;
|
||||
last_tag_val = if (last_tag_val) |val|
|
||||
try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
|
||||
else
|
||||
try pt.intValue(int_tag_ty, 0);
|
||||
if (overflow != null) break :overflow true;
|
||||
if (wip_ty.nextField(&mod.intern_pool, field_name, last_tag_val.?.toIntern())) |conflict| {
|
||||
assert(conflict.kind == .value); // AstGen validated names are unique
|
||||
const other_field_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = conflict.prev_field_idx },
|
||||
};
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, sema)});
|
||||
errdefer msg.destroy(gpa);
|
||||
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(block, msg);
|
||||
}
|
||||
break :overflow false;
|
||||
} else overflow: {
|
||||
assert(wip_ty.nextField(&mod.intern_pool, field_name, .none) == null);
|
||||
last_tag_val = try pt.intValue(Type.comptime_int, field_i);
|
||||
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
|
||||
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
|
||||
break :overflow false;
|
||||
};
|
||||
|
||||
if (tag_overflow) {
|
||||
const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{
|
||||
last_tag_val.?.fmtValueSema(pt, sema), int_tag_ty.fmt(pt),
|
||||
});
|
||||
return sema.failWithOwnedErrorMsg(block, msg);
|
||||
}
|
||||
}
|
||||
try Sema.resolveDeclaredEnum(
|
||||
pt,
|
||||
wip_ty,
|
||||
inst,
|
||||
tracked_inst,
|
||||
new_namespace_index,
|
||||
type_name,
|
||||
new_cau_index,
|
||||
small,
|
||||
body,
|
||||
tag_type_ref,
|
||||
any_values,
|
||||
fields_len,
|
||||
sema.code,
|
||||
body_end,
|
||||
);
|
||||
|
||||
codegen_type: {
|
||||
if (mod.comp.config.use_llvm) break :codegen_type;
|
||||
@ -3311,13 +3162,16 @@ fn zirUnionDecl(
|
||||
} },
|
||||
};
|
||||
const wip_ty = sema.wrapWipTy(switch (try ip.getUnionType(gpa, pt.tid, union_init, false)) {
|
||||
.existing => |ty| wip: {
|
||||
if (!try sema.checkOutdatedType(ty)) {
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
}
|
||||
break :wip (try ip.getUnionType(gpa, pt.tid, union_init, true)).wip;
|
||||
.existing => |ty| {
|
||||
const new_ty = try pt.ensureTypeUpToDate(ty, false);
|
||||
|
||||
// Make sure we update the namespace if the declaration is re-analyzed, to pick
|
||||
// up on e.g. changed comptime decls.
|
||||
try pt.ensureNamespaceUpToDate(Type.fromInterned(new_ty).getNamespaceIndex(mod));
|
||||
|
||||
try sema.declareDependency(.{ .interned = new_ty });
|
||||
try sema.addTypeReferenceEntry(src, new_ty);
|
||||
return Air.internedToRef(new_ty);
|
||||
},
|
||||
.wip => |wip| wip,
|
||||
});
|
||||
@ -3335,6 +3189,7 @@ fn zirUnionDecl(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
errdefer pt.destroyNamespace(new_namespace_index);
|
||||
|
||||
@ -3344,7 +3199,7 @@ fn zirUnionDecl(
|
||||
try mod.intern_pool.addDependency(
|
||||
gpa,
|
||||
AnalUnit.wrap(.{ .cau = new_cau_index }),
|
||||
.{ .src_hash = try block.trackZir(inst) },
|
||||
.{ .src_hash = tracked_inst },
|
||||
);
|
||||
}
|
||||
|
||||
@ -3406,8 +3261,12 @@ fn zirOpaqueDecl(
|
||||
};
|
||||
// No `wrapWipTy` needed as no std.builtin types are opaque.
|
||||
const wip_ty = switch (try ip.getOpaqueType(gpa, pt.tid, opaque_init)) {
|
||||
// No `checkOutdatedType` as opaque types are never outdated.
|
||||
.existing => |ty| {
|
||||
// Make sure we update the namespace if the declaration is re-analyzed, to pick
|
||||
// up on e.g. changed comptime decls.
|
||||
try pt.ensureNamespaceUpToDate(Type.fromInterned(ty).getNamespaceIndex(mod));
|
||||
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
},
|
||||
@ -3427,6 +3286,7 @@ fn zirOpaqueDecl(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
errdefer pt.destroyNamespace(new_namespace_index);
|
||||
|
||||
@ -6072,6 +5932,7 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
|
||||
// trigger re-analysis later.
|
||||
try pt.ensureFileAnalyzed(result.file_index);
|
||||
const ty = zcu.fileRootType(result.file_index);
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
}
|
||||
@ -6821,6 +6682,8 @@ fn lookupInNamespace(
|
||||
const zcu = pt.zcu;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
try pt.ensureNamespaceUpToDate(namespace_index);
|
||||
|
||||
const namespace = zcu.namespacePtr(namespace_index);
|
||||
|
||||
const adapter: Zcu.Namespace.NameAdapter = .{ .zcu = zcu };
|
||||
@ -14038,6 +13901,7 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
|
||||
// trigger re-analysis later.
|
||||
try pt.ensureFileAnalyzed(result.file_index);
|
||||
const ty = zcu.fileRootType(result.file_index);
|
||||
try sema.declareDependency(.{ .interned = ty });
|
||||
try sema.addTypeReferenceEntry(operand_src, ty);
|
||||
return Air.internedToRef(ty);
|
||||
}
|
||||
@ -17703,7 +17567,13 @@ fn zirThis(
|
||||
_ = extended;
|
||||
const pt = sema.pt;
|
||||
const namespace = pt.zcu.namespacePtr(block.namespace);
|
||||
return Air.internedToRef(namespace.owner_type);
|
||||
const new_ty = try pt.ensureTypeUpToDate(namespace.owner_type, false);
|
||||
switch (pt.zcu.intern_pool.indexToKey(new_ty)) {
|
||||
.struct_type, .union_type, .enum_type => try sema.declareDependency(.{ .interned = new_ty }),
|
||||
.opaque_type => {},
|
||||
else => unreachable,
|
||||
}
|
||||
return Air.internedToRef(new_ty);
|
||||
}
|
||||
|
||||
fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
|
||||
@ -19005,6 +18875,7 @@ fn typeInfoNamespaceDecls(
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const namespace_index = opt_namespace_index.unwrap() orelse return;
|
||||
try pt.ensureNamespaceUpToDate(namespace_index);
|
||||
const namespace = zcu.namespacePtr(namespace_index);
|
||||
|
||||
const gop = try seen_namespaces.getOrPut(namespace);
|
||||
@ -21871,6 +21742,7 @@ fn zirReify(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
|
||||
try sema.addTypeReferenceEntry(src, wip_ty.index);
|
||||
@ -22080,6 +21952,7 @@ fn reifyEnum(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
|
||||
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
|
||||
@ -22384,6 +22257,7 @@ fn reifyUnion(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
|
||||
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
|
||||
@ -22667,6 +22541,7 @@ fn reifyStruct(
|
||||
.parent = block.namespace.toOptional(),
|
||||
.owner_type = wip_ty.index,
|
||||
.file_scope = block.getFileScopeIndex(mod),
|
||||
.generation = mod.generation,
|
||||
});
|
||||
|
||||
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip_ty.index);
|
||||
@ -35373,7 +35248,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void {
|
||||
if (struct_type.haveLayout(ip))
|
||||
return;
|
||||
|
||||
try ty.resolveFields(pt);
|
||||
try sema.resolveTypeFieldsStruct(ty.toIntern(), struct_type);
|
||||
|
||||
if (struct_type.layout == .@"packed") {
|
||||
semaBackingIntType(pt, struct_type) catch |err| switch (err) {
|
||||
@ -38499,6 +38374,187 @@ fn getOwnerFuncDeclInst(sema: *Sema) InternPool.TrackedInst.Index {
|
||||
return ip.getCau(cau).zir_index;
|
||||
}
|
||||
|
||||
/// Called as soon as a `declared` enum type is created.
|
||||
/// Resolves the tag type and field inits.
|
||||
/// Marks the `src_inst` dependency on the enum's declaration, so call sites need not do this.
|
||||
pub fn resolveDeclaredEnum(
|
||||
pt: Zcu.PerThread,
|
||||
wip_ty: InternPool.WipEnumType,
|
||||
inst: Zir.Inst.Index,
|
||||
tracked_inst: InternPool.TrackedInst.Index,
|
||||
namespace: InternPool.NamespaceIndex,
|
||||
type_name: InternPool.NullTerminatedString,
|
||||
enum_cau: InternPool.Cau.Index,
|
||||
small: Zir.Inst.EnumDecl.Small,
|
||||
body: []const Zir.Inst.Index,
|
||||
tag_type_ref: Zir.Inst.Ref,
|
||||
any_values: bool,
|
||||
fields_len: u32,
|
||||
zir: Zir,
|
||||
body_end: usize,
|
||||
) Zcu.CompileError!void {
|
||||
const zcu = pt.zcu;
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
|
||||
|
||||
const src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = LazySrcLoc.Offset.nodeOffset(0) };
|
||||
const tag_ty_src: LazySrcLoc = .{ .base_node_inst = tracked_inst, .offset = .{ .node_offset_container_tag = 0 } };
|
||||
|
||||
const anal_unit = AnalUnit.wrap(.{ .cau = enum_cau });
|
||||
|
||||
var arena = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena.deinit();
|
||||
|
||||
var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
|
||||
defer comptime_err_ret_trace.deinit();
|
||||
|
||||
var sema: Sema = .{
|
||||
.pt = pt,
|
||||
.gpa = gpa,
|
||||
.arena = arena.allocator(),
|
||||
.code = zir,
|
||||
.owner = anal_unit,
|
||||
.func_index = .none,
|
||||
.func_is_naked = false,
|
||||
.fn_ret_ty = Type.void,
|
||||
.fn_ret_ty_ies = null,
|
||||
.comptime_err_ret_trace = &comptime_err_ret_trace,
|
||||
};
|
||||
defer sema.deinit();
|
||||
|
||||
try sema.declareDependency(.{ .src_hash = tracked_inst });
|
||||
|
||||
var block: Block = .{
|
||||
.parent = null,
|
||||
.sema = &sema,
|
||||
.namespace = namespace,
|
||||
.instructions = .{},
|
||||
.inlining = null,
|
||||
.is_comptime = true,
|
||||
.src_base_inst = tracked_inst,
|
||||
.type_name_ctx = type_name,
|
||||
};
|
||||
defer block.instructions.deinit(gpa);
|
||||
|
||||
const int_tag_ty = ty: {
|
||||
if (body.len != 0) {
|
||||
_ = try sema.analyzeInlineBody(&block, body, inst);
|
||||
}
|
||||
|
||||
if (tag_type_ref != .none) {
|
||||
const ty = try sema.resolveType(&block, tag_ty_src, tag_type_ref);
|
||||
if (ty.zigTypeTag(zcu) != .Int and ty.zigTypeTag(zcu) != .ComptimeInt) {
|
||||
return sema.fail(&block, tag_ty_src, "expected integer tag type, found '{}'", .{ty.fmt(pt)});
|
||||
}
|
||||
break :ty ty;
|
||||
} else if (fields_len == 0) {
|
||||
break :ty try pt.intType(.unsigned, 0);
|
||||
} else {
|
||||
const bits = std.math.log2_int_ceil(usize, fields_len);
|
||||
break :ty try pt.intType(.unsigned, bits);
|
||||
}
|
||||
};
|
||||
|
||||
wip_ty.setTagTy(ip, int_tag_ty.toIntern());
|
||||
|
||||
if (small.nonexhaustive and int_tag_ty.toIntern() != .comptime_int_type) {
|
||||
if (fields_len > 1 and std.math.log2_int(u64, fields_len) == int_tag_ty.bitSize(pt)) {
|
||||
return sema.fail(&block, src, "non-exhaustive enum specifies every value", .{});
|
||||
}
|
||||
}
|
||||
|
||||
var extra_index = body_end + bit_bags_count;
|
||||
var bit_bag_index: usize = body_end;
|
||||
var cur_bit_bag: u32 = undefined;
|
||||
var last_tag_val: ?Value = null;
|
||||
for (0..fields_len) |field_i_usize| {
|
||||
const field_i: u32 = @intCast(field_i_usize);
|
||||
if (field_i % 32 == 0) {
|
||||
cur_bit_bag = zir.extra[bit_bag_index];
|
||||
bit_bag_index += 1;
|
||||
}
|
||||
const has_tag_value = @as(u1, @truncate(cur_bit_bag)) != 0;
|
||||
cur_bit_bag >>= 1;
|
||||
|
||||
const field_name_index: Zir.NullTerminatedString = @enumFromInt(zir.extra[extra_index]);
|
||||
const field_name_zir = zir.nullTerminatedString(field_name_index);
|
||||
extra_index += 2; // field name, doc comment
|
||||
|
||||
const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls);
|
||||
|
||||
const value_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = field_i },
|
||||
};
|
||||
|
||||
const tag_overflow = if (has_tag_value) overflow: {
|
||||
const tag_val_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
|
||||
extra_index += 1;
|
||||
const tag_inst = try sema.resolveInst(tag_val_ref);
|
||||
last_tag_val = try sema.resolveConstDefinedValue(&block, .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_name = field_i },
|
||||
}, tag_inst, .{
|
||||
.needed_comptime_reason = "enum tag value must be comptime-known",
|
||||
});
|
||||
if (!(try sema.intFitsInType(last_tag_val.?, int_tag_ty, null))) break :overflow true;
|
||||
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
|
||||
if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| {
|
||||
assert(conflict.kind == .value); // AstGen validated names are unique
|
||||
const other_field_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = conflict.prev_field_idx },
|
||||
};
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)});
|
||||
errdefer msg.destroy(gpa);
|
||||
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(&block, msg);
|
||||
}
|
||||
break :overflow false;
|
||||
} else if (any_values) overflow: {
|
||||
var overflow: ?usize = null;
|
||||
last_tag_val = if (last_tag_val) |val|
|
||||
try sema.intAdd(val, try pt.intValue(int_tag_ty, 1), int_tag_ty, &overflow)
|
||||
else
|
||||
try pt.intValue(int_tag_ty, 0);
|
||||
if (overflow != null) break :overflow true;
|
||||
if (wip_ty.nextField(ip, field_name, last_tag_val.?.toIntern())) |conflict| {
|
||||
assert(conflict.kind == .value); // AstGen validated names are unique
|
||||
const other_field_src: LazySrcLoc = .{
|
||||
.base_node_inst = tracked_inst,
|
||||
.offset = .{ .container_field_value = conflict.prev_field_idx },
|
||||
};
|
||||
const msg = msg: {
|
||||
const msg = try sema.errMsg(value_src, "enum tag value {} already taken", .{last_tag_val.?.fmtValueSema(pt, &sema)});
|
||||
errdefer msg.destroy(gpa);
|
||||
try sema.errNote(other_field_src, msg, "other occurrence here", .{});
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(&block, msg);
|
||||
}
|
||||
break :overflow false;
|
||||
} else overflow: {
|
||||
assert(wip_ty.nextField(ip, field_name, .none) == null);
|
||||
last_tag_val = try pt.intValue(Type.comptime_int, field_i);
|
||||
if (!try sema.intFitsInType(last_tag_val.?, int_tag_ty, null)) break :overflow true;
|
||||
last_tag_val = try pt.getCoerced(last_tag_val.?, int_tag_ty);
|
||||
break :overflow false;
|
||||
};
|
||||
|
||||
if (tag_overflow) {
|
||||
const msg = try sema.errMsg(value_src, "enumeration value '{}' too large for type '{}'", .{
|
||||
last_tag_val.?.fmtValueSema(pt, &sema), int_tag_ty.fmt(pt),
|
||||
});
|
||||
return sema.failWithOwnedErrorMsg(&block, msg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const bitCastVal = @import("Sema/bitcast.zig").bitCast;
|
||||
pub const bitCastSpliceVal = @import("Sema/bitcast.zig").bitCastSplice;
|
||||
|
||||
|
61
src/Zcu.zig
@ -215,6 +215,8 @@ panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId
|
||||
panic_func_index: InternPool.Index = .none,
|
||||
null_stack_trace: InternPool.Index = .none,
|
||||
|
||||
generation: u32 = 0,
|
||||
|
||||
pub const PerThread = @import("Zcu/PerThread.zig");
|
||||
|
||||
pub const PanicId = enum {
|
||||
@ -332,6 +334,7 @@ pub const TypeReference = struct {
|
||||
pub const Namespace = struct {
|
||||
parent: OptionalIndex,
|
||||
file_scope: File.Index,
|
||||
generation: u32,
|
||||
/// Will be a struct, enum, union, or opaque.
|
||||
owner_type: InternPool.Index,
|
||||
/// Members of the namespace which are marked `pub`.
|
||||
@ -2295,7 +2298,7 @@ pub fn markDependeeOutdated(
|
||||
marked_po: enum { not_marked_po, marked_po },
|
||||
dependee: InternPool.Dependee,
|
||||
) !void {
|
||||
log.debug("outdated dependee: {}", .{fmtDependee(dependee, zcu)});
|
||||
log.debug("outdated dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = zcu.intern_pool.dependencyIterator(dependee);
|
||||
while (it.next()) |depender| {
|
||||
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
|
||||
@ -2303,9 +2306,9 @@ pub fn markDependeeOutdated(
|
||||
.not_marked_po => {},
|
||||
.marked_po => {
|
||||
po_dep_count.* -= 1;
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* });
|
||||
log.debug("outdated {} => already outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
|
||||
if (po_dep_count.* == 0) {
|
||||
log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
},
|
||||
@ -2316,20 +2319,19 @@ pub fn markDependeeOutdated(
|
||||
const new_po_dep_count = switch (marked_po) {
|
||||
.not_marked_po => if (opt_po_entry) |e| e.value else 0,
|
||||
.marked_po => if (opt_po_entry) |e| e.value - 1 else {
|
||||
// This dependency has been registered during in-progress analysis, but the unit is
|
||||
// not in `potentially_outdated` because analysis is in-progress. Nothing to do.
|
||||
// This `AnalUnit` has already been re-analyzed this update, and registered a dependency
|
||||
// on this thing, but already has sufficiently up-to-date information. Nothing to do.
|
||||
continue;
|
||||
},
|
||||
};
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), new_po_dep_count });
|
||||
try zcu.outdated.putNoClobber(
|
||||
zcu.gpa,
|
||||
depender,
|
||||
new_po_dep_count,
|
||||
);
|
||||
log.debug("outdated: {}", .{fmtAnalUnit(depender, zcu)});
|
||||
log.debug("outdated {} => new outdated {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), new_po_dep_count });
|
||||
if (new_po_dep_count == 0) {
|
||||
log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
// If this is a Decl and was not previously PO, we must recursively
|
||||
@ -2342,16 +2344,16 @@ pub fn markDependeeOutdated(
|
||||
}
|
||||
|
||||
pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
log.debug("up-to-date dependee: {}", .{fmtDependee(dependee, zcu)});
|
||||
log.debug("up-to-date dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = zcu.intern_pool.dependencyIterator(dependee);
|
||||
while (it.next()) |depender| {
|
||||
if (zcu.outdated.getPtr(depender)) |po_dep_count| {
|
||||
// This depender is already outdated, but it now has one
|
||||
// less PO dependency!
|
||||
po_dep_count.* -= 1;
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), po_dep_count.* });
|
||||
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), po_dep_count.* });
|
||||
if (po_dep_count.* == 0) {
|
||||
log.debug("outdated ready: {}", .{fmtAnalUnit(depender, zcu)});
|
||||
log.debug("outdated ready: {}", .{zcu.fmtAnalUnit(depender)});
|
||||
try zcu.outdated_ready.put(zcu.gpa, depender, {});
|
||||
}
|
||||
continue;
|
||||
@ -2365,11 +2367,11 @@ pub fn markPoDependeeUpToDate(zcu: *Zcu, dependee: InternPool.Dependee) !void {
|
||||
};
|
||||
if (ptr.* > 1) {
|
||||
ptr.* -= 1;
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(depender, zcu), ptr.* });
|
||||
log.debug("up-to-date {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender), ptr.* });
|
||||
continue;
|
||||
}
|
||||
|
||||
log.debug("up-to-date (po deps = 0): {}", .{fmtAnalUnit(depender, zcu)});
|
||||
log.debug("up-to-date {} => {} po_deps=0 (up-to-date)", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(depender) });
|
||||
|
||||
// This dependency is no longer PO, i.e. is known to be up-to-date.
|
||||
assert(zcu.potentially_outdated.swapRemove(depender));
|
||||
@ -2398,7 +2400,7 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
|
||||
},
|
||||
.func => |func_index| .{ .interned = func_index }, // IES
|
||||
};
|
||||
log.debug("marking dependee po: {}", .{fmtDependee(dependee, zcu)});
|
||||
log.debug("potentially outdated dependee: {}", .{zcu.fmtDependee(dependee)});
|
||||
var it = ip.dependencyIterator(dependee);
|
||||
while (it.next()) |po| {
|
||||
if (zcu.outdated.getPtr(po)) |po_dep_count| {
|
||||
@ -2408,17 +2410,17 @@ fn markTransitiveDependersPotentiallyOutdated(zcu: *Zcu, maybe_outdated: AnalUni
|
||||
_ = zcu.outdated_ready.swapRemove(po);
|
||||
}
|
||||
po_dep_count.* += 1;
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), po_dep_count.* });
|
||||
log.debug("po {} => {} [outdated] po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), po_dep_count.* });
|
||||
continue;
|
||||
}
|
||||
if (zcu.potentially_outdated.getPtr(po)) |n| {
|
||||
// There is now one more PO dependency.
|
||||
n.* += 1;
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), n.* });
|
||||
log.debug("po {} => {} po_deps={}", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po), n.* });
|
||||
continue;
|
||||
}
|
||||
try zcu.potentially_outdated.putNoClobber(zcu.gpa, po, 1);
|
||||
log.debug("po dep count: {} = {}", .{ fmtAnalUnit(po, zcu), 1 });
|
||||
log.debug("po {} => {} po_deps=1", .{ zcu.fmtDependee(dependee), zcu.fmtAnalUnit(po) });
|
||||
// This AnalUnit was not already PO, so we must recursively mark its dependers as also PO.
|
||||
try zcu.markTransitiveDependersPotentiallyOutdated(po);
|
||||
}
|
||||
@ -2443,7 +2445,7 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
|
||||
|
||||
if (zcu.outdated_ready.count() > 0) {
|
||||
const unit = zcu.outdated_ready.keys()[0];
|
||||
log.debug("findOutdatedToAnalyze: trivial {}", .{fmtAnalUnit(unit, zcu)});
|
||||
log.debug("findOutdatedToAnalyze: trivial {}", .{zcu.fmtAnalUnit(unit)});
|
||||
return unit;
|
||||
}
|
||||
|
||||
@ -2498,10 +2500,15 @@ pub fn findOutdatedToAnalyze(zcu: *Zcu) Allocator.Error!?AnalUnit {
|
||||
const nav = zcu.funcInfo(func).owner_nav;
|
||||
std.io.getStdErr().writer().print("outdated: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
|
||||
}
|
||||
for (zcu.potentially_outdated.keys(), zcu.potentially_outdated.values()) |o, opod| {
|
||||
const func = o.unwrap().func;
|
||||
const nav = zcu.funcInfo(func).owner_nav;
|
||||
std.io.getStdErr().writer().print("po: func {}, nav {}, name '{}', [p]o deps {}\n", .{ func, nav, ip.getNav(nav).fqn.fmt(ip), opod }) catch {};
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("findOutdatedToAnalyze: heuristic returned '{}' ({d} dependers)", .{
|
||||
fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? }), zcu),
|
||||
zcu.fmtAnalUnit(AnalUnit.wrap(.{ .cau = chosen_cau.? })),
|
||||
chosen_cau_dependers,
|
||||
});
|
||||
|
||||
@ -2744,7 +2751,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
|
||||
const gpa = zcu.gpa;
|
||||
|
||||
unit_refs: {
|
||||
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse return;
|
||||
const kv = zcu.reference_table.fetchSwapRemove(anal_unit) orelse break :unit_refs;
|
||||
var idx = kv.value;
|
||||
|
||||
while (idx != std.math.maxInt(u32)) {
|
||||
@ -2758,7 +2765,7 @@ pub fn deleteUnitReferences(zcu: *Zcu, anal_unit: AnalUnit) void {
|
||||
}
|
||||
|
||||
type_refs: {
|
||||
const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse return;
|
||||
const kv = zcu.type_reference_table.fetchSwapRemove(anal_unit) orelse break :type_refs;
|
||||
var idx = kv.value;
|
||||
|
||||
while (idx != std.math.maxInt(u32)) {
|
||||
@ -3280,7 +3287,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
|
||||
const unit = kv.key;
|
||||
try result.putNoClobber(gpa, unit, kv.value);
|
||||
|
||||
log.debug("handle unit '{}'", .{fmtAnalUnit(unit, zcu)});
|
||||
log.debug("handle unit '{}'", .{zcu.fmtAnalUnit(unit)});
|
||||
|
||||
if (zcu.reference_table.get(unit)) |first_ref_idx| {
|
||||
assert(first_ref_idx != std.math.maxInt(u32));
|
||||
@ -3289,8 +3296,8 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
|
||||
const ref = zcu.all_references.items[ref_idx];
|
||||
if (!result.contains(ref.referenced)) {
|
||||
log.debug("unit '{}': ref unit '{}'", .{
|
||||
fmtAnalUnit(unit, zcu),
|
||||
fmtAnalUnit(ref.referenced, zcu),
|
||||
zcu.fmtAnalUnit(unit),
|
||||
zcu.fmtAnalUnit(ref.referenced),
|
||||
});
|
||||
try unit_queue.put(gpa, ref.referenced, .{
|
||||
.referencer = unit,
|
||||
@ -3307,7 +3314,7 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
|
||||
const ref = zcu.all_type_references.items[ref_idx];
|
||||
if (!checked_types.contains(ref.referenced)) {
|
||||
log.debug("unit '{}': ref type '{}'", .{
|
||||
fmtAnalUnit(unit, zcu),
|
||||
zcu.fmtAnalUnit(unit),
|
||||
Type.fromInterned(ref.referenced).containerTypeName(ip).fmt(ip),
|
||||
});
|
||||
try type_queue.put(gpa, ref.referenced, .{
|
||||
@ -3389,10 +3396,10 @@ pub fn cauFileScope(zcu: *Zcu, cau: InternPool.Cau.Index) *File {
|
||||
return zcu.fileByIndex(file_index);
|
||||
}
|
||||
|
||||
fn fmtAnalUnit(unit: AnalUnit, zcu: *Zcu) std.fmt.Formatter(formatAnalUnit) {
|
||||
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) {
|
||||
return .{ .data = .{ .unit = unit, .zcu = zcu } };
|
||||
}
|
||||
fn fmtDependee(d: InternPool.Dependee, zcu: *Zcu) std.fmt.Formatter(formatDependee) {
|
||||
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) {
|
||||
return .{ .data = .{ .dependee = d, .zcu = zcu } };
|
||||
}
|
||||
|
||||
|
@ -485,10 +485,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
|
||||
pub fn ensureFileAnalyzed(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
|
||||
const file_root_type = pt.zcu.fileRootType(file_index);
|
||||
if (file_root_type != .none) {
|
||||
// The namespace is already up-to-date thanks to the `updateFileNamespace` calls at the
|
||||
// start of this update. We just have to check whether the type itself is okay!
|
||||
const file_root_type_cau = pt.zcu.intern_pool.loadStructType(file_root_type).cau.unwrap().?;
|
||||
return pt.ensureCauAnalyzed(file_root_type_cau);
|
||||
_ = try pt.ensureTypeUpToDate(file_root_type, false);
|
||||
} else {
|
||||
return pt.semaFile(file_index);
|
||||
}
|
||||
@ -505,10 +502,10 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
|
||||
const gpa = zcu.gpa;
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
|
||||
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });
|
||||
const cau = ip.getCau(cau_index);
|
||||
|
||||
//log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)});
|
||||
log.debug("ensureCauAnalyzed {d}", .{@intFromEnum(cau_index)});
|
||||
|
||||
assert(!zcu.analysis_in_progress.contains(anal_unit));
|
||||
|
||||
@ -552,10 +549,12 @@ pub fn ensureCauAnalyzed(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) Zcu
|
||||
// Since it does not, this must be a transitive failure.
|
||||
try zcu.transitive_failed_analysis.put(gpa, anal_unit, {});
|
||||
}
|
||||
// We treat errors as up-to-date, since those uses would just trigger a transitive error
|
||||
// We treat errors as up-to-date, since those uses would just trigger a transitive error.
|
||||
// The exception is types, since type declarations may require re-analysis if the type, e.g. its captures, changed.
|
||||
const outdated = cau.owner.unwrap() == .type;
|
||||
break :res .{ .{
|
||||
.invalidate_decl_val = false,
|
||||
.invalidate_decl_ref = false,
|
||||
.invalidate_decl_val = outdated,
|
||||
.invalidate_decl_ref = outdated,
|
||||
}, true };
|
||||
},
|
||||
error.OutOfMemory => res: {
|
||||
@ -610,7 +609,7 @@ fn ensureCauAnalyzedInner(
|
||||
const ip = &zcu.intern_pool;
|
||||
|
||||
const cau = ip.getCau(cau_index);
|
||||
const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
|
||||
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });
|
||||
|
||||
const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
|
||||
|
||||
@ -626,7 +625,6 @@ fn ensureCauAnalyzedInner(
|
||||
// * so, it uses the same `struct`
|
||||
// * but this doesn't stop it from updating the namespace!
|
||||
// * we basically do `scanDecls`, updating the namespace as needed
|
||||
// * TODO: optimize this to make sure we only do it once a generation i guess?
|
||||
// * so everyone lived happily ever after
|
||||
|
||||
if (zcu.fileByIndex(inst_info.file).status != .success_zir) {
|
||||
@ -646,17 +644,6 @@ fn ensureCauAnalyzedInner(
|
||||
_ = zcu.transitive_failed_analysis.swapRemove(anal_unit);
|
||||
}
|
||||
|
||||
if (inst_info.inst == .main_struct_inst) {
|
||||
// Note that this is definitely a *recreation* due to outdated, because
|
||||
// this instruction indicates that `cau.owner` is a `type`, which only
|
||||
// reaches here if `cau_outdated`.
|
||||
try pt.recreateFileRoot(inst_info.file);
|
||||
return .{
|
||||
.invalidate_decl_val = true,
|
||||
.invalidate_decl_ref = true,
|
||||
};
|
||||
}
|
||||
|
||||
const decl_prog_node = zcu.sema_prog_node.start(switch (cau.owner.unwrap()) {
|
||||
.nav => |nav| ip.getNav(nav).fqn.toSlice(ip),
|
||||
.type => |ty| Type.fromInterned(ty).containerTypeName(ip).toSlice(ip),
|
||||
@ -685,9 +672,9 @@ pub fn ensureFuncBodyAnalyzed(pt: Zcu.PerThread, maybe_coerced_func_index: Inter

const func = zcu.funcInfo(maybe_coerced_func_index);

//log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)});
log.debug("ensureFuncBodyAnalyzed {d}", .{@intFromEnum(func_index)});

const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
const func_outdated = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);

@ -742,7 +729,7 @@ fn ensureFuncBodyAnalyzedInner(
const ip = &zcu.intern_pool;

const func = zcu.funcInfo(func_index);
const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
const anal_unit = AnalUnit.wrap(.{ .func = func_index });

// Here's an interesting question: is this function actually valid?
// Maybe the signature changed, so we'll end up creating a whole different `func`
@ -766,7 +753,7 @@ fn ensureFuncBodyAnalyzedInner(
if (func_outdated) {
try zcu.markDependeeOutdated(.marked_po, .{ .interned = func_index }); // IES
}
ip.removeDependenciesForDepender(gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .func = func_index }));
ip.remove(pt.tid, func_index);
@panic("TODO: remove orphaned function from binary");
}
@ -901,7 +888,7 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
"unable to codegen: {s}",
.{@errorName(err)},
));
try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .func = func_index }));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .func = func_index }));
},
};
} else if (zcu.llvm_object) |llvm_object| {
@ -982,7 +969,7 @@ fn createFileRootStruct(
if (zcu.comp.incremental) {
try ip.addDependency(
gpa,
InternPool.AnalUnit.wrap(.{ .cau = new_cau_index }),
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = tracked_inst },
);
}
@ -998,35 +985,6 @@ fn createFileRootStruct(
return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}

/// Recreate the root type of a file after it becomes outdated. A new struct type
/// is constructed at a new InternPool index, reusing the namespace for efficiency.
fn recreateFileRoot(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
const file_root_type = zcu.fileRootType(file_index);
const namespace_index = Type.fromInterned(file_root_type).getNamespaceIndex(zcu);

assert(file_root_type != .none);

log.debug("recreateFileRoot mod={s} sub_file_path={s}", .{
file.mod.fully_qualified_name,
file.sub_file_path,
});

if (file.status != .success_zir) {
return error.AnalysisFail;
}

// Invalidate the existing type, reusing its namespace.
const file_root_type_cau = ip.loadStructType(file_root_type).cau.unwrap().?;
ip.removeDependenciesForDepender(
zcu.gpa,
InternPool.AnalUnit.wrap(.{ .cau = file_root_type_cau }),
);
_ = try pt.createFileRootStruct(file_index, namespace_index, true);
}

/// Re-scan the namespace of a file's root struct type on an incremental update.
/// The file must have successfully populated ZIR.
/// If the file's root struct type is not populated (the file is unreferenced), nothing is done.
@ -1060,6 +1018,7 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.
break :decls file.zir.bodySlice(extra_index, decls_len);
};
try pt.scanNamespace(namespace_index, decls);
zcu.namespacePtr(namespace_index).generation = zcu.generation;
}

fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
@ -1080,6 +1039,7 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
.parent = .none,
.owner_type = undefined, // set in `createFileRootStruct`
.file_scope = file_index,
.generation = zcu.generation,
});
const struct_ty = try pt.createFileRootStruct(file_index, new_namespace_index, false);
errdefer zcu.intern_pool.remove(pt.tid, struct_ty);
@ -1131,7 +1091,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const anal_unit = InternPool.AnalUnit.wrap(.{ .cau = cau_index });
const anal_unit = AnalUnit.wrap(.{ .cau = cau_index });

const cau = ip.getCau(cau_index);
const inst_info = cau.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
@ -1151,10 +1111,12 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult {
// This declaration has no value so is definitely not a std.builtin type.
break :ip_index .none;
},
.type => {
.type => |ty| {
// This is an incremental update, and this type is being re-analyzed because it is outdated.
// The type must be recreated at a new `InternPool.Index`.
// Mark it outdated so that creation sites are re-analyzed.
// Create a new type in its place, and mark the old one as outdated so that use sites will
// be re-analyzed and discover an up-to-date type.
const new_ty = try pt.ensureTypeUpToDate(ty, true);
assert(new_ty != ty);
return .{
.invalidate_decl_val = true,
.invalidate_decl_ref = true,
@ -2002,21 +1964,23 @@ const ScanDeclIter = struct {

try namespace.other_decls.append(gpa, cau);

// For a `comptime` declaration, whether to re-analyze is based solely on whether the
// `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already.
const unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value);
if (kv.value == 0) { // no PO deps
if (existing_cau == null) {
// For a `comptime` declaration, whether to analyze is based solely on whether the
// `Cau` is outdated. So, add this one to `outdated` and `outdated_ready` if not already.
const unit = AnalUnit.wrap(.{ .cau = cau });
if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value);
if (kv.value == 0) { // no PO deps
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}
} else if (!zcu.outdated.contains(unit)) {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}
} else if (!zcu.outdated.contains(unit)) {
try zcu.outdated.ensureUnusedCapacity(gpa, 1);
try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
}

break :cau .{ cau, true };
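The promotion rule in the hunk above is easy to lose in the surrounding scaffolding, so here is a condensed, hedged sketch of it. This helper is not part of the commit; the map operations are exactly the ones visible in the hunk, and the signature is invented for illustration.

fn exampleMarkComptimeCauOutdated(zcu: *Zcu, gpa: Allocator, unit: AnalUnit) !void {
    if (zcu.potentially_outdated.fetchSwapRemove(unit)) |kv| {
        // Promote: keep the count of potentially-outdated dependencies it already had.
        try zcu.outdated.ensureUnusedCapacity(gpa, 1);
        try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
        zcu.outdated.putAssumeCapacityNoClobber(unit, kv.value);
        // With no PO dependencies left, the unit can be re-analyzed right away.
        if (kv.value == 0) zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
    } else if (!zcu.outdated.contains(unit)) {
        // Not tracked at all yet: it has no PO dependencies, so it is ready immediately.
        try zcu.outdated.ensureUnusedCapacity(gpa, 1);
        try zcu.outdated_ready.ensureUnusedCapacity(gpa, 1);
        zcu.outdated.putAssumeCapacityNoClobber(unit, 0);
        zcu.outdated_ready.putAssumeCapacityNoClobber(unit, {});
    }
}
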
@ -2027,9 +1991,6 @@ const ScanDeclIter = struct {
const cau, const nav = if (existing_cau) |cau_index| cau_nav: {
const nav_index = ip.getCau(cau_index).owner.unwrap().nav;
const nav = ip.getNav(nav_index);
if (nav.name != name) {
std.debug.panic("'{}' vs '{}'", .{ nav.name.fmt(ip), name.fmt(ip) });
}
assert(nav.name == name);
assert(nav.fqn == fqn);
break :cau_nav .{ cau_index, nav_index };
@ -2078,7 +2039,7 @@ const ScanDeclIter = struct {
},
};

if (want_analysis or declaration.flags.is_export) {
if (existing_cau == null and (want_analysis or declaration.flags.is_export)) {
log.debug(
"scanDecl queue analyze_cau file='{s}' cau_index={d}",
.{ namespace.fileScope(zcu).sub_file_path, cau },
@ -2098,7 +2059,7 @@ fn analyzeFnBody(pt: Zcu.PerThread, func_index: InternPool.Index) Zcu.SemaError!
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const anal_unit = InternPool.AnalUnit.wrap(.{ .func = func_index });
const anal_unit = AnalUnit.wrap(.{ .func = func_index });
const func = zcu.funcInfo(func_index);
const inst_info = func.zir_body_inst.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
@ -2484,7 +2445,7 @@ fn processExportsInner(
const nav = ip.getNav(nav_index);
if (zcu.failed_codegen.contains(nav_index)) break :failed true;
if (nav.analysis_owner.unwrap()) |cau| {
const cau_unit = InternPool.AnalUnit.wrap(.{ .cau = cau });
const cau_unit = AnalUnit.wrap(.{ .cau = cau });
if (zcu.failed_analysis.contains(cau_unit)) break :failed true;
if (zcu.transitive_failed_analysis.contains(cau_unit)) break :failed true;
}
@ -2494,7 +2455,7 @@ fn processExportsInner(
};
// If the value is a function, we also need to check if that function succeeded analysis.
if (val.typeOf(zcu).zigTypeTag(zcu) == .Fn) {
const func_unit = InternPool.AnalUnit.wrap(.{ .func = val.toIntern() });
const func_unit = AnalUnit.wrap(.{ .func = val.toIntern() });
if (zcu.failed_analysis.contains(func_unit)) break :failed true;
if (zcu.transitive_failed_analysis.contains(func_unit)) break :failed true;
}
@ -2669,7 +2630,7 @@ pub fn linkerUpdateNav(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void
.{@errorName(err)},
));
if (nav.analysis_owner.unwrap()) |cau| {
try zcu.retryable_failures.append(zcu.gpa, InternPool.AnalUnit.wrap(.{ .cau = cau }));
try zcu.retryable_failures.append(zcu.gpa, AnalUnit.wrap(.{ .cau = cau }));
} else {
// TODO: we don't have a way to indicate that this failure is retryable!
// Since these are really rare, we could as a cop-out retry the whole build next update.
@ -2782,7 +2743,7 @@ pub fn reportRetryableFileError(
gop.value_ptr.* = err_msg;
}

/// Shortcut for calling `intern_pool.get`.
///Shortcut for calling `intern_pool.get`.
pub fn intern(pt: Zcu.PerThread, key: InternPool.Key) Allocator.Error!InternPool.Index {
return pt.zcu.intern_pool.get(pt.zcu.gpa, pt.tid, key);
}
@ -3367,6 +3328,532 @@ pub fn navAlignment(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) InternPo
return Value.fromInterned(r.val).typeOf(zcu).abiAlignment(pt);
}

/// Given a container type requiring resolution, ensures that it is up-to-date.
/// If not, the type is recreated at a new `InternPool.Index`.
/// The new index is returned. This is the same as the old index if the fields were up-to-date.
/// If `already_updating` is set, assumes the type is already outdated and undergoing re-analysis rather than checking `zcu.outdated`.
pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updating: bool) Zcu.SemaError!InternPool.Index {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
switch (ip.indexToKey(ty)) {
.struct_type => |key| {
const struct_obj = ip.loadStructType(ty);
const outdated = already_updating or o: {
const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? });
const o = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (o) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
}
break :o o;
};
if (!outdated) return ty;
return pt.recreateStructType(ty, key, struct_obj);
},
.union_type => |key| {
const union_obj = ip.loadUnionType(ty);
const outdated = already_updating or o: {
const anal_unit = AnalUnit.wrap(.{ .cau = union_obj.cau });
const o = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (o) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
}
break :o o;
};
if (!outdated) return ty;
return pt.recreateUnionType(ty, key, union_obj);
},
.enum_type => |key| {
const enum_obj = ip.loadEnumType(ty);
const outdated = already_updating or o: {
const anal_unit = AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? });
const o = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (o) {
_ = zcu.outdated_ready.swapRemove(anal_unit);
try zcu.markDependeeOutdated(.marked_po, .{ .interned = ty });
}
break :o o;
};
if (!outdated) return ty;
return pt.recreateEnumType(ty, key, enum_obj);
},
.opaque_type => {
assert(!already_updating);
return ty;
},
else => unreachable,
}
}
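A minimal usage sketch (not part of the diff) of how a use site is expected to call `ensureTypeUpToDate`; the wrapper name `exampleResolveContainer` is invented for illustration.

fn exampleResolveContainer(pt: Zcu.PerThread, old_ty: InternPool.Index) Zcu.SemaError!InternPool.Index {
    // `already_updating = false`: let the callee consult `zcu.outdated` itself.
    const ty = try pt.ensureTypeUpToDate(old_ty, false);
    // If the type was outdated it has been recreated, and `old_ty` now names the
    // stale copy that lookups can no longer find; only `ty` may be used from here on.
    return ty;
}
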
fn recreateStructType(
pt: Zcu.PerThread,
ty: InternPool.Index,
full_key: InternPool.Key.NamespaceType,
struct_obj: InternPool.LoadedStructType,
) Zcu.SemaError!InternPool.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // not a struct
.declared => |d| d,
};

if (@intFromEnum(ty) <= InternPool.static_len) {
@panic("TODO: recreate resolved builtin type");
}

const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir;

assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
var extra_index = extra.end;

const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;

if (captures_len != key.captures.owned.len) return error.AnalysisFail;
if (fields_len != struct_obj.field_types.len) return error.AnalysisFail;

// The old type will be unused, so drop its dependency information.
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? }));

const namespace_index = struct_obj.namespace.unwrap().?;

const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
.layout = small.layout,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
.any_aligned_fields = small.any_aligned_fields,
.key = .{ .declared_owned_captures = .{
.zir_index = key.zir_index,
.captures = key.captures.owned,
} },
}, true)) {
.wip => |wip| wip,
.existing => unreachable, // we passed `replace_existing`
};
errdefer wip_ty.cancel(ip, pt.tid);

wip_ty.setName(ip, struct_obj.name);
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);
try ip.addDependency(
gpa,
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = key.zir_index },
);
zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive.
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });

const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
if (inst_info.inst == .main_struct_inst) {
// This is the root type of a file! Update the reference.
zcu.setFileRootType(inst_info.file, new_ty);
}
return new_ty;
}
fn recreateUnionType(
pt: Zcu.PerThread,
ty: InternPool.Index,
full_key: InternPool.Key.NamespaceType,
union_obj: InternPool.LoadedUnionType,
) Zcu.SemaError!InternPool.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // not a union
.declared => |d| d,
};

if (@intFromEnum(ty) <= InternPool.static_len) {
@panic("TODO: recreate resolved builtin type");
}

const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir;

assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index = extra.end;

extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;

if (captures_len != key.captures.owned.len) return error.AnalysisFail;
if (fields_len != union_obj.field_types.len) return error.AnalysisFail;

// The old type will be unused, so drop its dependency information.
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = union_obj.cau }));

const namespace_index = union_obj.namespace;

const wip_ty = switch (try ip.getUnionType(gpa, pt.tid, .{
.flags = .{
.layout = small.layout,
.status = .none,
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
.tagged
else if (small.layout != .auto)
.none
else switch (true) { // TODO
true => .safety,
false => .none,
},
.any_aligned_fields = small.any_aligned_fields,
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.alignment = .none,
},
.fields_len = fields_len,
.enum_tag_ty = .none, // set later
.field_types = &.{}, // set later
.field_aligns = &.{}, // set later
.key = .{ .declared_owned_captures = .{
.zir_index = key.zir_index,
.captures = key.captures.owned,
} },
}, true)) {
.wip => |wip| wip,
.existing => unreachable, // we passed `replace_existing`
};
errdefer wip_ty.cancel(ip, pt.tid);

wip_ty.setName(ip, union_obj.name);
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);
try ip.addDependency(
gpa,
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = key.zir_index },
);
zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirUnionDecl` will ultimately do that if the type is still alive.
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
return wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
}
fn recreateEnumType(
pt: Zcu.PerThread,
ty: InternPool.Index,
full_key: InternPool.Key.NamespaceType,
enum_obj: InternPool.LoadedEnumType,
) Zcu.SemaError!InternPool.Index {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;

const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // never outdated
.declared => |d| d,
};

if (@intFromEnum(ty) <= InternPool.static_len) {
@panic("TODO: recreate resolved builtin type");
}

const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir;

assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;
assert(extended.opcode == .enum_decl);
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index = extra.end;

const tag_type_ref = if (small.has_tag_type) blk: {
const tag_type_ref: Zir.Inst.Ref = @enumFromInt(zir.extra[extra_index]);
extra_index += 1;
break :blk tag_type_ref;
} else .none;

const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;

const body_len = if (small.has_body_len) blk: {
const body_len = zir.extra[extra_index];
extra_index += 1;
break :blk body_len;
} else 0;

const fields_len = if (small.has_fields_len) blk: {
const fields_len = zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;

const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;

if (captures_len != key.captures.owned.len) return error.AnalysisFail;
if (fields_len != enum_obj.names.len) return error.AnalysisFail;

extra_index += captures_len;
extra_index += decls_len;

const body = zir.bodySlice(extra_index, body_len);
extra_index += body.len;

const bit_bags_count = std.math.divCeil(usize, fields_len, 32) catch unreachable;
const body_end = extra_index;
extra_index += bit_bags_count;

const any_values = for (zir.extra[body_end..][0..bit_bags_count]) |bag| {
if (bag != 0) break true;
} else false;

// The old type will be unused, so drop its dependency information.
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = enum_obj.cau.unwrap().? }));

const namespace_index = enum_obj.namespace;

const wip_ty = switch (try ip.getEnumType(gpa, pt.tid, .{
.has_values = any_values,
.tag_mode = if (small.nonexhaustive)
.nonexhaustive
else if (tag_type_ref == .none)
.auto
else
.explicit,
.fields_len = fields_len,
.key = .{ .declared_owned_captures = .{
.zir_index = key.zir_index,
.captures = key.captures.owned,
} },
}, true)) {
.wip => |wip| wip,
.existing => unreachable, // we passed `replace_existing`
};
var done = false;
errdefer if (!done) wip_ty.cancel(ip, pt.tid);

wip_ty.setName(ip, enum_obj.name);

const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);

zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirEnumDecl` will ultimately do that if the type is still alive.

wip_ty.prepare(ip, new_cau_index, namespace_index);
done = true;

Sema.resolveDeclaredEnum(
pt,
wip_ty,
inst_info.inst,
key.zir_index,
namespace_index,
enum_obj.name,
new_cau_index,
small,
body,
tag_type_ref,
any_values,
fields_len,
zir,
body_end,
) catch |err| switch (err) {
error.GenericPoison => unreachable,
error.ComptimeBreak => unreachable,
error.ComptimeReturn => unreachable,
error.AnalysisFail, error.OutOfMemory => |e| return e,
};

return wip_ty.index;
}
/// Given a namespace, re-scan its declarations from the type definition if they have not
/// yet been re-scanned on this update.
/// If the type declaration instruction has been lost, returns `error.AnalysisFail`.
/// This will effectively short-circuit the caller, which will be semantic analysis of a
/// guaranteed-unreferenced `AnalUnit`, to trigger a transitive analysis error.
pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace.Index) Zcu.SemaError!void {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const namespace = zcu.namespacePtr(namespace_index);

if (namespace.generation == zcu.generation) return;

const Container = enum { @"struct", @"union", @"enum", @"opaque" };
const container: Container, const full_key = switch (ip.indexToKey(namespace.owner_type)) {
.struct_type => |k| .{ .@"struct", k },
.union_type => |k| .{ .@"union", k },
.enum_type => |k| .{ .@"enum", k },
.opaque_type => |k| .{ .@"opaque", k },
else => unreachable, // namespaces are owned by a container type
};

const key = switch (full_key) {
.reified, .empty_struct, .generated_tag => {
// Namespace always empty, so up-to-date.
namespace.generation = zcu.generation;
return;
},
.declared => |d| d,
};

// Namespace outdated -- re-scan the type if necessary.

const inst_info = key.zir_index.resolveFull(ip) orelse return error.AnalysisFail;
const file = zcu.fileByIndex(inst_info.file);
if (file.status != .success_zir) return error.AnalysisFail;
const zir = file.zir;

assert(zir.instructions.items(.tag)[@intFromEnum(inst_info.inst)] == .extended);
const extended = zir.instructions.items(.data)[@intFromEnum(inst_info.inst)].extended;

const decls = switch (container) {
.@"struct" => decls: {
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.StructDecl, extended.operand);
var extra_index = extra.end;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
if (backing_int_body_len == 0) {
extra_index += 1; // backing_int_ref
} else {
extra_index += backing_int_body_len; // backing_int_body_inst
}
}
break :decls zir.bodySlice(extra_index, decls_len);
},
.@"union" => decls: {
assert(extended.opcode == .union_decl);
const small: Zir.Inst.UnionDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.UnionDecl, extended.operand);
var extra_index = extra.end;
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
break :decls zir.bodySlice(extra_index, decls_len);
},
.@"enum" => decls: {
assert(extended.opcode == .enum_decl);
const small: Zir.Inst.EnumDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.EnumDecl, extended.operand);
var extra_index = extra.end;
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
break :decls zir.bodySlice(extra_index, decls_len);
},
.@"opaque" => decls: {
assert(extended.opcode == .opaque_decl);
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
const extra = zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index = extra.end;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
extra_index += captures_len;
break :decls zir.bodySlice(extra_index, decls_len);
},
};

try pt.scanNamespace(namespace_index, decls);
namespace.generation = zcu.generation;
}
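A sketch of the intended call pattern, not code from this commit: a namespace lookup re-scans first, because there is no guarantee that the owning `zirStructDecl` was re-analyzed before the lookup on this update. The function name is invented for illustration.

fn exampleNamespaceLookup(pt: Zcu.PerThread, ns_index: Zcu.Namespace.Index) Zcu.SemaError!void {
    // After this call, `namespacePtr(ns_index).generation == zcu.generation`, so the
    // declarations reflect the current ZIR and can be searched safely.
    try pt.ensureNamespaceUpToDate(ns_index);
    const ns = pt.zcu.namespacePtr(ns_index);
    _ = ns; // ...search `ns` for the wanted declaration here...
}
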

const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@ -3379,6 +3866,7 @@ const builtin = @import("builtin");
const Cache = std.Build.Cache;
const dev = @import("../dev.zig");
const InternPool = @import("../InternPool.zig");
const AnalUnit = InternPool.AnalUnit;
const isUpDir = @import("../introspect.zig").isUpDir;
const Liveness = @import("../Liveness.zig");
const log = std.log.scoped(.zcu);