compiler: remove anonymous struct types, unify all tuples
This commit reworks how anonymous struct literals and tuples work.

Previously, an untyped anonymous struct literal (e.g. `const x = .{ .a = 123 }`) was given an "anonymous struct type", which is a special kind of struct which coerces using structural equivalence. This mechanism was a holdover from before we used RLS / result types as the primary mechanism of type inference. This commit changes the language so that the type assigned here is a "normal" struct type. It uses a form of equivalence based on the AST node and the type's structure, much like a reified (`@Type`) type.

Additionally, tuples have been simplified. The distinction between "simple" and "complex" tuple types is eliminated. All tuples, even those explicitly declared using `struct { ... }` syntax, use structural equivalence, and do not undergo staged type resolution. Tuples are very restricted: they cannot have non-`auto` layouts, cannot have aligned fields, and cannot have default values with the exception of `comptime` fields. Tuples currently do not have optimized layout, but this can be changed in the future.

This change simplifies the language, and fixes some problematic coercions through pointers which led to unintuitive behavior.

Resolves: #16865
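To make the new semantics concrete, here is a small illustrative Zig sketch (not taken from this commit or its tests), assuming the post-change rules described above: anonymous literals receive an ordinary struct type keyed on the AST node and structure, all tuples compare structurally, and the tuple restrictions are rejected at compile time. The error messages quoted in the comments are the ones introduced by `tupleDecl` in AstGen.zig in the diff below.

```zig
const std = @import("std");

// An untyped anonymous struct literal. Under this change it gets a "normal"
// struct type whose identity is based on the AST node and the type's
// structure, much like a reified (`@Type`) type.
const x = .{ .a = @as(u32, 123) };

// Tuples always use structural equivalence now, even when declared
// explicitly with `struct { ... }` syntax, so these are the same type.
const TupleA = struct { u32, u8 };
const TupleB = struct { u32, u8 };

comptime {
    std.debug.assert(TupleA == TupleB);
}

// Tuple restrictions (each of these declarations would be a compile error):
//
//   extern struct { u32, u8 }     // "extern tuples are not supported"
//   packed struct { u32, u8 }     // "packed tuples are not supported"
//   struct { u32 align(8), u8 }   // "tuple field has alignment"
//   struct { u32 = 0, u8 }        // non-comptime field with a default value
//
// Default values remain allowed only on `comptime` fields.

test "tuples are structurally equivalent" {
    const a: TupleA = .{ 1, 2 };
    const b: TupleB = a; // same type under structural equivalence
    try std.testing.expectEqual(@as(u32, 1), b[0]);
    try std.testing.expectEqual(@as(u8, 2), b[1]);
    _ = x;
}
```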
parent a916bc7fdd
commit d11bbde5f9
lib/compiler/aro/aro/Builtins.zig (vendored, 2 changed lines)

@@ -157,7 +157,7 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *c
.len = element_count,
.elem = child_ty,
};
const vector_ty = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
const vector_ty: Type = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
builder.specifier = Type.Builder.fromType(vector_ty);
},
.q => {
lib/compiler/aro/aro/Parser.zig (vendored, 2 changed lines)

@@ -8095,7 +8095,7 @@ fn primaryExpr(p: *Parser) Error!Result {
fn makePredefinedIdentifier(p: *Parser, strings_top: usize) !Result {
const end: u32 = @intCast(p.strings.items.len);
const elem_ty = .{ .specifier = .char, .qual = .{ .@"const" = true } };
const elem_ty: Type = .{ .specifier = .char, .qual = .{ .@"const" = true } };
const arr_ty = try p.arena.create(Type.Array);
arr_ty.* = .{ .elem = elem_ty, .len = end - strings_top };
const ty: Type = .{ .specifier = .array, .data = .{ .array = arr_ty } };
lib/compiler/aro/aro/text_literal.zig (vendored, 2 changed lines)

@@ -188,7 +188,7 @@ pub const Parser = struct {
pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void {
if (self.errored) return;
self.errored = true;
const diagnostic = .{ .tag = tag, .extra = extra };
const diagnostic: CharDiagnostic = .{ .tag = tag, .extra = extra };
if (self.errors_len == self.errors_buffer.len) {
self.errors_buffer[self.errors_buffer.len - 1] = diagnostic;
} else {
@@ -749,7 +749,7 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH
const is_const = is_fn_proto or child_type.isConst();
const is_volatile = child_type.qual.@"volatile";
const elem_type = try transType(c, scope, child_type, qual_handling, source_loc);
const ptr_info = .{
const ptr_info: @FieldType(ast.Payload.Pointer, "data") = .{
.is_const = is_const,
.is_volatile = is_volatile,
.elem_type = elem_type,
@@ -6,7 +6,7 @@ const io = std.io;
const testing = std.testing;
const assert = std.debug.assert;
pub const std_options = .{
pub const std_options: std.Options = .{
.logFn = log,
};

@@ -299,7 +299,7 @@ test "precedence" {
test "zig_version" {
// An approximate Zig build that predates this test.
const older_version = .{ .major = 0, .minor = 8, .patch = 0, .pre = "dev.874" };
const older_version: Version = .{ .major = 0, .minor = 8, .patch = 0, .pre = "dev.874" };
// Simulated compatibility check using Zig version.
const compatible = comptime @import("builtin").zig_version.order(older_version) == .gt;

@@ -509,7 +509,7 @@ pub const Os = struct {
.max = .{ .major = 6, .minor = 10, .patch = 3 },
},
.glibc = blk: {
const default_min = .{ .major = 2, .minor = 28, .patch = 0 };
const default_min: std.SemanticVersion = .{ .major = 2, .minor = 28, .patch = 0 };
for (std.zig.target.available_libcs) |libc| {
// We don't know the ABI here. We can get away with not checking it
@@ -100,7 +100,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// of this ArrayList. Empties this ArrayList.
pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) {
const allocator = self.allocator;
const result = .{ .items = self.items, .capacity = self.capacity };
const result: ArrayListAlignedUnmanaged(T, alignment) = .{ .items = self.items, .capacity = self.capacity };
self.* = init(allocator);
return result;
}

@@ -258,8 +258,7 @@ fn kvSplit(str: []const u8) !struct { key: []const u8, value: []const u8 } {
var it = mem.splitScalar(u8, str, kv_delimiter_scalar);
const key = it.first();
const value = it.next() orelse return Error.InvalidEncoding;
const ret = .{ .key = key, .value = value };
return ret;
return .{ .key = key, .value = value };
}
test "phc format - encoding/decoding" {

@@ -1018,7 +1018,7 @@ fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
.type = T,
.default_value = null,
.is_comptime = false,
.alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0,
.alignment = 0,
};
}
@@ -1711,7 +1711,7 @@ fn structInitExpr(
return rvalue(gz, ri, val, node);
},
.none, .ref, .inferred_ptr => {
return rvalue(gz, ri, .empty_struct, node);
return rvalue(gz, ri, .empty_tuple, node);
},
.destructure => |destructure| {
return astgen.failNodeNotes(node, "empty initializer cannot be destructured", .{}, &.{

@@ -1888,6 +1888,8 @@ fn structInitExprAnon(
const tree = astgen.tree;
const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{
.abs_node = node,
.abs_line = astgen.source_line,
.fields_len = @intCast(struct_init.ast.fields.len),
});
const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).@"struct".fields.len;

@@ -1919,6 +1921,8 @@ fn structInitExprTyped(
const tree = astgen.tree;
const payload_index = try addExtra(astgen, Zir.Inst.StructInit{
.abs_node = node,
.abs_line = astgen.source_line,
.fields_len = @intCast(struct_init.ast.fields.len),
});
const field_size = @typeInfo(Zir.Inst.StructInit.Item).@"struct".fields.len;

@@ -5007,6 +5011,25 @@ fn structDeclInner(
layout: std.builtin.Type.ContainerLayout,
backing_int_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
{
const is_tuple = for (container_decl.ast.members) |member_node| {
const container_field = tree.fullContainerField(member_node) orelse continue;
if (container_field.ast.tuple_like) break true;
} else false;
if (is_tuple) {
if (node == 0) {
return astgen.failTok(0, "file cannot be a tuple", .{});
} else {
return tupleDecl(gz, scope, node, container_decl, layout, backing_int_node);
}
}
}
const decl_inst = try gz.reserveInstructionIndex();
if (container_decl.ast.members.len == 0 and backing_int_node == 0) {

@@ -5019,7 +5042,6 @@ fn structDeclInner(
.has_backing_int = false,
.known_non_opv = false,
.known_comptime_only = false,
.is_tuple = false,
.any_comptime_fields = false,
.any_default_inits = false,
.any_aligned_fields = false,

@@ -5028,10 +5050,6 @@ fn structDeclInner(
return decl_inst.toRef();
}
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
var namespace: Scope.Namespace = .{
.parent = scope,
.node = node,
@@ -5106,46 +5124,6 @@ fn structDeclInner(
// No defer needed here because it is handled by `wip_members.deinit()` above.
const bodies_start = astgen.scratch.items.len;
const node_tags = tree.nodes.items(.tag);
const is_tuple = for (container_decl.ast.members) |member_node| {
const container_field = tree.fullContainerField(member_node) orelse continue;
if (container_field.ast.tuple_like) break true;
} else false;
if (is_tuple) switch (layout) {
.auto => {},
.@"extern" => return astgen.failNode(node, "extern tuples are not supported", .{}),
.@"packed" => return astgen.failNode(node, "packed tuples are not supported", .{}),
};
if (is_tuple) for (container_decl.ast.members) |member_node| {
switch (node_tags[member_node]) {
.container_field_init,
.container_field_align,
.container_field,
.@"comptime",
.test_decl,
=> continue,
else => {
const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) {
.container_field_init,
.container_field_align,
.container_field,
=> break maybe_tuple,
else => {},
} else unreachable;
return astgen.failNodeNotes(
member_node,
"tuple declarations cannot contain declarations",
.{},
&[_]u32{
try astgen.errNoteNode(tuple_member, "tuple field here", .{}),
},
);
},
}
};
const old_hasher = astgen.src_hasher;
defer astgen.src_hasher = old_hasher;
astgen.src_hasher = std.zig.SrcHasher.init(.{});
@@ -5167,16 +5145,10 @@ fn structDeclInner(
astgen.src_hasher.update(tree.getNodeSource(member_node));
if (!is_tuple) {
const field_name = try astgen.identAsString(member.ast.main_token);
member.convertToNonTupleLike(astgen.tree.nodes);
assert(!member.ast.tuple_like);
wip_members.appendToField(@intFromEnum(field_name));
} else if (!member.ast.tuple_like) {
return astgen.failTok(member.ast.main_token, "tuple field has a name", .{});
}
const field_name = try astgen.identAsString(member.ast.main_token);
member.convertToNonTupleLike(astgen.tree.nodes);
assert(!member.ast.tuple_like);
wip_members.appendToField(@intFromEnum(field_name));
const doc_comment_index = try astgen.docCommentAsString(member.firstToken());
wip_members.appendToField(@intFromEnum(doc_comment_index));

@@ -5270,7 +5242,6 @@ fn structDeclInner(
.has_backing_int = backing_int_ref != .none,
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
.is_tuple = is_tuple,
.any_comptime_fields = any_comptime_fields,
.any_default_inits = any_default_inits,
.any_aligned_fields = any_aligned_fields,
@@ -5300,6 +5271,106 @@ fn structDeclInner(
return decl_inst.toRef();
}

fn tupleDecl(
gz: *GenZir,
scope: *Scope,
node: Ast.Node.Index,
container_decl: Ast.full.ContainerDecl,
layout: std.builtin.Type.ContainerLayout,
backing_int_node: Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
switch (layout) {
.auto => {},
.@"extern" => return astgen.failNode(node, "extern tuples are not supported", .{}),
.@"packed" => return astgen.failNode(node, "packed tuples are not supported", .{}),
}
if (backing_int_node != 0) {
return astgen.failNode(backing_int_node, "tuple does not support backing integer type", .{});
}
// We will use the scratch buffer, starting here, for the field data:
// 1. fields: { // for every `fields_len` (stored in `extended.small`)
// type: Inst.Ref,
// init: Inst.Ref, // `.none` for non-`comptime` fields
// }
const fields_start = astgen.scratch.items.len;
defer astgen.scratch.items.len = fields_start;
try astgen.scratch.ensureUnusedCapacity(gpa, container_decl.ast.members.len * 2);
for (container_decl.ast.members) |member_node| {
const field = tree.fullContainerField(member_node) orelse {
const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) {
.container_field_init,
.container_field_align,
.container_field,
=> break maybe_tuple,
else => {},
} else unreachable;
return astgen.failNodeNotes(
member_node,
"tuple declarations cannot contain declarations",
.{},
&.{try astgen.errNoteNode(tuple_member, "tuple field here", .{})},
);
};
if (!field.ast.tuple_like) {
return astgen.failTok(field.ast.main_token, "tuple field has a name", .{});
}
if (field.ast.align_expr != 0) {
return astgen.failTok(field.ast.main_token, "tuple field has alignment", .{});
}
if (field.ast.value_expr != 0 and field.comptime_token == null) {
return astgen.failTok(field.ast.main_token, "non-comptime tuple field has default initialization value", .{});
}
if (field.ast.value_expr == 0 and field.comptime_token != null) {
return astgen.failTok(field.comptime_token.?, "comptime field without default initialization value", .{});
}
const field_type_ref = try typeExpr(gz, scope, field.ast.type_expr);
astgen.scratch.appendAssumeCapacity(@intFromEnum(field_type_ref));
if (field.ast.value_expr != 0) {
const field_init_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_type_ref } }, field.ast.value_expr);
astgen.scratch.appendAssumeCapacity(@intFromEnum(field_init_ref));
} else {
astgen.scratch.appendAssumeCapacity(@intFromEnum(Zir.Inst.Ref.none));
}
}
const fields_len = std.math.cast(u16, container_decl.ast.members.len) orelse {
return astgen.failNode(node, "this compiler implementation only supports 65535 tuple fields", .{});
};
const extra_trail = astgen.scratch.items[fields_start..];
assert(extra_trail.len == fields_len * 2);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.TupleDecl).@"struct".fields.len + extra_trail.len);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.TupleDecl{
.src_node = gz.nodeIndexToRelative(node),
});
astgen.extra.appendSliceAssumeCapacity(extra_trail);
return gz.add(.{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .tuple_decl,
.small = fields_len,
.operand = payload_index,
} },
});
}

fn unionDeclInner(
gz: *GenZir,
scope: *Scope,
@@ -11172,7 +11243,7 @@ fn rvalueInner(
as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type),
as_ty | @intFromEnum(Zir.Inst.Ref.generic_poison_type),
as_ty | @intFromEnum(Zir.Inst.Ref.empty_struct_type),
as_ty | @intFromEnum(Zir.Inst.Ref.empty_tuple_type),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.one),
as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one),

@@ -13173,7 +13244,6 @@ const GenZir = struct {
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,
is_tuple: bool,
any_comptime_fields: bool,
any_default_inits: bool,
any_aligned_fields: bool,

@@ -13217,7 +13287,6 @@ const GenZir = struct {
.has_backing_int = args.has_backing_int,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.is_tuple = args.is_tuple,
.name_strategy = gz.anon_name_strategy,
.layout = args.layout,
.any_comptime_fields = args.any_comptime_fields,
@@ -1,5 +1,3 @@
const std = @import("std");

pub const Tag = enum {
add_with_overflow,
addrspace_cast,

@@ -147,7 +145,7 @@ param_count: ?u8,
pub const list = list: {
@setEvalBranchQuota(3000);
break :list std.StaticStringMap(@This()).initComptime(.{
break :list std.StaticStringMap(BuiltinFn).initComptime([_]struct { []const u8, BuiltinFn }{
.{
"@addWithOverflow",
.{

@@ -1011,3 +1009,6 @@ pub const list = list: {
},
});
};

const std = @import("std");
const BuiltinFn = @This();
@@ -1887,6 +1887,10 @@ pub const Inst = struct {
/// `operand` is payload index to `OpaqueDecl`.
/// `small` is `OpaqueDecl.Small`.
opaque_decl,
/// A tuple type. Note that tuples are not namespace/container types.
/// `operand` is payload index to `TupleDecl`.
/// `small` is `fields_len: u16`.
tuple_decl,
/// Implements the `@This` builtin.
/// `operand` is `src_node: i32`.
this,

@@ -2187,7 +2191,7 @@ pub const Inst = struct {
anyerror_void_error_union_type,
adhoc_inferred_error_set_type,
generic_poison_type,
empty_struct_type,
empty_tuple_type,
undef,
zero,
zero_usize,

@@ -2202,7 +2206,7 @@ pub const Inst = struct {
null_value,
bool_true,
bool_false,
empty_struct,
empty_tuple,
generic_poison,
/// This Ref does not correspond to any ZIR instruction or constant

@@ -3041,7 +3045,7 @@ pub const Inst = struct {
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 9. fields: { // for every fields_len
/// field_name: u32, // if !is_tuple
/// field_name: u32,
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
/// field_type_body_len: u32, // if corresponding bit is set

@@ -3071,13 +3075,12 @@ pub const Inst = struct {
has_backing_int: bool,
known_non_opv: bool,
known_comptime_only: bool,
is_tuple: bool,
name_strategy: NameStrategy,
layout: std.builtin.Type.ContainerLayout,
any_default_inits: bool,
any_comptime_fields: bool,
any_aligned_fields: bool,
_: u2 = undefined,
_: u3 = undefined,
};
};
@@ -3302,6 +3305,15 @@ pub const Inst = struct {
};
};

/// Trailing:
/// 1. fields: { // for every `fields_len` (stored in `extended.small`)
/// type: Inst.Ref,
/// init: Inst.Ref, // `.none` for non-`comptime` fields
/// }
pub const TupleDecl = struct {
src_node: i32, // relative
};

/// Trailing:
/// { // for every fields_len
/// field_name: NullTerminatedString // null terminated string index

@@ -3329,6 +3341,11 @@ pub const Inst = struct {
/// Trailing is an item per field.
pub const StructInit = struct {
/// If this is an anonymous initialization (the operand is poison), this instruction becomes the owner of a type.
/// To resolve source locations, we need an absolute source node.
abs_node: Ast.Node.Index,
/// Likewise, we need an absolute line number.
abs_line: u32,
fields_len: u32,
pub const Item = struct {

@@ -3344,6 +3361,11 @@ pub const Inst = struct {
/// TODO make this instead array of inits followed by array of names because
/// it will be simpler Sema code and better for CPU cache.
pub const StructInitAnon = struct {
/// This is an anonymous initialization, meaning this instruction becomes the owner of a type.
/// To resolve source locations, we need an absolute source node.
abs_node: Ast.Node.Index,
/// Likewise, we need an absolute line number.
abs_line: u32,
fields_len: u32,
pub const Item = struct {
@@ -3741,6 +3763,8 @@ fn findDeclsInner(
defers: *std.AutoHashMapUnmanaged(u32, void),
inst: Inst.Index,
) Allocator.Error!void {
comptime assert(Zir.inst_tracking_version == 0);
const tags = zir.instructions.items(.tag);
const datas = zir.instructions.items(.data);

@@ -3884,9 +3908,6 @@ fn findDeclsInner(
.struct_init_empty,
.struct_init_empty_result,
.struct_init_empty_ref_result,
.struct_init_anon,
.struct_init,
.struct_init_ref,
.validate_struct_init_ty,
.validate_struct_init_result_ty,
.validate_ptr_struct_init,

@@ -3978,6 +3999,12 @@ fn findDeclsInner(
.restore_err_ret_index_fn_entry,
=> return,
// Struct initializations need tracking, as they may create anonymous struct types.
.struct_init,
.struct_init_ref,
.struct_init_anon,
=> return list.append(gpa, inst),
.extended => {
const extended = datas[@intFromEnum(inst)].extended;
switch (extended.opcode) {

@@ -4034,6 +4061,7 @@ fn findDeclsInner(
.builtin_value,
.branch_hint,
.inplace_arith_result_ty,
.tuple_decl,
=> return,
// `@TypeOf` has a body.

@@ -4110,8 +4138,7 @@ fn findDeclsInner(
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;
fields_extra_index += @intFromBool(!small.is_tuple); // field_name
fields_extra_index += 1; // doc_comment
fields_extra_index += 2; // field_name, doc_comment
if (has_type_body) {
const field_type_body_len = zir.extra[fields_extra_index];

@@ -4736,3 +4763,35 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash {
else => return null,
}
}

/// When the ZIR update tracking logic must be modified to consider new instructions,
/// change this constant to trigger compile errors at all relevant locations.
pub const inst_tracking_version = 0;

/// Asserts that a ZIR instruction is tracked across incremental updates, and
/// thus may be given an `InternPool.TrackedInst`.
pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void {
comptime assert(Zir.inst_tracking_version == 0);
const inst = zir.instructions.get(@intFromEnum(inst_idx));
switch (inst.tag) {
.struct_init,
.struct_init_ref,
.struct_init_anon,
=> {}, // tracked in order, as the owner instructions of anonymous struct types
.func,
.func_inferred,
.func_fancy,
=> {}, // tracked in order, as the owner instructions of function bodies
.declaration => {}, // tracked by correlating names in the namespace of the parent container
.extended => switch (inst.data.extended.opcode) {
.struct_decl,
.union_decl,
.enum_decl,
.opaque_decl,
.reify,
=> {}, // tracked in order, as the owner instructions of explicit container types
else => unreachable, // assertion failure; not trackable
},
else => unreachable, // assertion failure; not trackable
}
}
@@ -277,7 +277,7 @@ const SystemVersionTokenizer = struct {
};

test "detect" {
const cases = .{
const cases: [5]struct { []const u8, std.SemanticVersion } = .{
.{
\\<?xml version="1.0" encoding="UTF-8"?>
\\<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">

@@ -388,8 +388,8 @@ test "detect" {
inline for (cases) |case| {
const ver0 = try parseSystemVersion(case[0]);
const ver1: std.SemanticVersion = case[1];
try testing.expectEqual(@as(std.math.Order, .eq), ver0.order(ver1));
const ver1 = case[1];
try testing.expectEqual(std.math.Order.eq, ver0.order(ver1));
}
}
@@ -962,7 +962,7 @@ pub const Inst = struct {
anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type),
adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type),
generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type),
empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type),
empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type),
undef = @intFromEnum(InternPool.Index.undef),
zero = @intFromEnum(InternPool.Index.zero),
zero_usize = @intFromEnum(InternPool.Index.zero_usize),

@@ -977,7 +977,7 @@ pub const Inst = struct {
null_value = @intFromEnum(InternPool.Index.null_value),
bool_true = @intFromEnum(InternPool.Index.bool_true),
bool_false = @intFromEnum(InternPool.Index.bool_false),
empty_struct = @intFromEnum(InternPool.Index.empty_struct),
empty_tuple = @intFromEnum(InternPool.Index.empty_tuple),
generic_poison = @intFromEnum(InternPool.Index.generic_poison),
/// This Ref does not correspond to any AIR instruction or constant
@@ -501,7 +501,7 @@ pub fn checkType(ty: Type, zcu: *Zcu) bool {
.auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved,
};
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
for (0..tuple.types.len) |i| {
const field_is_comptime = tuple.values.get(ip)[i] != .none;
if (field_is_comptime) continue;

@@ -2081,7 +2081,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
log.debug("CacheMode.whole cache miss for {s}", .{comp.root_name});
// Compile the artifacts to a temporary directory.
const tmp_artifact_directory = d: {
const tmp_artifact_directory: Directory = d: {
const s = std.fs.path.sep_str;
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int);
@@ -1787,10 +1787,11 @@ pub const Key = union(enum) {
/// or was created with `@Type`. It is unique and based on a declaration.
/// It may be a tuple, if declared like this: `struct {A, B, C}`.
struct_type: NamespaceType,
/// This is an anonymous struct or tuple type which has no corresponding
/// declaration. It is used for types that have no `struct` keyword in the
/// source code, and were not created via `@Type`.
anon_struct_type: AnonStructType,
/// This is a tuple type. Tuples are logically similar to structs, but have some
/// important differences in semantics; they do not undergo staged type resolution,
/// so cannot be self-referential, and they are not considered container/namespace
/// types, so cannot have declarations and have structural equality properties.
tuple_type: TupleType,
union_type: NamespaceType,
opaque_type: NamespaceType,
enum_type: NamespaceType,

@@ -1919,27 +1920,10 @@ pub const Key = union(enum) {
child: Index,
};

pub const AnonStructType = struct {
pub const TupleType = struct {
types: Index.Slice,
/// This may be empty, indicating this is a tuple.
names: NullTerminatedString.Slice,
/// These elements may be `none`, indicating runtime-known.
values: Index.Slice,

pub fn isTuple(self: AnonStructType) bool {
return self.names.len == 0;
}

pub fn fieldName(
self: AnonStructType,
ip: *const InternPool,
index: usize,
) OptionalNullTerminatedString {
if (self.names.len == 0)
return .none;

return self.names.get(ip)[index].toOptional();
}
};

/// This is the hashmap key. To fetch other data associated with the type, see:

@@ -1965,18 +1949,15 @@ pub const Key = union(enum) {
/// The union for which this is a tag type.
union_type: Index,
},
/// This type originates from a reification via `@Type`.
/// It is hased based on its ZIR instruction index and fields, attributes, etc.
/// This type originates from a reification via `@Type`, or from an anonymous initialization.
/// It is hashed based on its ZIR instruction index and fields, attributes, etc.
/// To avoid making this key overly complex, the type-specific data is hased by Sema.
reified: struct {
/// A `reify` instruction.
/// A `reify`, `struct_init`, `struct_init_ref`, or `struct_init_anon` instruction.
zir_index: TrackedInst.Index,
/// A hash of this type's attributes, fields, etc, generated by Sema.
type_hash: u64,
},
/// This type is `@TypeOf(.{})`.
/// TODO: can we change the language spec to not special-case this type?
empty_struct: void,
};

pub const FuncType = struct {
@@ -2497,7 +2478,6 @@ pub const Key = union(enum) {
std.hash.autoHash(&hasher, reified.zir_index);
std.hash.autoHash(&hasher, reified.type_hash);
},
.empty_struct => {},
}
return hasher.final();
},

@@ -2570,7 +2550,7 @@ pub const Key = union(enum) {
const child = switch (ip.indexToKey(aggregate.ty)) {
.array_type => |array_type| array_type.child,
.vector_type => |vector_type| vector_type.child,
.anon_struct_type, .struct_type => .none,
.tuple_type, .struct_type => .none,
else => unreachable,
};

@@ -2625,11 +2605,10 @@ pub const Key = union(enum) {
.error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))),

.anon_struct_type => |anon_struct_type| {
.tuple_type => |tuple_type| {
var hasher = Hash.init(seed);
for (anon_struct_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (anon_struct_type.names.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (tuple_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
for (tuple_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem);
return hasher.final();
},

@@ -2929,7 +2908,6 @@ pub const Key = union(enum) {
return a_r.zir_index == b_r.zir_index and
a_r.type_hash == b_r.type_hash;
},
.empty_struct => return true,
}
},
.aggregate => |a_info| {

@@ -2981,11 +2959,10 @@ pub const Key = union(enum) {
},
}
},
.anon_struct_type => |a_info| {
const b_info = b.anon_struct_type;
.tuple_type => |a_info| {
const b_info = b.tuple_type;
return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and
std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)) and
std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip));
std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip));
},
.error_set_type => |a_info| {
const b_info = b.error_set_type;

@@ -3025,7 +3002,7 @@ pub const Key = union(enum) {
.union_type,
.opaque_type,
.enum_type,
.anon_struct_type,
.tuple_type,
.func_type,
=> .type_type,

@@ -3054,7 +3031,7 @@ pub const Key = union(enum) {
.void => .void_type,
.null => .null_type,
.false, .true => .bool_type,
.empty_struct => .empty_struct_type,
.empty_tuple => .empty_tuple_type,
.@"unreachable" => .noreturn_type,
.generic_poison => .generic_poison_type,
},
@@ -3411,13 +3388,11 @@ pub const LoadedStructType = struct {
// TODO: the non-fqn will be needed by the new dwarf structure
/// The name of this struct type.
name: NullTerminatedString,
/// The `Cau` within which type resolution occurs. `none` when the struct is `@TypeOf(.{})`.
cau: Cau.Index.Optional,
/// `none` when the struct is `@TypeOf(.{})`.
namespace: OptionalNamespaceIndex,
/// The `Cau` within which type resolution occurs.
cau: Cau.Index,
namespace: NamespaceIndex,
/// Index of the `struct_decl` or `reify` ZIR instruction.
/// Only `none` when the struct is `@TypeOf(.{})`.
zir_index: TrackedInst.Index.Optional,
zir_index: TrackedInst.Index,
layout: std.builtin.Type.ContainerLayout,
field_names: NullTerminatedString.Slice,
field_types: Index.Slice,

@@ -3913,10 +3888,6 @@ pub const LoadedStructType = struct {
@atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release);
}

pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool {
return s.layout != .@"packed" and s.flagsUnordered(ip).is_tuple;
}

pub fn hasReorderedFields(s: LoadedStructType) bool {
return s.layout == .auto;
}

@@ -4008,24 +3979,6 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
const item = unwrapped_index.getItem(ip);
switch (item.tag) {
.type_struct => {
if (item.data == 0) return .{
.tid = .main,
.extra_index = 0,
.name = .empty,
.cau = .none,
.namespace = .none,
.zir_index = .none,
.layout = .auto,
.field_names = NullTerminatedString.Slice.empty,
.field_types = Index.Slice.empty,
.field_inits = Index.Slice.empty,
.field_aligns = Alignment.Slice.empty,
.runtime_order = LoadedStructType.RuntimeOrder.Slice.empty,
.comptime_bits = LoadedStructType.ComptimeBits.empty,
.offsets = LoadedStructType.Offsets.empty,
.names_map = .none,
.captures = CaptureValue.Slice.empty,
};
const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]);
const cau: Cau.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "cau").?]);
const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?]);

@@ -4045,7 +3998,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
};
extra_index += captures_len;
if (flags.is_reified) {
extra_index += 2; // PackedU64
extra_index += 2; // type_hash: PackedU64
}
const field_types: Index.Slice = .{
.tid = unwrapped_index.tid,

@@ -4053,7 +4006,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.len = fields_len,
};
extra_index += fields_len;
const names_map: OptionalMapIndex, const names = if (!flags.is_tuple) n: {
const names_map: OptionalMapIndex, const names = n: {
const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]);
extra_index += 1;
const names: NullTerminatedString.Slice = .{

@@ -4063,7 +4016,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
};
extra_index += fields_len;
break :n .{ names_map, names };
} else .{ .none, NullTerminatedString.Slice.empty };
};
const inits: Index.Slice = if (flags.any_default_inits) i: {
const inits: Index.Slice = .{
.tid = unwrapped_index.tid,

@@ -4114,9 +4067,9 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.tid = unwrapped_index.tid,
.extra_index = item.data,
.name = name,
.cau = cau.toOptional(),
.namespace = namespace.toOptional(),
.zir_index = zir_index.toOptional(),
.cau = cau,
.namespace = namespace,
.zir_index = zir_index,
.layout = if (flags.is_extern) .@"extern" else .auto,
.field_names = names,
.field_types = field_types,

@@ -4178,9 +4131,9 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
.tid = unwrapped_index.tid,
.extra_index = item.data,
.name = name,
.cau = cau.toOptional(),
.namespace = namespace.toOptional(),
.zir_index = zir_index.toOptional(),
.cau = cau,
.namespace = namespace,
.zir_index = zir_index,
.layout = .@"packed",
.field_names = field_names,
.field_types = field_types,
@@ -4407,9 +4360,9 @@ pub const Item = struct {
/// `primitives` in AstGen.zig.
pub const Index = enum(u32) {
pub const first_type: Index = .u0_type;
pub const last_type: Index = .empty_struct_type;
pub const last_type: Index = .empty_tuple_type;
pub const first_value: Index = .undef;
pub const last_value: Index = .empty_struct;
pub const last_value: Index = .empty_tuple;

u0_type,
i0_type,

@@ -4466,8 +4419,9 @@ pub const Index = enum(u32) {
/// Used for the inferred error set of inline/comptime function calls.
adhoc_inferred_error_set_type,
generic_poison_type,
/// `@TypeOf(.{})`
empty_struct_type,
/// `@TypeOf(.{})`; a tuple with zero elements.
/// This is not the same as `struct {}`, since that is a struct rather than a tuple.
empty_tuple_type,

/// `undefined` (untyped)
undef,

@@ -4497,8 +4451,8 @@ pub const Index = enum(u32) {
bool_true,
/// `false`
bool_false,
/// `.{}` (untyped)
empty_struct,
/// `.{}`
empty_tuple,

/// Used for generic parameters where the type and value
/// is not known until generic function instantiation.

@@ -4606,16 +4560,14 @@ pub const Index = enum(u32) {
values: []Index,
},
};
const DataIsExtraIndexOfTypeStructAnon = struct {
const DataIsExtraIndexOfTypeTuple = struct {
const @"data.fields_len" = opaque {};
data: *TypeStructAnon,
data: *TypeTuple,
@"trailing.types.len": *@"data.fields_len",
@"trailing.values.len": *@"data.fields_len",
@"trailing.names.len": *@"data.fields_len",
trailing: struct {
types: []Index,
values: []Index,
names: []NullTerminatedString,
},
};

@@ -4649,10 +4601,9 @@ pub const Index = enum(u32) {
simple_type: void,
type_opaque: struct { data: *Tag.TypeOpaque },
type_struct: struct { data: *Tag.TypeStruct },
type_struct_anon: DataIsExtraIndexOfTypeStructAnon,
type_struct_packed: struct { data: *Tag.TypeStructPacked },
type_struct_packed_inits: struct { data: *Tag.TypeStructPacked },
type_tuple_anon: DataIsExtraIndexOfTypeStructAnon,
type_tuple: DataIsExtraIndexOfTypeTuple,
type_union: struct { data: *Tag.TypeUnion },
type_function: struct {
const @"data.flags.has_comptime_bits" = opaque {};
@@ -4936,11 +4887,10 @@ pub const static_keys = [_]Key{
// generic_poison_type
.{ .simple_type = .generic_poison },

// empty_struct_type
.{ .anon_struct_type = .{
.types = Index.Slice.empty,
.names = NullTerminatedString.Slice.empty,
.values = Index.Slice.empty,
// empty_tuple_type
.{ .tuple_type = .{
.types = .empty,
.values = .empty,
} },

.{ .simple_value = .undefined },

@@ -4991,7 +4941,7 @@ pub const static_keys = [_]Key{
.{ .simple_value = .null },
.{ .simple_value = .true },
.{ .simple_value = .false },
.{ .simple_value = .empty_struct },
.{ .simple_value = .empty_tuple },
.{ .simple_value = .generic_poison },
};

@@ -5071,20 +5021,16 @@ pub const Tag = enum(u8) {
type_opaque,
/// A non-packed struct type.
/// data is 0 or extra index of `TypeStruct`.
/// data == 0 represents `@TypeOf(.{})`.
type_struct,
/// An AnonStructType which stores types, names, and values for fields.
/// data is extra index of `TypeStructAnon`.
type_struct_anon,
/// A packed struct, no fields have any init values.
/// data is extra index of `TypeStructPacked`.
type_struct_packed,
/// A packed struct, one or more fields have init values.
/// data is extra index of `TypeStructPacked`.
type_struct_packed_inits,
/// An AnonStructType which has only types and values for fields.
/// data is extra index of `TypeStructAnon`.
type_tuple_anon,
/// A `TupleType`.
/// data is extra index of `TypeTuple`.
type_tuple,
/// A union type.
/// `data` is extra index of `TypeUnion`.
type_union,

@@ -5299,9 +5245,8 @@ pub const Tag = enum(u8) {
.simple_type => unreachable,
.type_opaque => TypeOpaque,
.type_struct => TypeStruct,
.type_struct_anon => TypeStructAnon,
.type_struct_packed, .type_struct_packed_inits => TypeStructPacked,
.type_tuple_anon => TypeStructAnon,
.type_tuple => TypeTuple,
.type_union => TypeUnion,
.type_function => TypeFunction,
@@ -5546,18 +5491,15 @@ pub const Tag = enum(u8) {
/// 1. capture: CaptureValue // for each `captures_len`
/// 2. type_hash: PackedU64 // if `is_reified`
/// 3. type: Index for each field in declared order
/// 4. if not is_tuple:
/// names_map: MapIndex,
/// name: NullTerminatedString // for each field in declared order
/// 5. if any_default_inits:
/// 4. if any_default_inits:
/// init: Index // for each field in declared order
/// 6. if any_aligned_fields:
/// 5. if any_aligned_fields:
/// align: Alignment // for each field in declared order
/// 7. if any_comptime_fields:
/// 6. if any_comptime_fields:
/// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0
/// 8. if not is_extern:
/// 7. if not is_extern:
/// field_index: RuntimeOrder // for each field in runtime order
/// 9. field_offset: u32 // for each field in declared order, undef until layout_resolved
/// 8. field_offset: u32 // for each field in declared order, undef until layout_resolved
pub const TypeStruct = struct {
name: NullTerminatedString,
cau: Cau.Index,

@@ -5572,7 +5514,6 @@ pub const Tag = enum(u8) {
is_extern: bool = false,
known_non_opv: bool = false,
requires_comptime: RequiresComptime = @enumFromInt(0),
is_tuple: bool = false,
assumed_runtime_bits: bool = false,
assumed_pointer_aligned: bool = false,
any_comptime_fields: bool = false,

@@ -5597,7 +5538,7 @@ pub const Tag = enum(u8) {
// which `layout_resolved` does not ensure.
fully_resolved: bool = false,
is_reified: bool = false,
_: u7 = 0,
_: u8 = 0,
};
};

@@ -5659,9 +5600,7 @@ pub const Repeated = struct {
/// Trailing:
/// 0. type: Index for each fields_len
/// 1. value: Index for each fields_len
/// 2. name: NullTerminatedString for each fields_len
/// The set of field names is omitted when the `Tag` is `type_tuple_anon`.
pub const TypeStructAnon = struct {
pub const TypeTuple = struct {
fields_len: u32,
};

@@ -5708,8 +5647,8 @@ pub const SimpleValue = enum(u32) {
void = @intFromEnum(Index.void_value),
/// This is untyped `null`.
null = @intFromEnum(Index.null_value),
/// This is the untyped empty struct literal: `.{}`
empty_struct = @intFromEnum(Index.empty_struct),
/// This is the untyped empty struct/array literal: `.{}`
empty_tuple = @intFromEnum(Index.empty_tuple),
true = @intFromEnum(Index.bool_true),
false = @intFromEnum(Index.bool_false),
@"unreachable" = @intFromEnum(Index.unreachable_value),
@@ -6266,11 +6205,10 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void {
// This inserts all the statically-known values into the intern pool in the
// order expected.
for (&static_keys, 0..) |key, key_index| switch (@as(Index, @enumFromInt(key_index))) {
.empty_struct_type => assert(try ip.getAnonStructType(gpa, .main, .{
.empty_tuple_type => assert(try ip.getTupleType(gpa, .main, .{
.types = &.{},
.names = &.{},
.values = &.{},
}) == .empty_struct_type),
}) == .empty_tuple_type),
else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index),
};

@@ -6412,7 +6350,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} },
.type_struct => .{ .struct_type = ns: {
if (data == 0) break :ns .empty_struct;
const extra_list = unwrapped_index.getExtra(ip);
const extra_items = extra_list.view().items(.@"0");
const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]);

@@ -6457,8 +6394,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} else CaptureValue.Slice.empty },
} };
} },
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
.type_tuple => .{ .tuple_type = extraTypeTuple(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) },
.type_union => .{ .union_type = ns: {
const extra_list = unwrapped_index.getExtra(ip);
const extra = extraDataTrail(extra_list, Tag.TypeUnion, data);

@@ -6764,10 +6700,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
// There is only one possible value precisely due to the
// fact that this values slice is fully populated!
.type_struct_anon, .type_tuple_anon => {
const type_struct_anon = extraDataTrail(ty_extra, TypeStructAnon, ty_item.data);
const fields_len = type_struct_anon.data.fields_len;
const values = ty_extra.view().items(.@"0")[type_struct_anon.end + fields_len ..][0..fields_len];
.type_tuple => {
const type_tuple = extraDataTrail(ty_extra, TypeTuple, ty_item.data);
const fields_len = type_tuple.data.fields_len;
const values = ty_extra.view().items(.@"0")[type_tuple.end + fields_len ..][0..fields_len];
return .{ .aggregate = .{
.ty = ty,
.storage = .{ .elems = @ptrCast(values) },

@@ -6850,47 +6786,20 @@ fn extraErrorSet(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke
};
}

fn extraTypeStructAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType {
const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index);
const fields_len = type_struct_anon.data.fields_len;
fn extraTypeTuple(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.TupleType {
const type_tuple = extraDataTrail(extra, TypeTuple, extra_index);
const fields_len = type_tuple.data.fields_len;
return .{
.types = .{
.tid = tid,
.start = type_struct_anon.end,
.start = type_tuple.end,
.len = fields_len,
},
.values = .{
.tid = tid,
.start = type_struct_anon.end + fields_len,
.start = type_tuple.end + fields_len,
.len = fields_len,
},
.names = .{
.tid = tid,
.start = type_struct_anon.end + fields_len + fields_len,
.len = fields_len,
},
};
}

fn extraTypeTupleAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType {
const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index);
const fields_len = type_struct_anon.data.fields_len;
return .{
.types = .{
.tid = tid,
.start = type_struct_anon.end,
.len = fields_len,
},
.values = .{
.tid = tid,
.start = type_struct_anon.end + fields_len,
.len = fields_len,
},
.names = .{
.tid = tid,
.start = 0,
.len = 0,
},
};
}
@@ -7361,7 +7270,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
},

.struct_type => unreachable, // use getStructType() instead
.anon_struct_type => unreachable, // use getAnonStructType() instead
.tuple_type => unreachable, // use getTupleType() instead
.union_type => unreachable, // use getUnionType() instead
.opaque_type => unreachable, // use getOpaqueType() instead

@@ -7469,9 +7378,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
.field => {
assert(base_ptr_type.flags.size == .One);
switch (ip.indexToKey(base_ptr_type.child)) {
.anon_struct_type => |anon_struct_type| {
.tuple_type => |tuple_type| {
assert(ptr.base_addr == .field);
assert(base_index.index < anon_struct_type.types.len);
assert(base_index.index < tuple_type.types.len);
},
.struct_type => {
assert(ptr.base_addr == .field);

@@ -7808,12 +7717,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
const child = switch (ty_key) {
.array_type => |array_type| array_type.child,
.vector_type => |vector_type| vector_type.child,
.anon_struct_type, .struct_type => .none,
.tuple_type, .struct_type => .none,
else => unreachable,
};
const sentinel = switch (ty_key) {
.array_type => |array_type| array_type.sentinel,
.vector_type, .anon_struct_type, .struct_type => .none,
.vector_type, .tuple_type, .struct_type => .none,
else => unreachable,
};
const len_including_sentinel = len + @intFromBool(sentinel != .none);

@@ -7845,8 +7754,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
assert(ip.typeOf(elem) == field_ty);
}
},
.anon_struct_type => |anon_struct_type| {
for (aggregate.storage.values(), anon_struct_type.types.get(ip)) |elem, ty| {
.tuple_type => |tuple_type| {
for (aggregate.storage.values(), tuple_type.types.get(ip)) |elem, ty| {
assert(ip.typeOf(elem) == ty);
}
},

@@ -7862,9 +7771,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
}

switch (ty_key) {
.anon_struct_type => |anon_struct_type| opv: {
.tuple_type => |tuple_type| opv: {
switch (aggregate.storage) {
.bytes => |bytes| for (anon_struct_type.values.get(ip), bytes.at(0, ip)..) |value, byte| {
.bytes => |bytes| for (tuple_type.values.get(ip), bytes.at(0, ip)..) |value, byte| {
if (value == .none) break :opv;
switch (ip.indexToKey(value)) {
.undef => break :opv,

@@ -7877,10 +7786,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All
},
.elems => |elems| if (!std.mem.eql(
Index,
anon_struct_type.values.get(ip),
tuple_type.values.get(ip),
elems,
)) break :opv,
.repeated_elem => |elem| for (anon_struct_type.values.get(ip)) |value| {
.repeated_elem => |elem| for (tuple_type.values.get(ip)) |value| {
if (value != elem) break :opv;
},
}
@@ -8244,7 +8153,6 @@ pub const StructTypeInit = struct {
fields_len: u32,
known_non_opv: bool,
requires_comptime: RequiresComptime,
is_tuple: bool,
any_comptime_fields: bool,
any_default_inits: bool,
inits_resolved: bool,

@@ -8404,7 +8312,6 @@ pub fn getStructType(
.is_extern = is_extern,
.known_non_opv = ini.known_non_opv,
.requires_comptime = ini.requires_comptime,
.is_tuple = ini.is_tuple,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
.any_comptime_fields = ini.any_comptime_fields,

@@ -8442,10 +8349,8 @@ pub fn getStructType(
},
}
extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
if (!ini.is_tuple) {
extra.appendAssumeCapacity(.{@intFromEnum(names_map)});
extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len);
}
extra.appendAssumeCapacity(.{@intFromEnum(names_map)});
extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len);
if (ini.any_default_inits) {
extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len);
}

@@ -8468,19 +8373,17 @@ pub fn getStructType(
} };
}

pub const AnonStructTypeInit = struct {
pub const TupleTypeInit = struct {
types: []const Index,
/// This may be empty, indicating this is a tuple.
names: []const NullTerminatedString,
/// These elements may be `none`, indicating runtime-known.
values: []const Index,
};

pub fn getAnonStructType(
pub fn getTupleType(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
ini: AnonStructTypeInit,
ini: TupleTypeInit,
) Allocator.Error!Index {
assert(ini.types.len == ini.values.len);
for (ini.types) |elem| assert(elem != .none);

@@ -8494,23 +8397,17 @@ pub fn getAnonStructType(
try items.ensureUnusedCapacity(1);
try extra.ensureUnusedCapacity(
@typeInfo(TypeStructAnon).@"struct".fields.len + (fields_len * 3),
@typeInfo(TypeTuple).@"struct".fields.len + (fields_len * 3),
);

const extra_index = addExtraAssumeCapacity(extra, TypeStructAnon{
const extra_index = addExtraAssumeCapacity(extra, TypeTuple{
.fields_len = fields_len,
});
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.types)});
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)});
errdefer extra.mutate.len = prev_extra_len;

var gop = try ip.getOrPutKey(gpa, tid, .{
.anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(tid, extra.list.*, extra_index) else k: {
assert(ini.names.len == ini.types.len);
extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)});
break :k extraTypeStructAnon(tid, extra.list.*, extra_index);
},
});
var gop = try ip.getOrPutKey(gpa, tid, .{ .tuple_type = extraTypeTuple(tid, extra.list.*, extra_index) });
defer gop.deinit();
if (gop == .existing) {
extra.mutate.len = prev_extra_len;

@@ -8518,7 +8415,7 @@ pub fn getAnonStructType(
}

items.appendAssumeCapacity(.{
.tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon,
.tag = .type_tuple,
.data = extra_index,
});
return gop.put();
@ -10181,12 +10078,12 @@ pub fn getCoerced(
direct: {
const old_ty_child = switch (ip.indexToKey(old_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type, .struct_type => break :direct,
.tuple_type, .struct_type => break :direct,
else => unreachable,
};
const new_ty_child = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type, .struct_type => break :direct,
.tuple_type, .struct_type => break :direct,
else => unreachable,
};
if (old_ty_child != new_ty_child) break :direct;
@ -10235,7 +10132,7 @@ pub fn getCoerced(
for (agg_elems, 0..) |*elem, i| {
const new_elem_ty = switch (ip.indexToKey(new_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i],
.tuple_type => |tuple_type| tuple_type.types.get(ip)[i],
.struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i],
else => unreachable,
};
@ -10425,7 +10322,7 @@ pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool {
pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
return switch (ip.indexToKey(ty)) {
.array_type, .vector_type, .anon_struct_type, .struct_type => true,
.array_type, .vector_type, .tuple_type, .struct_type => true,
else => false,
};
}
@ -10549,7 +10446,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
break :b @sizeOf(u32) * ints;
},
.type_struct => b: {
if (data == 0) break :b 0;
const extra = extraDataTrail(extra_list, Tag.TypeStruct, data);
const info = extra.data;
var ints: usize = @typeInfo(Tag.TypeStruct).@"struct".fields.len;
@ -10558,10 +10454,8 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
ints += 1 + captures_len;
}
ints += info.fields_len; // types
if (!info.flags.is_tuple) {
ints += 1; // names_map
ints += info.fields_len; // names
}
ints += 1; // names_map
ints += info.fields_len; // names
if (info.flags.any_default_inits)
ints += info.fields_len; // inits
if (info.flags.any_aligned_fields)
@ -10573,10 +10467,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
ints += info.fields_len; // offsets
break :b @sizeOf(u32) * ints;
},
.type_struct_anon => b: {
const info = extraData(extra_list, TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len);
},
.type_struct_packed => b: {
const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data);
const captures_len = if (extra.data.flags.any_captures)
@ -10597,9 +10487,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
@intFromBool(extra.data.flags.any_captures) + captures_len +
extra.data.fields_len * 3);
},
.type_tuple_anon => b: {
const info = extraData(extra_list, TypeStructAnon, data);
break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len);
.type_tuple => b: {
const info = extraData(extra_list, TypeTuple, data);
break :b @sizeOf(TypeTuple) + (@sizeOf(u32) * 2 * info.fields_len);
},
.type_union => b: {
@ -10760,10 +10650,9 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.type_enum_auto,
.type_opaque,
.type_struct,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_tuple,
.type_union,
.type_function,
.undef,
@ -11396,7 +11285,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.anyerror_void_error_union_type,
.adhoc_inferred_error_set_type,
.generic_poison_type,
.empty_struct_type,
.empty_tuple_type,
=> .type_type,
.undef => .undefined_type,
@ -11407,7 +11296,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.unreachable_value => .noreturn_type,
.null_value => .null_type,
.bool_true, .bool_false => .bool_type,
.empty_struct => .empty_struct_type,
.empty_tuple => .empty_tuple_type,
.generic_poison => .generic_poison_type,
// This optimization on tags is needed so that indexToKey can call
@ -11436,10 +11325,9 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index {
.type_enum_nonexhaustive,
.type_opaque,
.type_struct,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_tuple,
.type_union,
.type_function,
=> .type_type,
@ -11533,7 +11421,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.tuple_type => |tuple_type| tuple_type.types.len,
.array_type => |array_type| array_type.len,
.vector_type => |vector_type| vector_type.len,
else => unreachable,
@ -11543,7 +11431,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.tuple_type => |tuple_type| tuple_type.types.len,
.array_type => |array_type| array_type.lenIncludingSentinel(),
.vector_type => |vector_type| vector_type.len,
else => unreachable,
@ -11708,7 +11596,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.optional_noreturn_type => .optional,
.anyerror_void_error_union_type => .error_union,
.empty_struct_type => .@"struct",
.empty_tuple_type => .@"struct",
.generic_poison_type => return error.GenericPoison,
@ -11727,7 +11615,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.null_value => unreachable,
.bool_true => unreachable,
.bool_false => unreachable,
.empty_struct => unreachable,
.empty_tuple => unreachable,
.generic_poison => unreachable,
_ => switch (index.unwrap(ip).getTag(ip)) {
@ -11768,10 +11656,9 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois
.type_opaque => .@"opaque",
.type_struct,
.type_struct_anon,
.type_struct_packed,
.type_struct_packed_inits,
.type_tuple_anon,
.type_tuple,
=> .@"struct",
.type_union => .@"union",
@ -12013,14 +11900,6 @@ pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index {
};
}
pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index {
return ip.indexToKey(i).anon_struct_type.types;
}
pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 {
return @intCast(ip.indexToKey(i).anon_struct_type.types.len);
}
/// Returns the already-existing field with the same name, if any.
pub fn addFieldName(
ip: *InternPool,
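For context, a minimal caller-side sketch of the renamed tuple API above (illustrative only; it mirrors the getTupleType call added to the SPIR-V backend later in this diff, and assumes `ip: *InternPool`, `gpa`, and `tid` are in scope):

    // Interns the tuple type `struct { u32, u32 }`. A `.none` entry in
    // `values` marks that field as runtime-known rather than comptime.
    const pair_ty_index = try ip.getTupleType(gpa, tid, .{
        .types = &.{ .u32_type, .u32_type },
        .values = &.{ .none, .none },
    });
    const pair_ty: Type = .fromInterned(pair_ty_index);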
808
src/Sema.zig
File diff suppressed because it is too large
@ -246,7 +246,7 @@ const UnpackValueBits = struct {
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.tuple_type,
.union_type,
.opaque_type,
.enum_type,
178
src/Type.zig
@ -320,33 +320,20 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error
},
.struct_type => {
const name = ip.loadStructType(ty.toIntern()).name;
if (name == .empty) {
try writer.writeAll("@TypeOf(.{})");
} else {
try writer.print("{}", .{name.fmt(ip)});
}
try writer.print("{}", .{name.fmt(ip)});
},
.anon_struct_type => |anon_struct| {
if (anon_struct.types.len == 0) {
.tuple_type => |tuple| {
if (tuple.types.len == 0) {
return writer.writeAll("@TypeOf(.{})");
}
try writer.writeAll("struct{");
for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) |field_ty, val, i| {
if (i != 0) try writer.writeAll(", ");
if (val != .none) {
try writer.writeAll("comptime ");
}
if (anon_struct.names.len != 0) {
try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&zcu.intern_pool)});
}
try writer.writeAll("struct {");
for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, val, i| {
try writer.writeAll(if (i == 0) " " else ", ");
if (val != .none) try writer.writeAll("comptime ");
try print(Type.fromInterned(field_ty), writer, pt);
if (val != .none) {
try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
}
if (val != .none) try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)});
}
try writer.writeAll("}");
try writer.writeAll(" }");
},
.union_type => {
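As an illustration of the formatting change above (not part of the diff), a tuple type is now printed with spaces inside the braces and no field names; the type below is a hypothetical example:

    // Per the new `print` logic above, this tuple type is rendered as
    // "struct { u32, comptime u8 = 3 }".
    const Pair = struct { u32, comptime u8 = 3 };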
@ -489,8 +476,7 @@ pub fn hasRuntimeBitsInner(
) RuntimeBitsError!bool {
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
// False because it is a comptime-only type.
.empty_struct_type => false,
.empty_tuple_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| int_type.bits != 0,
.ptr_type => {
@ -593,7 +579,7 @@ pub fn hasRuntimeBitsInner(
return false;
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
if (try Type.fromInterned(field_ty).hasRuntimeBitsInner(
@ -691,7 +677,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool {
.error_union_type,
.error_set_type,
.inferred_error_set_type,
.anon_struct_type,
.tuple_type,
.opaque_type,
.anyframe_type,
// These are function bodies, not function pointers.
@ -966,7 +952,7 @@ pub fn abiAlignmentInner(
const ip = &zcu.intern_pool;
switch (ty.toIntern()) {
.empty_struct_type => return .{ .scalar = .@"1" },
.empty_tuple_type => return .{ .scalar = .@"1" },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
if (int_type.bits == 0) return .{ .scalar = .@"1" };
@ -1109,7 +1095,7 @@ pub fn abiAlignmentInner(
return .{ .scalar = struct_type.flagsUnordered(ip).alignment };
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
var big_align: Alignment = .@"1";
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
if (val != .none) continue; // comptime field
@ -1295,7 +1281,7 @@ pub fn abiSizeInner(
const ip = &zcu.intern_pool;
switch (ty.toIntern()) {
.empty_struct_type => return .{ .scalar = 0 },
.empty_tuple_type => return .{ .scalar = 0 },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@ -1498,7 +1484,7 @@ pub fn abiSizeInner(
},
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
switch (strat) {
.sema => try ty.resolveLayout(strat.pt(zcu, tid)),
.lazy, .eager => {},
@ -1831,8 +1817,7 @@ pub fn bitSizeInner(
return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
},
.anon_struct_type => {
if (strat == .sema) try ty.resolveFields(strat.pt(zcu, tid));
.tuple_type => {
return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8;
},
@ -2176,7 +2161,7 @@ pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayo
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .auto,
.tuple_type => .auto,
.union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout,
else => unreachable,
};
@ -2295,7 +2280,7 @@ pub fn arrayLenIncludingSentinel(ty: Type, zcu: *const Zcu) u64 {
pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.anon_struct_type => |tuple| @intCast(tuple.types.len),
.tuple_type => |tuple| @intCast(tuple.types.len),
else => unreachable,
};
}
@ -2305,7 +2290,7 @@ pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.vector_type,
.struct_type,
.anon_struct_type,
.tuple_type,
=> null,
.array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null,
@ -2386,7 +2371,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType {
return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() };
},
.anon_struct_type => unreachable,
.tuple_type => unreachable,
.ptr_type => unreachable,
.anyframe_type => unreachable,
@ -2556,7 +2541,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
var ty = starting_type;
const ip = &zcu.intern_pool;
while (true) switch (ty.toIntern()) {
.empty_struct_type => return Value.empty_struct,
.empty_tuple_type => return Value.empty_tuple,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| {
@ -2660,7 +2645,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
} }));
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
for (tuple.values.get(ip)) |val| {
if (val == .none) return null;
}
@ -2783,7 +2768,7 @@ pub fn comptimeOnlyInner(
) SemaError!bool {
const ip = &zcu.intern_pool;
return switch (ty.toIntern()) {
.empty_struct_type => false,
.empty_tuple_type => false,
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => false,
@ -2891,7 +2876,7 @@ pub fn comptimeOnlyInner(
};
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| {
const have_comptime_val = val != .none;
if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) return true;
@ -3022,7 +3007,7 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace.toOptional(),
.struct_type => ip.loadStructType(ty.toIntern()).namespace,
.struct_type => ip.loadStructType(ty.toIntern()).namespace.toOptional(),
.union_type => ip.loadUnionType(ty.toIntern()).namespace.toOptional(),
.enum_type => ip.loadEnumType(ty.toIntern()).namespace.toOptional(),
else => .none,
@ -3181,7 +3166,7 @@ pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.Optio
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
.tuple_type => .none,
else => unreachable,
};
}
@ -3190,7 +3175,7 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
.anon_struct_type => |anon_struct| anon_struct.types.len,
.tuple_type => |tuple| tuple.types.len,
else => unreachable,
};
}
@ -3204,7 +3189,7 @@ pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type {
const union_obj = ip.loadUnionType(ty.toIntern());
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
.anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
.tuple_type => |tuple| Type.fromInterned(tuple.types.get(ip)[index]),
else => unreachable,
};
}
@ -3238,8 +3223,8 @@ pub fn fieldAlignmentInner(
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid);
},
.anon_struct_type => |anon_struct| {
return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner(
.tuple_type => |tuple| {
return (try Type.fromInterned(tuple.types.get(ip)[index]).abiAlignmentInner(
strat.toLazy(),
zcu,
tid,
@ -3361,8 +3346,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, zcu: *const Zcu) Value {
if (val == .none) return Value.@"unreachable";
return Value.fromInterned(val);
},
.anon_struct_type => |anon_struct| {
const val = anon_struct.values.get(ip)[index];
.tuple_type => |tuple| {
const val = tuple.values.get(ip)[index];
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
return Value.fromInterned(val);
@ -3384,7 +3369,7 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val
return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt);
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
const val = tuple.values.get(ip)[index];
if (val == .none) {
return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt);
@ -3400,7 +3385,7 @@ pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
.tuple_type => |tuple| tuple.values.get(ip)[index] != .none,
else => unreachable,
};
}
@ -3425,7 +3410,7 @@ pub fn structFieldOffset(
return struct_type.offsets.get(ip)[index];
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
var offset: u64 = 0;
var big_align: Alignment = .none;
@ -3472,7 +3457,6 @@ pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc {
.declared => |d| d.zir_index,
.reified => |r| r.zir_index,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
.empty_struct => return null,
},
else => return null,
},
@ -3491,49 +3475,7 @@ pub fn isGenericPoison(ty: Type) bool {
pub fn isTuple(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") return false;
if (struct_type.cau == .none) return false;
return struct_type.flagsUnordered(ip).is_tuple;
},
.anon_struct_type => |anon_struct| anon_struct.names.len == 0,
else => false,
};
}
pub fn isAnonStruct(ty: Type, zcu: *const Zcu) bool {
if (ty.toIntern() == .empty_struct_type) return true;
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0,
else => false,
};
}
pub fn isTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .@"packed") return false;
if (struct_type.cau == .none) return false;
return struct_type.flagsUnordered(ip).is_tuple;
},
.anon_struct_type => true,
else => false,
};
}
pub fn isSimpleTuple(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0,
else => false,
};
}
pub fn isSimpleTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
.anon_struct_type => true,
.tuple_type => true,
else => false,
};
}
@ -3564,7 +3506,7 @@ pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type {
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
.struct_type => ip.loadStructType(ty.toIntern()).zir_index,
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
@ -3575,12 +3517,11 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
.struct_type => ip.loadStructType(ty.toIntern()).zir_index,
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => |e| switch (e) {
.declared, .reified => ip.loadEnumType(ty.toIntern()).zir_index.unwrap().?,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
.empty_struct => unreachable,
},
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
else => null,
@ -3588,13 +3529,16 @@ pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.Trac
}
pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
// Note that changes to ZIR instruction tracking only need to update this code
// if a newly-tracked instruction can be a type's owner `zir_index`.
comptime assert(Zir.inst_tracking_version == 0);
const ip = &zcu.intern_pool;
const tracked = switch (ip.indexToKey(ty.toIntern())) {
.struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) {
.declared => |d| d.zir_index,
.reified => |r| r.zir_index,
.generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index,
.empty_struct => return null,
},
else => return null,
};
@ -3603,13 +3547,17 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 {
assert(file.zir_loaded);
const zir = file.zir;
const inst = zir.instructions.get(@intFromEnum(info.inst));
assert(inst.tag == .extended);
return switch (inst.data.extended.opcode) {
.struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
.enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
.opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
.reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
return switch (inst.tag) {
.struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_line,
.struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_line,
.extended => switch (inst.data.extended.opcode) {
.struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line,
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line,
.enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line,
.opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line,
.reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line,
else => unreachable,
},
else => unreachable,
};
}
@ -3697,8 +3645,8 @@ pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void {
const ip = &zcu.intern_pool;
switch (ty.zigTypeTag(zcu)) {
.@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
.tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
try field_ty.resolveLayout(pt);
},
.struct_type => return ty.resolveStructInner(pt, .layout),
@ -3796,7 +3744,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.optional_noreturn_type,
.anyerror_void_error_union_type,
.generic_poison_type,
.empty_struct_type,
.empty_tuple_type,
=> {},
.undef => unreachable,
@ -3813,7 +3761,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
.null_value => unreachable,
.bool_true => unreachable,
.bool_false => unreachable,
.empty_struct => unreachable,
.empty_tuple => unreachable,
.generic_poison => unreachable,
else => switch (ty_ip.unwrap(ip).getTag(ip)) {
@ -3868,8 +3816,8 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void {
},
.@"struct" => switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| {
const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]);
.tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| {
const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]);
try field_ty.resolveFully(pt);
},
.struct_type => return ty.resolveStructInner(pt, .full),
@ -3903,7 +3851,7 @@ fn resolveStructInner(
const gpa = zcu.gpa;
const struct_obj = zcu.typeToStruct(ty).?;
const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap() orelse return });
const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau });
if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) {
return error.AnalysisFail;
@ -3915,7 +3863,7 @@ fn resolveStructInner(
var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
const zir = zcu.namespacePtr(struct_obj.namespace.unwrap().?).fileScope(zcu).zir;
const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir;
var sema: Sema = .{
.pt = pt,
.gpa = gpa,
@ -4196,7 +4144,7 @@ pub const single_const_pointer_to_comptime_int: Type = .{
.ip_index = .single_const_pointer_to_comptime_int_type,
};
pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type };
pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type };
pub const empty_tuple_type: Type = .{ .ip_index = .empty_tuple_type };
pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
@ -3704,7 +3704,7 @@ pub const @"unreachable": Value = .{ .ip_index = .unreachable_value };
pub const generic_poison: Value = .{ .ip_index = .generic_poison };
pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type };
pub const empty_struct: Value = .{ .ip_index = .empty_struct };
pub const empty_tuple: Value = .{ .ip_index = .empty_tuple };
pub fn makeBool(x: bool) Value {
return if (x) Value.true else Value.false;
35
src/Zcu.zig
@ -1497,6 +1497,20 @@ pub const SrcLoc = struct {
}
} else unreachable;
},
.tuple_field_type, .tuple_field_init => |field_info| {
const tree = try src_loc.file_scope.getTree(gpa);
const node = src_loc.relativeToNodeIndex(0);
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse
return tree.nodeToSpan(node);
const field = tree.fullContainerField(container_decl.ast.members[field_info.elem_index]).?;
return tree.nodeToSpan(switch (src_loc.lazy) {
.tuple_field_type => field.ast.type_expr,
.tuple_field_init => field.ast.value_expr,
else => unreachable,
});
},
.init_elem => |init_elem| {
const tree = try src_loc.file_scope.getTree(gpa);
const init_node = src_loc.relativeToNodeIndex(init_elem.init_node_offset);
@ -1939,6 +1953,12 @@ pub const LazySrcLoc = struct {
container_field_type: u32,
/// Like `container_field_name`, but points at the field's alignment.
container_field_align: u32,
/// The source location points to the type of the field at the given index
/// of the tuple type declaration at `tuple_decl_node_offset`.
tuple_field_type: TupleField,
/// The source location points to the default init of the field at the given index
/// of the tuple type declaration at `tuple_decl_node_offset`.
tuple_field_init: TupleField,
/// The source location points to the given element/field of a struct or
/// array initialization expression.
init_elem: struct {
@ -2016,13 +2036,20 @@ pub const LazySrcLoc = struct {
index: u31,
};
const ArrayCat = struct {
pub const ArrayCat = struct {
/// Points to the array concat AST node.
array_cat_offset: i32,
/// The index of the element the source location points to.
elem_index: u32,
};
pub const TupleField = struct {
/// Points to the AST node of the tuple type declaration.
tuple_decl_node_offset: i32,
/// The index of the tuple field the source location points to.
elem_index: u32,
};
pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease;
noinline fn nodeOffsetDebug(node_offset: i32) Offset {
@ -2052,6 +2079,8 @@ pub const LazySrcLoc = struct {
/// Returns `null` if the ZIR instruction has been lost across incremental updates.
pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } {
comptime assert(Zir.inst_tracking_version == 0);
const ip = &zcu.intern_pool;
const file_index, const zir_inst = inst: {
const info = base_node_inst.resolveFull(ip) orelse return null;
@ -2064,6 +2093,8 @@ pub const LazySrcLoc = struct {
const inst = zir.instructions.get(@intFromEnum(zir_inst));
const base_node: Ast.Node.Index = switch (inst.tag) {
.declaration => inst.data.declaration.src_node,
.struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node,
.struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node,
.extended => switch (inst.data.extended.opcode) {
.struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_node,
.union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node,
@ -3215,7 +3246,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// If this type has a `Cau` for resolution, it's automatically referenced.
const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).cau,
.struct_type => ip.loadStructType(ty).cau.toOptional(),
.union_type => ip.loadUnionType(ty).cau.toOptional(),
.enum_type => ip.loadEnumType(ty).cau,
.opaque_type => .none,
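For illustration (not part of the diff), a hedged sketch of constructing one of the new tuple-field source locations added in the Zcu.zig hunks above; `base_node_inst` is an assumed `InternPool.TrackedInst.Index` for the tuple type declaration:

    // Points at the type expression of the second field of the tuple declaration.
    const src: Zcu.LazySrcLoc = .{
        .base_node_inst = base_node_inst,
        .offset = .{ .tuple_field_type = .{
            .tuple_decl_node_offset = 0, // the tuple decl is the tracked node itself
            .elem_index = 1,
        } },
    };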
@ -985,7 +985,6 @@ fn createFileRootStruct(
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
@ -3191,7 +3190,7 @@ pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updat
.struct_type => |key| {
const struct_obj = ip.loadStructType(ty);
const outdated = already_updating or o: {
const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? });
const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau });
const o = zcu.outdated.swapRemove(anal_unit) or
zcu.potentially_outdated.swapRemove(anal_unit);
if (o) {
@ -3252,7 +3251,6 @@ fn recreateStructType(
const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // not a struct
.declared => |d| d,
};
@ -3283,16 +3281,13 @@ fn recreateStructType(
if (captures_len != key.captures.owned.len) return error.AnalysisFail;
// The old type will be unused, so drop its dependency information.
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? }));
const namespace_index = struct_obj.namespace.unwrap().?;
ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau }));
const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{
.layout = small.layout,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
@ -3308,17 +3303,17 @@ fn recreateStructType(
errdefer wip_ty.cancel(ip, pt.tid);
wip_ty.setName(ip, struct_obj.name);
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index);
const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, struct_obj.namespace, wip_ty.index);
try ip.addDependency(
gpa,
AnalUnit.wrap(.{ .cau = new_cau_index }),
.{ .src_hash = key.zir_index },
);
zcu.namespacePtr(namespace_index).owner_type = wip_ty.index;
zcu.namespacePtr(struct_obj.namespace).owner_type = wip_ty.index;
// No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive.
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index);
const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), struct_obj.namespace);
if (inst_info.inst == .main_struct_inst) {
// This is the root type of a file! Update the reference.
zcu.setFileRootType(inst_info.file, new_ty);
@ -3337,7 +3332,6 @@ fn recreateUnionType(
const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // not a union
.declared => |d| d,
};
@ -3429,9 +3423,7 @@ fn recreateEnumType(
const ip = &zcu.intern_pool;
const key = switch (full_key) {
.reified => unreachable, // never outdated
.empty_struct => unreachable, // never outdated
.generated_tag => unreachable, // never outdated
.reified, .generated_tag => unreachable, // never outdated
.declared => |d| d,
};
@ -3575,7 +3567,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace
};
const key = switch (full_key) {
.reified, .empty_struct, .generated_tag => {
.reified, .generated_tag => {
// Namespace always empty, so up-to-date.
namespace.generation = zcu.generation;
return;
@ -3114,7 +3114,7 @@ fn binOpImmediate(
const reg = try self.register_manager.allocReg(track_inst, gp);
if (track_inst) |inst| {
const mcv = .{ .register = reg };
const mcv: MCValue = .{ .register = reg };
log.debug("binOpRegister move lhs %{d} to register: {} -> {}", .{ inst, lhs, mcv });
branch.inst_table.putAssumeCapacity(inst, mcv);
@ -3252,7 +3252,7 @@ fn binOpRegister(
const reg = try self.register_manager.allocReg(track_inst, gp);
if (track_inst) |inst| {
const mcv = .{ .register = reg };
const mcv: MCValue = .{ .register = reg };
log.debug("binOpRegister move lhs %{d} to register: {} -> {}", .{ inst, lhs, mcv });
branch.inst_table.putAssumeCapacity(inst, mcv);
@ -3276,7 +3276,7 @@ fn binOpRegister(
const reg = try self.register_manager.allocReg(track_inst, gp);
if (track_inst) |inst| {
const mcv = .{ .register = reg };
const mcv: MCValue = .{ .register = reg };
log.debug("binOpRegister move rhs %{d} to register: {} -> {}", .{ inst, rhs, mcv });
branch.inst_table.putAssumeCapacity(inst, mcv);
@ -3650,7 +3650,6 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty
assert(off_type == Register or off_type == i13);
const is_imm = (off_type == i13);
const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off };
switch (abi_size) {
1, 2, 4, 8 => {
@ -3669,7 +3668,7 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty
.is_imm = is_imm,
.rd = value_reg,
.rs1 = addr_reg,
.rs2_or_imm = rs2_or_imm,
.rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off },
},
},
});
@ -4037,7 +4036,6 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
assert(off_type == Register or off_type == i13);
const is_imm = (off_type == i13);
const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off };
switch (abi_size) {
1, 2, 4, 8 => {
@ -4056,7 +4054,7 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t
.is_imm = is_imm,
.rd = value_reg,
.rs1 = addr_reg,
.rs2_or_imm = rs2_or_imm,
.rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off },
},
},
});
@ -3259,7 +3259,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.tuple_type,
.union_type,
.opaque_type,
.enum_type,
@ -3273,7 +3273,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
.undefined,
.void,
.null,
.empty_struct,
.empty_tuple,
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
@ -3708,7 +3708,7 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try func.resolveInst(un_op);
const sym_index = try func.bin_file.getGlobalSymbol("__zig_errors_len", null);
const errors_len = .{ .memory = @intFromEnum(sym_index) };
const errors_len: WValue = .{ .memory = @intFromEnum(sym_index) };
try func.emitWValue(operand);
const pt = func.pt;
@ -13683,7 +13683,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
const operand = try self.resolveInst(un_op);
const ty = self.typeOf(un_op);
const result = switch (try self.isNullPtr(inst, ty, operand)) {
const result: MCValue = switch (try self.isNullPtr(inst, ty, operand)) {
.eflags => |cc| .{ .eflags = cc.negate() },
else => unreachable,
};
@ -216,7 +216,7 @@ pub fn generateSymbol(
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.tuple_type,
.union_type,
.opaque_type,
.enum_type,
@ -230,7 +230,7 @@ pub fn generateSymbol(
.undefined,
.void,
.null,
.empty_struct,
.empty_tuple,
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
@ -456,7 +456,7 @@ pub fn generateSymbol(
if (padding > 0) try code.appendNTimes(0, padding);
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
const struct_begin = code.items.len;
for (
tuple.types.get(ip),
@ -891,7 +891,7 @@ pub const DeclGen = struct {
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.tuple_type,
.union_type,
.opaque_type,
.enum_type,
@ -908,7 +908,7 @@ pub const DeclGen = struct {
.undefined => unreachable,
.void => unreachable,
.null => unreachable,
.empty_struct => unreachable,
.empty_tuple => unreachable,
.@"unreachable" => unreachable,
.generic_poison => unreachable,
@ -1194,7 +1194,7 @@ pub const DeclGen = struct {
try writer.writeByte('}');
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
@ -1605,7 +1605,7 @@ pub const DeclGen = struct {
}),
}
},
.anon_struct_type => |anon_struct_info| {
.tuple_type => |tuple_info| {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderCType(writer, ctype);
@ -1614,9 +1614,9 @@ pub const DeclGen = struct {
try writer.writeByte('{');
var need_comma = false;
for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
for (0..tuple_info.types.len) |field_index| {
if (tuple_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(tuple_info.types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
if (need_comma) try writer.writeByte(',');
@ -5411,9 +5411,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue {
const input_val = try f.resolveInst(input);
try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "r" else constraint, null)});
try f.writeCValue(writer, if (asmInputNeedsLocal(f, constraint, input_val)) local: {
const input_local = .{ .local = locals_index };
const input_local_idx = locals_index;
locals_index += 1;
break :local input_local;
break :local .{ .local = input_local_idx };
} else input_val, .Other);
try writer.writeByte(')');
}
@ -5651,15 +5651,12 @@ fn fieldLocation(
.begin,
};
},
.anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.tuple_type => return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu))
.begin
else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
.{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
else
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index } },
.{ .field = .{ .field = field_index } },
.union_type => {
const loaded_union = ip.loadUnionType(container_ty.toIntern());
switch (loaded_union.flagsUnordered(ip).layout) {
@ -5892,10 +5889,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
},
}
},
.anon_struct_type => |anon_struct_info| if (anon_struct_info.fieldName(ip, extra.field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = extra.field_index },
.tuple_type => .{ .field = extra.field_index },
.union_type => field_name: {
const loaded_union = ip.loadUnionType(struct_ty.toIntern());
switch (loaded_union.flagsUnordered(ip).layout) {
@ -7366,16 +7360,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
},
}
},
.anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
.tuple_type => |tuple_info| for (0..tuple_info.types.len) |field_index| {
if (tuple_info.values.get(ip)[field_index] != .none) continue;
const field_ty = Type.fromInterned(tuple_info.types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete));
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index });
try f.writeCValueMember(writer, local, .{ .field = field_index });
try a.assign(f, writer);
try f.writeCValue(writer, resolved_elements[field_index], .Other);
try a.end(f, writer);
@ -1350,7 +1350,7 @@ pub const Pool = struct {
.i0_type,
.anyopaque_type,
.void_type,
.empty_struct_type,
.empty_tuple_type,
.type_type,
.comptime_int_type,
.comptime_float_type,
@ -1450,7 +1450,7 @@ pub const Pool = struct {
.null_value,
.bool_true,
.bool_false,
.empty_struct,
.empty_tuple,
.generic_poison,
.none,
=> unreachable,
@ -1730,16 +1730,16 @@ pub const Pool = struct {
),
}
},
.anon_struct_type => |anon_struct_info| {
.tuple_type => |tuple_info| {
const scratch_top = scratch.items.len;
defer scratch.shrinkRetainingCapacity(scratch_top);
try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len *
try scratch.ensureUnusedCapacity(allocator, tuple_info.types.len *
@typeInfo(Field).@"struct".fields.len);
var hasher = Hasher.init;
for (0..anon_struct_info.types.len) |field_index| {
if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
for (0..tuple_info.types.len) |field_index| {
if (tuple_info.values.get(ip)[field_index] != .none) continue;
const field_type = Type.fromInterned(
anon_struct_info.types.get(ip)[field_index],
tuple_info.types.get(ip)[field_index],
);
const field_ctype = try pool.fromType(
allocator,
@ -1750,11 +1750,7 @@ pub const Pool = struct {
kind.noParameter(),
);
if (field_ctype.index == .void) continue;
const field_name = if (anon_struct_info.fieldName(ip, @intCast(field_index))
.unwrap()) |field_name|
try pool.string(allocator, field_name.toSlice(ip))
else
try pool.fmt(allocator, "f{d}", .{field_index});
const field_name = try pool.fmt(allocator, "f{d}", .{field_index});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
.name = field_name.index,
.ctype = field_ctype.index,
@ -2563,7 +2563,7 @@ pub const Object = struct {
}
switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty;
defer fields.deinit(gpa);
@ -2582,11 +2582,8 @@ pub const Object = struct {
const field_offset = field_align.forward(offset);
offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0)
tuple.names.get(ip)[i].toSlice(ip)
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer if (tuple.names.len == 0) gpa.free(field_name);
var name_buf: [32]u8 = undefined;
const field_name = std.fmt.bufPrint(&name_buf, "{d}", .{i}) catch unreachable;
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(field_name),
@ -3426,7 +3423,7 @@ pub const Object = struct {
.adhoc_inferred_error_set_type,
=> try o.errorIntType(),
.generic_poison_type,
.empty_struct_type,
.empty_tuple_type,
=> unreachable,
// values, not types
.undef,
@ -3443,7 +3440,7 @@ pub const Object = struct {
.null_value,
.bool_true,
.bool_false,
.empty_struct,
.empty_tuple,
.generic_poison,
.none,
=> unreachable,
@ -3610,13 +3607,13 @@ pub const Object = struct {
);
return ty;
},
.anon_struct_type => |anon_struct_type| {
.tuple_type => |tuple_type| {
var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty;
defer llvm_field_types.deinit(o.gpa);
// Although we can estimate how much capacity to add, these cannot be
// relied upon because of the recursive calls to lowerType below.
try llvm_field_types.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len);
try o.struct_field_map.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len);
try llvm_field_types.ensureUnusedCapacity(o.gpa, tuple_type.types.len);
try o.struct_field_map.ensureUnusedCapacity(o.gpa, tuple_type.types.len);
comptime assert(struct_layout_version == 2);
var offset: u64 = 0;
@ -3625,8 +3622,8 @@ pub const Object = struct {
const struct_size = t.abiSize(zcu);
for (
anon_struct_type.types.get(ip),
anon_struct_type.values.get(ip),
tuple_type.types.get(ip),
tuple_type.values.get(ip),
0..,
) |field_ty, field_val, field_index| {
if (field_val != .none) continue;
@ -3979,7 +3976,7 @@ pub const Object = struct {
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.tuple_type,
.union_type,
.opaque_type,
.enum_type,
@ -3993,7 +3990,7 @@ pub const Object = struct {
.undefined => unreachable, // non-runtime value
.void => unreachable, // non-runtime value
.null => unreachable, // non-runtime value
.empty_struct => unreachable, // non-runtime value
.empty_tuple => unreachable, // non-runtime value
.@"unreachable" => unreachable, // non-runtime value
.generic_poison => unreachable, // non-runtime value
@ -4232,7 +4229,7 @@ pub const Object = struct {
),
}
},
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
const struct_ty = try o.lowerType(ty);
const llvm_len = struct_ty.aggregateLen(&o.builder);
@ -12516,7 +12513,7 @@ fn isByRef(ty: Type, zcu: *Zcu) bool {
.array, .frame => return ty.hasRuntimeBits(zcu),
.@"struct" => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
.tuple_type => |tuple| {
var count: usize = 0;
for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| {
if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
@@ -731,13 +731,15 @@ const NavGen = struct {
.direct => {
const result_ty_id = try self.resolveType(Type.bool, .direct);
const result_id = self.spv.allocId();
- const operands = .{
- .id_result_type = result_ty_id,
- .id_result = result_id,
- };
switch (value) {
- true => try section.emit(self.spv.gpa, .OpConstantTrue, operands),
- false => try section.emit(self.spv.gpa, .OpConstantFalse, operands),
+ inline else => |val_ct| try section.emit(
+ self.spv.gpa,
+ if (val_ct) .OpConstantTrue else .OpConstantFalse,
+ .{
+ .id_result_type = result_ty_id,
+ .id_result = result_id,
+ },
+ ),
}
return result_id;
},
@@ -915,7 +917,7 @@ const NavGen = struct {
.error_union_type,
.simple_type,
.struct_type,
- .anon_struct_type,
+ .tuple_type,
.union_type,
.opaque_type,
.enum_type,
@@ -937,7 +939,7 @@ const NavGen = struct {
.undefined,
.void,
.null,
- .empty_struct,
+ .empty_tuple,
.@"unreachable",
.generic_poison,
=> unreachable, // non-runtime values
@@ -1125,7 +1127,7 @@ const NavGen = struct {

return try self.constructStruct(ty, types.items, constituents.items);
},
- .anon_struct_type => unreachable, // TODO
+ .tuple_type => unreachable, // TODO
else => unreachable,
},
.un => |un| {
@@ -1718,7 +1720,7 @@ const NavGen = struct {
},
.@"struct" => {
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
const member_types = try self.gpa.alloc(IdRef, tuple.values.len);
defer self.gpa.free(member_types);

@@ -2831,18 +2833,12 @@ const NavGen = struct {
}
},
.vulkan => {
- const op_result_ty = blk: {
- // Operations return a struct{T, T}
- // where T is maybe vectorized.
- const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() };
- const values = [2]InternPool.Index{ .none, .none };
- const index = try ip.getAnonStructType(zcu.gpa, pt.tid, .{
- .types = &types,
- .values = &values,
- .names = &.{},
- });
- break :blk Type.fromInterned(index);
- };
+ // Operations return a struct{T, T}
+ // where T is maybe vectorized.
+ const op_result_ty: Type = .fromInterned(try ip.getTupleType(zcu.gpa, pt.tid, .{
+ .types = &.{ arith_op_ty.toIntern(), arith_op_ty.toIntern() },
+ .values = &.{ .none, .none },
+ }));
const op_result_ty_id = try self.resolveType(op_result_ty, .direct);

const opcode: Opcode = switch (op) {
@@ -4867,7 +4863,7 @@ const NavGen = struct {
var index: usize = 0;

switch (ip.indexToKey(result_ty.toIntern())) {
- .anon_struct_type => |tuple| {
+ .tuple_type => |tuple| {
for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| {
if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue;
assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu));
@@ -6216,15 +6212,20 @@ const NavGen = struct {
try self.extractField(Type.anyerror, operand_id, eu_layout.errorFieldIndex());

const result_id = self.spv.allocId();
- const operands = .{
- .id_result_type = bool_ty_id,
- .id_result = result_id,
- .operand_1 = error_id,
- .operand_2 = try self.constInt(Type.anyerror, 0, .direct),
- };
switch (pred) {
- .is_err => try self.func.body.emit(self.spv.gpa, .OpINotEqual, operands),
- .is_non_err => try self.func.body.emit(self.spv.gpa, .OpIEqual, operands),
+ inline else => |pred_ct| try self.func.body.emit(
+ self.spv.gpa,
+ switch (pred_ct) {
+ .is_err => .OpINotEqual,
+ .is_non_err => .OpIEqual,
+ },
+ .{
+ .id_result_type = bool_ty_id,
+ .id_result = result_id,
+ .operand_1 = error_id,
+ .operand_2 = try self.constInt(Type.anyerror, 0, .direct),
+ },
+ ),
}
return result_id;
}
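For context on the `getAnonStructType` to `getTupleType` change above: with this commit every tuple, including one written out as `struct { ... }`, is interned by structure rather than by declaration. A minimal sketch of what that means at the language level (illustrative, not part of this diff):

    const std = @import("std");

    test "tuples are structurally equivalent" {
        // Two independently written tuple types with the same field types
        // intern to the same type, so no staged resolution is needed.
        const A = struct { u32, u32 };
        const B = struct { u32, u32 };
        comptime std.debug.assert(A == B);
    }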
@@ -2599,16 +2599,15 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
.anyframe_type,
.error_union_type,
.simple_type,
- .anon_struct_type,
+ .tuple_type,
.func_type,
.error_set_type,
.inferred_error_set_type,
=> .decl_alias,
.struct_type => tag: {
const loaded_struct = ip.loadStructType(nav_val.toIntern());
- if (loaded_struct.zir_index == .none) break :tag .decl_alias;

- const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?;
+ const type_inst_info = loaded_struct.zir_index.resolveFull(ip).?;
if (type_inst_info.file != inst_info.file) break :tag .decl_alias;

const value_inst = value_inst: {
@@ -3349,7 +3348,7 @@ fn updateType(
.union_type,
.opaque_type,
=> unreachable,
- .anon_struct_type => |anon_struct_type| if (anon_struct_type.types.len == 0) {
+ .tuple_type => |tuple_type| if (tuple_type.types.len == 0) {
try wip_nav.abbrevCode(.namespace_struct_type);
try wip_nav.strp(name);
try diw.writeByte(@intFromBool(false));
@@ -3359,15 +3358,15 @@ fn updateType(
try uleb128(diw, ty.abiSize(zcu));
try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?);
var field_byte_offset: u64 = 0;
- for (0..anon_struct_type.types.len) |field_index| {
- const comptime_value = anon_struct_type.values.get(ip)[field_index];
+ for (0..tuple_type.types.len) |field_index| {
+ const comptime_value = tuple_type.values.get(ip)[field_index];
try wip_nav.abbrevCode(if (comptime_value != .none) .struct_field_comptime else .struct_field);
- if (anon_struct_type.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
- const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index});
- defer dwarf.gpa.free(field_name);
+ {
+ var name_buf: [32]u8 = undefined;
+ const field_name = std.fmt.bufPrint(&name_buf, "{d}", .{field_index}) catch unreachable;
try wip_nav.strp(field_name);
}
- const field_type = Type.fromInterned(anon_struct_type.types.get(ip)[field_index]);
+ const field_type = Type.fromInterned(tuple_type.types.get(ip)[field_index]);
try wip_nav.refType(field_type);
if (comptime_value != .none) try wip_nav.blockValue(
src_loc,
@@ -3595,16 +3594,26 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
try wip_nav.flush(ty_src_loc);
} else {
- const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
- assert(decl_inst.tag == .extended);
- if (switch (decl_inst.data.extended.opcode) {
- .struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
- .enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
- .union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
- .opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
- .reify => @as(Zir.Inst.NameStrategy, @enumFromInt(decl_inst.data.extended.small)),
- else => unreachable,
- } == .parent) return;
+ {
+ // Note that changes to ZIR instruction tracking only need to update this code
+ // if a newly-tracked instruction can be a type's owner `zir_index`.
+ comptime assert(Zir.inst_tracking_version == 0);
+
+ const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst));
+ const name_strat: Zir.Inst.NameStrategy = switch (decl_inst.tag) {
+ .struct_init, .struct_init_ref, .struct_init_anon => .anon,
+ .extended => switch (decl_inst.data.extended.opcode) {
+ .struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
+ .enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
+ .union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
+ .opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy,
+ .reify => @as(Zir.Inst.NameStrategy, @enumFromInt(decl_inst.data.extended.small)),
+ else => unreachable,
+ },
+ else => unreachable,
+ };
+ if (name_strat == .parent) return;
+ }

const unit = try dwarf.getUnit(file.mod);
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, type_index);
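The DWARF code above falls back to printing the field index as the field name, which matches how tuple fields are surfaced in the language itself. A small sketch (illustration only, not taken from this diff):

    const std = @import("std");

    test "tuple fields are named by index" {
        const Pair = struct { u32, u8 };
        const fields = @typeInfo(Pair).@"struct".fields;
        try std.testing.expectEqualStrings("0", fields[0].name);
        try std.testing.expectEqualStrings("1", fields[1].name);
    }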
@@ -931,7 +931,7 @@ fn addNavExports(
break;
}
}
- const sym = .{
+ const sym: aout.Sym = .{
.value = atom.offset.?,
.type = atom.type.toGlobal(),
.name = try gpa.dupe(u8, exp_name),
@@ -34,7 +34,7 @@ const Zcu = @import("Zcu.zig");
const mingw = @import("mingw.zig");
const dev = @import("dev.zig");

- pub const std_options = .{
+ pub const std_options: std.Options = .{
.wasiCwd = wasi_cwd,
.logFn = log,
.enable_segfault_handler = false,
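The `std_options` edits here and in the test cases further down all follow the same pattern: without anonymous struct types, a declaration must name its result type so the literal is built as a `std.Options` directly. A sketch of the new form (illustrative):

    const std = @import("std");

    // The annotation gives the literal a result type; an untyped `.{ ... }`
    // would now produce its own struct type rather than coercing later.
    pub const std_options: std.Options = .{
        .log_level = .debug,
    };

    pub fn main() void {}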
@@ -74,7 +74,7 @@ pub fn print(
.error_union_type,
.simple_type,
.struct_type,
- .anon_struct_type,
+ .tuple_type,
.union_type,
.opaque_type,
.enum_type,
@@ -85,7 +85,7 @@ pub fn print(
.undef => try writer.writeAll("undefined"),
.simple_value => |simple_value| switch (simple_value) {
.void => try writer.writeAll("{}"),
- .empty_struct => try writer.writeAll(".{}"),
+ .empty_tuple => try writer.writeAll(".{}"),
.generic_poison => try writer.writeAll("(generic poison)"),
else => try writer.writeAll(@tagName(simple_value)),
},
@@ -563,6 +563,8 @@ const Writer = struct {
.enum_decl => try self.writeEnumDecl(stream, extended),
.opaque_decl => try self.writeOpaqueDecl(stream, extended),

+ .tuple_decl => try self.writeTupleDecl(stream, extended),

.await_nosuspend,
.c_undef,
.c_include,
@@ -1421,7 +1423,6 @@ const Writer = struct {

try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
- try self.writeFlag(stream, "tuple, ", small.is_tuple);

try stream.print("{s}, ", .{@tagName(small.name_strategy)});

@@ -1506,11 +1507,8 @@ const Writer = struct {
const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0;
cur_bit_bag >>= 1;

- var field_name_index: Zir.NullTerminatedString = .empty;
- if (!small.is_tuple) {
- field_name_index = @enumFromInt(self.code.extra[extra_index]);
- extra_index += 1;
- }
+ const field_name_index: Zir.NullTerminatedString = @enumFromInt(self.code.extra[extra_index]);
+ extra_index += 1;
const doc_comment_index: Zir.NullTerminatedString = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;

@@ -1948,6 +1946,32 @@ const Writer = struct {
try self.writeSrcNode(stream, 0);
}

+ fn writeTupleDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
+ const fields_len = extended.small;
+ assert(fields_len != 0);
+ const extra = self.code.extraData(Zir.Inst.TupleDecl, extended.operand);

+ var extra_index = extra.end;

+ try stream.writeAll("{ ");

+ for (0..fields_len) |field_idx| {
+ if (field_idx != 0) try stream.writeAll(", ");

+ const field_ty, const field_init = self.code.extra[extra_index..][0..2].*;
+ extra_index += 2;

+ try stream.print("@\"{d}\": ", .{field_idx});
+ try self.writeInstRef(stream, @enumFromInt(field_ty));
+ try stream.writeAll(" = ");
+ try self.writeInstRef(stream, @enumFromInt(field_init));
+ }

+ try stream.writeAll(" }) ");

+ try self.writeSrcNode(stream, extra.data.src_node);
+ }

fn writeErrorSetDecl(
self: *Writer,
stream: anytype,
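`writeTupleDecl` prints each field as `@"N": type = init`, reflecting the new rule that only `comptime` tuple fields may carry an initializer. A hedged sketch of a tuple declaration that satisfies this rule (illustration, not from the diff):

    const std = @import("std");

    test "only comptime tuple fields may have defaults" {
        // `comptime u32 = 1` is allowed; a plain `u32 = 1` field no longer is.
        const T = struct { comptime u32 = 1, []const u8 };
        const t: T = .{ 1, "hi" };
        try std.testing.expect(t[0] == 1);
        try std.testing.expectEqualStrings("hi", t[1]);
    }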
@@ -2314,8 +2314,11 @@ fn transStringLiteralInitializer(
while (i < num_inits) : (i += 1) {
init_list[i] = try transCreateCharLitNode(c, false, stmt.getCodeUnit(i));
}
- const init_args = .{ .len = num_inits, .elem_type = elem_type };
- const init_array_type = try if (array_type.tag() == .array_type) Tag.array_type.create(c.arena, init_args) else Tag.null_sentinel_array_type.create(c.arena, init_args);
+ const init_args: ast.Payload.Array.ArrayTypeInfo = .{ .len = num_inits, .elem_type = elem_type };
+ const init_array_type = if (array_type.tag() == .array_type)
+ try Tag.array_type.create(c.arena, init_args)
+ else
+ try Tag.null_sentinel_array_type.create(c.arena, init_args);
break :blk try Tag.array_init.create(c.arena, .{
.cond = init_array_type,
.cases = init_list,
@@ -3910,7 +3913,7 @@ fn transCreateCompoundAssign(

if ((is_mod or is_div) and is_signed) {
if (requires_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
- const operands = .{ .lhs = lhs_node, .rhs = rhs_node };
+ const operands: @FieldType(ast.Payload.BinOp, "data") = .{ .lhs = lhs_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.signed_remainder.create(c.arena, operands)
else
@@ -3949,7 +3952,7 @@ fn transCreateCompoundAssign(
if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node);
if ((is_mod or is_div) and is_signed) {
if (requires_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node);
- const operands = .{ .lhs = ref_node, .rhs = rhs_node };
+ const operands: @FieldType(ast.Payload.BinOp, "data") = .{ .lhs = ref_node, .rhs = rhs_node };
const builtin = if (is_mod)
try Tag.signed_remainder.create(c.arena, operands)
else
@@ -4777,7 +4780,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan
const is_const = is_fn_proto or child_qt.isConstQualified();
const is_volatile = child_qt.isVolatileQualified();
const elem_type = try transQualType(c, scope, child_qt, source_loc);
- const ptr_info = .{
+ const ptr_info: @FieldType(ast.Payload.Pointer, "data") = .{
.is_const = is_const,
.is_volatile = is_volatile,
.elem_type = elem_type,
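The translate-c changes give previously-inferred literals explicit result types, using `@FieldType` to name a payload's field type without spelling it out. A minimal sketch of the pattern with hypothetical `Wrapper`/`BinOp` types (not the real translate-c AST):

    const std = @import("std");

    test "@FieldType as a result type for a literal" {
        const BinOp = struct { lhs: i32, rhs: i32 };
        const Wrapper = struct { data: BinOp };
        // The literal is typed up front instead of relying on a later coercion.
        const operands: @FieldType(Wrapper, "data") = .{ .lhs = 2, .rhs = 3 };
        try std.testing.expect(operands.lhs + operands.rhs == 5);
    }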
@@ -26,7 +26,6 @@ test {
_ = @import("behavior/duplicated_test_names.zig");
_ = @import("behavior/defer.zig");
_ = @import("behavior/destructure.zig");
- _ = @import("behavior/empty_tuple_fields.zig");
_ = @import("behavior/empty_union.zig");
_ = @import("behavior/enum.zig");
_ = @import("behavior/error.zig");
@@ -596,7 +596,7 @@ test "type coercion of anon struct literal to array" {

var x2: U = .{ .a = 42 };
_ = &x2;
- const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
+ const t2 = .{ x2, U{ .b = true }, U{ .c = "hello" } };
const arr2: [3]U = t2;
try expect(arr2[0].a == 42);
try expect(arr2[1].b == true);
@@ -607,40 +607,6 @@ test "type coercion of anon struct literal to array" {
try comptime S.doTheTest();
}

- test "type coercion of pointer to anon struct literal to pointer to array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

- const S = struct {
- const U = union {
- a: u32,
- b: bool,
- c: []const u8,
- };

- fn doTheTest() !void {
- var x1: u8 = 42;
- _ = &x1;
- const t1 = &.{ x1, 56, 54 };
- const arr1: *const [3]u8 = t1;
- try expect(arr1[0] == 42);
- try expect(arr1[1] == 56);
- try expect(arr1[2] == 54);

- var x2: U = .{ .a = 42 };
- _ = &x2;
- const t2 = &.{ x2, .{ .b = true }, .{ .c = "hello" } };
- const arr2: *const [3]U = t2;
- try expect(arr2[0].a == 42);
- try expect(arr2[1].b == true);
- try expect(mem.eql(u8, arr2[2].c, "hello"));
- }
- };
- try S.doTheTest();
- try comptime S.doTheTest();
- }

test "array with comptime-only element type" {
const a = [_]type{ u32, i32 };
try testing.expect(a[0] == u32);
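The surviving test above now spells out `U{ .b = true }` for each element, and the pointer-to-literal variant is deleted: element literals inside an untyped tuple no longer pick up a type from a later coercion. A sketch of the working pattern (illustrative):

    const std = @import("std");

    test "coerce a tuple of typed elements to an array" {
        const U = union { a: u32, b: bool };
        var x: U = .{ .a = 42 };
        _ = &x;
        // Each element carries its own type; the array result type does the rest.
        const arr: [2]U = .{ x, U{ .b = true } };
        try std.testing.expect(arr[0].a == 42);
        try std.testing.expect(arr[1].b);
    }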
@@ -2600,32 +2600,6 @@ test "result type is preserved into comptime block" {
try expect(x == 123);
}

- test "implicit cast from ptr to tuple to ptr to struct" {
- if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const ComptimeReason = union(enum) {
- c_import: struct {
- a: u32,
- },
- };

- const Block = struct {
- reason: ?*const ComptimeReason,
- };

- var a: u32 = 16;
- _ = &a;
- var reason = .{ .c_import = .{ .a = a } };
- var block = Block{
- .reason = &reason,
- };
- _ = &block;
- try expect(block.reason.?.c_import.a == 16);
- }

test "bitcast vector" {
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
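The deleted cast.zig test relied on a pointer to an untyped literal coercing to `*const ComptimeReason`. The straightforward replacement is to type the variable itself; a sketch under that assumption:

    const std = @import("std");

    test "type the variable instead of coercing through its pointer" {
        const ComptimeReason = union(enum) { c_import: struct { a: u32 } };
        const Block = struct { reason: ?*const ComptimeReason };
        var a: u32 = 16;
        _ = &a;
        const reason: ComptimeReason = .{ .c_import = .{ .a = a } };
        const block: Block = .{ .reason = &reason };
        try std.testing.expect(block.reason.?.c_import.a == 16);
    }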
@@ -1 +0,0 @@
- struct {}
@@ -1 +0,0 @@
- union {}
@@ -1,28 +0,0 @@
- const std = @import("std");
- const builtin = @import("builtin");

- test "empty file level struct" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const T = @import("empty_file_level_struct.zig");
- const info = @typeInfo(T);
- try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len);
- try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name);
- try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"struct");
- }

- test "empty file level union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const T = @import("empty_file_level_union.zig");
- const info = @typeInfo(T);
- try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len);
- try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name);
- try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union");
- }
@@ -1013,84 +1013,6 @@ test "struct with 0-length union array field" {
try expectEqual(@as(usize, 0), s.zero_length.len);
}

- test "type coercion of anon struct literal to struct" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };

- const Foo = struct {
- field: i32 = 1234,
- };

- fn doTheTest() !void {
- var y: u32 = 42;
- _ = &y;
- const t0 = .{ .A = 123, .B = "foo", .C = {} };
- const t1 = .{ .A = y, .B = "foo", .C = {} };
- const y0: S2 = t0;
- const y1: S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- try comptime S.doTheTest();
- }

- test "type coercion of pointer to anon struct literal to pointer to struct" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

- const S = struct {
- const S2 = struct {
- A: u32,
- B: []const u8,
- C: void,
- D: Foo = .{},
- };

- const Foo = struct {
- field: i32 = 1234,
- };

- fn doTheTest() !void {
- var y: u32 = 42;
- _ = &y;
- const t0 = &.{ .A = 123, .B = "foo", .C = {} };
- const t1 = &.{ .A = y, .B = "foo", .C = {} };
- const y0: *const S2 = t0;
- const y1: *const S2 = t1;
- try expect(y0.A == 123);
- try expect(std.mem.eql(u8, y0.B, "foo"));
- try expect(y0.C == {});
- try expect(y0.D.field == 1234);
- try expect(y1.A == y);
- try expect(std.mem.eql(u8, y1.B, "foo"));
- try expect(y1.C == {});
- try expect(y1.D.field == 1234);
- }
- };
- try S.doTheTest();
- try comptime S.doTheTest();
- }

test "packed struct with undefined initializers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -2183,3 +2105,20 @@ test "extern struct @FieldType" {
comptime assert(@FieldType(S, "b") == f64);
comptime assert(@FieldType(S, "c") == *S);
}

+ test "anonymous struct equivalence" {
+ const S = struct {
+ fn anonStructType(comptime x: anytype) type {
+ const val = .{ .a = "hello", .b = x };
+ return @TypeOf(val);
+ }
+ };

+ const A = S.anonStructType(123);
+ const B = S.anonStructType(123);
+ const C = S.anonStructType(456);

+ comptime assert(A == B);
+ comptime assert(A != C);
+ comptime assert(B != C);
+ }
@@ -150,7 +150,7 @@ test "array-like initializer for tuple types" {
.type = u8,
.default_value = null,
.is_comptime = false,
- .alignment = @alignOf(i32),
+ .alignment = @alignOf(u8),
},
},
},
@@ -566,16 +566,28 @@ test "comptime fields in tuple can be initialized" {
_ = &a;
}

- test "tuple default values" {
- const T = struct {
- usize,
- usize = 123,
- usize = 456,
- };
+ test "empty struct in tuple" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const t: T = .{1};

- try expectEqual(1, t[0]);
- try expectEqual(123, t[1]);
- try expectEqual(456, t[2]);
+ const T = struct { struct {} };
+ const info = @typeInfo(T);
+ try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len);
+ try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name);
+ try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"struct");
}

+ test "empty union in tuple" {
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

+ const T = struct { union {} };
+ const info = @typeInfo(T);
+ try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len);
+ try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name);
+ try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union");
+ }
@@ -9,7 +9,7 @@ test "tuple declaration type info" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

{
- const T = struct { comptime u32 align(2) = 1, []const u8 };
+ const T = struct { comptime u32 = 1, []const u8 };
const info = @typeInfo(T).@"struct";

try expect(info.layout == .auto);
@@ -22,7 +22,7 @@ test "tuple declaration type info" {
try expect(info.fields[0].type == u32);
try expect(@as(*const u32, @ptrCast(@alignCast(info.fields[0].default_value))).* == 1);
try expect(info.fields[0].is_comptime);
- try expect(info.fields[0].alignment == 2);
+ try expect(info.fields[0].alignment == @alignOf(u32));

try expectEqualStrings(info.fields[1].name, "1");
try expect(info.fields[1].type == []const u8);
@@ -32,7 +32,7 @@ test "tuple declaration type info" {
}
}

- test "Tuple declaration usage" {
+ test "tuple declaration usage" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
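With aligned fields disallowed in tuples, the type-info test above now checks the natural alignment instead of an explicit `align(2)`. Sketch (illustrative):

    const std = @import("std");

    test "tuple fields report natural alignment" {
        const T = struct { u32, []const u8 };
        const info = @typeInfo(T).@"struct";
        try std.testing.expect(info.fields[0].alignment == @alignOf(u32));
        try std.testing.expect(info.fields[1].alignment == @alignOf([]const u8));
    }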
@@ -986,76 +986,6 @@ test "function call result coerces from tagged union to the tag" {
try comptime S.doTheTest();
}

- test "cast from anonymous struct to union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- _ = &y;
- const t0 = .{ .A = 123 };
- const t1 = .{ .B = "foo" };
- const t2 = .{ .C = {} };
- const t3 = .{ .A = y };
- const x0: U = t0;
- var x1: U = t1;
- _ = &x1;
- const x2: U = t2;
- var x3: U = t3;
- _ = &x3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2 == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- try comptime S.doTheTest();
- }

- test "cast from pointer to anonymous struct to pointer to union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;

- const S = struct {
- const U = union(enum) {
- A: u32,
- B: []const u8,
- C: void,
- };
- fn doTheTest() !void {
- var y: u32 = 42;
- _ = &y;
- const t0 = &.{ .A = 123 };
- const t1 = &.{ .B = "foo" };
- const t2 = &.{ .C = {} };
- const t3 = &.{ .A = y };
- const x0: *const U = t0;
- var x1: *const U = t1;
- _ = &x1;
- const x2: *const U = t2;
- var x3: *const U = t3;
- _ = &x3;
- try expect(x0.A == 123);
- try expect(std.mem.eql(u8, x1.B, "foo"));
- try expect(x2.* == .C);
- try expect(x3.A == y);
- }
- };
- try S.doTheTest();
- try comptime S.doTheTest();
- }

test "switching on non exhaustive union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
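The two deleted union tests exercised coercion from anonymous struct literals (and pointers to them) into unions. The supported spelling is a plain union literal driven by the result type; a minimal sketch:

    const std = @import("std");

    test "initialize a union through its result type" {
        const U = union(enum) { A: u32, B: []const u8, C: void };
        const x0: U = .{ .A = 123 };
        const x1: U = .{ .B = "foo" };
        try std.testing.expect(x0.A == 123);
        try std.testing.expectEqualStrings("foo", x1.B);
    }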
@@ -1,7 +1,8 @@
export fn foo() void {
const S = struct { a: u32 };
+ const sentinel: S = .{ .a = 1 };
var arr = [_]S{ .{ .a = 1 }, .{ .a = 2 } };
- const s = arr[0..1 :.{ .a = 1 }];
+ const s = arr[0..1 :sentinel];
_ = s;
}

@@ -9,5 +10,5 @@ export fn foo() void {
// backend=stage2
// target=native
//
- // :4:26: error: non-scalar sentinel type 'tmp.foo.S'
+ // :5:25: error: non-scalar sentinel type 'tmp.foo.S'
// :2:15: note: struct declared here
@@ -18,4 +18,5 @@ pub export fn entry2() void {
//
// :3:6: error: no field or member function named 'copy' in '[]const u8'
// :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})'
- // :12:18: error: no field or member function named 'bar' in 'struct{comptime foo: comptime_int = 1}'
+ // :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_170'
+ // :12:6: note: struct declared here
test/cases/compile_errors/coerce_anon_struct.zig (new file, 11 lines)
@@ -0,0 +1,11 @@
+ const T = struct { x: u32 };
+ export fn foo() void {
+ const a = .{ .x = 123 };
+ _ = @as(T, a);
+ }

+ // error
+ //
+ // :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_159'
+ // :3:16: note: struct declared here
+ // :1:11: note: struct declared here
@@ -10,6 +10,6 @@ pub export fn entry() void {
// backend=stage2
// target=native
//
- // :4:28: error: type 'anyerror!tmp.entry.Foo' cannot be destructured
+ // :4:28: error: type 'anyerror!struct { u8, u8 }' cannot be destructured
// :4:26: note: result destructured here
// :4:28: note: consider using 'try', 'catch', or 'if'
test/cases/compile_errors/file_level_tuple.zig (new file, 6 lines)
@@ -0,0 +1,6 @@
+ u32,
+ comptime u8 = 123,

+ // error
+ //
+ // :1:1: error: file cannot be a tuple
@@ -10,11 +10,6 @@ export fn badTupleField() void {
_ = .{ &x, &y };
_ = @TypeOf(x, y);
}
- export fn badNestedField() void {
- const x = .{ .foo = "hi", .bar = .{ 0, 1 } };
- const y = .{ .foo = "hello", .bar = .{ 2, "hi" } };
- _ = @TypeOf(x, y);
- }
export fn incompatiblePointers() void {
const x: []const u8 = "foo";
const y: [*:0]const u8 = "bar";
@@ -39,14 +34,9 @@ export fn incompatiblePointers4() void {
// :11:9: note: incompatible types: 'u32' and '*const [5:0]u8'
// :11:17: note: type 'u32' here
// :11:20: note: type '*const [5:0]u8' here
- // :16:9: error: struct field 'bar' has conflicting types
- // :16:9: note: struct field '1' has conflicting types
- // :16:9: note: incompatible types: 'comptime_int' and '*const [2:0]u8'
- // :16:17: note: type 'comptime_int' here
- // :16:20: note: type '*const [2:0]u8' here
- // :21:9: error: incompatible types: '[]const u8' and '[*:0]const u8'
- // :21:17: note: type '[]const u8' here
- // :21:20: note: type '[*:0]const u8' here
- // :28:9: error: incompatible types: '[]const u8' and '[*]const u8'
- // :28:23: note: type '[]const u8' here
- // :28:26: note: type '[*]const u8' here
+ // :16:9: error: incompatible types: '[]const u8' and '[*:0]const u8'
+ // :16:17: note: type '[]const u8' here
+ // :16:20: note: type '[*:0]const u8' here
+ // :23:9: error: incompatible types: '[]const u8' and '[*]const u8'
+ // :23:23: note: type '[]const u8' here
+ // :23:26: note: type '[*]const u8' here
@@ -29,7 +29,5 @@ export fn h() void {
// :9:16: error: missing struct field: x
// :1:11: note: struct declared here
// :18:16: error: missing tuple field with index 1
- // :16:11: note: struct declared here
// :22:16: error: missing tuple field with index 0
// :22:16: note: missing tuple field with index 1
- // :16:11: note: struct declared here
@@ -21,6 +21,6 @@ comptime {
// :14:5: note: also here
//
// Compile Log Output:
- // @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } })
- // @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } })
- // @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } })
+ // @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } })
+ // @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } })
+ // @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } })
@@ -72,6 +72,5 @@ pub export fn entry6() void {
// :18:14: error: missing tuple field with index 1
// :25:14: error: missing tuple field with index 1
// :43:14: error: expected at most 2 tuple fields; found 3
- // :50:30: error: index '2' out of bounds of tuple 'struct{comptime comptime_int = 123, u32}'
+ // :50:30: error: index '2' out of bounds of tuple 'struct { comptime comptime_int = 123, u32 }'
// :63:37: error: missing tuple field with index 3
- // :58:32: note: struct declared here
@@ -7,4 +7,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
- // :3:11: error: expected type '@TypeOf(.{})', found 'struct{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
+ // :3:11: error: expected type '@TypeOf(.{})', found 'struct { comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3 }'
@@ -440,7 +440,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.add("std.log per scope log level override",
\\const std = @import("std");
\\
- \\pub const std_options = .{
+ \\pub const std_options: std.Options = .{
\\ .log_level = .debug,
\\
\\ .log_scope_levels = &.{
@@ -497,7 +497,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void {
cases.add("std.heap.LoggingAllocator logs to std.log",
\\const std = @import("std");
\\
- \\pub const std_options = .{
+ \\pub const std_options: std.Options = .{
\\ .log_level = .debug,
\\ .logFn = log,
\\};
@@ -1532,18 +1532,18 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
,
&.{
\\(lldb) frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2
- \\(std.multi_array_list.MultiArrayList(main.Elem0)) list0 = len=3 capacity=8 {
- \\ (root.main.Elem0) [0] = {
+ \\(std.multi_array_list.MultiArrayList(struct { u32, u8, u16 })) list0 = len=3 capacity=8 {
+ \\ (std.struct { u32, u8, u16 }) [0] = {
\\ (u32) .@"0" = 1
\\ (u8) .@"1" = 2
\\ (u16) .@"2" = 3
\\ }
- \\ (root.main.Elem0) [1] = {
+ \\ (std.struct { u32, u8, u16 }) [1] = {
\\ (u32) .@"0" = 4
\\ (u8) .@"1" = 5
\\ (u16) .@"2" = 6
\\ }
- \\ (root.main.Elem0) [2] = {
+ \\ (std.struct { u32, u8, u16 }) [2] = {
\\ (u32) .@"0" = 7
\\ (u8) .@"1" = 8
\\ (u16) .@"2" = 9
@@ -1551,17 +1551,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\}
\\(usize) list0.len = 3
\\(usize) list0.capacity = 8
- \\(root.main.Elem0) list0[0] = {
+ \\(std.struct { u32, u8, u16 }) list0[0] = {
\\ (u32) .@"0" = 1
\\ (u8) .@"1" = 2
\\ (u16) .@"2" = 3
\\}
- \\(root.main.Elem0) list0[1] = {
+ \\(std.struct { u32, u8, u16 }) list0[1] = {
\\ (u32) .@"0" = 4
\\ (u8) .@"1" = 5
\\ (u16) .@"2" = 6
\\}
- \\(root.main.Elem0) list0[2] = {
+ \\(std.struct { u32, u8, u16 }) list0[2] = {
\\ (u32) .@"0" = 7
\\ (u8) .@"1" = 8
\\ (u16) .@"2" = 9
@@ -1582,18 +1582,18 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\ (u16) [2] = 9
\\}
\\(lldb) frame variable --show-types -- slice0 slice0.len slice0.capacity slice0[0] slice0[1] slice0[2] slice0.0 slice0.1 slice0.2
- \\(std.multi_array_list.MultiArrayList(main.Elem0).Slice) slice0 = len=3 capacity=8 {
- \\ (root.main.Elem0) [0] = {
+ \\(std.multi_array_list.MultiArrayList(struct { u32, u8, u16 }).Slice) slice0 = len=3 capacity=8 {
+ \\ (std.struct { u32, u8, u16 }) [0] = {
\\ (u32) .@"0" = 1
\\ (u8) .@"1" = 2
\\ (u16) .@"2" = 3
\\ }
- \\ (root.main.Elem0) [1] = {
+ \\ (std.struct { u32, u8, u16 }) [1] = {
\\ (u32) .@"0" = 4
\\ (u8) .@"1" = 5
\\ (u16) .@"2" = 6
\\ }
- \\ (root.main.Elem0) [2] = {
+ \\ (std.struct { u32, u8, u16 }) [2] = {
\\ (u32) .@"0" = 7
\\ (u8) .@"1" = 8
\\ (u16) .@"2" = 9
@@ -1601,17 +1601,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void {
\\}
\\(usize) slice0.len = 3
\\(usize) slice0.capacity = 8
- \\(root.main.Elem0) slice0[0] = {
+ \\(std.struct { u32, u8, u16 }) slice0[0] = {
\\ (u32) .@"0" = 1
\\ (u8) .@"1" = 2
\\ (u16) .@"2" = 3
\\}
- \\(root.main.Elem0) slice0[1] = {
+ \\(std.struct { u32, u8, u16 }) slice0[1] = {
\\ (u32) .@"0" = 4
\\ (u8) .@"1" = 5
\\ (u16) .@"2" = 6
\\}
- \\(root.main.Elem0) slice0[2] = {
+ \\(std.struct { u32, u8, u16 }) slice0[2] = {
\\ (u32) .@"0" = 7
\\ (u8) .@"1" = 8
\\ (u16) .@"2" = 9
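The debugger expectations above now print the element type as `struct { u32, u8, u16 }` because the named `main.Elem0` wrapper was replaced by a tuple passed straight to `std.MultiArrayList`. A sketch of that usage (values and field access are illustrative):

    const std = @import("std");

    test "MultiArrayList over a tuple element type" {
        const Elem = struct { u32, u8, u16 };
        var list: std.MultiArrayList(Elem) = .{};
        defer list.deinit(std.testing.allocator);
        try list.append(std.testing.allocator, .{ 1, 2, 3 });
        // Tuple fields are addressed by their index names, "0", "1", "2".
        try std.testing.expect(list.items(.@"0")[0] == 1);
        try std.testing.expect(list.items(.@"1")[0] == 2);
    }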
@@ -1,7 +1,7 @@
const std = @import("std");
const build_options = @import("build_options");

- pub const std_options = .{
+ pub const std_options: std.Options = .{
.keep_sigpipe = build_options.keep_sigpipe,
};

@@ -1,6 +1,6 @@
const std = @import("std");

- pub const std_options = .{
+ pub const std_options: std.Options = .{
.logFn = log,
};