From d11bbde5f9c64ef58405604601d55f88bb5d5f3a Mon Sep 17 00:00:00 2001
From: mlugg
Date: Sat, 26 Oct 2024 23:13:58 +0100
Subject: [PATCH] compiler: remove anonymous struct types, unify all tuples

This commit reworks how anonymous struct literals and tuples work.

Previously, an untyped anonymous struct literal (e.g.
`const x = .{ .a = 123 }`) was given an "anonymous struct type", which
is a special kind of struct which coerces using structural equivalence.
This mechanism was a holdover from before we used RLS / result types as
the primary mechanism of type inference. This commit changes the
language so that the type assigned here is a "normal" struct type. It
uses a form of equivalence based on the AST node and the type's
structure, much like a reified (`@Type`) type.

Additionally, tuples have been simplified. The distinction between
"simple" and "complex" tuple types is eliminated. All tuples, even
those explicitly declared using `struct { ... }` syntax, use structural
equivalence, and do not undergo staged type resolution. Tuples are very
restricted: they cannot have non-`auto` layouts, cannot have aligned
fields, and cannot have default values with the exception of
`comptime` fields. Tuples currently do not have optimized layout, but
this can be changed in the future.

This change simplifies the language, and fixes some problematic
coercions through pointers which led to unintuitive behavior.

Resolves: #16865
---
 lib/compiler/aro/aro/Builtins.zig | 2 +-
 lib/compiler/aro/aro/Parser.zig | 2 +-
 lib/compiler/aro/aro/text_literal.zig | 2 +-
 lib/compiler/aro_translate_c.zig | 2 +-
 lib/compiler/test_runner.zig | 2 +-
 lib/std/SemanticVersion.zig | 2 +-
 lib/std/Target.zig | 2 +-
 lib/std/array_list.zig | 2 +-
 lib/std/crypto/phc_encoding.zig | 3 +-
 lib/std/meta.zig | 2 +-
 lib/std/zig/AstGen.zig | 189 ++--
 lib/std/zig/BuiltinFn.zig | 7 +-
 lib/std/zig/Zir.zig | 79 +-
 lib/std/zig/system/darwin/macos.zig | 6 +-
 src/Air.zig | 4 +-
 src/Air/types_resolved.zig | 2 +-
 src/Compilation.zig | 2 +-
 src/InternPool.zig | 339 +++-----
 src/Sema.zig | 808 +++++++++---------
 src/Sema/bitcast.zig | 2 +-
 src/Type.zig | 178 ++--
 src/Value.zig | 2 +-
 src/Zcu.zig | 35 +-
 src/Zcu/PerThread.zig | 22 +-
 src/arch/sparc64/CodeGen.zig | 12 +-
 src/arch/wasm/CodeGen.zig | 6 +-
 src/arch/x86_64/CodeGen.zig | 2 +-
 src/codegen.zig | 6 +-
 src/codegen/c.zig | 41 +-
 src/codegen/c/Type.zig | 20 +-
 src/codegen/llvm.zig | 31 +-
 src/codegen/spirv.zig | 63 +-
 src/link/Dwarf.zig | 49 +-
 src/link/Plan9.zig | 2 +-
 src/main.zig | 2 +-
 src/print_value.zig | 4 +-
 src/print_zir.zig | 36 +-
 src/translate_c.zig | 13 +-
 test/behavior.zig | 1 -
 test/behavior/array.zig | 36 +-
 test/behavior/cast.zig | 26 -
 test/behavior/empty_file_level_struct.zig | 1 -
 test/behavior/empty_file_level_union.zig | 1 -
 test/behavior/empty_tuple_fields.zig | 28 -
 test/behavior/struct.zig | 95 +-
 test/behavior/tuple.zig | 36 +-
 test/behavior/tuple_declarations.zig | 6 +-
 test/behavior/union.zig | 70 --
 ...ray slice sentinel mismatch non-scalar.zig | 5 +-
 .../bogus_method_call_on_slice.zig | 3 +-
 .../compile_errors/coerce_anon_struct.zig | 11 +
 .../destructure_error_union.zig | 2 +-
 .../cases/compile_errors/file_level_tuple.zig | 6 +
 .../invalid_peer_type_resolution.zig | 22 +-
 ...ssing_field_in_struct_value_expression.zig | 2 -
 ...thmetic_on_vector_with_undefined_elems.zig | 6 +-
 .../compile_errors/tuple_init_edge_cases.zig | 3 +-
 ...type_mismatch_with_tuple_concatenation.zig | 2 +-
 test/compare_output.zig | 4 +-
 test/src/Debugger.zig | 28 +-
test/standalone/sigpipe/breakpipe.zig | 2 +- test/standalone/simple/issue_7030.zig | 2 +- 62 files changed, 1067 insertions(+), 1314 deletions(-) delete mode 100644 test/behavior/empty_file_level_struct.zig delete mode 100644 test/behavior/empty_file_level_union.zig delete mode 100644 test/behavior/empty_tuple_fields.zig create mode 100644 test/cases/compile_errors/coerce_anon_struct.zig create mode 100644 test/cases/compile_errors/file_level_tuple.zig diff --git a/lib/compiler/aro/aro/Builtins.zig b/lib/compiler/aro/aro/Builtins.zig index fa92de328a..6443a6b6d4 100644 --- a/lib/compiler/aro/aro/Builtins.zig +++ b/lib/compiler/aro/aro/Builtins.zig @@ -157,7 +157,7 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *c .len = element_count, .elem = child_ty, }; - const vector_ty = .{ .specifier = .vector, .data = .{ .array = arr_ty } }; + const vector_ty: Type = .{ .specifier = .vector, .data = .{ .array = arr_ty } }; builder.specifier = Type.Builder.fromType(vector_ty); }, .q => { diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index 00857c65e1..85053b7e48 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -8095,7 +8095,7 @@ fn primaryExpr(p: *Parser) Error!Result { fn makePredefinedIdentifier(p: *Parser, strings_top: usize) !Result { const end: u32 = @intCast(p.strings.items.len); - const elem_ty = .{ .specifier = .char, .qual = .{ .@"const" = true } }; + const elem_ty: Type = .{ .specifier = .char, .qual = .{ .@"const" = true } }; const arr_ty = try p.arena.create(Type.Array); arr_ty.* = .{ .elem = elem_ty, .len = end - strings_top }; const ty: Type = .{ .specifier = .array, .data = .{ .array = arr_ty } }; diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig index d9f6b2a88b..7bc8fd95cb 100644 --- a/lib/compiler/aro/aro/text_literal.zig +++ b/lib/compiler/aro/aro/text_literal.zig @@ -188,7 +188,7 @@ pub const Parser = struct { pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void { if (self.errored) return; self.errored = true; - const diagnostic = .{ .tag = tag, .extra = extra }; + const diagnostic: CharDiagnostic = .{ .tag = tag, .extra = extra }; if (self.errors_len == self.errors_buffer.len) { self.errors_buffer[self.errors_buffer.len - 1] = diagnostic; } else { diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig index 910d12d32b..6be33a196f 100644 --- a/lib/compiler/aro_translate_c.zig +++ b/lib/compiler/aro_translate_c.zig @@ -749,7 +749,7 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH const is_const = is_fn_proto or child_type.isConst(); const is_volatile = child_type.qual.@"volatile"; const elem_type = try transType(c, scope, child_type, qual_handling, source_loc); - const ptr_info = .{ + const ptr_info: @FieldType(ast.Payload.Pointer, "data") = .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type, diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index f88354623d..4f3b40656c 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -6,7 +6,7 @@ const io = std.io; const testing = std.testing; const assert = std.debug.assert; -pub const std_options = .{ +pub const std_options: std.Options = .{ .logFn = log, }; diff --git a/lib/std/SemanticVersion.zig b/lib/std/SemanticVersion.zig index bde3e906d8..7cb3888e54 100644 --- a/lib/std/SemanticVersion.zig +++ b/lib/std/SemanticVersion.zig @@ 
-299,7 +299,7 @@ test "precedence" { test "zig_version" { // An approximate Zig build that predates this test. - const older_version = .{ .major = 0, .minor = 8, .patch = 0, .pre = "dev.874" }; + const older_version: Version = .{ .major = 0, .minor = 8, .patch = 0, .pre = "dev.874" }; // Simulated compatibility check using Zig version. const compatible = comptime @import("builtin").zig_version.order(older_version) == .gt; diff --git a/lib/std/Target.zig b/lib/std/Target.zig index def304bc6c..8b021b5429 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -509,7 +509,7 @@ pub const Os = struct { .max = .{ .major = 6, .minor = 10, .patch = 3 }, }, .glibc = blk: { - const default_min = .{ .major = 2, .minor = 28, .patch = 0 }; + const default_min: std.SemanticVersion = .{ .major = 2, .minor = 28, .patch = 0 }; for (std.zig.target.available_libcs) |libc| { // We don't know the ABI here. We can get away with not checking it diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index ac1b144690..197b8c7fba 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -100,7 +100,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// of this ArrayList. Empties this ArrayList. pub fn moveToUnmanaged(self: *Self) ArrayListAlignedUnmanaged(T, alignment) { const allocator = self.allocator; - const result = .{ .items = self.items, .capacity = self.capacity }; + const result: ArrayListAlignedUnmanaged(T, alignment) = .{ .items = self.items, .capacity = self.capacity }; self.* = init(allocator); return result; } diff --git a/lib/std/crypto/phc_encoding.zig b/lib/std/crypto/phc_encoding.zig index 3442073632..ba48a9954f 100644 --- a/lib/std/crypto/phc_encoding.zig +++ b/lib/std/crypto/phc_encoding.zig @@ -258,8 +258,7 @@ fn kvSplit(str: []const u8) !struct { key: []const u8, value: []const u8 } { var it = mem.splitScalar(u8, str, kv_delimiter_scalar); const key = it.first(); const value = it.next() orelse return Error.InvalidEncoding; - const ret = .{ .key = key, .value = value }; - return ret; + return .{ .key = key, .value = value }; } test "phc format - encoding/decoding" { diff --git a/lib/std/meta.zig b/lib/std/meta.zig index ea81c87648..0ea83bb11e 100644 --- a/lib/std/meta.zig +++ b/lib/std/meta.zig @@ -1018,7 +1018,7 @@ fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type { .type = T, .default_value = null, .is_comptime = false, - .alignment = if (@sizeOf(T) > 0) @alignOf(T) else 0, + .alignment = 0, }; } diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index dc39ce62f5..807d634886 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -1711,7 +1711,7 @@ fn structInitExpr( return rvalue(gz, ri, val, node); }, .none, .ref, .inferred_ptr => { - return rvalue(gz, ri, .empty_struct, node); + return rvalue(gz, ri, .empty_tuple, node); }, .destructure => |destructure| { return astgen.failNodeNotes(node, "empty initializer cannot be destructured", .{}, &.{ @@ -1888,6 +1888,8 @@ fn structInitExprAnon( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInitAnon{ + .abs_node = node, + .abs_line = astgen.source_line, .fields_len = @intCast(struct_init.ast.fields.len), }); const field_size = @typeInfo(Zir.Inst.StructInitAnon.Item).@"struct".fields.len; @@ -1919,6 +1921,8 @@ fn structInitExprTyped( const tree = astgen.tree; const payload_index = try addExtra(astgen, Zir.Inst.StructInit{ + .abs_node = node, + .abs_line = astgen.source_line, .fields_len = 
@intCast(struct_init.ast.fields.len), }); const field_size = @typeInfo(Zir.Inst.StructInit.Item).@"struct".fields.len; @@ -5007,6 +5011,25 @@ fn structDeclInner( layout: std.builtin.Type.ContainerLayout, backing_int_node: Ast.Node.Index, ) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + + { + const is_tuple = for (container_decl.ast.members) |member_node| { + const container_field = tree.fullContainerField(member_node) orelse continue; + if (container_field.ast.tuple_like) break true; + } else false; + + if (is_tuple) { + if (node == 0) { + return astgen.failTok(0, "file cannot be a tuple", .{}); + } else { + return tupleDecl(gz, scope, node, container_decl, layout, backing_int_node); + } + } + } + const decl_inst = try gz.reserveInstructionIndex(); if (container_decl.ast.members.len == 0 and backing_int_node == 0) { @@ -5019,7 +5042,6 @@ fn structDeclInner( .has_backing_int = false, .known_non_opv = false, .known_comptime_only = false, - .is_tuple = false, .any_comptime_fields = false, .any_default_inits = false, .any_aligned_fields = false, @@ -5028,10 +5050,6 @@ fn structDeclInner( return decl_inst.toRef(); } - const astgen = gz.astgen; - const gpa = astgen.gpa; - const tree = astgen.tree; - var namespace: Scope.Namespace = .{ .parent = scope, .node = node, @@ -5106,46 +5124,6 @@ fn structDeclInner( // No defer needed here because it is handled by `wip_members.deinit()` above. const bodies_start = astgen.scratch.items.len; - const node_tags = tree.nodes.items(.tag); - const is_tuple = for (container_decl.ast.members) |member_node| { - const container_field = tree.fullContainerField(member_node) orelse continue; - if (container_field.ast.tuple_like) break true; - } else false; - - if (is_tuple) switch (layout) { - .auto => {}, - .@"extern" => return astgen.failNode(node, "extern tuples are not supported", .{}), - .@"packed" => return astgen.failNode(node, "packed tuples are not supported", .{}), - }; - - if (is_tuple) for (container_decl.ast.members) |member_node| { - switch (node_tags[member_node]) { - .container_field_init, - .container_field_align, - .container_field, - .@"comptime", - .test_decl, - => continue, - else => { - const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) { - .container_field_init, - .container_field_align, - .container_field, - => break maybe_tuple, - else => {}, - } else unreachable; - return astgen.failNodeNotes( - member_node, - "tuple declarations cannot contain declarations", - .{}, - &[_]u32{ - try astgen.errNoteNode(tuple_member, "tuple field here", .{}), - }, - ); - }, - } - }; - const old_hasher = astgen.src_hasher; defer astgen.src_hasher = old_hasher; astgen.src_hasher = std.zig.SrcHasher.init(.{}); @@ -5167,16 +5145,10 @@ fn structDeclInner( astgen.src_hasher.update(tree.getNodeSource(member_node)); - if (!is_tuple) { - const field_name = try astgen.identAsString(member.ast.main_token); - - member.convertToNonTupleLike(astgen.tree.nodes); - assert(!member.ast.tuple_like); - - wip_members.appendToField(@intFromEnum(field_name)); - } else if (!member.ast.tuple_like) { - return astgen.failTok(member.ast.main_token, "tuple field has a name", .{}); - } + const field_name = try astgen.identAsString(member.ast.main_token); + member.convertToNonTupleLike(astgen.tree.nodes); + assert(!member.ast.tuple_like); + wip_members.appendToField(@intFromEnum(field_name)); const doc_comment_index = try astgen.docCommentAsString(member.firstToken()); 
wip_members.appendToField(@intFromEnum(doc_comment_index)); @@ -5270,7 +5242,6 @@ fn structDeclInner( .has_backing_int = backing_int_ref != .none, .known_non_opv = known_non_opv, .known_comptime_only = known_comptime_only, - .is_tuple = is_tuple, .any_comptime_fields = any_comptime_fields, .any_default_inits = any_default_inits, .any_aligned_fields = any_aligned_fields, @@ -5300,6 +5271,106 @@ fn structDeclInner( return decl_inst.toRef(); } +fn tupleDecl( + gz: *GenZir, + scope: *Scope, + node: Ast.Node.Index, + container_decl: Ast.full.ContainerDecl, + layout: std.builtin.Type.ContainerLayout, + backing_int_node: Ast.Node.Index, +) InnerError!Zir.Inst.Ref { + const astgen = gz.astgen; + const gpa = astgen.gpa; + const tree = astgen.tree; + + const node_tags = tree.nodes.items(.tag); + + switch (layout) { + .auto => {}, + .@"extern" => return astgen.failNode(node, "extern tuples are not supported", .{}), + .@"packed" => return astgen.failNode(node, "packed tuples are not supported", .{}), + } + + if (backing_int_node != 0) { + return astgen.failNode(backing_int_node, "tuple does not support backing integer type", .{}); + } + + // We will use the scratch buffer, starting here, for the field data: + // 1. fields: { // for every `fields_len` (stored in `extended.small`) + // type: Inst.Ref, + // init: Inst.Ref, // `.none` for non-`comptime` fields + // } + const fields_start = astgen.scratch.items.len; + defer astgen.scratch.items.len = fields_start; + + try astgen.scratch.ensureUnusedCapacity(gpa, container_decl.ast.members.len * 2); + + for (container_decl.ast.members) |member_node| { + const field = tree.fullContainerField(member_node) orelse { + const tuple_member = for (container_decl.ast.members) |maybe_tuple| switch (node_tags[maybe_tuple]) { + .container_field_init, + .container_field_align, + .container_field, + => break maybe_tuple, + else => {}, + } else unreachable; + return astgen.failNodeNotes( + member_node, + "tuple declarations cannot contain declarations", + .{}, + &.{try astgen.errNoteNode(tuple_member, "tuple field here", .{})}, + ); + }; + + if (!field.ast.tuple_like) { + return astgen.failTok(field.ast.main_token, "tuple field has a name", .{}); + } + + if (field.ast.align_expr != 0) { + return astgen.failTok(field.ast.main_token, "tuple field has alignment", .{}); + } + + if (field.ast.value_expr != 0 and field.comptime_token == null) { + return astgen.failTok(field.ast.main_token, "non-comptime tuple field has default initialization value", .{}); + } + + if (field.ast.value_expr == 0 and field.comptime_token != null) { + return astgen.failTok(field.comptime_token.?, "comptime field without default initialization value", .{}); + } + + const field_type_ref = try typeExpr(gz, scope, field.ast.type_expr); + astgen.scratch.appendAssumeCapacity(@intFromEnum(field_type_ref)); + + if (field.ast.value_expr != 0) { + const field_init_ref = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = field_type_ref } }, field.ast.value_expr); + astgen.scratch.appendAssumeCapacity(@intFromEnum(field_init_ref)); + } else { + astgen.scratch.appendAssumeCapacity(@intFromEnum(Zir.Inst.Ref.none)); + } + } + + const fields_len = std.math.cast(u16, container_decl.ast.members.len) orelse { + return astgen.failNode(node, "this compiler implementation only supports 65535 tuple fields", .{}); + }; + + const extra_trail = astgen.scratch.items[fields_start..]; + assert(extra_trail.len == fields_len * 2); + try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.TupleDecl).@"struct".fields.len + 
extra_trail.len); + const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.TupleDecl{ + .src_node = gz.nodeIndexToRelative(node), + }); + astgen.extra.appendSliceAssumeCapacity(extra_trail); + + return gz.add(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .tuple_decl, + .small = fields_len, + .operand = payload_index, + } }, + }); +} + fn unionDeclInner( gz: *GenZir, scope: *Scope, @@ -11172,7 +11243,7 @@ fn rvalueInner( as_ty | @intFromEnum(Zir.Inst.Ref.slice_const_u8_sentinel_0_type), as_ty | @intFromEnum(Zir.Inst.Ref.anyerror_void_error_union_type), as_ty | @intFromEnum(Zir.Inst.Ref.generic_poison_type), - as_ty | @intFromEnum(Zir.Inst.Ref.empty_struct_type), + as_ty | @intFromEnum(Zir.Inst.Ref.empty_tuple_type), as_comptime_int | @intFromEnum(Zir.Inst.Ref.zero), as_comptime_int | @intFromEnum(Zir.Inst.Ref.one), as_comptime_int | @intFromEnum(Zir.Inst.Ref.negative_one), @@ -13173,7 +13244,6 @@ const GenZir = struct { layout: std.builtin.Type.ContainerLayout, known_non_opv: bool, known_comptime_only: bool, - is_tuple: bool, any_comptime_fields: bool, any_default_inits: bool, any_aligned_fields: bool, @@ -13217,7 +13287,6 @@ const GenZir = struct { .has_backing_int = args.has_backing_int, .known_non_opv = args.known_non_opv, .known_comptime_only = args.known_comptime_only, - .is_tuple = args.is_tuple, .name_strategy = gz.anon_name_strategy, .layout = args.layout, .any_comptime_fields = args.any_comptime_fields, diff --git a/lib/std/zig/BuiltinFn.zig b/lib/std/zig/BuiltinFn.zig index ad9176b0ab..7ad5bb1a87 100644 --- a/lib/std/zig/BuiltinFn.zig +++ b/lib/std/zig/BuiltinFn.zig @@ -1,5 +1,3 @@ -const std = @import("std"); - pub const Tag = enum { add_with_overflow, addrspace_cast, @@ -147,7 +145,7 @@ param_count: ?u8, pub const list = list: { @setEvalBranchQuota(3000); - break :list std.StaticStringMap(@This()).initComptime(.{ + break :list std.StaticStringMap(BuiltinFn).initComptime([_]struct { []const u8, BuiltinFn }{ .{ "@addWithOverflow", .{ @@ -1011,3 +1009,6 @@ pub const list = list: { }, }); }; + +const std = @import("std"); +const BuiltinFn = @This(); diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index ab6bcf1b0e..00a48e21f7 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -1887,6 +1887,10 @@ pub const Inst = struct { /// `operand` is payload index to `OpaqueDecl`. /// `small` is `OpaqueDecl.Small`. opaque_decl, + /// A tuple type. Note that tuples are not namespace/container types. + /// `operand` is payload index to `TupleDecl`. + /// `small` is `fields_len: u16`. + tuple_decl, /// Implements the `@This` builtin. /// `operand` is `src_node: i32`. this, @@ -2187,7 +2191,7 @@ pub const Inst = struct { anyerror_void_error_union_type, adhoc_inferred_error_set_type, generic_poison_type, - empty_struct_type, + empty_tuple_type, undef, zero, zero_usize, @@ -2202,7 +2206,7 @@ pub const Inst = struct { null_value, bool_true, bool_false, - empty_struct, + empty_tuple, generic_poison, /// This Ref does not correspond to any ZIR instruction or constant @@ -3041,7 +3045,7 @@ pub const Inst = struct { /// 0b0X00: whether corresponding field is comptime /// 0bX000: whether corresponding field has a type expression /// 9. fields: { // for every fields_len - /// field_name: u32, // if !is_tuple + /// field_name: u32, /// doc_comment: NullTerminatedString, // .empty if no doc comment /// field_type: Ref, // if corresponding bit is not set. none means anytype. 
/// field_type_body_len: u32, // if corresponding bit is set @@ -3071,13 +3075,12 @@ pub const Inst = struct { has_backing_int: bool, known_non_opv: bool, known_comptime_only: bool, - is_tuple: bool, name_strategy: NameStrategy, layout: std.builtin.Type.ContainerLayout, any_default_inits: bool, any_comptime_fields: bool, any_aligned_fields: bool, - _: u2 = undefined, + _: u3 = undefined, }; }; @@ -3302,6 +3305,15 @@ pub const Inst = struct { }; }; + /// Trailing: + /// 1. fields: { // for every `fields_len` (stored in `extended.small`) + /// type: Inst.Ref, + /// init: Inst.Ref, // `.none` for non-`comptime` fields + /// } + pub const TupleDecl = struct { + src_node: i32, // relative + }; + /// Trailing: /// { // for every fields_len /// field_name: NullTerminatedString // null terminated string index @@ -3329,6 +3341,11 @@ pub const Inst = struct { /// Trailing is an item per field. pub const StructInit = struct { + /// If this is an anonymous initialization (the operand is poison), this instruction becomes the owner of a type. + /// To resolve source locations, we need an absolute source node. + abs_node: Ast.Node.Index, + /// Likewise, we need an absolute line number. + abs_line: u32, fields_len: u32, pub const Item = struct { @@ -3344,6 +3361,11 @@ pub const Inst = struct { /// TODO make this instead array of inits followed by array of names because /// it will be simpler Sema code and better for CPU cache. pub const StructInitAnon = struct { + /// This is an anonymous initialization, meaning this instruction becomes the owner of a type. + /// To resolve source locations, we need an absolute source node. + abs_node: Ast.Node.Index, + /// Likewise, we need an absolute line number. + abs_line: u32, fields_len: u32, pub const Item = struct { @@ -3741,6 +3763,8 @@ fn findDeclsInner( defers: *std.AutoHashMapUnmanaged(u32, void), inst: Inst.Index, ) Allocator.Error!void { + comptime assert(Zir.inst_tracking_version == 0); + const tags = zir.instructions.items(.tag); const datas = zir.instructions.items(.data); @@ -3884,9 +3908,6 @@ fn findDeclsInner( .struct_init_empty, .struct_init_empty_result, .struct_init_empty_ref_result, - .struct_init_anon, - .struct_init, - .struct_init_ref, .validate_struct_init_ty, .validate_struct_init_result_ty, .validate_ptr_struct_init, @@ -3978,6 +3999,12 @@ fn findDeclsInner( .restore_err_ret_index_fn_entry, => return, + // Struct initializations need tracking, as they may create anonymous struct types. + .struct_init, + .struct_init_ref, + .struct_init_anon, + => return list.append(gpa, inst), + .extended => { const extended = datas[@intFromEnum(inst)].extended; switch (extended.opcode) { @@ -4034,6 +4061,7 @@ fn findDeclsInner( .builtin_value, .branch_hint, .inplace_arith_result_ty, + .tuple_decl, => return, // `@TypeOf` has a body. @@ -4110,8 +4138,7 @@ fn findDeclsInner( const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - fields_extra_index += @intFromBool(!small.is_tuple); // field_name - fields_extra_index += 1; // doc_comment + fields_extra_index += 2; // field_name, doc_comment if (has_type_body) { const field_type_body_len = zir.extra[fields_extra_index]; @@ -4736,3 +4763,35 @@ pub fn getAssociatedSrcHash(zir: Zir, inst: Zir.Inst.Index) ?std.zig.SrcHash { else => return null, } } + +/// When the ZIR update tracking logic must be modified to consider new instructions, +/// change this constant to trigger compile errors at all relevant locations. 
+pub const inst_tracking_version = 0; + +/// Asserts that a ZIR instruction is tracked across incremental updates, and +/// thus may be given an `InternPool.TrackedInst`. +pub fn assertTrackable(zir: Zir, inst_idx: Zir.Inst.Index) void { + comptime assert(Zir.inst_tracking_version == 0); + const inst = zir.instructions.get(@intFromEnum(inst_idx)); + switch (inst.tag) { + .struct_init, + .struct_init_ref, + .struct_init_anon, + => {}, // tracked in order, as the owner instructions of anonymous struct types + .func, + .func_inferred, + .func_fancy, + => {}, // tracked in order, as the owner instructions of function bodies + .declaration => {}, // tracked by correlating names in the namespace of the parent container + .extended => switch (inst.data.extended.opcode) { + .struct_decl, + .union_decl, + .enum_decl, + .opaque_decl, + .reify, + => {}, // tracked in order, as the owner instructions of explicit container types + else => unreachable, // assertion failure; not trackable + }, + else => unreachable, // assertion failure; not trackable + } +} diff --git a/lib/std/zig/system/darwin/macos.zig b/lib/std/zig/system/darwin/macos.zig index 3201f102b6..f5f413cb4e 100644 --- a/lib/std/zig/system/darwin/macos.zig +++ b/lib/std/zig/system/darwin/macos.zig @@ -277,7 +277,7 @@ const SystemVersionTokenizer = struct { }; test "detect" { - const cases = .{ + const cases: [5]struct { []const u8, std.SemanticVersion } = .{ .{ \\ \\ @@ -388,8 +388,8 @@ test "detect" { inline for (cases) |case| { const ver0 = try parseSystemVersion(case[0]); - const ver1: std.SemanticVersion = case[1]; - try testing.expectEqual(@as(std.math.Order, .eq), ver0.order(ver1)); + const ver1 = case[1]; + try testing.expectEqual(std.math.Order.eq, ver0.order(ver1)); } } diff --git a/src/Air.zig b/src/Air.zig index c58020daaa..3aa5f317c0 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -962,7 +962,7 @@ pub const Inst = struct { anyerror_void_error_union_type = @intFromEnum(InternPool.Index.anyerror_void_error_union_type), adhoc_inferred_error_set_type = @intFromEnum(InternPool.Index.adhoc_inferred_error_set_type), generic_poison_type = @intFromEnum(InternPool.Index.generic_poison_type), - empty_struct_type = @intFromEnum(InternPool.Index.empty_struct_type), + empty_tuple_type = @intFromEnum(InternPool.Index.empty_tuple_type), undef = @intFromEnum(InternPool.Index.undef), zero = @intFromEnum(InternPool.Index.zero), zero_usize = @intFromEnum(InternPool.Index.zero_usize), @@ -977,7 +977,7 @@ pub const Inst = struct { null_value = @intFromEnum(InternPool.Index.null_value), bool_true = @intFromEnum(InternPool.Index.bool_true), bool_false = @intFromEnum(InternPool.Index.bool_false), - empty_struct = @intFromEnum(InternPool.Index.empty_struct), + empty_tuple = @intFromEnum(InternPool.Index.empty_tuple), generic_poison = @intFromEnum(InternPool.Index.generic_poison), /// This Ref does not correspond to any AIR instruction or constant diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index 5a149b08d2..098cb29b22 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -501,7 +501,7 @@ pub fn checkType(ty: Type, zcu: *Zcu) bool { .auto, .@"extern" => struct_obj.flagsUnordered(ip).fully_resolved, }; }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (0..tuple.types.len) |i| { const field_is_comptime = tuple.values.get(ip)[i] != .none; if (field_is_comptime) continue; diff --git a/src/Compilation.zig b/src/Compilation.zig index 73185a91ea..2744a317c1 100644 --- a/src/Compilation.zig +++ 
b/src/Compilation.zig @@ -2081,7 +2081,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void { log.debug("CacheMode.whole cache miss for {s}", .{comp.root_name}); // Compile the artifacts to a temporary directory. - const tmp_artifact_directory = d: { + const tmp_artifact_directory: Directory = d: { const s = std.fs.path.sep_str; tmp_dir_rand_int = std.crypto.random.int(u64); const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int); diff --git a/src/InternPool.zig b/src/InternPool.zig index 22defd5a96..59db85681a 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -1787,10 +1787,11 @@ pub const Key = union(enum) { /// or was created with `@Type`. It is unique and based on a declaration. /// It may be a tuple, if declared like this: `struct {A, B, C}`. struct_type: NamespaceType, - /// This is an anonymous struct or tuple type which has no corresponding - /// declaration. It is used for types that have no `struct` keyword in the - /// source code, and were not created via `@Type`. - anon_struct_type: AnonStructType, + /// This is a tuple type. Tuples are logically similar to structs, but have some + /// important differences in semantics; they do not undergo staged type resolution, + /// so cannot be self-referential, and they are not considered container/namespace + /// types, so cannot have declarations and have structural equality properties. + tuple_type: TupleType, union_type: NamespaceType, opaque_type: NamespaceType, enum_type: NamespaceType, @@ -1919,27 +1920,10 @@ pub const Key = union(enum) { child: Index, }; - pub const AnonStructType = struct { + pub const TupleType = struct { types: Index.Slice, - /// This may be empty, indicating this is a tuple. - names: NullTerminatedString.Slice, /// These elements may be `none`, indicating runtime-known. values: Index.Slice, - - pub fn isTuple(self: AnonStructType) bool { - return self.names.len == 0; - } - - pub fn fieldName( - self: AnonStructType, - ip: *const InternPool, - index: usize, - ) OptionalNullTerminatedString { - if (self.names.len == 0) - return .none; - - return self.names.get(ip)[index].toOptional(); - } }; /// This is the hashmap key. To fetch other data associated with the type, see: @@ -1965,18 +1949,15 @@ pub const Key = union(enum) { /// The union for which this is a tag type. union_type: Index, }, - /// This type originates from a reification via `@Type`. - /// It is hased based on its ZIR instruction index and fields, attributes, etc. + /// This type originates from a reification via `@Type`, or from an anonymous initialization. + /// It is hashed based on its ZIR instruction index and fields, attributes, etc. /// To avoid making this key overly complex, the type-specific data is hased by Sema. reified: struct { - /// A `reify` instruction. + /// A `reify`, `struct_init`, `struct_init_ref`, or `struct_init_anon` instruction. zir_index: TrackedInst.Index, /// A hash of this type's attributes, fields, etc, generated by Sema. type_hash: u64, }, - /// This type is `@TypeOf(.{})`. - /// TODO: can we change the language spec to not special-case this type? 
- empty_struct: void, }; pub const FuncType = struct { @@ -2497,7 +2478,6 @@ pub const Key = union(enum) { std.hash.autoHash(&hasher, reified.zir_index); std.hash.autoHash(&hasher, reified.type_hash); }, - .empty_struct => {}, } return hasher.final(); }, @@ -2570,7 +2550,7 @@ pub const Key = union(enum) { const child = switch (ip.indexToKey(aggregate.ty)) { .array_type => |array_type| array_type.child, .vector_type => |vector_type| vector_type.child, - .anon_struct_type, .struct_type => .none, + .tuple_type, .struct_type => .none, else => unreachable, }; @@ -2625,11 +2605,10 @@ pub const Key = union(enum) { .error_set_type => |x| Hash.hash(seed, std.mem.sliceAsBytes(x.names.get(ip))), - .anon_struct_type => |anon_struct_type| { + .tuple_type => |tuple_type| { var hasher = Hash.init(seed); - for (anon_struct_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem); - for (anon_struct_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem); - for (anon_struct_type.names.get(ip)) |elem| std.hash.autoHash(&hasher, elem); + for (tuple_type.types.get(ip)) |elem| std.hash.autoHash(&hasher, elem); + for (tuple_type.values.get(ip)) |elem| std.hash.autoHash(&hasher, elem); return hasher.final(); }, @@ -2929,7 +2908,6 @@ pub const Key = union(enum) { return a_r.zir_index == b_r.zir_index and a_r.type_hash == b_r.type_hash; }, - .empty_struct => return true, } }, .aggregate => |a_info| { @@ -2981,11 +2959,10 @@ pub const Key = union(enum) { }, } }, - .anon_struct_type => |a_info| { - const b_info = b.anon_struct_type; + .tuple_type => |a_info| { + const b_info = b.tuple_type; return std.mem.eql(Index, a_info.types.get(ip), b_info.types.get(ip)) and - std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)) and - std.mem.eql(NullTerminatedString, a_info.names.get(ip), b_info.names.get(ip)); + std.mem.eql(Index, a_info.values.get(ip), b_info.values.get(ip)); }, .error_set_type => |a_info| { const b_info = b.error_set_type; @@ -3025,7 +3002,7 @@ pub const Key = union(enum) { .union_type, .opaque_type, .enum_type, - .anon_struct_type, + .tuple_type, .func_type, => .type_type, @@ -3054,7 +3031,7 @@ pub const Key = union(enum) { .void => .void_type, .null => .null_type, .false, .true => .bool_type, - .empty_struct => .empty_struct_type, + .empty_tuple => .empty_tuple_type, .@"unreachable" => .noreturn_type, .generic_poison => .generic_poison_type, }, @@ -3411,13 +3388,11 @@ pub const LoadedStructType = struct { // TODO: the non-fqn will be needed by the new dwarf structure /// The name of this struct type. name: NullTerminatedString, - /// The `Cau` within which type resolution occurs. `none` when the struct is `@TypeOf(.{})`. - cau: Cau.Index.Optional, - /// `none` when the struct is `@TypeOf(.{})`. - namespace: OptionalNamespaceIndex, + /// The `Cau` within which type resolution occurs. + cau: Cau.Index, + namespace: NamespaceIndex, /// Index of the `struct_decl` or `reify` ZIR instruction. - /// Only `none` when the struct is `@TypeOf(.{})`. 
- zir_index: TrackedInst.Index.Optional, + zir_index: TrackedInst.Index, layout: std.builtin.Type.ContainerLayout, field_names: NullTerminatedString.Slice, field_types: Index.Slice, @@ -3913,10 +3888,6 @@ pub const LoadedStructType = struct { @atomicStore(Tag.TypeStruct.Flags, flags_ptr, flags, .release); } - pub fn isTuple(s: LoadedStructType, ip: *InternPool) bool { - return s.layout != .@"packed" and s.flagsUnordered(ip).is_tuple; - } - pub fn hasReorderedFields(s: LoadedStructType) bool { return s.layout == .auto; } @@ -4008,24 +3979,6 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { const item = unwrapped_index.getItem(ip); switch (item.tag) { .type_struct => { - if (item.data == 0) return .{ - .tid = .main, - .extra_index = 0, - .name = .empty, - .cau = .none, - .namespace = .none, - .zir_index = .none, - .layout = .auto, - .field_names = NullTerminatedString.Slice.empty, - .field_types = Index.Slice.empty, - .field_inits = Index.Slice.empty, - .field_aligns = Alignment.Slice.empty, - .runtime_order = LoadedStructType.RuntimeOrder.Slice.empty, - .comptime_bits = LoadedStructType.ComptimeBits.empty, - .offsets = LoadedStructType.Offsets.empty, - .names_map = .none, - .captures = CaptureValue.Slice.empty, - }; const name: NullTerminatedString = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "name").?]); const cau: Cau.Index = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "cau").?]); const namespace: NamespaceIndex = @enumFromInt(extra_items[item.data + std.meta.fieldIndex(Tag.TypeStruct, "namespace").?]); @@ -4045,7 +3998,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; extra_index += captures_len; if (flags.is_reified) { - extra_index += 2; // PackedU64 + extra_index += 2; // type_hash: PackedU64 } const field_types: Index.Slice = .{ .tid = unwrapped_index.tid, @@ -4053,7 +4006,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .len = fields_len, }; extra_index += fields_len; - const names_map: OptionalMapIndex, const names = if (!flags.is_tuple) n: { + const names_map: OptionalMapIndex, const names = n: { const names_map: OptionalMapIndex = @enumFromInt(extra_list.view().items(.@"0")[extra_index]); extra_index += 1; const names: NullTerminatedString.Slice = .{ @@ -4063,7 +4016,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { }; extra_index += fields_len; break :n .{ names_map, names }; - } else .{ .none, NullTerminatedString.Slice.empty }; + }; const inits: Index.Slice = if (flags.any_default_inits) i: { const inits: Index.Slice = .{ .tid = unwrapped_index.tid, @@ -4114,9 +4067,9 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .tid = unwrapped_index.tid, .extra_index = item.data, .name = name, - .cau = cau.toOptional(), - .namespace = namespace.toOptional(), - .zir_index = zir_index.toOptional(), + .cau = cau, + .namespace = namespace, + .zir_index = zir_index, .layout = if (flags.is_extern) .@"extern" else .auto, .field_names = names, .field_types = field_types, @@ -4178,9 +4131,9 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType { .tid = unwrapped_index.tid, .extra_index = item.data, .name = name, - .cau = cau.toOptional(), - .namespace = namespace.toOptional(), - .zir_index = zir_index.toOptional(), + .cau = cau, + .namespace = namespace, + .zir_index = zir_index, .layout = .@"packed", .field_names = field_names, .field_types = 
field_types, @@ -4407,9 +4360,9 @@ pub const Item = struct { /// `primitives` in AstGen.zig. pub const Index = enum(u32) { pub const first_type: Index = .u0_type; - pub const last_type: Index = .empty_struct_type; + pub const last_type: Index = .empty_tuple_type; pub const first_value: Index = .undef; - pub const last_value: Index = .empty_struct; + pub const last_value: Index = .empty_tuple; u0_type, i0_type, @@ -4466,8 +4419,9 @@ pub const Index = enum(u32) { /// Used for the inferred error set of inline/comptime function calls. adhoc_inferred_error_set_type, generic_poison_type, - /// `@TypeOf(.{})` - empty_struct_type, + /// `@TypeOf(.{})`; a tuple with zero elements. + /// This is not the same as `struct {}`, since that is a struct rather than a tuple. + empty_tuple_type, /// `undefined` (untyped) undef, @@ -4497,8 +4451,8 @@ pub const Index = enum(u32) { bool_true, /// `false` bool_false, - /// `.{}` (untyped) - empty_struct, + /// `.{}` + empty_tuple, /// Used for generic parameters where the type and value /// is not known until generic function instantiation. @@ -4606,16 +4560,14 @@ pub const Index = enum(u32) { values: []Index, }, }; - const DataIsExtraIndexOfTypeStructAnon = struct { + const DataIsExtraIndexOfTypeTuple = struct { const @"data.fields_len" = opaque {}; - data: *TypeStructAnon, + data: *TypeTuple, @"trailing.types.len": *@"data.fields_len", @"trailing.values.len": *@"data.fields_len", - @"trailing.names.len": *@"data.fields_len", trailing: struct { types: []Index, values: []Index, - names: []NullTerminatedString, }, }; @@ -4649,10 +4601,9 @@ pub const Index = enum(u32) { simple_type: void, type_opaque: struct { data: *Tag.TypeOpaque }, type_struct: struct { data: *Tag.TypeStruct }, - type_struct_anon: DataIsExtraIndexOfTypeStructAnon, type_struct_packed: struct { data: *Tag.TypeStructPacked }, type_struct_packed_inits: struct { data: *Tag.TypeStructPacked }, - type_tuple_anon: DataIsExtraIndexOfTypeStructAnon, + type_tuple: DataIsExtraIndexOfTypeTuple, type_union: struct { data: *Tag.TypeUnion }, type_function: struct { const @"data.flags.has_comptime_bits" = opaque {}; @@ -4936,11 +4887,10 @@ pub const static_keys = [_]Key{ // generic_poison_type .{ .simple_type = .generic_poison }, - // empty_struct_type - .{ .anon_struct_type = .{ - .types = Index.Slice.empty, - .names = NullTerminatedString.Slice.empty, - .values = Index.Slice.empty, + // empty_tuple_type + .{ .tuple_type = .{ + .types = .empty, + .values = .empty, } }, .{ .simple_value = .undefined }, @@ -4991,7 +4941,7 @@ pub const static_keys = [_]Key{ .{ .simple_value = .null }, .{ .simple_value = .true }, .{ .simple_value = .false }, - .{ .simple_value = .empty_struct }, + .{ .simple_value = .empty_tuple }, .{ .simple_value = .generic_poison }, }; @@ -5071,20 +5021,16 @@ pub const Tag = enum(u8) { type_opaque, /// A non-packed struct type. /// data is 0 or extra index of `TypeStruct`. - /// data == 0 represents `@TypeOf(.{})`. type_struct, - /// An AnonStructType which stores types, names, and values for fields. - /// data is extra index of `TypeStructAnon`. - type_struct_anon, /// A packed struct, no fields have any init values. /// data is extra index of `TypeStructPacked`. type_struct_packed, /// A packed struct, one or more fields have init values. /// data is extra index of `TypeStructPacked`. type_struct_packed_inits, - /// An AnonStructType which has only types and values for fields. - /// data is extra index of `TypeStructAnon`. - type_tuple_anon, + /// A `TupleType`. 
+ /// data is extra index of `TypeTuple`. + type_tuple, /// A union type. /// `data` is extra index of `TypeUnion`. type_union, @@ -5299,9 +5245,8 @@ pub const Tag = enum(u8) { .simple_type => unreachable, .type_opaque => TypeOpaque, .type_struct => TypeStruct, - .type_struct_anon => TypeStructAnon, .type_struct_packed, .type_struct_packed_inits => TypeStructPacked, - .type_tuple_anon => TypeStructAnon, + .type_tuple => TypeTuple, .type_union => TypeUnion, .type_function => TypeFunction, @@ -5546,18 +5491,15 @@ pub const Tag = enum(u8) { /// 1. capture: CaptureValue // for each `captures_len` /// 2. type_hash: PackedU64 // if `is_reified` /// 3. type: Index for each field in declared order - /// 4. if not is_tuple: - /// names_map: MapIndex, - /// name: NullTerminatedString // for each field in declared order - /// 5. if any_default_inits: + /// 4. if any_default_inits: /// init: Index // for each field in declared order - /// 6. if any_aligned_fields: + /// 5. if any_aligned_fields: /// align: Alignment // for each field in declared order - /// 7. if any_comptime_fields: + /// 6. if any_comptime_fields: /// field_is_comptime_bits: u32 // minimal number of u32s needed, LSB is field 0 - /// 8. if not is_extern: + /// 7. if not is_extern: /// field_index: RuntimeOrder // for each field in runtime order - /// 9. field_offset: u32 // for each field in declared order, undef until layout_resolved + /// 8. field_offset: u32 // for each field in declared order, undef until layout_resolved pub const TypeStruct = struct { name: NullTerminatedString, cau: Cau.Index, @@ -5572,7 +5514,6 @@ pub const Tag = enum(u8) { is_extern: bool = false, known_non_opv: bool = false, requires_comptime: RequiresComptime = @enumFromInt(0), - is_tuple: bool = false, assumed_runtime_bits: bool = false, assumed_pointer_aligned: bool = false, any_comptime_fields: bool = false, @@ -5597,7 +5538,7 @@ pub const Tag = enum(u8) { // which `layout_resolved` does not ensure. fully_resolved: bool = false, is_reified: bool = false, - _: u7 = 0, + _: u8 = 0, }; }; @@ -5659,9 +5600,7 @@ pub const Repeated = struct { /// Trailing: /// 0. type: Index for each fields_len /// 1. value: Index for each fields_len -/// 2. name: NullTerminatedString for each fields_len -/// The set of field names is omitted when the `Tag` is `type_tuple_anon`. -pub const TypeStructAnon = struct { +pub const TypeTuple = struct { fields_len: u32, }; @@ -5708,8 +5647,8 @@ pub const SimpleValue = enum(u32) { void = @intFromEnum(Index.void_value), /// This is untyped `null`. null = @intFromEnum(Index.null_value), - /// This is the untyped empty struct literal: `.{}` - empty_struct = @intFromEnum(Index.empty_struct), + /// This is the untyped empty struct/array literal: `.{}` + empty_tuple = @intFromEnum(Index.empty_tuple), true = @intFromEnum(Index.bool_true), false = @intFromEnum(Index.bool_false), @"unreachable" = @intFromEnum(Index.unreachable_value), @@ -6266,11 +6205,10 @@ pub fn init(ip: *InternPool, gpa: Allocator, available_threads: usize) !void { // This inserts all the statically-known values into the intern pool in the // order expected. for (&static_keys, 0..) 
|key, key_index| switch (@as(Index, @enumFromInt(key_index))) { - .empty_struct_type => assert(try ip.getAnonStructType(gpa, .main, .{ + .empty_tuple_type => assert(try ip.getTupleType(gpa, .main, .{ .types = &.{}, - .names = &.{}, .values = &.{}, - }) == .empty_struct_type), + }) == .empty_tuple_type), else => |expected_index| assert(try ip.get(gpa, .main, key) == expected_index), }; @@ -6412,7 +6350,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } }, .type_struct => .{ .struct_type = ns: { - if (data == 0) break :ns .empty_struct; const extra_list = unwrapped_index.getExtra(ip); const extra_items = extra_list.view().items(.@"0"); const zir_index: TrackedInst.Index = @enumFromInt(extra_items[data + std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?]); @@ -6457,8 +6394,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { } else CaptureValue.Slice.empty }, } }; } }, - .type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, - .type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, + .type_tuple => .{ .tuple_type = extraTypeTuple(unwrapped_index.tid, unwrapped_index.getExtra(ip), data) }, .type_union => .{ .union_type = ns: { const extra_list = unwrapped_index.getExtra(ip); const extra = extraDataTrail(extra_list, Tag.TypeUnion, data); @@ -6764,10 +6700,10 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { // There is only one possible value precisely due to the // fact that this values slice is fully populated! - .type_struct_anon, .type_tuple_anon => { - const type_struct_anon = extraDataTrail(ty_extra, TypeStructAnon, ty_item.data); - const fields_len = type_struct_anon.data.fields_len; - const values = ty_extra.view().items(.@"0")[type_struct_anon.end + fields_len ..][0..fields_len]; + .type_tuple => { + const type_tuple = extraDataTrail(ty_extra, TypeTuple, ty_item.data); + const fields_len = type_tuple.data.fields_len; + const values = ty_extra.view().items(.@"0")[type_tuple.end + fields_len ..][0..fields_len]; return .{ .aggregate = .{ .ty = ty, .storage = .{ .elems = @ptrCast(values) }, @@ -6850,47 +6786,20 @@ fn extraErrorSet(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Ke }; } -fn extraTypeStructAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { - const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); - const fields_len = type_struct_anon.data.fields_len; +fn extraTypeTuple(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.TupleType { + const type_tuple = extraDataTrail(extra, TypeTuple, extra_index); + const fields_len = type_tuple.data.fields_len; return .{ .types = .{ .tid = tid, - .start = type_struct_anon.end, + .start = type_tuple.end, .len = fields_len, }, .values = .{ .tid = tid, - .start = type_struct_anon.end + fields_len, + .start = type_tuple.end + fields_len, .len = fields_len, }, - .names = .{ - .tid = tid, - .start = type_struct_anon.end + fields_len + fields_len, - .len = fields_len, - }, - }; -} - -fn extraTypeTupleAnon(tid: Zcu.PerThread.Id, extra: Local.Extra, extra_index: u32) Key.AnonStructType { - const type_struct_anon = extraDataTrail(extra, TypeStructAnon, extra_index); - const fields_len = type_struct_anon.data.fields_len; - return .{ - .types = .{ - .tid = tid, - .start = type_struct_anon.end, - .len = fields_len, - }, - .values = .{ - .tid = tid, - .start = type_struct_anon.end + fields_len, 
- .len = fields_len, - }, - .names = .{ - .tid = tid, - .start = 0, - .len = 0, - }, }; } @@ -7361,7 +7270,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .struct_type => unreachable, // use getStructType() instead - .anon_struct_type => unreachable, // use getAnonStructType() instead + .tuple_type => unreachable, // use getTupleType() instead .union_type => unreachable, // use getUnionType() instead .opaque_type => unreachable, // use getOpaqueType() instead @@ -7469,9 +7378,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All .field => { assert(base_ptr_type.flags.size == .One); switch (ip.indexToKey(base_ptr_type.child)) { - .anon_struct_type => |anon_struct_type| { + .tuple_type => |tuple_type| { assert(ptr.base_addr == .field); - assert(base_index.index < anon_struct_type.types.len); + assert(base_index.index < tuple_type.types.len); }, .struct_type => { assert(ptr.base_addr == .field); @@ -7808,12 +7717,12 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All const child = switch (ty_key) { .array_type => |array_type| array_type.child, .vector_type => |vector_type| vector_type.child, - .anon_struct_type, .struct_type => .none, + .tuple_type, .struct_type => .none, else => unreachable, }; const sentinel = switch (ty_key) { .array_type => |array_type| array_type.sentinel, - .vector_type, .anon_struct_type, .struct_type => .none, + .vector_type, .tuple_type, .struct_type => .none, else => unreachable, }; const len_including_sentinel = len + @intFromBool(sentinel != .none); @@ -7845,8 +7754,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All assert(ip.typeOf(elem) == field_ty); } }, - .anon_struct_type => |anon_struct_type| { - for (aggregate.storage.values(), anon_struct_type.types.get(ip)) |elem, ty| { + .tuple_type => |tuple_type| { + for (aggregate.storage.values(), tuple_type.types.get(ip)) |elem, ty| { assert(ip.typeOf(elem) == ty); } }, @@ -7862,9 +7771,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All } switch (ty_key) { - .anon_struct_type => |anon_struct_type| opv: { + .tuple_type => |tuple_type| opv: { switch (aggregate.storage) { - .bytes => |bytes| for (anon_struct_type.values.get(ip), bytes.at(0, ip)..) |value, byte| { + .bytes => |bytes| for (tuple_type.values.get(ip), bytes.at(0, ip)..) 
|value, byte| { if (value == .none) break :opv; switch (ip.indexToKey(value)) { .undef => break :opv, @@ -7877,10 +7786,10 @@ pub fn get(ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, key: Key) All }, .elems => |elems| if (!std.mem.eql( Index, - anon_struct_type.values.get(ip), + tuple_type.values.get(ip), elems, )) break :opv, - .repeated_elem => |elem| for (anon_struct_type.values.get(ip)) |value| { + .repeated_elem => |elem| for (tuple_type.values.get(ip)) |value| { if (value != elem) break :opv; }, } @@ -8244,7 +8153,6 @@ pub const StructTypeInit = struct { fields_len: u32, known_non_opv: bool, requires_comptime: RequiresComptime, - is_tuple: bool, any_comptime_fields: bool, any_default_inits: bool, inits_resolved: bool, @@ -8404,7 +8312,6 @@ pub fn getStructType( .is_extern = is_extern, .known_non_opv = ini.known_non_opv, .requires_comptime = ini.requires_comptime, - .is_tuple = ini.is_tuple, .assumed_runtime_bits = false, .assumed_pointer_aligned = false, .any_comptime_fields = ini.any_comptime_fields, @@ -8442,10 +8349,8 @@ pub fn getStructType( }, } extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); - if (!ini.is_tuple) { - extra.appendAssumeCapacity(.{@intFromEnum(names_map)}); - extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); - } + extra.appendAssumeCapacity(.{@intFromEnum(names_map)}); + extra.appendNTimesAssumeCapacity(.{@intFromEnum(OptionalNullTerminatedString.none)}, ini.fields_len); if (ini.any_default_inits) { extra.appendNTimesAssumeCapacity(.{@intFromEnum(Index.none)}, ini.fields_len); } @@ -8468,19 +8373,17 @@ pub fn getStructType( } }; } -pub const AnonStructTypeInit = struct { +pub const TupleTypeInit = struct { types: []const Index, - /// This may be empty, indicating this is a tuple. - names: []const NullTerminatedString, /// These elements may be `none`, indicating runtime-known. 
values: []const Index, }; -pub fn getAnonStructType( +pub fn getTupleType( ip: *InternPool, gpa: Allocator, tid: Zcu.PerThread.Id, - ini: AnonStructTypeInit, + ini: TupleTypeInit, ) Allocator.Error!Index { assert(ini.types.len == ini.values.len); for (ini.types) |elem| assert(elem != .none); @@ -8494,23 +8397,17 @@ pub fn getAnonStructType( try items.ensureUnusedCapacity(1); try extra.ensureUnusedCapacity( - @typeInfo(TypeStructAnon).@"struct".fields.len + (fields_len * 3), + @typeInfo(TypeTuple).@"struct".fields.len + (fields_len * 3), ); - const extra_index = addExtraAssumeCapacity(extra, TypeStructAnon{ + const extra_index = addExtraAssumeCapacity(extra, TypeTuple{ .fields_len = fields_len, }); extra.appendSliceAssumeCapacity(.{@ptrCast(ini.types)}); extra.appendSliceAssumeCapacity(.{@ptrCast(ini.values)}); errdefer extra.mutate.len = prev_extra_len; - var gop = try ip.getOrPutKey(gpa, tid, .{ - .anon_struct_type = if (ini.names.len == 0) extraTypeTupleAnon(tid, extra.list.*, extra_index) else k: { - assert(ini.names.len == ini.types.len); - extra.appendSliceAssumeCapacity(.{@ptrCast(ini.names)}); - break :k extraTypeStructAnon(tid, extra.list.*, extra_index); - }, - }); + var gop = try ip.getOrPutKey(gpa, tid, .{ .tuple_type = extraTypeTuple(tid, extra.list.*, extra_index) }); defer gop.deinit(); if (gop == .existing) { extra.mutate.len = prev_extra_len; @@ -8518,7 +8415,7 @@ pub fn getAnonStructType( } items.appendAssumeCapacity(.{ - .tag = if (ini.names.len == 0) .type_tuple_anon else .type_struct_anon, + .tag = .type_tuple, .data = extra_index, }); return gop.put(); @@ -10181,12 +10078,12 @@ pub fn getCoerced( direct: { const old_ty_child = switch (ip.indexToKey(old_ty)) { inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, + .tuple_type, .struct_type => break :direct, else => unreachable, }; const new_ty_child = switch (ip.indexToKey(new_ty)) { inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type, .struct_type => break :direct, + .tuple_type, .struct_type => break :direct, else => unreachable, }; if (old_ty_child != new_ty_child) break :direct; @@ -10235,7 +10132,7 @@ pub fn getCoerced( for (agg_elems, 0..) 
|*elem, i| { const new_elem_ty = switch (ip.indexToKey(new_ty)) { inline .array_type, .vector_type => |seq_type| seq_type.child, - .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[i], + .tuple_type => |tuple_type| tuple_type.types.get(ip)[i], .struct_type => ip.loadStructType(new_ty).field_types.get(ip)[i], else => unreachable, }; @@ -10425,7 +10322,7 @@ pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool { pub fn isAggregateType(ip: *const InternPool, ty: Index) bool { return switch (ip.indexToKey(ty)) { - .array_type, .vector_type, .anon_struct_type, .struct_type => true, + .array_type, .vector_type, .tuple_type, .struct_type => true, else => false, }; } @@ -10549,7 +10446,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { break :b @sizeOf(u32) * ints; }, .type_struct => b: { - if (data == 0) break :b 0; const extra = extraDataTrail(extra_list, Tag.TypeStruct, data); const info = extra.data; var ints: usize = @typeInfo(Tag.TypeStruct).@"struct".fields.len; @@ -10558,10 +10454,8 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { ints += 1 + captures_len; } ints += info.fields_len; // types - if (!info.flags.is_tuple) { - ints += 1; // names_map - ints += info.fields_len; // names - } + ints += 1; // names_map + ints += info.fields_len; // names if (info.flags.any_default_inits) ints += info.fields_len; // inits if (info.flags.any_aligned_fields) @@ -10573,10 +10467,6 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { ints += info.fields_len; // offsets break :b @sizeOf(u32) * ints; }, - .type_struct_anon => b: { - const info = extraData(extra_list, TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 3 * info.fields_len); - }, .type_struct_packed => b: { const extra = extraDataTrail(extra_list, Tag.TypeStructPacked, data); const captures_len = if (extra.data.flags.any_captures) @@ -10597,9 +10487,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void { @intFromBool(extra.data.flags.any_captures) + captures_len + extra.data.fields_len * 3); }, - .type_tuple_anon => b: { - const info = extraData(extra_list, TypeStructAnon, data); - break :b @sizeOf(TypeStructAnon) + (@sizeOf(u32) * 2 * info.fields_len); + .type_tuple => b: { + const info = extraData(extra_list, TypeTuple, data); + break :b @sizeOf(TypeTuple) + (@sizeOf(u32) * 2 * info.fields_len); }, .type_union => b: { @@ -10760,10 +10650,9 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .type_enum_auto, .type_opaque, .type_struct, - .type_struct_anon, .type_struct_packed, .type_struct_packed_inits, - .type_tuple_anon, + .type_tuple, .type_union, .type_function, .undef, @@ -11396,7 +11285,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .anyerror_void_error_union_type, .adhoc_inferred_error_set_type, .generic_poison_type, - .empty_struct_type, + .empty_tuple_type, => .type_type, .undef => .undefined_type, @@ -11407,7 +11296,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .unreachable_value => .noreturn_type, .null_value => .null_type, .bool_true, .bool_false => .bool_type, - .empty_struct => .empty_struct_type, + .empty_tuple => .empty_tuple_type, .generic_poison => .generic_poison_type, // This optimization on tags is needed so that indexToKey can call @@ -11436,10 +11325,9 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .type_enum_nonexhaustive, .type_opaque, .type_struct, - .type_struct_anon, 
.type_struct_packed, .type_struct_packed_inits, - .type_tuple_anon, + .type_tuple, .type_union, .type_function, => .type_type, @@ -11533,7 +11421,7 @@ pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E { pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => ip.loadStructType(ty).field_types.len, - .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .tuple_type => |tuple_type| tuple_type.types.len, .array_type => |array_type| array_type.len, .vector_type => |vector_type| vector_type.len, else => unreachable, @@ -11543,7 +11431,7 @@ pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 { pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 { return switch (ip.indexToKey(ty)) { .struct_type => ip.loadStructType(ty).field_types.len, - .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .tuple_type => |tuple_type| tuple_type.types.len, .array_type => |array_type| array_type.lenIncludingSentinel(), .vector_type => |vector_type| vector_type.len, else => unreachable, @@ -11708,7 +11596,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .optional_noreturn_type => .optional, .anyerror_void_error_union_type => .error_union, - .empty_struct_type => .@"struct", + .empty_tuple_type => .@"struct", .generic_poison_type => return error.GenericPoison, @@ -11727,7 +11615,7 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .null_value => unreachable, .bool_true => unreachable, .bool_false => unreachable, - .empty_struct => unreachable, + .empty_tuple => unreachable, .generic_poison => unreachable, _ => switch (index.unwrap(ip).getTag(ip)) { @@ -11768,10 +11656,9 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .type_opaque => .@"opaque", .type_struct, - .type_struct_anon, .type_struct_packed, .type_struct_packed_inits, - .type_tuple_anon, + .type_tuple, => .@"struct", .type_union => .@"union", @@ -12013,14 +11900,6 @@ pub fn unwrapCoercedFunc(ip: *const InternPool, index: Index) Index { }; } -pub fn anonStructFieldTypes(ip: *const InternPool, i: Index) []const Index { - return ip.indexToKey(i).anon_struct_type.types; -} - -pub fn anonStructFieldsLen(ip: *const InternPool, i: Index) u32 { - return @intCast(ip.indexToKey(i).anon_struct_type.types.len); -} - /// Returns the already-existing field with the same name, if any. 
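Illustrative aside, not part of the diff: `getTupleType` above interns a tuple purely by its `(types, values)` key, so independently written tuple type expressions are expected to intern to the same `Index`. A hedged sketch of the user-visible consequence, assuming the interning behavior shown in this hunk:

    const std = @import("std");

    comptime {
        // Two separately written tuple types should intern to the same type
        // under structural equivalence (field types + comptime values only).
        std.debug.assert(struct { u32, u8 } == struct { u32, u8 });
        // A named field makes it a regular struct, which keeps
        // declaration-based (nominal) identity instead.
        std.debug.assert(struct { a: u32 } != struct { a: u32 });
    }
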
pub fn addFieldName( ip: *InternPool, diff --git a/src/Sema.zig b/src/Sema.zig index fdf39b8305..2194b9e96c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -844,6 +844,7 @@ pub const Block = struct { fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index { const pt = block.sema.pt; + block.sema.code.assertTrackable(inst); return pt.zcu.intern_pool.trackZir(pt.zcu.gpa, pt.tid, .{ .file = block.getFileScopeIndex(pt.zcu), .inst = inst, @@ -1277,6 +1278,7 @@ fn analyzeBodyInner( .enum_decl => try sema.zirEnumDecl( block, extended, inst), .union_decl => try sema.zirUnionDecl( block, extended, inst), .opaque_decl => try sema.zirOpaqueDecl( block, extended, inst), + .tuple_decl => try sema.zirTupleDecl( block, extended), .this => try sema.zirThis( block, extended), .ret_addr => try sema.zirRetAddr( block, extended), .builtin_src => try sema.zirBuiltinSrc( block, extended), @@ -2338,7 +2340,7 @@ fn failWithInvalidComptimeFieldStore(sema: *Sema, block: *Block, init_src: LazyS const struct_type = zcu.typeToStruct(container_ty) orelse break :msg msg; try sema.errNote(.{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_value = @intCast(field_index) }, }, msg, "default value set here", .{}); break :msg msg; @@ -2651,6 +2653,94 @@ fn analyzeValueAsCallconv( }; } +fn zirTupleDecl( + sema: *Sema, + block: *Block, + extended: Zir.Inst.Extended.InstData, +) CompileError!Air.Inst.Ref { + const gpa = sema.gpa; + const pt = sema.pt; + const zcu = pt.zcu; + const fields_len = extended.small; + const extra = sema.code.extraData(Zir.Inst.TupleDecl, extended.operand); + var extra_index = extra.end; + + const types = try sema.arena.alloc(InternPool.Index, fields_len); + const inits = try sema.arena.alloc(InternPool.Index, fields_len); + + const extra_as_refs: []const Zir.Inst.Ref = @ptrCast(sema.code.extra); + + for (types, inits, 0..) 
|*field_ty, *field_init, field_index| { + const zir_field_ty, const zir_field_init = extra_as_refs[extra_index..][0..2].*; + extra_index += 2; + + const type_src = block.src(.{ .tuple_field_type = .{ + .tuple_decl_node_offset = extra.data.src_node, + .elem_index = @intCast(field_index), + } }); + const init_src = block.src(.{ .tuple_field_init = .{ + .tuple_decl_node_offset = extra.data.src_node, + .elem_index = @intCast(field_index), + } }); + + const uncoerced_field_ty = try sema.resolveInst(zir_field_ty); + const field_type = try sema.analyzeAsType(block, type_src, uncoerced_field_ty); + try sema.validateTupleFieldType(block, field_type, type_src); + + field_ty.* = field_type.toIntern(); + field_init.* = init: { + if (zir_field_init != .none) { + const uncoerced_field_init = try sema.resolveInst(zir_field_init); + const coerced_field_init = try sema.coerce(block, field_type, uncoerced_field_init, init_src); + const field_init_val = try sema.resolveConstDefinedValue(block, init_src, coerced_field_init, .{ + .needed_comptime_reason = "tuple field default value must be comptime-known", + }); + if (field_init_val.canMutateComptimeVarState(zcu)) { + return sema.fail(block, init_src, "field default value contains reference to comptime-mutable memory", .{}); + } + break :init field_init_val.toIntern(); + } + if (try sema.typeHasOnePossibleValue(field_type)) |opv| { + break :init opv.toIntern(); + } + break :init .none; + }; + } + + return Air.internedToRef(try zcu.intern_pool.getTupleType(gpa, pt.tid, .{ + .types = types, + .values = inits, + })); +} + +fn validateTupleFieldType( + sema: *Sema, + block: *Block, + field_ty: Type, + field_ty_src: LazySrcLoc, +) CompileError!void { + const gpa = sema.gpa; + const zcu = sema.pt.zcu; + if (field_ty.zigTypeTag(zcu) == .@"opaque") { + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(field_ty_src, "opaque types have unknown size and therefore cannot be directly embedded in tuples", .{}); + errdefer msg.destroy(gpa); + + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }); + } + if (field_ty.zigTypeTag(zcu) == .noreturn) { + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(field_ty_src, "tuple fields cannot be 'noreturn'", .{}); + errdefer msg.destroy(gpa); + + try sema.addDeclaredHereNote(msg, field_ty); + break :msg msg; + }); + } +} + /// Given a ZIR extra index which points to a list of `Zir.Inst.Capture`, /// resolves this into a list of `InternPool.CaptureValue` allocated by `arena`. 
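Illustrative aside, not part of the diff: a minimal sketch of what the new `zirTupleDecl` / `validateTupleFieldType` path above is expected to accept and reject. The names `Pair` and `Bad` are hypothetical; the error text is the one produced by `validateTupleFieldType`.

    // Declared tuple: unnamed fields, auto layout, structural equivalence.
    // Only comptime fields are expected to carry a default value here.
    const Pair = struct { u32, comptime u8 = 7 };
    comptime {
        const p: Pair = .{ 1, 7 };
        _ = p;
    }

    // Expected to be rejected by validateTupleFieldType:
    // const Bad = struct { anyopaque, u8 };
    //   error: opaque types have unknown size and therefore cannot be directly embedded in tuples
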
fn getCaptures(sema: *Sema, block: *Block, type_src: LazySrcLoc, extra_index: usize, captures_len: u32) ![]InternPool.CaptureValue { @@ -2774,7 +2864,6 @@ fn zirStructDecl( .fields_len = fields_len, .known_non_opv = small.known_non_opv, .requires_comptime = if (small.known_comptime_only) .yes else .unknown, - .is_tuple = small.is_tuple, .any_comptime_fields = small.any_comptime_fields, .any_default_inits = small.any_default_inits, .inits_resolved = false, @@ -4912,7 +5001,7 @@ fn validateStructInit( const default_field_ptr = if (struct_ty.isTuple(zcu)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), struct_ty); const init = Air.internedToRef(default_val.toIntern()); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); } @@ -5104,7 +5193,7 @@ fn validateStructInit( const default_field_ptr = if (struct_ty.isTuple(zcu)) try sema.tupleFieldPtr(block, init_src, struct_ptr, field_src, @intCast(i), true) else - try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), field_src, struct_ty, true); + try sema.structFieldPtrByIndex(block, init_src, struct_ptr, @intCast(i), struct_ty); try sema.checkKnownAllocPtr(block, struct_ptr, default_field_ptr); const init = Air.internedToRef(field_values[i]); try sema.storePtr2(block, init_src, default_field_ptr, init_src, init, field_src, .store); @@ -8430,22 +8519,6 @@ fn instantiateGenericCall( return result; } -fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const tuple = switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |tuple| tuple, - else => return, - }; - for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { - try sema.resolveTupleLazyValues(block, src, Type.fromInterned(field_ty)); - if (field_val == .none) continue; - // TODO: mutate in intern pool - _ = try sema.resolveLazyValue(Value.fromInterned(field_val)); - } -} - fn zirIntType(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const int_type = sema.code.instructions.items(.data)[@intFromEnum(inst)].int_type; const ty = try sema.pt.intType(int_type.signedness, int_type.bit_count); @@ -14321,13 +14394,9 @@ fn zirHasField(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai }, else => {}, }, - .anon_struct_type => |anon_struct| { - if (anon_struct.names.len != 0) { - break :hf mem.indexOfScalar(InternPool.NullTerminatedString, anon_struct.names.get(ip), field_name) != null; - } else { - const field_index = field_name.toUnsigned(ip) orelse break :hf false; - break :hf field_index < ty.structFieldCount(zcu); - } + .tuple_type => |tuple| { + const field_index = field_name.toUnsigned(ip) orelse break :hf false; + break :hf field_index < tuple.types.len; }, .struct_type => { break :hf ip.loadStructType(ty.toIntern()).nameIndex(ip, field_name) != null; @@ -14882,7 +14951,7 @@ fn analyzeTupleCat( const dest_fields = lhs_len + rhs_len; if (dest_fields == 0) { - return Air.internedToRef(Value.empty_struct.toIntern()); + return .empty_tuple; } if (lhs_len == 0) { return rhs; @@ -14928,10 +14997,9 @@ fn analyzeTupleCat( break :rs runtime_src; }; - const tuple_ty = try zcu.intern_pool.getAnonStructType(zcu.gpa, pt.tid, .{ + const tuple_ty = try 
zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{ .types = types, .values = values, - .names = &.{}, }); const runtime_src = opt_runtime_src orelse { @@ -15263,7 +15331,7 @@ fn analyzeTupleMul( return sema.fail(block, len_src, "operation results in overflow", .{}); if (final_len == 0) { - return Air.internedToRef(Value.empty_struct.toIntern()); + return .empty_tuple; } const types = try sema.arena.alloc(InternPool.Index, final_len); const values = try sema.arena.alloc(InternPool.Index, final_len); @@ -15289,10 +15357,9 @@ fn analyzeTupleMul( break :rs runtime_src; }; - const tuple_ty = try zcu.intern_pool.getAnonStructType(zcu.gpa, pt.tid, .{ + const tuple_ty = try zcu.intern_pool.getTupleType(zcu.gpa, pt.tid, .{ .types = types, .values = values, - .names = &.{}, }); const runtime_src = opt_runtime_src orelse { @@ -16689,7 +16756,7 @@ fn zirOverflowArithmetic( const maybe_rhs_val = try sema.resolveValue(rhs); const tuple_ty = try sema.overflowArithmeticTupleType(dest_ty); - const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).anon_struct_type.types.get(ip)[1]); + const overflow_ty = Type.fromInterned(ip.indexToKey(tuple_ty.toIntern()).tuple_type.types.get(ip)[1]); var result: struct { inst: Air.Inst.Ref = .none, @@ -16873,10 +16940,9 @@ fn overflowArithmeticTupleType(sema: *Sema, ty: Type) !Type { const types = [2]InternPool.Index{ ty.toIntern(), ov_ty.toIntern() }; const values = [2]InternPool.Index{ .none, .none }; - const tuple_ty = try ip.getAnonStructType(zcu.gpa, pt.tid, .{ + const tuple_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{ .types = &types, .values = &values, - .names = &.{}, }); return Type.fromInterned(tuple_ty); } @@ -18908,16 +18974,13 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai defer gpa.free(struct_field_vals); fv: { const struct_type = switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| { - struct_field_vals = try gpa.alloc(InternPool.Index, anon_struct_type.types.len); + .tuple_type => |tuple_type| { + struct_field_vals = try gpa.alloc(InternPool.Index, tuple_type.types.len); for (struct_field_vals, 0..) 
|*struct_field_val, field_index| { - const field_ty = anon_struct_type.types.get(ip)[field_index]; - const field_val = anon_struct_type.values.get(ip)[field_index]; + const field_ty = tuple_type.types.get(ip)[field_index]; + const field_val = tuple_type.values.get(ip)[field_index]; const name_val = v: { - const field_name = if (anon_struct_type.names.len != 0) - anon_struct_type.names.get(ip)[field_index] - else - try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); + const field_name = try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); const field_name_len = field_name.length(ip); const new_decl_ty = try pt.arrayType(.{ .len = field_name_len, @@ -20509,8 +20572,8 @@ fn zirStructInitEmptyResult(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].un_node; const src = block.nodeOffset(inst_data.src_node); const ty_operand = sema.resolveType(block, src, inst_data.operand) catch |err| switch (err) { - // Generic poison means this is an untyped anonymous empty struct init - error.GenericPoison => return .empty_struct, + // Generic poison means this is an untyped anonymous empty struct/array init + error.GenericPoison => return .empty_tuple, else => |e| return e, }; const init_ty = if (is_byref) ty: { @@ -20671,7 +20734,7 @@ fn zirStructInit( const result_ty = sema.resolveType(block, src, first_field_type_extra.container_type) catch |err| switch (err) { error.GenericPoison => { // The type wasn't actually known, so treat this as an anon struct init. - return sema.structInitAnon(block, src, .typed_init, extra.data, extra.end, is_ref); + return sema.structInitAnon(block, src, inst, .typed_init, extra.data, extra.end, is_ref); }, else => |e| return e, }; @@ -20837,39 +20900,28 @@ fn finishStructInit( errdefer if (root_msg) |msg| msg.destroy(sema.gpa); switch (ip.indexToKey(struct_ty.toIntern())) { - .anon_struct_type => |anon_struct| { + .tuple_type => |tuple| { // We can't get the slices, as the coercion may invalidate them. - for (0..anon_struct.types.len) |i| { + for (0..tuple.types.len) |i| { if (field_inits[i] != .none) { // Coerce the init value to the field type. 
const field_src = block.src(.{ .init_elem = .{ .init_node_offset = init_src.offset.node_offset.x, .elem_index = @intCast(i), } }); - const field_ty = Type.fromInterned(anon_struct.types.get(ip)[i]); + const field_ty = Type.fromInterned(tuple.types.get(ip)[i]); field_inits[i] = try sema.coerce(block, field_ty, field_inits[i], field_src); continue; } - const default_val = anon_struct.values.get(ip)[i]; + const default_val = tuple.values.get(ip)[i]; if (default_val == .none) { - if (anon_struct.names.len == 0) { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(init_src, msg, template, .{i}); - } else { - root_msg = try sema.errMsg(init_src, template, .{i}); - } + const template = "missing tuple field with index {d}"; + if (root_msg) |msg| { + try sema.errNote(init_src, msg, template, .{i}); } else { - const field_name = anon_struct.names.get(ip)[i]; - const template = "missing struct field: {}"; - const args = .{field_name.fmt(ip)}; - if (root_msg) |msg| { - try sema.errNote(init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(init_src, template, args); - } + root_msg = try sema.errMsg(init_src, template, .{i}); } } else { field_inits[i] = Air.internedToRef(default_val); @@ -20894,22 +20946,13 @@ fn finishStructInit( const field_init = struct_type.fieldInit(ip, i); if (field_init == .none) { - if (!struct_type.isTuple(ip)) { - const field_name = struct_type.field_names.get(ip)[i]; - const template = "missing struct field: {}"; - const args = .{field_name.fmt(ip)}; - if (root_msg) |msg| { - try sema.errNote(init_src, msg, template, args); - } else { - root_msg = try sema.errMsg(init_src, template, args); - } + const field_name = struct_type.field_names.get(ip)[i]; + const template = "missing struct field: {}"; + const args = .{field_name.fmt(ip)}; + if (root_msg) |msg| { + try sema.errNote(init_src, msg, template, args); } else { - const template = "missing tuple field with index {d}"; - if (root_msg) |msg| { - try sema.errNote(init_src, msg, template, .{i}); - } else { - root_msg = try sema.errMsg(init_src, template, .{i}); - } + root_msg = try sema.errMsg(init_src, template, args); } } else { field_inits[i] = Air.internedToRef(field_init); @@ -20970,8 +21013,7 @@ fn finishStructInit( const base_ptr = try sema.optEuBasePtrInit(block, alloc, init_src); for (field_inits, 0..) |field_init, i_usize| { const i: u32 = @intCast(i_usize); - const field_src = dest_src; - const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, base_ptr, i, field_src, struct_ty, true); + const field_ptr = try sema.structFieldPtrByIndex(block, dest_src, base_ptr, i, struct_ty); try sema.storePtr(block, dest_src, field_ptr, field_init); } @@ -20995,13 +21037,14 @@ fn zirStructInitAnon( const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].pl_node; const src = block.nodeOffset(inst_data.src_node); const extra = sema.code.extraData(Zir.Inst.StructInitAnon, inst_data.payload_index); - return sema.structInitAnon(block, src, .anon_init, extra.data, extra.end, false); + return sema.structInitAnon(block, src, inst, .anon_init, extra.data, extra.end, false); } fn structInitAnon( sema: *Sema, block: *Block, src: LazySrcLoc, + inst: Zir.Inst.Index, /// It is possible for a typed struct_init to be downgraded to an anonymous init due to a /// generic poison type. In this case, we need to know to interpret the extra data differently. 
comptime kind: enum { anon_init, typed_init }, @@ -21022,6 +21065,8 @@ fn structInitAnon( const values = try sema.arena.alloc(InternPool.Index, types.len); const names = try sema.arena.alloc(InternPool.NullTerminatedString, types.len); + var any_values = false; + // Find which field forces the expression to be runtime, if any. const opt_runtime_index = rs: { var runtime_index: ?usize = null; @@ -21063,6 +21108,7 @@ fn structInitAnon( } if (try sema.resolveValue(init)) |init_val| { field_val.* = init_val.toIntern(); + any_values = true; } else { field_val.* = .none; runtime_index = @intCast(i_usize); @@ -21071,18 +21117,76 @@ fn structInitAnon( break :rs runtime_index; }; - const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ - .names = names, - .types = types, - .values = values, - }); + // We treat anonymous struct types as reified types, because there are similarities: + // * They use a form of structural equivalence, which we can easily model using a custom hash + // * They do not have captures + // * They immediately have their fields resolved + // In general, other code should treat anon struct types and reified struct types identically, + // so there's no point having a separate `InternPool.NamespaceType` field for them. + const type_hash: u64 = hash: { + var hasher = std.hash.Wyhash.init(0); + hasher.update(std.mem.sliceAsBytes(types)); + hasher.update(std.mem.sliceAsBytes(values)); + hasher.update(std.mem.sliceAsBytes(names)); + break :hash hasher.final(); + }; + const tracked_inst = try block.trackZir(inst); + const struct_ty = switch (try ip.getStructType(gpa, pt.tid, .{ + .layout = .auto, + .fields_len = extra_data.fields_len, + .known_non_opv = false, + .requires_comptime = .unknown, + .any_comptime_fields = any_values, + .any_default_inits = any_values, + .inits_resolved = true, + .any_aligned_fields = false, + .key = .{ .reified = .{ + .zir_index = tracked_inst, + .type_hash = type_hash, + } }, + }, false)) { + .wip => |wip| ty: { + errdefer wip.cancel(ip, pt.tid); + wip.setName(ip, try sema.createTypeName(block, .anon, "struct", inst, wip.index)); + + const struct_type = ip.loadStructType(wip.index); + + for (names, values, 0..) 
|name, init_val, field_idx| { + assert(struct_type.addFieldName(ip, name) == null); + if (init_val != .none) struct_type.setFieldComptime(ip, field_idx); + } + + @memcpy(struct_type.field_types.get(ip), types); + if (any_values) { + @memcpy(struct_type.field_inits.get(ip), values); + } + + const new_namespace_index = try pt.createNamespace(.{ + .parent = block.namespace.toOptional(), + .owner_type = wip.index, + .file_scope = block.getFileScopeIndex(zcu), + .generation = zcu.generation, + }); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, tracked_inst, new_namespace_index, wip.index); + try zcu.comp.queueJob(.{ .resolve_type_fully = wip.index }); + codegen_type: { + if (zcu.comp.config.use_llvm) break :codegen_type; + if (block.ownerModule().strip) break :codegen_type; + try zcu.comp.queueJob(.{ .codegen_type = wip.index }); + } + break :ty wip.finish(ip, new_cau_index.toOptional(), new_namespace_index); + }, + .existing => |ty| ty, + }; + try sema.declareDependency(.{ .interned = struct_ty }); + try sema.addTypeReferenceEntry(src, struct_ty); const runtime_index = opt_runtime_index orelse { - const tuple_val = try pt.intern(.{ .aggregate = .{ - .ty = tuple_ty, + const struct_val = try pt.intern(.{ .aggregate = .{ + .ty = struct_ty, .storage = .{ .elems = values }, } }); - return sema.addConstantMaybeRef(tuple_val, is_ref); + return sema.addConstantMaybeRef(struct_val, is_ref); }; try sema.requireRuntimeBlock(block, LazySrcLoc.unneeded, block.src(.{ .init_elem = .{ @@ -21093,7 +21197,7 @@ fn structInitAnon( if (is_ref) { const target = zcu.getTarget(); const alloc_ty = try pt.ptrTypeSema(.{ - .child = tuple_ty, + .child = struct_ty, .flags = .{ .address_space = target_util.defaultAddressSpace(target, .local) }, }); const alloc = try block.addTy(.alloc, alloc_ty); @@ -21131,7 +21235,7 @@ fn structInitAnon( element_refs[i] = try sema.resolveInst(item.data.init); } - return block.addAggregateInit(Type.fromInterned(tuple_ty), element_refs); + return block.addAggregateInit(Type.fromInterned(struct_ty), element_refs); } fn zirArrayInit( @@ -21340,10 +21444,9 @@ fn arrayInitAnon( break :rs runtime_src; }; - const tuple_ty = try ip.getAnonStructType(gpa, pt.tid, .{ + const tuple_ty = try ip.getTupleType(gpa, pt.tid, .{ .types = types, .values = values, - .names = &.{}, }); const runtime_src = opt_runtime_src orelse { @@ -21440,12 +21543,9 @@ fn fieldType( try cur_ty.resolveFields(pt); switch (cur_ty.zigTypeTag(zcu)) { .@"struct" => switch (ip.indexToKey(cur_ty.toIntern())) { - .anon_struct_type => |anon_struct| { - const field_index = if (anon_struct.names.len == 0) - try sema.tupleFieldIndex(block, cur_ty, field_name, field_src) - else - try sema.anonStructFieldIndex(block, cur_ty, field_name, field_src); - return Air.internedToRef(anon_struct.types.get(ip)[field_index]); + .tuple_type => |tuple| { + const field_index = try sema.tupleFieldIndex(block, cur_ty, field_name, field_src); + return Air.internedToRef(tuple.types.get(ip)[field_index]); }, .struct_type => { const struct_type = ip.loadStructType(cur_ty.toIntern()); @@ -22095,7 +22195,16 @@ fn zirReify( .needed_comptime_reason = "struct fields must be comptime-known", }); - return try sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy, is_tuple_val.toBool()); + if (is_tuple_val.toBool()) { + switch (layout) { + .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}), + .@"packed" => return sema.fail(block, src, "packed tuples are not supported", .{}), + .auto => {}, 
+ } + return sema.reifyTuple(block, src, fields_arr); + } else { + return sema.reifyStruct(block, inst, src, layout, backing_integer_val, fields_arr, name_strategy); + } }, .@"enum" => { const struct_type = ip.loadStructType(ip.typeOf(union_val.val)); @@ -22696,6 +22805,104 @@ fn reifyUnion( return Air.internedToRef(wip_ty.finish(ip, new_cau_index.toOptional(), new_namespace_index)); } +fn reifyTuple( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + fields_val: Value, +) CompileError!Air.Inst.Ref { + const pt = sema.pt; + const zcu = pt.zcu; + const gpa = sema.gpa; + const ip = &zcu.intern_pool; + + const fields_len: u32 = @intCast(fields_val.typeOf(zcu).arrayLen(zcu)); + + const types = try sema.arena.alloc(InternPool.Index, fields_len); + const inits = try sema.arena.alloc(InternPool.Index, fields_len); + + for (types, inits, 0..) |*field_ty, *field_init, field_idx| { + const field_info = try fields_val.elemValue(pt, field_idx); + + const field_name_val = try field_info.fieldValue(pt, 0); + const field_type_val = try field_info.fieldValue(pt, 1); + const field_default_value_val = try field_info.fieldValue(pt, 2); + const field_is_comptime_val = try field_info.fieldValue(pt, 3); + const field_alignment_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 4)); + + const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ + .needed_comptime_reason = "tuple field name must be comptime-known", + }); + const field_type = field_type_val.toType(); + const field_default_value: InternPool.Index = if (field_default_value_val.optionalValue(zcu)) |ptr_val| d: { + const ptr_ty = try pt.singleConstPtrType(field_type_val.toType()); + // We need to do this deref here, so we won't check for this error case later on. + const val = try sema.pointerDeref(block, src, ptr_val, ptr_ty) orelse return sema.failWithNeededComptime( + block, + src, + .{ .needed_comptime_reason = "tuple field default value must be comptime-known" }, + ); + // Resolve the value so that lazy values do not create distinct types. 
+ break :d (try sema.resolveLazyValue(val)).toIntern(); + } else .none; + + const field_name_index = field_name.toUnsigned(ip) orelse return sema.fail( + block, + src, + "tuple cannot have non-numeric field '{}'", + .{field_name.fmt(ip)}, + ); + if (field_name_index != field_idx) { + return sema.fail( + block, + src, + "tuple field name '{}' does not match field index {}", + .{ field_name_index, field_idx }, + ); + } + + try sema.validateTupleFieldType(block, field_type, src); + + { + const alignment_ok = ok: { + if (field_alignment_val.toIntern() == .zero) break :ok true; + const given_align = try field_alignment_val.getUnsignedIntSema(pt) orelse break :ok false; + const abi_align = (try field_type.abiAlignmentSema(pt)).toByteUnits() orelse 0; + break :ok abi_align == given_align; + }; + if (!alignment_ok) { + return sema.fail(block, src, "tuple fields cannot specify alignment", .{}); + } + } + + if (field_is_comptime_val.toBool() and field_default_value == .none) { + return sema.fail(block, src, "comptime field without default initialization value", .{}); + } + + if (!field_is_comptime_val.toBool() and field_default_value != .none) { + return sema.fail(block, src, "non-comptime tuple fields cannot specify default initialization value", .{}); + } + + const default_or_opv: InternPool.Index = default: { + if (field_default_value != .none) { + break :default field_default_value; + } + if (try sema.typeHasOnePossibleValue(field_type)) |opv| { + break :default opv.toIntern(); + } + break :default .none; + }; + + field_ty.* = field_type.toIntern(); + field_init.* = default_or_opv; + } + + return Air.internedToRef(try zcu.intern_pool.getTupleType(gpa, pt.tid, .{ + .types = types, + .values = inits, + })); +} + fn reifyStruct( sema: *Sema, block: *Block, @@ -22705,7 +22912,6 @@ fn reifyStruct( opt_backing_int_val: Value, fields_val: Value, name_strategy: Zir.Inst.NameStrategy, - is_tuple: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; @@ -22725,7 +22931,6 @@ fn reifyStruct( var hasher = std.hash.Wyhash.init(0); std.hash.autoHash(&hasher, layout); std.hash.autoHash(&hasher, opt_backing_int_val.toIntern()); - std.hash.autoHash(&hasher, is_tuple); std.hash.autoHash(&hasher, fields_len); var any_comptime_fields = false; @@ -22781,7 +22986,6 @@ fn reifyStruct( .fields_len = fields_len, .known_non_opv = false, .requires_comptime = .unknown, - .is_tuple = is_tuple, .any_comptime_fields = any_comptime_fields, .any_default_inits = any_default_inits, .any_aligned_fields = any_aligned_fields, @@ -22800,12 +23004,6 @@ fn reifyStruct( }; errdefer wip_ty.cancel(ip, pt.tid); - if (is_tuple) switch (layout) { - .@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}), - .@"packed" => return sema.fail(block, src, "packed tuples are not supported", .{}), - .auto => {}, - }; - wip_ty.setName(ip, try sema.createTypeName( block, name_strategy, @@ -22828,22 +23026,7 @@ fn reifyStruct( const field_ty = field_type_val.toType(); // Don't pass a reason; first loop acts as an assertion that this is valid. 
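Illustrative aside, not part of the diff: a hedged sketch of a reified tuple that the new `reifyTuple` path above should accept, using the `std.builtin.Type` field names current at the time of this patch (treat the exact field layout as an assumption). Extern/packed layouts, custom alignment, non-index field names, and defaults on non-comptime fields are the cases it rejects.

    const U32x2 = @Type(.{ .@"struct" = .{
        .layout = .auto, // .@"extern" / .@"packed" tuples are rejected above
        .backing_integer = null,
        .fields = &.{
            .{ .name = "0", .type = u32, .default_value = null, .is_comptime = false, .alignment = 0 },
            .{ .name = "1", .type = u32, .default_value = null, .is_comptime = false, .alignment = 0 },
        },
        .decls = &.{},
        .is_tuple = true,
    } });

    comptime {
        // Structural equivalence: the reified tuple is expected to be the
        // same type as the equivalent declared tuple.
        @import("std").debug.assert(U32x2 == struct { u32, u32 });
    }
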
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined); - if (is_tuple) { - const field_name_index = field_name.toUnsigned(ip) orelse return sema.fail( - block, - src, - "tuple cannot have non-numeric field '{}'", - .{field_name.fmt(ip)}, - ); - if (field_name_index != field_idx) { - return sema.fail( - block, - src, - "tuple field name '{}' does not match field index {}", - .{ field_name_index, field_idx }, - ); - } - } else if (struct_type.addFieldName(ip, field_name)) |prev_index| { + if (struct_type.addFieldName(ip, field_name)) |prev_index| { _ = prev_index; // TODO: better source location return sema.fail(block, src, "duplicate struct field name {}", .{field_name.fmt(ip)}); } @@ -25579,7 +25762,7 @@ fn zirBuiltinCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError const args = try sema.resolveInst(extra.args); const args_ty = sema.typeOf(args); - if (!args_ty.isTuple(zcu) and args_ty.toIntern() != .empty_struct_type) { + if (!args_ty.isTuple(zcu)) { return sema.fail(block, args_src, "expected a tuple, found '{}'", .{args_ty.fmt(pt)}); } @@ -27471,7 +27654,7 @@ fn explainWhyTypeIsComptimeInner( for (0..struct_type.field_types.len) |i| { const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); const field_src: LazySrcLoc = .{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_type = @intCast(i) }, }; @@ -28236,11 +28419,10 @@ fn fieldVal( return Air.internedToRef(enum_val.toIntern()); }, .@"struct", .@"opaque" => { - switch (child_type.toIntern()) { - .empty_struct_type, .anyopaque_type => {}, // no namespace - else => if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { + if (!child_type.isTuple(zcu) and child_type.toIntern() != .anyopaque_type) { + if (try sema.namespaceLookupVal(block, src, child_type.getNamespaceIndex(zcu), field_name)) |inst| { return inst; - }, + } } return sema.failWithBadMemberAccess(block, child_type, src, field_name); }, @@ -28788,9 +28970,6 @@ fn structFieldPtr( } const field_index = try sema.tupleFieldIndex(block, struct_ty, field_name, field_name_src); return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); - } else if (struct_ty.isAnonStruct(zcu)) { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldPtr(block, src, struct_ptr, field_name_src, field_index, initializing); } const struct_type = zcu.typeToStruct(struct_ty).?; @@ -28798,7 +28977,7 @@ fn structFieldPtr( const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); - return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, field_name_src, struct_ty, initializing); + return sema.structFieldPtrByIndex(block, src, struct_ptr, field_index, struct_ty); } fn structFieldPtrByIndex( @@ -28807,16 +28986,11 @@ fn structFieldPtrByIndex( src: LazySrcLoc, struct_ptr: Air.Inst.Ref, field_index: u32, - field_src: LazySrcLoc, struct_ty: Type, - initializing: bool, ) CompileError!Air.Inst.Ref { const pt = sema.pt; const zcu = pt.zcu; const ip = &zcu.intern_pool; - if (struct_ty.isAnonStruct(zcu)) { - return sema.tupleFieldPtr(block, src, struct_ptr, field_src, field_index, initializing); - } if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { const val = try 
struct_ptr_val.ptrField(field_index, pt); @@ -28909,8 +29083,6 @@ fn structFieldVal( switch (ip.indexToKey(struct_ty.toIntern())) { .struct_type => { const struct_type = ip.loadStructType(struct_ty.toIntern()); - if (struct_type.isTuple(ip)) - return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); const field_index = struct_type.nameIndex(ip, field_name) orelse return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_name_src, field_name); @@ -28935,13 +29107,8 @@ fn structFieldVal( try field_ty.resolveLayout(pt); return block.addStructFieldVal(struct_byval, field_index, field_ty); }, - .anon_struct_type => |anon_struct| { - if (anon_struct.names.len == 0) { - return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); - } else { - const field_index = try sema.anonStructFieldIndex(block, struct_ty, field_name, field_name_src); - return sema.tupleFieldValByIndex(block, src, struct_byval, field_index, struct_ty); - } + .tuple_type => { + return sema.tupleFieldVal(block, src, struct_byval, field_name, field_name_src, struct_ty); }, else => unreachable, } @@ -30087,39 +30254,7 @@ fn coerceExtra( }, else => {}, }, - .One => switch (Type.fromInterned(dest_info.child).zigTypeTag(zcu)) { - .@"union" => { - // pointer to anonymous struct to pointer to union - if (inst_ty.isSinglePointer(zcu) and - inst_ty.childType(zcu).isAnonStruct(zcu) and - sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) - { - return sema.coerceAnonStructToUnionPtrs(block, dest_ty, dest_ty_src, inst, inst_src); - } - }, - .@"struct" => { - // pointer to anonymous struct to pointer to struct - if (inst_ty.isSinglePointer(zcu) and - inst_ty.childType(zcu).isAnonStruct(zcu) and - sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) - { - return sema.coerceAnonStructToStructPtrs(block, dest_ty, dest_ty_src, inst, inst_src) catch |err| switch (err) { - error.NotCoercible => break :pointer, - else => |e| return e, - }; - } - }, - .array => { - // pointer to tuple to pointer to array - if (inst_ty.isSinglePointer(zcu) and - inst_ty.childType(zcu).isTuple(zcu) and - sema.checkPtrAttributes(dest_ty, inst_ty, &in_memory_result)) - { - return sema.coerceTupleToArrayPtrs(block, dest_ty, dest_ty_src, inst, inst_src); - } - }, - else => {}, - }, + .One => {}, .Slice => to_slice: { if (inst_ty.zigTypeTag(zcu) == .array) { return sema.fail( @@ -30368,11 +30503,6 @@ fn coerceExtra( }, .@"union" => switch (inst_ty.zigTypeTag(zcu)) { .@"enum", .enum_literal => return sema.coerceEnumToUnion(block, dest_ty, dest_ty_src, inst, inst_src), - .@"struct" => { - if (inst_ty.isAnonStruct(zcu)) { - return sema.coerceAnonStructToUnion(block, dest_ty, dest_ty_src, inst, inst_src); - } - }, else => {}, }, .array => switch (inst_ty.zigTypeTag(zcu)) { @@ -30402,9 +30532,6 @@ fn coerceExtra( }, .vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src), .@"struct" => { - if (inst == .empty_struct) { - return sema.arrayInitEmpty(block, inst_src, dest_ty); - } if (inst_ty.isTuple(zcu)) { return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src); } @@ -30421,10 +30548,7 @@ fn coerceExtra( else => {}, }, .@"struct" => blk: { - if (inst == .empty_struct) { - return sema.structInitEmpty(block, dest_ty, dest_ty_src, inst_src); - } - if (inst_ty.isTupleOrAnonStruct(zcu)) { + if (inst_ty.isTuple(zcu)) { return sema.coerceTupleToStruct(block, dest_ty, inst, inst_src) catch |err| switch (err) { error.NotCoercible => 
break :blk, else => |e| return e, @@ -32208,97 +32332,6 @@ fn coerceEnumToUnion( return sema.failWithOwnedErrorMsg(block, msg); } -fn coerceAnonStructToUnion( - sema: *Sema, - block: *Block, - union_ty: Type, - union_ty_src: LazySrcLoc, - inst: Air.Inst.Ref, - inst_src: LazySrcLoc, -) !Air.Inst.Ref { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - const inst_ty = sema.typeOf(inst); - const field_info: union(enum) { - name: InternPool.NullTerminatedString, - count: usize, - } = switch (ip.indexToKey(inst_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 1) - .{ .name = anon_struct_type.names.get(ip)[0] } - else - .{ .count = anon_struct_type.names.len }, - .struct_type => name: { - const field_names = ip.loadStructType(inst_ty.toIntern()).field_names.get(ip); - break :name if (field_names.len == 1) - .{ .name = field_names[0] } - else - .{ .count = field_names.len }; - }, - else => unreachable, - }; - switch (field_info) { - .name => |field_name| { - const init = try sema.structFieldVal(block, inst_src, inst, field_name, inst_src, inst_ty); - return sema.unionInit(block, init, inst_src, union_ty, union_ty_src, field_name, inst_src); - }, - .count => |field_count| { - assert(field_count != 1); - const msg = msg: { - const msg = if (field_count > 1) try sema.errMsg( - inst_src, - "cannot initialize multiple union fields at once; unions can only have one active field", - .{}, - ) else try sema.errMsg( - inst_src, - "union initializer must initialize one field", - .{}, - ); - errdefer msg.destroy(sema.gpa); - - // TODO add notes for where the anon struct was created to point out - // the extra fields. - - try sema.addDeclaredHereNote(msg, union_ty); - break :msg msg; - }; - return sema.failWithOwnedErrorMsg(block, msg); - }, - } -} - -fn coerceAnonStructToUnionPtrs( - sema: *Sema, - block: *Block, - ptr_union_ty: Type, - union_ty_src: LazySrcLoc, - ptr_anon_struct: Air.Inst.Ref, - anon_struct_src: LazySrcLoc, -) !Air.Inst.Ref { - const pt = sema.pt; - const zcu = pt.zcu; - const union_ty = ptr_union_ty.childType(zcu); - const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); - const union_inst = try sema.coerceAnonStructToUnion(block, union_ty, union_ty_src, anon_struct, anon_struct_src); - return sema.analyzeRef(block, union_ty_src, union_inst); -} - -fn coerceAnonStructToStructPtrs( - sema: *Sema, - block: *Block, - ptr_struct_ty: Type, - struct_ty_src: LazySrcLoc, - ptr_anon_struct: Air.Inst.Ref, - anon_struct_src: LazySrcLoc, -) !Air.Inst.Ref { - const pt = sema.pt; - const zcu = pt.zcu; - const struct_ty = ptr_struct_ty.childType(zcu); - const anon_struct = try sema.analyzeLoad(block, anon_struct_src, ptr_anon_struct, anon_struct_src); - const struct_inst = try sema.coerceTupleToStruct(block, struct_ty, anon_struct, anon_struct_src); - return sema.analyzeRef(block, struct_ty_src, struct_inst); -} - /// If the lengths match, coerces element-wise. 
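Illustrative aside, not part of the diff: the deleted `coerceAnonStructToUnionPtrs` / `coerceAnonStructToStructPtrs` paths correspond to single-pointer coercions like the one sketched below; under this patch such code is expected to need a result-typed literal instead. All names here are illustrative.

    const S = struct { x: u32 };

    fn sketch() void {
        const anon = .{ .x = @as(u32, 1) }; // literal gets its own node-based struct type
        // const bad: *const S = &anon;     // *anon-struct -> *S coercion: removed by this patch
        _ = anon;
        const ok: *const S = &.{ .x = 1 };  // still fine: the literal is typed as S via the result type
        _ = ok;
    }
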
fn coerceArrayLike( sema: *Sema, @@ -32530,7 +32563,7 @@ fn coerceTupleToStruct( try struct_ty.resolveFields(pt); try struct_ty.resolveStructFieldInits(pt); - if (struct_ty.isTupleOrAnonStruct(zcu)) { + if (struct_ty.isTuple(zcu)) { return sema.coerceTupleToTuple(block, struct_ty, inst, inst_src); } @@ -32542,7 +32575,7 @@ fn coerceTupleToStruct( const inst_ty = sema.typeOf(inst); var runtime_src: ?LazySrcLoc = null; const field_count = switch (ip.indexToKey(inst_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, + .tuple_type => |tuple| tuple.types.len, .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len, else => unreachable, }; @@ -32557,7 +32590,7 @@ fn coerceTupleToStruct( const coerced = try sema.coerce(block, struct_field_ty, elem_ref, field_src); field_refs[struct_field_index] = coerced; if (struct_type.fieldIsComptime(ip, struct_field_index)) { - const init_val = (try sema.resolveValue(coerced)) orelse { + const init_val = try sema.resolveValue(coerced) orelse { return sema.failWithNeededComptime(block, field_src, .{ .needed_comptime_reason = "value stored in comptime field must be comptime-known", }); @@ -32636,8 +32669,7 @@ fn coerceTupleToTuple( const zcu = pt.zcu; const ip = &zcu.intern_pool; const dest_field_count = switch (ip.indexToKey(tuple_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, - .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.len, + .tuple_type => |tuple_type| tuple_type.types.len, else => unreachable, }; const field_vals = try sema.arena.alloc(InternPool.Index, dest_field_count); @@ -32646,8 +32678,7 @@ fn coerceTupleToTuple( const inst_ty = sema.typeOf(inst); const src_field_count = switch (ip.indexToKey(inst_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.types.len, - .struct_type => ip.loadStructType(inst_ty.toIntern()).field_types.len, + .tuple_type => |tuple_type| tuple_type.types.len, else => unreachable, }; if (src_field_count > dest_field_count) return error.NotCoercible; @@ -32656,24 +32687,19 @@ fn coerceTupleToTuple( for (0..dest_field_count) |field_index_usize| { const field_i: u32 = @intCast(field_index_usize); const field_src = inst_src; // TODO better source location - const field_name = inst_ty.structFieldName(field_index_usize, zcu).unwrap() orelse - try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index_usize}, .no_embedded_nulls); - - if (field_name.eqlSlice("len", ip)) - return sema.fail(block, field_src, "cannot assign to 'len' field of tuple", .{}); const field_ty = switch (ip.indexToKey(tuple_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.types.get(ip)[field_index_usize], + .tuple_type => |tuple_type| tuple_type.types.get(ip)[field_index_usize], .struct_type => ip.loadStructType(tuple_ty.toIntern()).field_types.get(ip)[field_index_usize], else => unreachable, }; const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[field_index_usize], + .tuple_type => |tuple_type| tuple_type.values.get(ip)[field_index_usize], .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, field_index_usize), else => unreachable, }; - const field_index = try sema.tupleFieldIndex(block, tuple_ty, field_name, field_src); + const field_index: u32 = @intCast(field_index_usize); const elem_ref = try sema.tupleField(block, inst_src, inst, field_src, field_i); const coerced = try 
sema.coerce(block, Type.fromInterned(field_ty), elem_ref, field_src); @@ -32707,28 +32733,18 @@ fn coerceTupleToTuple( if (field_ref.* != .none) continue; const default_val = switch (ip.indexToKey(tuple_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.values.get(ip)[i], + .tuple_type => |tuple_type| tuple_type.values.get(ip)[i], .struct_type => ip.loadStructType(tuple_ty.toIntern()).fieldInit(ip, i), else => unreachable, }; const field_src = inst_src; // TODO better source location if (default_val == .none) { - const field_name = tuple_ty.structFieldName(i, zcu).unwrap() orelse { - const template = "missing tuple field: {d}"; - if (root_msg) |msg| { - try sema.errNote(field_src, msg, template, .{i}); - } else { - root_msg = try sema.errMsg(field_src, template, .{i}); - } - continue; - }; - const template = "missing struct field: {}"; - const args = .{field_name.fmt(ip)}; + const template = "missing tuple field: {d}"; if (root_msg) |msg| { - try sema.errNote(field_src, msg, template, args); + try sema.errNote(field_src, msg, template, .{i}); } else { - root_msg = try sema.errMsg(field_src, template, args); + root_msg = try sema.errMsg(field_src, template, .{i}); } continue; } @@ -34265,8 +34281,8 @@ const PeerResolveStrategy = enum { fixed_int, /// The type must be some fixed-width float type. fixed_float, - /// The type must be a struct literal or tuple type. - coercible_struct, + /// The type must be a tuple. + tuple, /// The peers must all be of the same type. exact, @@ -34350,9 +34366,9 @@ const PeerResolveStrategy = enum { .fixed_float => .{ .either, .fixed_float }, else => .{ .all_s1, s1 }, // doesn't override anything later }, - .coercible_struct => switch (s1) { + .tuple => switch (s1) { .exact => .{ .all_s1, .exact }, - else => .{ .all_s0, .coercible_struct }, + else => .{ .all_s0, .tuple }, }, .exact => .{ .all_s0, .exact }, }; @@ -34393,7 +34409,7 @@ const PeerResolveStrategy = enum { .error_set => .error_set, .error_union => .error_union, .enum_literal, .@"enum", .@"union" => .enum_or_union, - .@"struct" => if (ty.isTupleOrAnonStruct(zcu)) .coercible_struct else .exact, + .@"struct" => if (ty.isTuple(zcu)) .tuple else .exact, .@"fn" => .func, }; } @@ -35501,19 +35517,17 @@ fn resolvePeerTypesInner( return .{ .success = opt_cur_ty.? }; }, - .coercible_struct => { - // First, check that every peer has the same approximate structure (field count and names) + .tuple => { + // First, check that every peer has the same approximate structure (field count) var opt_first_idx: ?usize = null; var is_tuple: bool = undefined; var field_count: usize = undefined; - // Only defined for non-tuples. - var field_names: []InternPool.NullTerminatedString = undefined; for (peer_tys, 0..) |opt_ty, i| { const ty = opt_ty orelse continue; - if (!ty.isTupleOrAnonStruct(zcu)) { + if (!ty.isTuple(zcu)) { return .{ .conflict = .{ .peer_idx_a = strat_reason, .peer_idx_b = i, @@ -35524,31 +35538,15 @@ fn resolvePeerTypesInner( opt_first_idx = i; is_tuple = ty.isTuple(zcu); field_count = ty.structFieldCount(zcu); - if (!is_tuple) { - const names = ip.indexToKey(ty.toIntern()).anon_struct_type.names.get(ip); - field_names = try sema.arena.dupe(InternPool.NullTerminatedString, names); - } continue; }; - if (ty.isTuple(zcu) != is_tuple or ty.structFieldCount(zcu) != field_count) { + if (ty.structFieldCount(zcu) != field_count) { return .{ .conflict = .{ .peer_idx_a = first_idx, .peer_idx_b = i, } }; } - - if (!is_tuple) { - for (field_names, 0..) 
|expected, field_index_usize| { - const field_index: u32 = @intCast(field_index_usize); - const actual = ty.structFieldName(field_index, zcu).unwrap().?; - if (actual == expected) continue; - return .{ .conflict = .{ - .peer_idx_a = first_idx, - .peer_idx_b = i, - } }; - } - } } assert(opt_first_idx != null); @@ -35578,10 +35576,7 @@ fn resolvePeerTypesInner( else => |result| { const result_buf = try sema.arena.create(PeerResolveResult); result_buf.* = result; - const field_name = if (is_tuple) - try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls) - else - field_names[field_index]; + const field_name = try ip.getOrPutStringFmt(sema.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls); // The error info needs the field types, but we can't reuse sub_peer_tys // since the recursive call may have clobbered it. @@ -35636,9 +35631,8 @@ fn resolvePeerTypesInner( field_val.* = if (comptime_val) |v| v.toIntern() else .none; } - const final_ty = try ip.getAnonStructType(zcu.gpa, pt.tid, .{ + const final_ty = try ip.getTupleType(zcu.gpa, pt.tid, .{ .types = field_types, - .names = if (is_tuple) &.{} else field_names, .values = field_vals, }); @@ -35778,7 +35772,7 @@ pub fn resolveStructAlignment( const ip = &zcu.intern_pool; const target = zcu.getTarget(); - assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau); assert(struct_type.layout != .@"packed"); assert(struct_type.flagsUnordered(ip).alignment == .none); @@ -35821,7 +35815,7 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; - assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau); if (struct_type.haveLayout(ip)) return; @@ -35921,12 +35915,14 @@ pub fn resolveStructLayout(sema: *Sema, ty: Type) SemaError!void { return a_align.compare(.gt, b_align); } }; - if (struct_type.isTuple(ip) or !zcu.backendSupportsFeature(.field_reordering)) { - // TODO: don't handle tuples differently. This logic exists only because it - // uncovers latent bugs if removed. Fix the latent bugs and remove this logic! - // Likewise, implement field reordering support in all the backends! + if (!zcu.backendSupportsFeature(.field_reordering)) { + // TODO: we should probably also reorder tuple fields? This is a bit weird because it'll involve + // mutating the `InternPool` for a non-container type. + // + // TODO: implement field reordering support in all the backends! + // // This logic does not reorder fields; it only moves the omitted ones to the end - // so that logic elsewhere does not need to special-case tuples. + // so that logic elsewhere does not need to special-case here. 
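Illustrative aside, not part of the diff: with the `.tuple` strategy above, peer resolution only has to line up field counts and then resolve element-wise; a hedged sketch of the expected behavior:

    fn peerExample(cond: bool) void {
        const a: struct { u32, u8 } = .{ 1, 2 };
        const b: struct { u16, u8 } = .{ 3, 4 };
        // Field counts match, so the fields peer-resolve element-wise;
        // the result is expected to be the tuple struct { u32, u8 }.
        const c = if (cond) a else b;
        _ = c;
    }
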
var i: usize = 0; var off: usize = 0; while (i + off < runtime_order.len) { @@ -35966,7 +35962,7 @@ fn backingIntType( const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const cau_index = struct_type.cau.unwrap().?; + const cau_index = struct_type.cau; var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); @@ -35978,7 +35974,7 @@ fn backingIntType( .instructions = .{}, .inlining = null, .is_comptime = true, - .src_base_inst = struct_type.zir_index.unwrap().?, + .src_base_inst = struct_type.zir_index, .type_name_ctx = struct_type.name, }; defer assert(block.instructions.items.len == 0); @@ -35992,8 +35988,8 @@ fn backingIntType( break :blk accumulator; }; - const zir = zcu.namespacePtr(struct_type.namespace.unwrap().?).fileScope(zcu).zir; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; + const zir = zcu.namespacePtr(struct_type.namespace).fileScope(zcu).zir; + const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; const extended = zir.instructions.items(.data)[@intFromEnum(zir_index)].extended; assert(extended.opcode == .struct_decl); const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small); @@ -36014,7 +36010,7 @@ fn backingIntType( extra_index += 1; const backing_int_src: LazySrcLoc = .{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .node_offset_container_tag = 0 }, }; const backing_int_ty = blk: { @@ -36261,7 +36257,7 @@ pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty).?; - assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau); if (struct_type.setFullyResolved(ip)) return; errdefer struct_type.clearFullyResolved(ip); @@ -36319,7 +36315,7 @@ pub fn resolveStructFieldTypes( const zcu = pt.zcu; const ip = &zcu.intern_pool; - assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau); if (struct_type.haveFieldTypes(ip)) return; @@ -36345,7 +36341,7 @@ pub fn resolveStructFieldInits(sema: *Sema, ty: Type) SemaError!void { const ip = &zcu.intern_pool; const struct_type = zcu.typeToStruct(ty) orelse return; - assert(sema.owner.unwrap().cau == struct_type.cau.unwrap().?); + assert(sema.owner.unwrap().cau == struct_type.cau); // Inits can start as resolved if (struct_type.haveFieldInits(ip)) return; @@ -36607,12 +36603,12 @@ fn structFields( const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; - const cau_index = struct_type.cau.unwrap().?; + const cau_index = struct_type.cau; const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; + const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; - const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); + const fields_len, _, var extra_index = structZirInfo(zir, zir_index); if (fields_len == 0) switch (struct_type.layout) { .@"packed" => { @@ -36632,7 +36628,7 @@ fn structFields( .instructions = .{}, .inlining = null, .is_comptime = true, - .src_base_inst = struct_type.zir_index.unwrap().?, + .src_base_inst = struct_type.zir_index, .type_name_ctx = struct_type.name, }; defer assert(block_scope.instructions.items.len == 0); @@ -36673,12 
+36669,8 @@ fn structFields( if (is_comptime) struct_type.setFieldComptime(ip, field_i); - var opt_field_name_zir: ?[:0]const u8 = null; - if (!small.is_tuple) { - opt_field_name_zir = zir.nullTerminatedString(@enumFromInt(zir.extra[extra_index])); - extra_index += 1; - } - extra_index += 1; // doc_comment + const field_name_zir: [:0]const u8 = zir.nullTerminatedString(@enumFromInt(zir.extra[extra_index])); + extra_index += 2; // field_name, doc_comment fields[field_i] = .{}; @@ -36690,10 +36682,8 @@ fn structFields( extra_index += 1; // This string needs to outlive the ZIR code. - if (opt_field_name_zir) |field_name_zir| { - const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); - assert(struct_type.addFieldName(ip, field_name) == null); - } + const field_name = try ip.getOrPutString(gpa, pt.tid, field_name_zir, .no_embedded_nulls); + assert(struct_type.addFieldName(ip, field_name) == null); if (has_align) { fields[field_i].align_body_len = zir.extra[extra_index]; @@ -36713,7 +36703,7 @@ fn structFields( for (fields, 0..) |zir_field, field_i| { const ty_src: LazySrcLoc = .{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_type = @intCast(field_i) }, }; const field_ty: Type = ty: { @@ -36785,7 +36775,7 @@ fn structFields( extra_index += body.len; const align_ref = try sema.resolveInlineBody(&block_scope, body, zir_index); const align_src: LazySrcLoc = .{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_align = @intCast(field_i) }, }; const field_align = try sema.analyzeAsAlign(&block_scope, align_src, align_ref); @@ -36812,11 +36802,11 @@ fn structFieldInits( assert(!struct_type.haveFieldInits(ip)); - const cau_index = struct_type.cau.unwrap().?; + const cau_index = struct_type.cau; const namespace_index = ip.getCau(cau_index).namespace; const zir = zcu.namespacePtr(namespace_index).fileScope(zcu).zir; - const zir_index = struct_type.zir_index.unwrap().?.resolve(ip) orelse return error.AnalysisFail; - const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); + const zir_index = struct_type.zir_index.resolve(ip) orelse return error.AnalysisFail; + const fields_len, _, var extra_index = structZirInfo(zir, zir_index); var block_scope: Block = .{ .parent = null, @@ -36825,7 +36815,7 @@ fn structFieldInits( .instructions = .{}, .inlining = null, .is_comptime = true, - .src_base_inst = struct_type.zir_index.unwrap().?, + .src_base_inst = struct_type.zir_index, .type_name_ctx = struct_type.name, }; defer assert(block_scope.instructions.items.len == 0); @@ -36860,10 +36850,7 @@ fn structFieldInits( const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - if (!small.is_tuple) { - extra_index += 1; - } - extra_index += 1; // doc_comment + extra_index += 2; // field_name, doc_comment fields[field_i] = .{}; @@ -36901,7 +36888,7 @@ fn structFieldInits( sema.inst_map.putAssumeCapacity(zir_index, type_ref); const init_src: LazySrcLoc = .{ - .base_node_inst = struct_type.zir_index.unwrap().?, + .base_node_inst = struct_type.zir_index, .offset = .{ .container_field_value = @intCast(field_i) }, }; @@ -37430,7 +37417,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .undefined_type => Value.undef, .optional_noreturn_type => try pt.nullValue(ty), .generic_poison_type => error.GenericPoison, - .empty_struct_type => Value.empty_struct, + 
.empty_tuple_type => Value.empty_tuple, // values, not types .undef, .zero, @@ -37446,7 +37433,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .null_value, .bool_true, .bool_false, - .empty_struct, + .empty_tuple, .generic_poison, // invalid .none, @@ -37532,10 +37519,9 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .type_enum_explicit, .type_enum_nonexhaustive, .type_struct, - .type_struct_anon, .type_struct_packed, .type_struct_packed_inits, - .type_tuple_anon, + .type_tuple, .type_union, => switch (ip.indexToKey(ty.toIntern())) { inline .array_type, .vector_type => |seq_type, seq_tag| { @@ -37594,7 +37580,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { } })); }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (tuple.values.get(ip)) |val| { if (val == .none) return null; } @@ -37965,35 +37951,9 @@ fn structFieldIndex( const zcu = pt.zcu; const ip = &zcu.intern_pool; try struct_ty.resolveFields(pt); - if (struct_ty.isAnonStruct(zcu)) { - return sema.anonStructFieldIndex(block, struct_ty, field_name, field_src); - } else { - const struct_type = zcu.typeToStruct(struct_ty).?; - return struct_type.nameIndex(ip, field_name) orelse - return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_src, field_name); - } -} - -fn anonStructFieldIndex( - sema: *Sema, - block: *Block, - struct_ty: Type, - field_name: InternPool.NullTerminatedString, - field_src: LazySrcLoc, -) !u32 { - const pt = sema.pt; - const zcu = pt.zcu; - const ip = &zcu.intern_pool; - switch (ip.indexToKey(struct_ty.toIntern())) { - .anon_struct_type => |anon_struct_type| for (anon_struct_type.names.get(ip), 0..) |name, i| { - if (name == field_name) return @intCast(i); - }, - .struct_type => if (ip.loadStructType(struct_ty.toIntern()).nameIndex(ip, field_name)) |i| return i, - else => unreachable, - } - return sema.fail(block, field_src, "no field named '{}' in anonymous struct '{}'", .{ - field_name.fmt(ip), struct_ty.fmt(pt), - }); + const struct_type = zcu.typeToStruct(struct_ty).?; + return struct_type.nameIndex(ip, field_name) orelse + return sema.failWithBadStructFieldAccess(block, struct_ty, struct_type, field_src, field_name); } /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting diff --git a/src/Sema/bitcast.zig b/src/Sema/bitcast.zig index 25955b113f..8fcc40c67c 100644 --- a/src/Sema/bitcast.zig +++ b/src/Sema/bitcast.zig @@ -246,7 +246,7 @@ const UnpackValueBits = struct { .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, diff --git a/src/Type.zig b/src/Type.zig index 1fa526a304..9548a4e5ef 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -320,33 +320,20 @@ pub fn print(ty: Type, writer: anytype, pt: Zcu.PerThread) @TypeOf(writer).Error }, .struct_type => { const name = ip.loadStructType(ty.toIntern()).name; - if (name == .empty) { - try writer.writeAll("@TypeOf(.{})"); - } else { - try writer.print("{}", .{name.fmt(ip)}); - } + try writer.print("{}", .{name.fmt(ip)}); }, - .anon_struct_type => |anon_struct| { - if (anon_struct.types.len == 0) { + .tuple_type => |tuple| { + if (tuple.types.len == 0) { return writer.writeAll("@TypeOf(.{})"); } - try writer.writeAll("struct{"); - for (anon_struct.types.get(ip), anon_struct.values.get(ip), 0..) 
|field_ty, val, i| { - if (i != 0) try writer.writeAll(", "); - if (val != .none) { - try writer.writeAll("comptime "); - } - if (anon_struct.names.len != 0) { - try writer.print("{}: ", .{anon_struct.names.get(ip)[i].fmt(&zcu.intern_pool)}); - } - + try writer.writeAll("struct {"); + for (tuple.types.get(ip), tuple.values.get(ip), 0..) |field_ty, val, i| { + try writer.writeAll(if (i == 0) " " else ", "); + if (val != .none) try writer.writeAll("comptime "); try print(Type.fromInterned(field_ty), writer, pt); - - if (val != .none) { - try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)}); - } + if (val != .none) try writer.print(" = {}", .{Value.fromInterned(val).fmtValue(pt)}); } - try writer.writeAll("}"); + try writer.writeAll(" }"); }, .union_type => { @@ -489,8 +476,7 @@ pub fn hasRuntimeBitsInner( ) RuntimeBitsError!bool { const ip = &zcu.intern_pool; return switch (ty.toIntern()) { - // False because it is a comptime-only type. - .empty_struct_type => false, + .empty_tuple_type => false, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| int_type.bits != 0, .ptr_type => { @@ -593,7 +579,7 @@ pub fn hasRuntimeBitsInner( return false; } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { if (val != .none) continue; // comptime field if (try Type.fromInterned(field_ty).hasRuntimeBitsInner( @@ -691,7 +677,7 @@ pub fn hasWellDefinedLayout(ty: Type, zcu: *const Zcu) bool { .error_union_type, .error_set_type, .inferred_error_set_type, - .anon_struct_type, + .tuple_type, .opaque_type, .anyframe_type, // These are function bodies, not function pointers. @@ -966,7 +952,7 @@ pub fn abiAlignmentInner( const ip = &zcu.intern_pool; switch (ty.toIntern()) { - .empty_struct_type => return .{ .scalar = .@"1" }, + .empty_tuple_type => return .{ .scalar = .@"1" }, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { if (int_type.bits == 0) return .{ .scalar = .@"1" }; @@ -1109,7 +1095,7 @@ pub fn abiAlignmentInner( return .{ .scalar = struct_type.flagsUnordered(ip).alignment }; }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { var big_align: Alignment = .@"1"; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { if (val != .none) continue; // comptime field @@ -1295,7 +1281,7 @@ pub fn abiSizeInner( const ip = &zcu.intern_pool; switch (ty.toIntern()) { - .empty_struct_type => return .{ .scalar = 0 }, + .empty_tuple_type => return .{ .scalar = 0 }, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { @@ -1498,7 +1484,7 @@ pub fn abiSizeInner( }, } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { switch (strat) { .sema => try ty.resolveLayout(strat.pt(zcu, tid)), .lazy, .eager => {}, @@ -1831,8 +1817,7 @@ pub fn bitSizeInner( return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8; }, - .anon_struct_type => { - if (strat == .sema) try ty.resolveFields(strat.pt(zcu, tid)); + .tuple_type => { return (try ty.abiSizeInner(strat_lazy, zcu, tid)).scalar * 8; }, @@ -2176,7 +2161,7 @@ pub fn containerLayout(ty: Type, zcu: *const Zcu) std.builtin.Type.ContainerLayo const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => ip.loadStructType(ty.toIntern()).layout, - .anon_struct_type => .auto, + .tuple_type => .auto, .union_type => ip.loadUnionType(ty.toIntern()).flagsUnordered(ip).layout, else => unreachable, }; @@ -2295,7 +2280,7 @@ pub fn arrayLenIncludingSentinel(ty: Type, zcu: 
*const Zcu) u64 { pub fn vectorLen(ty: Type, zcu: *const Zcu) u32 { return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { .vector_type => |vector_type| vector_type.len, - .anon_struct_type => |tuple| @intCast(tuple.types.len), + .tuple_type => |tuple| @intCast(tuple.types.len), else => unreachable, }; } @@ -2305,7 +2290,7 @@ pub fn sentinel(ty: Type, zcu: *const Zcu) ?Value { return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { .vector_type, .struct_type, - .anon_struct_type, + .tuple_type, => null, .array_type => |t| if (t.sentinel != .none) Value.fromInterned(t.sentinel) else null, @@ -2386,7 +2371,7 @@ pub fn intInfo(starting_ty: Type, zcu: *const Zcu) InternPool.Key.IntType { return .{ .signedness = .unsigned, .bits = zcu.errorSetBits() }; }, - .anon_struct_type => unreachable, + .tuple_type => unreachable, .ptr_type => unreachable, .anyframe_type => unreachable, @@ -2556,7 +2541,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { var ty = starting_type; const ip = &zcu.intern_pool; while (true) switch (ty.toIntern()) { - .empty_struct_type => return Value.empty_struct, + .empty_tuple_type => return Value.empty_tuple, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => |int_type| { @@ -2660,7 +2645,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value { } })); }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (tuple.values.get(ip)) |val| { if (val == .none) return null; } @@ -2783,7 +2768,7 @@ pub fn comptimeOnlyInner( ) SemaError!bool { const ip = &zcu.intern_pool; return switch (ty.toIntern()) { - .empty_struct_type => false, + .empty_tuple_type => false, else => switch (ip.indexToKey(ty.toIntern())) { .int_type => false, @@ -2891,7 +2876,7 @@ pub fn comptimeOnlyInner( }; }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, val| { const have_comptime_val = val != .none; if (!have_comptime_val and try Type.fromInterned(field_ty).comptimeOnlyInner(strat, zcu, tid)) return true; @@ -3022,7 +3007,7 @@ pub fn getNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace.toOptional(), - .struct_type => ip.loadStructType(ty.toIntern()).namespace, + .struct_type => ip.loadStructType(ty.toIntern()).namespace.toOptional(), .union_type => ip.loadUnionType(ty.toIntern()).namespace.toOptional(), .enum_type => ip.loadEnumType(ty.toIntern()).namespace.toOptional(), else => .none, @@ -3181,7 +3166,7 @@ pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.Optio const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index), - .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index), + .tuple_type => .none, else => unreachable, }; } @@ -3190,7 +3175,7 @@ pub fn structFieldCount(ty: Type, zcu: *const Zcu) u32 { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => ip.loadStructType(ty.toIntern()).field_types.len, - .anon_struct_type => |anon_struct| anon_struct.types.len, + .tuple_type => |tuple| tuple.types.len, else => unreachable, }; } @@ -3204,7 +3189,7 @@ pub fn fieldType(ty: Type, index: usize, zcu: *const Zcu) Type { const union_obj = ip.loadUnionType(ty.toIntern()); return Type.fromInterned(union_obj.field_types.get(ip)[index]); }, - 
.anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]), + .tuple_type => |tuple| Type.fromInterned(tuple.types.get(ip)[index]), else => unreachable, }; } @@ -3238,8 +3223,8 @@ pub fn fieldAlignmentInner( const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]); return field_ty.structFieldAlignmentInner(explicit_align, struct_type.layout, strat, zcu, tid); }, - .anon_struct_type => |anon_struct| { - return (try Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignmentInner( + .tuple_type => |tuple| { + return (try Type.fromInterned(tuple.types.get(ip)[index]).abiAlignmentInner( strat.toLazy(), zcu, tid, @@ -3361,8 +3346,8 @@ pub fn structFieldDefaultValue(ty: Type, index: usize, zcu: *const Zcu) Value { if (val == .none) return Value.@"unreachable"; return Value.fromInterned(val); }, - .anon_struct_type => |anon_struct| { - const val = anon_struct.values.get(ip)[index]; + .tuple_type => |tuple| { + const val = tuple.values.get(ip)[index]; // TODO: avoid using `unreachable` to indicate this. if (val == .none) return Value.@"unreachable"; return Value.fromInterned(val); @@ -3384,7 +3369,7 @@ pub fn structFieldValueComptime(ty: Type, pt: Zcu.PerThread, index: usize) !?Val return Type.fromInterned(struct_type.field_types.get(ip)[index]).onePossibleValue(pt); } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { const val = tuple.values.get(ip)[index]; if (val == .none) { return Type.fromInterned(tuple.types.get(ip)[index]).onePossibleValue(pt); @@ -3400,7 +3385,7 @@ pub fn structFieldIsComptime(ty: Type, index: usize, zcu: *const Zcu) bool { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { .struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index), - .anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none, + .tuple_type => |tuple| tuple.values.get(ip)[index] != .none, else => unreachable, }; } @@ -3425,7 +3410,7 @@ pub fn structFieldOffset( return struct_type.offsets.get(ip)[index]; }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { var offset: u64 = 0; var big_align: Alignment = .none; @@ -3472,7 +3457,6 @@ pub fn srcLocOrNull(ty: Type, zcu: *Zcu) ?Zcu.LazySrcLoc { .declared => |d| d.zir_index, .reified => |r| r.zir_index, .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, - .empty_struct => return null, }, else => return null, }, @@ -3491,49 +3475,7 @@ pub fn isGenericPoison(ty: Type) bool { pub fn isTuple(ty: Type, zcu: *const Zcu) bool { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.cau == .none) return false; - return struct_type.flagsUnordered(ip).is_tuple; - }, - .anon_struct_type => |anon_struct| anon_struct.names.len == 0, - else => false, - }; -} - -pub fn isAnonStruct(ty: Type, zcu: *const Zcu) bool { - if (ty.toIntern() == .empty_struct_type) return true; - return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len > 0, - else => false, - }; -} - -pub fn isTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool { - const ip = &zcu.intern_pool; - return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => { - const struct_type = ip.loadStructType(ty.toIntern()); - if (struct_type.layout == .@"packed") return false; - if (struct_type.cau == .none) return false; - return 
struct_type.flagsUnordered(ip).is_tuple; - }, - .anon_struct_type => true, - else => false, - }; -} - -pub fn isSimpleTuple(ty: Type, zcu: *const Zcu) bool { - return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| anon_struct_type.names.len == 0, - else => false, - }; -} - -pub fn isSimpleTupleOrAnonStruct(ty: Type, zcu: *const Zcu) bool { - return switch (zcu.intern_pool.indexToKey(ty.toIntern())) { - .anon_struct_type => true, + .tuple_type => true, else => false, }; } @@ -3564,7 +3506,7 @@ pub fn toUnsigned(ty: Type, pt: Zcu.PerThread) !Type { pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(), + .struct_type => ip.loadStructType(ty.toIntern()).zir_index, .union_type => ip.loadUnionType(ty.toIntern()).zir_index, .enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(), .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index, @@ -3575,12 +3517,11 @@ pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index { const ip = &zcu.intern_pool; return switch (ip.indexToKey(ty.toIntern())) { - .struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(), + .struct_type => ip.loadStructType(ty.toIntern()).zir_index, .union_type => ip.loadUnionType(ty.toIntern()).zir_index, .enum_type => |e| switch (e) { .declared, .reified => ip.loadEnumType(ty.toIntern()).zir_index.unwrap().?, .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, - .empty_struct => unreachable, }, .opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index, else => null, @@ -3588,13 +3529,16 @@ pub fn typeDeclInstAllowGeneratedTag(ty: Type, zcu: *const Zcu) ?InternPool.Trac } pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { + // Note that changes to ZIR instruction tracking only need to update this code + // if a newly-tracked instruction can be a type's owner `zir_index`. 
+ comptime assert(Zir.inst_tracking_version == 0); + const ip = &zcu.intern_pool; const tracked = switch (ip.indexToKey(ty.toIntern())) { .struct_type, .union_type, .opaque_type, .enum_type => |info| switch (info) { .declared => |d| d.zir_index, .reified => |r| r.zir_index, .generated_tag => |gt| ip.loadUnionType(gt.union_type).zir_index, - .empty_struct => return null, }, else => return null, }; @@ -3603,13 +3547,17 @@ pub fn typeDeclSrcLine(ty: Type, zcu: *Zcu) ?u32 { assert(file.zir_loaded); const zir = file.zir; const inst = zir.instructions.get(@intFromEnum(info.inst)); - assert(inst.tag == .extended); - return switch (inst.data.extended.opcode) { - .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line, - .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line, - .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line, - .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line, - .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line, + return switch (inst.tag) { + .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_line, + .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_line, + .extended => switch (inst.data.extended.opcode) { + .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_line, + .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_line, + .enum_decl => zir.extraData(Zir.Inst.EnumDecl, inst.data.extended.operand).data.src_line, + .opaque_decl => zir.extraData(Zir.Inst.OpaqueDecl, inst.data.extended.operand).data.src_line, + .reify => zir.extraData(Zir.Inst.Reify, inst.data.extended.operand).data.src_line, + else => unreachable, + }, else => unreachable, }; } @@ -3697,8 +3645,8 @@ pub fn resolveLayout(ty: Type, pt: Zcu.PerThread) SemaError!void { const ip = &zcu.intern_pool; switch (ty.zigTypeTag(zcu)) { .@"struct" => switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { - const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| { + const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]); try field_ty.resolveLayout(pt); }, .struct_type => return ty.resolveStructInner(pt, .layout), @@ -3796,7 +3744,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .optional_noreturn_type, .anyerror_void_error_union_type, .generic_poison_type, - .empty_struct_type, + .empty_tuple_type, => {}, .undef => unreachable, @@ -3813,7 +3761,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void { .null_value => unreachable, .bool_true => unreachable, .bool_false => unreachable, - .empty_struct => unreachable, + .empty_tuple => unreachable, .generic_poison => unreachable, else => switch (ty_ip.unwrap(ip).getTag(ip)) { @@ -3868,8 +3816,8 @@ pub fn resolveFully(ty: Type, pt: Zcu.PerThread) SemaError!void { }, .@"struct" => switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |anon_struct_type| for (0..anon_struct_type.types.len) |i| { - const field_ty = Type.fromInterned(anon_struct_type.types.get(ip)[i]); + .tuple_type => |tuple_type| for (0..tuple_type.types.len) |i| { + const field_ty = Type.fromInterned(tuple_type.types.get(ip)[i]); try 
field_ty.resolveFully(pt); }, .struct_type => return ty.resolveStructInner(pt, .full), @@ -3903,7 +3851,7 @@ fn resolveStructInner( const gpa = zcu.gpa; const struct_obj = zcu.typeToStruct(ty).?; - const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap() orelse return }); + const owner = InternPool.AnalUnit.wrap(.{ .cau = struct_obj.cau }); if (zcu.failed_analysis.contains(owner) or zcu.transitive_failed_analysis.contains(owner)) { return error.AnalysisFail; @@ -3915,7 +3863,7 @@ fn resolveStructInner( var comptime_err_ret_trace = std.ArrayList(Zcu.LazySrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); - const zir = zcu.namespacePtr(struct_obj.namespace.unwrap().?).fileScope(zcu).zir; + const zir = zcu.namespacePtr(struct_obj.namespace).fileScope(zcu).zir; var sema: Sema = .{ .pt = pt, .gpa = gpa, @@ -4196,7 +4144,7 @@ pub const single_const_pointer_to_comptime_int: Type = .{ .ip_index = .single_const_pointer_to_comptime_int_type, }; pub const slice_const_u8_sentinel_0: Type = .{ .ip_index = .slice_const_u8_sentinel_0_type }; -pub const empty_struct_literal: Type = .{ .ip_index = .empty_struct_type }; +pub const empty_tuple_type: Type = .{ .ip_index = .empty_tuple_type }; pub const generic_poison: Type = .{ .ip_index = .generic_poison_type }; diff --git a/src/Value.zig b/src/Value.zig index be2f7dc07f..dd27aaced2 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -3704,7 +3704,7 @@ pub const @"unreachable": Value = .{ .ip_index = .unreachable_value }; pub const generic_poison: Value = .{ .ip_index = .generic_poison }; pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type }; -pub const empty_struct: Value = .{ .ip_index = .empty_struct }; +pub const empty_tuple: Value = .{ .ip_index = .empty_tuple }; pub fn makeBool(x: bool) Value { return if (x) Value.true else Value.false; diff --git a/src/Zcu.zig b/src/Zcu.zig index aa8610b7fd..88bc4750af 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -1497,6 +1497,20 @@ pub const SrcLoc = struct { } } else unreachable; }, + .tuple_field_type, .tuple_field_init => |field_info| { + const tree = try src_loc.file_scope.getTree(gpa); + const node = src_loc.relativeToNodeIndex(0); + var buf: [2]Ast.Node.Index = undefined; + const container_decl = tree.fullContainerDecl(&buf, node) orelse + return tree.nodeToSpan(node); + + const field = tree.fullContainerField(container_decl.ast.members[field_info.elem_index]).?; + return tree.nodeToSpan(switch (src_loc.lazy) { + .tuple_field_type => field.ast.type_expr, + .tuple_field_init => field.ast.value_expr, + else => unreachable, + }); + }, .init_elem => |init_elem| { const tree = try src_loc.file_scope.getTree(gpa); const init_node = src_loc.relativeToNodeIndex(init_elem.init_node_offset); @@ -1939,6 +1953,12 @@ pub const LazySrcLoc = struct { container_field_type: u32, /// Like `continer_field_name`, but points at the field's alignment. container_field_align: u32, + /// The source location points to the type of the field at the given index + /// of the tuple type declaration at `tuple_decl_node_offset`. + tuple_field_type: TupleField, + /// The source location points to the default init of the field at the given index + /// of the tuple type declaration at `tuple_decl_node_offset`. + tuple_field_init: TupleField, /// The source location points to the given element/field of a struct or /// array initialization expression. 
init_elem: struct { @@ -2016,13 +2036,20 @@ pub const LazySrcLoc = struct { index: u31, }; - const ArrayCat = struct { + pub const ArrayCat = struct { /// Points to the array concat AST node. array_cat_offset: i32, /// The index of the element the source location points to. elem_index: u32, }; + pub const TupleField = struct { + /// Points to the AST node of the tuple type declaration. + tuple_decl_node_offset: i32, + /// The index of the tuple field the source location points to. + elem_index: u32, + }; + pub const nodeOffset = if (TracedOffset.want_tracing) nodeOffsetDebug else nodeOffsetRelease; noinline fn nodeOffsetDebug(node_offset: i32) Offset { @@ -2052,6 +2079,8 @@ pub const LazySrcLoc = struct { /// Returns `null` if the ZIR instruction has been lost across incremental updates. pub fn resolveBaseNode(base_node_inst: InternPool.TrackedInst.Index, zcu: *Zcu) ?struct { *File, Ast.Node.Index } { + comptime assert(Zir.inst_tracking_version == 0); + const ip = &zcu.intern_pool; const file_index, const zir_inst = inst: { const info = base_node_inst.resolveFull(ip) orelse return null; @@ -2064,6 +2093,8 @@ pub const LazySrcLoc = struct { const inst = zir.instructions.get(@intFromEnum(zir_inst)); const base_node: Ast.Node.Index = switch (inst.tag) { .declaration => inst.data.declaration.src_node, + .struct_init, .struct_init_ref => zir.extraData(Zir.Inst.StructInit, inst.data.pl_node.payload_index).data.abs_node, + .struct_init_anon => zir.extraData(Zir.Inst.StructInitAnon, inst.data.pl_node.payload_index).data.abs_node, .extended => switch (inst.data.extended.opcode) { .struct_decl => zir.extraData(Zir.Inst.StructDecl, inst.data.extended.operand).data.src_node, .union_decl => zir.extraData(Zir.Inst.UnionDecl, inst.data.extended.operand).data.src_node, @@ -3215,7 +3246,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv // If this type has a `Cau` for resolution, it's automatically referenced. const resolution_cau: InternPool.Cau.Index.Optional = switch (ip.indexToKey(ty)) { - .struct_type => ip.loadStructType(ty).cau, + .struct_type => ip.loadStructType(ty).cau.toOptional(), .union_type => ip.loadUnionType(ty).cau.toOptional(), .enum_type => ip.loadEnumType(ty).cau, .opaque_type => .none, diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index db1ee319dc..29b9716152 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -985,7 +985,6 @@ fn createFileRootStruct( .fields_len = fields_len, .known_non_opv = small.known_non_opv, .requires_comptime = if (small.known_comptime_only) .yes else .unknown, - .is_tuple = small.is_tuple, .any_comptime_fields = small.any_comptime_fields, .any_default_inits = small.any_default_inits, .inits_resolved = false, @@ -3191,7 +3190,7 @@ pub fn ensureTypeUpToDate(pt: Zcu.PerThread, ty: InternPool.Index, already_updat .struct_type => |key| { const struct_obj = ip.loadStructType(ty); const outdated = already_updating or o: { - const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? 
}); + const anal_unit = AnalUnit.wrap(.{ .cau = struct_obj.cau }); const o = zcu.outdated.swapRemove(anal_unit) or zcu.potentially_outdated.swapRemove(anal_unit); if (o) { @@ -3252,7 +3251,6 @@ fn recreateStructType( const key = switch (full_key) { .reified => unreachable, // never outdated - .empty_struct => unreachable, // never outdated .generated_tag => unreachable, // not a struct .declared => |d| d, }; @@ -3283,16 +3281,13 @@ fn recreateStructType( if (captures_len != key.captures.owned.len) return error.AnalysisFail; // The old type will be unused, so drop its dependency information. - ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau.unwrap().? })); - - const namespace_index = struct_obj.namespace.unwrap().?; + ip.removeDependenciesForDepender(gpa, AnalUnit.wrap(.{ .cau = struct_obj.cau })); const wip_ty = switch (try ip.getStructType(gpa, pt.tid, .{ .layout = small.layout, .fields_len = fields_len, .known_non_opv = small.known_non_opv, .requires_comptime = if (small.known_comptime_only) .yes else .unknown, - .is_tuple = small.is_tuple, .any_comptime_fields = small.any_comptime_fields, .any_default_inits = small.any_default_inits, .inits_resolved = false, @@ -3308,17 +3303,17 @@ fn recreateStructType( errdefer wip_ty.cancel(ip, pt.tid); wip_ty.setName(ip, struct_obj.name); - const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, namespace_index, wip_ty.index); + const new_cau_index = try ip.createTypeCau(gpa, pt.tid, key.zir_index, struct_obj.namespace, wip_ty.index); try ip.addDependency( gpa, AnalUnit.wrap(.{ .cau = new_cau_index }), .{ .src_hash = key.zir_index }, ); - zcu.namespacePtr(namespace_index).owner_type = wip_ty.index; + zcu.namespacePtr(struct_obj.namespace).owner_type = wip_ty.index; // No need to re-scan the namespace -- `zirStructDecl` will ultimately do that if the type is still alive. try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index }); - const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), namespace_index); + const new_ty = wip_ty.finish(ip, new_cau_index.toOptional(), struct_obj.namespace); if (inst_info.inst == .main_struct_inst) { // This is the root type of a file! Update the reference. zcu.setFileRootType(inst_info.file, new_ty); @@ -3337,7 +3332,6 @@ fn recreateUnionType( const key = switch (full_key) { .reified => unreachable, // never outdated - .empty_struct => unreachable, // never outdated .generated_tag => unreachable, // not a union .declared => |d| d, }; @@ -3429,9 +3423,7 @@ fn recreateEnumType( const ip = &zcu.intern_pool; const key = switch (full_key) { - .reified => unreachable, // never outdated - .empty_struct => unreachable, // never outdated - .generated_tag => unreachable, // never outdated + .reified, .generated_tag => unreachable, // never outdated .declared => |d| d, }; @@ -3575,7 +3567,7 @@ pub fn ensureNamespaceUpToDate(pt: Zcu.PerThread, namespace_index: Zcu.Namespace }; const key = switch (full_key) { - .reified, .empty_struct, .generated_tag => { + .reified, .generated_tag => { // Namespace always empty, so up-to-date. 
namespace.generation = zcu.generation; return; diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f25c16ef4a..a1bef1f4cd 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3114,7 +3114,7 @@ fn binOpImmediate( const reg = try self.register_manager.allocReg(track_inst, gp); if (track_inst) |inst| { - const mcv = .{ .register = reg }; + const mcv: MCValue = .{ .register = reg }; log.debug("binOpRegister move lhs %{d} to register: {} -> {}", .{ inst, lhs, mcv }); branch.inst_table.putAssumeCapacity(inst, mcv); @@ -3252,7 +3252,7 @@ fn binOpRegister( const reg = try self.register_manager.allocReg(track_inst, gp); if (track_inst) |inst| { - const mcv = .{ .register = reg }; + const mcv: MCValue = .{ .register = reg }; log.debug("binOpRegister move lhs %{d} to register: {} -> {}", .{ inst, lhs, mcv }); branch.inst_table.putAssumeCapacity(inst, mcv); @@ -3276,7 +3276,7 @@ fn binOpRegister( const reg = try self.register_manager.allocReg(track_inst, gp); if (track_inst) |inst| { - const mcv = .{ .register = reg }; + const mcv: MCValue = .{ .register = reg }; log.debug("binOpRegister move rhs %{d} to register: {} -> {}", .{ inst, rhs, mcv }); branch.inst_table.putAssumeCapacity(inst, mcv); @@ -3650,7 +3650,6 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty assert(off_type == Register or off_type == i13); const is_imm = (off_type == i13); - const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off }; switch (abi_size) { 1, 2, 4, 8 => { @@ -3669,7 +3668,7 @@ fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_ty .is_imm = is_imm, .rd = value_reg, .rs1 = addr_reg, - .rs2_or_imm = rs2_or_imm, + .rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off }, }, }, }); @@ -4037,7 +4036,6 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t assert(off_type == Register or off_type == i13); const is_imm = (off_type == i13); - const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off }; switch (abi_size) { 1, 2, 4, 8 => { @@ -4056,7 +4054,7 @@ fn genStore(self: *Self, value_reg: Register, addr_reg: Register, comptime off_t .is_imm = is_imm, .rd = value_reg, .rs1 = addr_reg, - .rs2_or_imm = rs2_or_imm, + .rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off }, }, }, }); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 1bc11980bb..50a8869282 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -3259,7 +3259,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -3273,7 +3273,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { .undefined, .void, .null, - .empty_struct, + .empty_tuple, .@"unreachable", .generic_poison, => unreachable, // non-runtime values @@ -3708,7 +3708,7 @@ fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try func.resolveInst(un_op); const sym_index = try func.bin_file.getGlobalSymbol("__zig_errors_len", null); - const errors_len = .{ .memory = @intFromEnum(sym_index) }; + const errors_len: WValue = .{ .memory = @intFromEnum(sym_index) }; try func.emitWValue(operand); const pt = func.pt; diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 7bd3517fac..06ae399f25 
100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -13683,7 +13683,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[@intFromEnum(inst)].un_op; const operand = try self.resolveInst(un_op); const ty = self.typeOf(un_op); - const result = switch (try self.isNullPtr(inst, ty, operand)) { + const result: MCValue = switch (try self.isNullPtr(inst, ty, operand)) { .eflags => |cc| .{ .eflags = cc.negate() }, else => unreachable, }; diff --git a/src/codegen.zig b/src/codegen.zig index 287d0bad51..2b179979f0 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -216,7 +216,7 @@ pub fn generateSymbol( .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -230,7 +230,7 @@ pub fn generateSymbol( .undefined, .void, .null, - .empty_struct, + .empty_tuple, .@"unreachable", .generic_poison, => unreachable, // non-runtime values @@ -456,7 +456,7 @@ pub fn generateSymbol( if (padding > 0) try code.appendNTimes(0, padding); } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { const struct_begin = code.items.len; for ( tuple.types.get(ip), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index be8d2ad2a5..4d55637f27 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -891,7 +891,7 @@ pub const DeclGen = struct { .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -908,7 +908,7 @@ pub const DeclGen = struct { .undefined => unreachable, .void => unreachable, .null => unreachable, - .empty_struct => unreachable, + .empty_tuple => unreachable, .@"unreachable" => unreachable, .generic_poison => unreachable, @@ -1194,7 +1194,7 @@ pub const DeclGen = struct { try writer.writeByte('}'); } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderCType(writer, ctype); @@ -1605,7 +1605,7 @@ pub const DeclGen = struct { }), } }, - .anon_struct_type => |anon_struct_info| { + .tuple_type => |tuple_info| { if (!location.isInitializer()) { try writer.writeByte('('); try dg.renderCType(writer, ctype); @@ -1614,9 +1614,9 @@ pub const DeclGen = struct { try writer.writeByte('{'); var need_comma = false; - for (0..anon_struct_info.types.len) |field_index| { - if (anon_struct_info.values.get(ip)[field_index] != .none) continue; - const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); + for (0..tuple_info.types.len) |field_index| { + if (tuple_info.values.get(ip)[field_index] != .none) continue; + const field_ty = Type.fromInterned(tuple_info.types.get(ip)[field_index]); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; if (need_comma) try writer.writeByte(','); @@ -5411,9 +5411,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { const input_val = try f.resolveInst(input); try writer.print("{s}(", .{fmtStringLiteral(if (is_reg) "r" else constraint, null)}); try f.writeCValue(writer, if (asmInputNeedsLocal(f, constraint, input_val)) local: { - const input_local = .{ .local = locals_index }; + const input_local_idx = locals_index; locals_index += 1; - break :local input_local; + break :local .{ .local = input_local_idx }; } else input_val, .Other); try writer.writeByte(')'); } @@ -5651,15 +5651,12 @@ fn fieldLocation( .begin, }; }, - .anon_struct_type => |anon_struct_info| return if (!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) + .tuple_type => return if 
(!container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .begin else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu)) .{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) } else - .{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| - .{ .identifier = field_name.toSlice(ip) } - else - .{ .field = field_index } }, + .{ .field = .{ .field = field_index } }, .union_type => { const loaded_union = ip.loadUnionType(container_ty.toIntern()); switch (loaded_union.flagsUnordered(ip).layout) { @@ -5892,10 +5889,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { }, } }, - .anon_struct_type => |anon_struct_info| if (anon_struct_info.fieldName(ip, extra.field_index).unwrap()) |field_name| - .{ .identifier = field_name.toSlice(ip) } - else - .{ .field = extra.field_index }, + .tuple_type => .{ .field = extra.field_index }, .union_type => field_name: { const loaded_union = ip.loadUnionType(struct_ty.toIntern()); switch (loaded_union.flagsUnordered(ip).layout) { @@ -7366,16 +7360,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue { }, } }, - .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| { - if (anon_struct_info.values.get(ip)[field_index] != .none) continue; - const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]); + .tuple_type => |tuple_info| for (0..tuple_info.types.len) |field_index| { + if (tuple_info.values.get(ip)[field_index] != .none) continue; + const field_ty = Type.fromInterned(tuple_info.types.get(ip)[field_index]); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; const a = try Assignment.start(f, writer, try f.ctypeFromType(field_ty, .complete)); - try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name| - .{ .identifier = field_name.toSlice(ip) } - else - .{ .field = field_index }); + try f.writeCValueMember(writer, local, .{ .field = field_index }); try a.assign(f, writer); try f.writeCValue(writer, resolved_elements[field_index], .Other); try a.end(f, writer); diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig index 1e0c23a96b..31daa75a13 100644 --- a/src/codegen/c/Type.zig +++ b/src/codegen/c/Type.zig @@ -1350,7 +1350,7 @@ pub const Pool = struct { .i0_type, .anyopaque_type, .void_type, - .empty_struct_type, + .empty_tuple_type, .type_type, .comptime_int_type, .comptime_float_type, @@ -1450,7 +1450,7 @@ pub const Pool = struct { .null_value, .bool_true, .bool_false, - .empty_struct, + .empty_tuple, .generic_poison, .none, => unreachable, @@ -1730,16 +1730,16 @@ pub const Pool = struct { ), } }, - .anon_struct_type => |anon_struct_info| { + .tuple_type => |tuple_info| { const scratch_top = scratch.items.len; defer scratch.shrinkRetainingCapacity(scratch_top); - try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len * + try scratch.ensureUnusedCapacity(allocator, tuple_info.types.len * @typeInfo(Field).@"struct".fields.len); var hasher = Hasher.init; - for (0..anon_struct_info.types.len) |field_index| { - if (anon_struct_info.values.get(ip)[field_index] != .none) continue; + for (0..tuple_info.types.len) |field_index| { + if (tuple_info.values.get(ip)[field_index] != .none) continue; const field_type = Type.fromInterned( - anon_struct_info.types.get(ip)[field_index], + tuple_info.types.get(ip)[field_index], ); const field_ctype = try pool.fromType( allocator, @@ -1750,11 +1750,7 @@ pub const Pool = struct { kind.noParameter(), ); if 
(field_ctype.index == .void) continue; - const field_name = if (anon_struct_info.fieldName(ip, @intCast(field_index)) - .unwrap()) |field_name| - try pool.string(allocator, field_name.toSlice(ip)) - else - try pool.fmt(allocator, "f{d}", .{field_index}); + const field_name = try pool.fmt(allocator, "f{d}", .{field_index}); pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ .name = field_name.index, .ctype = field_ctype.index, diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ec294ecd37..cec888fba9 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2563,7 +2563,7 @@ pub const Object = struct { } switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty; defer fields.deinit(gpa); @@ -2582,11 +2582,8 @@ pub const Object = struct { const field_offset = field_align.forward(offset); offset = field_offset + field_size; - const field_name = if (tuple.names.len != 0) - tuple.names.get(ip)[i].toSlice(ip) - else - try std.fmt.allocPrintZ(gpa, "{d}", .{i}); - defer if (tuple.names.len == 0) gpa.free(field_name); + var name_buf: [32]u8 = undefined; + const field_name = std.fmt.bufPrint(&name_buf, "{d}", .{i}) catch unreachable; fields.appendAssumeCapacity(try o.builder.debugMemberType( try o.builder.metadataString(field_name), @@ -3426,7 +3423,7 @@ pub const Object = struct { .adhoc_inferred_error_set_type, => try o.errorIntType(), .generic_poison_type, - .empty_struct_type, + .empty_tuple_type, => unreachable, // values, not types .undef, @@ -3443,7 +3440,7 @@ pub const Object = struct { .null_value, .bool_true, .bool_false, - .empty_struct, + .empty_tuple, .generic_poison, .none, => unreachable, @@ -3610,13 +3607,13 @@ pub const Object = struct { ); return ty; }, - .anon_struct_type => |anon_struct_type| { + .tuple_type => |tuple_type| { var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty; defer llvm_field_types.deinit(o.gpa); // Although we can estimate how much capacity to add, these cannot be // relied upon because of the recursive calls to lowerType below. 
- try llvm_field_types.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len); - try o.struct_field_map.ensureUnusedCapacity(o.gpa, anon_struct_type.types.len); + try llvm_field_types.ensureUnusedCapacity(o.gpa, tuple_type.types.len); + try o.struct_field_map.ensureUnusedCapacity(o.gpa, tuple_type.types.len); comptime assert(struct_layout_version == 2); var offset: u64 = 0; @@ -3625,8 +3622,8 @@ pub const Object = struct { const struct_size = t.abiSize(zcu); for ( - anon_struct_type.types.get(ip), - anon_struct_type.values.get(ip), + tuple_type.types.get(ip), + tuple_type.values.get(ip), 0.., ) |field_ty, field_val, field_index| { if (field_val != .none) continue; @@ -3979,7 +3976,7 @@ pub const Object = struct { .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -3993,7 +3990,7 @@ pub const Object = struct { .undefined => unreachable, // non-runtime value .void => unreachable, // non-runtime value .null => unreachable, // non-runtime value - .empty_struct => unreachable, // non-runtime value + .empty_tuple => unreachable, // non-runtime value .@"unreachable" => unreachable, // non-runtime value .generic_poison => unreachable, // non-runtime value @@ -4232,7 +4229,7 @@ pub const Object = struct { ), } }, - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { const struct_ty = try o.lowerType(ty); const llvm_len = struct_ty.aggregateLen(&o.builder); @@ -12516,7 +12513,7 @@ fn isByRef(ty: Type, zcu: *Zcu) bool { .array, .frame => return ty.hasRuntimeBits(zcu), .@"struct" => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { var count: usize = 0; for (tuple.types.get(ip), tuple.values.get(ip)) |field_ty, field_val| { if (field_val != .none or !Type.fromInterned(field_ty).hasRuntimeBits(zcu)) continue; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 46c23d7d53..997771a940 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -731,13 +731,15 @@ const NavGen = struct { .direct => { const result_ty_id = try self.resolveType(Type.bool, .direct); const result_id = self.spv.allocId(); - const operands = .{ - .id_result_type = result_ty_id, - .id_result = result_id, - }; switch (value) { - true => try section.emit(self.spv.gpa, .OpConstantTrue, operands), - false => try section.emit(self.spv.gpa, .OpConstantFalse, operands), + inline else => |val_ct| try section.emit( + self.spv.gpa, + if (val_ct) .OpConstantTrue else .OpConstantFalse, + .{ + .id_result_type = result_ty_id, + .id_result = result_id, + }, + ), } return result_id; }, @@ -915,7 +917,7 @@ const NavGen = struct { .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -937,7 +939,7 @@ const NavGen = struct { .undefined, .void, .null, - .empty_struct, + .empty_tuple, .@"unreachable", .generic_poison, => unreachable, // non-runtime values @@ -1125,7 +1127,7 @@ const NavGen = struct { return try self.constructStruct(ty, types.items, constituents.items); }, - .anon_struct_type => unreachable, // TODO + .tuple_type => unreachable, // TODO else => unreachable, }, .un => |un| { @@ -1718,7 +1720,7 @@ const NavGen = struct { }, .@"struct" => { const struct_type = switch (ip.indexToKey(ty.toIntern())) { - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { const member_types = try self.gpa.alloc(IdRef, tuple.values.len); defer self.gpa.free(member_types); @@ -2831,18 +2833,12 @@ const NavGen = 
struct { } }, .vulkan => { - const op_result_ty = blk: { - // Operations return a struct{T, T} - // where T is maybe vectorized. - const types = [2]InternPool.Index{ arith_op_ty.toIntern(), arith_op_ty.toIntern() }; - const values = [2]InternPool.Index{ .none, .none }; - const index = try ip.getAnonStructType(zcu.gpa, pt.tid, .{ - .types = &types, - .values = &values, - .names = &.{}, - }); - break :blk Type.fromInterned(index); - }; + // Operations return a struct{T, T} + // where T is maybe vectorized. + const op_result_ty: Type = .fromInterned(try ip.getTupleType(zcu.gpa, pt.tid, .{ + .types = &.{ arith_op_ty.toIntern(), arith_op_ty.toIntern() }, + .values = &.{ .none, .none }, + })); const op_result_ty_id = try self.resolveType(op_result_ty, .direct); const opcode: Opcode = switch (op) { @@ -4867,7 +4863,7 @@ const NavGen = struct { var index: usize = 0; switch (ip.indexToKey(result_ty.toIntern())) { - .anon_struct_type => |tuple| { + .tuple_type => |tuple| { for (tuple.types.get(ip), elements, 0..) |field_ty, element, i| { if ((try result_ty.structFieldValueComptime(pt, i)) != null) continue; assert(Type.fromInterned(field_ty).hasRuntimeBits(zcu)); @@ -6216,15 +6212,20 @@ const NavGen = struct { try self.extractField(Type.anyerror, operand_id, eu_layout.errorFieldIndex()); const result_id = self.spv.allocId(); - const operands = .{ - .id_result_type = bool_ty_id, - .id_result = result_id, - .operand_1 = error_id, - .operand_2 = try self.constInt(Type.anyerror, 0, .direct), - }; switch (pred) { - .is_err => try self.func.body.emit(self.spv.gpa, .OpINotEqual, operands), - .is_non_err => try self.func.body.emit(self.spv.gpa, .OpIEqual, operands), + inline else => |pred_ct| try self.func.body.emit( + self.spv.gpa, + switch (pred_ct) { + .is_err => .OpINotEqual, + .is_non_err => .OpIEqual, + }, + .{ + .id_result_type = bool_ty_id, + .id_result = result_id, + .operand_1 = error_id, + .operand_2 = try self.constInt(Type.anyerror, 0, .direct), + }, + ), } return result_id; } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 41e9adb2c3..2babe2b090 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2599,16 +2599,15 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool .anyframe_type, .error_union_type, .simple_type, - .anon_struct_type, + .tuple_type, .func_type, .error_set_type, .inferred_error_set_type, => .decl_alias, .struct_type => tag: { const loaded_struct = ip.loadStructType(nav_val.toIntern()); - if (loaded_struct.zir_index == .none) break :tag .decl_alias; - const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?; + const type_inst_info = loaded_struct.zir_index.resolveFull(ip).?; if (type_inst_info.file != inst_info.file) break :tag .decl_alias; const value_inst = value_inst: { @@ -3349,7 +3348,7 @@ fn updateType( .union_type, .opaque_type, => unreachable, - .anon_struct_type => |anon_struct_type| if (anon_struct_type.types.len == 0) { + .tuple_type => |tuple_type| if (tuple_type.types.len == 0) { try wip_nav.abbrevCode(.namespace_struct_type); try wip_nav.strp(name); try diw.writeByte(@intFromBool(false)); @@ -3359,15 +3358,15 @@ fn updateType( try uleb128(diw, ty.abiSize(zcu)); try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); var field_byte_offset: u64 = 0; - for (0..anon_struct_type.types.len) |field_index| { - const comptime_value = anon_struct_type.values.get(ip)[field_index]; + for (0..tuple_type.types.len) |field_index| { + const comptime_value = tuple_type.values.get(ip)[field_index]; try 
wip_nav.abbrevCode(if (comptime_value != .none) .struct_field_comptime else .struct_field); - if (anon_struct_type.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else { - const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); - defer dwarf.gpa.free(field_name); + { + var name_buf: [32]u8 = undefined; + const field_name = std.fmt.bufPrint(&name_buf, "{d}", .{field_index}) catch unreachable; try wip_nav.strp(field_name); } - const field_type = Type.fromInterned(anon_struct_type.types.get(ip)[field_index]); + const field_type = Type.fromInterned(tuple_type.types.get(ip)[field_index]); try wip_nav.refType(field_type); if (comptime_value != .none) try wip_nav.blockValue( src_loc, @@ -3595,16 +3594,26 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items); try wip_nav.flush(ty_src_loc); } else { - const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst)); - assert(decl_inst.tag == .extended); - if (switch (decl_inst.data.extended.opcode) { - .struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, - .enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, - .union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, - .opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, - .reify => @as(Zir.Inst.NameStrategy, @enumFromInt(decl_inst.data.extended.small)), - else => unreachable, - } == .parent) return; + { + // Note that changes to ZIR instruction tracking only need to update this code + // if a newly-tracked instruction can be a type's owner `zir_index`. 
+ comptime assert(Zir.inst_tracking_version == 0); + + const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst)); + const name_strat: Zir.Inst.NameStrategy = switch (decl_inst.tag) { + .struct_init, .struct_init_ref, .struct_init_anon => .anon, + .extended => switch (decl_inst.data.extended.opcode) { + .struct_decl => @as(Zir.Inst.StructDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, + .enum_decl => @as(Zir.Inst.EnumDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, + .union_decl => @as(Zir.Inst.UnionDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, + .opaque_decl => @as(Zir.Inst.OpaqueDecl.Small, @bitCast(decl_inst.data.extended.small)).name_strategy, + .reify => @as(Zir.Inst.NameStrategy, @enumFromInt(decl_inst.data.extended.small)), + else => unreachable, + }, + else => unreachable, + }; + if (name_strat == .parent) return; + } const unit = try dwarf.getUnit(file.mod); const type_gop = try dwarf.types.getOrPut(dwarf.gpa, type_index); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 483f540699..34f2de126a 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -931,7 +931,7 @@ fn addNavExports( break; } } - const sym = .{ + const sym: aout.Sym = .{ .value = atom.offset.?, .type = atom.type.toGlobal(), .name = try gpa.dupe(u8, exp_name), diff --git a/src/main.zig b/src/main.zig index 528b470f7e..24fc0aa60c 100644 --- a/src/main.zig +++ b/src/main.zig @@ -34,7 +34,7 @@ const Zcu = @import("Zcu.zig"); const mingw = @import("mingw.zig"); const dev = @import("dev.zig"); -pub const std_options = .{ +pub const std_options: std.Options = .{ .wasiCwd = wasi_cwd, .logFn = log, .enable_segfault_handler = false, diff --git a/src/print_value.zig b/src/print_value.zig index 9c06c6bcd8..06bd23d03c 100644 --- a/src/print_value.zig +++ b/src/print_value.zig @@ -74,7 +74,7 @@ pub fn print( .error_union_type, .simple_type, .struct_type, - .anon_struct_type, + .tuple_type, .union_type, .opaque_type, .enum_type, @@ -85,7 +85,7 @@ pub fn print( .undef => try writer.writeAll("undefined"), .simple_value => |simple_value| switch (simple_value) { .void => try writer.writeAll("{}"), - .empty_struct => try writer.writeAll(".{}"), + .empty_tuple => try writer.writeAll(".{}"), .generic_poison => try writer.writeAll("(generic poison)"), else => try writer.writeAll(@tagName(simple_value)), }, diff --git a/src/print_zir.zig b/src/print_zir.zig index 7cc381955f..b8f4432e72 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -563,6 +563,8 @@ const Writer = struct { .enum_decl => try self.writeEnumDecl(stream, extended), .opaque_decl => try self.writeOpaqueDecl(stream, extended), + .tuple_decl => try self.writeTupleDecl(stream, extended), + .await_nosuspend, .c_undef, .c_include, @@ -1421,7 +1423,6 @@ const Writer = struct { try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv); try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only); - try self.writeFlag(stream, "tuple, ", small.is_tuple); try stream.print("{s}, ", .{@tagName(small.name_strategy)}); @@ -1506,11 +1507,8 @@ const Writer = struct { const has_type_body = @as(u1, @truncate(cur_bit_bag)) != 0; cur_bit_bag >>= 1; - var field_name_index: Zir.NullTerminatedString = .empty; - if (!small.is_tuple) { - field_name_index = @enumFromInt(self.code.extra[extra_index]); - extra_index += 1; - } + const field_name_index: Zir.NullTerminatedString = @enumFromInt(self.code.extra[extra_index]); + extra_index += 1; const doc_comment_index: 
Zir.NullTerminatedString = @enumFromInt(self.code.extra[extra_index]); extra_index += 1; @@ -1948,6 +1946,32 @@ const Writer = struct { try self.writeSrcNode(stream, 0); } + fn writeTupleDecl(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const fields_len = extended.small; + assert(fields_len != 0); + const extra = self.code.extraData(Zir.Inst.TupleDecl, extended.operand); + + var extra_index = extra.end; + + try stream.writeAll("{ "); + + for (0..fields_len) |field_idx| { + if (field_idx != 0) try stream.writeAll(", "); + + const field_ty, const field_init = self.code.extra[extra_index..][0..2].*; + extra_index += 2; + + try stream.print("@\"{d}\": ", .{field_idx}); + try self.writeInstRef(stream, @enumFromInt(field_ty)); + try stream.writeAll(" = "); + try self.writeInstRef(stream, @enumFromInt(field_init)); + } + + try stream.writeAll(" }) "); + + try self.writeSrcNode(stream, extra.data.src_node); + } + fn writeErrorSetDecl( self: *Writer, stream: anytype, diff --git a/src/translate_c.zig b/src/translate_c.zig index a58951c228..6faffde7e2 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2314,8 +2314,11 @@ fn transStringLiteralInitializer( while (i < num_inits) : (i += 1) { init_list[i] = try transCreateCharLitNode(c, false, stmt.getCodeUnit(i)); } - const init_args = .{ .len = num_inits, .elem_type = elem_type }; - const init_array_type = try if (array_type.tag() == .array_type) Tag.array_type.create(c.arena, init_args) else Tag.null_sentinel_array_type.create(c.arena, init_args); + const init_args: ast.Payload.Array.ArrayTypeInfo = .{ .len = num_inits, .elem_type = elem_type }; + const init_array_type = if (array_type.tag() == .array_type) + try Tag.array_type.create(c.arena, init_args) + else + try Tag.null_sentinel_array_type.create(c.arena, init_args); break :blk try Tag.array_init.create(c.arena, .{ .cond = init_array_type, .cases = init_list, @@ -3910,7 +3913,7 @@ fn transCreateCompoundAssign( if ((is_mod or is_div) and is_signed) { if (requires_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node); - const operands = .{ .lhs = lhs_node, .rhs = rhs_node }; + const operands: @FieldType(ast.Payload.BinOp, "data") = .{ .lhs = lhs_node, .rhs = rhs_node }; const builtin = if (is_mod) try Tag.signed_remainder.create(c.arena, operands) else @@ -3949,7 +3952,7 @@ fn transCreateCompoundAssign( if (is_ptr_op_signed) rhs_node = try usizeCastForWrappingPtrArithmetic(c.arena, rhs_node); if ((is_mod or is_div) and is_signed) { if (requires_cast) rhs_node = try transCCast(c, scope, loc, lhs_qt, rhs_qt, rhs_node); - const operands = .{ .lhs = ref_node, .rhs = rhs_node }; + const operands: @FieldType(ast.Payload.BinOp, "data") = .{ .lhs = ref_node, .rhs = rhs_node }; const builtin = if (is_mod) try Tag.signed_remainder.create(c.arena, operands) else @@ -4777,7 +4780,7 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan const is_const = is_fn_proto or child_qt.isConstQualified(); const is_volatile = child_qt.isVolatileQualified(); const elem_type = try transQualType(c, scope, child_qt, source_loc); - const ptr_info = .{ + const ptr_info: @FieldType(ast.Payload.Pointer, "data") = .{ .is_const = is_const, .is_volatile = is_volatile, .elem_type = elem_type, diff --git a/test/behavior.zig b/test/behavior.zig index f0d05a146a..48d749146c 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -26,7 +26,6 @@ test { _ = @import("behavior/duplicated_test_names.zig"); _ = @import("behavior/defer.zig"); _ = 
@import("behavior/destructure.zig"); - _ = @import("behavior/empty_tuple_fields.zig"); _ = @import("behavior/empty_union.zig"); _ = @import("behavior/enum.zig"); _ = @import("behavior/error.zig"); diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 17b8667238..adcbe49eeb 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -596,7 +596,7 @@ test "type coercion of anon struct literal to array" { var x2: U = .{ .a = 42 }; _ = &x2; - const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } }; + const t2 = .{ x2, U{ .b = true }, U{ .c = "hello" } }; const arr2: [3]U = t2; try expect(arr2[0].a == 42); try expect(arr2[1].b == true); @@ -607,40 +607,6 @@ test "type coercion of anon struct literal to array" { try comptime S.doTheTest(); } -test "type coercion of pointer to anon struct literal to pointer to array" { - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - - const S = struct { - const U = union { - a: u32, - b: bool, - c: []const u8, - }; - - fn doTheTest() !void { - var x1: u8 = 42; - _ = &x1; - const t1 = &.{ x1, 56, 54 }; - const arr1: *const [3]u8 = t1; - try expect(arr1[0] == 42); - try expect(arr1[1] == 56); - try expect(arr1[2] == 54); - - var x2: U = .{ .a = 42 }; - _ = &x2; - const t2 = &.{ x2, .{ .b = true }, .{ .c = "hello" } }; - const arr2: *const [3]U = t2; - try expect(arr2[0].a == 42); - try expect(arr2[1].b == true); - try expect(mem.eql(u8, arr2[2].c, "hello")); - } - }; - try S.doTheTest(); - try comptime S.doTheTest(); -} - test "array with comptime-only element type" { const a = [_]type{ u32, i32 }; try testing.expect(a[0] == u32); diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 5d88564083..efc7d4237f 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -2600,32 +2600,6 @@ test "result type is preserved into comptime block" { try expect(x == 123); } -test "implicit cast from ptr to tuple to ptr to struct" { - if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - const ComptimeReason = union(enum) { - c_import: struct { - a: u32, - }, - }; - - const Block = struct { - reason: ?*const ComptimeReason, - }; - - var a: u32 = 16; - _ = &a; - var reason = .{ .c_import = .{ .a = a } }; - var block = Block{ - .reason = &reason, - }; - _ = █ - try expect(block.reason.?.c_import.a == 16); -} - test "bitcast vector" { if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO diff --git a/test/behavior/empty_file_level_struct.zig b/test/behavior/empty_file_level_struct.zig deleted file mode 100644 index 86f0f2b3c7..0000000000 --- a/test/behavior/empty_file_level_struct.zig +++ /dev/null @@ -1 +0,0 @@ -struct {} diff --git a/test/behavior/empty_file_level_union.zig b/test/behavior/empty_file_level_union.zig deleted file mode 100644 index 0d24797ffb..0000000000 --- a/test/behavior/empty_file_level_union.zig +++ /dev/null @@ -1 +0,0 @@ -union {} diff --git a/test/behavior/empty_tuple_fields.zig b/test/behavior/empty_tuple_fields.zig deleted file mode 100644 index dc809b0355..0000000000 --- 
a/test/behavior/empty_tuple_fields.zig +++ /dev/null @@ -1,28 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); - -test "empty file level struct" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - const T = @import("empty_file_level_struct.zig"); - const info = @typeInfo(T); - try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len); - try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name); - try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"struct"); -} - -test "empty file level union" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - const T = @import("empty_file_level_union.zig"); - const info = @typeInfo(T); - try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len); - try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name); - try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union"); -} diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index cc373cd8b1..283c8dbc4a 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1013,84 +1013,6 @@ test "struct with 0-length union array field" { try expectEqual(@as(usize, 0), s.zero_length.len); } -test "type coercion of anon struct literal to struct" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - - const S = struct { - const S2 = struct { - A: u32, - B: []const u8, - C: void, - D: Foo = .{}, - }; - - const Foo = struct { - field: i32 = 1234, - }; - - fn doTheTest() !void { - var y: u32 = 42; - _ = &y; - const t0 = .{ .A = 123, .B = "foo", .C = {} }; - const t1 = .{ .A = y, .B = "foo", .C = {} }; - const y0: S2 = t0; - const y1: S2 = t1; - try expect(y0.A == 123); - try expect(std.mem.eql(u8, y0.B, "foo")); - try expect(y0.C == {}); - try expect(y0.D.field == 1234); - try expect(y1.A == y); - try expect(std.mem.eql(u8, y1.B, "foo")); - try expect(y1.C == {}); - try expect(y1.D.field == 1234); - } - }; - try S.doTheTest(); - try comptime S.doTheTest(); -} - -test "type coercion of pointer to anon struct literal to pointer to struct" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - - const S = struct { - const S2 = struct { - A: u32, - B: []const u8, - C: void, - D: Foo = .{}, - }; - - const Foo = struct { - field: i32 = 1234, - }; - - fn doTheTest() !void { - var y: u32 = 42; - _ = &y; - const t0 = &.{ .A = 123, .B = "foo", .C = {} }; - const t1 = &.{ .A = y, .B = "foo", .C = {} }; - const y0: *const S2 = t0; - const y1: *const S2 = t1; - try expect(y0.A == 123); - try 
expect(std.mem.eql(u8, y0.B, "foo")); - try expect(y0.C == {}); - try expect(y0.D.field == 1234); - try expect(y1.A == y); - try expect(std.mem.eql(u8, y1.B, "foo")); - try expect(y1.C == {}); - try expect(y1.D.field == 1234); - } - }; - try S.doTheTest(); - try comptime S.doTheTest(); -} - test "packed struct with undefined initializers" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO @@ -2183,3 +2105,20 @@ test "extern struct @FieldType" { comptime assert(@FieldType(S, "b") == f64); comptime assert(@FieldType(S, "c") == *S); } + +test "anonymous struct equivalence" { + const S = struct { + fn anonStructType(comptime x: anytype) type { + const val = .{ .a = "hello", .b = x }; + return @TypeOf(val); + } + }; + + const A = S.anonStructType(123); + const B = S.anonStructType(123); + const C = S.anonStructType(456); + + comptime assert(A == B); + comptime assert(A != C); + comptime assert(B != C); +} diff --git a/test/behavior/tuple.zig b/test/behavior/tuple.zig index a511b1c1b3..becfcce028 100644 --- a/test/behavior/tuple.zig +++ b/test/behavior/tuple.zig @@ -150,7 +150,7 @@ test "array-like initializer for tuple types" { .type = u8, .default_value = null, .is_comptime = false, - .alignment = @alignOf(i32), + .alignment = @alignOf(u8), }, }, }, @@ -566,16 +566,28 @@ test "comptime fields in tuple can be initialized" { _ = &a; } -test "tuple default values" { - const T = struct { - usize, - usize = 123, - usize = 456, - }; +test "empty struct in tuple" { + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - const t: T = .{1}; - - try expectEqual(1, t[0]); - try expectEqual(123, t[1]); - try expectEqual(456, t[2]); + const T = struct { struct {} }; + const info = @typeInfo(T); + try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len); + try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name); + try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"struct"); +} + +test "empty union in tuple" { + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + + const T = struct { union {} }; + const info = @typeInfo(T); + try std.testing.expectEqual(@as(usize, 1), info.@"struct".fields.len); + try std.testing.expectEqualStrings("0", info.@"struct".fields[0].name); + try std.testing.expect(@typeInfo(info.@"struct".fields[0].type) == .@"union"); } diff --git a/test/behavior/tuple_declarations.zig b/test/behavior/tuple_declarations.zig index cf113c0201..907b114aeb 100644 --- a/test/behavior/tuple_declarations.zig +++ b/test/behavior/tuple_declarations.zig @@ -9,7 +9,7 @@ test "tuple declaration type info" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; { - const T = struct { comptime u32 align(2) = 1, []const u8 }; + const T = struct { comptime u32 = 1, []const u8 }; const info = @typeInfo(T).@"struct"; try expect(info.layout == .auto); @@ -22,7 +22,7 @@ test "tuple declaration type info" { try expect(info.fields[0].type == u32); try expect(@as(*const u32, 
@ptrCast(@alignCast(info.fields[0].default_value))).* == 1); try expect(info.fields[0].is_comptime); - try expect(info.fields[0].alignment == 2); + try expect(info.fields[0].alignment == @alignOf(u32)); try expectEqualStrings(info.fields[1].name, "1"); try expect(info.fields[1].type == []const u8); @@ -32,7 +32,7 @@ test "tuple declaration type info" { } } -test "Tuple declaration usage" { +test "tuple declaration usage" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 16ccfdd451..5acb1b5abc 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -986,76 +986,6 @@ test "function call result coerces from tagged union to the tag" { try comptime S.doTheTest(); } -test "cast from anonymous struct to union" { - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - const S = struct { - const U = union(enum) { - A: u32, - B: []const u8, - C: void, - }; - fn doTheTest() !void { - var y: u32 = 42; - _ = &y; - const t0 = .{ .A = 123 }; - const t1 = .{ .B = "foo" }; - const t2 = .{ .C = {} }; - const t3 = .{ .A = y }; - const x0: U = t0; - var x1: U = t1; - _ = &x1; - const x2: U = t2; - var x3: U = t3; - _ = &x3; - try expect(x0.A == 123); - try expect(std.mem.eql(u8, x1.B, "foo")); - try expect(x2 == .C); - try expect(x3.A == y); - } - }; - try S.doTheTest(); - try comptime S.doTheTest(); -} - -test "cast from pointer to anonymous struct to pointer to union" { - if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - - const S = struct { - const U = union(enum) { - A: u32, - B: []const u8, - C: void, - }; - fn doTheTest() !void { - var y: u32 = 42; - _ = &y; - const t0 = &.{ .A = 123 }; - const t1 = &.{ .B = "foo" }; - const t2 = &.{ .C = {} }; - const t3 = &.{ .A = y }; - const x0: *const U = t0; - var x1: *const U = t1; - _ = &x1; - const x2: *const U = t2; - var x3: *const U = t3; - _ = &x3; - try expect(x0.A == 123); - try expect(std.mem.eql(u8, x1.B, "foo")); - try expect(x2.* == .C); - try expect(x3.A == y); - } - }; - try S.doTheTest(); - try comptime S.doTheTest(); -} - test "switching on non exhaustive union" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO diff --git a/test/cases/compile_errors/array slice sentinel mismatch non-scalar.zig b/test/cases/compile_errors/array slice sentinel mismatch non-scalar.zig index 085caffc99..8eba014107 100644 --- a/test/cases/compile_errors/array slice sentinel mismatch non-scalar.zig +++ b/test/cases/compile_errors/array slice sentinel mismatch non-scalar.zig @@ -1,7 +1,8 @@ export fn foo() void { const S = struct { a: u32 }; + const sentinel: S = .{ .a = 1 }; var arr = [_]S{ .{ .a = 1 }, .{ .a = 2 } }; - const s = arr[0..1 :.{ .a = 1 }]; + const s = arr[0..1 :sentinel]; _ = s; } @@ -9,5 +10,5 @@ export fn foo() void { // backend=stage2 // target=native // -// :4:26: error: 
non-scalar sentinel type 'tmp.foo.S' +// :5:25: error: non-scalar sentinel type 'tmp.foo.S' // :2:15: note: struct declared here diff --git a/test/cases/compile_errors/bogus_method_call_on_slice.zig b/test/cases/compile_errors/bogus_method_call_on_slice.zig index 5139b7550d..b796f6dd36 100644 --- a/test/cases/compile_errors/bogus_method_call_on_slice.zig +++ b/test/cases/compile_errors/bogus_method_call_on_slice.zig @@ -18,4 +18,5 @@ pub export fn entry2() void { // // :3:6: error: no field or member function named 'copy' in '[]const u8' // :9:8: error: no field or member function named 'bar' in '@TypeOf(.{})' -// :12:18: error: no field or member function named 'bar' in 'struct{comptime foo: comptime_int = 1}' +// :12:18: error: no field or member function named 'bar' in 'tmp.entry2__struct_170' +// :12:6: note: struct declared here diff --git a/test/cases/compile_errors/coerce_anon_struct.zig b/test/cases/compile_errors/coerce_anon_struct.zig new file mode 100644 index 0000000000..84e3c732c8 --- /dev/null +++ b/test/cases/compile_errors/coerce_anon_struct.zig @@ -0,0 +1,11 @@ +const T = struct { x: u32 }; +export fn foo() void { + const a = .{ .x = 123 }; + _ = @as(T, a); +} + +// error +// +// :4:16: error: expected type 'tmp.T', found 'tmp.foo__struct_159' +// :3:16: note: struct declared here +// :1:11: note: struct declared here diff --git a/test/cases/compile_errors/destructure_error_union.zig b/test/cases/compile_errors/destructure_error_union.zig index fb5872707d..ebc4823cdf 100644 --- a/test/cases/compile_errors/destructure_error_union.zig +++ b/test/cases/compile_errors/destructure_error_union.zig @@ -10,6 +10,6 @@ pub export fn entry() void { // backend=stage2 // target=native // -// :4:28: error: type 'anyerror!tmp.entry.Foo' cannot be destructured +// :4:28: error: type 'anyerror!struct { u8, u8 }' cannot be destructured // :4:26: note: result destructured here // :4:28: note: consider using 'try', 'catch', or 'if' diff --git a/test/cases/compile_errors/file_level_tuple.zig b/test/cases/compile_errors/file_level_tuple.zig new file mode 100644 index 0000000000..97d119bb7d --- /dev/null +++ b/test/cases/compile_errors/file_level_tuple.zig @@ -0,0 +1,6 @@ +u32, +comptime u8 = 123, + +// error +// +// :1:1: error: file cannot be a tuple diff --git a/test/cases/compile_errors/invalid_peer_type_resolution.zig b/test/cases/compile_errors/invalid_peer_type_resolution.zig index e96caecfb6..c8cef83cac 100644 --- a/test/cases/compile_errors/invalid_peer_type_resolution.zig +++ b/test/cases/compile_errors/invalid_peer_type_resolution.zig @@ -10,11 +10,6 @@ export fn badTupleField() void { _ = .{ &x, &y }; _ = @TypeOf(x, y); } -export fn badNestedField() void { - const x = .{ .foo = "hi", .bar = .{ 0, 1 } }; - const y = .{ .foo = "hello", .bar = .{ 2, "hi" } }; - _ = @TypeOf(x, y); -} export fn incompatiblePointers() void { const x: []const u8 = "foo"; const y: [*:0]const u8 = "bar"; @@ -39,14 +34,9 @@ export fn incompatiblePointers4() void { // :11:9: note: incompatible types: 'u32' and '*const [5:0]u8' // :11:17: note: type 'u32' here // :11:20: note: type '*const [5:0]u8' here -// :16:9: error: struct field 'bar' has conflicting types -// :16:9: note: struct field '1' has conflicting types -// :16:9: note: incompatible types: 'comptime_int' and '*const [2:0]u8' -// :16:17: note: type 'comptime_int' here -// :16:20: note: type '*const [2:0]u8' here -// :21:9: error: incompatible types: '[]const u8' and '[*:0]const u8' -// :21:17: note: type '[]const u8' here -// :21:20: note: type 
'[*:0]const u8' here -// :28:9: error: incompatible types: '[]const u8' and '[*]const u8' -// :28:23: note: type '[]const u8' here -// :28:26: note: type '[*]const u8' here +// :16:9: error: incompatible types: '[]const u8' and '[*:0]const u8' +// :16:17: note: type '[]const u8' here +// :16:20: note: type '[*:0]const u8' here +// :23:9: error: incompatible types: '[]const u8' and '[*]const u8' +// :23:23: note: type '[]const u8' here +// :23:26: note: type '[*]const u8' here diff --git a/test/cases/compile_errors/missing_field_in_struct_value_expression.zig b/test/cases/compile_errors/missing_field_in_struct_value_expression.zig index 3b09df106f..8c06dc0be6 100644 --- a/test/cases/compile_errors/missing_field_in_struct_value_expression.zig +++ b/test/cases/compile_errors/missing_field_in_struct_value_expression.zig @@ -29,7 +29,5 @@ export fn h() void { // :9:16: error: missing struct field: x // :1:11: note: struct declared here // :18:16: error: missing tuple field with index 1 -// :16:11: note: struct declared here // :22:16: error: missing tuple field with index 0 // :22:16: note: missing tuple field with index 1 -// :16:11: note: struct declared here diff --git a/test/cases/compile_errors/overflow_arithmetic_on_vector_with_undefined_elems.zig b/test/cases/compile_errors/overflow_arithmetic_on_vector_with_undefined_elems.zig index 8055756a11..c4223d7727 100644 --- a/test/cases/compile_errors/overflow_arithmetic_on_vector_with_undefined_elems.zig +++ b/test/cases/compile_errors/overflow_arithmetic_on_vector_with_undefined_elems.zig @@ -21,6 +21,6 @@ comptime { // :14:5: note: also here // // Compile Log Output: -// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } }) -// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } }) -// @as(struct{@Vector(3, u8), @Vector(3, u1)}, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } }) +// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 2, 144, undefined }, .{ 0, 1, undefined } }) +// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 1, 255, undefined }, .{ 0, 1, undefined } }) +// @as(struct { @Vector(3, u8), @Vector(3, u1) }, .{ .{ 1, 64, undefined }, .{ 0, 1, undefined } }) diff --git a/test/cases/compile_errors/tuple_init_edge_cases.zig b/test/cases/compile_errors/tuple_init_edge_cases.zig index 24f4ff9bdd..7200fb42f1 100644 --- a/test/cases/compile_errors/tuple_init_edge_cases.zig +++ b/test/cases/compile_errors/tuple_init_edge_cases.zig @@ -72,6 +72,5 @@ pub export fn entry6() void { // :18:14: error: missing tuple field with index 1 // :25:14: error: missing tuple field with index 1 // :43:14: error: expected at most 2 tuple fields; found 3 -// :50:30: error: index '2' out of bounds of tuple 'struct{comptime comptime_int = 123, u32}' +// :50:30: error: index '2' out of bounds of tuple 'struct { comptime comptime_int = 123, u32 }' // :63:37: error: missing tuple field with index 3 -// :58:32: note: struct declared here diff --git a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig index 9f360e2afe..1efcead765 100644 --- a/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig +++ b/test/cases/compile_errors/type_mismatch_with_tuple_concatenation.zig @@ -7,4 +7,4 @@ export fn entry() void { // backend=stage2 // target=native // -// :3:11: error: expected type '@TypeOf(.{})', found 'struct{comptime comptime_int = 1, comptime comptime_int = 2, comptime 
comptime_int = 3}' +// :3:11: error: expected type '@TypeOf(.{})', found 'struct { comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3 }' diff --git a/test/compare_output.zig b/test/compare_output.zig index b5c65df889..e766641cde 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -440,7 +440,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { cases.add("std.log per scope log level override", \\const std = @import("std"); \\ - \\pub const std_options = .{ + \\pub const std_options: std.Options = .{ \\ .log_level = .debug, \\ \\ .log_scope_levels = &.{ @@ -497,7 +497,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { cases.add("std.heap.LoggingAllocator logs to std.log", \\const std = @import("std"); \\ - \\pub const std_options = .{ + \\pub const std_options: std.Options = .{ \\ .log_level = .debug, \\ .logFn = log, \\}; diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index 913c9b632c..91eaa385ba 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -1532,18 +1532,18 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { , &.{ \\(lldb) frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2 - \\(std.multi_array_list.MultiArrayList(main.Elem0)) list0 = len=3 capacity=8 { - \\ (root.main.Elem0) [0] = { + \\(std.multi_array_list.MultiArrayList(struct { u32, u8, u16 })) list0 = len=3 capacity=8 { + \\ (std.struct { u32, u8, u16 }) [0] = { \\ (u32) .@"0" = 1 \\ (u8) .@"1" = 2 \\ (u16) .@"2" = 3 \\ } - \\ (root.main.Elem0) [1] = { + \\ (std.struct { u32, u8, u16 }) [1] = { \\ (u32) .@"0" = 4 \\ (u8) .@"1" = 5 \\ (u16) .@"2" = 6 \\ } - \\ (root.main.Elem0) [2] = { + \\ (std.struct { u32, u8, u16 }) [2] = { \\ (u32) .@"0" = 7 \\ (u8) .@"1" = 8 \\ (u16) .@"2" = 9 @@ -1551,17 +1551,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\} \\(usize) list0.len = 3 \\(usize) list0.capacity = 8 - \\(root.main.Elem0) list0[0] = { + \\(std.struct { u32, u8, u16 }) list0[0] = { \\ (u32) .@"0" = 1 \\ (u8) .@"1" = 2 \\ (u16) .@"2" = 3 \\} - \\(root.main.Elem0) list0[1] = { + \\(std.struct { u32, u8, u16 }) list0[1] = { \\ (u32) .@"0" = 4 \\ (u8) .@"1" = 5 \\ (u16) .@"2" = 6 \\} - \\(root.main.Elem0) list0[2] = { + \\(std.struct { u32, u8, u16 }) list0[2] = { \\ (u32) .@"0" = 7 \\ (u8) .@"1" = 8 \\ (u16) .@"2" = 9 @@ -1582,18 +1582,18 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (u16) [2] = 9 \\} \\(lldb) frame variable --show-types -- slice0 slice0.len slice0.capacity slice0[0] slice0[1] slice0[2] slice0.0 slice0.1 slice0.2 - \\(std.multi_array_list.MultiArrayList(main.Elem0).Slice) slice0 = len=3 capacity=8 { - \\ (root.main.Elem0) [0] = { + \\(std.multi_array_list.MultiArrayList(struct { u32, u8, u16 }).Slice) slice0 = len=3 capacity=8 { + \\ (std.struct { u32, u8, u16 }) [0] = { \\ (u32) .@"0" = 1 \\ (u8) .@"1" = 2 \\ (u16) .@"2" = 3 \\ } - \\ (root.main.Elem0) [1] = { + \\ (std.struct { u32, u8, u16 }) [1] = { \\ (u32) .@"0" = 4 \\ (u8) .@"1" = 5 \\ (u16) .@"2" = 6 \\ } - \\ (root.main.Elem0) [2] = { + \\ (std.struct { u32, u8, u16 }) [2] = { \\ (u32) .@"0" = 7 \\ (u8) .@"1" = 8 \\ (u16) .@"2" = 9 @@ -1601,17 +1601,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\} \\(usize) slice0.len = 3 \\(usize) slice0.capacity = 8 - \\(root.main.Elem0) slice0[0] = { + \\(std.struct { u32, u8, u16 }) slice0[0] = { \\ (u32) .@"0" = 1 \\ (u8) .@"1" = 2 \\ (u16) .@"2" = 3 \\} - \\(root.main.Elem0) 
slice0[1] = { + \\(std.struct { u32, u8, u16 }) slice0[1] = { \\ (u32) .@"0" = 4 \\ (u8) .@"1" = 5 \\ (u16) .@"2" = 6 \\} - \\(root.main.Elem0) slice0[2] = { + \\(std.struct { u32, u8, u16 }) slice0[2] = { \\ (u32) .@"0" = 7 \\ (u8) .@"1" = 8 \\ (u16) .@"2" = 9 diff --git a/test/standalone/sigpipe/breakpipe.zig b/test/standalone/sigpipe/breakpipe.zig index 044a2bc65f..293a6839a1 100644 --- a/test/standalone/sigpipe/breakpipe.zig +++ b/test/standalone/sigpipe/breakpipe.zig @@ -1,7 +1,7 @@ const std = @import("std"); const build_options = @import("build_options"); -pub const std_options = .{ +pub const std_options: std.Options = .{ .keep_sigpipe = build_options.keep_sigpipe, }; diff --git a/test/standalone/simple/issue_7030.zig b/test/standalone/simple/issue_7030.zig index d4732c331c..eb7aa65387 100644 --- a/test/standalone/simple/issue_7030.zig +++ b/test/standalone/simple/issue_7030.zig @@ -1,6 +1,6 @@ const std = @import("std"); -pub const std_options = .{ +pub const std_options: std.Options = .{ .logFn = log, };
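---

Note on the semantics exercised above (a minimal sketch, not part of the patch itself; the helper name `anonType` is illustrative). The new "anonymous struct equivalence" behavior test asserts that the type of an untyped anonymous literal is a regular struct type deduplicated by AST node and structure, so repeated instantiations with the same comptime-known field values yield the same type, while different values yield distinct types:

const std = @import("std");

fn anonType(comptime x: anytype) type {
    // The untyped anonymous literal gets a "normal" struct type keyed on
    // this AST node and the literal's structure (comptime-known field
    // values included), rather than a special anonymous struct type.
    const val = .{ .a = "hello", .b = x };
    return @TypeOf(val);
}

test "anon literal type identity follows node and structure" {
    try std.testing.expect(anonType(1) == anonType(1));
    try std.testing.expect(anonType(1) != anonType(2));
}

Because an untyped anonymous literal no longer coerces structurally to a named struct type (see the deleted "type coercion of anon struct literal to struct" tests above), declarations such as `std_options` now carry an explicit annotation, e.g. `pub const std_options: std.Options = .{ ... }`, which is what the test_runner, compare_output, and standalone test updates in this patch reflect.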