diff --git a/CMakeLists.txt b/CMakeLists.txt
index 081872fbdf..c14a938353 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -564,7 +564,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
-    "${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig"
+    "${CMAKE_SOURCE_DIR}/src/codegen/c/Type.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
     "${CMAKE_SOURCE_DIR}/src/glibc.zig"
diff --git a/build.zig b/build.zig
index f661bd6887..36bb1e0f73 100644
--- a/build.zig
+++ b/build.zig
@@ -16,9 +16,7 @@ pub fn build(b: *std.Build) !void {
     const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
     const target = t: {
         var default_target: std.zig.CrossTarget = .{};
-        if (only_c) {
-            default_target.ofmt = .c;
-        }
+        default_target.ofmt = b.option(std.Target.ObjectFormat, "ofmt", "Object format to target") orelse if (only_c) .c else null;
         break :t b.standardTargetOptions(.{ .default_target = default_target });
     };
diff --git a/lib/std/c/darwin.zig b/lib/std/c/darwin.zig
index 9827ef6493..dfc0fd56ea 100644
--- a/lib/std/c/darwin.zig
+++ b/lib/std/c/darwin.zig
@@ -1150,8 +1150,8 @@ pub const siginfo_t = extern struct {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with function name.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     handler: extern union {
         handler: ?handler_fn,
diff --git a/lib/std/c/dragonfly.zig b/lib/std/c/dragonfly.zig
index 0026549202..cc72aaa072 100644
--- a/lib/std/c/dragonfly.zig
+++ b/lib/std/c/dragonfly.zig
@@ -690,8 +690,8 @@ pub const empty_sigset = sigset_t{ .__bits = [_]c_uint{0} ** _SIG_WORDS };
 pub const sig_atomic_t = c_int;
 
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal handler
     handler: extern union {
@@ -702,7 +702,7 @@ pub const Sigaction = extern struct {
     mask: sigset_t,
 };
 
-pub const sig_t = *const fn (c_int) callconv(.C) void;
+pub const sig_t = *const fn (i32) callconv(.C) void;
 
 pub const SOCK = struct {
     pub const STREAM = 1;
diff --git a/lib/std/c/freebsd.zig b/lib/std/c/freebsd.zig
index 5e2b6bd315..a60f5de525 100644
--- a/lib/std/c/freebsd.zig
+++ b/lib/std/c/freebsd.zig
@@ -1171,8 +1171,8 @@ const NSIG = 32;
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal handler
     handler: extern union {
diff --git a/lib/std/c/haiku.zig b/lib/std/c/haiku.zig
index f4c928c79c..d75dd3bf00 100644
--- a/lib/std/c/haiku.zig
+++ b/lib/std/c/haiku.zig
@@ -501,7 +501,7 @@ pub const siginfo_t = extern struct {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
     pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *allowzero anyopaque, ?*anyopaque) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal handler
     handler: extern union {
diff --git a/lib/std/c/netbsd.zig b/lib/std/c/netbsd.zig
index 61be065eaa..3ec6de59b2 100644
--- a/lib/std/c/netbsd.zig
+++ b/lib/std/c/netbsd.zig
@@ -864,8 +864,8 @@ pub const SIG = struct {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal handler
     handler: extern union {
diff --git a/lib/std/c/openbsd.zig b/lib/std/c/openbsd.zig
index 98a93ac86f..bb82168ca3 100644
--- a/lib/std/c/openbsd.zig
+++ b/lib/std/c/openbsd.zig
@@ -842,8 +842,8 @@ pub const SIG = struct {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal handler
     handler: extern union {
diff --git a/lib/std/c/solaris.zig b/lib/std/c/solaris.zig
index c497512299..4f08c32b03 100644
--- a/lib/std/c/solaris.zig
+++ b/lib/std/c/solaris.zig
@@ -874,8 +874,8 @@ pub const SIG = struct {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     /// signal options
     flags: c_uint,
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 824adc0261..d1d6201b80 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -2570,7 +2570,7 @@ fn resetSegfaultHandler() void {
     updateSegfaultHandler(&act) catch {};
 }
 
-fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn {
+fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn {
     // Reset to the default handler so that if a segfault happens in this handler it will crash
     // the process. Also when this handler returns, the original instruction will be repeated
     // and the resulting segfault will crash the process rather than continually dump stack traces.
diff --git a/lib/std/os/emscripten.zig b/lib/std/os/emscripten.zig
index 76435de7e3..95d550d726 100644
--- a/lib/std/os/emscripten.zig
+++ b/lib/std/os/emscripten.zig
@@ -695,8 +695,8 @@ pub const SIG = struct {
 };
 
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     handler: extern union {
         handler: ?handler_fn,
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index bcafa9fff3..69cef35e98 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -4301,7 +4301,7 @@ pub const all_mask: sigset_t = [_]u32{0xffffffff} ** @typeInfo(sigset_t).Array.l
 pub const app_mask: sigset_t = [2]u32{ 0xfffffffc, 0x7fffffff } ++ [_]u32{0xffffffff} ** 30;
 
 const k_sigaction_funcs = struct {
-    const handler = ?*align(1) const fn (c_int) callconv(.C) void;
+    const handler = ?*align(1) const fn (i32) callconv(.C) void;
     const restorer = *const fn () callconv(.C) void;
 };
 
@@ -4328,8 +4328,8 @@ pub const k_sigaction = switch (native_arch) {
 /// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     handler: extern union {
         handler: ?handler_fn,
diff --git a/lib/std/os/plan9.zig b/lib/std/os/plan9.zig
index d44f228f31..377b6d8c09 100644
--- a/lib/std/os/plan9.zig
+++ b/lib/std/os/plan9.zig
@@ -186,8 +186,8 @@ pub const empty_sigset = 0;
 pub const siginfo_t = c_long;
 
 // TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we include it here to be compatible.
 pub const Sigaction = extern struct {
-    pub const handler_fn = *const fn (c_int) callconv(.C) void;
-    pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+    pub const handler_fn = *const fn (i32) callconv(.C) void;
+    pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
 
     handler: extern union {
         handler: ?handler_fn,
diff --git a/lib/std/start.zig b/lib/std/start.zig
index 3a2b0714f7..bcd39a27bf 100644
--- a/lib/std/start.zig
+++ b/lib/std/start.zig
@@ -597,4 +597,4 @@ fn maybeIgnoreSigpipe() void {
     }
 }
 
-fn noopSigHandler(_: c_int) callconv(.C) void {}
+fn noopSigHandler(_: i32) callconv(.C) void {}
diff --git a/src/Compilation.zig b/src/Compilation.zig
index 442c1075e0..c533f2fae7 100644
--- a/src/Compilation.zig
+++ b/src/Compilation.zig
@@ -3457,14 +3457,18 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
                 .pass = .{ .decl = decl_index },
                 .is_naked_fn = false,
                 .fwd_decl = fwd_decl.toManaged(gpa),
-                .ctypes = .{},
+                .ctype_pool = c_codegen.CType.Pool.empty,
+                .scratch = .{},
                 .anon_decl_deps = .{},
                 .aligned_anon_decls = .{},
             };
             defer {
-                dg.ctypes.deinit(gpa);
-                dg.fwd_decl.deinit();
+                fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
+                fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
+                dg.ctype_pool.deinit(gpa);
+                dg.scratch.deinit(gpa);
             }
+            try dg.ctype_pool.init(gpa);
 
             c_codegen.genHeader(&dg) catch |err| switch (err) {
                 error.AnalysisFail => {
@@ -3473,9 +3477,6 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
                 },
                 else => |e| return e,
             };
-
-            fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
-            fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
         },
     }
 },
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 63d29d3760..67368e1195 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -712,7 +712,7 @@ pub const Key = union(enum) {
         pub fn fieldName(
             self: AnonStructType,
             ip: *const InternPool,
-            index: u32,
+            index: usize,
         ) OptionalNullTerminatedString {
             if (self.names.len == 0) return .none;
@@ -3879,20 +3879,13 @@ pub const Alignment = enum(u6) {
     none = std.math.maxInt(u6),
    _,
 
-    pub fn toByteUnitsOptional(a: Alignment) ?u64 {
+    pub fn toByteUnits(a: Alignment) ?u64 {
         return switch (a) {
             .none => null,
             else => @as(u64, 1) << @intFromEnum(a),
         };
     }
 
-    pub fn toByteUnits(a: Alignment, default: u64) u64 {
-        return switch (a) {
-            .none => default,
-            else => @as(u64, 1) << @intFromEnum(a),
-        };
-    }
-
     pub fn fromByteUnits(n: u64) Alignment {
         if (n == 0) return .none;
         assert(std.math.isPowerOfTwo(n));
diff --git a/src/Module.zig b/src/Module.zig
index d4a4522441..ac2d11d575 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -5846,7 +5846,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
             return @as(u16, @intCast(big.bitCountTwosComp()));
         },
         .lazy_align => |lazy_ty| {
-            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
+            return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits() orelse 0) + @intFromBool(sign);
         },
         .lazy_size => |lazy_ty| {
             return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign);
diff --git a/src/Sema.zig b/src/Sema.zig
index 7db354334a..74991d5769 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -6508,7 +6508,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
     const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
     if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
         return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
-            alignment.toByteUnitsOptional().?,
+            alignment.toByteUnits().?,
         });
     }
 
@@ -17804,7 +17804,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         },
         .Pointer => {
             const info = ty.ptrInfo(mod);
-            const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
+            const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
                 try mod.intValue(Type.comptime_int, alignment)
             else
                 try Type.fromInterned(info.child).lazyAbiAlignment(mod);
@@ -18279,7 +18279,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // type: type,
                 field_ty,
                 // alignment: comptime_int,
-                (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+                (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
             };
             field_val.* = try mod.intern(.{ .aggregate = .{
                 .ty = union_field_ty.toIntern(),
@@ -18436,7 +18436,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // is_comptime: bool,
                 Value.makeBool(is_comptime).toIntern(),
                 // alignment: comptime_int,
-                (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits(0))).toIntern(),
+                (try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits() orelse 0)).toIntern(),
             };
             struct_field_val.* = try mod.intern(.{ .aggregate = .{
                 .ty = struct_field_ty.toIntern(),
@@ -18505,7 +18505,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 // is_comptime: bool,
                 Value.makeBool(field_is_comptime).toIntern(),
                 // alignment: comptime_int,
-                (try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+                (try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
             };
             field_val.* = try mod.intern(.{ .aggregate = .{
                 .ty = struct_field_ty.toIntern(),
@@ -22552,7 +22552,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
     }
     if (ptr_align.compare(.gt, .@"1")) {
-        const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+        const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
         const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
         const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
         const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22572,7 +22572,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
         try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
     }
     if (ptr_align.compare(.gt, .@"1")) {
-        const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+        const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
         const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
         const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
         const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22970,10 +22970,10 @@ fn ptrCastFull(
             const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
             errdefer msg.destroy(sema.gpa);
             try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
-                operand_ty.fmt(mod), src_align.toByteUnits(0),
+                operand_ty.fmt(mod), src_align.toByteUnits() orelse 0,
             });
             try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
-                dest_ty.fmt(mod), dest_align.toByteUnits(0),
+                dest_ty.fmt(mod), dest_align.toByteUnits() orelse 0,
             });
             try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
             break :msg msg;
@@ -23067,7 +23067,7 @@ fn ptrCastFull(
             if (!dest_align.check(addr)) {
                 return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
                     addr,
-                    dest_align.toByteUnitsOptional().?,
+                    dest_align.toByteUnits().?,
                 });
             }
         }
@@ -23110,7 +23110,7 @@ fn ptrCastFull(
         dest_align.compare(.gt, src_align) and
         try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
     {
-        const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
+        const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
         const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
         const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
         const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
@@ -27837,7 +27837,7 @@ fn structFieldPtrByIndex(
     const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
     if (elem_size_bytes * 8 == elem_size_bits) {
         const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
-        const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
+        const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnits().?));
         assert(new_align != .none);
         ptr_ty_data.flags.alignment = new_align;
         ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
@@ -29132,7 +29132,7 @@ fn coerceExtra(
                     .addr = .{ .int = if (dest_info.flags.alignment != .none)
                         (try mod.intValue(
                             Type.usize,
-                            dest_info.flags.alignment.toByteUnitsOptional().?,
+                            dest_info.flags.alignment.toByteUnits().?,
                         )).toIntern()
                     else
                         try mod.intern_pool.getCoercedInts(
@@ -29800,7 +29800,7 @@ const InMemoryCoercionResult = union(enum) {
             },
             .ptr_alignment => |pair| {
                 try sema.errNote(block, src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{
-                    pair.actual.toByteUnits(0), pair.wanted.toByteUnits(0),
+                    pair.actual.toByteUnits() orelse 0, pair.wanted.toByteUnits() orelse 0,
                 });
                 break;
             },
diff --git a/src/Value.zig b/src/Value.zig
index f8f23667e2..7a9775e198 100644
--- a/src/Value.zig
+++ b/src/Value.zig
@@ -176,7 +176,7 @@ pub fn toBigIntAdvanced(
             if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
             const x = switch (int.storage) {
                 else => unreachable,
-                .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+                .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
                 .lazy_size => Type.fromInterned(ty).abiSize(mod),
             };
             return BigIntMutable.init(&space.limbs, x).toConst();
@@ -237,9 +237,9 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
             .u64 => |x| x,
             .i64 => |x| std.math.cast(u64, x),
             .lazy_align => |ty| if (opt_sema) |sema|
-                (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
+                (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0
             else
-                Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+                Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
             .lazy_size => |ty| if (opt_sema) |sema|
                 (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
             else
@@ -289,7 +289,7 @@ pub fn toSignedInt(val: Value, mod: *Module) i64 {
             .big_int => |big_int| big_int.to(i64) catch unreachable,
             .i64 => |x| x,
             .u64 => |x| @intCast(x),
-            .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+            .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
             .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)),
         },
         else => unreachable,
@@ -497,7 +497,7 @@ pub fn writeToPackedMemory(
             inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
             .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
             .lazy_align => |lazy_align| {
-                const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits(0);
+                const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits() orelse 0;
                 std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
             },
             .lazy_size => |lazy_size| {
@@ -890,7 +890,7 @@ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
                 }
                 return @floatFromInt(x);
             },
-            .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+            .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
             .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)),
         },
         .float => |float| switch (float.storage) {
@@ -1529,9 +1529,9 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*
         },
         inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
         .lazy_align => |ty| if (opt_sema) |sema| {
-            return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
+            return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod);
         } else {
-            return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod);
+            return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod);
         },
         .lazy_size => |ty| if (opt_sema) |sema| {
             return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index 6dc2316724..022f2f9bee 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -1296,7 +1296,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
         // subtract it from the current stack pointer
         try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
         // Get negative stack alignment
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } });
        // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
         try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
         // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -2107,7 +2107,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 });
                 try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = operand.offset(),
-                    .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
+                    .alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnits().?),
                 });
             },
             else => try func.emitWValue(operand),
@@ -2384,7 +2384,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
             try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                 std.wasm.simdOpcode(.v128_store),
                 offset + lhs.offset(),
-                @intCast(ty.abiAlignment(mod).toByteUnits(0)),
+                @intCast(ty.abiAlignment(mod).toByteUnits() orelse 0),
             });
             return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
         },
@@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + lhs.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
         },
     );
 }
@@ -2500,7 +2500,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
             try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                 std.wasm.simdOpcode(.v128_load),
                 offset + operand.offset(),
-                @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+                @intCast(ty.abiAlignment(mod).toByteUnits().?),
             });
             try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
             return WValue{ .stack = {} };
@@ -2518,7 +2518,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + operand.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
         },
     );
 
@@ -3456,7 +3456,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
         .i64 => |x| @as(i32, @intCast(x)),
         .u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
         .big_int => unreachable,
-        .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0))))),
+        .lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0)))),
         .lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))),
     };
 }
@@ -4204,7 +4204,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
     if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
         try func.addMemArg(.i32_load16_u, .{
             .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
-            .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
         });
     }
 
@@ -5141,7 +5141,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             try func.mir_extra.appendSlice(func.gpa, &[_]u32{
                 opcode,
                 operand.offset(),
-                @intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
+                @intCast(elem_ty.abiAlignment(mod).toByteUnits().?),
             });
             try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
             try func.addLabel(.local_set, result.local.value);
@@ -6552,7 +6552,7 @@ fn lowerTry(
         const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
         try func.addMemArg(.i32_load16_u, .{
             .offset = err_union.offset() + err_offset,
-            .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
         });
     }
     try func.addTag(.i32_eqz);
@@ -7499,7 +7499,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
         });
         try func.addLabel(.local_tee, val_local.local.value);
         _ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7561,7 +7561,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.emitWValue(ptr);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
         });
     } else {
         _ = try func.load(ptr, ty, 0);
@@ -7622,7 +7622,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 },
                 .{
                     .offset = ptr.offset(),
-                    .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+                    .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
                 },
             );
             const select_res = try func.allocLocal(ty);
@@ -7682,7 +7682,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             };
             try func.addAtomicMemArg(tag, .{
                 .offset = ptr.offset(),
-                .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+                .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
             });
             const result = try WValue.toLocal(.stack, func, ty);
             return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7781,7 +7781,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         try func.lowerToStack(operand);
         try func.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
-            .alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+            .alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
         });
     } else {
         try func.store(ptr, operand, ty, 0);
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 1b584bfe53..b20a322033 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -18959,7 +18959,7 @@ fn resolveCallingConventionValues(
                 const param_size: u31 = @intCast(ty.abiSize(mod));
                 const param_align: u31 =
-                    @intCast(@max(ty.abiAlignment(mod).toByteUnitsOptional().?, 8));
+                    @intCast(@max(ty.abiAlignment(mod).toByteUnits().?, 8));
                 result.stack_byte_count =
                     mem.alignForward(u31, result.stack_byte_count, param_align);
                 arg.* = .{ .load_frame = .{
@@ -19003,7 +19003,7 @@ fn resolveCallingConventionValues(
                 continue;
             }
             const param_size: u31 = @intCast(ty.abiSize(mod));
-            const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
+            const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnits().?);
             result.stack_byte_count =
                 mem.alignForward(u31, result.stack_byte_count, param_align);
             arg.* = .{ .load_frame = .{
@@ -19096,7 +19096,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
             .integer => switch (part_i) {
                 0 => Type.u64,
                 1 => part: {
-                    const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnitsOptional().?;
+                    const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnits().?;
                     const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8));
                     break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) {
                         1 => elem_ty,
diff --git a/src/codegen.zig b/src/codegen.zig
index 004cf7a7be..76be8be974 100644
--- a/src/codegen.zig
+++ b/src/codegen.zig
@@ -548,7 +548,7 @@ pub fn generateSymbol(
             }
 
             const size = struct_type.size(ip).*;
-            const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
+            const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
 
             const padding = math.cast(
                 usize,
@@ -893,12 +893,12 @@ fn genDeclRef(
     // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
     if (ty.castPtrToFn(zcu)) |fn_ty| {
         if (zcu.typeToFunc(fn_ty).?.is_generic) {
-            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+            return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? });
         }
     } else if (ty.zigTypeTag(zcu) == .Pointer) {
         const elem_ty = ty.elemType2(zcu);
         if (!elem_ty.hasRuntimeBits(zcu)) {
-            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+            return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? });
         }
     }
 
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index 246f4e1e0f..cec7cdcd99 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -22,7 +22,7 @@ const Alignment = InternPool.Alignment;
 const BigIntLimb = std.math.big.Limb;
 const BigInt = std.math.big.int;
 
-pub const CType = @import("c/type.zig").CType;
+pub const CType = @import("c/Type.zig");
 
 pub const CValue = union(enum) {
     none: void,
@@ -62,7 +62,7 @@ pub const LazyFnKey = union(enum) {
     never_inline: InternPool.DeclIndex,
 };
 pub const LazyFnValue = struct {
-    fn_name: []const u8,
+    fn_name: CType.String,
     data: Data,
 
     pub const Data = union {
@@ -74,19 +74,19 @@ pub const LazyFnValue = struct {
 pub const LazyFnMap = std.AutoArrayHashMapUnmanaged(LazyFnKey, LazyFnValue);
 
 const Local = struct {
-    cty_idx: CType.Index,
+    ctype: CType,
     flags: packed struct(u32) {
         alignas: CType.AlignAs,
         _: u20 = undefined,
     },
 
     pub fn getType(local: Local) LocalType {
-        return .{ .cty_idx = local.cty_idx, .alignas = local.flags.alignas };
+        return .{ .ctype = local.ctype, .alignas = local.flags.alignas };
     }
 };
 
 const LocalIndex = u16;
-const LocalType = struct { cty_idx: CType.Index, alignas: CType.AlignAs };
+const LocalType = struct { ctype: CType, alignas: CType.AlignAs };
 const LocalsList = std.AutoArrayHashMapUnmanaged(LocalIndex, void);
 const LocalsMap = std.AutoArrayHashMapUnmanaged(LocalType, LocalsList);
 
@@ -193,6 +193,7 @@ const reserved_idents = std.ComptimeStringMap(void, .{
     .{ "switch", {} },
     .{ "thread_local", {} },
     .{ "typedef", {} },
+    .{ "typeof", {} },
     .{ "uint16_t", {} },
     .{ "uint32_t", {} },
     .{ "uint64_t", {} },
@@ -309,12 +310,14 @@ pub const Function = struct {
         const result: CValue = if (lowersToArray(ty, zcu)) result: {
             const writer = f.object.codeHeaderWriter();
-            const alignment: Alignment = .none;
-            const decl_c_value = try f.allocLocalValue(ty, alignment);
+            const decl_c_value = try f.allocLocalValue(.{
+                .ctype = try f.ctypeFromType(ty, .complete),
+                .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
+            });
             const gpa = f.object.dg.gpa;
             try f.allocs.put(gpa, decl_c_value.new_local, false);
             try writer.writeAll("static ");
-            try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, alignment, .complete);
+            try f.object.dg.renderTypeAndName(writer, ty, decl_c_value, Const, .none, .complete);
             try writer.writeAll(" = ");
             try f.object.dg.renderValue(writer, val, .StaticInitializer);
             try writer.writeAll(";\n ");
@@ -335,20 +338,33 @@ pub const Function = struct {
     /// Skips the reuse logic. This function should be used for any persistent allocation, i.e.
     /// those which go into `allocs`. This function does not add the resulting local into `allocs`;
     /// that responsibility lies with the caller.
-    fn allocLocalValue(f: *Function, ty: Type, alignment: Alignment) !CValue {
-        const zcu = f.object.dg.zcu;
-        const gpa = f.object.dg.gpa;
-        try f.locals.append(gpa, .{
-            .cty_idx = try f.typeToIndex(ty, .complete),
-            .flags = .{
-                .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(zcu)),
-            },
+    fn allocLocalValue(f: *Function, local_type: LocalType) !CValue {
+        try f.locals.ensureUnusedCapacity(f.object.dg.gpa, 1);
+        defer f.locals.appendAssumeCapacity(.{
+            .ctype = local_type.ctype,
+            .flags = .{ .alignas = local_type.alignas },
         });
-        return .{ .new_local = @intCast(f.locals.items.len - 1) };
+        return .{ .new_local = @intCast(f.locals.items.len) };
     }
 
     fn allocLocal(f: *Function, inst: ?Air.Inst.Index, ty: Type) !CValue {
-        const result = try f.allocAlignedLocal(ty, .{}, .none);
+        return f.allocAlignedLocal(inst, .{
+            .ctype = try f.ctypeFromType(ty, .complete),
+            .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(f.object.dg.zcu)),
+        });
+    }
+
+    /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
+    /// not be used for persistent locals (i.e. those in `allocs`).
+    fn allocAlignedLocal(f: *Function, inst: ?Air.Inst.Index, local_type: LocalType) !CValue {
+        const result: CValue = result: {
+            if (f.free_locals_map.getPtr(local_type)) |locals_list| {
+                if (locals_list.popOrNull()) |local_entry| {
+                    break :result .{ .new_local = local_entry.key };
+                }
+            }
+            break :result try f.allocLocalValue(local_type);
+        };
         if (inst) |i| {
             log.debug("%{d}: allocating t{d}", .{ i, result.new_local });
         } else {
@@ -357,22 +373,6 @@ pub const Function = struct {
         return result;
     }
 
-    /// Only allocates the local; does not print anything. Will attempt to re-use locals, so should
-    /// not be used for persistent locals (i.e. those in `allocs`).
-    fn allocAlignedLocal(f: *Function, ty: Type, _: CQualifiers, alignment: Alignment) !CValue {
-        const zcu = f.object.dg.zcu;
-        if (f.free_locals_map.getPtr(.{
-            .cty_idx = try f.typeToIndex(ty, .complete),
-            .alignas = CType.AlignAs.init(alignment, ty.abiAlignment(zcu)),
-        })) |locals_list| {
-            if (locals_list.popOrNull()) |local_entry| {
-                return .{ .new_local = local_entry.key };
-            }
-        }
-
-        return f.allocLocalValue(ty, alignment);
-    }
-
     fn writeCValue(f: *Function, w: anytype, c_value: CValue, location: ValueRenderLocation) !void {
         switch (c_value) {
             .none => unreachable,
@@ -380,15 +380,20 @@ pub const Function = struct {
             .local_ref => |i| {
                 const local = &f.locals.items[i];
                 if (local.flags.alignas.abiOrder().compare(.lt)) {
-                    const zcu = f.object.dg.zcu;
-                    const pointee_ty = try zcu.intType(.unsigned, @min(
-                        local.flags.alignas.@"align".toByteUnitsOptional().?,
-                        f.object.dg.mod.resolved_target.result.maxIntAlignment(),
-                    ) * 8);
-                    const ptr_ty = try zcu.singleMutPtrType(pointee_ty);
+                    const gpa = f.object.dg.gpa;
+                    const mod = f.object.dg.mod;
+                    const ctype_pool = &f.object.dg.ctype_pool;
 
                     try w.writeByte('(');
-                    try f.renderType(w, ptr_ty);
+                    try f.renderCType(w, try ctype_pool.getPointer(gpa, .{
+                        .elem_ctype = try ctype_pool.fromIntInfo(gpa, .{
+                            .signedness = .unsigned,
+                            .bits = @min(
+                                local.flags.alignas.toByteUnits(),
+                                mod.resolved_target.result.maxIntAlignment(),
+                            ) * 8,
+                        }, mod, .forward),
+                    }));
                     try w.writeByte(')');
                 }
                 try w.print("&t{d}", .{i});
@@ -460,28 +465,20 @@ pub const Function = struct {
         return f.object.dg.fail(format, args);
     }
 
-    fn indexToCType(f: *Function, idx: CType.Index) CType {
-        return f.object.dg.indexToCType(idx);
+    fn ctypeFromType(f: *Function, ty: Type, kind: CType.Kind) !CType {
+        return f.object.dg.ctypeFromType(ty, kind);
     }
 
-    fn typeToIndex(f: *Function, ty: Type, kind: CType.Kind) !CType.Index {
-        return f.object.dg.typeToIndex(ty, kind);
+    fn byteSize(f: *Function, ctype: CType) u64 {
+        return f.object.dg.byteSize(ctype);
     }
 
-    fn typeToCType(f: *Function, ty: Type, kind: CType.Kind) !CType {
-        return f.object.dg.typeToCType(ty, kind);
+    fn renderType(f: *Function, w: anytype, ctype: Type) !void {
+        return f.object.dg.renderType(w, ctype);
     }
 
-    fn byteSize(f: *Function, cty: CType) u64 {
-        return f.object.dg.byteSize(cty);
-    }
-
-    fn renderType(f: *Function, w: anytype, t: Type) !void {
-        return f.object.dg.renderType(w, t);
-    }
-
-    fn renderCType(f: *Function, w: anytype, t: CType.Index) !void {
-        return f.object.dg.renderCType(w, t);
+    fn renderCType(f: *Function, w: anytype, ctype: CType) !void {
+        return f.object.dg.renderCType(w, ctype);
     }
 
     fn renderIntCast(f: *Function, w: anytype, dest_ty: Type, src: CValue, v: Vectorize, src_ty: Type, location: ValueRenderLocation) !void {
@@ -494,21 +491,19 @@ pub const Function = struct {
     fn getLazyFnName(f: *Function, key: LazyFnKey, data: LazyFnValue.Data) ![]const u8 {
         const gpa = f.object.dg.gpa;
+        const zcu = f.object.dg.zcu;
+        const ctype_pool = &f.object.dg.ctype_pool;
+
         const gop = try f.lazy_fns.getOrPut(gpa, key);
         if (!gop.found_existing) {
             errdefer _ = f.lazy_fns.pop();
 
-            var promoted = f.object.dg.ctypes.promote(gpa);
-            defer f.object.dg.ctypes.demote(promoted);
-            const arena = promoted.arena.allocator();
-            const zcu = f.object.dg.zcu;
-
             gop.value_ptr.* = .{
                 .fn_name = switch (key) {
                     .tag_name,
                     .never_tail,
                    .never_inline,
-                    => |owner_decl| try std.fmt.allocPrint(arena, "zig_{s}_{}__{d}", .{
+                    => |owner_decl| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
                         @tagName(key),
                         fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)),
                         @intFromEnum(owner_decl),
@@ -521,7 +516,7 @@ pub const Function = struct {
                 },
             };
         }
-        return gop.value_ptr.fn_name;
+        return gop.value_ptr.fn_name.slice(ctype_pool);
     }
 
     pub fn deinit(f: *Function) void {
@@ -532,7 +527,6 @@
         f.blocks.deinit(gpa);
         f.value_map.deinit();
         f.lazy_fns.deinit(gpa);
-        f.object.dg.ctypes.deinit(gpa);
     }
 
     fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
@@ -575,7 +569,8 @@ pub const DeclGen = struct {
     /// This is a borrowed reference from `link.C`.
     fwd_decl: std.ArrayList(u8),
     error_msg: ?*Zcu.ErrorMsg,
-    ctypes: CType.Store,
+    ctype_pool: CType.Pool,
+    scratch: std.ArrayListUnmanaged(u32),
     /// Keeps track of anonymous decls that need to be rendered before this
     /// (named) Decl in the output C code.
     anon_decl_deps: std.AutoArrayHashMapUnmanaged(InternPool.Index, C.DeclBlock),
@@ -610,6 +605,7 @@
     ) error{ OutOfMemory, AnalysisFail }!void {
         const zcu = dg.zcu;
         const ip = &zcu.intern_pool;
+        const ctype_pool = &dg.ctype_pool;
         const decl_val = Value.fromInterned(anon_decl.val);
         const decl_ty = decl_val.typeOf(zcu);
 
@@ -631,10 +627,10 @@
             // them). The analysis until now should ensure that the C function
             // pointers are compatible. If they are not, then there is a bug
             // somewhere and we should let the C compiler tell us about it.
-            const child_cty = (try dg.typeToCType(ptr_ty, .complete)).cast(CType.Payload.Child).?.data;
-            const decl_cty = try dg.typeToIndex(decl_ty, .complete);
-            const need_cast = child_cty != decl_cty and
-                (dg.indexToCType(child_cty).tag() != .function or dg.indexToCType(decl_cty).tag() != .function);
+            const elem_ctype = (try dg.ctypeFromType(ptr_ty, .complete)).info(ctype_pool).pointer.elem_ctype;
+            const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
+            const need_cast = !elem_ctype.eql(decl_ctype) and
+                (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
             if (need_cast) {
                 try writer.writeAll("((");
                 try dg.renderType(writer, ptr_ty);
@@ -655,7 +651,7 @@
         const explicit_alignment = ptr_type.flags.alignment;
         if (explicit_alignment != .none) {
            const abi_alignment = Type.fromInterned(ptr_type.child).abiAlignment(zcu);
-            if (explicit_alignment.compareStrict(.gt, abi_alignment)) {
+            if (explicit_alignment.order(abi_alignment).compare(.gt)) {
                 const aligned_gop = try dg.aligned_anon_decls.getOrPut(dg.gpa, anon_decl.val);
                 aligned_gop.value_ptr.* = if (aligned_gop.found_existing)
                     aligned_gop.value_ptr.maxStrict(explicit_alignment)
@@ -673,6 +669,7 @@
         location: ValueRenderLocation,
     ) error{ OutOfMemory, AnalysisFail }!void {
         const zcu = dg.zcu;
+        const ctype_pool = &dg.ctype_pool;
         const decl = zcu.declPtr(decl_index);
         assert(decl.has_tv);
 
@@ -695,10 +692,10 @@
             // them). The analysis until now should ensure that the C function
             // pointers are compatible. If they are not, then there is a bug
             // somewhere and we should let the C compiler tell us about it.
-            const child_cty = (try dg.typeToCType(ty, .complete)).cast(CType.Payload.Child).?.data;
-            const decl_cty = try dg.typeToIndex(decl_ty, .complete);
-            const need_cast = child_cty != decl_cty and
-                (dg.indexToCType(child_cty).tag() != .function or dg.indexToCType(decl_cty).tag() != .function);
+            const elem_ctype = (try dg.ctypeFromType(ty, .complete)).info(ctype_pool).pointer.elem_ctype;
+            const decl_ctype = try dg.ctypeFromType(decl_ty, .complete);
+            const need_cast = !elem_ctype.eql(decl_ctype) and
+                (elem_ctype.info(ctype_pool) != .function or decl_ctype.info(ctype_pool) != .function);
             if (need_cast) {
                 try writer.writeAll("((");
                 try dg.renderType(writer, ty);
@@ -720,31 +717,31 @@
         const zcu = dg.zcu;
         const ip = &zcu.intern_pool;
         const ptr_ty = Type.fromInterned(ip.typeOf(ptr_val));
-        const ptr_cty = try dg.typeToIndex(ptr_ty, .complete);
-        const ptr_child_cty = dg.indexToCType(ptr_cty).cast(CType.Payload.Child).?.data;
+        const ptr_ctype = try dg.ctypeFromType(ptr_ty, .complete);
+        const ptr_child_ctype = ptr_ctype.info(&dg.ctype_pool).pointer.elem_ctype;
         const ptr = ip.indexToKey(ptr_val).ptr;
         switch (ptr.addr) {
             .decl => |d| try dg.renderDeclValue(writer, Value.fromInterned(ptr_val), d, location),
             .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, Value.fromInterned(ptr_val), anon_decl, location),
             .int => |int| {
                 try writer.writeByte('(');
-                try dg.renderCType(writer, ptr_cty);
+                try dg.renderCType(writer, ptr_ctype);
                 try writer.print("){x}", .{try dg.fmtIntLiteral(Value.fromInterned(int), .Other)});
             },
             .eu_payload, .opt_payload => |base| {
                 const ptr_base_ty = Type.fromInterned(ip.typeOf(base));
                 const base_ty = ptr_base_ty.childType(zcu);
                 // Ensure complete type definition is visible before accessing fields.
-                _ = try dg.typeToIndex(base_ty, .complete);
+                _ = try dg.ctypeFromType(base_ty, .complete);
                 const payload_ty = switch (ptr.addr) {
                     .eu_payload => base_ty.errorUnionPayload(zcu),
                     .opt_payload => base_ty.optionalChild(zcu),
                     else => unreachable,
                 };
-                const payload_cty = try dg.typeToIndex(payload_ty, .forward);
-                if (ptr_child_cty != payload_cty) {
+                const payload_ctype = try dg.ctypeFromType(payload_ty, .forward);
+                if (!ptr_child_ctype.eql(payload_ctype)) {
                     try writer.writeByte('(');
-                    try dg.renderCType(writer, ptr_cty);
+                    try dg.renderCType(writer, ptr_ctype);
                     try writer.writeByte(')');
                 }
                 try writer.writeAll("&(");
@@ -754,10 +751,10 @@
             .elem => |elem| {
                 const ptr_base_ty = Type.fromInterned(ip.typeOf(elem.base));
                 const elem_ty = ptr_base_ty.elemType2(zcu);
-                const elem_cty = try dg.typeToIndex(elem_ty, .forward);
-                if (ptr_child_cty != elem_cty) {
+                const elem_ctype = try dg.ctypeFromType(elem_ty, .forward);
+                if (!ptr_child_ctype.eql(elem_ctype)) {
                     try writer.writeByte('(');
-                    try dg.renderCType(writer, ptr_cty);
+                    try dg.renderCType(writer, ptr_ctype);
                     try writer.writeByte(')');
                 }
                 try writer.writeAll("&(");
@@ -769,14 +766,14 @@
             .field => |field| {
                 const ptr_base_ty = Type.fromInterned(ip.typeOf(field.base));
                 const base_ty = ptr_base_ty.childType(zcu);
-                // Ensure complete type definition is visible before accessing fields.
-                _ = try dg.typeToIndex(base_ty, .complete);
+                // Ensure complete type definition is available before accessing fields.
+                _ = try dg.ctypeFromType(base_ty, .complete);
                 switch (fieldLocation(ptr_base_ty, ptr_ty, @as(u32, @intCast(field.index)), zcu)) {
                     .begin => {
-                        const ptr_base_cty = try dg.typeToIndex(ptr_base_ty, .complete);
-                        if (ptr_cty != ptr_base_cty) {
+                        const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
+                        if (!ptr_ctype.eql(ptr_base_ctype)) {
                             try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_cty);
+                            try dg.renderCType(writer, ptr_ctype);
                             try writer.writeByte(')');
                         }
                         try dg.renderParentPtr(writer, field.base, location);
@@ -797,10 +794,10 @@
                             },
                             else => unreachable,
                         };
-                        const field_cty = try dg.typeToIndex(field_ty, .forward);
-                        if (ptr_child_cty != field_cty) {
+                        const field_ctype = try dg.ctypeFromType(field_ty, .forward);
+                        if (!ptr_child_ctype.eql(field_ctype)) {
                             try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_cty);
+                            try dg.renderCType(writer, ptr_ctype);
                             try writer.writeByte(')');
                         }
                         try writer.writeAll("&(");
@@ -810,15 +807,15 @@
                     },
                     .byte_offset => |byte_offset| {
                         const u8_ptr_ty = try zcu.adjustPtrTypeChild(ptr_ty, Type.u8);
-                        const u8_ptr_cty = try dg.typeToIndex(u8_ptr_ty, .complete);
+                        const u8_ptr_ctype = try dg.ctypeFromType(u8_ptr_ty, .complete);
 
-                        if (ptr_cty != u8_ptr_cty) {
+                        if (!ptr_ctype.eql(u8_ptr_ctype)) {
                             try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_cty);
+                            try dg.renderCType(writer, ptr_ctype);
                             try writer.writeByte(')');
                         }
                         try writer.writeAll("((");
-                        try dg.renderCType(writer, u8_ptr_cty);
+                        try dg.renderCType(writer, u8_ptr_ctype);
                         try writer.writeByte(')');
                         try dg.renderParentPtr(writer, field.base, location);
                         try writer.print(" + {})", .{
                            try dg.fmtIntLiteral(try zcu.intValue(Type.usize, byte_offset), .Other),
                         });
                     },
                     .end => {
-                        const ptr_base_cty = try dg.typeToIndex(ptr_base_ty, .complete);
-                        if (ptr_cty != ptr_base_cty) {
+                        const ptr_base_ctype = try dg.ctypeFromType(ptr_base_ty, .complete);
+                        if (!ptr_ctype.eql(ptr_base_ctype)) {
                             try writer.writeByte('(');
-                            try dg.renderCType(writer, ptr_cty);
+                            try dg.renderCType(writer, ptr_ctype);
                             try writer.writeByte(')');
                         }
                         try writer.writeAll("((");
@@ -1207,8 +1204,8 @@ pub const DeclGen = struct {
                 try writer.writeByte('}');
             },
             .struct_type => {
-                const struct_type = ip.loadStructType(ty.toIntern());
-                switch (struct_type.layout) {
+                const loaded_struct = ip.loadStructType(ty.toIntern());
+                switch (loaded_struct.layout) {
                     .auto, .@"extern" => {
                         if (!location.isInitializer()) {
                             try writer.writeByte('(');
                            try dg.renderType(writer, ty);
                            try writer.writeByte(')');
                        }
 
                         try writer.writeByte('{');
-                        var empty = true;
-                        for (0..struct_type.field_types.len) |field_index| {
-                            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
-                            if (struct_type.fieldIsComptime(ip, field_index)) continue;
+                        var field_it = loaded_struct.iterateRuntimeOrder(ip);
+                        var need_comma = false;
+                        while (field_it.next()) |field_index| {
+                            const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-                            if (!empty) try writer.writeByte(',');
+                            if (need_comma) try writer.writeByte(',');
+                            need_comma = true;
                             const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
                                 .bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
                                     .ty = field_ty.toIntern(),
                                .repeated_elem => |elem| elem,
                             };
                             try dg.renderValue(writer, Value.fromInterned(field_val), initializer_type);
-
-                            empty = false;
                         }
                         try writer.writeByte('}');
                     },
@@ -1247,8 +1243,8 @@ pub const DeclGen = struct {
                         var bit_offset: u64 = 0;
                         var eff_num_fields: usize = 0;
 
-                        for (0..struct_type.field_types.len) |field_index| {
-                            const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+                        for (0..loaded_struct.field_types.len) |field_index| {
+                            const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                             if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
                             eff_num_fields += 1;
                         }
@@ -1268,8 +1264,8 @@ pub const DeclGen = struct {
                             var eff_index: usize = 0;
                             var needs_closing_paren = false;
-                            for (0..struct_type.field_types.len) |field_index| {
-                                const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+                            for (0..loaded_struct.field_types.len) |field_index| {
+                                const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                 const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
@@ -1304,8 +1300,8 @@ pub const DeclGen = struct {
                             try writer.writeByte('(');
                             // a << a_off | b << b_off | c << c_off
                             var empty = true;
-                            for (0..struct_type.field_types.len) |field_index| {
-                                const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
+                            for (0..loaded_struct.field_types.len) |field_index| {
+                                const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
                                 if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                                 if (!empty) try writer.writeAll(" | ");
@@ -1341,10 +1337,10 @@ pub const DeclGen = struct {
                 else => unreachable,
             },
             .un => |un| {
-                const union_obj = zcu.typeToUnion(ty).?;
+                const loaded_union = ip.loadUnionType(ty.toIntern());
                 if (un.tag == .none) {
                     const backing_ty = try ty.unionBackingType(zcu);
-                    switch (union_obj.getLayout(ip)) {
+                    switch (loaded_union.getLayout(ip)) {
                         .@"packed" => {
                             if (!location.isInitializer()) {
                                 try writer.writeByte('(');
@@ -1376,10 +1372,10 @@ pub const DeclGen = struct {
                     try writer.writeByte(')');
                 }
 
-                const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
-                const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-                const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
-                if (union_obj.getLayout(ip) == .@"packed") {
+                const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
+                const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+                const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
+                if (loaded_union.getLayout(ip) == .@"packed") {
                     if (field_ty.hasRuntimeBits(zcu)) {
                         if (field_ty.isPtrAtRuntime(zcu)) {
                             try writer.writeByte('(');
@@ -1399,7 +1395,7 @@ pub const DeclGen = struct {
                 try writer.writeByte('{');
                 if (ty.unionTagTypeSafety(zcu)) |_| {
-                    const layout = zcu.getUnionLayout(union_obj);
+                    const layout = zcu.getUnionLayout(loaded_union);
                     if (layout.tag_size != 0) {
                         try writer.writeAll(" .tag = ");
                         try dg.renderValue(writer, Value.fromInterned(un.tag), initializer_type);
@@ -1412,8 +1408,8 @@ pub const DeclGen = struct {
                     try writer.print(" .{ } = ", .{fmtIdent(ip.stringToSlice(field_name))});
                     try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
                     try writer.writeByte(' ');
-                } else for (0..union_obj.field_types.len) |this_field_index| {
-                    const this_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[this_field_index]);
+                } else for (0..loaded_union.field_types.len) |this_field_index| {
+                    const this_field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[this_field_index]);
                     if (!this_field_ty.hasRuntimeBits(zcu)) continue;
                     try dg.renderUndefValue(writer, this_field_ty, initializer_type);
                     break;
@@ -1445,12 +1441,14 @@ pub const DeclGen = struct {
                 .ReleaseFast, .ReleaseSmall => false,
             };
 
-            switch (ty.zigTypeTag(zcu)) {
-                .Bool => try writer.writeAll(if (safety_on) "0xaa" else "false"),
-                .Int, .Enum, .ErrorSet => try writer.print("{x}", .{
-                    try dg.fmtIntLiteral(try zcu.undefValue(ty), location),
-                }),
-                .Float => {
+            switch (ty.toIntern()) {
+                .c_longdouble_type,
+                .f16_type,
+                .f32_type,
+                .f64_type,
+                .f80_type,
+                .f128_type,
+                => {
                     const bits = ty.floatBits(target.*);
                     // All unsigned ints matching float types are pre-allocated.
                     const repr_ty = zcu.intType(.unsigned, bits) catch unreachable;
@@ -1468,133 +1466,90 @@
                     }
                     try writer.writeAll(", ");
                     try dg.renderUndefValue(writer, repr_ty, .FunctionArgument);
-                    try writer.writeByte(')');
+                    return writer.writeByte(')');
                 },
-                .Pointer => if (ty.isSlice(zcu)) {
-                    if (!location.isInitializer()) {
-                        try writer.writeByte('(');
-                        try dg.renderType(writer, ty);
-                        try writer.writeByte(')');
-                    }
-
-                    try writer.writeAll("{(");
-                    const ptr_ty = ty.slicePtrFieldType(zcu);
-                    try dg.renderType(writer, ptr_ty);
-                    try writer.print("){x}, {0x}}}", .{try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other)});
-                } else {
-                    try writer.writeAll("((");
-                    try dg.renderType(writer, ty);
-                    try writer.print("){x})", .{try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other)});
-                },
-                .Optional => {
-                    const payload_ty = ty.optionalChild(zcu);
-
-                    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-                        return dg.renderUndefValue(writer, Type.bool, location);
-                    }
-
-                    if (ty.optionalReprIsPayload(zcu)) {
-                        return dg.renderUndefValue(writer, payload_ty, location);
-                    }
-
-                    if (!location.isInitializer()) {
-                        try writer.writeByte('(');
-                        try dg.renderType(writer, ty);
-                        try writer.writeByte(')');
-                    }
-
-                    try writer.writeAll("{ .payload = ");
-                    try dg.renderUndefValue(writer, payload_ty, initializer_type);
-                    try writer.writeAll(", .is_null = ");
-                    try dg.renderUndefValue(writer, Type.bool, initializer_type);
-                    try writer.writeAll(" }");
-                },
-                .Struct => switch (ty.containerLayout(zcu)) {
-                    .auto, .@"extern" => {
-                        if (!location.isInitializer()) {
-                            try writer.writeByte('(');
-                            try dg.renderType(writer, ty);
-                            try writer.writeByte(')');
-                        }
-
-                        try writer.writeByte('{');
-                        var empty = true;
-                        for (0..ty.structFieldCount(zcu)) |field_index| {
-                            if (ty.structFieldIsComptime(field_index, zcu)) continue;
-                            const field_ty = ty.structFieldType(field_index, zcu);
-                            if (!field_ty.hasRuntimeBits(zcu)) continue;
-
-                            if (!empty) try writer.writeByte(',');
-                            try dg.renderUndefValue(writer, field_ty, initializer_type);
-
-                            empty = false;
-                        }
-
-                        try writer.writeByte('}');
-                    },
-                    .@"packed" => try writer.print("{x}", .{
-                        try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+                .bool_type => try writer.writeAll(if (safety_on) "0xaa" else "false"),
+                else => switch (ip.indexToKey(ty.toIntern())) {
+                    .simple_type,
+                    .int_type,
+                    .enum_type,
+                    .error_set_type,
+                    .inferred_error_set_type,
+                    => return writer.print("{x}", .{
+                        try dg.fmtIntLiteral(try zcu.undefValue(ty), location),
                     }),
-                },
-                .Union => {
-                    if (!location.isInitializer()) {
-                        try writer.writeByte('(');
-                        try dg.renderType(writer, ty);
-                        try writer.writeByte(')');
-                    }
-
-                    try writer.writeByte('{');
-                    if (ty.unionTagTypeSafety(zcu)) |tag_ty| {
-                        const layout = ty.unionGetLayout(zcu);
-                        if (layout.tag_size != 0) {
-                            try writer.writeAll(" .tag = ");
-                            try dg.renderUndefValue(writer, tag_ty, initializer_type);
+                    .ptr_type => if (ty.isSlice(zcu)) {
+                        if (!location.isInitializer()) {
+                            try writer.writeByte('(');
+                            try dg.renderType(writer, ty);
+                            try writer.writeByte(')');
                         }
-                        if (ty.unionHasAllZeroBitFieldTypes(zcu)) return try writer.writeByte('}');
-                        if (layout.tag_size != 0) try writer.writeByte(',');
-                        try writer.writeAll(" .payload = {");
-                    }
-                    const union_obj = zcu.typeToUnion(ty).?;
-                    for (0..union_obj.field_types.len) |field_index| {
-                        const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBits(zcu)) continue;
-                        try dg.renderUndefValue(writer, field_ty, initializer_type);
-                        break;
-                    }
-                    if (ty.unionTagTypeSafety(zcu)) |_| try writer.writeByte('}');
-                    try writer.writeByte('}');
-                },
-                .ErrorUnion => {
-                    const payload_ty = ty.errorUnionPayload(zcu);
-                    const error_ty = ty.errorUnionSet(zcu);
-                    if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-                        return dg.renderUndefValue(writer, error_ty, location);
-                    }
-
-                    if (!location.isInitializer()) {
-                        try writer.writeByte('(');
-                        try dg.renderType(writer, ty);
-                        try writer.writeByte(')');
-                    }
-
-                    try writer.writeAll("{ .payload = ");
-                    try dg.renderUndefValue(writer, payload_ty, initializer_type);
-                    try writer.writeAll(", .error = ");
-                    try dg.renderUndefValue(writer, error_ty, initializer_type);
-                    try writer.writeAll(" }");
-                },
-                .Array, .Vector => {
-                    const ai = ty.arrayInfo(zcu);
-                    if (ai.elem_type.eql(Type.u8, zcu)) {
-                        const c_len = ty.arrayLenIncludingSentinel(zcu);
-                        var literal = stringLiteral(writer, c_len);
-                        try literal.start();
-                        var index: u64 = 0;
-                        while (index < c_len) : (index += 1)
-                            try literal.writeChar(0xaa);
-                        try literal.end();
+                        try writer.writeAll("{(");
+                        const ptr_ty = ty.slicePtrFieldType(zcu);
+                        try dg.renderType(writer, ptr_ty);
+                        return writer.print("){x}, {0x}}}", .{
+                            try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+                        });
                     } else {
+                        try writer.writeAll("((");
+                        try dg.renderType(writer, ty);
+                        return writer.print("){x})", .{
+                            try dg.fmtIntLiteral(try zcu.undefValue(Type.usize), .Other),
+                        });
+                    },
+                    .opt_type => {
+                        const payload_ty = ty.optionalChild(zcu);
+
+                        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+                            return dg.renderUndefValue(writer, Type.bool, location);
+                        }
+
+                        if (ty.optionalReprIsPayload(zcu)) {
+                            return dg.renderUndefValue(writer, payload_ty, location);
+                        }
+
+                        if (!location.isInitializer()) {
+                            try writer.writeByte('(');
+                            try dg.renderType(writer, ty);
+                            try writer.writeByte(')');
+                        }
+
+                        try writer.writeAll("{ .payload = ");
+                        try dg.renderUndefValue(writer, payload_ty, initializer_type);
+                        try writer.writeAll(", .is_null = ");
+                        try dg.renderUndefValue(writer, Type.bool, initializer_type);
+                        return writer.writeAll(" }");
+                    },
+                    .struct_type => {
+                        const loaded_struct = ip.loadStructType(ty.toIntern());
+                        switch (loaded_struct.layout) {
+                            .auto, .@"extern" => {
+                                if (!location.isInitializer()) {
+                                    try writer.writeByte('(');
+                                    try dg.renderType(writer, ty);
+                                    try writer.writeByte(')');
+                                }
+
+                                try writer.writeByte('{');
+                                var field_it = loaded_struct.iterateRuntimeOrder(ip);
+                                var need_comma = false;
+                                while (field_it.next()) |field_index| {
+                                    const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+                                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+                                    if (need_comma) try writer.writeByte(',');
+                                    need_comma = true;
+                                    try dg.renderUndefValue(writer, field_ty, initializer_type);
+                                }
+                                return writer.writeByte('}');
+                            },
+                            .@"packed" => return writer.print("{x}", .{
+                                try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+                            }),
+                        }
+                    },
+                    .anon_struct_type => |anon_struct_info| {
                         if (!location.isInitializer()) {
                             try writer.writeByte('(');
                             try dg.renderType(writer, ty);
@@ -1602,32 +1557,125 @@
                         }
 
                         try writer.writeByte('{');
-                        const c_len = ty.arrayLenIncludingSentinel(zcu);
-                        var index: u64 = 0;
-                        while (index < c_len) : (index += 1) {
-                            if (index > 0) try writer.writeAll(", ");
-                            try dg.renderUndefValue(writer, ty.childType(zcu), initializer_type);
-                        }
-                        try writer.writeByte('}');
-                    }
-                },
-                .ComptimeInt,
-                .ComptimeFloat,
-                .Type,
-                .EnumLiteral,
-                .Void,
-                .NoReturn,
-                .Undefined,
-                .Null,
-                .Opaque,
-                => unreachable,
+                        var need_comma = false;
+                        for (0..anon_struct_info.types.len) |field_index| {
+                            if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
+                            const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
+                            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                .Fn,
-                .Frame,
-                .AnyFrame,
-                => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
-                    @tagName(tag),
-                }),
+                            if (need_comma) try writer.writeByte(',');
+                            need_comma = true;
+                            try dg.renderUndefValue(writer, field_ty, initializer_type);
+                        }
+                        return writer.writeByte('}');
+                    },
+                    .union_type => {
+                        const loaded_union = ip.loadUnionType(ty.toIntern());
+                        switch (loaded_union.getLayout(ip)) {
+                            .auto, .@"extern" => {
+                                if (!location.isInitializer()) {
+                                    try writer.writeByte('(');
+                                    try dg.renderType(writer, ty);
+                                    try writer.writeByte(')');
+                                }
+
+                                try writer.writeByte('{');
+                                if (ty.unionTagTypeSafety(zcu)) |tag_ty| {
+                                    const layout = ty.unionGetLayout(zcu);
+                                    if (layout.tag_size != 0) {
+                                        try writer.writeAll(" .tag = ");
+                                        try dg.renderUndefValue(writer, tag_ty, initializer_type);
+                                    }
+                                    if (ty.unionHasAllZeroBitFieldTypes(zcu)) return try writer.writeByte('}');
+                                    if (layout.tag_size != 0) try writer.writeByte(',');
+                                    try writer.writeAll(" .payload = {");
+                                }
+                                for (0..loaded_union.field_types.len) |field_index| {
+                                    const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
+                                    if (!field_ty.hasRuntimeBits(zcu)) continue;
+                                    try dg.renderUndefValue(writer, field_ty, initializer_type);
+                                    break;
+                                }
+                                if (ty.unionTagTypeSafety(zcu)) |_| try writer.writeByte('}');
+                                return writer.writeByte('}');
+                            },
+                            .@"packed" => return writer.print("{x}", .{
+                                try dg.fmtIntLiteral(try zcu.undefValue(ty), .Other),
+                            }),
+                        }
+                    },
+                    .error_union_type => {
+                        const payload_ty = ty.errorUnionPayload(zcu);
+                        const error_ty = ty.errorUnionSet(zcu);
+
+                        if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
+                            return dg.renderUndefValue(writer, error_ty, location);
+                        }
+
+                        if (!location.isInitializer()) {
+                            try writer.writeByte('(');
+                            try dg.renderType(writer, ty);
+                            try writer.writeByte(')');
+                        }
+
+                        try writer.writeAll("{ .payload = ");
+                        try dg.renderUndefValue(writer, payload_ty, initializer_type);
+                        try writer.writeAll(", .error = ");
+                        try dg.renderUndefValue(writer, error_ty, initializer_type);
+                        return writer.writeAll(" }");
+                    },
+                    .array_type, .vector_type => {
+                        const ai = ty.arrayInfo(zcu);
+                        if (ai.elem_type.eql(Type.u8, zcu)) {
+                            const c_len = ty.arrayLenIncludingSentinel(zcu);
+                            var literal = stringLiteral(writer, c_len);
+                            try literal.start();
+                            var index: u64 = 0;
+                            while (index < c_len) : (index += 1)
+                                try literal.writeChar(0xaa);
+                            return literal.end();
+                        } else {
+                            if (!location.isInitializer()) {
+                                try writer.writeByte('(');
+                                try dg.renderType(writer, ty);
+                                try writer.writeByte(')');
+                            }
+
+                            try writer.writeByte('{');
+                            const c_len = ty.arrayLenIncludingSentinel(zcu);
+                            var index: u64 = 0;
+                            while (index < c_len) : (index += 1) {
+                                if (index > 0) try writer.writeAll(", ");
+                                try dg.renderUndefValue(writer, ty.childType(zcu), initializer_type);
+                            }
+                            return writer.writeByte('}');
+                        }
+                    },
+                    .anyframe_type,
+                    .opaque_type,
+                    .func_type,
+                    => unreachable,
+
+                    .undef,
+                    .simple_value,
+                    .variable,
+                    .extern_func,
+                    .func,
+                    .int,
+                    .err,
+                    .error_union,
+                    .enum_literal,
+                    .enum_tag,
+                    .empty_enum_value,
+                    .float,
+                    .ptr,
+                    .slice,
+                    .opt,
+                    .aggregate,
+                    .un,
+                    .memoized_call,
+                    => unreachable,
+                },
             }
         }
 
@@ -1641,13 +1689,12 @@
             ident: []const u8,
         },
     ) !void {
-        const store = &dg.ctypes.set;
         const zcu = dg.zcu;
         const ip = &zcu.intern_pool;
         const fn_decl = zcu.declPtr(fn_decl_index);
         const fn_ty = fn_decl.typeOf(zcu);
-        const fn_cty_idx = try dg.typeToIndex(fn_ty, kind);
+        const fn_ctype = try dg.ctypeFromType(fn_ty, kind);
 
         const fn_info = zcu.typeToFunc(fn_ty).?;
         if (fn_info.cc == .Naked) {
@@ -1661,7 +1708,7 @@
         try w.writeAll("zig_cold ");
         if (fn_info.return_type == .noreturn_type) try w.writeAll("zig_noreturn ");
 
-        var trailing = try renderTypePrefix(dg.pass, store.*, zcu, w, fn_cty_idx, .suffix, .{});
+        var trailing = try renderTypePrefix(dg.pass, &dg.ctype_pool, zcu, w, fn_ctype, .suffix, .{});
 
         if (toCallingConvention(fn_info.cc)) |call_conv| {
             try w.print("{}zig_callconv({s})", .{ trailing, call_conv });
@@ -1670,7 +1717,7 @@
         switch (kind) {
             .forward => {},
-            .complete => if (fn_decl.alignment.toByteUnitsOptional()) |a| {
+            .complete => if (fn_decl.alignment.toByteUnits()) |a| {
                 try w.print("{}zig_align_fn({})", .{ trailing, a });
                 trailing = .maybe_space;
             },
@@ -1687,10 +1734,10 @@
         try renderTypeSuffix(
             dg.pass,
-            store.*,
+            &dg.ctype_pool,
            zcu,
             w,
-            fn_cty_idx,
+            fn_ctype,
             .suffix,
             CQualifiers.init(.{ .@"const" = switch (kind) {
                 .forward => false,
@@ -1701,7 +1748,7 @@
         switch (kind) {
             .forward => {
-                if (fn_decl.alignment.toByteUnitsOptional()) |a| {
+                if (fn_decl.alignment.toByteUnits()) |a| {
                     try w.print(" zig_align_fn({})", .{a});
                 }
                 switch (name) {
@@ -1748,20 +1795,13 @@
         }
     }
 
-    fn indexToCType(dg: *DeclGen, idx: CType.Index) CType {
-        return dg.ctypes.indexToCType(idx);
+    fn ctypeFromType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType {
+        defer std.debug.assert(dg.scratch.items.len == 0);
+        return dg.ctype_pool.fromType(dg.gpa, &dg.scratch, ty, dg.zcu, dg.mod, kind);
     }
 
-    fn typeToIndex(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType.Index {
-        return dg.ctypes.typeToIndex(dg.gpa, ty, dg.zcu, dg.mod, kind);
-    }
-
-    fn typeToCType(dg: *DeclGen, ty: Type, kind: CType.Kind) !CType {
-        return dg.ctypes.typeToCType(dg.gpa, ty, dg.zcu, dg.mod, kind);
-    }
-
-    fn byteSize(dg: *DeclGen, cty: CType) u64 {
-        return cty.byteSize(dg.ctypes.set, dg.mod);
+    fn byteSize(dg: *DeclGen, ctype: CType) u64 {
+        return ctype.byteSize(&dg.ctype_pool, dg.mod);
     }
 
     /// Renders a type as a single identifier, generating intermediate typedefs
     /// | `renderType` | "uint8_t *" | "uint8_t *[10]" |
     ///
     fn renderType(dg: *DeclGen, w: anytype, t: Type) error{ OutOfMemory, AnalysisFail }!void {
-        try dg.renderCType(w, try dg.typeToIndex(t, .complete));
+        try
dg.renderCType(w, try dg.ctypeFromType(t, .complete)); } - fn renderCType(dg: *DeclGen, w: anytype, idx: CType.Index) error{ OutOfMemory, AnalysisFail }!void { - const store = &dg.ctypes.set; - const zcu = dg.zcu; - _ = try renderTypePrefix(dg.pass, store.*, zcu, w, idx, .suffix, .{}); - try renderTypeSuffix(dg.pass, store.*, zcu, w, idx, .suffix, .{}); + fn renderCType(dg: *DeclGen, w: anytype, ctype: CType) error{ OutOfMemory, AnalysisFail }!void { + _ = try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); } const IntCastContext = union(enum) { @@ -1905,32 +1943,37 @@ pub const DeclGen = struct { alignment: Alignment, kind: CType.Kind, ) error{ OutOfMemory, AnalysisFail }!void { - const zcu = dg.zcu; - const alignas = CType.AlignAs.init(alignment, ty.abiAlignment(zcu)); - try dg.renderCTypeAndName(w, try dg.typeToIndex(ty, kind), name, qualifiers, alignas); + try dg.renderCTypeAndName( + w, + try dg.ctypeFromType(ty, kind), + name, + qualifiers, + CType.AlignAs.fromAlignment(.{ + .@"align" = alignment, + .abi = ty.abiAlignment(dg.zcu), + }), + ); } fn renderCTypeAndName( dg: *DeclGen, w: anytype, - cty_idx: CType.Index, + ctype: CType, name: CValue, qualifiers: CQualifiers, alignas: CType.AlignAs, ) error{ OutOfMemory, AnalysisFail }!void { - const store = &dg.ctypes.set; - const zcu = dg.zcu; - switch (alignas.abiOrder()) { .lt => try w.print("zig_under_align({}) ", .{alignas.toByteUnits()}), .eq => {}, .gt => try w.print("zig_align({}) ", .{alignas.toByteUnits()}), } - const trailing = try renderTypePrefix(dg.pass, store.*, zcu, w, cty_idx, .suffix, qualifiers); - try w.print("{}", .{trailing}); + try w.print("{}", .{ + try renderTypePrefix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, qualifiers), + }); try dg.writeName(w, name); - try renderTypeSuffix(dg.pass, store.*, zcu, w, cty_idx, .suffix, .{}); + try renderTypeSuffix(dg.pass, &dg.ctype_pool, dg.zcu, w, ctype, .suffix, .{}); } fn declIsGlobal(dg: *DeclGen, val: Value) bool { @@ -2094,33 +2137,31 @@ pub const DeclGen = struct { } fn renderTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ty: Type) !void { - try dg.renderCTypeForBuiltinFnName(writer, try dg.typeToCType(ty, .complete)); + try dg.renderCTypeForBuiltinFnName(writer, try dg.ctypeFromType(ty, .complete)); } - fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, cty: CType) !void { - switch (cty.tag()) { - else => try writer.print("{c}{d}", .{ - if (cty.isBool()) + fn renderCTypeForBuiltinFnName(dg: *DeclGen, writer: anytype, ctype: CType) !void { + switch (ctype.info(&dg.ctype_pool)) { + else => |ctype_info| try writer.print("{c}{d}", .{ + if (ctype.isBool()) signAbbrev(.unsigned) - else if (cty.isInteger()) - signAbbrev(cty.signedness(dg.mod)) - else if (cty.isFloat()) + else if (ctype.isInteger()) + signAbbrev(ctype.signedness(dg.mod)) + else if (ctype.isFloat()) @as(u8, 'f') - else if (cty.isPointer()) + else if (ctype_info == .pointer) @as(u8, 'p') else - return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{ - cty.tag(), - }), - if (cty.isFloat()) cty.floatActiveBits(dg.mod) else dg.byteSize(cty) * 8, + return dg.fail("TODO: CBE: implement renderTypeForBuiltinFnName for {s} type", .{@tagName(ctype_info)}), + if (ctype.isFloat()) ctype.floatActiveBits(dg.mod) else dg.byteSize(ctype) * 8, }), .array => try writer.writeAll("big"), } } fn renderBuiltinInfo(dg: *DeclGen, writer: anytype, ty: Type, info: BuiltinInfo) !void 
{ - const cty = try dg.typeToCType(ty, .complete); - const is_big = cty.tag() == .array; + const ctype = try dg.ctypeFromType(ty, .complete); + const is_big = ctype.info(&dg.ctype_pool) == .array; switch (info) { .none => if (!is_big) return, .bits => {}, @@ -2155,7 +2196,7 @@ pub const DeclGen = struct { .dg = dg, .int_info = ty.intInfo(zcu), .kind = kind, - .cty = try dg.typeToCType(ty, kind), + .ctype = try dg.ctypeFromType(ty, kind), .val = val, } }; } @@ -2184,122 +2225,74 @@ const RenderCTypeTrailing = enum { } } }; -fn renderTypeName( +fn renderAlignedTypeName(w: anytype, ctype: CType) !void { + try w.print("anon__aligned_{d}", .{@intFromEnum(ctype.index)}); +} +fn renderFwdDeclTypeName( zcu: *Zcu, w: anytype, - idx: CType.Index, - cty: CType, + ctype: CType, + fwd_decl: CType.Info.FwdDecl, attributes: []const u8, ) !void { - switch (cty.tag()) { - else => unreachable, - - .fwd_anon_struct, - .fwd_anon_union, - => |tag| try w.print("{s} {s}anon__lazy_{d}", .{ - @tagName(tag)["fwd_anon_".len..], - attributes, - idx, + try w.print("{s} {s}", .{ @tagName(fwd_decl.tag), attributes }); + switch (fwd_decl.name) { + .anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}), + .owner_decl => |owner_decl| try w.print("{}__{d}", .{ + fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)), + @intFromEnum(owner_decl), }), - - .fwd_struct, - .fwd_union, - => |tag| { - const owner_decl = cty.cast(CType.Payload.FwdDecl).?.data; - try w.print("{s} {s}{}__{d}", .{ - @tagName(tag)["fwd_".len..], - attributes, - fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)), - @intFromEnum(owner_decl), - }); - }, } } fn renderTypePrefix( pass: DeclGen.Pass, - store: CType.Store.Set, + ctype_pool: *const CType.Pool, zcu: *Zcu, w: anytype, - idx: CType.Index, + ctype: CType, parent_fix: CTypeFix, qualifiers: CQualifiers, ) @TypeOf(w).Error!RenderCTypeTrailing { var trailing = RenderCTypeTrailing.maybe_space; + switch (ctype.info(ctype_pool)) { + .basic => |basic_info| try w.writeAll(@tagName(basic_info)), - const cty = store.indexToCType(idx); - switch (cty.tag()) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => |tag| try w.writeAll(@tagName(tag)), - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => |tag| { - const child_idx = cty.cast(CType.Payload.Child).?.data; - const child_trailing = try renderTypePrefix( + .pointer => |pointer_info| { + try w.print("{}*", .{try renderTypePrefix( pass, - store, + ctype_pool, zcu, w, - child_idx, + pointer_info.elem_ctype, .prefix, - CQualifiers.init(.{ .@"const" = switch (tag) { - .pointer, .pointer_volatile => false, - .pointer_const, .pointer_const_volatile => true, - else => unreachable, - }, .@"volatile" = switch (tag) { - .pointer, .pointer_const => false, - .pointer_volatile, .pointer_const_volatile => true, - else => unreachable, - } }), - ); - try w.print("{}*", .{child_trailing}); + CQualifiers.init(.{ + .@"const" = pointer_info.@"const", + .@"volatile" = pointer_info.@"volatile", + }), + )}); trailing = .no_space; }, - .array, - 
.vector, - => { - const child_idx = cty.cast(CType.Payload.Sequence).?.data.elem_type; - const child_trailing = - try renderTypePrefix(pass, store, zcu, w, child_idx, .suffix, qualifiers); + .aligned => switch (pass) { + .decl => |decl_index| try w.print("decl__{d}_{d}", .{ + @intFromEnum(decl_index), @intFromEnum(ctype.index), + }), + .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ + @intFromEnum(anon_decl), @intFromEnum(ctype.index), + }), + .flush => try renderAlignedTypeName(w, ctype), + }, + + .array, .vector => |sequence_info| { + const child_trailing = try renderTypePrefix( + pass, + ctype_pool, + zcu, + w, + sequence_info.elem_ctype, + .suffix, + qualifiers, + ); switch (parent_fix) { .prefix => { try w.print("{}(", .{child_trailing}); @@ -2309,56 +2302,46 @@ fn renderTypePrefix( } }, - .fwd_anon_struct, - .fwd_anon_union, - => switch (pass) { - .decl => |decl_index| try w.print("decl__{d}_{d}", .{ @intFromEnum(decl_index), idx }), - .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ @intFromEnum(anon_decl), idx }), - .flush => try renderTypeName(zcu, w, idx, cty, ""), + .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { + .anon => switch (pass) { + .decl => |decl_index| try w.print("decl__{d}_{d}", .{ + @intFromEnum(decl_index), @intFromEnum(ctype.index), + }), + .anon => |anon_decl| try w.print("anon__{d}_{d}", .{ + @intFromEnum(anon_decl), @intFromEnum(ctype.index), + }), + .flush => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), + }, + .owner_decl => try renderFwdDeclTypeName(zcu, w, ctype, fwd_decl_info, ""), }, - .fwd_struct, - .fwd_union, - => try renderTypeName(zcu, w, idx, cty, ""), - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => |tag| { - try w.print("{s} {s}", .{ - @tagName(tag)["unnamed_".len..], - if (cty.isPacked()) "zig_packed(" else "", - }); - try renderAggregateFields(zcu, w, store, cty, 1); - if (cty.isPacked()) try w.writeByte(')'); - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => return renderTypePrefix( - pass, - store, - zcu, - w, - cty.cast(CType.Payload.Aggregate).?.data.fwd_decl, - parent_fix, - qualifiers, - ), - - .function, - .varargs_function, - => { - const child_trailing = try renderTypePrefix( + .aggregate => |aggregate_info| switch (aggregate_info.name) { + .anon => { + try w.print("{s} {s}", .{ + @tagName(aggregate_info.tag), + if (aggregate_info.@"packed") "zig_packed(" else "", + }); + try renderFields(zcu, w, ctype_pool, aggregate_info, 1); + if (aggregate_info.@"packed") try w.writeByte(')'); + }, + .fwd_decl => |fwd_decl| return renderTypePrefix( pass, - store, + ctype_pool, zcu, w, - cty.cast(CType.Payload.Function).?.data.return_type, + fwd_decl, + parent_fix, + qualifiers, + ), + }, + + .function => |function_info| { + const child_trailing = try renderTypePrefix( + pass, + ctype_pool, + zcu, + w, + function_info.return_ctype, .suffix, .{}, ); @@ -2371,170 +2354,107 @@ fn renderTypePrefix( } }, } - var qualifier_it = qualifiers.iterator(); while (qualifier_it.next()) |qualifier| { try w.print("{}{s}", .{ trailing, @tagName(qualifier) }); trailing = .maybe_space; } - return trailing; } fn renderTypeSuffix( pass: DeclGen.Pass, - store: CType.Store.Set, + ctype_pool: *const CType.Pool, zcu: *Zcu, w: anytype, - idx: CType.Index, + ctype: CType, parent_fix: CTypeFix, qualifiers: CQualifiers, ) @TypeOf(w).Error!void { - const cty = store.indexToCType(idx); - switch (cty.tag()) { - .void, - .char, - .@"signed char", - 
.short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => {}, - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => try renderTypeSuffix( + switch (ctype.info(ctype_pool)) { + .basic, .aligned, .fwd_decl, .aggregate => {}, + .pointer => |pointer_info| try renderTypeSuffix( pass, - store, + ctype_pool, zcu, w, - cty.cast(CType.Payload.Child).?.data, + pointer_info.elem_ctype, .prefix, .{}, ), - - .array, - .vector, - => { + .array, .vector => |sequence_info| { switch (parent_fix) { .prefix => try w.writeByte(')'), .suffix => {}, } - try w.print("[{}]", .{cty.cast(CType.Payload.Sequence).?.data.len}); - try renderTypeSuffix( - pass, - store, - zcu, - w, - cty.cast(CType.Payload.Sequence).?.data.elem_type, - .suffix, - .{}, - ); + try w.print("[{}]", .{sequence_info.len}); + try renderTypeSuffix(pass, ctype_pool, zcu, w, sequence_info.elem_ctype, .suffix, .{}); }, - - .fwd_anon_struct, - .fwd_anon_union, - .fwd_struct, - .fwd_union, - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => {}, - - .function, - .varargs_function, - => |tag| { + .function => |function_info| { switch (parent_fix) { .prefix => try w.writeByte(')'), .suffix => {}, } - const data = cty.cast(CType.Payload.Function).?.data; - try w.writeByte('('); var need_comma = false; - for (data.param_types, 0..) 
|param_type, param_i| { + for (0..function_info.param_ctypes.len) |param_index| { + const param_type = function_info.param_ctypes.at(param_index, ctype_pool); if (need_comma) try w.writeAll(", "); need_comma = true; const trailing = - try renderTypePrefix(pass, store, zcu, w, param_type, .suffix, qualifiers); - if (qualifiers.contains(.@"const")) try w.print("{}a{d}", .{ trailing, param_i }); - try renderTypeSuffix(pass, store, zcu, w, param_type, .suffix, .{}); + try renderTypePrefix(pass, ctype_pool, zcu, w, param_type, .suffix, qualifiers); + if (qualifiers.contains(.@"const")) try w.print("{}a{d}", .{ trailing, param_index }); + try renderTypeSuffix(pass, ctype_pool, zcu, w, param_type, .suffix, .{}); } - switch (tag) { - .function => {}, - .varargs_function => { - if (need_comma) try w.writeAll(", "); - need_comma = true; - try w.writeAll("..."); - }, - else => unreachable, + if (function_info.varargs) { + if (need_comma) try w.writeAll(", "); + need_comma = true; + try w.writeAll("..."); } if (!need_comma) try w.writeAll("void"); try w.writeByte(')'); - try renderTypeSuffix(pass, store, zcu, w, data.return_type, .suffix, .{}); + try renderTypeSuffix(pass, ctype_pool, zcu, w, function_info.return_ctype, .suffix, .{}); }, } } -fn renderAggregateFields( +fn renderFields( zcu: *Zcu, writer: anytype, - store: CType.Store.Set, - cty: CType, + ctype_pool: *const CType.Pool, + aggregate_info: CType.Info.Aggregate, indent: usize, ) !void { try writer.writeAll("{\n"); - const fields = cty.fields(); - for (fields) |field| { + for (0..aggregate_info.fields.len) |field_index| { + const field_info = aggregate_info.fields.at(field_index, ctype_pool); try writer.writeByteNTimes(' ', indent + 1); - switch (field.alignas.abiOrder()) { - .lt => try writer.print("zig_under_align({}) ", .{field.alignas.toByteUnits()}), - .eq => {}, - .gt => try writer.print("zig_align({}) ", .{field.alignas.toByteUnits()}), + switch (field_info.alignas.abiOrder()) { + .lt => { + std.debug.assert(aggregate_info.@"packed"); + if (field_info.alignas.@"align" != .@"1") try writer.print("zig_under_align({}) ", .{ + field_info.alignas.toByteUnits(), + }); + }, + .eq => if (aggregate_info.@"packed" and field_info.alignas.@"align" != .@"1") + try writer.print("zig_align({}) ", .{field_info.alignas.toByteUnits()}), + .gt => { + std.debug.assert(field_info.alignas.@"align" != .@"1"); + try writer.print("zig_align({}) ", .{field_info.alignas.toByteUnits()}); + }, } - const trailing = try renderTypePrefix(.flush, store, zcu, writer, field.type, .suffix, .{}); - try writer.print("{}{ }", .{ trailing, fmtIdent(mem.span(field.name)) }); - try renderTypeSuffix(.flush, store, zcu, writer, field.type, .suffix, .{}); + const trailing = try renderTypePrefix( + .flush, + ctype_pool, + zcu, + writer, + field_info.ctype, + .suffix, + .{}, + ); + try writer.print("{}{ }", .{ trailing, fmtIdent(field_info.name.slice(ctype_pool)) }); + try renderTypeSuffix(.flush, ctype_pool, zcu, writer, field_info.ctype, .suffix, .{}); try writer.writeAll(";\n"); } try writer.writeByteNTimes(' ', indent); @@ -2544,77 +2464,77 @@ fn renderAggregateFields( pub fn genTypeDecl( zcu: *Zcu, writer: anytype, - global_store: CType.Store.Set, - global_idx: CType.Index, + global_ctype_pool: *const CType.Pool, + global_ctype: CType, pass: DeclGen.Pass, - decl_store: CType.Store.Set, - decl_idx: CType.Index, + decl_ctype_pool: *const CType.Pool, + decl_ctype: CType, found_existing: bool, ) !void { - const global_cty = global_store.indexToCType(global_idx); - switch 
(global_cty.tag()) { - .fwd_anon_struct => if (pass != .flush) { - try writer.writeAll("typedef "); - _ = try renderTypePrefix(.flush, global_store, zcu, writer, global_idx, .suffix, .{}); - try writer.writeByte(' '); - _ = try renderTypePrefix(pass, decl_store, zcu, writer, decl_idx, .suffix, .{}); - try writer.writeAll(";\n"); - }, - - .fwd_struct, - .fwd_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => |tag| if (!found_existing) { - switch (tag) { - .fwd_struct, - .fwd_union, - => { - const owner_decl = global_cty.cast(CType.Payload.FwdDecl).?.data; - _ = try renderTypePrefix( - .flush, - global_store, - zcu, - writer, - global_idx, - .suffix, - .{}, - ); - try writer.writeAll("; /* "); - try zcu.declPtr(owner_decl).renderFullyQualifiedName(zcu, writer); - try writer.writeAll(" */\n"); - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => { - const fwd_idx = global_cty.cast(CType.Payload.Aggregate).?.data.fwd_decl; - try renderTypeName( - zcu, - writer, - fwd_idx, - global_store.indexToCType(fwd_idx), - if (global_cty.isPacked()) "zig_packed(" else "", - ); + switch (global_ctype.info(global_ctype_pool)) { + .basic, .pointer, .array, .vector, .function => {}, + .aligned => |aligned_info| { + if (!found_existing) { + try writer.writeAll("typedef "); + try writer.print("{}", .{ + try renderTypePrefix(pass, global_ctype_pool, zcu, writer, aligned_info.ctype, .suffix, .{}), + }); + try renderAlignedTypeName(writer, global_ctype); + try renderTypeSuffix(pass, global_ctype_pool, zcu, writer, aligned_info.ctype, .suffix, .{}); + std.debug.assert(aligned_info.alignas.abiOrder().compare(.lt)); + try writer.print(" zig_under_align({d});\n", .{aligned_info.alignas.toByteUnits()}); + } + switch (pass) { + .decl, .anon => { + try writer.writeAll("typedef "); + _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); try writer.writeByte(' '); - try renderAggregateFields(zcu, writer, global_store, global_cty, 0); - if (global_cty.isPacked()) try writer.writeByte(')'); + _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, writer, decl_ctype, .suffix, .{}); try writer.writeAll(";\n"); }, - - else => unreachable, + .flush => {}, } }, - - else => {}, + .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) { + .anon => switch (pass) { + .decl, .anon => { + try writer.writeAll("typedef "); + _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); + try writer.writeByte(' '); + _ = try renderTypePrefix(pass, decl_ctype_pool, zcu, writer, decl_ctype, .suffix, .{}); + try writer.writeAll(";\n"); + }, + .flush => {}, + }, + .owner_decl => |owner_decl_index| if (!found_existing) { + _ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{}); + try writer.writeByte(';'); + const owner_decl = zcu.declPtr(owner_decl_index); + const owner_mod = zcu.namespacePtr(owner_decl.src_namespace).file_scope.mod; + if (!owner_mod.strip) { + try writer.writeAll(" /* "); + try owner_decl.renderFullyQualifiedName(zcu, writer); + try writer.writeAll(" */"); + } + try writer.writeByte('\n'); + }, + }, + .aggregate => |aggregate_info| switch (aggregate_info.name) { + .anon => {}, + .fwd_decl => |fwd_decl| if (!found_existing) { + try renderFwdDeclTypeName( + zcu, + writer, + fwd_decl, + fwd_decl.info(global_ctype_pool).fwd_decl, + if (aggregate_info.@"packed") "zig_packed(" else "", + ); + try writer.writeByte(' '); 
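// Note on the prefix/suffix split used throughout this rendering code:
// C declarator syntax wraps the identifier, so "pointer to array of 10 int"
// must be printed as "int (*name)[10]" -- "int (*" before the name, ")[10]"
// after it. A minimal self-contained sketch of the same recursion (the Node
// type and function names below are illustrative, not the backend's CType
// API):
const std = @import("std");

const Node = union(enum) {
    int,
    pointer: *const Node,
    array: struct { len: u64, elem: *const Node },
};

fn renderPrefix(w: anytype, t: *const Node) !void {
    switch (t.*) {
        .int => try w.writeAll("int "),
        // Everything left of the identifier; pointers open a paren so a
        // following array suffix binds to the pointer, not the element type.
        .pointer => |elem| {
            try renderPrefix(w, elem);
            try w.writeAll("(*");
        },
        .array => |info| try renderPrefix(w, info.elem),
    }
}

fn renderSuffix(w: anytype, t: *const Node) !void {
    switch (t.*) {
        .int => {},
        .pointer => |elem| {
            try w.writeAll(")");
            try renderSuffix(w, elem);
        },
        .array => |info| {
            try w.print("[{d}]", .{info.len});
            try renderSuffix(w, info.elem);
        },
    }
}

test "pointer to array declarator" {
    var buf: [32]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    const int: Node = .int;
    const arr: Node = .{ .array = .{ .len = 10, .elem = &int } };
    const ptr: Node = .{ .pointer = &arr };
    try renderPrefix(fbs.writer(), &ptr);
    try fbs.writer().writeAll("name");
    try renderSuffix(fbs.writer(), &ptr);
    try std.testing.expectEqualStrings("int (*name)[10]", fbs.getWritten());
}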
+ try renderFields(zcu, writer, global_ctype_pool, aggregate_info, 0); + if (aggregate_info.@"packed") try writer.writeByte(')'); + try writer.writeAll(";\n"); + }, + }, } } @@ -2771,13 +2691,13 @@ fn genExports(o: *Object) !void { } } -pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { +pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFnMap.Entry) !void { const zcu = o.dg.zcu; const ip = &zcu.intern_pool; + const ctype_pool = &o.dg.ctype_pool; const w = o.writer(); const key = lazy_fn.key_ptr.*; const val = lazy_fn.value_ptr; - const fn_name = val.fn_name; switch (key) { .tag_name => { const enum_ty = val.data.tag_name; @@ -2787,7 +2707,7 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try w.writeAll("static "); try o.dg.renderType(w, name_slice_ty); try w.writeByte(' '); - try w.writeAll(fn_name); + try w.writeAll(val.fn_name.slice(lazy_ctype_pool)); try w.writeByte('('); try o.dg.renderTypeAndName(w, enum_ty, .{ .identifier = "tag" }, Const, .none, .complete); try w.writeAll(") {\n switch (tag) {\n"); @@ -2829,8 +2749,9 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { }, .never_tail, .never_inline => |fn_decl_index| { const fn_decl = zcu.declPtr(fn_decl_index); - const fn_cty = try o.dg.typeToCType(fn_decl.typeOf(zcu), .complete); - const fn_info = fn_cty.cast(CType.Payload.Function).?.data; + const fn_ctype = try o.dg.ctypeFromType(fn_decl.typeOf(zcu), .complete); + const fn_info = fn_ctype.info(ctype_pool).function; + const fn_name = val.fn_name.slice(lazy_ctype_pool); const fwd_decl_writer = o.dg.fwdDeclWriter(); try fwd_decl_writer.print("static zig_{s} ", .{@tagName(key)}); @@ -2843,11 +2764,13 @@ pub fn genLazyFn(o: *Object, lazy_fn: LazyFnMap.Entry) !void { try fwd_decl_writer.writeAll(";\n"); try w.print("static zig_{s} ", .{@tagName(key)}); - try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ .ident = fn_name }); + try o.dg.renderFunctionSignature(w, fn_decl_index, .complete, .{ + .ident = fn_name, + }); try w.writeAll(" {\n return "); try o.dg.renderDeclName(w, fn_decl_index, 0); try w.writeByte('('); - for (0..fn_info.param_types.len) |arg| { + for (0..fn_info.param_ctypes.len) |arg| { if (arg > 0) try w.writeAll(", "); try o.dg.writeCValue(w, .{ .arg = arg }); } @@ -2931,7 +2854,7 @@ pub fn genFunc(f: *Function) !void { for (free_locals.values()) |list| { for (list.keys()) |local_index| { const local = f.locals.items[local_index]; - try o.dg.renderCTypeAndName(w, local.cty_idx, .{ .local = local_index }, .{}, local.flags.alignas); + try o.dg.renderCTypeAndName(w, local.ctype, .{ .local = local_index }, .{}, local.flags.alignas); try w.writeAll(";\n "); } } @@ -3451,11 +3374,7 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); const ptr_ty = f.typeOf(bin_op.lhs); - const ptr_align = ptr_ty.ptrAlignment(zcu); - const elem_ty = ptr_ty.elemType2(zcu); - const elem_align = elem_ty.abiAlignment(zcu); - const is_under_aligned = ptr_align.compareStrict(.lt, elem_align); - const elem_has_bits = elem_ty.hasRuntimeBitsIgnoreComptime(zcu); + const elem_has_bits = ptr_ty.elemType2(zcu).hasRuntimeBitsIgnoreComptime(zcu); const ptr = try f.resolveInst(bin_op.lhs); const index = try f.resolveInst(bin_op.rhs); @@ -3470,22 +3389,13 @@ fn airPtrElemPtr(f: *Function, inst: Air.Inst.Index) !CValue { try f.renderType(writer, inst_ty); try writer.writeByte(')'); if (elem_has_bits) try writer.writeByte('&'); - if (elem_has_bits and ptr_ty.ptrSize(zcu) == .One 
and !is_under_aligned) { + if (elem_has_bits and ptr_ty.ptrSize(zcu) == .One) { // It's a pointer to an array, so we need to de-reference. try f.writeCValueDeref(writer, ptr); } else try f.writeCValue(writer, ptr, .Other); if (elem_has_bits) { try writer.writeByte('['); try f.writeCValue(writer, index, .Other); - if (is_under_aligned) { - const factor = @divExact(elem_align.toByteUnitsOptional().?, @min( - ptr_align.toByteUnitsOptional().?, - f.object.dg.mod.resolved_target.result.maxIntAlignment(), - )); - try writer.print(" * {}", .{ - try f.fmtIntLiteral(try zcu.intValue(Type.usize, factor)), - }); - } try writer.writeByte(']'); } try a.end(f, writer); @@ -3577,13 +3487,16 @@ fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue { fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = f.object.dg.zcu; const inst_ty = f.typeOfIndex(inst); - const elem_type = inst_ty.childType(zcu); - if (!elem_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; + const elem_ty = inst_ty.childType(zcu); + if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; - const local = try f.allocLocalValue( - elem_type, - inst_ty.ptrAlignment(zcu), - ); + const local = try f.allocLocalValue(.{ + .ctype = try f.ctypeFromType(elem_ty, .complete), + .alignas = CType.AlignAs.fromAlignment(.{ + .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, + .abi = elem_ty.abiAlignment(zcu), + }), + }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.zcu.gpa; try f.allocs.put(gpa, local.new_local, true); @@ -3596,10 +3509,13 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { const elem_ty = inst_ty.childType(zcu); if (!elem_ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) return .{ .undef = inst_ty }; - const local = try f.allocLocalValue( - elem_ty, - inst_ty.ptrAlignment(zcu), - ); + const local = try f.allocLocalValue(.{ + .ctype = try f.ctypeFromType(elem_ty, .complete), + .alignas = CType.AlignAs.fromAlignment(.{ + .@"align" = inst_ty.ptrInfo(zcu).flags.alignment, + .abi = elem_ty.abiAlignment(zcu), + }), + }); log.debug("%{d}: allocated unfreeable t{d}", .{ inst, local.new_local }); const gpa = f.object.dg.zcu.gpa; try f.allocs.put(gpa, local.new_local, true); @@ -3608,14 +3524,14 @@ fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue { fn airArg(f: *Function, inst: Air.Inst.Index) !CValue { const inst_ty = f.typeOfIndex(inst); - const inst_cty = try f.typeToIndex(inst_ty, .parameter); + const inst_ctype = try f.ctypeFromType(inst_ty, .parameter); const i = f.next_arg_index; f.next_arg_index += 1; - const result: CValue = if (inst_cty != try f.typeToIndex(inst_ty, .complete)) - .{ .arg_array = i } + const result: CValue = if (inst_ctype.eql(try f.ctypeFromType(inst_ty, .complete))) + .{ .arg = i } else - .{ .arg = i }; + .{ .arg_array = i }; if (f.liveness.isUnused(inst)) { const writer = f.object.writer(); @@ -3649,7 +3565,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { try reap(f, inst, &.{ty_op.operand}); const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(zcu)) + ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; const is_array = lowersToArray(src_ty, zcu); @@ -3724,18 +3640,21 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { const op_inst = un_op.toIndex(); const op_ty = f.typeOf(un_op); const ret_ty = if (is_ptr) op_ty.childType(zcu) else op_ty; - 
const lowered_ret_ty = try lowerFnRetTy(ret_ty, zcu); + const ret_ctype = try f.ctypeFromType(ret_ty, .parameter); if (op_inst != null and f.air.instructions.items(.tag)[@intFromEnum(op_inst.?)] == .call_always_tail) { try reap(f, inst, &.{un_op}); _ = try airCall(f, op_inst.?, .always_tail); - } else if (lowered_ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + } else if (ret_ctype.index != .void) { const operand = try f.resolveInst(un_op); try reap(f, inst, &.{un_op}); var deref = is_ptr; const is_array = lowersToArray(ret_ty, zcu); const ret_val = if (is_array) ret_val: { - const array_local = try f.allocLocal(inst, lowered_ret_ty); + const array_local = try f.allocAlignedLocal(inst, .{ + .ctype = ret_ctype, + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(f.object.dg.zcu)), + }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try writer.writeAll(", "); @@ -3921,7 +3840,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue { } const is_aligned = if (ptr_info.flags.alignment != .none) - ptr_info.flags.alignment.compare(.gte, src_ty.abiAlignment(zcu)) + ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte) else true; const is_array = lowersToArray(Type.fromInterned(ptr_info.child), zcu); @@ -4397,22 +4316,23 @@ fn airCall( defer gpa.free(resolved_args); for (resolved_args, args) |*resolved_arg, arg| { const arg_ty = f.typeOf(arg); - const arg_cty = try f.typeToIndex(arg_ty, .parameter); - if (f.indexToCType(arg_cty).tag() == .void) { + const arg_ctype = try f.ctypeFromType(arg_ty, .parameter); + if (arg_ctype.index == .void) { resolved_arg.* = .none; continue; } resolved_arg.* = try f.resolveInst(arg); - if (arg_cty != try f.typeToIndex(arg_ty, .complete)) { - const lowered_arg_ty = try lowerFnRetTy(arg_ty, zcu); - - const array_local = try f.allocLocal(inst, lowered_arg_ty); + if (!arg_ctype.eql(try f.ctypeFromType(arg_ty, .complete))) { + const array_local = try f.allocAlignedLocal(inst, .{ + .ctype = arg_ctype, + .alignas = CType.AlignAs.fromAbiAlignment(arg_ty.abiAlignment(zcu)), + }); try writer.writeAll("memcpy("); try f.writeCValueMember(writer, array_local, .{ .identifier = "array" }); try writer.writeAll(", "); try f.writeCValue(writer, resolved_arg.*, .FunctionArgument); try writer.writeAll(", sizeof("); - try f.renderType(writer, lowered_arg_ty); + try f.renderCType(writer, arg_ctype); try writer.writeAll("));\n"); resolved_arg.* = array_local; } @@ -4433,21 +4353,27 @@ fn airCall( else => unreachable, }).?; const ret_ty = Type.fromInterned(fn_info.return_type); - const lowered_ret_ty = try lowerFnRetTy(ret_ty, zcu); + const ret_ctype: CType = if (ret_ty.isNoReturn(zcu)) + .{ .index = .void } + else + try f.ctypeFromType(ret_ty, .parameter); const result_local = result: { if (modifier == .always_tail) { try writer.writeAll("zig_always_tail return "); break :result .none; - } else if (!lowered_ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { + } else if (ret_ctype.index == .void) { break :result .none; } else if (f.liveness.isUnused(inst)) { try writer.writeByte('('); - try f.renderType(writer, Type.void); + try f.renderCType(writer, .{ .index = .void }); try writer.writeByte(')'); break :result .none; } else { - const local = try f.allocLocal(inst, lowered_ret_ty); + const local = try f.allocAlignedLocal(inst, .{ + .ctype = ret_ctype, + .alignas = CType.AlignAs.fromAbiAlignment(ret_ty.abiAlignment(zcu)), + }); try f.writeCValue(writer, local, .Other); try writer.writeAll(" = 
"); break :result local; @@ -4767,6 +4693,7 @@ const LocalResult = struct { fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !LocalResult { const zcu = f.object.dg.zcu; const target = &f.object.dg.mod.resolved_target.result; + const ctype_pool = &f.object.dg.ctype_pool; const writer = f.object.writer(); if (operand_ty.isAbiInt(zcu) and dest_ty.isAbiInt(zcu)) { @@ -4825,49 +4752,54 @@ fn bitcast(f: *Function, dest_ty: Type, operand: CValue, operand_ty: Type) !Loca // Ensure padding bits have the expected value. if (dest_ty.isAbiInt(zcu)) { - const dest_cty = try f.typeToCType(dest_ty, .complete); + const dest_ctype = try f.ctypeFromType(dest_ty, .complete); const dest_info = dest_ty.intInfo(zcu); var bits: u16 = dest_info.bits; - var wrap_cty: ?CType = null; + var wrap_ctype: ?CType = null; var need_bitcasts = false; try f.writeCValue(writer, local, .Other); - if (dest_cty.castTag(.array)) |pl| { - try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) { - .little => pl.data.len - 1, - .big => 0, - }}); - const elem_cty = f.indexToCType(pl.data.elem_type); - wrap_cty = elem_cty.toSignedness(dest_info.signedness); - need_bitcasts = wrap_cty.?.tag() == .zig_i128; - bits -= 1; - bits %= @as(u16, @intCast(f.byteSize(elem_cty) * 8)); - bits += 1; + switch (dest_ctype.info(ctype_pool)) { + else => {}, + .array => |array_info| { + try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) { + .little => array_info.len - 1, + .big => 0, + }}); + wrap_ctype = array_info.elem_ctype.toSignedness(dest_info.signedness); + need_bitcasts = wrap_ctype.?.index == .zig_i128; + bits -= 1; + bits %= @as(u16, @intCast(f.byteSize(array_info.elem_ctype) * 8)); + bits += 1; + }, } try writer.writeAll(" = "); if (need_bitcasts) { try writer.writeAll("zig_bitCast_"); - try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?.toUnsigned()); + try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_ctype.?.toUnsigned()); try writer.writeByte('('); } try writer.writeAll("zig_wrap_"); const info_ty = try zcu.intType(dest_info.signedness, bits); - if (wrap_cty) |cty| - try f.object.dg.renderCTypeForBuiltinFnName(writer, cty) + if (wrap_ctype) |ctype| + try f.object.dg.renderCTypeForBuiltinFnName(writer, ctype) else try f.object.dg.renderTypeForBuiltinFnName(writer, info_ty); try writer.writeByte('('); if (need_bitcasts) { try writer.writeAll("zig_bitCast_"); - try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_cty.?); + try f.object.dg.renderCTypeForBuiltinFnName(writer, wrap_ctype.?); try writer.writeByte('('); } try f.writeCValue(writer, local, .Other); - if (dest_cty.castTag(.array)) |pl| { - try writer.print("[{d}]", .{switch (target.cpu.arch.endian()) { - .little => pl.data.len - 1, - .big => 0, - }}); + switch (dest_ctype.info(ctype_pool)) { + else => {}, + .array => |array_info| try writer.print("[{d}]", .{ + switch (target.cpu.arch.endian()) { + .little => array_info.len - 1, + .big => 0, + }, + }), } if (need_bitcasts) try writer.writeByte(')'); try f.object.dg.renderBuiltinInfo(writer, info_ty, .bits); @@ -5131,10 +5063,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (is_reg) { const output_ty = if (output == .none) inst_ty else f.typeOf(output).childType(zcu); try writer.writeAll("register "); - const alignment: Alignment = .none; - const local_value = try f.allocLocalValue(output_ty, alignment); + const local_value = try f.allocLocal(inst, output_ty); try f.allocs.put(gpa, local_value.new_local, false); - try 
f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, alignment, .complete); + try f.object.dg.renderTypeAndName(writer, output_ty, local_value, .{}, .none, .complete); try writer.writeAll(" __asm(\""); try writer.writeAll(constraint["={".len .. constraint.len - "}".len]); try writer.writeAll("\")"); @@ -5164,10 +5095,9 @@ fn airAsm(f: *Function, inst: Air.Inst.Index) !CValue { if (asmInputNeedsLocal(f, constraint, input_val)) { const input_ty = f.typeOf(input); if (is_reg) try writer.writeAll("register "); - const alignment: Alignment = .none; - const local_value = try f.allocLocalValue(input_ty, alignment); + const local_value = try f.allocLocal(inst, input_ty); try f.allocs.put(gpa, local_value.new_local, false); - try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, alignment, .complete); + try f.object.dg.renderTypeAndName(writer, input_ty, local_value, Const, .none, .complete); if (is_reg) { try writer.writeAll(" __asm(\""); try writer.writeAll(constraint["{".len .. constraint.len - "}".len]); @@ -5512,59 +5442,74 @@ fn fieldLocation( end: void, } { const ip = &zcu.intern_pool; - const container_ty = container_ptr_ty.childType(zcu); - return switch (container_ty.zigTypeTag(zcu)) { - .Struct => blk: { - if (zcu.typeToPackedStruct(container_ty)) |struct_type| { - if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0) - break :blk .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(struct_type, field_index) + container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) } - else - break :blk .begin; - } - - for (field_index..container_ty.structFieldCount(zcu)) |next_field_index_usize| { - const next_field_index: u32 = @intCast(next_field_index_usize); - if (container_ty.structFieldIsComptime(next_field_index, zcu)) continue; - const field_ty = container_ty.structFieldType(next_field_index, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - - break :blk .{ .field = if (container_ty.isSimpleTuple(zcu)) - .{ .field = next_field_index } - else - .{ .identifier = ip.stringToSlice(container_ty.legacyStructFieldName(next_field_index, zcu)) } }; - } - break :blk if (container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .end else .begin; - }, - .Union => { - const union_obj = zcu.typeToUnion(container_ty).?; - return switch (union_obj.getLayout(ip)) { + const container_ty = Type.fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child); + switch (ip.indexToKey(container_ty.toIntern())) { + .struct_type => { + const loaded_struct = ip.loadStructType(container_ty.toIntern()); + switch (loaded_struct.layout) { .auto, .@"extern" => { - const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + var field_it = loaded_struct.iterateRuntimeOrder(ip); + var before = true; + while (field_it.next()) |next_field_index| { + if (next_field_index == field_index) before = false; + if (before) continue; + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[next_field_index]); + if (!field_type.hasRuntimeBitsIgnoreComptime(zcu)) continue; + return .{ .field = if (loaded_struct.fieldName(ip, next_field_index).unwrap()) |field_name| + .{ .identifier = ip.stringToSlice(field_name) } + else + .{ .field = next_field_index } }; + } + return if (container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .end else .begin; + }, + .@"packed" => return if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0) + .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) + + 
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) } + else + .begin, + } + }, + .anon_struct_type => |anon_struct_info| { + for (field_index..anon_struct_info.types.len) |next_field_index| { + if (anon_struct_info.values.get(ip)[next_field_index] != .none) continue; + const field_type = Type.fromInterned(anon_struct_info.types.get(ip)[next_field_index]); + if (!field_type.hasRuntimeBitsIgnoreComptime(zcu)) continue; + return .{ .field = if (anon_struct_info.fieldName(ip, next_field_index).unwrap()) |field_name| + .{ .identifier = ip.stringToSlice(field_name) } + else + .{ .field = next_field_index } }; + } + return if (container_ty.hasRuntimeBitsIgnoreComptime(zcu)) .end else .begin; + }, + .union_type => { + const loaded_union = ip.loadUnionType(container_ty.toIntern()); + switch (loaded_union.getLayout(ip)) { + .auto, .@"extern" => { + const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) - return if (container_ty.unionTagTypeSafety(zcu) != null and - !container_ty.unionHasAllZeroBitFieldTypes(zcu)) + return if (loaded_union.hasTag(ip) and !container_ty.unionHasAllZeroBitFieldTypes(zcu)) .{ .field = .{ .identifier = "payload" } } else .begin; - const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index]; - return .{ .field = if (container_ty.unionTagTypeSafety(zcu)) |_| + const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index]; + return .{ .field = if (loaded_union.hasTag(ip)) .{ .payload_identifier = ip.stringToSlice(field_name) } else .{ .identifier = ip.stringToSlice(field_name) } }; }, - .@"packed" => .begin, - }; + .@"packed" => return .begin, + } }, - .Pointer => switch (container_ty.ptrSize(zcu)) { + .ptr_type => |ptr_info| switch (ptr_info.flags.size) { + .One, .Many, .C => unreachable, .Slice => switch (field_index) { - 0 => .{ .field = .{ .identifier = "ptr" } }, - 1 => .{ .field = .{ .identifier = "len" } }, + 0 => return .{ .field = .{ .identifier = "ptr" } }, + 1 => return .{ .field = .{ .identifier = "len" } }, else => unreachable, }, - .One, .Many, .C => unreachable, }, else => unreachable, - }; + } } fn airStructFieldPtr(f: *Function, inst: Air.Inst.Index) !CValue { @@ -5653,7 +5598,7 @@ fn fieldPtr( const field_ptr_ty = f.typeOfIndex(inst); // Ensure complete type definition is visible before accessing fields. - _ = try f.typeToIndex(container_ty, .complete); + _ = try f.ctypeFromType(container_ty, .complete); const writer = f.object.writer(); const local = try f.allocLocal(inst, field_ptr_ty); @@ -5708,109 +5653,109 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); // Ensure complete type definition is visible before accessing fields. 
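// The packed-struct case below lowers a field read to integer arithmetic on
// the backing integer: shift right by the field's bit offset (the emitted
// zig_shr_* helper), then wrap to the field's bit width (zig_wrap_*). A
// minimal sketch of that arithmetic, assuming Zig's little-endian packed
// layout (illustrative names, not the backend's helpers):
const std = @import("std");

fn extractPackedField(
    comptime Backing: type,
    comptime Field: type,
    backing: Backing,
    bit_offset: std.math.Log2Int(Backing),
) Field {
    // Shift the field down to bit 0, then truncate to its width.
    return @truncate(backing >> bit_offset);
}

test "packed field read as shift + truncate" {
    const S = packed struct(u32) { a: u8, b: u16, c: u8 };
    const s: S = .{ .a = 0x11, .b = 0x2233, .c = 0x44 };
    const backing: u32 = @bitCast(s);
    // `b` lives at bit offset 8 and is 16 bits wide.
    try std.testing.expectEqual(@as(u16, 0x2233), extractPackedField(u32, u16, backing, 8));
}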
- _ = try f.typeToIndex(struct_ty, .complete); + _ = try f.ctypeFromType(struct_ty, .complete); - const field_name: CValue = switch (zcu.intern_pool.indexToKey(struct_ty.toIntern())) { - .struct_type => switch (struct_ty.containerLayout(zcu)) { - .auto, .@"extern" => if (struct_ty.isSimpleTuple(zcu)) - .{ .field = extra.field_index } - else - .{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, zcu)) }, - .@"packed" => { - const struct_type = zcu.typeToStruct(struct_ty).?; - const int_info = struct_ty.intInfo(zcu); - - const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); - - const bit_offset = zcu.structPackedFieldBitOffset(struct_type, extra.field_index); - - const field_int_signedness = if (inst_ty.isAbiInt(zcu)) - inst_ty.intInfo(zcu).signedness + const field_name: CValue = switch (ip.indexToKey(struct_ty.toIntern())) { + .struct_type => field_name: { + const loaded_struct = ip.loadStructType(struct_ty.toIntern()); + switch (loaded_struct.layout) { + .auto, .@"extern" => break :field_name if (loaded_struct.fieldName(ip, extra.field_index).unwrap()) |field_name| + .{ .identifier = ip.stringToSlice(field_name) } else - .unsigned; - const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu)))); + .{ .field = extra.field_index }, + .@"packed" => { + const int_info = struct_ty.intInfo(zcu); - const temp_local = try f.allocLocal(inst, field_int_ty); - try f.writeCValue(writer, temp_local, .Other); - try writer.writeAll(" = zig_wrap_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); - try writer.writeAll("(("); - try f.renderType(writer, field_int_ty); - try writer.writeByte(')'); - const cant_cast = int_info.bits > 64; - if (cant_cast) { - if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); - try writer.writeAll("zig_lo_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - if (bit_offset > 0) { - try writer.writeAll("zig_shr_"); - try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); - try writer.writeByte('('); - } - try f.writeCValue(writer, struct_byval, .Other); - if (bit_offset > 0) { - try writer.writeAll(", "); - try f.object.dg.renderValue(writer, try zcu.intValue(bit_offset_ty, bit_offset), .FunctionArgument); + const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1)); + + const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index); + + const field_int_signedness = if (inst_ty.isAbiInt(zcu)) + inst_ty.intInfo(zcu).signedness + else + .unsigned; + const field_int_ty = try zcu.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu)))); + + const temp_local = try f.allocLocal(inst, field_int_ty); + try f.writeCValue(writer, temp_local, .Other); + try writer.writeAll(" = zig_wrap_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, field_int_ty); + try writer.writeAll("(("); + try f.renderType(writer, field_int_ty); try writer.writeByte(')'); - } - if (cant_cast) try writer.writeByte(')'); - try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); - try writer.writeAll(");\n"); - if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local; + const cant_cast = int_info.bits > 64; + if (cant_cast) { + if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{}); + try 
writer.writeAll("zig_lo_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + if (bit_offset > 0) { + try writer.writeAll("zig_shr_"); + try f.object.dg.renderTypeForBuiltinFnName(writer, struct_ty); + try writer.writeByte('('); + } + try f.writeCValue(writer, struct_byval, .Other); + if (bit_offset > 0) try writer.print(", {})", .{ + try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)), + }); + if (cant_cast) try writer.writeByte(')'); + try f.object.dg.renderBuiltinInfo(writer, field_int_ty, .bits); + try writer.writeAll(");\n"); + if (inst_ty.eql(field_int_ty, f.object.dg.zcu)) return temp_local; - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy("); - try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); - try writer.writeAll(", "); - try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); - try freeLocal(f, inst, temp_local.new_local, null); - return local; - }, + const local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy("); + try f.writeCValue(writer, .{ .local_ref = local.new_local }, .FunctionArgument); + try writer.writeAll(", "); + try f.writeCValue(writer, .{ .local_ref = temp_local.new_local }, .FunctionArgument); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); + try freeLocal(f, inst, temp_local.new_local, null); + return local; + }, + } }, - - .anon_struct_type => |anon_struct_type| if (anon_struct_type.names.len == 0) - .{ .field = extra.field_index } + .anon_struct_type => |anon_struct_info| if (anon_struct_info.fieldName(ip, extra.field_index).unwrap()) |field_name| + .{ .identifier = ip.stringToSlice(field_name) } else - .{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, zcu)) }, - + .{ .field = extra.field_index }, .union_type => field_name: { - const union_obj = ip.loadUnionType(struct_ty.toIntern()); - if (union_obj.flagsPtr(ip).layout == .@"packed") { - const operand_lval = if (struct_byval == .constant) blk: { - const operand_local = try f.allocLocal(inst, struct_ty); - try f.writeCValue(writer, operand_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, struct_byval, .Initializer); - try writer.writeAll(";\n"); - break :blk operand_local; - } else struct_byval; + const loaded_union = ip.loadUnionType(struct_ty.toIntern()); + switch (loaded_union.getLayout(ip)) { + .auto, .@"extern" => { + const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index]; + break :field_name if (loaded_union.hasTag(ip)) + .{ .payload_identifier = ip.stringToSlice(name) } + else + .{ .identifier = ip.stringToSlice(name) }; + }, + .@"packed" => { + const operand_lval = if (struct_byval == .constant) blk: { + const operand_local = try f.allocLocal(inst, struct_ty); + try f.writeCValue(writer, operand_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, struct_byval, .Initializer); + try writer.writeAll(";\n"); + break :blk operand_local; + } else struct_byval; - const local = try f.allocLocal(inst, inst_ty); - try writer.writeAll("memcpy(&"); - try f.writeCValue(writer, local, .Other); - try writer.writeAll(", &"); - try f.writeCValue(writer, operand_lval, .Other); - try writer.writeAll(", sizeof("); - try f.renderType(writer, inst_ty); - try writer.writeAll("));\n"); + const 
local = try f.allocLocal(inst, inst_ty); + try writer.writeAll("memcpy(&"); + try f.writeCValue(writer, local, .Other); + try writer.writeAll(", &"); + try f.writeCValue(writer, operand_lval, .Other); + try writer.writeAll(", sizeof("); + try f.renderType(writer, inst_ty); + try writer.writeAll("));\n"); - if (struct_byval == .constant) { - try freeLocal(f, inst, operand_lval.new_local, null); - } + if (struct_byval == .constant) { + try freeLocal(f, inst, operand_lval.new_local, null); + } - return local; - } else { - const name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index]; - break :field_name if (union_obj.hasTag(ip)) .{ - .payload_identifier = ip.stringToSlice(name), - } else .{ - .identifier = ip.stringToSlice(name), - }; + return local; + }, } }, else => unreachable, @@ -6089,6 +6034,7 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { const zcu = f.object.dg.zcu; + const ctype_pool = &f.object.dg.ctype_pool; const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const operand = try f.resolveInst(ty_op.operand); @@ -6107,18 +6053,18 @@ fn airArrayToSlice(f: *Function, inst: Air.Inst.Index) !CValue { if (operand == .undef) { try f.writeCValue(writer, .{ .undef = inst_ty.slicePtrFieldType(zcu) }, .Initializer); } else { - const ptr_cty = try f.typeToIndex(ptr_ty, .complete); - const ptr_child_cty = f.indexToCType(ptr_cty).cast(CType.Payload.Child).?.data; + const ptr_ctype = try f.ctypeFromType(ptr_ty, .complete); + const ptr_child_ctype = ptr_ctype.info(ctype_pool).pointer.elem_ctype; const elem_ty = array_ty.childType(zcu); - const elem_cty = try f.typeToIndex(elem_ty, .complete); - if (ptr_child_cty != elem_cty) { + const elem_ctype = try f.ctypeFromType(elem_ty, .complete); + if (!ptr_child_ctype.eql(elem_ctype)) { try writer.writeByte('('); - try f.renderCType(writer, ptr_cty); + try f.renderCType(writer, ptr_ctype); try writer.writeByte(')'); } - const operand_cty = try f.typeToCType(operand_ty, .complete); - const operand_child_cty = operand_cty.cast(CType.Payload.Child).?.data; - if (f.indexToCType(operand_child_cty).tag() == .array) { + const operand_ctype = try f.ctypeFromType(operand_ty, .complete); + const operand_child_ctype = operand_ctype.info(ctype_pool).pointer.elem_ctype; + if (operand_child_ctype.info(ctype_pool) == .array) { try writer.writeByte('&'); try f.writeCValueDeref(writer, operand); try writer.print("[{}]", .{try f.fmtIntLiteral(try zcu.intValue(Type.usize, 0))}); @@ -6229,8 +6175,8 @@ fn airUnBuiltinCall( const operand_ty = f.typeOf(ty_op.operand); const scalar_ty = operand_ty.scalarType(zcu); - const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete); - const ref_ret = inst_scalar_cty.tag() == .array; + const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete); + const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array; const writer = f.object.writer(); const local = try f.allocLocal(inst, inst_ty); @@ -6267,8 +6213,8 @@ fn airBinBuiltinCall( const bin_op = f.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const operand_ty = f.typeOf(bin_op.lhs); - const operand_cty = try f.typeToCType(operand_ty, .complete); - const is_big = operand_cty.tag() == .array; + const operand_ctype = try f.ctypeFromType(operand_ty, .complete); + const is_big = operand_ctype.info(&f.object.dg.ctype_pool) == .array; const lhs = try f.resolveInst(bin_op.lhs); const rhs = try 
@@ -6278,8 +6224,8 @@ fn airBinBuiltinCall(
     const inst_scalar_ty = inst_ty.scalarType(zcu);
     const scalar_ty = operand_ty.scalarType(zcu);

-    const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
-    const ref_ret = inst_scalar_cty.tag() == .array;
+    const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
+    const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array;

     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -6328,8 +6274,8 @@ fn airCmpBuiltinCall(
     const operand_ty = f.typeOf(data.lhs);
     const scalar_ty = operand_ty.scalarType(zcu);

-    const inst_scalar_cty = try f.typeToCType(inst_scalar_ty, .complete);
-    const ref_ret = inst_scalar_cty.tag() == .array;
+    const inst_scalar_ctype = try f.ctypeFromType(inst_scalar_ty, .complete);
+    const ref_ret = inst_scalar_ctype.info(&f.object.dg.ctype_pool) == .array;

     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
@@ -7112,9 +7058,9 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);

-    switch (inst_ty.zigTypeTag(zcu)) {
-        .Array, .Vector => {
-            const a = try Assignment.init(f, inst_ty.childType(zcu));
+    switch (ip.indexToKey(inst_ty.toIntern())) {
+        inline .array_type, .vector_type => |info, tag| {
+            const a = try Assignment.init(f, Type.fromInterned(info.child));
             for (resolved_elements, 0..) |element, i| {
                 try a.restart(f, writer);
                 try f.writeCValue(writer, local, .Other);
@@ -7123,94 +7069,112 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try f.writeCValue(writer, element, .Other);
                 try a.end(f, writer);
             }
-            if (inst_ty.sentinel(zcu)) |sentinel| {
+            if (tag == .array_type and info.sentinel != .none) {
                 try a.restart(f, writer);
                 try f.writeCValue(writer, local, .Other);
-                try writer.print("[{d}]", .{resolved_elements.len});
+                try writer.print("[{d}]", .{info.len});
                 try a.assign(f, writer);
-                try f.object.dg.renderValue(writer, sentinel, .Other);
+                try f.object.dg.renderValue(writer, Value.fromInterned(info.sentinel), .Other);
                 try a.end(f, writer);
             }
         },
-        .Struct => switch (inst_ty.containerLayout(zcu)) {
-            .auto, .@"extern" => for (resolved_elements, 0..) |element, field_index| {
-                if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                const field_ty = inst_ty.structFieldType(field_index, zcu);
-                if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+        .struct_type => {
+            const loaded_struct = ip.loadStructType(inst_ty.toIntern());
+            switch (loaded_struct.layout) {
+                .auto, .@"extern" => {
+                    var field_it = loaded_struct.iterateRuntimeOrder(ip);
+                    while (field_it.next()) |field_index| {
+                        const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                const a = try Assignment.start(f, writer, field_ty);
-                try f.writeCValueMember(writer, local, if (inst_ty.isSimpleTuple(zcu))
-                    .{ .field = field_index }
-                else
-                    .{ .identifier = ip.stringToSlice(inst_ty.legacyStructFieldName(@intCast(field_index), zcu)) });
-                try a.assign(f, writer);
-                try f.writeCValue(writer, element, .Other);
-                try a.end(f, writer);
-            },
-            .@"packed" => {
-                try f.writeCValue(writer, local, .Other);
-                try writer.writeAll(" = ");
-                const int_info = inst_ty.intInfo(zcu);
+                        const a = try Assignment.start(f, writer, field_ty);
+                        try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
+                            .{ .identifier = ip.stringToSlice(field_name) }
+                        else
+                            .{ .field = field_index });
+                        try a.assign(f, writer);
+                        try f.writeCValue(writer, resolved_elements[field_index], .Other);
+                        try a.end(f, writer);
+                    }
+                },
+                .@"packed" => {
+                    try f.writeCValue(writer, local, .Other);
+                    try writer.writeAll(" = ");
+                    const int_info = inst_ty.intInfo(zcu);
-                const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
+                    const bit_offset_ty = try zcu.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
-                var bit_offset: u64 = 0;
+                    var bit_offset: u64 = 0;
-                var empty = true;
-                for (0..elements.len) |field_index| {
-                    if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                    const field_ty = inst_ty.structFieldType(field_index, zcu);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+                    var empty = true;
+                    for (0..elements.len) |field_index| {
+                        if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
+                        const field_ty = inst_ty.structFieldType(field_index, zcu);
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                    if (!empty) {
-                        try writer.writeAll("zig_or_");
+                        if (!empty) {
+                            try writer.writeAll("zig_or_");
+                            try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
+                            try writer.writeByte('(');
+                        }
+                        empty = false;
+                    }
+                    empty = true;
+                    for (resolved_elements, 0..) |element, field_index| {
+                        if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
+                        const field_ty = inst_ty.structFieldType(field_index, zcu);
+                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
+
+                        if (!empty) try writer.writeAll(", ");
+                        // TODO: Skip this entire shift if val is 0?
+                        try writer.writeAll("zig_shlw_");
                         try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
                         try writer.writeByte('(');
-                    }
-                    empty = false;
-                }
-                empty = true;
-                for (resolved_elements, 0..) |element, field_index| {
-                    if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                    const field_ty = inst_ty.structFieldType(field_index, zcu);
-                    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                    if (!empty) try writer.writeAll(", ");
-                    // TODO: Skip this entire shift if val is 0?
-                    try writer.writeAll("zig_shlw_");
-                    try f.object.dg.renderTypeForBuiltinFnName(writer, inst_ty);
-                    try writer.writeByte('(');
-
-                    if (inst_ty.isAbiInt(zcu) and (field_ty.isAbiInt(zcu) or field_ty.isPtrAtRuntime(zcu))) {
-                        try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
-                    } else {
-                        try writer.writeByte('(');
-                        try f.renderType(writer, inst_ty);
-                        try writer.writeByte(')');
-                        if (field_ty.isPtrAtRuntime(zcu)) {
+                        if (inst_ty.isAbiInt(zcu) and (field_ty.isAbiInt(zcu) or field_ty.isPtrAtRuntime(zcu))) {
+                            try f.renderIntCast(writer, inst_ty, element, .{}, field_ty, .FunctionArgument);
+                        } else {
                             try writer.writeByte('(');
-                            try f.renderType(writer, switch (int_info.signedness) {
-                                .unsigned => Type.usize,
-                                .signed => Type.isize,
-                            });
+                            try f.renderType(writer, inst_ty);
                             try writer.writeByte(')');
+                            if (field_ty.isPtrAtRuntime(zcu)) {
+                                try writer.writeByte('(');
+                                try f.renderType(writer, switch (int_info.signedness) {
+                                    .unsigned => Type.usize,
+                                    .signed => Type.isize,
+                                });
+                                try writer.writeByte(')');
+                            }
+                            try f.writeCValue(writer, element, .Other);
                         }
-                        try f.writeCValue(writer, element, .Other);
+
+                        try writer.print(", {}", .{
+                            try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
+                        });
+                        try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
+                        try writer.writeByte(')');
+                        if (!empty) try writer.writeByte(')');
+
+                        bit_offset += field_ty.bitSize(zcu);
+                        empty = false;
                     }
+                    try writer.writeAll(";\n");
+                },
+            }
+        },
+        .anon_struct_type => |anon_struct_info| for (0..anon_struct_info.types.len) |field_index| {
+            if (anon_struct_info.values.get(ip)[field_index] != .none) continue;
+            const field_ty = Type.fromInterned(anon_struct_info.types.get(ip)[field_index]);
+            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                    try writer.print(", {}", .{
-                        try f.fmtIntLiteral(try zcu.intValue(bit_offset_ty, bit_offset)),
-                    });
-                    try f.object.dg.renderBuiltinInfo(writer, inst_ty, .bits);
-                    try writer.writeByte(')');
-                    if (!empty) try writer.writeByte(')');
-
-                    bit_offset += field_ty.bitSize(zcu);
-                    empty = false;
-                }
-
-                try writer.writeAll(";\n");
-            },
+            const a = try Assignment.start(f, writer, field_ty);
+            try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
+                .{ .identifier = ip.stringToSlice(field_name) }
+            else
+                .{ .field = field_index });
+            try a.assign(f, writer);
+            try f.writeCValue(writer, resolved_elements[field_index], .Other);
+            try a.end(f, writer);
         },
         else => unreachable,
     }
@@ -7225,15 +7189,15 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const extra = f.air.extraData(Air.UnionInit, ty_pl.payload).data;

     const union_ty = f.typeOfIndex(inst);
-    const union_obj = zcu.typeToUnion(union_ty).?;
-    const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
+    const loaded_union = ip.loadUnionType(union_ty.toIntern());
+    const field_name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
     const payload_ty = f.typeOf(extra.init);
     const payload = try f.resolveInst(extra.init);
     try reap(f, inst, &.{extra.init});

     const writer = f.object.writer();
     const local = try f.allocLocal(inst, union_ty);
-    if (union_obj.getLayout(ip) == .@"packed") {
+    if (loaded_union.getLayout(ip) == .@"packed") {
         try f.writeCValue(writer, local, .Other);
         try writer.writeAll(" = ");
         try f.writeCValue(writer, payload, .Initializer);
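[Editor's note — illustrative, not part of the patch] The packed branch of `airAggregateInit` above builds the backing integer with nested `zig_or_`/`zig_shlw_` calls: each field value is shifted to its running `bit_offset` and OR-ed in. The same computation in plain Zig, for a hypothetical two-field layout:

```zig
const std = @import("std");

// Pack two fields into a u32 backing integer: shift each value to its bit
// offset and OR it in, exactly what the emitted C builtins compute.
fn packFields(a: u12, b: u20) u32 {
    var backing: u32 = 0;
    backing |= @as(u32, a); // bit_offset 0
    backing |= @as(u32, b) << 12; // bit_offset advanced by @bitSizeOf(u12)
    return backing;
}

test packFields {
    const p: packed struct(u32) { a: u12, b: u20 } = .{ .a = 5, .b = 7 };
    try std.testing.expectEqual(@as(u32, @bitCast(p)), packFields(5, 7));
}
```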
@@ -7465,16 +7429,16 @@ fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
     const inst_ty = f.typeOfIndex(inst);
     const decl_index = f.object.dg.pass.decl;
     const decl = zcu.declPtr(decl_index);
-    const fn_cty = try f.typeToCType(decl.typeOf(zcu), .complete);
-    const param_len = fn_cty.castTag(.varargs_function).?.data.param_types.len;
+    const function_ctype = try f.ctypeFromType(decl.typeOf(zcu), .complete);
+    const params_len = function_ctype.info(&f.object.dg.ctype_pool).function.param_ctypes.len;

     const writer = f.object.writer();
     const local = try f.allocLocal(inst, inst_ty);
     try writer.writeAll("va_start(*(va_list *)&");
     try f.writeCValue(writer, local, .Other);
-    if (param_len > 0) {
+    if (params_len > 0) {
         try writer.writeAll(", ");
-        try f.writeCValue(writer, .{ .arg = param_len - 1 }, .FunctionArgument);
+        try f.writeCValue(writer, .{ .arg = params_len - 1 }, .FunctionArgument);
     }
     try writer.writeAll(");\n");
     return local;
@@ -7823,7 +7787,7 @@ const FormatIntLiteralContext = struct {
     dg: *DeclGen,
     int_info: InternPool.Key.IntType,
     kind: CType.Kind,
-    cty: CType,
+    ctype: CType,
     val: Value,
 };
 fn formatIntLiteral(
@@ -7834,6 +7798,7 @@ fn formatIntLiteral(
 ) @TypeOf(writer).Error!void {
     const zcu = data.dg.zcu;
     const target = &data.dg.mod.resolved_target.result;
+    const ctype_pool = &data.dg.ctype_pool;

     const ExpectedContents = struct {
         const base = 10;
@@ -7867,7 +7832,7 @@ fn formatIntLiteral(
     } else data.val.toBigInt(&int_buf, zcu);
     assert(int.fitsInTwosComp(data.int_info.signedness, data.int_info.bits));

-    const c_bits: usize = @intCast(data.cty.byteSize(data.dg.ctypes.set, data.dg.mod) * 8);
+    const c_bits: usize = @intCast(data.ctype.byteSize(ctype_pool, data.dg.mod) * 8);

     var one_limbs: [BigInt.calcLimbLen(1)]BigIntLimb = undefined;
     const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
@@ -7879,45 +7844,45 @@ fn formatIntLiteral(
     defer allocator.free(wrap.limbs);

     const c_limb_info: struct {
-        cty: CType,
+        ctype: CType,
         count: usize,
         endian: std.builtin.Endian,
         homogeneous: bool,
-    } = switch (data.cty.tag()) {
-        else => .{
-            .cty = CType.initTag(.void),
-            .count = 1,
-            .endian = .little,
+    } = switch (data.ctype.info(ctype_pool)) {
+        .basic => |basic_info| switch (basic_info) {
+            else => .{
+                .ctype = .{ .index = .void },
+                .count = 1,
+                .endian = .little,
+                .homogeneous = true,
+            },
+            .zig_u128, .zig_i128 => .{
+                .ctype = .{ .index = .uint64_t },
+                .count = 2,
+                .endian = .big,
+                .homogeneous = false,
+            },
+        },
+        .array => |array_info| .{
+            .ctype = array_info.elem_ctype,
+            .count = @intCast(array_info.len),
+            .endian = target.cpu.arch.endian(),
             .homogeneous = true,
         },
-        .zig_u128, .zig_i128 => .{
-            .cty = CType.initTag(.uint64_t),
-            .count = 2,
-            .endian = .big,
-            .homogeneous = false,
-        },
-        .array => info: {
-            const array_data = data.cty.castTag(.array).?.data;
-            break :info .{
-                .cty = data.dg.indexToCType(array_data.elem_type),
-                .count = @as(usize, @intCast(array_data.len)),
-                .endian = target.cpu.arch.endian(),
-                .homogeneous = true,
-            };
-        },
+        else => unreachable,
     };
     if (c_limb_info.count == 1) {
         if (wrap.addWrap(int, one, data.int_info.signedness, c_bits) or
             data.int_info.signedness == .signed and wrap.subWrap(int, one, data.int_info.signedness, c_bits))
             return writer.print("{s}_{s}", .{
-                data.cty.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
+                data.ctype.getStandardDefineAbbrev() orelse return writer.print("zig_{s}Int_{c}{d}", .{
                     if (int.positive) "max" else "min", signAbbrev(data.int_info.signedness), c_bits,
                 }),
                 if (int.positive) "MAX" else "MIN",
             });

         if (!int.positive) try writer.writeByte('-');
-        try data.cty.renderLiteralPrefix(writer, data.kind);
+        try data.ctype.renderLiteralPrefix(writer, data.kind, ctype_pool);

         const style: struct { base: u8, case: std.fmt.Case = undefined } = switch (fmt.len) {
             0 => .{ .base = 10 },
@@ -7948,7 +7913,7 @@ fn formatIntLiteral(
         defer allocator.free(string);
         try writer.writeAll(string);
     } else {
-        try data.cty.renderLiteralPrefix(writer, data.kind);
+        try data.ctype.renderLiteralPrefix(writer, data.kind, ctype_pool);
         wrap.convertToTwosComplement(int, data.int_info.signedness, c_bits);
         @memset(wrap.limbs[wrap.len..], 0);
         wrap.len = wrap.limbs.len;
@@ -7958,7 +7923,7 @@ fn formatIntLiteral(
             .signedness = undefined,
             .bits = @as(u16, @intCast(@divExact(c_bits, c_limb_info.count))),
         };
-        var c_limb_cty: CType = undefined;
+        var c_limb_ctype: CType = undefined;

         var limb_offset: usize = 0;
         const most_significant_limb_i = wrap.len - limbs_per_c_limb;
@@ -7979,7 +7944,7 @@ fn formatIntLiteral(
             {
                 // most significant limb is actually signed
                 c_limb_int_info.signedness = .signed;
-                c_limb_cty = c_limb_info.cty.toSigned();
+                c_limb_ctype = c_limb_info.ctype.toSigned();

                 c_limb_mut.positive = wrap.positive;
                 c_limb_mut.truncate(
@@ -7989,7 +7954,7 @@ fn formatIntLiteral(
                 );
             } else {
                 c_limb_int_info.signedness = .unsigned;
-                c_limb_cty = c_limb_info.cty;
+                c_limb_ctype = c_limb_info.ctype;
             }
             if (limb_offset > 0) try writer.writeAll(", ");
@@ -7997,12 +7962,12 @@ fn formatIntLiteral(
                 .dg = data.dg,
                 .int_info = c_limb_int_info,
                 .kind = data.kind,
-                .cty = c_limb_cty,
+                .ctype = c_limb_ctype,
                 .val = try zcu.intValue_big(Type.comptime_int, c_limb_mut.toConst()),
             }, fmt, options, writer);
         }
     }
-    try data.cty.renderLiteralSuffix(writer);
+    try data.ctype.renderLiteralSuffix(writer, ctype_pool);
 }

 const Materialize = struct {
@@ -8045,10 +8010,10 @@ const Materialize = struct {
 };

 const Assignment = struct {
-    cty: CType.Index,
+    ctype: CType,

     pub fn init(f: *Function, ty: Type) !Assignment {
-        return .{ .cty = try f.typeToIndex(ty, .complete) };
+        return .{ .ctype = try f.ctypeFromType(ty, .complete) };
     }

     pub fn start(f: *Function, writer: anytype, ty: Type) !Assignment {
@@ -8076,7 +8041,7 @@ const Assignment = struct {
             .assign => {},
             .memcpy => {
                 try writer.writeAll(", sizeof(");
-                try f.renderCType(writer, self.cty);
+                try f.renderCType(writer, self.ctype);
                 try writer.writeAll("))");
             },
         }
@@ -8084,7 +8049,7 @@ const Assignment = struct {
     }

     fn strategy(self: Assignment, f: *Function) enum { assign, memcpy } {
-        return switch (f.indexToCType(self.cty).tag()) {
+        return switch (self.ctype.info(&f.object.dg.ctype_pool)) {
             else => .assign,
             .array, .vector => .memcpy,
         };
@@ -8129,28 +8094,6 @@ const Vectorize = struct {
     }
 };

-fn lowerFnRetTy(ret_ty: Type, zcu: *Zcu) !Type {
-    if (ret_ty.toIntern() == .noreturn_type) return Type.noreturn;
-
-    if (lowersToArray(ret_ty, zcu)) {
-        const gpa = zcu.gpa;
-        const ip = &zcu.intern_pool;
-        const names = [1]InternPool.NullTerminatedString{
-            try ip.getOrPutString(gpa, "array"),
-        };
-        const types = [1]InternPool.Index{ret_ty.toIntern()};
-        const values = [1]InternPool.Index{.none};
-        const interned = try ip.getAnonStructType(gpa, .{
-            .names = &names,
-            .types = &types,
-            .values = &values,
-        });
-        return Type.fromInterned(interned);
-    }
-
-    return if (ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) ret_ty else Type.void;
-}
-
 fn lowersToArray(ty: Type, zcu: *Zcu) bool {
     return switch (ty.zigTypeTag(zcu)) {
         .Array, .Vector => return true,
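[Editor's note — illustrative, not part of the patch] The new `src/codegen/c/Type.zig` below replaces the old tag-and-payload `CType` objects with a single 32-bit handle: values below `Index.first_pool_index` name basic C types directly, anything else is an offset into an interning `Pool`. A self-contained sketch of that encoding with simplified stand-in types:

```zig
const std = @import("std");

// Simplified stand-in for CType.Index/Pool: the enum's named tags are the
// "basic" types, and the unnamed tail (`_`) addresses interned pool entries.
const Handle = enum(u32) {
    void,
    uint8_t,
    _,

    const first_pool: u32 = 2; // number of named (basic) tags above

    fn fromPool(i: u32) Handle {
        return @enumFromInt(first_pool + i);
    }

    fn isBasic(h: Handle) bool {
        return @intFromEnum(h) < first_pool;
    }
};

test Handle {
    try std.testing.expect(Handle.uint8_t.isBasic());
    try std.testing.expect(!Handle.fromPool(0).isBasic());
}
```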
diff --git a/src/codegen/c/Type.zig b/src/codegen/c/Type.zig
new file mode 100644
index 0000000000..16fb7d708a
--- /dev/null
+++ b/src/codegen/c/Type.zig
@@ -0,0 +1,2472 @@
+index: CType.Index,
+
+pub fn fromPoolIndex(pool_index: usize) CType {
+    return .{ .index = @enumFromInt(CType.Index.first_pool_index + pool_index) };
+}
+
+pub fn toPoolIndex(ctype: CType) ?u32 {
+    const pool_index, const is_basic =
+        @subWithOverflow(@intFromEnum(ctype.index), CType.Index.first_pool_index);
+    return switch (is_basic) {
+        0 => pool_index,
+        1 => null,
+    };
+}
+
+pub fn eql(lhs: CType, rhs: CType) bool {
+    return lhs.index == rhs.index;
+}
+
+pub fn isBool(ctype: CType) bool {
+    return switch (ctype.index) {
+        ._Bool, .bool => true,
+        else => false,
+    };
+}
+
+pub fn isInteger(ctype: CType) bool {
+    return switch (ctype.index) {
+        .char,
+        .@"signed char",
+        .short,
+        .int,
+        .long,
+        .@"long long",
+        .@"unsigned char",
+        .@"unsigned short",
+        .@"unsigned int",
+        .@"unsigned long",
+        .@"unsigned long long",
+        .size_t,
+        .ptrdiff_t,
+        .uint8_t,
+        .int8_t,
+        .uint16_t,
+        .int16_t,
+        .uint32_t,
+        .int32_t,
+        .uint64_t,
+        .int64_t,
+        .uintptr_t,
+        .intptr_t,
+        .zig_u128,
+        .zig_i128,
+        => true,
+        else => false,
+    };
+}
+
+pub fn signedness(ctype: CType, mod: *Module) std.builtin.Signedness {
+    return switch (ctype.index) {
+        .char => mod.resolved_target.result.charSignedness(),
+        .@"signed char",
+        .short,
+        .int,
+        .long,
+        .@"long long",
+        .ptrdiff_t,
+        .int8_t,
+        .int16_t,
+        .int32_t,
+        .int64_t,
+        .intptr_t,
+        .zig_i128,
+        => .signed,
+        .@"unsigned char",
+        .@"unsigned short",
+        .@"unsigned int",
+        .@"unsigned long",
+        .@"unsigned long long",
+        .size_t,
+        .uint8_t,
+        .uint16_t,
+        .uint32_t,
+        .uint64_t,
+        .uintptr_t,
+        .zig_u128,
+        => .unsigned,
+        else => unreachable,
+    };
+}
+
+pub fn isFloat(ctype: CType) bool {
+    return switch (ctype.index) {
+        .float,
+        .double,
+        .@"long double",
+        .zig_f16,
+        .zig_f32,
+        .zig_f64,
+        .zig_f80,
+        .zig_f128,
+        .zig_c_longdouble,
+        => true,
+        else => false,
+    };
+}
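[Editor's note — illustrative, not part of the patch] `toPoolIndex` above distinguishes basic indices from pool indices without a separate comparison: `@subWithOverflow` subtracts `first_pool_index`, and the overflow bit is set exactly when the handle named a basic type. A standalone sketch (the constant's value is hypothetical):

```zig
const std = @import("std");

const first_pool_index: u32 = 40; // hypothetical count of basic entries

fn toPoolIndex(index: u32) ?u32 {
    // Overflow (wrapping below zero) means `index` named a basic type.
    const pool_index, const is_basic = @subWithOverflow(index, first_pool_index);
    return switch (is_basic) {
        0 => pool_index,
        1 => null,
    };
}

test toPoolIndex {
    try std.testing.expectEqual(@as(?u32, null), toPoolIndex(3));
    try std.testing.expectEqual(@as(?u32, 2), toPoolIndex(42));
}
```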
+
+pub fn toSigned(ctype: CType) CType {
+    return switch (ctype.index) {
+        .char, .@"signed char", .@"unsigned char" => .{ .index = .@"signed char" },
+        .short, .@"unsigned short" => .{ .index = .short },
+        .int, .@"unsigned int" => .{ .index = .int },
+        .long, .@"unsigned long" => .{ .index = .long },
+        .@"long long", .@"unsigned long long" => .{ .index = .@"long long" },
+        .size_t, .ptrdiff_t => .{ .index = .ptrdiff_t },
+        .uint8_t, .int8_t => .{ .index = .int8_t },
+        .uint16_t, .int16_t => .{ .index = .int16_t },
+        .uint32_t, .int32_t => .{ .index = .int32_t },
+        .uint64_t, .int64_t => .{ .index = .int64_t },
+        .uintptr_t, .intptr_t => .{ .index = .intptr_t },
+        .zig_u128, .zig_i128 => .{ .index = .zig_i128 },
+        .float,
+        .double,
+        .@"long double",
+        .zig_f16,
+        .zig_f32,
+        .zig_f80,
+        .zig_f128,
+        .zig_c_longdouble,
+        => ctype,
+        else => unreachable,
+    };
+}
+
+pub fn toUnsigned(ctype: CType) CType {
+    return switch (ctype.index) {
+        .char, .@"signed char", .@"unsigned char" => .{ .index = .@"unsigned char" },
+        .short, .@"unsigned short" => .{ .index = .@"unsigned short" },
+        .int, .@"unsigned int" => .{ .index = .@"unsigned int" },
+        .long, .@"unsigned long" => .{ .index = .@"unsigned long" },
+        .@"long long", .@"unsigned long long" => .{ .index = .@"unsigned long long" },
+        .size_t, .ptrdiff_t => .{ .index = .size_t },
+        .uint8_t, .int8_t => .{ .index = .uint8_t },
+        .uint16_t, .int16_t => .{ .index = .uint16_t },
+        .uint32_t, .int32_t => .{ .index = .uint32_t },
+        .uint64_t, .int64_t => .{ .index = .uint64_t },
+        .uintptr_t, .intptr_t => .{ .index = .uintptr_t },
+        .zig_u128, .zig_i128 => .{ .index = .zig_u128 },
+        else => unreachable,
+    };
+}
+
+pub fn toSignedness(ctype: CType, s: std.builtin.Signedness) CType {
+    return switch (s) {
+        .unsigned => ctype.toUnsigned(),
+        .signed => ctype.toSigned(),
+    };
+}
+
+pub fn getStandardDefineAbbrev(ctype: CType) ?[]const u8 {
+    return switch (ctype.index) {
+        .char => "CHAR",
+        .@"signed char" => "SCHAR",
+        .short => "SHRT",
+        .int => "INT",
+        .long => "LONG",
+        .@"long long" => "LLONG",
+        .@"unsigned char" => "UCHAR",
+        .@"unsigned short" => "USHRT",
+        .@"unsigned int" => "UINT",
+        .@"unsigned long" => "ULONG",
+        .@"unsigned long long" => "ULLONG",
+        .float => "FLT",
+        .double => "DBL",
+        .@"long double" => "LDBL",
+        .size_t => "SIZE",
+        .ptrdiff_t => "PTRDIFF",
+        .uint8_t => "UINT8",
+        .int8_t => "INT8",
+        .uint16_t => "UINT16",
+        .int16_t => "INT16",
+        .uint32_t => "UINT32",
+        .int32_t => "INT32",
+        .uint64_t => "UINT64",
+        .int64_t => "INT64",
+        .uintptr_t => "UINTPTR",
+        .intptr_t => "INTPTR",
+        else => null,
+    };
+}
+
+pub fn renderLiteralPrefix(ctype: CType, writer: anytype, kind: Kind, pool: *const Pool) @TypeOf(writer).Error!void {
+    switch (ctype.info(pool)) {
+        .basic => |basic_info| switch (basic_info) {
+            .void => unreachable,
+            ._Bool,
+            .char,
+            .@"signed char",
+            .short,
+            .@"unsigned short",
+            .bool,
+            .size_t,
+            .ptrdiff_t,
+            .uintptr_t,
+            .intptr_t,
+            => switch (kind) {
+                else => try writer.print("({s})", .{@tagName(basic_info)}),
+                .global => {},
+            },
+            .int,
+            .long,
+            .@"long long",
+            .@"unsigned char",
+            .@"unsigned int",
+            .@"unsigned long",
+            .@"unsigned long long",
+            .float,
+            .double,
+            .@"long double",
+            => {},
+            .uint8_t,
+            .int8_t,
+            .uint16_t,
+            .int16_t,
+            .uint32_t,
+            .int32_t,
+            .uint64_t,
+            .int64_t,
+            => try writer.print("{s}_C(", .{ctype.getStandardDefineAbbrev().?}),
+            .zig_u128,
+            .zig_i128,
+            .zig_f16,
+            .zig_f32,
+            .zig_f64,
+            .zig_f80,
+            .zig_f128,
+            .zig_c_longdouble,
+            => try writer.print("zig_{s}_{s}(", .{
+                switch (kind) {
+                    else => "make",
+                    .global => "init",
+                },
+                @tagName(basic_info)["zig_".len..],
+            }),
+            .va_list => unreachable,
+            _ => unreachable,
+        },
+        .array, .vector => try writer.writeByte('{'),
+        else => unreachable,
+    }
+}
+
+pub fn renderLiteralSuffix(ctype: CType, writer: anytype, pool: *const Pool) @TypeOf(writer).Error!void {
+    switch (ctype.info(pool)) {
+        .basic => |basic_info| switch (basic_info) {
+            .void => unreachable,
+            ._Bool => {},
+            .char,
+            .@"signed char",
+            .short,
+            .int,
+            => {},
+            .long => try writer.writeByte('l'),
+            .@"long long" => try writer.writeAll("ll"),
+            .@"unsigned char",
+            .@"unsigned short",
+            .@"unsigned int",
+            => try writer.writeByte('u'),
+            .@"unsigned long",
+            .size_t,
+            .uintptr_t,
+            => try writer.writeAll("ul"),
+            .@"unsigned long long" => try writer.writeAll("ull"),
+            .float => try writer.writeByte('f'),
+            .double => {},
+            .@"long double" => try writer.writeByte('l'),
+            .bool,
+            .ptrdiff_t,
+            .intptr_t,
+            => {},
+            .uint8_t,
+            .int8_t,
+            .uint16_t,
+            .int16_t,
+            .uint32_t,
+            .int32_t,
+            .uint64_t,
+            .int64_t,
+            .zig_u128,
+            .zig_i128,
+            .zig_f16,
+            .zig_f32,
+            .zig_f64,
+            .zig_f80,
+            .zig_f128,
+            .zig_c_longdouble,
+            => try writer.writeByte(')'),
+            .va_list => unreachable,
+            _ => unreachable,
+        },
+        .array, .vector => try writer.writeByte('}'),
+        else => unreachable,
+    }
+}
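[Editor's note — illustrative, not part of the patch] `renderLiteralPrefix`/`renderLiteralSuffix` above wrap 128-bit literals in `zig_make_u128(...)`-style macro calls, and `formatIntLiteral` earlier in this diff feeds them two 64-bit limbs with `.endian = .big`, i.e. most significant limb first. The split itself, in Zig:

```zig
const std = @import("std");

// Most-significant-first limb split of a 128-bit value, matching the
// two-limb, big-endian limb order used for zig_u128/zig_i128 literals.
fn limbsOf(x: u128) [2]u64 {
    return .{ @truncate(x >> 64), @truncate(x) };
}

test limbsOf {
    const limbs = limbsOf(0x0123456789abcdef_fedcba9876543210);
    try std.testing.expectEqual(@as(u64, 0x0123456789abcdef), limbs[0]);
    try std.testing.expectEqual(@as(u64, 0xfedcba9876543210), limbs[1]);
}
```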
+
+pub fn floatActiveBits(ctype: CType, mod: *Module) u16 {
+    const target = &mod.resolved_target.result;
+    return switch (ctype.index) {
+        .float => target.c_type_bit_size(.float),
+        .double => target.c_type_bit_size(.double),
+        .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble),
+        .zig_f16 => 16,
+        .zig_f32 => 32,
+        .zig_f64 => 64,
+        .zig_f80 => 80,
+        .zig_f128 => 128,
+        else => unreachable,
+    };
+}
+
+pub fn byteSize(ctype: CType, pool: *const Pool, mod: *Module) u64 {
+    const target = &mod.resolved_target.result;
+    return switch (ctype.info(pool)) {
+        .basic => |basic_info| switch (basic_info) {
+            .void => 0,
+            .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1,
+            .short => target.c_type_byte_size(.short),
+            .int => target.c_type_byte_size(.int),
+            .long => target.c_type_byte_size(.long),
+            .@"long long" => target.c_type_byte_size(.longlong),
+            .@"unsigned short" => target.c_type_byte_size(.ushort),
+            .@"unsigned int" => target.c_type_byte_size(.uint),
+            .@"unsigned long" => target.c_type_byte_size(.ulong),
+            .@"unsigned long long" => target.c_type_byte_size(.ulonglong),
+            .float => target.c_type_byte_size(.float),
+            .double => target.c_type_byte_size(.double),
+            .@"long double" => target.c_type_byte_size(.longdouble),
+            .size_t,
+            .ptrdiff_t,
+            .uintptr_t,
+            .intptr_t,
+            => @divExact(target.ptrBitWidth(), 8),
+            .uint16_t, .int16_t, .zig_f16 => 2,
+            .uint32_t, .int32_t, .zig_f32 => 4,
+            .uint64_t, .int64_t, .zig_f64 => 8,
+            .zig_u128, .zig_i128, .zig_f128 => 16,
+            .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80)
+                target.c_type_byte_size(.longdouble)
+            else
+                16,
+            .zig_c_longdouble => target.c_type_byte_size(.longdouble),
+            .va_list => unreachable,
+            _ => unreachable,
+        },
+        .pointer => @divExact(target.ptrBitWidth(), 8),
+        .array, .vector => |sequence_info| sequence_info.elem_ctype.byteSize(pool, mod) * sequence_info.len,
+        else => unreachable,
+    };
+}
+
+pub fn info(ctype: CType, pool: *const Pool) Info {
+    const pool_index = ctype.toPoolIndex() orelse return .{ .basic = ctype.index };
+    const item = pool.items.get(pool_index);
+    switch (item.tag) {
+        .basic => unreachable,
+        .pointer => return .{ .pointer = .{
+            .elem_ctype = .{ .index = @enumFromInt(item.data) },
+        } },
+        .pointer_const => return .{ .pointer = .{
+            .elem_ctype = .{ .index = @enumFromInt(item.data) },
+            .@"const" = true,
+        } },
+        .pointer_volatile => return .{ .pointer = .{
+            .elem_ctype = .{ .index = @enumFromInt(item.data) },
+            .@"volatile" = true,
+        } },
+        .pointer_const_volatile => return .{ .pointer = .{
+            .elem_ctype = .{ .index = @enumFromInt(item.data) },
+            .@"const" = true,
+            .@"volatile" = true,
+        } },
+        .aligned => {
+            const extra = pool.getExtra(Pool.Aligned, item.data);
+            return .{ .aligned = .{
+                .ctype = .{ .index = extra.ctype },
+                .alignas = extra.flags.alignas,
+            } };
+        },
+        .array_small => {
+            const extra = pool.getExtra(Pool.SequenceSmall, item.data);
+            return .{ .array = .{
+                .elem_ctype = .{ .index = extra.elem_ctype },
+                .len = extra.len,
+            } };
+        },
+        .array_large => {
+            const extra = pool.getExtra(Pool.SequenceLarge, item.data);
+            return .{ .array = .{
+                .elem_ctype = .{ .index = extra.elem_ctype },
+                .len = extra.len(),
+            } };
+        },
+        .vector => {
+            const extra = pool.getExtra(Pool.SequenceSmall, item.data);
+            return .{ .vector = .{
+                .elem_ctype = .{ .index = extra.elem_ctype },
+                .len = extra.len,
+            } };
+        },
+        .fwd_decl_struct_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data);
+            return .{ .fwd_decl = .{
+                .tag = .@"struct",
+                .name = .{ .anon = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                } },
+            } };
+        },
+        .fwd_decl_union_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.FwdDeclAnon, item.data);
+            return .{ .fwd_decl = .{
+                .tag = .@"union",
+                .name = .{ .anon = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                } },
+            } };
+        },
+        .fwd_decl_struct => return .{ .fwd_decl = .{
+            .tag = .@"struct",
+            .name = .{ .owner_decl = @enumFromInt(item.data) },
+        } },
+        .fwd_decl_union => return .{ .fwd_decl = .{
+            .tag = .@"union",
+            .name = .{ .owner_decl = @enumFromInt(item.data) },
+        } },
+        .aggregate_struct_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"struct",
+                .name = .{ .anon = .{
+                    .owner_decl = extra_trail.extra.owner_decl,
+                    .id = extra_trail.extra.id,
+                } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_union_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"union",
+                .name = .{ .anon = .{
+                    .owner_decl = extra_trail.extra.owner_decl,
+                    .id = extra_trail.extra.id,
+                } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_struct_packed_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"struct",
+                .@"packed" = true,
+                .name = .{ .anon = .{
+                    .owner_decl = extra_trail.extra.owner_decl,
+                    .id = extra_trail.extra.id,
+                } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_union_packed_anon => {
+            const extra_trail = pool.getExtraTrail(Pool.AggregateAnon, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"union",
+                .@"packed" = true,
+                .name = .{ .anon = .{
+                    .owner_decl = extra_trail.extra.owner_decl,
+                    .id = extra_trail.extra.id,
+                } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_struct => {
+            const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"struct",
+                .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_union => {
+            const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"union",
+                .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_struct_packed => {
+            const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"struct",
+                .@"packed" = true,
+                .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .aggregate_union_packed => {
+            const extra_trail = pool.getExtraTrail(Pool.Aggregate, item.data);
+            return .{ .aggregate = .{
+                .tag = .@"union",
+                .@"packed" = true,
+                .name = .{ .fwd_decl = .{ .index = extra_trail.extra.fwd_decl } },
+                .fields = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.fields_len,
+                },
+            } };
+        },
+        .function => {
+            const extra_trail = pool.getExtraTrail(Pool.Function, item.data);
+            return .{ .function = .{
+                .return_ctype = .{ .index = extra_trail.extra.return_ctype },
+                .param_ctypes = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.param_ctypes_len,
+                },
+                .varargs = false,
+            } };
+        },
+        .function_varargs => {
+            const extra_trail = pool.getExtraTrail(Pool.Function, item.data);
+            return .{ .function = .{
+                .return_ctype = .{ .index = extra_trail.extra.return_ctype },
+                .param_ctypes = .{
+                    .extra_index = extra_trail.trail.extra_index,
+                    .len = extra_trail.extra.param_ctypes_len,
+                },
+                .varargs = true,
+            } };
+        },
+    }
+}
+
+pub fn hash(ctype: CType, pool: *const Pool) Pool.Map.Hash {
+    return if (ctype.toPoolIndex()) |pool_index|
+        pool.map.entries.items(.hash)[pool_index]
+    else
+        CType.Index.basic_hashes[@intFromEnum(ctype.index)];
+}
+
+fn toForward(ctype: CType, pool: *Pool, allocator: std.mem.Allocator) !CType {
+    return switch (ctype.info(pool)) {
+        .basic, .pointer, .fwd_decl => ctype,
+        .aligned => |aligned_info| pool.getAligned(allocator, .{
+            .ctype = try aligned_info.ctype.toForward(pool, allocator),
+            .alignas = aligned_info.alignas,
+        }),
+        .array => |array_info| pool.getArray(allocator, .{
+            .elem_ctype = try array_info.elem_ctype.toForward(pool, allocator),
+            .len = array_info.len,
+        }),
+        .vector => |vector_info| pool.getVector(allocator, .{
+            .elem_ctype = try vector_info.elem_ctype.toForward(pool, allocator),
+            .len = vector_info.len,
+        }),
+        .aggregate => |aggregate_info| switch (aggregate_info.name) {
+            .anon => ctype,
+            .fwd_decl => |fwd_decl| fwd_decl,
+        },
+        .function => unreachable,
+    };
+}
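[Editor's note — illustrative, not part of the patch] `Index.basic_hashes` just below precomputes one hash per basic type at compile time, so runtime lookups are a plain array index. The labeled-block-at-comptime idiom it uses, reduced to a toy table:

```zig
const std = @import("std");

// A comptime-initialized lookup table via a labeled block, the same idiom
// as `basic_hashes` below (the contents here are arbitrary).
const squares = init: {
    var table: [16]u32 = undefined;
    for (&table, 0..) |*entry, i| entry.* = @intCast(i * i);
    break :init table;
};

test squares {
    try std.testing.expectEqual(@as(u32, 225), squares[15]);
}
```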
+
+const Index = enum(u32) {
+    void,
+
+    // C basic types
+    char,
+
+    @"signed char",
+    short,
+    int,
+    long,
+    @"long long",
+
+    _Bool,
+    @"unsigned char",
+    @"unsigned short",
+    @"unsigned int",
+    @"unsigned long",
+    @"unsigned long long",
+
+    float,
+    double,
+    @"long double",
+
+    // C header types
+    // - stdbool.h
+    bool,
+    // - stddef.h
+    size_t,
+    ptrdiff_t,
+    // - stdint.h
+    uint8_t,
+    int8_t,
+    uint16_t,
+    int16_t,
+    uint32_t,
+    int32_t,
+    uint64_t,
+    int64_t,
+    uintptr_t,
+    intptr_t,
+    // - stdarg.h
+    va_list,
+
+    // zig.h types
+    zig_u128,
+    zig_i128,
+    zig_f16,
+    zig_f32,
+    zig_f64,
+    zig_f80,
+    zig_f128,
+    zig_c_longdouble,
+
+    _,
+
+    const first_pool_index: u32 = @typeInfo(CType.Index).Enum.fields.len;
+    const basic_hashes = init: {
+        @setEvalBranchQuota(1_600);
+        var basic_hashes_init: [first_pool_index]Pool.Map.Hash = undefined;
+        for (&basic_hashes_init, 0..) |*basic_hash, index| {
+            const ctype_index: CType.Index = @enumFromInt(index);
+            var hasher = Pool.Hasher.init;
+            hasher.update(@intFromEnum(ctype_index));
+            basic_hash.* = hasher.final(.basic);
+        }
+        break :init basic_hashes_init;
+    };
+};
+
+const Slice = struct {
+    extra_index: Pool.ExtraIndex,
+    len: u32,
+
+    pub fn at(slice: CType.Slice, index: usize, pool: *const Pool) CType {
+        var extra: Pool.ExtraTrail = .{ .extra_index = slice.extra_index };
+        return .{ .index = extra.next(slice.len, CType.Index, pool)[index] };
+    }
+};
+
+pub const Kind = enum {
+    forward,
+    forward_parameter,
+    complete,
+    global,
+    parameter,
+
+    pub fn isForward(kind: Kind) bool {
+        return switch (kind) {
+            .forward, .forward_parameter => true,
+            .complete, .global, .parameter => false,
+        };
+    }
+
+    pub fn isParameter(kind: Kind) bool {
+        return switch (kind) {
+            .forward_parameter, .parameter => true,
+            .forward, .complete, .global => false,
+        };
+    }
+
+    pub fn asParameter(kind: Kind) Kind {
+        return switch (kind) {
+            .forward, .forward_parameter => .forward_parameter,
+            .complete, .parameter, .global => .parameter,
+        };
+    }
+
+    pub fn noParameter(kind: Kind) Kind {
+        return switch (kind) {
+            .forward, .forward_parameter => .forward,
+            .complete, .parameter => .complete,
+            .global => .global,
+        };
+    }
+};
+
+pub const String = struct {
+    index: String.Index,
+
+    const Index = enum(u32) {
+        _,
+    };
+
+    pub fn slice(string: String, pool: *const Pool) []const u8 {
+        const start = pool.string_indices.items[@intFromEnum(string.index)];
+        const end = pool.string_indices.items[@intFromEnum(string.index) + 1];
+        return pool.string_bytes.items[start..end];
+    }
+};
+
+pub const Info = union(enum) {
+    basic: CType.Index,
+    pointer: Pointer,
+    aligned: Aligned,
+    array: Sequence,
+    vector: Sequence,
+    fwd_decl: FwdDecl,
+    aggregate: Aggregate,
+    function: Function,
+
+    pub const Pointer = struct {
+        elem_ctype: CType,
+        @"const": bool = false,
+        @"volatile": bool = false,
+
+        fn tag(pointer_info: Pointer) Pool.Tag {
+            return @enumFromInt(@intFromEnum(Pool.Tag.pointer) +
+                @as(u2, @bitCast(packed struct(u2) {
+                    @"const": bool,
+                    @"volatile": bool,
+                }{
+                    .@"const" = pointer_info.@"const",
+                    .@"volatile" = pointer_info.@"volatile",
+                })));
+        }
+    };
+
+    pub const Aligned = struct {
+        ctype: CType,
+        alignas: AlignAs,
+    };
+
+    pub const Sequence = struct {
+        elem_ctype: CType,
+        len: u64,
+    };
+
+    pub const Tag = enum { @"enum", @"struct", @"union" };
+
+    pub const Field = struct {
+        name: String,
+        ctype: CType,
+        alignas: AlignAs,
+
+        pub const Slice = struct {
+            extra_index: Pool.ExtraIndex,
+            len: u32,
+
+            pub fn at(slice: Field.Slice, index: usize, pool: *const Pool) Field {
+                assert(index < slice.len);
+                const extra = pool.getExtra(Pool.Field, @intCast(slice.extra_index +
+                    index * @typeInfo(Pool.Field).Struct.fields.len));
+                return .{
+                    .name = .{ .index = extra.name },
+                    .ctype = .{ .index = extra.ctype },
+                    .alignas = extra.flags.alignas,
+                };
+            }
+
+            fn eqlAdapted(
+                lhs_slice: Field.Slice,
+                lhs_pool: *const Pool,
+                rhs_slice: Field.Slice,
+                rhs_pool: *const Pool,
+                pool_adapter: anytype,
+            ) bool {
+                if (lhs_slice.len != rhs_slice.len) return false;
+                for (0..lhs_slice.len) |index| {
+                    if (!lhs_slice.at(index, lhs_pool).eqlAdapted(
+                        lhs_pool,
+                        rhs_slice.at(index, rhs_pool),
+                        rhs_pool,
+                        pool_adapter,
+                    )) return false;
+                }
+                return true;
+            }
+        };
+
+        fn eqlAdapted(
+            lhs_field: Field,
+            lhs_pool: *const Pool,
+            rhs_field: Field,
+            rhs_pool: *const Pool,
+            pool_adapter: anytype,
+        ) bool {
+            return std.meta.eql(lhs_field.alignas, rhs_field.alignas) and
+                pool_adapter.eql(lhs_field.ctype, rhs_field.ctype) and std.mem.eql(
+                u8,
+                lhs_field.name.slice(lhs_pool),
+                rhs_field.name.slice(rhs_pool),
+            );
+        }
+    };
+
+    pub const FwdDecl = struct {
+        tag: Tag,
+        name: union(enum) {
+            anon: Field.Slice,
+            owner_decl: DeclIndex,
+        },
+    };
+
+    pub const Aggregate = struct {
+        tag: Tag,
+        @"packed": bool = false,
+        name: union(enum) {
+            anon: struct {
+                owner_decl: DeclIndex,
+                id: u32,
+            },
+            fwd_decl: CType,
+        },
+        fields: Field.Slice,
+    };
+
+    pub const Function = struct {
+        return_ctype: CType,
+        param_ctypes: CType.Slice,
+        varargs: bool = false,
+    };
+
+    pub fn eqlAdapted(
+        lhs_info: Info,
+        lhs_pool: *const Pool,
+        rhs_ctype: CType,
+        rhs_pool: *const Pool,
+        pool_adapter: anytype,
+    ) bool {
+        const InfoTag = @typeInfo(Info).Union.tag_type.?;
+        const rhs_info = rhs_ctype.info(rhs_pool);
+        if (@as(InfoTag, lhs_info) != @as(InfoTag, rhs_info)) return false;
+        return switch (lhs_info) {
+            .basic => |lhs_basic_info| lhs_basic_info == rhs_info.basic,
+            .pointer => |lhs_pointer_info| lhs_pointer_info.@"const" == rhs_info.pointer.@"const" and
+                lhs_pointer_info.@"volatile" == rhs_info.pointer.@"volatile" and
+                pool_adapter.eql(lhs_pointer_info.elem_ctype, rhs_info.pointer.elem_ctype),
+            .aligned => |lhs_aligned_info| std.meta.eql(lhs_aligned_info.alignas, rhs_info.aligned.alignas) and
+                pool_adapter.eql(lhs_aligned_info.ctype, rhs_info.aligned.ctype),
+            .array => |lhs_array_info| lhs_array_info.len == rhs_info.array.len and
+                pool_adapter.eql(lhs_array_info.elem_ctype, rhs_info.array.elem_ctype),
+            .vector => |lhs_vector_info| lhs_vector_info.len == rhs_info.vector.len and
+                pool_adapter.eql(lhs_vector_info.elem_ctype, rhs_info.vector.elem_ctype),
+            .fwd_decl => |lhs_fwd_decl_info| lhs_fwd_decl_info.tag == rhs_info.fwd_decl.tag and
+                switch (lhs_fwd_decl_info.name) {
+                    .anon => |lhs_anon| rhs_info.fwd_decl.name == .anon and lhs_anon.eqlAdapted(
+                        lhs_pool,
+                        rhs_info.fwd_decl.name.anon,
+                        rhs_pool,
+                        pool_adapter,
+                    ),
+                    .owner_decl => |lhs_owner_decl| rhs_info.fwd_decl.name == .owner_decl and
+                        lhs_owner_decl == rhs_info.fwd_decl.name.owner_decl,
+                },
+            .aggregate => |lhs_aggregate_info| lhs_aggregate_info.tag == rhs_info.aggregate.tag and
+                lhs_aggregate_info.@"packed" == rhs_info.aggregate.@"packed" and
+                switch (lhs_aggregate_info.name) {
+                    .anon => |lhs_anon| rhs_info.aggregate.name == .anon and
+                        lhs_anon.owner_decl == rhs_info.aggregate.name.anon.owner_decl and
+                        lhs_anon.id == rhs_info.aggregate.name.anon.id,
+                    .fwd_decl => |lhs_fwd_decl| rhs_info.aggregate.name == .fwd_decl and
+                        pool_adapter.eql(lhs_fwd_decl, rhs_info.aggregate.name.fwd_decl),
+                } and lhs_aggregate_info.fields.eqlAdapted(
+                lhs_pool,
+                rhs_info.aggregate.fields,
+                rhs_pool,
+                pool_adapter,
+            ),
+            .function => |lhs_function_info| lhs_function_info.param_ctypes.len ==
+                rhs_info.function.param_ctypes.len and
+                pool_adapter.eql(lhs_function_info.return_ctype, rhs_info.function.return_ctype) and
+                for (0..lhs_function_info.param_ctypes.len) |param_index|
+            {
+                if (!pool_adapter.eql(
+                    lhs_function_info.param_ctypes.at(param_index, lhs_pool),
+                    rhs_info.function.param_ctypes.at(param_index, rhs_pool),
+                )) break false;
+            } else true,
+        };
+    }
+};
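[Editor's note — illustrative, not part of the patch] `Pool` below stores its map as `std.AutoArrayHashMapUnmanaged(void, void)`: neither keys nor values live in the map itself; the entry's index is the handle, and the real data sits in the side arrays `items`/`extra`, recovered through adapted lookups. The index-as-handle idea, shown with an ordinary string map:

```zig
const std = @import("std");

// Toy interner: the entry index returned by getOrPut is a stable handle,
// analogous to how a pooled CType.Index is derived from the map entry index.
fn intern(map: *std.StringArrayHashMap(void), str: []const u8) !u32 {
    const gop = try map.getOrPut(str);
    return @intCast(gop.index);
}

test intern {
    var map = std.StringArrayHashMap(void).init(std.testing.allocator);
    defer map.deinit();
    try std.testing.expectEqual(@as(u32, 0), try intern(&map, "uint8_t"));
    try std.testing.expectEqual(@as(u32, 1), try intern(&map, "size_t"));
    // Interning the same key again yields the same handle.
    try std.testing.expectEqual(@as(u32, 0), try intern(&map, "uint8_t"));
}
```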
+
+pub const Pool = struct {
+    map: Map,
+    items: std.MultiArrayList(Item),
+    extra: std.ArrayListUnmanaged(u32),
+
+    string_map: Map,
+    string_indices: std.ArrayListUnmanaged(u32),
+    string_bytes: std.ArrayListUnmanaged(u8),
+
+    const Map = std.AutoArrayHashMapUnmanaged(void, void);
+
+    pub const empty: Pool = .{
+        .map = .{},
+        .items = .{},
+        .extra = .{},
+
+        .string_map = .{},
+        .string_indices = .{},
+        .string_bytes = .{},
+    };
+
+    pub fn init(pool: *Pool, allocator: std.mem.Allocator) !void {
+        if (pool.string_indices.items.len == 0)
+            try pool.string_indices.append(allocator, 0);
+    }
+
+    pub fn deinit(pool: *Pool, allocator: std.mem.Allocator) void {
+        pool.map.deinit(allocator);
+        pool.items.deinit(allocator);
+        pool.extra.deinit(allocator);
+
+        pool.string_map.deinit(allocator);
+        pool.string_indices.deinit(allocator);
+        pool.string_bytes.deinit(allocator);
+
+        pool.* = undefined;
+    }
+
+    pub fn move(pool: *Pool) Pool {
+        defer pool.* = empty;
+        return pool.*;
+    }
+
+    pub fn clearRetainingCapacity(pool: *Pool) void {
+        pool.map.clearRetainingCapacity();
+        pool.items.shrinkRetainingCapacity(0);
+        pool.extra.clearRetainingCapacity();
+
+        pool.string_map.clearRetainingCapacity();
+        pool.string_indices.shrinkRetainingCapacity(1);
+        pool.string_bytes.clearRetainingCapacity();
+    }
+
+    pub fn freeUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator) void {
+        pool.map.shrinkAndFree(allocator, pool.map.count());
+        pool.items.shrinkAndFree(allocator, pool.items.len);
+        pool.extra.shrinkAndFree(allocator, pool.extra.items.len);
+
+        pool.string_map.shrinkAndFree(allocator, pool.string_map.count());
+        pool.string_indices.shrinkAndFree(allocator, pool.string_indices.items.len);
+        pool.string_bytes.shrinkAndFree(allocator, pool.string_bytes.items.len);
+    }
+
+    pub fn getPointer(pool: *Pool, allocator: std.mem.Allocator, pointer_info: Info.Pointer) !CType {
+        var hasher = Hasher.init;
+        hasher.update(pointer_info.elem_ctype.hash(pool));
+        return pool.tagData(
+            allocator,
+            hasher,
+            pointer_info.tag(),
+            @intFromEnum(pointer_info.elem_ctype.index),
+        );
+    }
+
+    pub fn getAligned(pool: *Pool, allocator: std.mem.Allocator, aligned_info: Info.Aligned) !CType {
+        return pool.tagExtra(allocator, .aligned, Aligned, .{
+            .ctype = aligned_info.ctype.index,
+            .flags = .{ .alignas = aligned_info.alignas },
+        });
+    }
+
+    pub fn getArray(pool: *Pool, allocator: std.mem.Allocator, array_info: Info.Sequence) !CType {
+        return if (std.math.cast(u32, array_info.len)) |small_len|
+            pool.tagExtra(allocator, .array_small, SequenceSmall, .{
+                .elem_ctype = array_info.elem_ctype.index,
+                .len = small_len,
+            })
+        else
+            pool.tagExtra(allocator, .array_large, SequenceLarge, .{
+                .elem_ctype = array_info.elem_ctype.index,
+                .len_lo = @truncate(array_info.len >> 0),
+                .len_hi = @truncate(array_info.len >> 32),
+            });
+    }
+
+    pub fn getVector(pool: *Pool, allocator: std.mem.Allocator, vector_info: Info.Sequence) !CType {
+        return pool.tagExtra(allocator, .vector, SequenceSmall, .{
+            .elem_ctype = vector_info.elem_ctype.index,
+            .len = @intCast(vector_info.len),
+        });
+    }
+
+    pub fn getFwdDecl(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        fwd_decl_info: struct {
+            tag: Info.Tag,
+            name: union(enum) {
+                anon: []const Info.Field,
+                owner_decl: DeclIndex,
+            },
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        switch (fwd_decl_info.name) {
+            .anon => |fields| {
+                const ExpectedContents = [32]CType;
+                var stack align(@max(
+                    @alignOf(std.heap.StackFallbackAllocator(0)),
+                    @alignOf(ExpectedContents),
+                )) = std.heap.stackFallback(@sizeOf(ExpectedContents), allocator);
+                const stack_allocator = stack.get();
+                const field_ctypes = try stack_allocator.alloc(CType, fields.len);
+                defer stack_allocator.free(field_ctypes);
+                for (field_ctypes, fields) |*field_ctype, field|
+                    field_ctype.* = try field.ctype.toForward(pool, allocator);
+                const extra: FwdDeclAnon = .{ .fields_len = @intCast(fields.len) };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    FwdDeclAnon,
+                    extra,
+                    fields.len * @typeInfo(Field).Struct.fields.len,
+                );
+                for (fields, field_ctypes) |field, field_ctype| pool.addHashedExtraAssumeCapacity(
+                    &hasher,
+                    Field,
+                    .{
+                        .name = field.name.index,
+                        .ctype = field_ctype.index,
+                        .flags = .{ .alignas = field.alignas },
+                    },
+                );
+                hasher.updateExtra(FwdDeclAnon, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (fwd_decl_info.tag) {
+                    .@"struct" => .fwd_decl_struct_anon,
+                    .@"union" => .fwd_decl_union_anon,
+                    .@"enum" => unreachable,
+                }, extra_index);
+            },
+            .owner_decl => |owner_decl| {
+                hasher.update(owner_decl);
+                return pool.tagData(allocator, hasher, switch (fwd_decl_info.tag) {
+                    .@"struct" => .fwd_decl_struct,
+                    .@"union" => .fwd_decl_union,
+                    .@"enum" => unreachable,
+                }, @intFromEnum(owner_decl));
+            },
+        }
+    }
+
+    pub fn getAggregate(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        aggregate_info: struct {
+            tag: Info.Tag,
+            @"packed": bool = false,
+            name: union(enum) {
+                anon: struct {
+                    owner_decl: DeclIndex,
+                    id: u32,
+                },
+                fwd_decl: CType,
+            },
+            fields: []const Info.Field,
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        switch (aggregate_info.name) {
+            .anon => |anon| {
+                const extra: AggregateAnon = .{
+                    .owner_decl = anon.owner_decl,
+                    .id = anon.id,
+                    .fields_len = @intCast(aggregate_info.fields.len),
+                };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    AggregateAnon,
+                    extra,
+                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+                );
+                for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
+                    .name = field.name.index,
+                    .ctype = field.ctype.index,
+                    .flags = .{ .alignas = field.alignas },
+                });
+                hasher.updateExtra(AggregateAnon, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) {
+                    .@"struct" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_struct_anon,
+                        true => .aggregate_struct_packed_anon,
+                    },
+                    .@"union" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_union_anon,
+                        true => .aggregate_union_packed_anon,
+                    },
+                    .@"enum" => unreachable,
+                }, extra_index);
+            },
+            .fwd_decl => |fwd_decl| {
+                const extra: Aggregate = .{
+                    .fwd_decl = fwd_decl.index,
+                    .fields_len = @intCast(aggregate_info.fields.len),
+                };
+                const extra_index = try pool.addExtra(
+                    allocator,
+                    Aggregate,
+                    extra,
+                    aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len,
+                );
+                for (aggregate_info.fields) |field| pool.addHashedExtraAssumeCapacity(&hasher, Field, .{
+                    .name = field.name.index,
+                    .ctype = field.ctype.index,
+                    .flags = .{ .alignas = field.alignas },
+                });
+                hasher.updateExtra(Aggregate, extra, pool);
+                return pool.tagTrailingExtra(allocator, hasher, switch (aggregate_info.tag) {
+                    .@"struct" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_struct,
+                        true => .aggregate_struct_packed,
+                    },
+                    .@"union" => switch (aggregate_info.@"packed") {
+                        false => .aggregate_union,
+                        true => .aggregate_union_packed,
+                    },
+                    .@"enum" => unreachable,
+                }, extra_index);
+            },
+        }
+    }
+
+    pub fn getFunction(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        function_info: struct {
+            return_ctype: CType,
+            param_ctypes: []const CType,
+            varargs: bool = false,
+        },
+    ) !CType {
+        var hasher = Hasher.init;
+        const extra: Function = .{
+            .return_ctype = function_info.return_ctype.index,
+            .param_ctypes_len = @intCast(function_info.param_ctypes.len),
+        };
+        const extra_index = try pool.addExtra(allocator, Function, extra, function_info.param_ctypes.len);
+        for (function_info.param_ctypes) |param_ctype| {
+            hasher.update(param_ctype.hash(pool));
+            pool.extra.appendAssumeCapacity(@intFromEnum(param_ctype.index));
+        }
+        hasher.updateExtra(Function, extra, pool);
+        return pool.tagTrailingExtra(allocator, hasher, switch (function_info.varargs) {
+            false => .function,
+            true => .function_varargs,
+        }, extra_index);
+    }
+
+    pub fn fromFields(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        tag: Info.Tag,
+        fields: []Info.Field,
+        kind: Kind,
+    ) !CType {
+        sortFields(fields);
+        const fwd_decl = try pool.getFwdDecl(allocator, .{
+            .tag = tag,
+            .name = .{ .anon = fields },
+        });
+        return if (kind.isForward()) fwd_decl else pool.getAggregate(allocator, .{
+            .tag = tag,
+            .name = .{ .fwd_decl = fwd_decl },
+            .fields = fields,
+        });
+    }
+
+    pub fn fromIntInfo(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        int_info: std.builtin.Type.Int,
+        mod: *Module,
+        kind: Kind,
+    ) !CType {
+        switch (int_info.bits) {
+            0 => return .{ .index = .void },
+            1...8 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint8_t },
+                .signed => return .{ .index = .int8_t },
+            },
+            9...16 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint16_t },
+                .signed => return .{ .index = .int16_t },
+            },
+            17...32 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint32_t },
+                .signed => return .{ .index = .int32_t },
+            },
+            33...64 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .uint64_t },
+                .signed => return .{ .index = .int64_t },
+            },
+            65...128 => switch (int_info.signedness) {
+                .unsigned => return .{ .index = .zig_u128 },
+                .signed => return .{ .index = .zig_i128 },
+            },
+            else => {
+                const target = &mod.resolved_target.result;
+                const abi_align = Type.intAbiAlignment(int_info.bits, target.*);
+                const abi_align_bytes = abi_align.toByteUnits().?;
+                const array_ctype = try pool.getArray(allocator, .{
+                    .len = @divExact(Type.intAbiSize(int_info.bits, target.*), abi_align_bytes),
+                    .elem_ctype = try pool.fromIntInfo(allocator, .{
+                        .signedness = .unsigned,
+                        .bits = @intCast(abi_align_bytes * 8),
+                    }, mod, kind.noParameter()),
+                });
+                if (!kind.isParameter()) return array_ctype;
+                var fields = [_]Info.Field{
+                    .{
+                        .name = try pool.string(allocator, "array"),
+                        .ctype = array_ctype,
+                        .alignas = AlignAs.fromAbiAlignment(abi_align),
+                    },
+                };
+                return pool.fromFields(allocator, .@"struct", &fields, kind);
+            },
+        }
+    }
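[Editor's note — illustrative, not part of the patch] `fromIntInfo` above snaps an arbitrary Zig integer width to the smallest fixed-width C representation, falling back to an array of unsigned limbs past 128 bits. The bucketing, reduced to the width ranges alone (the returned names mirror the `CType.Index` tags):

```zig
const std = @import("std");

fn bucketName(bits: u16) []const u8 {
    return switch (bits) {
        0 => "void",
        1...8 => "uint8_t / int8_t",
        9...16 => "uint16_t / int16_t",
        17...32 => "uint32_t / int32_t",
        33...64 => "uint64_t / int64_t",
        65...128 => "zig_u128 / zig_i128",
        else => "array of unsigned limbs",
    };
}

test bucketName {
    try std.testing.expectEqualStrings("uint32_t / int32_t", bucketName(29));
    try std.testing.expectEqualStrings("array of unsigned limbs", bucketName(256));
}
```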
+
+    pub fn fromType(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        scratch: *std.ArrayListUnmanaged(u32),
+        ty: Type,
+        zcu: *Zcu,
+        mod: *Module,
+        kind: Kind,
+    ) !CType {
+        const ip = &zcu.intern_pool;
+        switch (ty.toIntern()) {
+            .u0_type,
+            .i0_type,
+            .anyopaque_type,
+            .void_type,
+            .empty_struct_type,
+            .type_type,
+            .comptime_int_type,
+            .comptime_float_type,
+            .null_type,
+            .undefined_type,
+            .enum_literal_type,
+            => return .{ .index = .void },
+            .u1_type, .u8_type => return .{ .index = .uint8_t },
+            .i8_type => return .{ .index = .int8_t },
+            .u16_type => return .{ .index = .uint16_t },
+            .i16_type => return .{ .index = .int16_t },
+            .u29_type, .u32_type => return .{ .index = .uint32_t },
+            .i32_type => return .{ .index = .int32_t },
+            .u64_type => return .{ .index = .uint64_t },
+            .i64_type => return .{ .index = .int64_t },
+            .u80_type, .u128_type => return .{ .index = .zig_u128 },
+            .i128_type => return .{ .index = .zig_i128 },
+            .usize_type => return .{ .index = .uintptr_t },
+            .isize_type => return .{ .index = .intptr_t },
+            .c_char_type => return .{ .index = .char },
+            .c_short_type => return .{ .index = .short },
+            .c_ushort_type => return .{ .index = .@"unsigned short" },
+            .c_int_type => return .{ .index = .int },
+            .c_uint_type => return .{ .index = .@"unsigned int" },
+            .c_long_type => return .{ .index = .long },
+            .c_ulong_type => return .{ .index = .@"unsigned long" },
+            .c_longlong_type => return .{ .index = .@"long long" },
+            .c_ulonglong_type => return .{ .index = .@"unsigned long long" },
+            .c_longdouble_type => return .{ .index = .@"long double" },
+            .f16_type => return .{ .index = .zig_f16 },
+            .f32_type => return .{ .index = .zig_f32 },
+            .f64_type => return .{ .index = .zig_f64 },
+            .f80_type => return .{ .index = .zig_f80 },
+            .f128_type => return .{ .index = .zig_f128 },
+            .bool_type, .optional_noreturn_type => return .{ .index = .bool },
+            .noreturn_type,
+            .anyframe_type,
+            .generic_poison_type,
+            => unreachable,
+            .atomic_order_type,
+            .atomic_rmw_op_type,
+            .calling_convention_type,
+            .address_space_type,
+            .float_mode_type,
+            .reduce_op_type,
+            .call_modifier_type,
+            => |ip_index| return pool.fromType(
+                allocator,
+                scratch,
+                Type.fromInterned(ip.loadEnumType(ip_index).tag_ty),
+                zcu,
+                mod,
+                kind,
+            ),
+            .anyerror_type,
+            .anyerror_void_error_union_type,
+            .adhoc_inferred_error_set_type,
+            => return pool.fromIntInfo(allocator, .{
+                .signedness = .unsigned,
+                .bits = zcu.errorSetBits(),
+            }, mod, kind),
+            .manyptr_u8_type,
+            => return pool.getPointer(allocator, .{
+                .elem_ctype = .{ .index = .uint8_t },
+            }),
+            .manyptr_const_u8_type,
+            .manyptr_const_u8_sentinel_0_type,
+            => return pool.getPointer(allocator, .{
+                .elem_ctype = .{ .index = .uint8_t },
+                .@"const" = true,
+            }),
+            .single_const_pointer_to_comptime_int_type,
+            => return pool.getPointer(allocator, .{
+                .elem_ctype = .{ .index = .void },
+                .@"const" = true,
+            }),
+            .slice_const_u8_type,
+            .slice_const_u8_sentinel_0_type,
+            => {
+                const target = &mod.resolved_target.result;
+                var fields = [_]Info.Field{
+                    .{
+                        .name = try pool.string(allocator, "ptr"),
+                        .ctype = try pool.getPointer(allocator, .{
+                            .elem_ctype = .{ .index = .uint8_t },
+                            .@"const" = true,
+                        }),
+                        .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+                    },
+                    .{
+                        .name = try pool.string(allocator, "len"),
+                        .ctype = .{ .index = .uintptr_t },
+                        .alignas = AlignAs.fromAbiAlignment(
+                            Type.intAbiAlignment(target.ptrBitWidth(), target.*),
+                        ),
+                    },
+                };
+                return pool.fromFields(allocator, .@"struct", &fields, kind);
+            },
+
+            .undef,
+            .zero,
+            .zero_usize,
+            .zero_u8,
+            .one,
+            .one_usize,
+            .one_u8,
+            .four_u8,
+            .negative_one,
+            .calling_convention_c,
+            .calling_convention_inline,
+            .void_value,
+            .unreachable_value,
+            .null_value,
+            .bool_true,
+            .bool_false,
+            .empty_struct,
+            .generic_poison,
+            .var_args_param_type,
+            .none,
+            => unreachable,
+
+            //.prefetch_options_type,
+            //.export_options_type,
+            //.extern_options_type,
+            //.type_info_type,
+            //_,
+            else => |ip_index| switch (ip.indexToKey(ip_index)) {
+                .int_type => |int_info| return pool.fromIntInfo(allocator, int_info, mod, kind),
+                .ptr_type => |ptr_info| switch (ptr_info.flags.size) {
+                    .One, .Many, .C => return pool.getPointer(allocator, .{
+                        .elem_ctype = elem_ctype: {
+                            if (ptr_info.packed_offset.host_size > 0 and
+                                ptr_info.flags.vector_index == .none)
+                                break :elem_ctype try pool.fromIntInfo(allocator, .{
+                                    .signedness = .unsigned,
+                                    .bits = ptr_info.packed_offset.host_size * 8,
+                                }, mod, .forward);
+                            const elem: Info.Aligned = .{
+                                .ctype = try pool.fromType(
+                                    allocator,
+                                    scratch,
+                                    Type.fromInterned(ptr_info.child),
+                                    zcu,
+                                    mod,
+                                    .forward,
+                                ),
+                                .alignas = AlignAs.fromAlignment(.{
+                                    .@"align" = ptr_info.flags.alignment,
+                                    .abi = Type.fromInterned(ptr_info.child).abiAlignment(zcu),
+                                }),
+                            };
+                            if (elem.alignas.abiOrder().compare(.gte))
+                                break :elem_ctype elem.ctype;
+                            break :elem_ctype try pool.getAligned(allocator, elem);
+                        },
+                        .@"const" = ptr_info.flags.is_const,
+                        .@"volatile" = ptr_info.flags.is_volatile,
+                    }),
+                    .Slice => {
+                        const target = &mod.resolved_target.result;
+                        var fields = [_]Info.Field{
+                            .{
+                                .name = try pool.string(allocator, "ptr"),
+                                .ctype = try pool.fromType(
+                                    allocator,
+                                    scratch,
+                                    Type.fromInterned(ip.slicePtrType(ip_index)),
+                                    zcu,
+                                    mod,
+                                    kind,
+                                ),
+                                .alignas = AlignAs.fromAbiAlignment(Type.ptrAbiAlignment(target.*)),
+                            },
+                            .{
+                                .name = try pool.string(allocator, "len"),
+                                .ctype = .{ .index = .uintptr_t },
+                                .alignas = AlignAs.fromAbiAlignment(
+                                    Type.intAbiAlignment(target.ptrBitWidth(), target.*),
+                                ),
+                            },
+                        };
+                        return pool.fromFields(allocator, .@"struct", &fields, kind);
+                    },
+                },
+                .array_type => |array_info| {
+                    const len = array_info.len + @intFromBool(array_info.sentinel != .none);
+                    if (len == 0) return .{ .index = .void };
+                    const elem_type = Type.fromInterned(array_info.child);
+                    const elem_ctype = try pool.fromType(
+                        allocator,
+                        scratch,
+                        elem_type,
+                        zcu,
+                        mod,
+                        kind.noParameter(),
+                    );
+                    if (elem_ctype.index == .void) return .{ .index = .void };
+                    const array_ctype = try pool.getArray(allocator, .{
+                        .elem_ctype = elem_ctype,
+                        .len = array_info.len + @intFromBool(array_info.sentinel != .none),
+                    });
+                    if (!kind.isParameter()) return array_ctype;
+                    var fields = [_]Info.Field{
+                        .{
+                            .name = try pool.string(allocator, "array"),
+                            .ctype = array_ctype,
+                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+                        },
+                    };
+                    return pool.fromFields(allocator, .@"struct", &fields, kind);
+                },
+                .vector_type => |vector_info| {
+                    if (vector_info.len == 0) return .{ .index = .void };
+                    const elem_type = Type.fromInterned(vector_info.child);
+                    const elem_ctype = try pool.fromType(
+                        allocator,
+                        scratch,
+                        elem_type,
+                        zcu,
+                        mod,
+                        kind.noParameter(),
+                    );
+                    if (elem_ctype.index == .void) return .{ .index = .void };
+                    const vector_ctype = try pool.getVector(allocator, .{
+                        .elem_ctype = elem_ctype,
+                        .len = vector_info.len,
+                    });
+                    if (!kind.isParameter()) return vector_ctype;
+                    var fields = [_]Info.Field{
+                        .{
+                            .name = try pool.string(allocator, "array"),
+                            .ctype = vector_ctype,
+                            .alignas = AlignAs.fromAbiAlignment(elem_type.abiAlignment(zcu)),
+                        },
+                    };
+                    return pool.fromFields(allocator, .@"struct", &fields, kind);
+                },
+                .opt_type => |payload_type| {
+                    if (ip.isNoReturn(payload_type)) return .{ .index = .void };
+                    const payload_ctype = try pool.fromType(
+                        allocator,
+                        scratch,
+                        Type.fromInterned(payload_type),
+                        zcu,
+                        mod,
+                        kind.noParameter(),
+                    );
+                    if (payload_ctype.index == .void) return .{ .index = .bool };
+                    switch (payload_type) {
+                        .anyerror_type => return payload_ctype,
+                        else => switch (ip.indexToKey(payload_type)) {
+                            .ptr_type => |payload_ptr_info| if (payload_ptr_info.flags.size != .C and
+                                !payload_ptr_info.flags.is_allowzero) return payload_ctype,
+                            .error_set_type, .inferred_error_set_type => return payload_ctype,
+                            else => {},
+                        },
+                    }
+                    var fields = [_]Info.Field{
+                        .{
+                            .name = try pool.string(allocator, "is_null"),
+                            .ctype = .{ .index = .bool },
AlignAs.fromAbiAlignment(.@"1"), + }, + .{ + .name = try pool.string(allocator, "payload"), + .ctype = payload_ctype, + .alignas = AlignAs.fromAbiAlignment( + Type.fromInterned(payload_type).abiAlignment(zcu), + ), + }, + }; + return pool.fromFields(allocator, .@"struct", &fields, kind); + }, + .anyframe_type => unreachable, + .error_union_type => |error_union_info| { + const error_set_bits = zcu.errorSetBits(); + const error_set_ctype = try pool.fromIntInfo(allocator, .{ + .signedness = .unsigned, + .bits = error_set_bits, + }, mod, kind); + if (ip.isNoReturn(error_union_info.payload_type)) return error_set_ctype; + const payload_type = Type.fromInterned(error_union_info.payload_type); + const payload_ctype = try pool.fromType( + allocator, + scratch, + payload_type, + zcu, + mod, + kind.noParameter(), + ); + if (payload_ctype.index == .void) return error_set_ctype; + const target = &mod.resolved_target.result; + var fields = [_]Info.Field{ + .{ + .name = try pool.string(allocator, "error"), + .ctype = error_set_ctype, + .alignas = AlignAs.fromAbiAlignment( + Type.intAbiAlignment(error_set_bits, target.*), + ), + }, + .{ + .name = try pool.string(allocator, "payload"), + .ctype = payload_ctype, + .alignas = AlignAs.fromAbiAlignment(payload_type.abiAlignment(zcu)), + }, + }; + return pool.fromFields(allocator, .@"struct", &fields, kind); + }, + .simple_type => unreachable, + .struct_type => { + const loaded_struct = ip.loadStructType(ip_index); + switch (loaded_struct.layout) { + .auto, .@"extern" => { + const fwd_decl = try pool.getFwdDecl(allocator, .{ + .tag = .@"struct", + .name = .{ .owner_decl = loaded_struct.decl.unwrap().? }, + }); + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + fwd_decl + else + .{ .index = .void }; + const scratch_top = scratch.items.len; + defer scratch.shrinkRetainingCapacity(scratch_top); + try scratch.ensureUnusedCapacity( + allocator, + loaded_struct.field_types.len * @typeInfo(Field).Struct.fields.len, + ); + var hasher = Hasher.init; + var tag: Tag = .aggregate_struct; + var field_it = loaded_struct.iterateRuntimeOrder(ip); + while (field_it.next()) |field_index| { + const field_type = Type.fromInterned( + loaded_struct.field_types.get(ip)[field_index], + ); + const field_ctype = try pool.fromType( + allocator, + scratch, + field_type, + zcu, + mod, + kind.noParameter(), + ); + if (field_ctype.index == .void) continue; + const field_name = if (loaded_struct.fieldName(ip, field_index) + .unwrap()) |field_name| + try pool.string(allocator, ip.stringToSlice(field_name)) + else + try pool.fmt(allocator, "f{d}", .{field_index}); + const field_alignas = AlignAs.fromAlignment(.{ + .@"align" = loaded_struct.fieldAlign(ip, field_index), + .abi = field_type.abiAlignment(zcu), + }); + pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ + .name = field_name.index, + .ctype = field_ctype.index, + .flags = .{ .alignas = field_alignas }, + }); + if (field_alignas.abiOrder().compare(.lt)) + tag = .aggregate_struct_packed; + } + const fields_len: u32 = @intCast(@divExact( + scratch.items.len - scratch_top, + @typeInfo(Field).Struct.fields.len, + )); + if (fields_len == 0) return .{ .index = .void }; + try pool.ensureUnusedCapacity(allocator, 1); + const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ + .fwd_decl = fwd_decl.index, + .fields_len = fields_len, + }, fields_len * @typeInfo(Field).Struct.fields.len); + pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); + return 
pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); + }, + .@"packed" => return pool.fromType( + allocator, + scratch, + Type.fromInterned(loaded_struct.backingIntType(ip).*), + zcu, + mod, + kind, + ), + } + }, + .anon_struct_type => |anon_struct_info| { + const scratch_top = scratch.items.len; + defer scratch.shrinkRetainingCapacity(scratch_top); + try scratch.ensureUnusedCapacity(allocator, anon_struct_info.types.len * + @typeInfo(Field).Struct.fields.len); + var hasher = Hasher.init; + for (0..anon_struct_info.types.len) |field_index| { + if (anon_struct_info.values.get(ip)[field_index] != .none) continue; + const field_type = Type.fromInterned( + anon_struct_info.types.get(ip)[field_index], + ); + const field_ctype = try pool.fromType( + allocator, + scratch, + field_type, + zcu, + mod, + kind.noParameter(), + ); + if (field_ctype.index == .void) continue; + const field_name = if (anon_struct_info.fieldName(ip, @intCast(field_index)) + .unwrap()) |field_name| + try pool.string(allocator, ip.stringToSlice(field_name)) + else + try pool.fmt(allocator, "f{d}", .{field_index}); + pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ + .name = field_name.index, + .ctype = field_ctype.index, + .flags = .{ .alignas = AlignAs.fromAbiAlignment( + field_type.abiAlignment(zcu), + ) }, + }); + } + const fields_len: u32 = @intCast(@divExact( + scratch.items.len - scratch_top, + @typeInfo(Field).Struct.fields.len, + )); + if (fields_len == 0) return .{ .index = .void }; + if (kind.isForward()) { + try pool.ensureUnusedCapacity(allocator, 1); + const extra_index = try pool.addHashedExtra( + allocator, + &hasher, + FwdDeclAnon, + .{ .fields_len = fields_len }, + fields_len * @typeInfo(Field).Struct.fields.len, + ); + pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); + return pool.tagTrailingExtra( + allocator, + hasher, + .fwd_decl_struct_anon, + extra_index, + ); + } + const fwd_decl = try pool.fromType(allocator, scratch, ty, zcu, mod, .forward); + try pool.ensureUnusedCapacity(allocator, 1); + const extra_index = try pool.addHashedExtra(allocator, &hasher, Aggregate, .{ + .fwd_decl = fwd_decl.index, + .fields_len = fields_len, + }, fields_len * @typeInfo(Field).Struct.fields.len); + pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); + return pool.tagTrailingExtraAssumeCapacity(hasher, .aggregate_struct, extra_index); + }, + .union_type => { + const loaded_union = ip.loadUnionType(ip_index); + switch (loaded_union.getLayout(ip)) { + .auto, .@"extern" => { + const has_tag = loaded_union.hasTag(ip); + const fwd_decl = try pool.getFwdDecl(allocator, .{ + .tag = if (has_tag) .@"struct" else .@"union", + .name = .{ .owner_decl = loaded_union.decl }, + }); + if (kind.isForward()) return if (ty.hasRuntimeBitsIgnoreComptime(zcu)) + fwd_decl + else + .{ .index = .void }; + const loaded_tag = loaded_union.loadTagType(ip); + const scratch_top = scratch.items.len; + defer scratch.shrinkRetainingCapacity(scratch_top); + try scratch.ensureUnusedCapacity( + allocator, + loaded_union.field_types.len * @typeInfo(Field).Struct.fields.len, + ); + var hasher = Hasher.init; + var tag: Tag = .aggregate_union; + var payload_align: Alignment = .@"1"; + for (0..loaded_union.field_types.len) |field_index| { + const field_type = Type.fromInterned( + loaded_union.field_types.get(ip)[field_index], + ); + if (ip.isNoReturn(field_type.toIntern())) continue; + const field_ctype = try pool.fromType( + allocator, + scratch, + field_type, + zcu, + mod, + kind.noParameter(), + 
); + if (field_ctype.index == .void) continue; + const field_name = try pool.string( + allocator, + ip.stringToSlice(loaded_tag.names.get(ip)[field_index]), + ); + const field_alignas = AlignAs.fromAlignment(.{ + .@"align" = loaded_union.fieldAlign(ip, @intCast(field_index)), + .abi = field_type.abiAlignment(zcu), + }); + pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{ + .name = field_name.index, + .ctype = field_ctype.index, + .flags = .{ .alignas = field_alignas }, + }); + if (field_alignas.abiOrder().compare(.lt)) + tag = .aggregate_union_packed; + payload_align = payload_align.maxStrict(field_alignas.@"align"); + } + const fields_len: u32 = @intCast(@divExact( + scratch.items.len - scratch_top, + @typeInfo(Field).Struct.fields.len, + )); + if (!has_tag) { + if (fields_len == 0) return .{ .index = .void }; + try pool.ensureUnusedCapacity(allocator, 1); + const extra_index = try pool.addHashedExtra( + allocator, + &hasher, + Aggregate, + .{ .fwd_decl = fwd_decl.index, .fields_len = fields_len }, + fields_len * @typeInfo(Field).Struct.fields.len, + ); + pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); + return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); + } + try pool.ensureUnusedCapacity(allocator, 2); + var struct_fields: [2]Info.Field = undefined; + var struct_fields_len: usize = 0; + if (loaded_tag.tag_ty != .comptime_int_type) { + const tag_type = Type.fromInterned(loaded_tag.tag_ty); + const tag_ctype: CType = try pool.fromType( + allocator, + scratch, + tag_type, + zcu, + mod, + kind.noParameter(), + ); + if (tag_ctype.index != .void) { + struct_fields[struct_fields_len] = .{ + .name = try pool.string(allocator, "tag"), + .ctype = tag_ctype, + .alignas = AlignAs.fromAbiAlignment(tag_type.abiAlignment(zcu)), + }; + struct_fields_len += 1; + } + } + if (fields_len > 0) { + const payload_ctype = payload_ctype: { + const extra_index = try pool.addHashedExtra( + allocator, + &hasher, + AggregateAnon, + .{ + .owner_decl = loaded_union.decl, + .id = 0, + .fields_len = fields_len, + }, + fields_len * @typeInfo(Field).Struct.fields.len, + ); + pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]); + break :payload_ctype pool.tagTrailingExtraAssumeCapacity( + hasher, + switch (tag) { + .aggregate_union => .aggregate_union_anon, + .aggregate_union_packed => .aggregate_union_packed_anon, + else => unreachable, + }, + extra_index, + ); + }; + if (payload_ctype.index != .void) { + struct_fields[struct_fields_len] = .{ + .name = try pool.string(allocator, "payload"), + .ctype = payload_ctype, + .alignas = AlignAs.fromAbiAlignment(payload_align), + }; + struct_fields_len += 1; + } + } + if (struct_fields_len == 0) return .{ .index = .void }; + sortFields(struct_fields[0..struct_fields_len]); + return pool.getAggregate(allocator, .{ + .tag = .@"struct", + .name = .{ .fwd_decl = fwd_decl }, + .fields = struct_fields[0..struct_fields_len], + }); + }, + .@"packed" => return pool.fromIntInfo(allocator, .{ + .signedness = .unsigned, + .bits = @intCast(ty.bitSize(zcu)), + }, mod, kind), + } + }, + .opaque_type => return .{ .index = .void }, + .enum_type => return pool.fromType( + allocator, + scratch, + Type.fromInterned(ip.loadEnumType(ip_index).tag_ty), + zcu, + mod, + kind, + ), + .func_type => |func_info| if (func_info.is_generic) return .{ .index = .void } else { + const scratch_top = scratch.items.len; + defer scratch.shrinkRetainingCapacity(scratch_top); + try scratch.ensureUnusedCapacity(allocator, func_info.param_types.len); + 
var hasher = Hasher.init;
+                    const return_type = Type.fromInterned(func_info.return_type);
+                    const return_ctype: CType =
+                        if (!ip.isNoReturn(func_info.return_type)) try pool.fromType(
+                        allocator,
+                        scratch,
+                        return_type,
+                        zcu,
+                        mod,
+                        kind.asParameter(),
+                    ) else .{ .index = .void };
+                    for (0..func_info.param_types.len) |param_index| {
+                        const param_type = Type.fromInterned(
+                            func_info.param_types.get(ip)[param_index],
+                        );
+                        const param_ctype = try pool.fromType(
+                            allocator,
+                            scratch,
+                            param_type,
+                            zcu,
+                            mod,
+                            kind.asParameter(),
+                        );
+                        if (param_ctype.index == .void) continue;
+                        hasher.update(param_ctype.hash(pool));
+                        scratch.appendAssumeCapacity(@intFromEnum(param_ctype.index));
+                    }
+                    const param_ctypes_len: u32 = @intCast(scratch.items.len - scratch_top);
+                    try pool.ensureUnusedCapacity(allocator, 1);
+                    const extra_index = try pool.addHashedExtra(allocator, &hasher, Function, .{
+                        .return_ctype = return_ctype.index,
+                        .param_ctypes_len = param_ctypes_len,
+                    }, param_ctypes_len);
+                    pool.extra.appendSliceAssumeCapacity(scratch.items[scratch_top..]);
+                    return pool.tagTrailingExtraAssumeCapacity(hasher, switch (func_info.is_var_args) {
+                        false => .function,
+                        true => .function_varargs,
+                    }, extra_index);
+                },
+                .error_set_type,
+                .inferred_error_set_type,
+                => return pool.fromIntInfo(allocator, .{
+                    .signedness = .unsigned,
+                    .bits = zcu.errorSetBits(),
+                }, mod, kind),
+
+                .undef,
+                .simple_value,
+                .variable,
+                .extern_func,
+                .func,
+                .int,
+                .err,
+                .error_union,
+                .enum_literal,
+                .enum_tag,
+                .empty_enum_value,
+                .float,
+                .ptr,
+                .slice,
+                .opt,
+                .aggregate,
+                .un,
+                .memoized_call,
+                => unreachable,
+            },
+        }
+    }
+
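The `getOrPutAdapted` function below copies a type from one pool into another while preserving the interning invariant: every distinct C type is stored exactly once, so comparing two `CType` values within a pool reduces to comparing their indices. Here is a minimal standalone sketch of that property, using a hypothetical `Interner` rather than the compiler's actual `Pool` API, and interning strings instead of serialized type records:

const std = @import("std");

// Hypothetical miniature of the interning pattern: a map deduplicates
// values, and each distinct value gets a stable index into `list`.
const Interner = struct {
    gpa: std.mem.Allocator,
    map: std.StringHashMapUnmanaged(u32) = .{},
    list: std.ArrayListUnmanaged([]const u8) = .{},

    fn intern(self: *Interner, str: []const u8) !u32 {
        const gop = try self.map.getOrPut(self.gpa, str);
        if (!gop.found_existing) {
            // First time we see this value: assign the next index.
            gop.value_ptr.* = @intCast(self.list.items.len);
            try self.list.append(self.gpa, str);
        }
        return gop.value_ptr.*;
    }
};

test "interned values compare by index" {
    var interner: Interner = .{ .gpa = std.testing.allocator };
    defer {
        interner.map.deinit(interner.gpa);
        interner.list.deinit(interner.gpa);
    }
    const a = try interner.intern("uint8_t");
    const b = try interner.intern("uint8_t");
    try std.testing.expectEqual(a, b); // equal values -> equal indices
}

The real pool goes further than this sketch: its map stores no keys at all, and the adapters below hash and compare candidate entries directly against the serialized `items`/`extra` arrays, which is why cross-pool copies need `eqlAdapted`.
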
+    pub fn getOrPutAdapted(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        source_pool: *const Pool,
+        source_ctype: CType,
+        pool_adapter: anytype,
+    ) !struct { CType, bool } {
+        const tag = source_pool.items.items(.tag)[
+            source_ctype.toPoolIndex() orelse return .{ source_ctype, true }
+        ];
+        try pool.ensureUnusedCapacity(allocator, 1);
+        const CTypeAdapter = struct {
+            pool: *const Pool,
+            source_pool: *const Pool,
+            source_info: Info,
+            pool_adapter: @TypeOf(pool_adapter),
+            pub fn hash(map_adapter: @This(), key_ctype: CType) Map.Hash {
+                return key_ctype.hash(map_adapter.source_pool);
+            }
+            pub fn eql(map_adapter: @This(), _: CType, _: void, pool_index: usize) bool {
+                return map_adapter.source_info.eqlAdapted(
+                    map_adapter.source_pool,
+                    CType.fromPoolIndex(pool_index),
+                    map_adapter.pool,
+                    map_adapter.pool_adapter,
+                );
+            }
+        };
+        const source_info = source_ctype.info(source_pool);
+        const gop = pool.map.getOrPutAssumeCapacityAdapted(source_ctype, CTypeAdapter{
+            .pool = pool,
+            .source_pool = source_pool,
+            .source_info = source_info,
+            .pool_adapter = pool_adapter,
+        });
+        errdefer _ = pool.map.pop();
+        const ctype = CType.fromPoolIndex(gop.index);
+        if (!gop.found_existing) switch (source_info) {
+            .basic => unreachable,
+            .pointer => |pointer_info| pool.items.appendAssumeCapacity(.{
+                .tag = tag,
+                .data = @intFromEnum(pool_adapter.copy(pointer_info.elem_ctype).index),
+            }),
+            .aligned => |aligned_info| pool.items.appendAssumeCapacity(.{
+                .tag = tag,
+                .data = try pool.addExtra(allocator, Aligned, .{
+                    .ctype = pool_adapter.copy(aligned_info.ctype).index,
+                    .flags = .{ .alignas = aligned_info.alignas },
+                }, 0),
+            }),
+            .array, .vector => |sequence_info| pool.items.appendAssumeCapacity(.{
+                .tag = tag,
+                .data = switch (tag) {
+                    .array_small, .vector => try pool.addExtra(allocator, SequenceSmall, .{
+                        .elem_ctype = pool_adapter.copy(sequence_info.elem_ctype).index,
+                        .len = @intCast(sequence_info.len),
+                    }, 0),
+                    .array_large => try pool.addExtra(allocator, SequenceLarge, .{
+                        .elem_ctype = pool_adapter.copy(sequence_info.elem_ctype).index,
+                        .len_lo = @truncate(sequence_info.len >> 0),
+                        .len_hi = @truncate(sequence_info.len >> 32),
+                    }, 0),
+                    else => unreachable,
+                },
+            }),
+            .fwd_decl => |fwd_decl_info| switch (fwd_decl_info.name) {
+                .anon => |fields| {
+                    pool.items.appendAssumeCapacity(.{
+                        .tag = tag,
+                        .data = try pool.addExtra(allocator, FwdDeclAnon, .{
+                            .fields_len = fields.len,
+                        }, fields.len * @typeInfo(Field).Struct.fields.len),
+                    });
+                    for (0..fields.len) |field_index| {
+                        const field = fields.at(field_index, source_pool);
+                        const field_name = try pool.string(allocator, field.name.slice(source_pool));
+                        pool.addExtraAssumeCapacity(Field, .{
+                            .name = field_name.index,
+                            .ctype = pool_adapter.copy(field.ctype).index,
+                            .flags = .{ .alignas = field.alignas },
+                        });
+                    }
+                },
+                .owner_decl => |owner_decl| pool.items.appendAssumeCapacity(.{
+                    .tag = tag,
+                    .data = @intFromEnum(owner_decl),
+                }),
+            },
+            .aggregate => |aggregate_info| {
+                pool.items.appendAssumeCapacity(.{
+                    .tag = tag,
+                    .data = switch (aggregate_info.name) {
+                        .anon => |anon| try pool.addExtra(allocator, AggregateAnon, .{
+                            .owner_decl = anon.owner_decl,
+                            .id = anon.id,
+                            .fields_len = aggregate_info.fields.len,
+                        }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+                        .fwd_decl => |fwd_decl| try pool.addExtra(allocator, Aggregate, .{
+                            .fwd_decl = pool_adapter.copy(fwd_decl).index,
+                            .fields_len = aggregate_info.fields.len,
+                        }, aggregate_info.fields.len * @typeInfo(Field).Struct.fields.len),
+                    },
+                });
+                for (0..aggregate_info.fields.len) |field_index| {
+                    const field = aggregate_info.fields.at(field_index, source_pool);
+                    const field_name = try pool.string(allocator, field.name.slice(source_pool));
+                    pool.addExtraAssumeCapacity(Field, .{
+                        .name = field_name.index,
+                        .ctype = pool_adapter.copy(field.ctype).index,
+                        .flags = .{ .alignas = field.alignas },
+                    });
+                }
+            },
+            .function => |function_info| {
+                pool.items.appendAssumeCapacity(.{
+                    .tag = tag,
+                    .data = try pool.addExtra(allocator, Function, .{
+                        .return_ctype = pool_adapter.copy(function_info.return_ctype).index,
+                        .param_ctypes_len = function_info.param_ctypes.len,
+                    }, function_info.param_ctypes.len),
+                });
+                for (0..function_info.param_ctypes.len) |param_index| pool.extra.appendAssumeCapacity(
+                    @intFromEnum(pool_adapter.copy(
+                        function_info.param_ctypes.at(param_index, source_pool),
+                    ).index),
+                );
+            },
+        };
+        assert(source_info.eqlAdapted(source_pool, ctype, pool, pool_adapter));
+        assert(source_ctype.hash(source_pool) == ctype.hash(pool));
+        return .{ ctype, gop.found_existing };
+    }
+
+    pub fn string(pool: *Pool, allocator: std.mem.Allocator, str: []const u8) !String {
+        try pool.string_bytes.appendSlice(allocator, str);
+        return pool.trailingString(allocator);
+    }
+
+    pub fn fmt(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        comptime fmt_str: []const u8,
+        fmt_args: anytype,
+    ) !String {
+        try pool.string_bytes.writer(allocator).print(fmt_str, fmt_args);
+        return pool.trailingString(allocator);
+    }
+
+    fn ensureUnusedCapacity(pool: *Pool, allocator: std.mem.Allocator, len: u32) !void {
+        try pool.map.ensureUnusedCapacity(allocator, len);
+        try pool.items.ensureUnusedCapacity(allocator, len);
+    }
+
+    const Hasher = struct {
+        const Impl = std.hash.Wyhash;
+        impl: Impl,
+
+        const init: Hasher = .{ .impl = Impl.init(0) };
+
+        fn
updateExtra(hasher: *Hasher, comptime Extra: type, extra: Extra, pool: *const Pool) void { + inline for (@typeInfo(Extra).Struct.fields) |field| { + const value = @field(extra, field.name); + hasher.update(switch (field.type) { + Tag, String, CType => unreachable, + CType.Index => (CType{ .index = value }).hash(pool), + String.Index => (String{ .index = value }).slice(pool), + else => value, + }); + } + } + fn update(hasher: *Hasher, data: anytype) void { + switch (@TypeOf(data)) { + Tag => @compileError("pass tag to final"), + CType, CType.Index => @compileError("hash ctype.hash(pool) instead"), + String, String.Index => @compileError("hash string.slice(pool) instead"), + u32, DeclIndex, Aligned.Flags => hasher.impl.update(std.mem.asBytes(&data)), + []const u8 => hasher.impl.update(data), + else => @compileError("unhandled type: " ++ @typeName(@TypeOf(data))), + } + } + + fn final(hasher: Hasher, tag: Tag) Map.Hash { + var impl = hasher.impl; + impl.update(std.mem.asBytes(&tag)); + return @truncate(impl.final()); + } + }; + + fn tagData( + pool: *Pool, + allocator: std.mem.Allocator, + hasher: Hasher, + tag: Tag, + data: u32, + ) !CType { + try pool.ensureUnusedCapacity(allocator, 1); + const Key = struct { hash: Map.Hash, tag: Tag, data: u32 }; + const CTypeAdapter = struct { + pool: *const Pool, + pub fn hash(_: @This(), key: Key) Map.Hash { + return key.hash; + } + pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + const rhs_item = ctype_adapter.pool.items.get(rhs_index); + return lhs_key.tag == rhs_item.tag and lhs_key.data == rhs_item.data; + } + }; + const gop = pool.map.getOrPutAssumeCapacityAdapted( + Key{ .hash = hasher.final(tag), .tag = tag, .data = data }, + CTypeAdapter{ .pool = pool }, + ); + if (!gop.found_existing) pool.items.appendAssumeCapacity(.{ .tag = tag, .data = data }); + return CType.fromPoolIndex(gop.index); + } + + fn tagExtra( + pool: *Pool, + allocator: std.mem.Allocator, + tag: Tag, + comptime Extra: type, + extra: Extra, + ) !CType { + var hasher = Hasher.init; + hasher.updateExtra(Extra, extra, pool); + return pool.tagTrailingExtra( + allocator, + hasher, + tag, + try pool.addExtra(allocator, Extra, extra, 0), + ); + } + + fn tagTrailingExtra( + pool: *Pool, + allocator: std.mem.Allocator, + hasher: Hasher, + tag: Tag, + extra_index: ExtraIndex, + ) !CType { + try pool.ensureUnusedCapacity(allocator, 1); + return pool.tagTrailingExtraAssumeCapacity(hasher, tag, extra_index); + } + + fn tagTrailingExtraAssumeCapacity( + pool: *Pool, + hasher: Hasher, + tag: Tag, + extra_index: ExtraIndex, + ) CType { + const Key = struct { hash: Map.Hash, tag: Tag, extra: []const u32 }; + const CTypeAdapter = struct { + pool: *const Pool, + pub fn hash(_: @This(), key: Key) Map.Hash { + return key.hash; + } + pub fn eql(ctype_adapter: @This(), lhs_key: Key, _: void, rhs_index: usize) bool { + const rhs_item = ctype_adapter.pool.items.get(rhs_index); + if (lhs_key.tag != rhs_item.tag) return false; + const rhs_extra = ctype_adapter.pool.extra.items[rhs_item.data..]; + return std.mem.startsWith(u32, rhs_extra, lhs_key.extra); + } + }; + const gop = pool.map.getOrPutAssumeCapacityAdapted( + Key{ .hash = hasher.final(tag), .tag = tag, .extra = pool.extra.items[extra_index..] 
}, + CTypeAdapter{ .pool = pool }, + ); + if (gop.found_existing) + pool.extra.shrinkRetainingCapacity(extra_index) + else + pool.items.appendAssumeCapacity(.{ .tag = tag, .data = extra_index }); + return CType.fromPoolIndex(gop.index); + } + + fn sortFields(fields: []Info.Field) void { + std.mem.sort(Info.Field, fields, {}, struct { + fn before(_: void, lhs_field: Info.Field, rhs_field: Info.Field) bool { + return lhs_field.alignas.order(rhs_field.alignas).compare(.gt); + } + }.before); + } + + fn trailingString(pool: *Pool, allocator: std.mem.Allocator) !String { + const StringAdapter = struct { + pool: *const Pool, + pub fn hash(_: @This(), slice: []const u8) Map.Hash { + return @truncate(Hasher.Impl.hash(1, slice)); + } + pub fn eql(string_adapter: @This(), lhs_slice: []const u8, _: void, rhs_index: usize) bool { + const rhs_string: String = .{ .index = @enumFromInt(rhs_index) }; + const rhs_slice = rhs_string.slice(string_adapter.pool); + return std.mem.eql(u8, lhs_slice, rhs_slice); + } + }; + try pool.string_map.ensureUnusedCapacity(allocator, 1); + try pool.string_indices.ensureUnusedCapacity(allocator, 1); + + const start = pool.string_indices.getLast(); + const gop = pool.string_map.getOrPutAssumeCapacityAdapted( + @as([]const u8, pool.string_bytes.items[start..]), + StringAdapter{ .pool = pool }, + ); + if (gop.found_existing) + pool.string_bytes.shrinkRetainingCapacity(start) + else + pool.string_indices.appendAssumeCapacity(@intCast(pool.string_bytes.items.len)); + return .{ .index = @enumFromInt(gop.index) }; + } + + const Item = struct { + tag: Tag, + data: u32, + }; + + const ExtraIndex = u32; + + const Tag = enum(u8) { + basic, + pointer, + pointer_const, + pointer_volatile, + pointer_const_volatile, + aligned, + array_small, + array_large, + vector, + fwd_decl_struct_anon, + fwd_decl_union_anon, + fwd_decl_struct, + fwd_decl_union, + aggregate_struct_anon, + aggregate_struct_packed_anon, + aggregate_union_anon, + aggregate_union_packed_anon, + aggregate_struct, + aggregate_struct_packed, + aggregate_union, + aggregate_union_packed, + function, + function_varargs, + }; + + const Aligned = struct { + ctype: CType.Index, + flags: Flags, + + const Flags = packed struct(u32) { + alignas: AlignAs, + _: u20 = 0, + }; + }; + + const SequenceSmall = struct { + elem_ctype: CType.Index, + len: u32, + }; + + const SequenceLarge = struct { + elem_ctype: CType.Index, + len_lo: u32, + len_hi: u32, + + fn len(extra: SequenceLarge) u64 { + return @as(u64, extra.len_lo) << 0 | + @as(u64, extra.len_hi) << 32; + } + }; + + const Field = struct { + name: String.Index, + ctype: CType.Index, + flags: Flags, + + const Flags = Aligned.Flags; + }; + + const FwdDeclAnon = struct { + fields_len: u32, + }; + + const AggregateAnon = struct { + owner_decl: DeclIndex, + id: u32, + fields_len: u32, + }; + + const Aggregate = struct { + fwd_decl: CType.Index, + fields_len: u32, + }; + + const Function = struct { + return_ctype: CType.Index, + param_ctypes_len: u32, + }; + + fn addExtra( + pool: *Pool, + allocator: std.mem.Allocator, + comptime Extra: type, + extra: Extra, + trailing_len: usize, + ) !ExtraIndex { + try pool.extra.ensureUnusedCapacity( + allocator, + @typeInfo(Extra).Struct.fields.len + trailing_len, + ); + defer pool.addExtraAssumeCapacity(Extra, extra); + return @intCast(pool.extra.items.len); + } + fn addExtraAssumeCapacity(pool: *Pool, comptime Extra: type, extra: Extra) void { + addExtraAssumeCapacityTo(&pool.extra, Extra, extra); + } + fn addExtraAssumeCapacityTo( + array: 
*std.ArrayListUnmanaged(u32),
+        comptime Extra: type,
+        extra: Extra,
+    ) void {
+        inline for (@typeInfo(Extra).Struct.fields) |field| {
+            const value = @field(extra, field.name);
+            array.appendAssumeCapacity(switch (field.type) {
+                u32 => value,
+                CType.Index, String.Index, DeclIndex => @intFromEnum(value),
+                Aligned.Flags => @bitCast(value),
+                else => @compileError("bad field type: " ++ field.name ++ ": " ++
+                    @typeName(field.type)),
+            });
+        }
+    }
+
+    fn addHashedExtra(
+        pool: *Pool,
+        allocator: std.mem.Allocator,
+        hasher: *Hasher,
+        comptime Extra: type,
+        extra: Extra,
+        trailing_len: usize,
+    ) !ExtraIndex {
+        hasher.updateExtra(Extra, extra, pool);
+        return pool.addExtra(allocator, Extra, extra, trailing_len);
+    }
+    fn addHashedExtraAssumeCapacity(
+        pool: *Pool,
+        hasher: *Hasher,
+        comptime Extra: type,
+        extra: Extra,
+    ) void {
+        hasher.updateExtra(Extra, extra, pool);
+        pool.addExtraAssumeCapacity(Extra, extra);
+    }
+    fn addHashedExtraAssumeCapacityTo(
+        pool: *Pool,
+        array: *std.ArrayListUnmanaged(u32),
+        hasher: *Hasher,
+        comptime Extra: type,
+        extra: Extra,
+    ) void {
+        hasher.updateExtra(Extra, extra, pool);
+        addExtraAssumeCapacityTo(array, Extra, extra);
+    }
+
+    const ExtraTrail = struct {
+        extra_index: ExtraIndex,
+
+        fn next(
+            extra_trail: *ExtraTrail,
+            len: u32,
+            comptime Extra: type,
+            pool: *const Pool,
+        ) []const Extra {
+            defer extra_trail.extra_index += @intCast(len);
+            return @ptrCast(pool.extra.items[extra_trail.extra_index..][0..len]);
+        }
+    };
+
+    fn getExtraTrail(
+        pool: *const Pool,
+        comptime Extra: type,
+        extra_index: ExtraIndex,
+    ) struct { extra: Extra, trail: ExtraTrail } {
+        var extra: Extra = undefined;
+        const fields = @typeInfo(Extra).Struct.fields;
+        inline for (fields, pool.extra.items[extra_index..][0..fields.len]) |field, value|
+            @field(extra, field.name) = switch (field.type) {
+                u32 => value,
+                CType.Index, String.Index, DeclIndex => @enumFromInt(value),
+                Aligned.Flags => @bitCast(value),
+                else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
+            };
+        return .{
+            .extra = extra,
+            .trail = .{ .extra_index = extra_index + @as(ExtraIndex, @intCast(fields.len)) },
+        };
+    }
+
+    fn getExtra(pool: *const Pool, comptime Extra: type, extra_index: ExtraIndex) Extra {
+        return pool.getExtraTrail(Extra, extra_index).extra;
+    }
+};
+
+pub const AlignAs = packed struct {
+    @"align": Alignment,
+    abi: Alignment,
+
+    pub fn fromAlignment(alignas: AlignAs) AlignAs {
+        assert(alignas.abi != .none);
+        return .{
+            .@"align" = if (alignas.@"align" != .none) alignas.@"align" else alignas.abi,
+            .abi = alignas.abi,
+        };
+    }
+    pub fn fromAbiAlignment(abi: Alignment) AlignAs {
+        assert(abi != .none);
+        return .{ .@"align" = abi, .abi = abi };
+    }
+    pub fn fromByteUnits(@"align": u64, abi: u64) AlignAs {
+        return fromAlignment(.{
+            .@"align" = Alignment.fromByteUnits(@"align"),
+            .abi = Alignment.fromNonzeroByteUnits(abi),
+        });
+    }
+
+    pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order {
+        return lhs.@"align".order(rhs.@"align");
+    }
+    pub fn abiOrder(alignas: AlignAs) std.math.Order {
+        return alignas.@"align".order(alignas.abi);
+    }
+    pub fn toByteUnits(alignas: AlignAs) u64 {
+        return alignas.@"align".toByteUnits().?;
+    }
+};
+
+const Alignment = @import("../../InternPool.zig").Alignment;
+const assert = std.debug.assert;
+const CType = @This();
+const DeclIndex = std.zig.DeclIndex;
+const Module = @import("../../Package/Module.zig");
+const std = @import("std");
+const Type = @import("../../type.zig").Type;
+const Zcu = @import("../../Module.zig");
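The `addExtra`/`getExtraTrail` helpers above flatten typed records into the pool's shared `extra` array of `u32`s, addressed by the index of their first word; `@typeInfo` drives both the encode and decode loops, so the two directions cannot drift apart. A freestanding sketch of the round trip, with a hypothetical two-field `Extra` record (the real helpers additionally translate enum and packed-struct fields through `@intFromEnum` and `@bitCast`):

const std = @import("std");

// Hypothetical record type; the real code defines one per Tag payload.
const Extra = struct { fwd_decl: u32, fields_len: u32 };

fn addExtra(list: *std.ArrayListUnmanaged(u32), gpa: std.mem.Allocator, extra: Extra) !u32 {
    const index: u32 = @intCast(list.items.len);
    // Encode: append each field as one u32, in declaration order.
    inline for (@typeInfo(Extra).Struct.fields) |field| {
        try list.append(gpa, @field(extra, field.name));
    }
    return index;
}

fn getExtra(items: []const u32, index: u32) Extra {
    var extra: Extra = undefined;
    // Decode: read the same fields back in the same order.
    inline for (@typeInfo(Extra).Struct.fields, 0..) |field, i| {
        @field(extra, field.name) = items[index + i];
    }
    return extra;
}

test "extra round trip" {
    var list: std.ArrayListUnmanaged(u32) = .{};
    defer list.deinit(std.testing.allocator);
    const index = try addExtra(&list, std.testing.allocator, .{ .fwd_decl = 1, .fields_len = 2 });
    try std.testing.expectEqual(@as(u32, 2), getExtra(list.items, index).fields_len);
}

This dense encoding is what the deleted implementation below lacked: the old code heap-allocated a payload object per compound type, where the new pool appends a few words to one array.
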
diff --git a/src/codegen/c/type.zig b/src/codegen/c/type.zig
deleted file mode 100644
index a75f5e8f15..0000000000
--- a/src/codegen/c/type.zig
+++ /dev/null
@@ -1,2332 +0,0 @@
-const std = @import("std");
-const mem = std.mem;
-const Allocator = mem.Allocator;
-const assert = std.debug.assert;
-const autoHash = std.hash.autoHash;
-
-const Alignment = @import("../../InternPool.zig").Alignment;
-const Zcu = @import("../../Module.zig");
-const Module = @import("../../Package/Module.zig");
-const InternPool = @import("../../InternPool.zig");
-const Type = @import("../../type.zig").Type;
-
-pub const CType = extern union {
-    /// If the tag value is less than Tag.no_payload_count, then no pointer
-    /// dereference is needed.
-    tag_if_small_enough: Tag,
-    ptr_otherwise: *const Payload,
-
-    pub fn initTag(small_tag: Tag) CType {
-        assert(!small_tag.hasPayload());
-        return .{ .tag_if_small_enough = small_tag };
-    }
-
-    pub fn initPayload(pl: anytype) CType {
-        const T = @typeInfo(@TypeOf(pl)).Pointer.child;
-        return switch (pl.base.tag) {
-            inline else => |t| if (comptime t.hasPayload() and t.Type() == T) .{
-                .ptr_otherwise = &pl.base,
-            } else unreachable,
-        };
-    }
-
-    pub fn hasPayload(self: CType) bool {
-        return self.tag_if_small_enough.hasPayload();
-    }
-
-    pub fn tag(self: CType) Tag {
-        return if (self.hasPayload()) self.ptr_otherwise.tag else self.tag_if_small_enough;
-    }
-
-    pub fn cast(self: CType, comptime T: type) ?*const T {
-        if (!self.hasPayload()) return null;
-        const pl = self.ptr_otherwise;
-        return switch (pl.tag) {
-            inline else => |t| if (comptime t.hasPayload() and t.Type() == T)
-                @fieldParentPtr(T, "base", pl)
-            else
-                null,
-        };
-    }
-
-    pub fn castTag(self: CType, comptime t: Tag) ?*const t.Type() {
-        return if (self.tag() == t) @fieldParentPtr(t.Type(), "base", self.ptr_otherwise) else null;
-    }
-
-    pub const Tag = enum(usize) {
-        // The first section of this enum are tags that require no payload.
-        void,
-
-        // C basic types
-        char,
-
-        @"signed char",
-        short,
-        int,
-        long,
-        @"long long",
-
-        _Bool,
-        @"unsigned char",
-        @"unsigned short",
-        @"unsigned int",
-        @"unsigned long",
-        @"unsigned long long",
-
-        float,
-        double,
-        @"long double",
-
-        // C header types
-        // - stdbool.h
-        bool,
-        // - stddef.h
-        size_t,
-        ptrdiff_t,
-        // - stdint.h
-        uint8_t,
-        int8_t,
-        uint16_t,
-        int16_t,
-        uint32_t,
-        int32_t,
-        uint64_t,
-        int64_t,
-        uintptr_t,
-        intptr_t,
-
-        // zig.h types
-        zig_u128,
-        zig_i128,
-        zig_f16,
-        zig_f32,
-        zig_f64,
-        zig_f80,
-        zig_f128,
-        zig_c_longdouble, // Keep last_no_payload_tag updated!
-
-        // After this, the tag requires a payload.
- pointer, - pointer_const, - pointer_volatile, - pointer_const_volatile, - array, - vector, - fwd_anon_struct, - fwd_anon_union, - fwd_struct, - fwd_union, - unnamed_struct, - unnamed_union, - packed_unnamed_struct, - packed_unnamed_union, - anon_struct, - anon_union, - @"struct", - @"union", - packed_struct, - packed_union, - function, - varargs_function, - - pub const last_no_payload_tag = Tag.zig_c_longdouble; - pub const no_payload_count = @intFromEnum(last_no_payload_tag) + 1; - - pub fn hasPayload(self: Tag) bool { - return @intFromEnum(self) >= no_payload_count; - } - - pub fn toIndex(self: Tag) Index { - assert(!self.hasPayload()); - return @as(Index, @intCast(@intFromEnum(self))); - } - - pub fn Type(comptime self: Tag) type { - return switch (self) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => @compileError("Type Tag " ++ @tagName(self) ++ " has no payload"), - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => Payload.Child, - - .array, - .vector, - => Payload.Sequence, - - .fwd_anon_struct, - .fwd_anon_union, - => Payload.Fields, - - .fwd_struct, - .fwd_union, - => Payload.FwdDecl, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => Payload.Unnamed, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => Payload.Aggregate, - - .function, - .varargs_function, - => Payload.Function, - }; - } - }; - - pub const Payload = struct { - tag: Tag, - - pub const Child = struct { - base: Payload, - data: Index, - }; - - pub const Sequence = struct { - base: Payload, - data: struct { - len: u64, - elem_type: Index, - }, - }; - - pub const FwdDecl = struct { - base: Payload, - data: InternPool.DeclIndex, - }; - - pub const Fields = struct { - base: Payload, - data: Data, - - pub const Data = []const Field; - pub const Field = struct { - name: [*:0]const u8, - type: Index, - alignas: AlignAs, - }; - }; - - pub const Unnamed = struct { - base: Payload, - data: struct { - fields: Fields.Data, - owner_decl: InternPool.DeclIndex, - id: u32, - }, - }; - - pub const Aggregate = struct { - base: Payload, - data: struct { - fields: Fields.Data, - fwd_decl: Index, - }, - }; - - pub const Function = struct { - base: Payload, - data: struct { - return_type: Index, - param_types: []const Index, - }, - }; - }; - - pub const AlignAs = packed struct { - @"align": Alignment, - abi: Alignment, - - pub fn init(@"align": Alignment, abi_align: Alignment) AlignAs { - assert(abi_align != .none); - return .{ - .@"align" = if (@"align" != .none) @"align" else abi_align, - .abi = abi_align, - }; - } - - pub fn initByteUnits(alignment: u64, abi_alignment: u32) AlignAs { - return init( - Alignment.fromByteUnits(alignment), - Alignment.fromNonzeroByteUnits(abi_alignment), - ); - } - pub fn abiAlign(ty: Type, zcu: *Zcu) AlignAs { - const abi_align = ty.abiAlignment(zcu); - return init(abi_align, abi_align); - } - pub fn fieldAlign(struct_ty: Type, field_i: usize, zcu: *Zcu) AlignAs { - return init( - struct_ty.structFieldAlign(field_i, zcu), 
- struct_ty.structFieldType(field_i, zcu).abiAlignment(zcu), - ); - } - pub fn unionPayloadAlign(union_ty: Type, zcu: *Zcu) AlignAs { - const union_obj = zcu.typeToUnion(union_ty).?; - const union_payload_align = zcu.unionAbiAlignment(union_obj); - return init(union_payload_align, union_payload_align); - } - - pub fn order(lhs: AlignAs, rhs: AlignAs) std.math.Order { - return lhs.@"align".order(rhs.@"align"); - } - pub fn abiOrder(self: AlignAs) std.math.Order { - return self.@"align".order(self.abi); - } - pub fn toByteUnits(self: AlignAs) u64 { - return self.@"align".toByteUnitsOptional().?; - } - }; - - pub const Index = u32; - pub const Store = struct { - arena: std.heap.ArenaAllocator.State = .{}, - set: Set = .{}, - - pub const Set = struct { - pub const Map = std.ArrayHashMapUnmanaged(CType, void, HashContext, true); - const HashContext = struct { - store: *const Set, - - pub fn hash(self: @This(), cty: CType) Map.Hash { - return @as(Map.Hash, @truncate(cty.hash(self.store.*))); - } - pub fn eql(_: @This(), lhs: CType, rhs: CType, _: usize) bool { - return lhs.eql(rhs); - } - }; - - map: Map = .{}, - - pub fn indexToCType(self: Set, index: Index) CType { - if (index < Tag.no_payload_count) return initTag(@as(Tag, @enumFromInt(index))); - return self.map.keys()[index - Tag.no_payload_count]; - } - - pub fn indexToHash(self: Set, index: Index) Map.Hash { - if (index < Tag.no_payload_count) - return (HashContext{ .store = &self }).hash(self.indexToCType(index)); - return self.map.entries.items(.hash)[index - Tag.no_payload_count]; - } - - pub fn typeToIndex(self: Set, ty: Type, zcu: *Zcu, mod: *Module, kind: Kind) ?Index { - const lookup = Convert.Lookup{ .imm = .{ .set = &self, .zcu = zcu, .mod = mod } }; - - var convert: Convert = undefined; - convert.initType(ty, kind, lookup) catch unreachable; - - const t = convert.tag(); - if (!t.hasPayload()) return t.toIndex(); - - return if (self.map.getIndexAdapted( - ty, - TypeAdapter32{ .kind = kind, .lookup = lookup, .convert = &convert }, - )) |idx| @as(Index, @intCast(Tag.no_payload_count + idx)) else null; - } - }; - - pub const Promoted = struct { - arena: std.heap.ArenaAllocator, - set: Set, - - pub fn gpa(self: *Promoted) Allocator { - return self.arena.child_allocator; - } - - pub fn cTypeToIndex(self: *Promoted, cty: CType) Allocator.Error!Index { - const t = cty.tag(); - if (@intFromEnum(t) < Tag.no_payload_count) return @as(Index, @intCast(@intFromEnum(t))); - - const gop = try self.set.map.getOrPutContext(self.gpa(), cty, .{ .store = &self.set }); - if (!gop.found_existing) gop.key_ptr.* = cty; - if (std.debug.runtime_safety) { - const key = &self.set.map.entries.items(.key)[gop.index]; - assert(key == gop.key_ptr); - assert(cty.eql(key.*)); - assert(cty.hash(self.set) == key.hash(self.set)); - } - return @as(Index, @intCast(Tag.no_payload_count + gop.index)); - } - - pub fn typeToIndex( - self: *Promoted, - ty: Type, - zcu: *Zcu, - mod: *Module, - kind: Kind, - ) Allocator.Error!Index { - const lookup = Convert.Lookup{ .mut = .{ .promoted = self, .zcu = zcu, .mod = mod } }; - - var convert: Convert = undefined; - try convert.initType(ty, kind, lookup); - - const t = convert.tag(); - if (!t.hasPayload()) return t.toIndex(); - - const gop = try self.set.map.getOrPutContextAdapted( - self.gpa(), - ty, - TypeAdapter32{ .kind = kind, .lookup = lookup.freeze(), .convert = &convert }, - .{ .store = &self.set }, - ); - if (!gop.found_existing) { - errdefer _ = self.set.map.pop(); - gop.key_ptr.* = try createFromConvert(self, ty, zcu, 
mod, kind, convert); - } - if (std.debug.runtime_safety) { - const adapter = TypeAdapter64{ - .kind = kind, - .lookup = lookup.freeze(), - .convert = &convert, - }; - const cty = &self.set.map.entries.items(.key)[gop.index]; - assert(cty == gop.key_ptr); - assert(adapter.eql(ty, cty.*)); - assert(adapter.hash(ty) == cty.hash(self.set)); - } - return @as(Index, @intCast(Tag.no_payload_count + gop.index)); - } - }; - - pub fn promote(self: Store, gpa: Allocator) Promoted { - return .{ .arena = self.arena.promote(gpa), .set = self.set }; - } - - pub fn demote(self: *Store, promoted: Promoted) void { - self.arena = promoted.arena.state; - self.set = promoted.set; - } - - pub fn indexToCType(self: Store, index: Index) CType { - return self.set.indexToCType(index); - } - - pub fn indexToHash(self: Store, index: Index) Set.Map.Hash { - return self.set.indexToHash(index); - } - - pub fn cTypeToIndex(self: *Store, gpa: Allocator, cty: CType) !Index { - var promoted = self.promote(gpa); - defer self.demote(promoted); - return promoted.cTypeToIndex(cty); - } - - pub fn typeToCType(self: *Store, gpa: Allocator, ty: Type, zcu: *Zcu, mod: *Module, kind: Kind) !CType { - const idx = try self.typeToIndex(gpa, ty, zcu, mod, kind); - return self.indexToCType(idx); - } - - pub fn typeToIndex(self: *Store, gpa: Allocator, ty: Type, zcu: *Zcu, mod: *Module, kind: Kind) !Index { - var promoted = self.promote(gpa); - defer self.demote(promoted); - return promoted.typeToIndex(ty, zcu, mod, kind); - } - - pub fn clearRetainingCapacity(self: *Store, gpa: Allocator) void { - var promoted = self.promote(gpa); - defer self.demote(promoted); - promoted.set.map.clearRetainingCapacity(); - _ = promoted.arena.reset(.retain_capacity); - } - - pub fn clearAndFree(self: *Store, gpa: Allocator) void { - var promoted = self.promote(gpa); - defer self.demote(promoted); - promoted.set.map.clearAndFree(gpa); - _ = promoted.arena.reset(.free_all); - } - - pub fn shrinkRetainingCapacity(self: *Store, gpa: Allocator, new_len: usize) void { - self.set.map.shrinkRetainingCapacity(gpa, new_len); - } - - pub fn shrinkAndFree(self: *Store, gpa: Allocator, new_len: usize) void { - self.set.map.shrinkAndFree(gpa, new_len); - } - - pub fn count(self: Store) usize { - return self.set.map.count(); - } - - pub fn move(self: *Store) Store { - const moved = self.*; - self.* = .{}; - return moved; - } - - pub fn deinit(self: *Store, gpa: Allocator) void { - var promoted = self.promote(gpa); - promoted.set.map.deinit(gpa); - _ = promoted.arena.deinit(); - self.* = undefined; - } - }; - - pub fn isBool(self: CType) bool { - return switch (self.tag()) { - ._Bool, - .bool, - => true, - else => false, - }; - } - - pub fn isInteger(self: CType) bool { - return switch (self.tag()) { - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - => true, - else => false, - }; - } - - pub fn signedness(self: CType, mod: *Module) std.builtin.Signedness { - return switch (self.tag()) { - .char => mod.resolved_target.result.charSignedness(), - .@"signed char", - .short, - .int, - .long, - .@"long long", - .ptrdiff_t, - .int8_t, - .int16_t, - .int32_t, - .int64_t, - .intptr_t, - .zig_i128, - => .signed, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - 
.@"unsigned long", - .@"unsigned long long", - .size_t, - .uint8_t, - .uint16_t, - .uint32_t, - .uint64_t, - .uintptr_t, - .zig_u128, - => .unsigned, - else => unreachable, - }; - } - - pub fn isFloat(self: CType) bool { - return switch (self.tag()) { - .float, - .double, - .@"long double", - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => true, - else => false, - }; - } - - pub fn isPointer(self: CType) bool { - return switch (self.tag()) { - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => true, - else => false, - }; - } - - pub fn isFunction(self: CType) bool { - return switch (self.tag()) { - .function, - .varargs_function, - => true, - else => false, - }; - } - - pub fn toSigned(self: CType) CType { - return CType.initTag(switch (self.tag()) { - .char, .@"signed char", .@"unsigned char" => .@"signed char", - .short, .@"unsigned short" => .short, - .int, .@"unsigned int" => .int, - .long, .@"unsigned long" => .long, - .@"long long", .@"unsigned long long" => .@"long long", - .size_t, .ptrdiff_t => .ptrdiff_t, - .uint8_t, .int8_t => .int8_t, - .uint16_t, .int16_t => .int16_t, - .uint32_t, .int32_t => .int32_t, - .uint64_t, .int64_t => .int64_t, - .uintptr_t, .intptr_t => .intptr_t, - .zig_u128, .zig_i128 => .zig_i128, - .float, - .double, - .@"long double", - .zig_f16, - .zig_f32, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => |t| t, - else => unreachable, - }); - } - - pub fn toUnsigned(self: CType) CType { - return CType.initTag(switch (self.tag()) { - .char, .@"signed char", .@"unsigned char" => .@"unsigned char", - .short, .@"unsigned short" => .@"unsigned short", - .int, .@"unsigned int" => .@"unsigned int", - .long, .@"unsigned long" => .@"unsigned long", - .@"long long", .@"unsigned long long" => .@"unsigned long long", - .size_t, .ptrdiff_t => .size_t, - .uint8_t, .int8_t => .uint8_t, - .uint16_t, .int16_t => .uint16_t, - .uint32_t, .int32_t => .uint32_t, - .uint64_t, .int64_t => .uint64_t, - .uintptr_t, .intptr_t => .uintptr_t, - .zig_u128, .zig_i128 => .zig_u128, - else => unreachable, - }); - } - - pub fn toSignedness(self: CType, s: std.builtin.Signedness) CType { - return switch (s) { - .unsigned => self.toUnsigned(), - .signed => self.toSigned(), - }; - } - - pub fn getStandardDefineAbbrev(self: CType) ?[]const u8 { - return switch (self.tag()) { - .char => "CHAR", - .@"signed char" => "SCHAR", - .short => "SHRT", - .int => "INT", - .long => "LONG", - .@"long long" => "LLONG", - .@"unsigned char" => "UCHAR", - .@"unsigned short" => "USHRT", - .@"unsigned int" => "UINT", - .@"unsigned long" => "ULONG", - .@"unsigned long long" => "ULLONG", - .float => "FLT", - .double => "DBL", - .@"long double" => "LDBL", - .size_t => "SIZE", - .ptrdiff_t => "PTRDIFF", - .uint8_t => "UINT8", - .int8_t => "INT8", - .uint16_t => "UINT16", - .int16_t => "INT16", - .uint32_t => "UINT32", - .int32_t => "INT32", - .uint64_t => "UINT64", - .int64_t => "INT64", - .uintptr_t => "UINTPTR", - .intptr_t => "INTPTR", - else => null, - }; - } - - pub fn renderLiteralPrefix(self: CType, writer: anytype, kind: Kind) @TypeOf(writer).Error!void { - switch (self.tag()) { - .void => unreachable, - ._Bool, - .char, - .@"signed char", - .short, - .@"unsigned short", - .bool, - .size_t, - .ptrdiff_t, - .uintptr_t, - .intptr_t, - => |t| switch (kind) { - else => try writer.print("({s})", .{@tagName(t)}), - .global => {}, - }, - .int, - .long, - .@"long long", - .@"unsigned char", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long 
long", - .float, - .double, - .@"long double", - => {}, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - => try writer.print("{s}_C(", .{self.getStandardDefineAbbrev().?}), - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => |t| try writer.print("zig_{s}_{s}(", .{ - switch (kind) { - else => "make", - .global => "init", - }, - @tagName(t)["zig_".len..], - }), - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => unreachable, - .array, - .vector, - => try writer.writeByte('{'), - .fwd_anon_struct, - .fwd_anon_union, - .fwd_struct, - .fwd_union, - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - .function, - .varargs_function, - => unreachable, - } - } - - pub fn renderLiteralSuffix(self: CType, writer: anytype) @TypeOf(writer).Error!void { - switch (self.tag()) { - .void => unreachable, - ._Bool => {}, - .char, - .@"signed char", - .short, - .int, - => {}, - .long => try writer.writeByte('l'), - .@"long long" => try writer.writeAll("ll"), - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - => try writer.writeByte('u'), - .@"unsigned long", - .size_t, - .uintptr_t, - => try writer.writeAll("ul"), - .@"unsigned long long" => try writer.writeAll("ull"), - .float => try writer.writeByte('f'), - .double => {}, - .@"long double" => try writer.writeByte('l'), - .bool, - .ptrdiff_t, - .intptr_t, - => {}, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => try writer.writeByte(')'), - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => unreachable, - .array, - .vector, - => try writer.writeByte('}'), - .fwd_anon_struct, - .fwd_anon_union, - .fwd_struct, - .fwd_union, - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - .function, - .varargs_function, - => unreachable, - } - } - - pub fn floatActiveBits(self: CType, mod: *Module) u16 { - const target = &mod.resolved_target.result; - return switch (self.tag()) { - .float => target.c_type_bit_size(.float), - .double => target.c_type_bit_size(.double), - .@"long double", .zig_c_longdouble => target.c_type_bit_size(.longdouble), - .zig_f16 => 16, - .zig_f32 => 32, - .zig_f64 => 64, - .zig_f80 => 80, - .zig_f128 => 128, - else => unreachable, - }; - } - - pub fn byteSize(self: CType, store: Store.Set, mod: *Module) u64 { - const target = &mod.resolved_target.result; - return switch (self.tag()) { - .void => 0, - .char, .@"signed char", ._Bool, .@"unsigned char", .bool, .uint8_t, .int8_t => 1, - .short => target.c_type_byte_size(.short), - .int => target.c_type_byte_size(.int), - .long => target.c_type_byte_size(.long), - .@"long long" => target.c_type_byte_size(.longlong), - .@"unsigned short" => target.c_type_byte_size(.ushort), - .@"unsigned int" => target.c_type_byte_size(.uint), - .@"unsigned long" => target.c_type_byte_size(.ulong), - .@"unsigned long long" => target.c_type_byte_size(.ulonglong), - .float => target.c_type_byte_size(.float), - .double => target.c_type_byte_size(.double), - .@"long double" => target.c_type_byte_size(.longdouble), - .size_t, - 
.ptrdiff_t, - .uintptr_t, - .intptr_t, - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => @divExact(target.ptrBitWidth(), 8), - .uint16_t, .int16_t, .zig_f16 => 2, - .uint32_t, .int32_t, .zig_f32 => 4, - .uint64_t, .int64_t, .zig_f64 => 8, - .zig_u128, .zig_i128, .zig_f128 => 16, - .zig_f80 => if (target.c_type_bit_size(.longdouble) == 80) - target.c_type_byte_size(.longdouble) - else - 16, - .zig_c_longdouble => target.c_type_byte_size(.longdouble), - - .array, - .vector, - => { - const data = self.cast(Payload.Sequence).?.data; - return data.len * store.indexToCType(data.elem_type).byteSize(store, mod); - }, - - .fwd_anon_struct, - .fwd_anon_union, - .fwd_struct, - .fwd_union, - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - .function, - .varargs_function, - => unreachable, - }; - } - - pub fn isPacked(self: CType) bool { - return switch (self.tag()) { - else => false, - .packed_unnamed_struct, - .packed_unnamed_union, - .packed_struct, - .packed_union, - => true, - }; - } - - pub fn fields(self: CType) Payload.Fields.Data { - return if (self.cast(Payload.Aggregate)) |pl| - pl.data.fields - else if (self.cast(Payload.Unnamed)) |pl| - pl.data.fields - else if (self.cast(Payload.Fields)) |pl| - pl.data - else - unreachable; - } - - pub fn eql(lhs: CType, rhs: CType) bool { - return lhs.eqlContext(rhs, struct { - pub fn eqlIndex(_: @This(), lhs_idx: Index, rhs_idx: Index) bool { - return lhs_idx == rhs_idx; - } - }{}); - } - - pub fn eqlContext(lhs: CType, rhs: CType, ctx: anytype) bool { - // As a shortcut, if the small tags / addresses match, we're done. - if (lhs.tag_if_small_enough == rhs.tag_if_small_enough) return true; - - const lhs_tag = lhs.tag(); - const rhs_tag = rhs.tag(); - if (lhs_tag != rhs_tag) return false; - - return switch (lhs_tag) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => false, - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => ctx.eqlIndex(lhs.cast(Payload.Child).?.data, rhs.cast(Payload.Child).?.data), - - .array, - .vector, - => { - const lhs_data = lhs.cast(Payload.Sequence).?.data; - const rhs_data = rhs.cast(Payload.Sequence).?.data; - return lhs_data.len == rhs_data.len and - ctx.eqlIndex(lhs_data.elem_type, rhs_data.elem_type); - }, - - .fwd_anon_struct, - .fwd_anon_union, - => { - const lhs_data = lhs.cast(Payload.Fields).?.data; - const rhs_data = rhs.cast(Payload.Fields).?.data; - if (lhs_data.len != rhs_data.len) return false; - for (lhs_data, rhs_data) |lhs_field, rhs_field| { - if (!ctx.eqlIndex(lhs_field.type, rhs_field.type)) return false; - if (lhs_field.alignas.@"align" != rhs_field.alignas.@"align") return false; - if (std.mem.orderZ(u8, lhs_field.name, rhs_field.name) != .eq) return false; - } - return true; - }, - - .fwd_struct, - .fwd_union, - => lhs.cast(Payload.FwdDecl).?.data == rhs.cast(Payload.FwdDecl).?.data, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - 
.packed_unnamed_union, - => { - const lhs_data = lhs.cast(Payload.Unnamed).?.data; - const rhs_data = rhs.cast(Payload.Unnamed).?.data; - return lhs_data.owner_decl == rhs_data.owner_decl and lhs_data.id == rhs_data.id; - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => ctx.eqlIndex( - lhs.cast(Payload.Aggregate).?.data.fwd_decl, - rhs.cast(Payload.Aggregate).?.data.fwd_decl, - ), - - .function, - .varargs_function, - => { - const lhs_data = lhs.cast(Payload.Function).?.data; - const rhs_data = rhs.cast(Payload.Function).?.data; - if (lhs_data.param_types.len != rhs_data.param_types.len) return false; - if (!ctx.eqlIndex(lhs_data.return_type, rhs_data.return_type)) return false; - for (lhs_data.param_types, rhs_data.param_types) |lhs_param_idx, rhs_param_idx| { - if (!ctx.eqlIndex(lhs_param_idx, rhs_param_idx)) return false; - } - return true; - }, - }; - } - - pub fn hash(self: CType, store: Store.Set) u64 { - var hasher = std.hash.Wyhash.init(0); - self.updateHasher(&hasher, store); - return hasher.final(); - } - - pub fn updateHasher(self: CType, hasher: anytype, store: Store.Set) void { - const t = self.tag(); - autoHash(hasher, t); - switch (t) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, - .zig_c_longdouble, - => {}, - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => store.indexToCType(self.cast(Payload.Child).?.data).updateHasher(hasher, store), - - .array, - .vector, - => { - const data = self.cast(Payload.Sequence).?.data; - autoHash(hasher, data.len); - store.indexToCType(data.elem_type).updateHasher(hasher, store); - }, - - .fwd_anon_struct, - .fwd_anon_union, - => for (self.cast(Payload.Fields).?.data) |field| { - store.indexToCType(field.type).updateHasher(hasher, store); - hasher.update(mem.span(field.name)); - autoHash(hasher, field.alignas.@"align"); - }, - - .fwd_struct, - .fwd_union, - => autoHash(hasher, self.cast(Payload.FwdDecl).?.data), - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => { - const data = self.cast(Payload.Unnamed).?.data; - autoHash(hasher, data.owner_decl); - autoHash(hasher, data.id); - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => store.indexToCType(self.cast(Payload.Aggregate).?.data.fwd_decl) - .updateHasher(hasher, store), - - .function, - .varargs_function, - => { - const data = self.cast(Payload.Function).?.data; - store.indexToCType(data.return_type).updateHasher(hasher, store); - for (data.param_types) |param_ty| { - store.indexToCType(param_ty).updateHasher(hasher, store); - } - }, - } - } - - pub const Kind = enum { forward, forward_parameter, complete, global, parameter, payload }; - - const Convert = struct { - storage: union { - none: void, - child: Payload.Child, - seq: Payload.Sequence, - fwd: Payload.FwdDecl, - anon: struct { - fields: [2]Payload.Fields.Field, - pl: union { - forward: Payload.Fields, - complete: Payload.Aggregate, - }, - }, - }, - value: union(enum) { - tag: Tag, - cty: CType, - }, - - pub fn 
init(self: *@This(), t: Tag) void { - self.* = if (t.hasPayload()) .{ - .storage = .{ .none = {} }, - .value = .{ .tag = t }, - } else .{ - .storage = .{ .none = {} }, - .value = .{ .cty = initTag(t) }, - }; - } - - pub fn tag(self: @This()) Tag { - return switch (self.value) { - .tag => |t| t, - .cty => |c| c.tag(), - }; - } - - fn tagFromIntInfo(int_info: std.builtin.Type.Int) Tag { - return switch (int_info.bits) { - 0 => .void, - 1...8 => switch (int_info.signedness) { - .unsigned => .uint8_t, - .signed => .int8_t, - }, - 9...16 => switch (int_info.signedness) { - .unsigned => .uint16_t, - .signed => .int16_t, - }, - 17...32 => switch (int_info.signedness) { - .unsigned => .uint32_t, - .signed => .int32_t, - }, - 33...64 => switch (int_info.signedness) { - .unsigned => .uint64_t, - .signed => .int64_t, - }, - 65...128 => switch (int_info.signedness) { - .unsigned => .zig_u128, - .signed => .zig_i128, - }, - else => .array, - }; - } - - pub const Lookup = union(enum) { - fail: struct { - zcu: *Zcu, - mod: *Module, - }, - imm: struct { - set: *const Store.Set, - zcu: *Zcu, - mod: *Module, - }, - mut: struct { - promoted: *Store.Promoted, - zcu: *Zcu, - mod: *Module, - }, - - pub fn isMutable(self: @This()) bool { - return switch (self) { - .fail, .imm => false, - .mut => true, - }; - } - - pub fn getZcu(self: @This()) *Zcu { - return switch (self) { - inline else => |pl| pl.zcu, - }; - } - - pub fn getModule(self: @This()) *Module { - return switch (self) { - inline else => |pl| pl.mod, - }; - } - - pub fn getSet(self: @This()) ?*const Store.Set { - return switch (self) { - .fail => null, - .imm => |imm| imm.set, - .mut => |mut| &mut.promoted.set, - }; - } - - pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index { - return switch (self) { - .fail => null, - .imm => |imm| imm.set.typeToIndex(ty, imm.zcu, imm.mod, kind), - .mut => |mut| try mut.promoted.typeToIndex(ty, mut.zcu, mut.mod, kind), - }; - } - - pub fn indexToCType(self: @This(), index: Index) ?CType { - return if (self.getSet()) |set| set.indexToCType(index) else null; - } - - pub fn freeze(self: @This()) @This() { - return switch (self) { - .fail, .imm => self, - .mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .zcu = mut.zcu, .mod = mut.mod } }, - }; - } - }; - - fn sortFields(self: *@This(), fields_len: usize) []Payload.Fields.Field { - const Field = Payload.Fields.Field; - const slice = self.storage.anon.fields[0..fields_len]; - mem.sort(Field, slice, {}, struct { - fn before(_: void, lhs: Field, rhs: Field) bool { - return lhs.alignas.order(rhs.alignas).compare(.gt); - } - }.before); - return slice; - } - - fn initAnon(self: *@This(), kind: Kind, fwd_idx: Index, fields_len: usize) void { - switch (kind) { - .forward, .forward_parameter => { - self.storage.anon.pl = .{ .forward = .{ - .base = .{ .tag = .fwd_anon_struct }, - .data = self.sortFields(fields_len), - } }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl.forward) }; - }, - .complete, .parameter, .global => { - self.storage.anon.pl = .{ .complete = .{ - .base = .{ .tag = .anon_struct }, - .data = .{ - .fields = self.sortFields(fields_len), - .fwd_decl = fwd_idx, - }, - } }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; - }, - .payload => unreachable, - } - } - - fn initArrayParameter(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { - if (switch (kind) { - .forward_parameter => @as(Index, undefined), - .parameter => try lookup.typeToIndex(ty, .forward_parameter), - .forward, .complete, .global, 
.payload => unreachable, - }) |fwd_idx| { - if (try lookup.typeToIndex(ty, switch (kind) { - .forward_parameter => .forward, - .parameter => .complete, - .forward, .complete, .global, .payload => unreachable, - })) |array_idx| { - self.storage = .{ .anon = undefined }; - self.storage.anon.fields[0] = .{ - .name = "array", - .type = array_idx, - .alignas = AlignAs.abiAlign(ty, lookup.getZcu()), - }; - self.initAnon(kind, fwd_idx, 1); - } else self.init(switch (kind) { - .forward_parameter => .fwd_anon_struct, - .parameter => .anon_struct, - .forward, .complete, .global, .payload => unreachable, - }); - } else self.init(.anon_struct); - } - - pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void { - const zcu = lookup.getZcu(); - const ip = &zcu.intern_pool; - - self.* = undefined; - if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) - self.init(.void) - else if (ty.isAbiInt(zcu)) switch (ty.ip_index) { - .usize_type => self.init(.uintptr_t), - .isize_type => self.init(.intptr_t), - .c_char_type => self.init(.char), - .c_short_type => self.init(.short), - .c_ushort_type => self.init(.@"unsigned short"), - .c_int_type => self.init(.int), - .c_uint_type => self.init(.@"unsigned int"), - .c_long_type => self.init(.long), - .c_ulong_type => self.init(.@"unsigned long"), - .c_longlong_type => self.init(.@"long long"), - .c_ulonglong_type => self.init(.@"unsigned long long"), - else => switch (tagFromIntInfo(ty.intInfo(zcu))) { - .void => unreachable, - else => |t| self.init(t), - .array => switch (kind) { - .forward, .complete, .global => { - const abi_size = ty.abiSize(zcu); - const abi_align = ty.abiAlignment(zcu).toByteUnits(0); - self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{ - .len = @divExact(abi_size, abi_align), - .elem_type = tagFromIntInfo(.{ - .signedness = .unsigned, - .bits = @intCast(abi_align * 8), - }).toIndex(), - } } }; - self.value = .{ .cty = initPayload(&self.storage.seq) }; - }, - .forward_parameter, - .parameter, - => try self.initArrayParameter(ty, kind, lookup), - .payload => unreachable, - }, - }, - } else switch (ty.zigTypeTag(zcu)) { - .Frame => unreachable, - .AnyFrame => unreachable, - - .Int, - .Enum, - .ErrorSet, - .Type, - .Void, - .NoReturn, - .ComptimeFloat, - .ComptimeInt, - .Undefined, - .Null, - .EnumLiteral, - => unreachable, - - .Bool => self.init(.bool), - - .Float => self.init(switch (ty.ip_index) { - .f16_type => .zig_f16, - .f32_type => .zig_f32, - .f64_type => .zig_f64, - .f80_type => .zig_f80, - .f128_type => .zig_f128, - .c_longdouble_type => .zig_c_longdouble, - else => unreachable, - }), - - .Pointer => { - const info = ty.ptrInfo(zcu); - switch (info.flags.size) { - .Slice => { - if (switch (kind) { - .forward, .forward_parameter => @as(Index, undefined), - .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), - .payload => unreachable, - }) |fwd_idx| { - const ptr_ty = ty.slicePtrFieldType(zcu); - if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| { - self.storage = .{ .anon = undefined }; - self.storage.anon.fields[0] = .{ - .name = "ptr", - .type = ptr_idx, - .alignas = AlignAs.abiAlign(ptr_ty, zcu), - }; - self.storage.anon.fields[1] = .{ - .name = "len", - .type = Tag.uintptr_t.toIndex(), - .alignas = AlignAs.abiAlign(Type.usize, zcu), - }; - self.initAnon(kind, fwd_idx, 2); - } else self.init(switch (kind) { - .forward, .forward_parameter => .fwd_anon_struct, - .complete, .parameter, .global => .anon_struct, - .payload => unreachable, - }); - } else self.init(.anon_struct); - }, - - 
.One, .Many, .C => { - const t: Tag = switch (info.flags.is_volatile) { - false => switch (info.flags.is_const) { - false => .pointer, - true => .pointer_const, - }, - true => switch (info.flags.is_const) { - false => .pointer_volatile, - true => .pointer_const_volatile, - }, - }; - - const pointee_ty = if (info.packed_offset.host_size > 0 and info.flags.vector_index == .none) - try zcu.intType(.unsigned, info.packed_offset.host_size * 8) - else if (info.flags.alignment == .none or - info.flags.alignment.compareStrict(.gte, Type.fromInterned(info.child).abiAlignment(zcu))) - Type.fromInterned(info.child) - else - try zcu.intType(.unsigned, @min( - info.flags.alignment.toByteUnitsOptional().?, - lookup.getModule().resolved_target.result.maxIntAlignment(), - ) * 8); - - if (try lookup.typeToIndex(pointee_ty, .forward)) |child_idx| { - self.storage = .{ .child = .{ - .base = .{ .tag = t }, - .data = child_idx, - } }; - self.value = .{ .cty = initPayload(&self.storage.child) }; - } else self.init(t); - }, - } - }, - - .Struct, .Union => |zig_ty_tag| if (ty.containerLayout(zcu) == .@"packed") { - if (zcu.typeToPackedStruct(ty)) |packed_struct| { - try self.initType(Type.fromInterned(packed_struct.backingIntType(ip).*), kind, lookup); - } else { - const bits: u16 = @intCast(ty.bitSize(zcu)); - const int_ty = try zcu.intType(.unsigned, bits); - try self.initType(int_ty, kind, lookup); - } - } else if (ty.isTupleOrAnonStruct(zcu)) { - if (lookup.isMutable()) { - for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(zcu), - .Union => zcu.typeToUnion(ty).?.field_types.len, - else => unreachable, - }) |field_i| { - const field_ty = ty.structFieldType(field_i, zcu); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, zcu)) or - !field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - _ = try lookup.typeToIndex(field_ty, switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - }); - } - switch (kind) { - .forward, .forward_parameter => {}, - .complete, .parameter, .global => _ = try lookup.typeToIndex(ty, .forward), - .payload => unreachable, - } - } - self.init(switch (kind) { - .forward, .forward_parameter => switch (zig_ty_tag) { - .Struct => .fwd_anon_struct, - .Union => .fwd_anon_union, - else => unreachable, - }, - .complete, .parameter, .global => switch (zig_ty_tag) { - .Struct => .anon_struct, - .Union => .anon_union, - else => unreachable, - }, - .payload => unreachable, - }); - } else { - const tag_ty = ty.unionTagTypeSafety(zcu); - const is_tagged_union_wrapper = kind != .payload and tag_ty != null; - const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper; - switch (kind) { - .forward, .forward_parameter => { - self.storage = .{ .fwd = .{ - .base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union }, - .data = ty.getOwnerDecl(zcu), - } }; - self.value = .{ .cty = initPayload(&self.storage.fwd) }; - }, - .complete, .parameter, .global, .payload => if (is_tagged_union_wrapper) { - const fwd_idx = try lookup.typeToIndex(ty, .forward); - const payload_idx = try lookup.typeToIndex(ty, .payload); - const tag_idx = try lookup.typeToIndex(tag_ty.?, kind); - if (fwd_idx != null and payload_idx != null and tag_idx != null) { - self.storage = .{ .anon = undefined }; - var field_count: usize = 0; - if (payload_idx != Tag.void.toIndex()) { - self.storage.anon.fields[field_count] = .{ - .name = "payload", - .type = payload_idx.?, - .alignas = AlignAs.unionPayloadAlign(ty, 
zcu), - }; - field_count += 1; - } - if (tag_idx != Tag.void.toIndex()) { - self.storage.anon.fields[field_count] = .{ - .name = "tag", - .type = tag_idx.?, - .alignas = AlignAs.abiAlign(tag_ty.?, zcu), - }; - field_count += 1; - } - self.storage.anon.pl = .{ .complete = .{ - .base = .{ .tag = .@"struct" }, - .data = .{ - .fields = self.sortFields(field_count), - .fwd_decl = fwd_idx.?, - }, - } }; - self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) }; - } else self.init(.@"struct"); - } else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(zcu)) { - self.init(.void); - } else { - var is_packed = false; - for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(zcu), - .Union => zcu.typeToUnion(ty).?.field_types.len, - else => unreachable, - }) |field_i| { - const field_ty = ty.structFieldType(field_i, zcu); - if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - - const field_align = AlignAs.fieldAlign(ty, field_i, zcu); - if (field_align.abiOrder().compare(.lt)) { - is_packed = true; - if (!lookup.isMutable()) break; - } - - if (lookup.isMutable()) { - _ = try lookup.typeToIndex(field_ty, switch (kind) { - .forward, .forward_parameter => unreachable, - .complete, .parameter, .payload => .complete, - .global => .global, - }); - } - } - switch (kind) { - .forward, .forward_parameter => unreachable, - .complete, .parameter, .global => { - _ = try lookup.typeToIndex(ty, .forward); - self.init(if (is_struct) - if (is_packed) .packed_struct else .@"struct" - else if (is_packed) .packed_union else .@"union"); - }, - .payload => self.init(if (is_packed) - .packed_unnamed_union - else - .unnamed_union), - } - }, - } - }, - - .Array, .Vector => |zig_ty_tag| { - switch (kind) { - .forward, .complete, .global => { - const t: Tag = switch (zig_ty_tag) { - .Array => .array, - .Vector => .vector, - else => unreachable, - }; - if (try lookup.typeToIndex(ty.childType(zcu), kind)) |child_idx| { - self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{ - .len = ty.arrayLenIncludingSentinel(zcu), - .elem_type = child_idx, - } } }; - self.value = .{ .cty = initPayload(&self.storage.seq) }; - } else self.init(t); - }, - .forward_parameter, .parameter => try self.initArrayParameter(ty, kind, lookup), - .payload => unreachable, - } - }, - - .Optional => { - const payload_ty = ty.optionalChild(zcu); - if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) { - if (ty.optionalReprIsPayload(zcu)) { - try self.initType(payload_ty, kind, lookup); - } else if (switch (kind) { - .forward, .forward_parameter => @as(Index, undefined), - .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), - .payload => unreachable, - }) |fwd_idx| { - if (try lookup.typeToIndex(payload_ty, switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - })) |payload_idx| { - self.storage = .{ .anon = undefined }; - self.storage.anon.fields[0] = .{ - .name = "payload", - .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, zcu), - }; - self.storage.anon.fields[1] = .{ - .name = "is_null", - .type = Tag.bool.toIndex(), - .alignas = AlignAs.abiAlign(Type.bool, zcu), - }; - self.initAnon(kind, fwd_idx, 2); - } else self.init(switch (kind) { - .forward, .forward_parameter => .fwd_anon_struct, - .complete, .parameter, .global => .anon_struct, - .payload => unreachable, - }); - } else self.init(.anon_struct); - } else self.init(.bool); - }, - - .ErrorUnion => { - if (switch (kind) { - .forward, 
.forward_parameter => @as(Index, undefined), - .complete, .parameter, .global => try lookup.typeToIndex(ty, .forward), - .payload => unreachable, - }) |fwd_idx| { - const payload_ty = ty.errorUnionPayload(zcu); - if (try lookup.typeToIndex(payload_ty, switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - })) |payload_idx| { - const error_ty = ty.errorUnionSet(zcu); - if (payload_idx == Tag.void.toIndex()) { - try self.initType(error_ty, kind, lookup); - } else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| { - self.storage = .{ .anon = undefined }; - self.storage.anon.fields[0] = .{ - .name = "payload", - .type = payload_idx, - .alignas = AlignAs.abiAlign(payload_ty, zcu), - }; - self.storage.anon.fields[1] = .{ - .name = "error", - .type = error_idx, - .alignas = AlignAs.abiAlign(error_ty, zcu), - }; - self.initAnon(kind, fwd_idx, 2); - } else self.init(switch (kind) { - .forward, .forward_parameter => .fwd_anon_struct, - .complete, .parameter, .global => .anon_struct, - .payload => unreachable, - }); - } else self.init(switch (kind) { - .forward, .forward_parameter => .fwd_anon_struct, - .complete, .parameter, .global => .anon_struct, - .payload => unreachable, - }); - } else self.init(.anon_struct); - }, - - .Opaque => self.init(.void), - - .Fn => { - const info = zcu.typeToFunc(ty).?; - if (!info.is_generic) { - if (lookup.isMutable()) { - const param_kind: Kind = switch (kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - .payload => unreachable, - }; - _ = try lookup.typeToIndex(Type.fromInterned(info.return_type), param_kind); - for (info.param_types.get(ip)) |param_type| { - if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(zcu)) continue; - _ = try lookup.typeToIndex(Type.fromInterned(param_type), param_kind); - } - } - self.init(if (info.is_var_args) .varargs_function else .function); - } else self.init(.void); - }, - } - } - }; - - pub fn copy(self: CType, arena: Allocator) !CType { - return self.copyContext(struct { - arena: Allocator, - pub fn copyIndex(_: @This(), idx: Index) Index { - return idx; - } - }{ .arena = arena }); - } - - fn copyFields(ctx: anytype, old_fields: Payload.Fields.Data) !Payload.Fields.Data { - const new_fields = try ctx.arena.alloc(Payload.Fields.Field, old_fields.len); - for (new_fields, old_fields) |*new_field, old_field| { - new_field.name = try ctx.arena.dupeZ(u8, mem.span(old_field.name)); - new_field.type = ctx.copyIndex(old_field.type); - new_field.alignas = old_field.alignas; - } - return new_fields; - } - - fn copyParams(ctx: anytype, old_param_types: []const Index) ![]const Index { - const new_param_types = try ctx.arena.alloc(Index, old_param_types.len); - for (new_param_types, old_param_types) |*new_param_type, old_param_type| - new_param_type.* = ctx.copyIndex(old_param_type); - return new_param_types; - } - - pub fn copyContext(self: CType, ctx: anytype) !CType { - switch (self.tag()) { - .void, - .char, - .@"signed char", - .short, - .int, - .long, - .@"long long", - ._Bool, - .@"unsigned char", - .@"unsigned short", - .@"unsigned int", - .@"unsigned long", - .@"unsigned long long", - .float, - .double, - .@"long double", - .bool, - .size_t, - .ptrdiff_t, - .uint8_t, - .int8_t, - .uint16_t, - .int16_t, - .uint32_t, - .int32_t, - .uint64_t, - .int64_t, - .uintptr_t, - .intptr_t, - .zig_u128, - .zig_i128, - .zig_f16, - .zig_f32, - .zig_f64, - .zig_f80, - .zig_f128, 
- .zig_c_longdouble, - => return self, - - .pointer, - .pointer_const, - .pointer_volatile, - .pointer_const_volatile, - => { - const pl = self.cast(Payload.Child).?; - const new_pl = try ctx.arena.create(Payload.Child); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = ctx.copyIndex(pl.data) }; - return initPayload(new_pl); - }, - - .array, - .vector, - => { - const pl = self.cast(Payload.Sequence).?; - const new_pl = try ctx.arena.create(Payload.Sequence); - new_pl.* = .{ - .base = .{ .tag = pl.base.tag }, - .data = .{ .len = pl.data.len, .elem_type = ctx.copyIndex(pl.data.elem_type) }, - }; - return initPayload(new_pl); - }, - - .fwd_anon_struct, - .fwd_anon_union, - => { - const pl = self.cast(Payload.Fields).?; - const new_pl = try ctx.arena.create(Payload.Fields); - new_pl.* = .{ - .base = .{ .tag = pl.base.tag }, - .data = try copyFields(ctx, pl.data), - }; - return initPayload(new_pl); - }, - - .fwd_struct, - .fwd_union, - => { - const pl = self.cast(Payload.FwdDecl).?; - const new_pl = try ctx.arena.create(Payload.FwdDecl); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = pl.data }; - return initPayload(new_pl); - }, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => { - const pl = self.cast(Payload.Unnamed).?; - const new_pl = try ctx.arena.create(Payload.Unnamed); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ - .fields = try copyFields(ctx, pl.data.fields), - .owner_decl = pl.data.owner_decl, - .id = pl.data.id, - } }; - return initPayload(new_pl); - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => { - const pl = self.cast(Payload.Aggregate).?; - const new_pl = try ctx.arena.create(Payload.Aggregate); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ - .fields = try copyFields(ctx, pl.data.fields), - .fwd_decl = ctx.copyIndex(pl.data.fwd_decl), - } }; - return initPayload(new_pl); - }, - - .function, - .varargs_function, - => { - const pl = self.cast(Payload.Function).?; - const new_pl = try ctx.arena.create(Payload.Function); - new_pl.* = .{ .base = .{ .tag = pl.base.tag }, .data = .{ - .return_type = ctx.copyIndex(pl.data.return_type), - .param_types = try copyParams(ctx, pl.data.param_types), - } }; - return initPayload(new_pl); - }, - } - } - - fn createFromType(store: *Store.Promoted, ty: Type, zcu: *Zcu, mod: *Module, kind: Kind) !CType { - var convert: Convert = undefined; - try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .zcu = zcu } }); - return createFromConvert(store, ty, zcu, mod, kind, &convert); - } - - fn createFromConvert( - store: *Store.Promoted, - ty: Type, - zcu: *Zcu, - mod: *Module, - kind: Kind, - convert: Convert, - ) !CType { - const ip = &zcu.intern_pool; - const arena = store.arena.allocator(); - switch (convert.value) { - .cty => |c| return c.copy(arena), - .tag => |t| switch (t) { - .fwd_anon_struct, - .fwd_anon_union, - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => { - const zig_ty_tag = ty.zigTypeTag(zcu); - const fields_len = switch (zig_ty_tag) { - .Struct => ty.structFieldCount(zcu), - .Union => zcu.typeToUnion(ty).?.field_types.len, - else => unreachable, - }; - - var c_fields_len: usize = 0; - for (0..fields_len) |field_i| { - const field_ty = ty.structFieldType(field_i, zcu); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, zcu)) or - 
!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - c_fields_len += 1; - } - - const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len); - var c_field_i: usize = 0; - for (0..fields_len) |field_i_usize| { - const field_i: u32 = @intCast(field_i_usize); - const field_ty = ty.structFieldType(field_i, zcu); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, zcu)) or - !field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - - defer c_field_i += 1; - fields_pl[c_field_i] = .{ - .name = try if (ty.isSimpleTuple(zcu)) - std.fmt.allocPrintZ(arena, "f{}", .{field_i}) - else - arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) { - .Struct => ty.legacyStructFieldName(field_i, zcu), - .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i], - else => unreachable, - })), - .type = store.set.typeToIndex(field_ty, zcu, mod, switch (kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter, .payload => .complete, - .global => .global, - }).?, - .alignas = AlignAs.fieldAlign(ty, field_i, zcu), - }; - } - - switch (t) { - .fwd_anon_struct, - .fwd_anon_union, - => { - const anon_pl = try arena.create(Payload.Fields); - anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl }; - return initPayload(anon_pl); - }, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => { - const unnamed_pl = try arena.create(Payload.Unnamed); - unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .owner_decl = ty.getOwnerDecl(zcu), - .id = if (ty.unionTagTypeSafety(zcu)) |_| 0 else unreachable, - } }; - return initPayload(unnamed_pl); - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => { - const struct_pl = try arena.create(Payload.Aggregate); - struct_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .fields = fields_pl, - .fwd_decl = store.set.typeToIndex(ty, zcu, mod, .forward).?, - } }; - return initPayload(struct_pl); - }, - - else => unreachable, - } - }, - - .function, - .varargs_function, - => { - const info = zcu.typeToFunc(ty).?; - assert(!info.is_generic); - const param_kind: Kind = switch (kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - .payload => unreachable, - }; - - var c_params_len: usize = 0; - for (info.param_types.get(ip)) |param_type| { - if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(zcu)) continue; - c_params_len += 1; - } - - const params_pl = try arena.alloc(Index, c_params_len); - var c_param_i: usize = 0; - for (info.param_types.get(ip)) |param_type| { - if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(zcu)) continue; - params_pl[c_param_i] = store.set.typeToIndex(Type.fromInterned(param_type), zcu, mod, param_kind).?; - c_param_i += 1; - } - - const fn_pl = try arena.create(Payload.Function); - fn_pl.* = .{ .base = .{ .tag = t }, .data = .{ - .return_type = store.set.typeToIndex(Type.fromInterned(info.return_type), zcu, mod, param_kind).?, - .param_types = params_pl, - } }; - return initPayload(fn_pl); - }, - - else => unreachable, - }, - } - } - - pub const TypeAdapter64 = struct { - kind: Kind, - lookup: Convert.Lookup, - convert: *const Convert, - - fn eqlRecurse(self: @This(), ty: Type, cty: Index, kind: Kind) bool { - assert(!self.lookup.isMutable()); - - var convert: Convert = undefined; - convert.initType(ty, kind, self.lookup) catch unreachable; - - const self_recurse = @This(){ .kind = kind, .lookup = 
self.lookup, .convert = &convert }; - return self_recurse.eql(ty, self.lookup.indexToCType(cty).?); - } - - pub fn eql(self: @This(), ty: Type, cty: CType) bool { - const zcu = self.lookup.getZcu(); - const ip = &zcu.intern_pool; - switch (self.convert.value) { - .cty => |c| return c.eql(cty), - .tag => |t| { - if (t != cty.tag()) return false; - - switch (t) { - .fwd_anon_struct, - .fwd_anon_union, - => { - if (!ty.isTupleOrAnonStruct(zcu)) return false; - - var name_buf: [ - std.fmt.count("f{}", .{std.math.maxInt(usize)}) - ]u8 = undefined; - const c_fields = cty.cast(Payload.Fields).?.data; - - const zig_ty_tag = ty.zigTypeTag(zcu); - var c_field_i: usize = 0; - for (0..switch (zig_ty_tag) { - .Struct => ty.structFieldCount(zcu), - .Union => zcu.typeToUnion(ty).?.field_types.len, - else => unreachable, - }) |field_i_usize| { - const field_i: u32 = @intCast(field_i_usize); - const field_ty = ty.structFieldType(field_i, zcu); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, zcu)) or - !field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - - defer c_field_i += 1; - const c_field = &c_fields[c_field_i]; - - if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - }) or !mem.eql( - u8, - if (ty.isSimpleTuple(zcu)) - std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable - else - ip.stringToSlice(switch (zig_ty_tag) { - .Struct => ty.legacyStructFieldName(field_i, zcu), - .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i], - else => unreachable, - }), - mem.span(c_field.name), - ) or AlignAs.fieldAlign(ty, field_i, zcu).@"align" != - c_field.alignas.@"align") return false; - } - return true; - }, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => switch (self.kind) { - .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety(zcu)) |_| { - const data = cty.cast(Payload.Unnamed).?.data; - return ty.getOwnerDecl(zcu) == data.owner_decl and data.id == 0; - } else unreachable, - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => return self.eqlRecurse( - ty, - cty.cast(Payload.Aggregate).?.data.fwd_decl, - .forward, - ), - - .function, - .varargs_function, - => { - if (ty.zigTypeTag(zcu) != .Fn) return false; - - const info = zcu.typeToFunc(ty).?; - assert(!info.is_generic); - const data = cty.cast(Payload.Function).?.data; - const param_kind: Kind = switch (self.kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - .payload => unreachable, - }; - - if (!self.eqlRecurse(Type.fromInterned(info.return_type), data.return_type, param_kind)) - return false; - - var c_param_i: usize = 0; - for (info.param_types.get(ip)) |param_type| { - if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(zcu)) continue; - - if (c_param_i >= data.param_types.len) return false; - const param_cty = data.param_types[c_param_i]; - c_param_i += 1; - - if (!self.eqlRecurse(Type.fromInterned(param_type), param_cty, param_kind)) - return false; - } - return c_param_i == data.param_types.len; - }, - - else => unreachable, - } - }, - } - } - - pub fn hash(self: @This(), ty: Type) u64 { - var hasher = std.hash.Wyhash.init(0); - self.updateHasher(&hasher, ty); - return hasher.final(); - } - - fn 
updateHasherRecurse(self: @This(), hasher: anytype, ty: Type, kind: Kind) void { - assert(!self.lookup.isMutable()); - - var convert: Convert = undefined; - convert.initType(ty, kind, self.lookup) catch unreachable; - - const self_recurse = @This(){ .kind = kind, .lookup = self.lookup, .convert = &convert }; - self_recurse.updateHasher(hasher, ty); - } - - pub fn updateHasher(self: @This(), hasher: anytype, ty: Type) void { - switch (self.convert.value) { - .cty => |c| return c.updateHasher(hasher, self.lookup.getSet().?.*), - .tag => |t| { - autoHash(hasher, t); - - const zcu = self.lookup.getZcu(); - const ip = &zcu.intern_pool; - switch (t) { - .fwd_anon_struct, - .fwd_anon_union, - => { - var name_buf: [ - std.fmt.count("f{}", .{std.math.maxInt(usize)}) - ]u8 = undefined; - - const zig_ty_tag = ty.zigTypeTag(zcu); - for (0..switch (ty.zigTypeTag(zcu)) { - .Struct => ty.structFieldCount(zcu), - .Union => zcu.typeToUnion(ty).?.field_types.len, - else => unreachable, - }) |field_i_usize| { - const field_i: u32 = @intCast(field_i_usize); - const field_ty = ty.structFieldType(field_i, zcu); - if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, zcu)) or - !field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue; - - self.updateHasherRecurse(hasher, field_ty, switch (self.kind) { - .forward, .forward_parameter => .forward, - .complete, .parameter => .complete, - .global => .global, - .payload => unreachable, - }); - hasher.update(if (ty.isSimpleTuple(zcu)) - std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable - else - zcu.intern_pool.stringToSlice(switch (zig_ty_tag) { - .Struct => ty.legacyStructFieldName(field_i, zcu), - .Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i], - else => unreachable, - })); - autoHash(hasher, AlignAs.fieldAlign(ty, field_i, zcu).@"align"); - } - }, - - .unnamed_struct, - .unnamed_union, - .packed_unnamed_struct, - .packed_unnamed_union, - => switch (self.kind) { - .forward, .forward_parameter, .complete, .parameter, .global => unreachable, - .payload => if (ty.unionTagTypeSafety(zcu)) |_| { - autoHash(hasher, ty.getOwnerDecl(zcu)); - autoHash(hasher, @as(u32, 0)); - } else unreachable, - }, - - .anon_struct, - .anon_union, - .@"struct", - .@"union", - .packed_struct, - .packed_union, - => self.updateHasherRecurse(hasher, ty, .forward), - - .function, - .varargs_function, - => { - const info = zcu.typeToFunc(ty).?; - assert(!info.is_generic); - const param_kind: Kind = switch (self.kind) { - .forward, .forward_parameter => .forward_parameter, - .complete, .parameter, .global => .parameter, - .payload => unreachable, - }; - - self.updateHasherRecurse(hasher, Type.fromInterned(info.return_type), param_kind); - for (info.param_types.get(ip)) |param_type| { - if (!Type.fromInterned(param_type).hasRuntimeBitsIgnoreComptime(zcu)) continue; - self.updateHasherRecurse(hasher, Type.fromInterned(param_type), param_kind); - } - }, - - else => unreachable, - } - }, - } - } - }; - - pub const TypeAdapter32 = struct { - kind: Kind, - lookup: Convert.Lookup, - convert: *const Convert, - - fn to64(self: @This()) TypeAdapter64 { - return .{ .kind = self.kind, .lookup = self.lookup, .convert = self.convert }; - } - - pub fn eql(self: @This(), ty: Type, cty: CType, cty_index: usize) bool { - _ = cty_index; - return self.to64().eql(ty, cty); - } - - pub fn hash(self: @This(), ty: Type) u32 { - return @as(u32, @truncate(self.to64().hash(ty))); - } - }; -}; diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 
8ddacbe11c..7419e778a1 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -2033,7 +2033,7 @@ pub const Object = struct { owner_decl.src_node + 1, // Line try o.lowerDebugType(int_ty), ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(enumerators), ); @@ -2120,7 +2120,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(ptr_ty), ptr_size * 8, - ptr_align.toByteUnits(0) * 8, + (ptr_align.toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2131,7 +2131,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(len_ty), len_size * 8, - len_align.toByteUnits(0) * 8, + (len_align.toByteUnits() orelse 0) * 8, len_offset * 8, ); @@ -2142,7 +2142,7 @@ pub const Object = struct { line, .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_ptr_type, debug_len_type, @@ -2170,7 +2170,7 @@ pub const Object = struct { 0, // Line debug_elem_ty, target.ptrBitWidth(), - ty.ptrAlignment(mod).toByteUnits(0) * 8, + (ty.ptrAlignment(mod).toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2217,7 +2217,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(ty.childType(mod)), ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2260,7 +2260,7 @@ pub const Object = struct { 0, // Line debug_elem_type, ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ try o.builder.debugSubrange( try o.builder.debugConstant(try o.builder.intConst(.i64, 0)), @@ -2316,7 +2316,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(child_ty), payload_size * 8, - payload_align.toByteUnits(0) * 8, + (payload_align.toByteUnits() orelse 0) * 8, 0, // Offset ); @@ -2327,7 +2327,7 @@ pub const Object = struct { 0, try o.lowerDebugType(non_null_ty), non_null_size * 8, - non_null_align.toByteUnits(0) * 8, + (non_null_align.toByteUnits() orelse 0) * 8, non_null_offset * 8, ); @@ -2338,7 +2338,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&.{ debug_data_type, debug_some_type, @@ -2396,7 +2396,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(Type.anyerror), error_size * 8, - error_align.toByteUnits(0) * 8, + (error_align.toByteUnits() orelse 0) * 8, error_offset * 8, ); fields[payload_index] = try o.builder.debugMemberType( @@ -2406,7 +2406,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(payload_ty), payload_size * 8, - payload_align.toByteUnits(0) * 8, + (payload_align.toByteUnits() orelse 0) * 8, payload_offset * 8, ); @@ -2417,7 +2417,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&fields), ); @@ -2485,7 +2485,7 @@ pub const Object = struct { 0, try o.lowerDebugType(Type.fromInterned(field_ty)), field_size * 8, - field_align.toByteUnits(0) * 8, + (field_align.toByteUnits() orelse 0) * 8, field_offset * 8, )); } @@ -2497,7 +2497,7 @@ pub const Object = 
struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2566,7 +2566,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(field_ty), field_size * 8, - field_align.toByteUnits(0) * 8, + (field_align.toByteUnits() orelse 0) * 8, field_offset * 8, )); } @@ -2578,7 +2578,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2621,7 +2621,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple( &.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))}, ), @@ -2661,7 +2661,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(Type.fromInterned(field_ty)), field_size * 8, - field_align.toByteUnits(0) * 8, + (field_align.toByteUnits() orelse 0) * 8, 0, // Offset )); } @@ -2680,7 +2680,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(fields.items), ); @@ -2711,7 +2711,7 @@ pub const Object = struct { 0, // Line try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)), layout.tag_size * 8, - layout.tag_align.toByteUnits(0) * 8, + (layout.tag_align.toByteUnits() orelse 0) * 8, tag_offset * 8, ); @@ -2722,7 +2722,7 @@ pub const Object = struct { 0, // Line debug_union_type, layout.payload_size * 8, - layout.payload_align.toByteUnits(0) * 8, + (layout.payload_align.toByteUnits() orelse 0) * 8, payload_offset * 8, ); @@ -2739,7 +2739,7 @@ pub const Object = struct { 0, // Line .none, // Underlying type ty.abiSize(mod) * 8, - ty.abiAlignment(mod).toByteUnits(0) * 8, + (ty.abiAlignment(mod).toByteUnits() orelse 0) * 8, try o.builder.debugTuple(&full_fields), ); @@ -4473,7 +4473,7 @@ pub const Object = struct { // The value cannot be undefined, because we use the `nonnull` annotation // for non-optional pointers. We also need to respect the alignment, even though // the address will never be dereferenced. - const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse + const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnits() orelse // Note that these 0xaa values are appropriate even in release-optimized builds // because we need a well-defined value that is not null, and LLVM does not // have an "undef_but_not_null" attribute. 
As an example, if this `alloc` AIR diff --git a/src/crash_report.zig b/src/crash_report.zig index f33bef78e7..311647f23f 100644 --- a/src/crash_report.zig +++ b/src/crash_report.zig @@ -172,7 +172,7 @@ pub fn attachSegfaultHandler() void { }; } -fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn { +fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn { // TODO: use alarm() here to prevent infinite loops PanicSwitch.preDispatch(); diff --git a/src/link/C.zig b/src/link/C.zig index d717903ff3..8bff6d9fce 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -69,13 +69,13 @@ pub const DeclBlock = struct { fwd_decl: String = String.empty, /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate /// over each `Decl` and generate the definition for each used `CType` once. - ctypes: codegen.CType.Store = .{}, - /// Key and Value storage use the ctype arena. + ctype_pool: codegen.CType.Pool = codegen.CType.Pool.empty, + /// May contain string references to ctype_pool lazy_fns: codegen.LazyFnMap = .{}, fn deinit(db: *DeclBlock, gpa: Allocator) void { db.lazy_fns.deinit(gpa); - db.ctypes.deinit(gpa); + db.ctype_pool.deinit(gpa); db.* = undefined; } }; @@ -190,11 +190,12 @@ pub fn updateFunc( const decl = zcu.declPtr(decl_index); const gop = try self.decl_table.getOrPut(gpa, decl_index); if (!gop.found_existing) gop.value_ptr.* = .{}; - const ctypes = &gop.value_ptr.ctypes; + const ctype_pool = &gop.value_ptr.ctype_pool; const lazy_fns = &gop.value_ptr.lazy_fns; const fwd_decl = &self.fwd_decl_buf; const code = &self.code_buf; - ctypes.clearRetainingCapacity(gpa); + try ctype_pool.init(gpa); + ctype_pool.clearRetainingCapacity(); lazy_fns.clearRetainingCapacity(); fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); @@ -213,7 +214,8 @@ pub fn updateFunc( .pass = .{ .decl = decl_index }, .is_naked_fn = decl.typeOf(zcu).fnCallingConvention(zcu) == .Naked, .fwd_decl = fwd_decl.toManaged(gpa), - .ctypes = ctypes.*, + .ctype_pool = ctype_pool.*, + .scratch = .{}, .anon_decl_deps = self.anon_decls, .aligned_anon_decls = self.aligned_anon_decls, }, @@ -222,12 +224,16 @@ pub fn updateFunc( }, .lazy_fns = lazy_fns.*, }; - function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() }; defer { self.anon_decls = function.object.dg.anon_decl_deps; self.aligned_anon_decls = function.object.dg.aligned_anon_decls; fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged(); + ctype_pool.* = function.object.dg.ctype_pool.move(); + ctype_pool.freeUnusedCapacity(gpa); + function.object.dg.scratch.deinit(gpa); + lazy_fns.* = function.lazy_fns.move(); + lazy_fns.shrinkAndFree(gpa, lazy_fns.count()); code.* = function.object.code.moveToUnmanaged(); function.deinit(); } @@ -239,16 +245,8 @@ pub fn updateFunc( }, else => |e| return e, }; - - ctypes.* = function.object.dg.ctypes.move(); - lazy_fns.* = function.lazy_fns.move(); - - // Free excess allocated memory for this Decl. 
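The updateFunc defer above hands storage back and forth with move(): the DeclGen borrows the pool by value, and on exit the caller reclaims whatever was interned, leaving the source empty but reusable. A minimal sketch of that ownership round-trip, with Pool as a hypothetical stand-in for codegen.CType.Pool (not the compiler's actual definition):

const std = @import("std");

const Pool = struct {
    items: std.ArrayListUnmanaged(u32) = .{},

    // Like the hash maps in std, move() returns the current storage and
    // leaves the source empty but valid for reuse.
    fn move(p: *Pool) Pool {
        const result = p.*;
        p.* = .{};
        return result;
    }

    fn deinit(p: *Pool, gpa: std.mem.Allocator) void {
        p.items.deinit(gpa);
        p.* = undefined;
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var owner: Pool = .{};
    defer owner.deinit(gpa);

    var worker = owner.move(); // worker temporarily owns the storage
    try worker.items.append(gpa, 42);
    owner = worker.move(); // hand the (possibly grown) storage back

    std.debug.print("{}\n", .{owner.items.items[0]}); // prints 42
}

Moving rather than copying keeps a single allocation alive across the whole update, which is why the defers also get to call freeUnusedCapacity on the way out.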
- ctypes.shrinkAndFree(gpa, ctypes.count()); - lazy_fns.shrinkAndFree(gpa, lazy_fns.count()); - - gop.value_ptr.code = try self.addString(function.object.code.items); gop.value_ptr.fwd_decl = try self.addString(function.object.dg.fwd_decl.items); + gop.value_ptr.code = try self.addString(function.object.code.items); } fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { @@ -269,7 +267,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { .pass = .{ .anon = anon_decl }, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), - .ctypes = .{}, + .ctype_pool = codegen.CType.Pool.empty, + .scratch = .{}, .anon_decl_deps = self.anon_decls, .aligned_anon_decls = self.aligned_anon_decls, }, @@ -277,14 +276,15 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { .indent_writer = undefined, // set later so we can get a pointer to object.code }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; - defer { self.anon_decls = object.dg.anon_decl_deps; self.aligned_anon_decls = object.dg.aligned_anon_decls; - object.dg.ctypes.deinit(object.dg.gpa); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); + object.dg.ctype_pool.deinit(object.dg.gpa); + object.dg.scratch.deinit(gpa); code.* = object.code.moveToUnmanaged(); } + try object.dg.ctype_pool.init(gpa); const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) }; const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none; @@ -297,13 +297,11 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void { else => |e| return e, }; - // Free excess allocated memory for this Decl. - object.dg.ctypes.shrinkAndFree(gpa, object.dg.ctypes.count()); - + object.dg.ctype_pool.freeUnusedCapacity(gpa); object.dg.anon_decl_deps.values()[i] = .{ .code = try self.addString(object.code.items), .fwd_decl = try self.addString(object.dg.fwd_decl.items), - .ctypes = object.dg.ctypes.move(), + .ctype_pool = object.dg.ctype_pool.move(), }; } @@ -315,13 +313,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { const decl = zcu.declPtr(decl_index); const gop = try self.decl_table.getOrPut(gpa, decl_index); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - const ctypes = &gop.value_ptr.ctypes; + errdefer _ = self.decl_table.pop(); + if (!gop.found_existing) gop.value_ptr.* = .{}; + const ctype_pool = &gop.value_ptr.ctype_pool; const fwd_decl = &self.fwd_decl_buf; const code = &self.code_buf; - ctypes.clearRetainingCapacity(gpa); + try ctype_pool.init(gpa); + ctype_pool.clearRetainingCapacity(); fwd_decl.clearRetainingCapacity(); code.clearRetainingCapacity(); @@ -334,7 +332,8 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { .pass = .{ .decl = decl_index }, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), - .ctypes = ctypes.*, + .ctype_pool = ctype_pool.*, + .scratch = .{}, .anon_decl_deps = self.anon_decls, .aligned_anon_decls = self.aligned_anon_decls, }, @@ -345,8 +344,10 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { defer { self.anon_decls = object.dg.anon_decl_deps; self.aligned_anon_decls = object.dg.aligned_anon_decls; - object.dg.ctypes.deinit(object.dg.gpa); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); + ctype_pool.* = object.dg.ctype_pool.move(); + ctype_pool.freeUnusedCapacity(gpa); + object.dg.scratch.deinit(gpa); code.* = object.code.moveToUnmanaged(); } @@ -357,12 +358,6 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void { }, 
else => |e| return e, }; - - ctypes.* = object.dg.ctypes.move(); - - // Free excess allocated memory for this Decl. - ctypes.shrinkAndFree(gpa, ctypes.count()); - gop.value_ptr.code = try self.addString(object.code.items); gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items); } @@ -416,7 +411,10 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v // This code path happens exclusively with -ofmt=c. The flush logic for // emit-h is in `flushEmitH` below. - var f: Flush = .{}; + var f: Flush = .{ + .ctype_pool = codegen.CType.Pool.empty, + .lazy_ctype_pool = codegen.CType.Pool.empty, + }; defer f.deinit(gpa); const abi_defines = try self.abiDefines(zcu.getTarget()); @@ -443,7 +441,8 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v self.lazy_fwd_decl_buf.clearRetainingCapacity(); self.lazy_code_buf.clearRetainingCapacity(); - try self.flushErrDecls(zcu, &f.lazy_ctypes); + try f.lazy_ctype_pool.init(gpa); + try self.flushErrDecls(zcu, &f.lazy_ctype_pool); // Unlike other backends, the .c code we are emitting has order-dependent decls. // `CType`s, forward decls, and non-functions first. @@ -471,15 +470,15 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v { // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes. // This ensures that every lazy CType.Index exactly matches the global CType.Index. - assert(f.ctypes.count() == 0); - try self.flushCTypes(zcu, &f, .flush, f.lazy_ctypes); + try f.ctype_pool.init(gpa); + try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool); for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| { - try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, decl_block.ctypes); + try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, &decl_block.ctype_pool); } for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| { - try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, decl_block.ctypes); + try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, &decl_block.ctype_pool); } } @@ -510,11 +509,11 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v } const Flush = struct { - ctypes: codegen.CType.Store = .{}, - ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{}, + ctype_pool: codegen.CType.Pool, + ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .{}, ctypes_buf: std.ArrayListUnmanaged(u8) = .{}, - lazy_ctypes: codegen.CType.Store = .{}, + lazy_ctype_pool: codegen.CType.Pool, lazy_fns: LazyFns = .{}, asm_buf: std.ArrayListUnmanaged(u8) = .{}, @@ -536,10 +535,11 @@ const Flush = struct { f.all_buffers.deinit(gpa); f.asm_buf.deinit(gpa); f.lazy_fns.deinit(gpa); - f.lazy_ctypes.deinit(gpa); + f.lazy_ctype_pool.deinit(gpa); f.ctypes_buf.deinit(gpa); - f.ctypes_map.deinit(gpa); - f.ctypes.deinit(gpa); + assert(f.ctype_global_from_decl_map.items.len == 0); + f.ctype_global_from_decl_map.deinit(gpa); + f.ctype_pool.deinit(gpa); } }; @@ -552,88 +552,59 @@ fn flushCTypes( zcu: *Zcu, f: *Flush, pass: codegen.DeclGen.Pass, - decl_ctypes: codegen.CType.Store, + decl_ctype_pool: *const codegen.CType.Pool, ) FlushDeclError!void { const gpa = self.base.comp.gpa; + const global_ctype_pool = &f.ctype_pool; - const decl_ctypes_len = decl_ctypes.count(); - f.ctypes_map.clearRetainingCapacity(); - try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len); - - var global_ctypes = f.ctypes.promote(gpa); - defer 
f.ctypes.demote(global_ctypes); + const global_from_decl_map = &f.ctype_global_from_decl_map; + assert(global_from_decl_map.items.len == 0); + try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len); + defer global_from_decl_map.clearRetainingCapacity(); var ctypes_buf = f.ctypes_buf.toManaged(gpa); defer f.ctypes_buf = ctypes_buf.moveToUnmanaged(); const writer = ctypes_buf.writer(); - const slice = decl_ctypes.set.map.entries.slice(); - for (slice.items(.key), 0..) |decl_cty, decl_i| { - const Context = struct { - arena: Allocator, - ctypes_map: []codegen.CType.Index, - cached_hash: codegen.CType.Store.Set.Map.Hash, - idx: codegen.CType.Index, - - pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash { - return ctx.cached_hash; + for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| { + const PoolAdapter = struct { + global_from_decl_map: []const codegen.CType, + pub fn eql(pool_adapter: @This(), decl_ctype: codegen.CType, global_ctype: codegen.CType) bool { + return if (decl_ctype.toPoolIndex()) |decl_pool_index| + decl_pool_index < pool_adapter.global_from_decl_map.len and + pool_adapter.global_from_decl_map[decl_pool_index].eql(global_ctype) + else + decl_ctype.index == global_ctype.index; } - pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool { - return lhs.eqlContext(rhs, ctx); - } - pub fn eqlIndex( - ctx: @This(), - lhs_idx: codegen.CType.Index, - rhs_idx: codegen.CType.Index, - ) bool { - if (lhs_idx < codegen.CType.Tag.no_payload_count or - rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx; - const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count; - if (lhs_i >= ctx.ctypes_map.len) return false; - return ctx.ctypes_map[lhs_i] == rhs_idx; - } - pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index { - if (idx < codegen.CType.Tag.no_payload_count) return idx; - return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count]; + pub fn copy(pool_adapter: @This(), decl_ctype: codegen.CType) codegen.CType { + return if (decl_ctype.toPoolIndex()) |decl_pool_index| + pool_adapter.global_from_decl_map[decl_pool_index] + else + decl_ctype; } }; - const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i)); - const ctx = Context{ - .arena = global_ctypes.arena.allocator(), - .ctypes_map = f.ctypes_map.items, - .cached_hash = decl_ctypes.indexToHash(decl_idx), - .idx = decl_idx, - }; - const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{ - .store = &global_ctypes.set, - }); - const global_idx = - @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index)); - f.ctypes_map.appendAssumeCapacity(global_idx); - if (!gop.found_existing) { - errdefer _ = global_ctypes.set.map.pop(); - gop.key_ptr.* = try decl_cty.copyContext(ctx); - } - if (std.debug.runtime_safety) { - const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index]; - assert(global_cty == gop.key_ptr); - assert(decl_cty.eqlContext(global_cty.*, ctx)); - assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set)); - } + const decl_ctype = codegen.CType.fromPoolIndex(decl_ctype_pool_index); + const global_ctype, const found_existing = try global_ctype_pool.getOrPutAdapted( + gpa, + decl_ctype_pool, + decl_ctype, + PoolAdapter{ .global_from_decl_map = global_from_decl_map.items }, + ); + global_from_decl_map.appendAssumeCapacity(global_ctype); try codegen.genTypeDecl( zcu, writer, - 
global_ctypes.set, - global_idx, + global_ctype_pool, + global_ctype, pass, - decl_ctypes.set, - decl_idx, - gop.found_existing, + decl_ctype_pool, + decl_ctype, + found_existing, ); } } -fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclError!void { +fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void { const gpa = self.base.comp.gpa; const fwd_decl = &self.lazy_fwd_decl_buf; @@ -648,7 +619,8 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr .pass = .flush, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), - .ctypes = ctypes.*, + .ctype_pool = ctype_pool.*, + .scratch = .{}, .anon_decl_deps = self.anon_decls, .aligned_anon_decls = self.aligned_anon_decls, }, @@ -659,8 +631,10 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr defer { self.anon_decls = object.dg.anon_decl_deps; self.aligned_anon_decls = object.dg.aligned_anon_decls; - object.dg.ctypes.deinit(gpa); fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged(); + ctype_pool.* = object.dg.ctype_pool.move(); + ctype_pool.freeUnusedCapacity(gpa); + object.dg.scratch.deinit(gpa); code.* = object.code.moveToUnmanaged(); } @@ -668,15 +642,14 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr error.AnalysisFail => unreachable, else => |e| return e, }; - - ctypes.* = object.dg.ctypes.move(); } fn flushLazyFn( self: *C, zcu: *Zcu, mod: *Module, - ctypes: *codegen.CType.Store, + ctype_pool: *codegen.CType.Pool, + lazy_ctype_pool: *const codegen.CType.Pool, lazy_fn: codegen.LazyFnMap.Entry, ) FlushDeclError!void { const gpa = self.base.comp.gpa; @@ -693,7 +666,8 @@ fn flushLazyFn( .pass = .flush, .is_naked_fn = false, .fwd_decl = fwd_decl.toManaged(gpa), - .ctypes = ctypes.*, + .ctype_pool = ctype_pool.*, + .scratch = .{}, .anon_decl_deps = .{}, .aligned_anon_decls = .{}, }, @@ -706,17 +680,17 @@ fn flushLazyFn( // `updateFunc()` does. 
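The PoolAdapter used by flushCTypes above boils down to an index-remapping dedup: each decl-local item is interned into the global pool at most once, and a side table records the global equivalent of every local index so later references translate in O(1). A toy version of the idea, with plain integers standing in for ctypes and a linear scan standing in for the pool's hashed lookup (all names here are illustrative):

const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // Decl-local "pool" with a duplicate entry.
    const local = [_]u32{ 7, 7, 42 };

    var global = std.ArrayList(u32).init(gpa);
    defer global.deinit();
    // global_from_local[i] is the global index of local item i.
    var global_from_local = std.ArrayList(usize).init(gpa);
    defer global_from_local.deinit();

    for (local) |item| {
        // Find the item in the global store, or intern it.
        const idx = for (global.items, 0..) |g, i| {
            if (g == item) break i;
        } else blk: {
            try global.append(item);
            break :blk global.items.len - 1;
        };
        try global_from_local.append(idx);
    }

    // Duplicates map to the same global index: { 0, 0, 1 }.
    std.debug.print("{any}\n", .{global_from_local.items});
}

The real adapter compares through the map before copying, so structurally equal ctypes from different decls collapse to one global entry instead of being stored twice.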
         assert(object.dg.anon_decl_deps.count() == 0);
         assert(object.dg.aligned_anon_decls.count() == 0);
-        object.dg.ctypes.deinit(gpa);
         fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+        ctype_pool.* = object.dg.ctype_pool.move();
+        ctype_pool.freeUnusedCapacity(gpa);
+        object.dg.scratch.deinit(gpa);
         code.* = object.code.moveToUnmanaged();
     }
-    codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) {
+    codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) {
         error.AnalysisFail => unreachable,
         else => |e| return e,
     };
-
-    ctypes.* = object.dg.ctypes.move();
 }

 fn flushLazyFns(
@@ -724,6 +698,7 @@ fn flushLazyFns(
     zcu: *Zcu,
     mod: *Module,
     f: *Flush,
+    lazy_ctype_pool: *const codegen.CType.Pool,
     lazy_fns: codegen.LazyFnMap,
 ) FlushDeclError!void {
     const gpa = self.base.comp.gpa;
@@ -734,7 +709,7 @@ fn flushLazyFns(
         const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
         if (gop.found_existing) continue;
         gop.value_ptr.* = {};
-        try self.flushLazyFn(zcu, mod, &f.lazy_ctypes, entry);
+        try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
     }
 }
@@ -748,7 +723,7 @@ fn flushDeclBlock(
     extern_symbol_name: InternPool.OptionalNullTerminatedString,
 ) FlushDeclError!void {
     const gpa = self.base.comp.gpa;
-    try self.flushLazyFns(zcu, mod, f, decl_block.lazy_fns);
+    try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
     try f.all_buffers.ensureUnusedCapacity(gpa, 1);
     fwd_decl: {
         if (extern_symbol_name.unwrap()) |name| {
diff --git a/src/link/Coff.zig b/src/link/Coff.zig
index 7becb3f366..aaf840e02c 100644
--- a/src/link/Coff.zig
+++ b/src/link/Coff.zig
@@ -1223,7 +1223,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
     atom.getSymbolPtr(self).value = try self.allocateAtom(
         atom_index,
         atom.size,
-        @intCast(required_alignment.toByteUnitsOptional().?),
+        @intCast(required_alignment.toByteUnits().?),
     );
     errdefer self.freeAtom(atom_index);
@@ -1344,7 +1344,7 @@ fn updateLazySymbolAtom(
     symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
     symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };

-    const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
+    const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
     errdefer self.freeAtom(atom_index);

     log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1428,7 +1428,7 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
     const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
     log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });

-    const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));
+    const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
     const decl_metadata = self.decls.get(decl_index).?;
     const atom_index = decl_metadata.atom;
diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 95ddc81e3c..c20a4b6afa 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -4051,7 +4051,7 @@ fn updateSectionSizes(self: *Elf) !void {
             const padding = offset - shdr.sh_size;
             atom_ptr.value = offset;
             shdr.sh_size += padding + atom_ptr.size;
-            shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+            shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
         }
     }
diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig
index f391326670..3db1182696 100644
--- a/src/link/Elf/Atom.zig
+++ b/src/link/Elf/Atom.zig
@@ -208,7 +208,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             zig_object.debug_aranges_section_dirty = true;
         }
     }
-    shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
+    shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);

     // This function can also reallocate an atom.
     // In this case we need to "unplug" it from its previous location before
diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig
index 6ed55dac10..6aede441c8 100644
--- a/src/link/Elf/ZigObject.zig
+++ b/src/link/Elf/ZigObject.zig
@@ -313,7 +313,7 @@ pub fn inputShdr(self: ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.El
     shdr.sh_addr = 0;
     shdr.sh_offset = 0;
     shdr.sh_size = atom.size;
-    shdr.sh_addralign = atom.alignment.toByteUnits(1);
+    shdr.sh_addralign = atom.alignment.toByteUnits() orelse 1;
     return shdr;
 }
diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig
index 017329dde7..98abed420e 100644
--- a/src/link/Elf/relocatable.zig
+++ b/src/link/Elf/relocatable.zig
@@ -330,7 +330,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
         const padding = offset - shdr.sh_size;
         atom_ptr.value = offset;
         shdr.sh_size += padding + atom_ptr.size;
-        shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+        shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
     }
 }
diff --git a/src/link/Elf/thunks.zig b/src/link/Elf/thunks.zig
index 119529b512..7d06b3b190 100644
--- a/src/link/Elf/thunks.zig
+++ b/src/link/Elf/thunks.zig
@@ -63,7 +63,7 @@ fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
     const offset = alignment.forward(shdr.sh_size);
     const padding = offset - shdr.sh_size;
     shdr.sh_size += padding + size;
-    shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits(1));
+    shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
     return offset;
 }
diff --git a/src/link/MachO.zig b/src/link/MachO.zig
index 71666beb07..2c8a3da59f 100644
--- a/src/link/MachO.zig
+++ b/src/link/MachO.zig
@@ -2060,7 +2060,7 @@ fn calcSectionSizes(self: *MachO) !void {

         for (atoms.items) |atom_index| {
             const atom = self.getAtom(atom_index).?;
-            const atom_alignment = atom.alignment.toByteUnits(1);
+            const atom_alignment = atom.alignment.toByteUnits() orelse 1;
             const offset = mem.alignForward(u64, header.size, atom_alignment);
             const padding = offset - header.size;
             atom.value = offset;
diff --git a/src/link/MachO/relocatable.zig b/src/link/MachO/relocatable.zig
index b0eced27eb..711aa01fb4 100644
--- a/src/link/MachO/relocatable.zig
+++ b/src/link/MachO/relocatable.zig
@@ -380,7 +380,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
         if (atoms.items.len == 0) continue;
         for (atoms.items) |atom_index| {
             const atom = macho_file.getAtom(atom_index).?;
-            const atom_alignment = atom.alignment.toByteUnits(1);
+            const atom_alignment = atom.alignment.toByteUnits() orelse 1;
             const offset = mem.alignForward(u64, header.size, atom_alignment);
             const padding = offset - header.size;
             atom.value = offset;
diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig
index ce91beedae..10f00d8992 100644
--- a/src/link/Wasm.zig
+++ b/src/link/Wasm.zig
@@ -2263,7 +2263,7 @@ fn setupMemory(wasm: *Wasm) !void {
     }
     if (wasm.findGlobalSymbol("__tls_align")) |loc| {
         const sym = loc.getSymbol(wasm);
-        wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
+        wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnits().?);
     }
     if (wasm.findGlobalSymbol("__tls_base")) |loc| {
         const sym = loc.getSymbol(wasm);
diff --git a/src/main.zig b/src/main.zig
index 2edc3864c6..9e699c07e6 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -3544,11 +3544,7 @@ fn createModule(
             // If the target is not overridden, use the parent's target. Of course,
             // if this is the root module then we need to proceed to resolve the
             // target.
-            if (cli_mod.target_arch_os_abi == null and
-                cli_mod.target_mcpu == null and
-                create_module.dynamic_linker == null and
-                create_module.object_format == null)
-            {
+            if (cli_mod.target_arch_os_abi == null and cli_mod.target_mcpu == null) {
                 if (parent) |p| break :t p.resolved_target;
             }
diff --git a/src/print_value.zig b/src/print_value.zig
index 25c20bbbbd..21a322cd63 100644
--- a/src/print_value.zig
+++ b/src/print_value.zig
@@ -80,7 +80,7 @@ pub fn print(
         inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
         .lazy_align => |ty| if (opt_sema) |sema| {
             const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
-            try writer.print("{}", .{a.toByteUnits(0)});
+            try writer.print("{}", .{a.toByteUnits() orelse 0});
         } else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
         .lazy_size => |ty| if (opt_sema) |sema| {
             const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;
diff --git a/src/target.zig b/src/target.zig
index 8f1942111d..fa782075c7 100644
--- a/src/target.zig
+++ b/src/target.zig
@@ -525,7 +525,7 @@ pub fn backendSupportsFeature(
         .error_return_trace => use_llvm,
         .is_named_enum_value => use_llvm,
         .error_set_has_value => use_llvm or cpu_arch.isWasm(),
-        .field_reordering => use_llvm,
+        .field_reordering => ofmt == .c or use_llvm,
         .safety_checked_instructions => use_llvm,
     };
 }
diff --git a/src/type.zig b/src/type.zig
index 203ab4f63e..8352552463 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -203,7 +203,7 @@ pub const Type = struct {
                 info.flags.alignment
             else
                 Type.fromInterned(info.child).abiAlignment(mod);
-            try writer.print("align({d}", .{alignment.toByteUnits(0)});
+            try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});

             if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
                 try writer.print(":{d}:{d}", .{
@@ -863,7 +863,7 @@ pub const Type = struct {
     pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
         switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
             .val => |val| return val,
-            .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits(0)),
+            .scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
         }
     }
@@ -905,7 +905,7 @@ pub const Type = struct {
                 return .{ .scalar = intAbiAlignment(int_type.bits, target) };
             },
             .ptr_type, .anyframe_type => {
-                return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
+                return .{ .scalar = ptrAbiAlignment(target) };
             },
             .array_type => |array_type| {
                 return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
             },
@@ -920,6 +920,9 @@ pub const Type = struct {
                     const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
                     return .{ .scalar = Alignment.fromByteUnits(alignment) };
                 },
+                .stage2_c => {
+                    return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat);
+                },
                 .stage2_x86_64 => {
                     if (vector_type.child == .bool_type) {
                         if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
@@ -966,12 +969,12 @@ pub const Type = struct {

             .usize,
             .isize,
+            => return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target) },
+
             .export_options,
             .extern_options,
             .type_info,
-            => return .{
-                .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
-            },
+            => return .{ .scalar = ptrAbiAlignment(target) },

             .c_char => return .{ .scalar = cTypeAlign(target, .char) },
             .c_short => return .{ .scalar = cTypeAlign(target, .short) },
@@ -1160,9 +1163,7 @@
                 const child_type = ty.optionalChild(mod);

                 switch (child_type.zigTypeTag(mod)) {
-                    .Pointer => return .{
-                        .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
-                    },
+                    .Pointer => return .{ .scalar = ptrAbiAlignment(target) },
                     .ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
                     .NoReturn => return .{ .scalar = .@"1" },
                     else => {},
@@ -1274,6 +1275,10 @@
                         const total_bits = elem_bits * vector_type.len;
                         break :total_bytes (total_bits + 7) / 8;
                     },
+                    .stage2_c => total_bytes: {
+                        const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
+                        break :total_bytes elem_bytes * vector_type.len;
+                    },
                     .stage2_x86_64 => total_bytes: {
                         if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
                         const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
@@ -1527,15 +1532,19 @@
         // guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
         // to the child type's ABI alignment.
         return AbiSizeAdvanced{
-            .scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
+            .scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
         };
     }

-    fn intAbiSize(bits: u16, target: Target) u64 {
+    pub fn ptrAbiAlignment(target: Target) Alignment {
+        return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
+    }
+
+    pub fn intAbiSize(bits: u16, target: Target) u64 {
         return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
     }

-    fn intAbiAlignment(bits: u16, target: Target) Alignment {
+    pub fn intAbiAlignment(bits: u16, target: Target) Alignment {
         return Alignment.fromByteUnits(@min(
             std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
             target.maxIntAlignment(),
@@ -1572,7 +1581,7 @@
             if (len == 0) return 0;
             const elem_ty = Type.fromInterned(array_type.child);
             const elem_size = @max(
-                (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits(0),
+                (try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0,
                 (try elem_ty.abiSizeAdvanced(mod, strat)).scalar,
             );
             if (elem_size == 0) return 0;
@@ -3016,26 +3025,15 @@ pub const Type = struct {
     }

     /// Returns none in the case of a tuple which uses the integer index as the field name.
-    pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
+    pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
-            .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
-            .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
+            .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+            .anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
             else => unreachable,
         };
     }

-    /// When struct types have no field names, the names are implicitly understood to be
-    /// strings corresponding to the field indexes in declaration order. It used to be the
-    /// case that a NullTerminatedString would be stored for each field in this case, however,
-    /// now, callers must handle the possibility that there are no names stored at all.
-    /// Here we fake the previous behavior. Probably something better could be done by examining
-    /// all the callsites of this function.
-    pub fn legacyStructFieldName(ty: Type, i: u32, mod: *Module) InternPool.NullTerminatedString {
-        return ty.structFieldName(i, mod).unwrap() orelse
-            mod.intern_pool.getOrPutStringFmt(mod.gpa, "{d}", .{i}) catch @panic("OOM");
-    }
-
     pub fn structFieldCount(ty: Type, mod: *Module) u32 {
         const ip = &mod.intern_pool;
         return switch (ip.indexToKey(ty.toIntern())) {
diff --git a/test/behavior/align.zig b/test/behavior/align.zig
index b19ab8ae0c..2714612682 100644
--- a/test/behavior/align.zig
+++ b/test/behavior/align.zig
@@ -624,7 +624,6 @@ test "sub-aligned pointer field access" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

     // Originally reported at https://github.com/ziglang/zig/issues/14904
diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig
index 4b2ab52c59..042ee5a986 100644
--- a/test/behavior/vector.zig
+++ b/test/behavior/vector.zig
@@ -1176,18 +1176,22 @@ test "@shlWithOverflow" {
 test "alignment of vectors" {
     try expect(@alignOf(@Vector(2, u8)) == switch (builtin.zig_backend) {
         else => 2,
+        .stage2_c => @alignOf(u8),
         .stage2_x86_64 => 16,
     });
     try expect(@alignOf(@Vector(2, u1)) == switch (builtin.zig_backend) {
         else => 1,
+        .stage2_c => @alignOf(u1),
         .stage2_x86_64 => 16,
     });
     try expect(@alignOf(@Vector(1, u1)) == switch (builtin.zig_backend) {
         else => 1,
+        .stage2_c => @alignOf(u1),
         .stage2_x86_64 => 16,
     });
     try expect(@alignOf(@Vector(2, u16)) == switch (builtin.zig_backend) {
         else => 4,
+        .stage2_c => @alignOf(u16),
         .stage2_x86_64 => 16,
     });
 }
diff --git a/test/tests.zig b/test/tests.zig
index 525c6792b5..0c847e6e7f 100644
--- a/test/tests.zig
+++ b/test/tests.zig
@@ -1164,19 +1164,26 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
             compile_c.addCSourceFile(.{
                 .file = these_tests.getEmittedBin(),
                 .flags = &.{
-                    // TODO output -std=c89 compatible C code
+                    // Tracking issue for making the C backend generate C89 compatible code:
+                    // https://github.com/ziglang/zig/issues/19468
                     "-std=c99",
                     "-pedantic",
                     "-Werror",
-                    // TODO stop violating these pedantic errors. spotted everywhere
+
+                    // Tracking issue for making the C backend generate code
+                    // that does not trigger warnings:
+                    // https://github.com/ziglang/zig/issues/19467
+
+                    // spotted everywhere
                     "-Wno-builtin-requires-header",
-                    // TODO stop violating these pedantic errors. spotted on linux
-                    "-Wno-address-of-packed-member",
+
+                    // spotted on linux
                     "-Wno-gnu-folding-constant",
                     "-Wno-incompatible-function-pointer-types",
                     "-Wno-incompatible-pointer-types",
                     "-Wno-overlength-strings",
-                    // TODO stop violating these pedantic errors. spotted on darwin
+
+                    // spotted on darwin
                     "-Wno-dollar-in-identifier-extension",
                     "-Wno-absolute-value",
                 },
diff --git a/tools/lldb_pretty_printers.py b/tools/lldb_pretty_printers.py
index ebd5b0b3d0..e8263fbc21 100644
--- a/tools/lldb_pretty_printers.py
+++ b/tools/lldb_pretty_printers.py
@@ -354,7 +354,7 @@ def InstRef_SummaryProvider(value, _=None):
 def InstIndex_SummaryProvider(value, _=None):
     return 'instructions[%d]' % value.unsigned

-class Module_Decl__Module_Decl_Index_SynthProvider:
+class zig_DeclIndex_SynthProvider:
     def __init__(self, value, _=None): self.value = value
     def update(self):
         try:
@@ -425,7 +425,7 @@ def InternPool_Find(thread):
     for frame in thread:
         ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool')
         if ip: return ip
-        mod = frame.FindVariable('mod') or frame.FindVariable('module')
+        mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
         if mod:
             ip = mod.GetChildMemberWithName('intern_pool')
             if ip: return ip
@@ -617,7 +617,7 @@ type_tag_handlers = {

 def value_Value_str_lit(payload):
     for frame in payload.thread:
-        mod = frame.FindVariable('mod') or frame.FindVariable('module')
+        mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
         if mod: break
     else: return
     return '"%s"' % zig_String_decode(mod.GetChildMemberWithName('string_literal_bytes').GetChildMemberWithName('items'), payload.GetChildMemberWithName('index').unsigned, payload.GetChildMemberWithName('len').unsigned)
@@ -714,7 +714,7 @@ def __lldb_init_module(debugger, _=None):
     add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Index', identifier='InstIndex', summary=True)
     add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
     add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
-    add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)
+    add(debugger, category='zig.stage2', type='zig.DeclIndex', synth=True)
     add(debugger, category='zig.stage2', type='Module.Namespace::Module.Namespace.Index', synth=True)
     add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True)
     add(debugger, category='zig.stage2', type='InternPool.Index', synth=True)
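
Note on the recurring `toByteUnits` edits above: every call site moves from the old default-argument form `toByteUnits(default)` (and `toByteUnitsOptional().?`) to an optional-returning `toByteUnits()`, with each caller choosing its own fallback via `orelse` or asserting non-null with `.?`. The following is a minimal sketch of the assumed shape of the new API; the `Alignment` enum here is a simplified stand-in for illustration, not the compiler's actual `InternPool.Alignment`:

const std = @import("std");

// Simplified stand-in: alignment stored as a log2 value, `.none` = unspecified.
const Alignment = enum(u6) {
    none = std.math.maxInt(u6),
    _,

    // Assumed new signature: returns null for `.none` instead of
    // taking a caller-supplied default byte count.
    pub fn toByteUnits(a: Alignment) ?u64 {
        return switch (a) {
            .none => null,
            else => @as(u64, 1) << @intFromEnum(a),
        };
    }
};

test "each call site chooses its own fallback" {
    const unset: Alignment = .none;
    // Section-alignment call sites treat "unspecified" as 1 byte...
    try std.testing.expectEqual(@as(u64, 1), unset.toByteUnits() orelse 1);
    // ...while size and printing call sites treat it as 0.
    try std.testing.expectEqual(@as(u64, 0), unset.toByteUnits() orelse 0);

    const eight: Alignment = @enumFromInt(3); // log2(8) = 3
    // Where the alignment is known to be set, `.?` asserts non-null,
    // replacing the old `toByteUnitsOptional().?` spelling.
    try std.testing.expectEqual(@as(u64, 8), eight.toByteUnits().?);
}

The win over `toByteUnits(0)` is readability: the argument no longer looks like a byte count, and each `orelse` makes the fallback explicit at the call site.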
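Similarly, the new `.stage2_c` prongs in src/type.zig lay a vector out like a plain array of its elements: ABI alignment equal to the element type's alignment, ABI size equal to element size times length, with no rounding up to a power of two. A rough model of that rule, using hypothetical helper names (`cVectorAlign`, `cVectorSize` are illustrations, not compiler functions):

const std = @import("std");
const builtin = @import("builtin");

// C backend rule: a vector is laid out like an array of its elements.
fn cVectorAlign(comptime Elem: type) usize {
    return @alignOf(Elem);
}

fn cVectorSize(comptime Elem: type, len: usize) usize {
    return @sizeOf(Elem) * len;
}

test "stage2_c vector layout follows the element type" {
    try std.testing.expect(cVectorAlign(u8) == @alignOf(u8));
    try std.testing.expect(cVectorSize(u16, 2) == 2 * @sizeOf(u16));
    // Mirrors the expectation added to test/behavior/vector.zig:
    // under the C backend, @alignOf(@Vector(n, T)) == @alignOf(T).
    if (builtin.zig_backend == .stage2_c) {
        try std.testing.expect(@alignOf(@Vector(2, u8)) == cVectorAlign(u8));
    }
}

This is also consistent with the other C-backend changes in the patch: the `field_reordering` feature is enabled for `ofmt == .c`, and the previously skipped sub-aligned pointer test in test/behavior/align.zig now runs under `stage2_c`.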