cbe: rewrite CType

Closes #14904
Jacob Young 2024-03-28 20:41:58 -04:00
parent 6f10b11658
commit 5a41704f7e
45 changed files with 3681 additions and 3627 deletions

View File

@@ -564,7 +564,7 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
"${CMAKE_SOURCE_DIR}/src/codegen.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
-"${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig"
+"${CMAKE_SOURCE_DIR}/src/codegen/c/Type.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
"${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
"${CMAKE_SOURCE_DIR}/src/glibc.zig"

View File

@@ -16,9 +16,7 @@ pub fn build(b: *std.Build) !void {
const only_c = b.option(bool, "only-c", "Translate the Zig compiler to C code, with only the C backend enabled") orelse false;
const target = t: {
var default_target: std.zig.CrossTarget = .{};
-if (only_c) {
-default_target.ofmt = .c;
-}
+default_target.ofmt = b.option(std.Target.ObjectFormat, "ofmt", "Object format to target") orelse if (only_c) .c else null;
break :t b.standardTargetOptions(.{ .default_target = default_target });
};
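
The only-c special case is folded into a new user-facing -Dofmt build option: an explicit -Dofmt always wins, -Donly-c alone still implies the C object format, and otherwise the target's default format is kept. A minimal sketch of that precedence (resolveOfmt is a hypothetical helper, not part of the commit):

    const std = @import("std");

    fn resolveOfmt(explicit: ?std.Target.ObjectFormat, only_c: bool) ?std.Target.ObjectFormat {
        // Mirrors the `orelse if (only_c) .c else null` chain in the hunk above.
        return explicit orelse if (only_c) .c else null;
    }

    test "ofmt precedence" {
        try std.testing.expectEqual(@as(?std.Target.ObjectFormat, .elf), resolveOfmt(.elf, true));
        try std.testing.expectEqual(@as(?std.Target.ObjectFormat, .c), resolveOfmt(null, true));
        try std.testing.expectEqual(@as(?std.Target.ObjectFormat, null), resolveOfmt(null, false));
    }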

View File

@@ -1150,8 +1150,8 @@ pub const siginfo_t = extern struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with function name.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,
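
This hunk and the near-identical ones below swap c_int for i32 in the OS signal types. Since c_int is 32 bits on every target Zig supports, the ABI is unchanged; handlers just spell the parameter without the C alias. A sketch of installing a handler against the new signature (assumes a POSIX target, that the Sigaction field set matches the Linux definition, and that std.posix.sigaction keeps its usual error-union signature):

    const std = @import("std");
    const posix = std.posix;

    fn onInterrupt(sig: i32) callconv(.C) void {
        _ = sig; // only async-signal-safe work belongs here
    }

    pub fn main() !void {
        const act: posix.Sigaction = .{
            .handler = .{ .handler = &onInterrupt },
            .mask = posix.empty_sigset,
            .flags = 0,
        };
        try posix.sigaction(posix.SIG.INT, &act, null);
    }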

View File

@@ -690,8 +690,8 @@ pub const empty_sigset = sigset_t{ .__bits = [_]c_uint{0} ** _SIG_WORDS };
pub const sig_atomic_t = c_int;
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {
@@ -702,7 +702,7 @@ pub const Sigaction = extern struct {
mask: sigset_t,
};
-pub const sig_t = *const fn (c_int) callconv(.C) void;
+pub const sig_t = *const fn (i32) callconv(.C) void;
pub const SOCK = struct {
pub const STREAM = 1;

View File

@@ -1171,8 +1171,8 @@ const NSIG = 32;
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {

View File

@@ -501,7 +501,7 @@ pub const siginfo_t = extern struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *allowzero anyopaque, ?*anyopaque) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {

View File

@@ -864,8 +864,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {

View File

@@ -842,8 +842,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal handler
handler: extern union {

View File

@@ -874,8 +874,8 @@ pub const SIG = struct {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
/// signal options
flags: c_uint,

View File

@@ -2570,7 +2570,7 @@ fn resetSegfaultHandler() void {
updateSegfaultHandler(&act) catch {};
}
-fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn {
+fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn {
// Reset to the default handler so that if a segfault happens in this handler it will crash
// the process. Also when this handler returns, the original instruction will be repeated
// and the resulting segfault will crash the process rather than continually dump stack traces.

View File

@@ -695,8 +695,8 @@ pub const SIG = struct {
};
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,

View File

@@ -4301,7 +4301,7 @@ pub const all_mask: sigset_t = [_]u32{0xffffffff} ** @typeInfo(sigset_t).Array.l
pub const app_mask: sigset_t = [2]u32{ 0xfffffffc, 0x7fffffff } ++ [_]u32{0xffffffff} ** 30;
const k_sigaction_funcs = struct {
-const handler = ?*align(1) const fn (c_int) callconv(.C) void;
+const handler = ?*align(1) const fn (i32) callconv(.C) void;
const restorer = *const fn () callconv(.C) void;
};
@@ -4328,8 +4328,8 @@ pub const k_sigaction = switch (native_arch) {
/// Renamed from `sigaction` to `Sigaction` to avoid conflict with the syscall.
pub const Sigaction = extern struct {
-pub const handler_fn = *align(1) const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *align(1) const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,

View File

@@ -186,8 +186,8 @@ pub const empty_sigset = 0;
pub const siginfo_t = c_long;
// TODO plan9 doesn't have sigaction_fn. Sigaction is not a union, but we incude it here to be compatible.
pub const Sigaction = extern struct {
-pub const handler_fn = *const fn (c_int) callconv(.C) void;
-pub const sigaction_fn = *const fn (c_int, *const siginfo_t, ?*anyopaque) callconv(.C) void;
+pub const handler_fn = *const fn (i32) callconv(.C) void;
+pub const sigaction_fn = *const fn (i32, *const siginfo_t, ?*anyopaque) callconv(.C) void;
handler: extern union {
handler: ?handler_fn,

View File

@@ -597,4 +597,4 @@ fn maybeIgnoreSigpipe() void {
}
}
-fn noopSigHandler(_: c_int) callconv(.C) void {}
+fn noopSigHandler(_: i32) callconv(.C) void {}

View File

@@ -3457,14 +3457,18 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
.pass = .{ .decl = decl_index },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = .{},
+.ctype_pool = c_codegen.CType.Pool.empty,
+.scratch = .{},
.anon_decl_deps = .{},
.aligned_anon_decls = .{},
};
defer {
-dg.ctypes.deinit(gpa);
-dg.fwd_decl.deinit();
+fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
+fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
+dg.ctype_pool.deinit(gpa);
+dg.scratch.deinit(gpa);
}
+try dg.ctype_pool.init(gpa);
c_codegen.genHeader(&dg) catch |err| switch (err) {
error.AnalysisFail => {
@@ -3473,9 +3477,6 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
},
else => |e| return e,
};
-fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
-fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
},
}
},
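
The emit-h DeclGen now carries the rewritten pool type with an explicit lifecycle: start from CType.Pool.empty, call init() before generating anything, and tear the pool down in the same defer that recovers the forward-declaration buffer. Condensed from the hunk above (not standalone code; dg and gpa are the surrounding locals):

    dg.ctype_pool = c_codegen.CType.Pool.empty; // trivial empty value, nothing allocated yet
    try dg.ctype_pool.init(gpa); // performs the pool's initial allocation
    defer dg.ctype_pool.deinit(gpa); // runs even when genHeader fails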

View File

@@ -712,7 +712,7 @@ pub const Key = union(enum) {
pub fn fieldName(
self: AnonStructType,
ip: *const InternPool,
-index: u32,
+index: usize,
) OptionalNullTerminatedString {
if (self.names.len == 0)
return .none;
@@ -3879,20 +3879,13 @@ pub const Alignment = enum(u6) {
none = std.math.maxInt(u6),
_,
-pub fn toByteUnitsOptional(a: Alignment) ?u64 {
+pub fn toByteUnits(a: Alignment) ?u64 {
return switch (a) {
.none => null,
else => @as(u64, 1) << @intFromEnum(a),
};
}
-pub fn toByteUnits(a: Alignment, default: u64) u64 {
-return switch (a) {
-.none => default,
-else => @as(u64, 1) << @intFromEnum(a),
-};
-}
pub fn fromByteUnits(n: u64) Alignment {
if (n == 0) return .none;
assert(std.math.isPowerOfTwo(n));
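
The old pair toByteUnitsOptional() / toByteUnits(default) collapses into a single optional-returning toByteUnits(), so every call site below now states its own fallback. A minimal sketch of the migration (a stands for any Alignment value; shown without the surrounding imports):

    fn migrated(a: InternPool.Alignment) void {
        _ = a.toByteUnits(); // ?u64; null when a == .none
        _ = a.toByteUnits() orelse 0; // replaces the old a.toByteUnits(0)
        _ = a.toByteUnits().?; // replaces the old a.toByteUnitsOptional().? (asserts non-.none)
    }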

View File

@@ -5846,7 +5846,7 @@ pub fn intBitsForValue(mod: *Module, val: Value, sign: bool) u16 {
return @as(u16, @intCast(big.bitCountTwosComp()));
},
.lazy_align => |lazy_ty| {
-return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits(0)) + @intFromBool(sign);
+return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiAlignment(mod).toByteUnits() orelse 0) + @intFromBool(sign);
},
.lazy_size => |lazy_ty| {
return Type.smallestUnsignedBits(Type.fromInterned(lazy_ty).abiSize(mod)) + @intFromBool(sign);

View File

@@ -6508,7 +6508,7 @@ fn zirSetAlignStack(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Inst
const alignment = try sema.resolveAlign(block, operand_src, extra.operand);
if (alignment.order(Alignment.fromNonzeroByteUnits(256)).compare(.gt)) {
return sema.fail(block, src, "attempt to @setAlignStack({d}); maximum is 256", .{
-alignment.toByteUnitsOptional().?,
+alignment.toByteUnits().?,
});
}
@@ -17804,7 +17804,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
},
.Pointer => {
const info = ty.ptrInfo(mod);
-const alignment = if (info.flags.alignment.toByteUnitsOptional()) |alignment|
+const alignment = if (info.flags.alignment.toByteUnits()) |alignment|
try mod.intValue(Type.comptime_int, alignment)
else
try Type.fromInterned(info.child).lazyAbiAlignment(mod);
@@ -18279,7 +18279,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// type: type,
field_ty,
// alignment: comptime_int,
-(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+(try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = union_field_ty.toIntern(),
@@ -18436,7 +18436,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(is_comptime).toIntern(),
// alignment: comptime_int,
-(try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits(0))).toIntern(),
+(try mod.intValue(Type.comptime_int, Type.fromInterned(field_ty).abiAlignment(mod).toByteUnits() orelse 0)).toIntern(),
};
struct_field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -18505,7 +18505,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
// is_comptime: bool,
Value.makeBool(field_is_comptime).toIntern(),
// alignment: comptime_int,
-(try mod.intValue(Type.comptime_int, alignment.toByteUnits(0))).toIntern(),
+(try mod.intValue(Type.comptime_int, alignment.toByteUnits() orelse 0)).toIntern(),
};
field_val.* = try mod.intern(.{ .aggregate = .{
.ty = struct_field_ty.toIntern(),
@@ -22552,7 +22552,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align.compare(.gt, .@"1")) {
-const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22572,7 +22572,7 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
try sema.addSafetyCheck(block, src, is_non_zero, .cast_to_null);
}
if (ptr_align.compare(.gt, .@"1")) {
-const align_bytes_minus_1 = ptr_align.toByteUnitsOptional().? - 1;
+const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
@@ -22970,10 +22970,10 @@ fn ptrCastFull(
const msg = try sema.errMsg(block, src, "cast increases pointer alignment", .{});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, operand_src, msg, "'{}' has alignment '{d}'", .{
-operand_ty.fmt(mod), src_align.toByteUnits(0),
+operand_ty.fmt(mod), src_align.toByteUnits() orelse 0,
});
try sema.errNote(block, src, msg, "'{}' has alignment '{d}'", .{
-dest_ty.fmt(mod), dest_align.toByteUnits(0),
+dest_ty.fmt(mod), dest_align.toByteUnits() orelse 0,
});
try sema.errNote(block, src, msg, "use @alignCast to assert pointer alignment", .{});
break :msg msg;
@@ -23067,7 +23067,7 @@ fn ptrCastFull(
if (!dest_align.check(addr)) {
return sema.fail(block, operand_src, "pointer address 0x{X} is not aligned to {d} bytes", .{
addr,
-dest_align.toByteUnitsOptional().?,
+dest_align.toByteUnits().?,
});
}
}
@@ -23110,7 +23110,7 @@ fn ptrCastFull(
dest_align.compare(.gt, src_align) and
try sema.typeHasRuntimeBits(Type.fromInterned(dest_info.child)))
{
-const align_bytes_minus_1 = dest_align.toByteUnitsOptional().? - 1;
+const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
const align_minus_1 = Air.internedToRef((try mod.intValue(Type.usize, align_bytes_minus_1)).toIntern());
const ptr_int = try block.addUnOp(.int_from_ptr, ptr);
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
@@ -27837,7 +27837,7 @@ fn structFieldPtrByIndex(
const elem_size_bits = Type.fromInterned(ptr_ty_data.child).bitSize(mod);
if (elem_size_bytes * 8 == elem_size_bits) {
const byte_offset = ptr_ty_data.packed_offset.bit_offset / 8;
-const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnitsOptional().?));
+const new_align: Alignment = @enumFromInt(@ctz(byte_offset | parent_align.toByteUnits().?));
assert(new_align != .none);
ptr_ty_data.flags.alignment = new_align;
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
@@ -29132,7 +29132,7 @@ fn coerceExtra(
.addr = .{ .int = if (dest_info.flags.alignment != .none)
(try mod.intValue(
Type.usize,
-dest_info.flags.alignment.toByteUnitsOptional().?,
+dest_info.flags.alignment.toByteUnits().?,
)).toIntern()
else
try mod.intern_pool.getCoercedInts(
@@ -29800,7 +29800,7 @@ const InMemoryCoercionResult = union(enum) {
},
.ptr_alignment => |pair| {
try sema.errNote(block, src, msg, "pointer alignment '{d}' cannot cast into pointer alignment '{d}'", .{
-pair.actual.toByteUnits(0), pair.wanted.toByteUnits(0),
+pair.actual.toByteUnits() orelse 0, pair.wanted.toByteUnits() orelse 0,
});
break;
},

View File

@@ -176,7 +176,7 @@ pub fn toBigIntAdvanced(
if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty));
const x = switch (int.storage) {
else => unreachable,
-.lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+.lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
.lazy_size => Type.fromInterned(ty).abiSize(mod),
};
return BigIntMutable.init(&space.limbs, x).toConst();
@@ -237,9 +237,9 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
.u64 => |x| x,
.i64 => |x| std.math.cast(u64, x),
.lazy_align => |ty| if (opt_sema) |sema|
-(try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0)
+(try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0
else
-Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0),
+Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0,
.lazy_size => |ty| if (opt_sema) |sema|
(try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar
else
@@ -289,7 +289,7 @@ pub fn toSignedInt(val: Value, mod: *Module) i64 {
.big_int => |big_int| big_int.to(i64) catch unreachable,
.i64 => |x| x,
.u64 => |x| @intCast(x),
-.lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+.lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
.lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)),
},
else => unreachable,
@@ -497,7 +497,7 @@ pub fn writeToPackedMemory(
inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian),
.big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian),
.lazy_align => |lazy_align| {
-const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits(0);
+const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits() orelse 0;
std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian);
},
.lazy_size => |lazy_size| {
@@ -890,7 +890,7 @@ pub fn toFloat(val: Value, comptime T: type, mod: *Module) T {
}
return @floatFromInt(x);
},
-.lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)),
+.lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0),
.lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)),
},
.float => |float| switch (float.storage) {
@@ -1529,9 +1529,9 @@ pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*
},
inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod),
.lazy_align => |ty| if (opt_sema) |sema| {
-return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod);
+return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits() orelse 0, float_ty, mod);
} else {
-return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod);
+return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0, float_ty, mod);
},
.lazy_size => |ty| if (opt_sema) |sema| {
return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod);

View File

@@ -1296,7 +1296,7 @@ fn genFunc(func: *CodeGen) InnerError!void {
// subtract it from the current stack pointer
try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
// Get negative stack aligment
-try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnitsOptional().?)) * -1 } });
+try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @as(i32, @intCast(func.stack_alignment.toByteUnits().?)) * -1 } });
// Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
// store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
@@ -2107,7 +2107,7 @@ fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
});
try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
.offset = operand.offset(),
-.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(scalar_type.abiAlignment(mod).toByteUnits().?),
});
},
else => try func.emitWValue(operand),
@@ -2384,7 +2384,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_store),
offset + lhs.offset(),
-@intCast(ty.abiAlignment(mod).toByteUnits(0)),
+@intCast(ty.abiAlignment(mod).toByteUnits() orelse 0),
});
return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
},
@@ -2440,7 +2440,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + lhs.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
}
@@ -2500,7 +2500,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
std.wasm.simdOpcode(.v128_load),
offset + operand.offset(),
-@intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+@intCast(ty.abiAlignment(mod).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
return WValue{ .stack = {} };
@@ -2518,7 +2518,7 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
Mir.Inst.Tag.fromOpcode(opcode),
.{
.offset = offset + operand.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
@@ -3456,7 +3456,7 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, mod: *Module) i32 {
.i64 => |x| @as(i32, @intCast(x)),
.u64 => |x| @as(i32, @bitCast(@as(u32, @intCast(x)))),
.big_int => unreachable,
-.lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0))))),
+.lazy_align => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits() orelse 0)))),
.lazy_size => |ty| @as(i32, @bitCast(@as(u32, @intCast(Type.fromInterned(ty).abiSize(mod))))),
};
}
@@ -4204,7 +4204,7 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerErro
if (pl_ty.hasRuntimeBitsIgnoreComptime(mod)) {
try func.addMemArg(.i32_load16_u, .{
.offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod))),
-.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
});
}
@@ -5141,7 +5141,7 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.mir_extra.appendSlice(func.gpa, &[_]u32{
opcode,
operand.offset(),
-@intCast(elem_ty.abiAlignment(mod).toByteUnitsOptional().?),
+@intCast(elem_ty.abiAlignment(mod).toByteUnits().?),
});
try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
try func.addLabel(.local_set, result.local.value);
@@ -6552,7 +6552,7 @@ fn lowerTry(
const err_offset = @as(u32, @intCast(errUnionErrorOffset(pl_ty, mod)));
try func.addMemArg(.i32_load16_u, .{
.offset = err_union.offset() + err_offset,
-.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(Type.anyerror.abiAlignment(mod).toByteUnits().?),
});
}
try func.addTag(.i32_eqz);
@@ -7499,7 +7499,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
}, .{
.offset = ptr_operand.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
try func.addLabel(.local_tee, val_local.local.value);
_ = try func.cmp(.stack, expected_val, ty, .eq);
@@ -7561,7 +7561,7 @@ fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
} else {
_ = try func.load(ptr, ty, 0);
@@ -7622,7 +7622,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
},
.{
.offset = ptr.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
},
);
const select_res = try func.allocLocal(ty);
@@ -7682,7 +7682,7 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
};
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
const result = try WValue.toLocal(.stack, func, ty);
return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
@@ -7781,7 +7781,7 @@ fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.lowerToStack(operand);
try func.addAtomicMemArg(tag, .{
.offset = ptr.offset(),
-.alignment = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?),
+.alignment = @intCast(ty.abiAlignment(mod).toByteUnits().?),
});
} else {
try func.store(ptr, operand, ty, 0);

View File

@@ -18959,7 +18959,7 @@ fn resolveCallingConventionValues(
const param_size: u31 = @intCast(ty.abiSize(mod));
const param_align: u31 =
-@intCast(@max(ty.abiAlignment(mod).toByteUnitsOptional().?, 8));
+@intCast(@max(ty.abiAlignment(mod).toByteUnits().?, 8));
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19003,7 +19003,7 @@ fn resolveCallingConventionValues(
continue;
}
const param_size: u31 = @intCast(ty.abiSize(mod));
-const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
+const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnits().?);
result.stack_byte_count =
mem.alignForward(u31, result.stack_byte_count, param_align);
arg.* = .{ .load_frame = .{
@@ -19096,7 +19096,7 @@ fn splitType(self: *Self, ty: Type) ![2]Type {
.integer => switch (part_i) {
0 => Type.u64,
1 => part: {
-const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnitsOptional().?;
+const elem_size = ty.abiAlignment(mod).minStrict(.@"8").toByteUnits().?;
const elem_ty = try mod.intType(.unsigned, @intCast(elem_size * 8));
break :part switch (@divExact(ty.abiSize(mod) - 8, elem_size)) {
1 => elem_ty,

View File

@@ -548,7 +548,7 @@ pub fn generateSymbol(
}
const size = struct_type.size(ip).*;
-const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
+const alignment = struct_type.flagsPtr(ip).alignment.toByteUnits().?;
const padding = math.cast(
usize,
@@ -893,12 +893,12 @@ fn genDeclRef(
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (ty.castPtrToFn(zcu)) |fn_ty| {
if (zcu.typeToFunc(fn_ty).?.is_generic) {
-return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+return GenResult.mcv(.{ .immediate = fn_ty.abiAlignment(zcu).toByteUnits().? });
}
} else if (ty.zigTypeTag(zcu) == .Pointer) {
const elem_ty = ty.elemType2(zcu);
if (!elem_ty.hasRuntimeBits(zcu)) {
-return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnitsOptional().? });
+return GenResult.mcv(.{ .immediate = elem_ty.abiAlignment(zcu).toByteUnits().? });
}
}

File diff suppressed because it is too large

src/codegen/c/Type.zig (new file, 2472 lines)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -2033,7 +2033,7 @@ pub const Object = struct {
owner_decl.src_node + 1, // Line
try o.lowerDebugType(int_ty),
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(enumerators),
);
@@ -2120,7 +2120,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(ptr_ty),
ptr_size * 8,
-ptr_align.toByteUnits(0) * 8,
+(ptr_align.toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2131,7 +2131,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(len_ty),
len_size * 8,
-len_align.toByteUnits(0) * 8,
+(len_align.toByteUnits() orelse 0) * 8,
len_offset * 8,
);
@@ -2142,7 +2142,7 @@ pub const Object = struct {
line,
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_ptr_type,
debug_len_type,
@@ -2170,7 +2170,7 @@ pub const Object = struct {
0, // Line
debug_elem_ty,
target.ptrBitWidth(),
-ty.ptrAlignment(mod).toByteUnits(0) * 8,
+(ty.ptrAlignment(mod).toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2217,7 +2217,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(ty.childType(mod)),
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2260,7 +2260,7 @@ pub const Object = struct {
0, // Line
debug_elem_type,
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
try o.builder.debugSubrange(
try o.builder.debugConstant(try o.builder.intConst(.i64, 0)),
@@ -2316,7 +2316,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(child_ty),
payload_size * 8,
-payload_align.toByteUnits(0) * 8,
+(payload_align.toByteUnits() orelse 0) * 8,
0, // Offset
);
@@ -2327,7 +2327,7 @@ pub const Object = struct {
0,
try o.lowerDebugType(non_null_ty),
non_null_size * 8,
-non_null_align.toByteUnits(0) * 8,
+(non_null_align.toByteUnits() orelse 0) * 8,
non_null_offset * 8,
);
@@ -2338,7 +2338,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&.{
debug_data_type,
debug_some_type,
@@ -2396,7 +2396,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.anyerror),
error_size * 8,
-error_align.toByteUnits(0) * 8,
+(error_align.toByteUnits() orelse 0) * 8,
error_offset * 8,
);
fields[payload_index] = try o.builder.debugMemberType(
@@ -2406,7 +2406,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(payload_ty),
payload_size * 8,
-payload_align.toByteUnits(0) * 8,
+(payload_align.toByteUnits() orelse 0) * 8,
payload_offset * 8,
);
@@ -2417,7 +2417,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&fields),
);
@@ -2485,7 +2485,7 @@ pub const Object = struct {
0,
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
-field_align.toByteUnits(0) * 8,
+(field_align.toByteUnits() orelse 0) * 8,
field_offset * 8,
));
}
@@ -2497,7 +2497,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2566,7 +2566,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(field_ty),
field_size * 8,
-field_align.toByteUnits(0) * 8,
+(field_align.toByteUnits() orelse 0) * 8,
field_offset * 8,
));
}
@@ -2578,7 +2578,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2621,7 +2621,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
@@ -2661,7 +2661,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.fromInterned(field_ty)),
field_size * 8,
-field_align.toByteUnits(0) * 8,
+(field_align.toByteUnits() orelse 0) * 8,
0, // Offset
));
}
@@ -2680,7 +2680,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(fields.items),
);
@@ -2711,7 +2711,7 @@ pub const Object = struct {
0, // Line
try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)),
layout.tag_size * 8,
-layout.tag_align.toByteUnits(0) * 8,
+(layout.tag_align.toByteUnits() orelse 0) * 8,
tag_offset * 8,
);
@@ -2722,7 +2722,7 @@ pub const Object = struct {
0, // Line
debug_union_type,
layout.payload_size * 8,
-layout.payload_align.toByteUnits(0) * 8,
+(layout.payload_align.toByteUnits() orelse 0) * 8,
payload_offset * 8,
);
@@ -2739,7 +2739,7 @@ pub const Object = struct {
0, // Line
.none, // Underlying type
ty.abiSize(mod) * 8,
-ty.abiAlignment(mod).toByteUnits(0) * 8,
+(ty.abiAlignment(mod).toByteUnits() orelse 0) * 8,
try o.builder.debugTuple(&full_fields),
);
@@ -4473,7 +4473,7 @@ pub const Object = struct {
// The value cannot be undefined, because we use the `nonnull` annotation
// for non-optional pointers. We also need to respect the alignment, even though
// the address will never be dereferenced.
-const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnitsOptional() orelse
+const int: u64 = ptr_ty.ptrInfo(mod).flags.alignment.toByteUnits() orelse
// Note that these 0xaa values are appropriate even in release-optimized builds
// because we need a well-defined value that is not null, and LLVM does not
// have an "undef_but_not_null" attribute. As an example, if this `alloc` AIR

View File

@@ -172,7 +172,7 @@ pub fn attachSegfaultHandler() void {
};
}
-fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) noreturn {
+fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.C) noreturn {
// TODO: use alarm() here to prevent infinite loops
PanicSwitch.preDispatch();

View File

@@ -69,13 +69,13 @@ pub const DeclBlock = struct {
fwd_decl: String = String.empty,
/// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
/// over each `Decl` and generate the definition for each used `CType` once.
-ctypes: codegen.CType.Store = .{},
-/// Key and Value storage use the ctype arena.
+ctype_pool: codegen.CType.Pool = codegen.CType.Pool.empty,
+/// May contain string references to ctype_pool
lazy_fns: codegen.LazyFnMap = .{},
fn deinit(db: *DeclBlock, gpa: Allocator) void {
db.lazy_fns.deinit(gpa);
-db.ctypes.deinit(gpa);
+db.ctype_pool.deinit(gpa);
db.* = undefined;
}
};
@@ -190,11 +190,12 @@ pub fn updateFunc(
const decl = zcu.declPtr(decl_index);
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
-const ctypes = &gop.value_ptr.ctypes;
+const ctype_pool = &gop.value_ptr.ctype_pool;
const lazy_fns = &gop.value_ptr.lazy_fns;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
-ctypes.clearRetainingCapacity(gpa);
+try ctype_pool.init(gpa);
+ctype_pool.clearRetainingCapacity();
lazy_fns.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
@@ -213,7 +214,8 @@ pub fn updateFunc(
.pass = .{ .decl = decl_index },
.is_naked_fn = decl.typeOf(zcu).fnCallingConvention(zcu) == .Naked,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = ctypes.*,
+.ctype_pool = ctype_pool.*,
+.scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -222,12 +224,16 @@ pub fn updateFunc(
},
.lazy_fns = lazy_fns.*,
};
function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
defer {
self.anon_decls = function.object.dg.anon_decl_deps;
self.aligned_anon_decls = function.object.dg.aligned_anon_decls;
fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
+ctype_pool.* = function.object.dg.ctype_pool.move();
+ctype_pool.freeUnusedCapacity(gpa);
+function.object.dg.scratch.deinit(gpa);
+lazy_fns.* = function.lazy_fns.move();
+lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
code.* = function.object.code.moveToUnmanaged();
function.deinit();
}
@@ -239,16 +245,8 @@ pub fn updateFunc(
},
else => |e| return e,
};
-ctypes.* = function.object.dg.ctypes.move();
-lazy_fns.* = function.lazy_fns.move();
-// Free excess allocated memory for this Decl.
-ctypes.shrinkAndFree(gpa, ctypes.count());
-lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
-gop.value_ptr.code = try self.addString(function.object.code.items);
gop.value_ptr.fwd_decl = try self.addString(function.object.dg.fwd_decl.items);
+gop.value_ptr.code = try self.addString(function.object.code.items);
}
fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
@@ -269,7 +267,8 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
.pass = .{ .anon = anon_decl },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = .{},
+.ctype_pool = codegen.CType.Pool.empty,
+.scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -277,14 +276,15 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
.indent_writer = undefined, // set later so we can get a pointer to object.code
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
-object.dg.ctypes.deinit(object.dg.gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+object.dg.ctype_pool.deinit(object.dg.gpa);
+object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
+try object.dg.ctype_pool.init(gpa);
const c_value: codegen.CValue = .{ .constant = Value.fromInterned(anon_decl) };
const alignment: Alignment = self.aligned_anon_decls.get(anon_decl) orelse .none;
@@ -297,13 +297,11 @@ fn updateAnonDecl(self: *C, zcu: *Zcu, i: usize) !void {
else => |e| return e,
};
// Free excess allocated memory for this Decl.
-object.dg.ctypes.shrinkAndFree(gpa, object.dg.ctypes.count());
+object.dg.ctype_pool.freeUnusedCapacity(gpa);
object.dg.anon_decl_deps.values()[i] = .{
.code = try self.addString(object.code.items),
.fwd_decl = try self.addString(object.dg.fwd_decl.items),
-.ctypes = object.dg.ctypes.move(),
+.ctype_pool = object.dg.ctype_pool.move(),
};
}
@@ -315,13 +313,13 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
const decl = zcu.declPtr(decl_index);
const gop = try self.decl_table.getOrPut(gpa, decl_index);
-if (!gop.found_existing) {
-gop.value_ptr.* = .{};
-}
-const ctypes = &gop.value_ptr.ctypes;
+errdefer _ = self.decl_table.pop();
+if (!gop.found_existing) gop.value_ptr.* = .{};
+const ctype_pool = &gop.value_ptr.ctype_pool;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
-ctypes.clearRetainingCapacity(gpa);
+try ctype_pool.init(gpa);
+ctype_pool.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
@@ -334,7 +332,8 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
.pass = .{ .decl = decl_index },
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = ctypes.*,
+.ctype_pool = ctype_pool.*,
+.scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -345,8 +344,10 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
-object.dg.ctypes.deinit(object.dg.gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ctype_pool.* = object.dg.ctype_pool.move();
+ctype_pool.freeUnusedCapacity(gpa);
+object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
@@ -357,12 +358,6 @@ pub fn updateDecl(self: *C, zcu: *Zcu, decl_index: InternPool.DeclIndex) !void {
},
else => |e| return e,
};
-ctypes.* = object.dg.ctypes.move();
-// Free excess allocated memory for this Decl.
-ctypes.shrinkAndFree(gpa, ctypes.count());
gop.value_ptr.code = try self.addString(object.code.items);
gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items);
}
@@ -416,7 +411,10 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
// This code path happens exclusively with -ofmt=c. The flush logic for
// emit-h is in `flushEmitH` below.
-var f: Flush = .{};
+var f: Flush = .{
+.ctype_pool = codegen.CType.Pool.empty,
+.lazy_ctype_pool = codegen.CType.Pool.empty,
+};
defer f.deinit(gpa);
const abi_defines = try self.abiDefines(zcu.getTarget());
@@ -443,7 +441,8 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
self.lazy_fwd_decl_buf.clearRetainingCapacity();
self.lazy_code_buf.clearRetainingCapacity();
-try self.flushErrDecls(zcu, &f.lazy_ctypes);
+try f.lazy_ctype_pool.init(gpa);
+try self.flushErrDecls(zcu, &f.lazy_ctype_pool);
// Unlike other backends, the .c code we are emitting has order-dependent decls.
// `CType`s, forward decls, and non-functions first.
@@ -471,15 +470,15 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
{
// We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes.
// This ensures that every lazy CType.Index exactly matches the global CType.Index.
-assert(f.ctypes.count() == 0);
-try self.flushCTypes(zcu, &f, .flush, f.lazy_ctypes);
+try f.ctype_pool.init(gpa);
+try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool);
for (self.anon_decls.keys(), self.anon_decls.values()) |anon_decl, decl_block| {
-try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, decl_block.ctypes);
+try self.flushCTypes(zcu, &f, .{ .anon = anon_decl }, &decl_block.ctype_pool);
}
for (self.decl_table.keys(), self.decl_table.values()) |decl_index, decl_block| {
-try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, decl_block.ctypes);
+try self.flushCTypes(zcu, &f, .{ .decl = decl_index }, &decl_block.ctype_pool);
}
}
@@ -510,11 +509,11 @@ pub fn flushModule(self: *C, arena: Allocator, prog_node: *std.Progress.Node) !v
}
const Flush = struct {
-ctypes: codegen.CType.Store = .{},
-ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{},
+ctype_pool: codegen.CType.Pool,
+ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .{},
ctypes_buf: std.ArrayListUnmanaged(u8) = .{},
-lazy_ctypes: codegen.CType.Store = .{},
+lazy_ctype_pool: codegen.CType.Pool,
lazy_fns: LazyFns = .{},
asm_buf: std.ArrayListUnmanaged(u8) = .{},
@@ -536,10 +535,11 @@ const Flush = struct {
f.all_buffers.deinit(gpa);
f.asm_buf.deinit(gpa);
f.lazy_fns.deinit(gpa);
-f.lazy_ctypes.deinit(gpa);
+f.lazy_ctype_pool.deinit(gpa);
f.ctypes_buf.deinit(gpa);
-f.ctypes_map.deinit(gpa);
-f.ctypes.deinit(gpa);
+assert(f.ctype_global_from_decl_map.items.len == 0);
+f.ctype_global_from_decl_map.deinit(gpa);
+f.ctype_pool.deinit(gpa);
}
};
@@ -552,88 +552,59 @@ fn flushCTypes(
zcu: *Zcu,
f: *Flush,
pass: codegen.DeclGen.Pass,
-decl_ctypes: codegen.CType.Store,
+decl_ctype_pool: *const codegen.CType.Pool,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
+const global_ctype_pool = &f.ctype_pool;
-const decl_ctypes_len = decl_ctypes.count();
-f.ctypes_map.clearRetainingCapacity();
-try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len);
-var global_ctypes = f.ctypes.promote(gpa);
-defer f.ctypes.demote(global_ctypes);
+const global_from_decl_map = &f.ctype_global_from_decl_map;
+assert(global_from_decl_map.items.len == 0);
+try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len);
+defer global_from_decl_map.clearRetainingCapacity();
var ctypes_buf = f.ctypes_buf.toManaged(gpa);
defer f.ctypes_buf = ctypes_buf.moveToUnmanaged();
const writer = ctypes_buf.writer();
-const slice = decl_ctypes.set.map.entries.slice();
-for (slice.items(.key), 0..) |decl_cty, decl_i| {
-const Context = struct {
-arena: Allocator,
-ctypes_map: []codegen.CType.Index,
-cached_hash: codegen.CType.Store.Set.Map.Hash,
-idx: codegen.CType.Index,
-pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash {
-return ctx.cached_hash;
+for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| {
+const PoolAdapter = struct {
+global_from_decl_map: []const codegen.CType,
+pub fn eql(pool_adapter: @This(), decl_ctype: codegen.CType, global_ctype: codegen.CType) bool {
+return if (decl_ctype.toPoolIndex()) |decl_pool_index|
+decl_pool_index < pool_adapter.global_from_decl_map.len and
+pool_adapter.global_from_decl_map[decl_pool_index].eql(global_ctype)
+else
+decl_ctype.index == global_ctype.index;
}
-pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool {
-return lhs.eqlContext(rhs, ctx);
-}
-pub fn eqlIndex(
-ctx: @This(),
-lhs_idx: codegen.CType.Index,
-rhs_idx: codegen.CType.Index,
-) bool {
-if (lhs_idx < codegen.CType.Tag.no_payload_count or
-rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx;
-const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count;
-if (lhs_i >= ctx.ctypes_map.len) return false;
-return ctx.ctypes_map[lhs_i] == rhs_idx;
-}
-pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index {
-if (idx < codegen.CType.Tag.no_payload_count) return idx;
-return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
+pub fn copy(pool_adapter: @This(), decl_ctype: codegen.CType) codegen.CType {
+return if (decl_ctype.toPoolIndex()) |decl_pool_index|
+pool_adapter.global_from_decl_map[decl_pool_index]
+else
+decl_ctype;
}
};
-const decl_idx = @as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + decl_i));
-const ctx = Context{
-.arena = global_ctypes.arena.allocator(),
-.ctypes_map = f.ctypes_map.items,
-.cached_hash = decl_ctypes.indexToHash(decl_idx),
-.idx = decl_idx,
-};
-const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{
-.store = &global_ctypes.set,
-});
-const global_idx =
-@as(codegen.CType.Index, @intCast(codegen.CType.Tag.no_payload_count + gop.index));
-f.ctypes_map.appendAssumeCapacity(global_idx);
-if (!gop.found_existing) {
-errdefer _ = global_ctypes.set.map.pop();
-gop.key_ptr.* = try decl_cty.copyContext(ctx);
-}
-if (std.debug.runtime_safety) {
-const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index];
-assert(global_cty == gop.key_ptr);
-assert(decl_cty.eqlContext(global_cty.*, ctx));
-assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set));
-}
+const decl_ctype = codegen.CType.fromPoolIndex(decl_ctype_pool_index);
+const global_ctype, const found_existing = try global_ctype_pool.getOrPutAdapted(
+gpa,
+decl_ctype_pool,
+decl_ctype,
+PoolAdapter{ .global_from_decl_map = global_from_decl_map.items },
+);
+global_from_decl_map.appendAssumeCapacity(global_ctype);
try codegen.genTypeDecl(
zcu,
writer,
-global_ctypes.set,
-global_idx,
+global_ctype_pool,
+global_ctype,
pass,
-decl_ctypes.set,
-decl_idx,
-gop.found_existing,
+decl_ctype_pool,
+decl_ctype,
+found_existing,
);
}
}
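
The old per-decl merge built a hashing Context over CType.Store indices; the new loop walks the decl pool in index order and interns each ctype into the global pool through a small PoolAdapter. Because each translation is appended to global_from_decl_map as the loop advances, any child the current ctype references has already been translated, so eql and copy resolve children by plain lookup (the bounds check in eql guards exactly this). A toy model of that merge loop with simplified stand-in types (mergePools and the string keys are illustrative, not the real API):

    const std = @import("std");

    fn mergePools(
        gpa: std.mem.Allocator,
        global: *std.StringArrayHashMapUnmanaged(void),
        decl_items: []const []const u8,
    ) ![]u32 {
        // map[i] is the global index of decl item i, filled in index order.
        const map = try gpa.alloc(u32, decl_items.len);
        for (decl_items, map) |item, *global_index| {
            const gop = try global.getOrPut(gpa, item);
            global_index.* = @intCast(gop.index);
        }
        return map;
    }

    test "shared items intern to one global index" {
        const gpa = std.testing.allocator;
        var global: std.StringArrayHashMapUnmanaged(void) = .{};
        defer global.deinit(gpa);
        const a = try mergePools(gpa, &global, &.{ "int", "ptr" });
        defer gpa.free(a);
        const b = try mergePools(gpa, &global, &.{ "ptr", "bool" });
        defer gpa.free(b);
        try std.testing.expectEqual(a[1], b[0]);
    }
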
-fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclError!void {
+fn flushErrDecls(self: *C, zcu: *Zcu, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
const gpa = self.base.comp.gpa;
const fwd_decl = &self.lazy_fwd_decl_buf;
@@ -648,7 +619,8 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr
.pass = .flush,
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = ctypes.*,
+.ctype_pool = ctype_pool.*,
+.scratch = .{},
.anon_decl_deps = self.anon_decls,
.aligned_anon_decls = self.aligned_anon_decls,
},
@@ -659,8 +631,10 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr
defer {
self.anon_decls = object.dg.anon_decl_deps;
self.aligned_anon_decls = object.dg.aligned_anon_decls;
-object.dg.ctypes.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ctype_pool.* = object.dg.ctype_pool.move();
+ctype_pool.freeUnusedCapacity(gpa);
+object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
@@ -668,15 +642,14 @@ fn flushErrDecls(self: *C, zcu: *Zcu, ctypes: *codegen.CType.Store) FlushDeclErr
error.AnalysisFail => unreachable,
else => |e| return e,
};
-ctypes.* = object.dg.ctypes.move();
}
fn flushLazyFn(
self: *C,
zcu: *Zcu,
mod: *Module,
-ctypes: *codegen.CType.Store,
+ctype_pool: *codegen.CType.Pool,
+lazy_ctype_pool: *const codegen.CType.Pool,
lazy_fn: codegen.LazyFnMap.Entry,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
@@ -693,7 +666,8 @@ fn flushLazyFn(
.pass = .flush,
.is_naked_fn = false,
.fwd_decl = fwd_decl.toManaged(gpa),
-.ctypes = ctypes.*,
+.ctype_pool = ctype_pool.*,
+.scratch = .{},
.anon_decl_deps = .{},
.aligned_anon_decls = .{},
},
@@ -706,17 +680,17 @@ fn flushLazyFn(
// `updateFunc()` does.
assert(object.dg.anon_decl_deps.count() == 0);
assert(object.dg.aligned_anon_decls.count() == 0);
-object.dg.ctypes.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+ctype_pool.* = object.dg.ctype_pool.move();
+ctype_pool.freeUnusedCapacity(gpa);
+object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
}
-codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) {
+codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
};
-ctypes.* = object.dg.ctypes.move();
}
fn flushLazyFns(
@@ -724,6 +698,7 @@ fn flushLazyFns(
zcu: *Zcu,
mod: *Module,
f: *Flush,
+lazy_ctype_pool: *const codegen.CType.Pool,
lazy_fns: codegen.LazyFnMap,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
@@ -734,7 +709,7 @@ fn flushLazyFns(
const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
if (gop.found_existing) continue;
gop.value_ptr.* = {};
-try self.flushLazyFn(zcu, mod, &f.lazy_ctypes, entry);
+try self.flushLazyFn(zcu, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
}
}
@@ -748,7 +723,7 @@ fn flushDeclBlock(
extern_symbol_name: InternPool.OptionalNullTerminatedString,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
-try self.flushLazyFns(zcu, mod, f, decl_block.lazy_fns);
+try self.flushLazyFns(zcu, mod, f, &decl_block.ctype_pool, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 1);
fwd_decl: {
if (extern_symbol_name.unwrap()) |name| {

View File

@@ -1223,7 +1223,7 @@ fn lowerConst(self: *Coff, name: []const u8, val: Value, required_alignment: Int
atom.getSymbolPtr(self).value = try self.allocateAtom(
atom_index,
atom.size,
-@intCast(required_alignment.toByteUnitsOptional().?),
+@intCast(required_alignment.toByteUnits().?),
);
errdefer self.freeAtom(atom_index);
@@ -1344,7 +1344,7 @@ fn updateLazySymbolAtom(
symbol.section_number = @as(coff.SectionNumber, @enumFromInt(section_index + 1));
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };
-const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits(0)));
+const vaddr = try self.allocateAtom(atom_index, code_len, @intCast(required_alignment.toByteUnits() orelse 0));
errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ name, vaddr });
@@ -1428,7 +1428,7 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
-const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits(0));
+const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;

View File

@@ -4051,7 +4051,7 @@ fn updateSectionSizes(self: *Elf) !void {
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
shdr.sh_size += padding + atom_ptr.size;
-shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
}

View File

@@ -208,7 +208,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
zig_object.debug_aranges_section_dirty = true;
}
}
-shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnitsOptional().?);
+shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before

View File

@@ -313,7 +313,7 @@ pub fn inputShdr(self: ZigObject, atom_index: Atom.Index, elf_file: *Elf) elf.El
shdr.sh_addr = 0;
shdr.sh_offset = 0;
shdr.sh_size = atom.size;
-shdr.sh_addralign = atom.alignment.toByteUnits(1);
+shdr.sh_addralign = atom.alignment.toByteUnits() orelse 1;
return shdr;
}

View File

@@ -330,7 +330,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
shdr.sh_size += padding + atom_ptr.size;
-shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits(1));
+shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
}

View File

@@ -63,7 +63,7 @@ fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
const offset = alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
shdr.sh_size += padding + size;
-shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits(1));
+shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
return offset;
}

View File

@@ -2060,7 +2060,7 @@ fn calcSectionSizes(self: *MachO) !void {
for (atoms.items) |atom_index| {
const atom = self.getAtom(atom_index).?;
-const atom_alignment = atom.alignment.toByteUnits(1);
+const atom_alignment = atom.alignment.toByteUnits() orelse 1;
const offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = offset - header.size;
atom.value = offset;

View File

@@ -380,7 +380,7 @@ fn calcSectionSizes(macho_file: *MachO) !void {
if (atoms.items.len == 0) continue;
for (atoms.items) |atom_index| {
const atom = macho_file.getAtom(atom_index).?;
-const atom_alignment = atom.alignment.toByteUnits(1);
+const atom_alignment = atom.alignment.toByteUnits() orelse 1;
const offset = mem.alignForward(u64, header.size, atom_alignment);
const padding = offset - header.size;
atom.value = offset;

View File

@@ -2263,7 +2263,7 @@ fn setupMemory(wasm: *Wasm) !void {
}
if (wasm.findGlobalSymbol("__tls_align")) |loc| {
const sym = loc.getSymbol(wasm);
-wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnitsOptional().?);
+wasm.wasm_globals.items[sym.index - wasm.imported_globals_count].init.i32_const = @intCast(segment.alignment.toByteUnits().?);
}
if (wasm.findGlobalSymbol("__tls_base")) |loc| {
const sym = loc.getSymbol(wasm);

View File

@@ -3544,11 +3544,7 @@ fn createModule(
// If the target is not overridden, use the parent's target. Of course,
// if this is the root module then we need to proceed to resolve the
// target.
-if (cli_mod.target_arch_os_abi == null and
-cli_mod.target_mcpu == null and
-create_module.dynamic_linker == null and
-create_module.object_format == null)
-{
+if (cli_mod.target_arch_os_abi == null and cli_mod.target_mcpu == null) {
if (parent) |p| break :t p.resolved_target;
}

View File

@@ -80,7 +80,7 @@ pub fn print(
inline .u64, .i64, .big_int => |x| try writer.print("{}", .{x}),
.lazy_align => |ty| if (opt_sema) |sema| {
const a = (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar;
-try writer.print("{}", .{a.toByteUnits(0)});
+try writer.print("{}", .{a.toByteUnits() orelse 0});
} else try writer.print("@alignOf({})", .{Type.fromInterned(ty).fmt(mod)}),
.lazy_size => |ty| if (opt_sema) |sema| {
const s = (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar;

View File

@@ -525,7 +525,7 @@ pub fn backendSupportsFeature(
.error_return_trace => use_llvm,
.is_named_enum_value => use_llvm,
.error_set_has_value => use_llvm or cpu_arch.isWasm(),
-.field_reordering => use_llvm,
+.field_reordering => ofmt == .c or use_llvm,
.safety_checked_instructions => use_llvm,
};
}
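
With the rewritten CType the C backend can lay out auto-layout structs in an order other than declaration order, so it now reports support for field_reordering. A small illustration of what the optimization buys (plain Zig, independent of the backend; extern structs are used because they must keep declaration order, and the sizes assume a 64-bit C ABI where u64 is 8-byte aligned):

    const std = @import("std");

    const DeclarationOrder = extern struct { a: u8, b: u64, c: u8 }; // 14 bytes of padding
    const Reordered = extern struct { b: u64, a: u8, c: u8 }; // 6 bytes of padding

    test "why field reordering helps" {
        try std.testing.expectEqual(@as(usize, 24), @sizeOf(DeclarationOrder));
        try std.testing.expectEqual(@as(usize, 16), @sizeOf(Reordered));
    }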

View File

@ -203,7 +203,7 @@ pub const Type = struct {
info.flags.alignment
else
Type.fromInterned(info.child).abiAlignment(mod);
try writer.print("align({d}", .{alignment.toByteUnits(0)});
try writer.print("align({d}", .{alignment.toByteUnits() orelse 0});
if (info.packed_offset.bit_offset != 0 or info.packed_offset.host_size != 0) {
try writer.print(":{d}:{d}", .{
@ -863,7 +863,7 @@ pub const Type = struct {
pub fn lazyAbiAlignment(ty: Type, mod: *Module) !Value {
switch (try ty.abiAlignmentAdvanced(mod, .lazy)) {
.val => |val| return val,
.scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits(0)),
.scalar => |x| return mod.intValue(Type.comptime_int, x.toByteUnits() orelse 0),
}
}
@ -905,7 +905,7 @@ pub const Type = struct {
return .{ .scalar = intAbiAlignment(int_type.bits, target) };
},
.ptr_type, .anyframe_type => {
return .{ .scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)) };
return .{ .scalar = ptrAbiAlignment(target) };
},
.array_type => |array_type| {
return Type.fromInterned(array_type.child).abiAlignmentAdvanced(mod, strat);
@ -920,6 +920,9 @@ pub const Type = struct {
const alignment = std.math.ceilPowerOfTwoAssert(u32, bytes);
return .{ .scalar = Alignment.fromByteUnits(alignment) };
},
.stage2_c => {
return Type.fromInterned(vector_type.child).abiAlignmentAdvanced(mod, strat);
},
.stage2_x86_64 => {
if (vector_type.child == .bool_type) {
if (vector_type.len > 256 and std.Target.x86.featureSetHas(target.cpu.features, .avx512f)) return .{ .scalar = .@"64" };
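
Note: the new `.stage2_c` branch gives a vector the ABI alignment of its element, since the C backend lowers vectors to plain arrays; the updated behavior test further down encodes the same rule as `@alignOf(u8)`, `@alignOf(u16)`, and so on. A sketch of the rule, using `@alignOf` as a stand-in for the compiler's recursive `abiAlignmentAdvanced` query:

    const std = @import("std");

    // No power-of-two widening of the total, unlike the stage2_x86_64 branch.
    fn cVectorAlignment(comptime Elem: type) comptime_int {
        return @alignOf(Elem);
    }

    test "C-backend vector alignment follows the element" {
        try std.testing.expectEqual(@alignOf(u8), cVectorAlignment(u8));
        try std.testing.expectEqual(@alignOf(u16), cVectorAlignment(u16));
    }
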
@ -966,12 +969,12 @@ pub const Type = struct {
.usize,
.isize,
=> return .{ .scalar = intAbiAlignment(target.ptrBitWidth(), target) },
.export_options,
.extern_options,
.type_info,
=> return .{
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
=> return .{ .scalar = ptrAbiAlignment(target) },
.c_char => return .{ .scalar = cTypeAlign(target, .char) },
.c_short => return .{ .scalar = cTypeAlign(target, .short) },
@ -1160,9 +1163,7 @@ pub const Type = struct {
const child_type = ty.optionalChild(mod);
switch (child_type.zigTypeTag(mod)) {
.Pointer => return .{
.scalar = Alignment.fromByteUnits(@divExact(target.ptrBitWidth(), 8)),
},
.Pointer => return .{ .scalar = ptrAbiAlignment(target) },
.ErrorSet => return abiAlignmentAdvanced(Type.anyerror, mod, strat),
.NoReturn => return .{ .scalar = .@"1" },
else => {},
@ -1274,6 +1275,10 @@ pub const Type = struct {
const total_bits = elem_bits * vector_type.len;
break :total_bytes (total_bits + 7) / 8;
},
.stage2_c => total_bytes: {
const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
break :total_bytes elem_bytes * vector_type.len;
},
.stage2_x86_64 => total_bytes: {
if (vector_type.child == .bool_type) break :total_bytes std.math.divCeil(u32, vector_type.len, 8) catch unreachable;
const elem_bytes: u32 = @intCast((try Type.fromInterned(vector_type.child).abiSizeAdvanced(mod, strat)).scalar);
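
Note: the matching `.stage2_c` size rule is element ABI size times length, with no rounding of the total up to a power of two. A minimal worked example of the arithmetic:

    const std = @import("std");

    fn cVectorByteSize(elem_bytes: u32, len: u32) u32 {
        return elem_bytes * len;
    }

    test "C-backend vector size is element size times length" {
        // e.g. a 3-element vector of u16 occupies 6 bytes under this rule.
        try std.testing.expectEqual(@as(u32, 6), cVectorByteSize(2, 3));
    }
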
@ -1527,15 +1532,19 @@ pub const Type = struct {
// guaranteed to be >= that of bool's (1 byte) the added size is exactly equal
// to the child type's ABI alignment.
return AbiSizeAdvanced{
.scalar = child_ty.abiAlignment(mod).toByteUnits(0) + payload_size,
.scalar = (child_ty.abiAlignment(mod).toByteUnits() orelse 0) + payload_size,
};
}
fn intAbiSize(bits: u16, target: Target) u64 {
pub fn ptrAbiAlignment(target: Target) Alignment {
return Alignment.fromNonzeroByteUnits(@divExact(target.ptrBitWidth(), 8));
}
pub fn intAbiSize(bits: u16, target: Target) u64 {
return intAbiAlignment(bits, target).forward(@as(u16, @intCast((@as(u17, bits) + 7) / 8)));
}
fn intAbiAlignment(bits: u16, target: Target) Alignment {
pub fn intAbiAlignment(bits: u16, target: Target) Alignment {
return Alignment.fromByteUnits(@min(
std.math.ceilPowerOfTwoPromote(u16, @as(u16, @intCast((@as(u17, bits) + 7) / 8))),
target.maxIntAlignment(),
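
Note: `ptrAbiAlignment`, `intAbiSize`, and `intAbiAlignment` become public helpers here so the rewritten C backend can share them. A runnable reduction of the math to plain integers, assuming the behavior shown in the hunk: pointer alignment is the pointer width in bytes, integer alignment is the byte size rounded up to a power of two and capped at the target's maximum, and integer size is the byte size aligned forward to that alignment (bits > 0 assumed).

    const std = @import("std");

    fn ptrAbiAlignmentBytes(ptr_bit_width: u16) u16 {
        return @divExact(ptr_bit_width, 8);
    }

    fn intAbiAlignmentBytes(bits: u16, max_int_alignment: u16) u16 {
        const byte_size: u16 = @intCast((@as(u17, bits) + 7) / 8);
        return @intCast(@min(
            std.math.ceilPowerOfTwoPromote(u16, byte_size),
            max_int_alignment,
        ));
    }

    fn intAbiSizeBytes(bits: u16, max_int_alignment: u16) u16 {
        const byte_size: u16 = @intCast((@as(u17, bits) + 7) / 8);
        return std.mem.alignForward(u16, byte_size, intAbiAlignmentBytes(bits, max_int_alignment));
    }

    test "pointer and integer ABI alignment and size" {
        try std.testing.expectEqual(@as(u16, 8), ptrAbiAlignmentBytes(64));
        // u48: 6 bytes, alignment rounds up to 8, so the ABI size is also 8.
        try std.testing.expectEqual(@as(u16, 8), intAbiAlignmentBytes(48, 16));
        try std.testing.expectEqual(@as(u16, 8), intAbiSizeBytes(48, 16));
        // u128 alignment is capped by the target's maximum integer alignment.
        try std.testing.expectEqual(@as(u16, 16), intAbiAlignmentBytes(128, 16));
    }
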
@ -1572,7 +1581,7 @@ pub const Type = struct {
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
(try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits(0),
(try elem_ty.abiAlignmentAdvanced(mod, strat)).scalar.toByteUnits() orelse 0,
(try elem_ty.abiSizeAdvanced(mod, strat)).scalar,
);
if (elem_size == 0) return 0;
@ -3016,26 +3025,15 @@ pub const Type = struct {
}
/// Returns none in the case of a tuple which uses the integer index as the field name.
pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
pub fn structFieldName(ty: Type, index: usize, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, index),
else => unreachable,
};
}
/// When struct types have no field names, the names are implicitly understood to be
/// strings corresponding to the field indexes in declaration order. It used to be the
/// case that a NullTerminatedString would be stored for each field in this case, however,
/// now, callers must handle the possibility that there are no names stored at all.
/// Here we fake the previous behavior. Probably something better could be done by examining
/// all the callsites of this function.
pub fn legacyStructFieldName(ty: Type, i: u32, mod: *Module) InternPool.NullTerminatedString {
return ty.structFieldName(i, mod).unwrap() orelse
mod.intern_pool.getOrPutStringFmt(mod.gpa, "{d}", .{i}) catch @panic("OOM");
}
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
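
Note: with `legacyStructFieldName` gone, callers of `structFieldName` handle the none case (unnamed tuple fields) themselves instead of interning a synthesized "0", "1", ... string per field. A sketch of the caller-side fallback pattern, with simplified types in place of `InternPool.OptionalNullTerminatedString`:

    const std = @import("std");

    // When a field has no stored name, render its index instead of allocating
    // a synthesized name, which is what legacyStructFieldName used to do.
    fn writeFieldName(writer: anytype, opt_name: ?[]const u8, index: usize) !void {
        if (opt_name) |name| {
            try writer.writeAll(name);
        } else {
            try writer.print("{d}", .{index});
        }
    }

    test "tuple fields fall back to their index" {
        var buf: [8]u8 = undefined;
        var fbs = std.io.fixedBufferStream(&buf);
        try writeFieldName(fbs.writer(), null, 1);
        try std.testing.expectEqualStrings("1", fbs.getWritten());
    }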

View File

@ -624,7 +624,6 @@ test "sub-aligned pointer field access" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/14904

View File

@ -1176,18 +1176,22 @@ test "@shlWithOverflow" {
test "alignment of vectors" {
try expect(@alignOf(@Vector(2, u8)) == switch (builtin.zig_backend) {
else => 2,
.stage2_c => @alignOf(u8),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u1)) == switch (builtin.zig_backend) {
else => 1,
.stage2_c => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(1, u1)) == switch (builtin.zig_backend) {
else => 1,
.stage2_c => @alignOf(u1),
.stage2_x86_64 => 16,
});
try expect(@alignOf(@Vector(2, u16)) == switch (builtin.zig_backend) {
else => 4,
.stage2_c => @alignOf(u16),
.stage2_x86_64 => 16,
});
}

View File

@ -1164,19 +1164,26 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
compile_c.addCSourceFile(.{
.file = these_tests.getEmittedBin(),
.flags = &.{
// TODO output -std=c89 compatible C code
// Tracking issue for making the C backend generate C89 compatible code:
// https://github.com/ziglang/zig/issues/19468
"-std=c99",
"-pedantic",
"-Werror",
// TODO stop violating these pedantic errors. spotted everywhere
// Tracking issue for making the C backend generate code
// that does not trigger warnings:
// https://github.com/ziglang/zig/issues/19467
// spotted everywhere
"-Wno-builtin-requires-header",
// TODO stop violating these pedantic errors. spotted on linux
"-Wno-address-of-packed-member",
// spotted on linux
"-Wno-gnu-folding-constant",
"-Wno-incompatible-function-pointer-types",
"-Wno-incompatible-pointer-types",
"-Wno-overlength-strings",
// TODO stop violating these pedantic errors. spotted on darwin
// spotted on darwin
"-Wno-dollar-in-identifier-extension",
"-Wno-absolute-value",
},
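
Note: the harness compiles the C emitted by the CBE with `-pedantic -Werror`, so any new warning class fails the build until the linked tracking issues are resolved. Below is a hypothetical, reduced build.zig showing the same round trip outside the test harness; the file name is illustrative and the std.Build API names are as of the Zig 0.12 development cycle, so treat this as a sketch rather than the harness's actual code.

    const std = @import("std");

    pub fn build(b: *std.Build) void {
        // Emit C instead of machine code by resolving a target with ofmt=c.
        const exe = b.addExecutable(.{
            .name = "behavior",
            .root_source_file = .{ .path = "behavior.zig" }, // hypothetical input
            .target = b.resolveTargetQuery(.{ .ofmt = .c }),
            .optimize = .Debug,
        });

        // Feed the emitted .c back through the C compiler, pedantically.
        const compile_c = b.addExecutable(.{
            .name = "behavior-c",
            .target = b.standardTargetOptions(.{}),
            .optimize = .Debug,
            .link_libc = true,
        });
        compile_c.addCSourceFile(.{
            .file = exe.getEmittedBin(),
            .flags = &.{ "-std=c99", "-pedantic", "-Werror" },
        });
        b.installArtifact(compile_c);
    }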

View File

@ -354,7 +354,7 @@ def InstRef_SummaryProvider(value, _=None):
def InstIndex_SummaryProvider(value, _=None):
return 'instructions[%d]' % value.unsigned
class Module_Decl__Module_Decl_Index_SynthProvider:
class zig_DeclIndex_SynthProvider:
def __init__(self, value, _=None): self.value = value
def update(self):
try:
@ -425,7 +425,7 @@ def InternPool_Find(thread):
for frame in thread:
ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool')
if ip: return ip
mod = frame.FindVariable('mod') or frame.FindVariable('module')
mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
if mod:
ip = mod.GetChildMemberWithName('intern_pool')
if ip: return ip
@ -617,7 +617,7 @@ type_tag_handlers = {
def value_Value_str_lit(payload):
for frame in payload.thread:
mod = frame.FindVariable('mod') or frame.FindVariable('module')
mod = frame.FindVariable('zcu') or frame.FindVariable('mod') or frame.FindVariable('module')
if mod: break
else: return
return '"%s"' % zig_String_decode(mod.GetChildMemberWithName('string_literal_bytes').GetChildMemberWithName('items'), payload.GetChildMemberWithName('index').unsigned, payload.GetChildMemberWithName('len').unsigned)
@ -714,7 +714,7 @@ def __lldb_init_module(debugger, _=None):
add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Index', identifier='InstIndex', summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)
add(debugger, category='zig.stage2', type='zig.DeclIndex', synth=True)
add(debugger, category='zig.stage2', type='Module.Namespace::Module.Namespace.Index', synth=True)
add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Index', synth=True)