Mirror of https://github.com/ziglang/zig.git (synced 2024-11-26 23:22:44 +00:00)
stage2: fix the build for 32-bit architectures
* Introduce a mechanism into Sema for emitting a compile error when an integer is too big and we need it to fit into a usize.
* Add `@intCast` where necessary.
* link/MachO: fix an unnecessary allocation when all that was happening was appending zeroes to an ArrayList.
* Add `error.Overflow` as a possible error to some codepaths, allowing usage of `math.intCast`.

Closes #9710
parent: 96e5f661bd
commit: 6afcaf4a08
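The recurring pattern in this diff: the compiler models sizes and lengths as u64, and they must be narrowed to the host's usize before being used as an allocation length or index. A minimal sketch of that narrowing, assuming the Zig of this era in which std.math.cast returns error.Overflow (later releases return an optional instead); toUsize is an illustrative name, not a function from this commit:

const std = @import("std");

// Convert a u64 to the host's usize, surfacing error.Overflow when the
// value cannot fit (e.g. a size over 4 GiB on a 32-bit compiler build).
fn toUsize(int: u64) error{Overflow}!usize {
    return std.math.cast(usize, int);
}

test "oversized values become error.Overflow on 32-bit hosts" {
    try std.testing.expectEqual(@as(usize, 42), try toUsize(42));
    if (@sizeOf(usize) < @sizeOf(u64)) {
        try std.testing.expectError(error.Overflow, toUsize(std.math.maxInt(u64)));
    }
}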
@@ -3661,7 +3661,8 @@ pub fn embedFile(mod: *Module, cur_file: *File, rel_file_path: []const u8) !*Emb
     defer file.close();
 
     const stat = try file.stat();
-    const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+    const size_usize = try std.math.cast(usize, stat.size);
+    const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
 
     log.debug("new embedFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, rel_file_path={s}", .{
         resolved_root_path, resolved_path, sub_file_path, rel_file_path,
@@ -3694,7 +3695,8 @@ pub fn detectEmbedFileUpdate(mod: *Module, embed_file: *EmbedFile) !void {
     if (unchanged_metadata) return;
 
     const gpa = mod.gpa;
-    const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), stat.size, 1, 0);
+    const size_usize = try std.math.cast(usize, stat.size);
+    const bytes = try file.readToEndAllocOptions(gpa, std.math.maxInt(u32), size_usize, 1, 0);
     gpa.free(embed_file.bytes);
     embed_file.bytes = bytes;
     embed_file.stat_size = stat.size;
src/Sema.zig (90 lines changed)
@@ -7020,7 +7020,7 @@ fn zirBitNot(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
         if (val.isUndef()) {
             return sema.addConstUndef(scalar_type);
         } else if (operand_type.zigTypeTag() == .Vector) {
-            const vec_len = operand_type.arrayLen();
+            const vec_len = try sema.usizeCast(block, operand_src, operand_type.arrayLen());
             var elem_val_buf: Value.ElemValueBuffer = undefined;
             const elems = try sema.arena.alloc(Value, vec_len);
             for (elems) |*elem, i| {
@@ -7073,7 +7073,9 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
         if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| {
-            const final_len = lhs_info.len + rhs_info.len;
+            const lhs_len = try sema.usizeCast(block, lhs_src, lhs_info.len);
+            const rhs_len = try sema.usizeCast(block, lhs_src, rhs_info.len);
+            const final_len = lhs_len + rhs_len;
             const final_len_including_sent = final_len + @boolToInt(res_sent != null);
             const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
             const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
@@ -7083,17 +7085,17 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
             const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
             {
-                var i: u64 = 0;
-                while (i < lhs_info.len) : (i += 1) {
+                var i: usize = 0;
+                while (i < lhs_len) : (i += 1) {
                     const val = try lhs_sub_val.elemValue(sema.arena, i);
                     buf[i] = try val.copy(anon_decl.arena());
                 }
             }
             {
-                var i: u64 = 0;
-                while (i < rhs_info.len) : (i += 1) {
+                var i: usize = 0;
+                while (i < rhs_len) : (i += 1) {
                     const val = try rhs_sub_val.elemValue(sema.arena, i);
-                    buf[lhs_info.len + i] = try val.copy(anon_decl.arena());
+                    buf[lhs_len + i] = try val.copy(anon_decl.arena());
                 }
             }
             const ty = if (res_sent) |rs| ty: {
@@ -7143,6 +7145,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const lhs = sema.resolveInst(extra.lhs);
     const lhs_ty = sema.typeOf(lhs);
+    const src: LazySrcLoc = inst_data.src();
     const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node };
     const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node };
 
@@ -7151,11 +7154,14 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const mulinfo = getArrayCatInfo(lhs_ty) orelse
         return sema.fail(block, lhs_src, "expected array, found '{}'", .{lhs_ty});
 
-    const final_len = std.math.mul(u64, mulinfo.len, factor) catch
+    const final_len_u64 = std.math.mul(u64, mulinfo.len, factor) catch
         return sema.fail(block, rhs_src, "operation results in overflow", .{});
-    const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
 
     if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| {
+        const final_len = try sema.usizeCast(block, src, final_len_u64);
+        const final_len_including_sent = final_len + @boolToInt(mulinfo.sentinel != null);
+        const lhs_len = try sema.usizeCast(block, lhs_src, mulinfo.len);
+
         const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
 
         var anon_decl = try block.startAnonDecl();
@@ -7176,18 +7182,18 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
 
         // Optimization for the common pattern of a single element repeated N times, such
         // as zero-filling a byte array.
-        const val = if (mulinfo.len == 1) blk: {
+        const val = if (lhs_len == 1) blk: {
             const elem_val = try lhs_sub_val.elemValue(sema.arena, 0);
             const copied_val = try elem_val.copy(anon_decl.arena());
             break :blk try Value.Tag.repeated.create(anon_decl.arena(), copied_val);
         } else blk: {
             // the actual loop
-            var i: u64 = 0;
+            var i: usize = 0;
             while (i < factor) : (i += 1) {
-                var j: u64 = 0;
-                while (j < mulinfo.len) : (j += 1) {
+                var j: usize = 0;
+                while (j < lhs_len) : (j += 1) {
                     const val = try lhs_sub_val.elemValue(sema.arena, j);
-                    buf[mulinfo.len * i + j] = try val.copy(anon_decl.arena());
+                    buf[lhs_len * i + j] = try val.copy(anon_decl.arena());
                 }
             }
             if (mulinfo.sentinel) |sent| {
@@ -8122,7 +8128,7 @@ fn analyzePtrArithmetic(
             return sema.addConstUndef(new_ptr_ty);
         }
 
-        const offset_int = offset_val.toUnsignedInt();
+        const offset_int = try sema.usizeCast(block, offset_src, offset_val.toUnsignedInt());
         if (ptr_val.getUnsignedInt()) |addr| {
             const target = sema.mod.getTarget();
             const ptr_child_ty = ptr_ty.childType();
@@ -10204,7 +10210,7 @@ fn checkComptimeVarStore(
 }
 
 const SimdBinOp = struct {
-    len: ?u64,
+    len: ?usize,
     /// Coerced to `result_ty`.
     lhs: Air.Inst.Ref,
     /// Coerced to `result_ty`.
@@ -10230,7 +10236,7 @@ fn checkSimdBinOp(
     const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
     const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
 
-    var vec_len: ?u64 = null;
+    var vec_len: ?usize = null;
     if (lhs_zig_ty_tag == .Vector and rhs_zig_ty_tag == .Vector) {
         const lhs_len = lhs_ty.arrayLen();
         const rhs_len = rhs_ty.arrayLen();
@@ -10244,7 +10250,7 @@ fn checkSimdBinOp(
             };
             return sema.failWithOwnedErrorMsg(msg);
         }
-        vec_len = lhs_len;
+        vec_len = try sema.usizeCast(block, lhs_src, lhs_len);
     } else if (lhs_zig_ty_tag == .Vector or rhs_zig_ty_tag == .Vector) {
         const msg = msg: {
             const msg = try sema.errMsg(block, src, "mixed scalar and vector operands: {} and {}", .{
@@ -12671,8 +12677,7 @@ fn storePtrVal(
     var kit = try beginComptimePtrMutation(sema, block, src, ptr_val);
     try sema.checkComptimeVarStore(block, src, kit.decl_ref_mut);
 
-    const target = sema.mod.getTarget();
-    const bitcasted_val = try operand_val.bitCast(operand_ty, kit.ty, target, sema.gpa, sema.arena);
+    const bitcasted_val = try sema.bitCastVal(block, src, operand_val, operand_ty, kit.ty);
 
     const arena = kit.beginArena(sema.gpa);
     defer kit.finishArena();
@@ -12724,7 +12729,9 @@ fn beginComptimePtrMutation(
                     const arena = parent.beginArena(sema.gpa);
                     defer parent.finishArena();
 
-                    const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel());
+                    const array_len_including_sentinel =
+                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                     mem.set(Value, elems, Value.undef);
 
                     parent.val.* = try Value.Tag.array.create(arena, elems);
@@ -12771,7 +12778,9 @@ fn beginComptimePtrMutation(
                     defer parent.finishArena();
 
                     const repeated_val = try parent.val.castTag(.repeated).?.data.copy(arena);
-                    const elems = try arena.alloc(Value, parent.ty.arrayLenIncludingSentinel());
+                    const array_len_including_sentinel =
+                        try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel());
+                    const elems = try arena.alloc(Value, array_len_including_sentinel);
                     mem.set(Value, elems, repeated_val);
 
                     parent.val.* = try Value.Tag.array.create(arena, elems);
@@ -12925,7 +12934,7 @@ fn beginComptimePtrLoad(
                 .root_val = parent.root_val,
                 .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
                 .ty = elem_ty,
-                .byte_offset = parent.byte_offset + elem_size * elem_ptr.index,
+                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + elem_size * elem_ptr.index),
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -12939,7 +12948,7 @@ fn beginComptimePtrLoad(
                 .root_val = parent.root_val,
                 .val = try parent.val.fieldValue(sema.arena, field_index),
                 .ty = parent.ty.structFieldType(field_index),
-                .byte_offset = parent.byte_offset + field_offset,
+                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset),
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -12990,15 +12999,34 @@ fn bitCast(
 ) CompileError!Air.Inst.Ref {
     // TODO validate the type size and other compile errors
     if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |val| {
-        const target = sema.mod.getTarget();
         const old_ty = sema.typeOf(inst);
-        const result_val = try val.bitCast(old_ty, dest_ty, target, sema.gpa, sema.arena);
+        const result_val = try sema.bitCastVal(block, inst_src, val, old_ty, dest_ty);
         return sema.addConstant(dest_ty, result_val);
     }
     try sema.requireRuntimeBlock(block, inst_src);
     return block.addBitCast(dest_ty, inst);
 }
 
+pub fn bitCastVal(
+    sema: *Sema,
+    block: *Block,
+    src: LazySrcLoc,
+    val: Value,
+    old_ty: Type,
+    new_ty: Type,
+) !Value {
+    if (old_ty.eql(new_ty)) return val;
+
+    // For types with well-defined memory layouts, we serialize them a byte buffer,
+    // then deserialize to the new type.
+    const target = sema.mod.getTarget();
+    const abi_size = try sema.usizeCast(block, src, old_ty.abiSize(target));
+    const buffer = try sema.gpa.alloc(u8, abi_size);
+    defer sema.gpa.free(buffer);
+    val.writeToMemory(old_ty, target, buffer);
+    return Value.readFromMemory(new_ty, target, buffer, sema.arena);
+}
+
 fn coerceArrayPtrToSlice(
     sema: *Sema,
     block: *Block,
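The new Sema.bitCastVal above performs comptime bit-casts by serializing through a byte buffer sized from the old type's ABI size, which is why that size must first fit in a usize. A standalone sketch of the same round-trip idea, with plain std.mem helpers standing in for the compiler's Value.writeToMemory/readFromMemory (bitCastViaBuffer is an illustrative function, not compiler code):

const std = @import("std");

// Reinterpret the bytes of `val` as `New`, going through an explicit
// byte buffer the way Sema.bitCastVal does (illustrative only).
fn bitCastViaBuffer(comptime Old: type, comptime New: type, val: Old) New {
    comptime std.debug.assert(@sizeOf(New) <= @sizeOf(Old));
    var buffer: [@sizeOf(Old)]u8 = undefined;
    // "writeToMemory": serialize the old value into the buffer.
    std.mem.copy(u8, &buffer, std.mem.asBytes(&val));
    // "readFromMemory": deserialize the buffer bytes as the new type.
    var result: New = undefined;
    std.mem.copy(u8, std.mem.asBytes(&result), buffer[0..@sizeOf(New)]);
    return result;
}

test "u32 round-trips through bytes as four u8s" {
    const out = bitCastViaBuffer(u32, [4]u8, 0xAABBCCDD);
    try std.testing.expectEqual(@as(usize, 4), out.len);
}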
@@ -15103,7 +15131,7 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
         // The Type it is stored as in the compiler has an ABI size greater or equal to
         // the ABI size of `load_ty`. We may perform the bitcast based on
         // `parent.val` alone (more efficient).
-        return try parent.val.bitCast(parent.ty, load_ty, target, sema.gpa, sema.arena);
+        return try sema.bitCastVal(block, src, parent.val, parent.ty, load_ty);
     }
 
     // The Type it is stored as in the compiler has an ABI size less than the ABI size
@@ -15111,3 +15139,11 @@ fn pointerDeref(sema: *Sema, block: *Block, src: LazySrcLoc, ptr_val: Value, ptr
     // and reinterpreted starting at `parent.byte_offset`.
     return sema.fail(block, src, "TODO: implement bitcast with index offset", .{});
 }
+
+/// Used to convert a u64 value to a usize value, emitting a compile error if the number
+/// is too big to fit.
+fn usizeCast(sema: *Sema, block: *Block, src: LazySrcLoc, int: u64) CompileError!usize {
+    return std.math.cast(usize, int) catch |err| switch (err) {
+        error.Overflow => return sema.fail(block, src, "expression produces integer value {d} which is too big for this compiler implementation to handle", .{int}),
+    };
+}
@@ -538,6 +538,8 @@ const InnerError = error{
     AnalysisFail,
     /// Failed to emit MIR instructions to binary/textual representation.
    EmitFail,
+    /// Compiler implementation could not handle a large integer.
+    Overflow,
 };
 
 pub fn deinit(self: *Self) void {
@@ -877,7 +879,8 @@ pub fn gen(self: *Self, ty: Type, val: Value) InnerError!Result {
         },
         .Struct => {
             // TODO write the fields for real
-            try self.code.writer().writeByteNTimes(0xaa, ty.abiSize(self.target));
+            const abi_size = try std.math.cast(usize, ty.abiSize(self.target));
+            try self.code.writer().writeByteNTimes(0xaa, abi_size);
             return Result{ .appended = {} };
         },
         else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}),
@@ -42,6 +42,7 @@ relocs: std.ArrayListUnmanaged(Reloc) = .{},
 
 const InnerError = error{
     OutOfMemory,
+    Overflow,
     EmitFail,
 };
 
@@ -174,10 +175,11 @@ fn fixupRelocs(emit: *Emit) InnerError!void {
     // possible resolution, i.e., 8bit, and iteratively converge on the minimum required resolution
     // until the entire decl is correctly emitted with all JMP/CALL instructions within range.
     for (emit.relocs.items) |reloc| {
+        const offset = try math.cast(usize, reloc.offset);
         const target = emit.code_offset_mapping.get(reloc.target) orelse
             return emit.fail("JMP/CALL relocation target not found!", .{});
         const disp = @intCast(i32, @intCast(i64, target) - @intCast(i64, reloc.source + reloc.length));
-        mem.writeIntLittle(i32, emit.code.items[reloc.offset..][0..4], disp);
+        mem.writeIntLittle(i32, emit.code.items[offset..][0..4], disp);
     }
 }
 
@@ -207,7 +207,7 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
                     // "Otherwise class SSE is used."
                     result[result_i] = .sse;
                 }
-                byte_i += field_size;
+                byte_i += @intCast(usize, field_size);
                 if (byte_i == 8) {
                     byte_i = 0;
                     result_i += 1;
@@ -222,7 +222,7 @@ pub fn classifySystemV(ty: Type, target: Target) [8]Class {
                 result_i += field_class.len;
                 // If there are any bytes leftover, we have to try to combine
                 // the next field with them.
-                byte_i = field_size % 8;
+                byte_i = @intCast(usize, field_size % 8);
                 if (byte_i != 0) result_i -= 1;
             }
         }
@@ -37,6 +37,7 @@ pub const Result = union(enum) {
 
 pub const GenerateSymbolError = error{
     OutOfMemory,
+    Overflow,
     /// A Decl that this symbol depends on had a semantic analysis failure.
     AnalysisFail,
 };
@@ -289,7 +290,8 @@ pub fn generateSymbol(
             const field_vals = typed_value.val.castTag(.@"struct").?.data;
             _ = field_vals; // TODO write the fields for real
             const target = bin_file.options.target;
-            try code.writer().writeByteNTimes(0xaa, typed_value.ty.abiSize(target));
+            const abi_size = try math.cast(usize, typed_value.ty.abiSize(target));
+            try code.writer().writeByteNTimes(0xaa, abi_size);
             return Result{ .appended = {} };
         },
         else => |t| {
@@ -1006,10 +1006,18 @@ pub const DeclGen = struct {
             const int_info = tv.ty.intInfo(target);
             const llvm_type = self.context.intType(int_info.bits);
 
-            const unsigned_val = if (bigint.limbs.len == 1)
-                llvm_type.constInt(bigint.limbs[0], .False)
-            else
-                llvm_type.constIntOfArbitraryPrecision(@intCast(c_uint, bigint.limbs.len), bigint.limbs.ptr);
+            const unsigned_val = v: {
+                if (bigint.limbs.len == 1) {
+                    break :v llvm_type.constInt(bigint.limbs[0], .False);
+                }
+                if (@sizeOf(usize) == @sizeOf(u64)) {
+                    break :v llvm_type.constIntOfArbitraryPrecision(
+                        @intCast(c_uint, bigint.limbs.len),
+                        bigint.limbs.ptr,
+                    );
+                }
+                @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
+            };
             if (!bigint.positive) {
                 return llvm.constNeg(unsigned_val);
             }
@@ -1026,10 +1034,18 @@ pub const DeclGen = struct {
             const int_info = tv.ty.intInfo(target);
             const llvm_type = self.context.intType(int_info.bits);
 
-            const unsigned_val = if (bigint.limbs.len == 1)
-                llvm_type.constInt(bigint.limbs[0], .False)
-            else
-                llvm_type.constIntOfArbitraryPrecision(@intCast(c_uint, bigint.limbs.len), bigint.limbs.ptr);
+            const unsigned_val = v: {
+                if (bigint.limbs.len == 1) {
+                    break :v llvm_type.constInt(bigint.limbs[0], .False);
+                }
+                if (@sizeOf(usize) == @sizeOf(u64)) {
+                    break :v llvm_type.constIntOfArbitraryPrecision(
+                        @intCast(c_uint, bigint.limbs.len),
+                        bigint.limbs.ptr,
+                    );
+                }
+                @panic("TODO implement bigint to llvm int for 32-bit compiler builds");
+            };
             if (!bigint.positive) {
                 return llvm.constNeg(unsigned_val);
             }
@@ -1144,7 +1160,7 @@ pub const DeclGen = struct {
             const val = tv.val.castTag(.repeated).?.data;
             const elem_ty = tv.ty.elemType();
             const sentinel = tv.ty.sentinel();
-            const len = tv.ty.arrayLen();
+            const len = @intCast(usize, tv.ty.arrayLen());
             const len_including_sent = len + @boolToInt(sentinel != null);
             const gpa = self.gpa;
             const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
@@ -1317,7 +1333,7 @@ pub const DeclGen = struct {
                 .bytes => {
                     // Note, sentinel is not stored even if the type has a sentinel.
                     const bytes = tv.val.castTag(.bytes).?.data;
-                    const vector_len = tv.ty.arrayLen();
+                    const vector_len = @intCast(usize, tv.ty.arrayLen());
                     assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
 
                     const elem_ty = tv.ty.elemType();
|
||||
// Note, sentinel is not stored even if the type has a sentinel.
|
||||
// The value includes the sentinel in those cases.
|
||||
const elem_vals = tv.val.castTag(.array).?.data;
|
||||
const vector_len = tv.ty.arrayLen();
|
||||
const vector_len = @intCast(usize, tv.ty.arrayLen());
|
||||
assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
|
||||
const elem_ty = tv.ty.elemType();
|
||||
const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
|
||||
@ -1360,7 +1376,7 @@ pub const DeclGen = struct {
|
||||
// Note, sentinel is not stored even if the type has a sentinel.
|
||||
const val = tv.val.castTag(.repeated).?.data;
|
||||
const elem_ty = tv.ty.elemType();
|
||||
const len = tv.ty.arrayLen();
|
||||
const len = @intCast(usize, tv.ty.arrayLen());
|
||||
const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
|
||||
defer self.gpa.free(llvm_elems);
|
||||
for (llvm_elems) |*elem| {
|
||||
|
src/link.zig (45 lines changed)
@@ -350,9 +350,39 @@ pub const File = struct {
         }
     }
 
+    pub const UpdateDeclError = error{
+        OutOfMemory,
+        Overflow,
+        Underflow,
+        FileTooBig,
+        InputOutput,
+        FilesOpenedWithWrongFlags,
+        IsDir,
+        NoSpaceLeft,
+        Unseekable,
+        PermissionDenied,
+        FileBusy,
+        SystemResources,
+        OperationAborted,
+        BrokenPipe,
+        ConnectionResetByPeer,
+        ConnectionTimedOut,
+        NotOpenForReading,
+        WouldBlock,
+        AccessDenied,
+        Unexpected,
+        DiskQuota,
+        NotOpenForWriting,
+        AnalysisFail,
+        CodegenFail,
+        EmitFail,
+        NameTooLong,
+        CurrentWorkingDirectoryUnlinked,
+    };
+
     /// May be called before or after updateDeclExports but must be called
     /// after allocateDeclIndexes for any given Decl.
-    pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
+    pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
         log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty });
         assert(decl.has_tv);
         switch (base.tag) {
@@ -370,7 +400,7 @@ pub const File = struct {
 
     /// May be called before or after updateDeclExports but must be called
     /// after allocateDeclIndexes for any given Decl.
-    pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
+    pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
         log.debug("updateFunc {*} ({s}), type={}", .{
             func.owner_decl, func.owner_decl.name, func.owner_decl.ty,
         });
@@ -387,7 +417,7 @@ pub const File = struct {
         }
     }
 
-    pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
+    pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) UpdateDeclError!void {
         log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
             decl, decl.name, decl.src_line + 1,
         });
@@ -407,12 +437,17 @@ pub const File = struct {
     /// TODO we're transitioning to deleting this function and instead having
     /// each linker backend notice the first time updateDecl or updateFunc is called, or
     /// a callee referenced from AIR.
-    pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
+    pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) error{OutOfMemory}!void {
         log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
         switch (base.tag) {
             .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
             .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
-            .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
+            .macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl) catch |err| switch (err) {
+                // remap this error code because we are transitioning away from
+                // `allocateDeclIndexes`.
+                error.Overflow => return error.OutOfMemory,
+                error.OutOfMemory => return error.OutOfMemory,
+            },
             .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl),
             .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl),
             .c, .spirv => {},
@@ -1788,19 +1788,18 @@ pub fn getMatchingSection(self: *MachO, sect: macho.section_64) !?MatchingSectio
 }
 
 pub fn createEmptyAtom(self: *MachO, local_sym_index: u32, size: u64, alignment: u32) !*Atom {
-    const code = try self.base.allocator.alloc(u8, size);
-    defer self.base.allocator.free(code);
-    mem.set(u8, code, 0);
-
+    const size_usize = try math.cast(usize, size);
     const atom = try self.base.allocator.create(Atom);
     errdefer self.base.allocator.destroy(atom);
     atom.* = Atom.empty;
     atom.local_sym_index = local_sym_index;
     atom.size = size;
     atom.alignment = alignment;
-    try atom.code.appendSlice(self.base.allocator, code);
-    try self.managed_atoms.append(self.base.allocator, atom);
+
+    try atom.code.resize(self.base.allocator, size_usize);
+    mem.set(u8, atom.code.items, 0);
 
+    try self.managed_atoms.append(self.base.allocator, atom);
     return atom;
 }
 
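This createEmptyAtom hunk is the "unnecessary allocation" fix from the commit message: instead of allocating a scratch buffer, zeroing it, and appending it, the atom's ArrayList is resized in place and its storage zeroed. A reduced before/after sketch, assuming this era's *Allocator and mem.set conventions (the fillZeroes* names are illustrative, not linker API):

const std = @import("std");

// Before: allocate a temporary buffer just to append zeroes.
fn fillZeroesOld(gpa: *std.mem.Allocator, list: *std.ArrayListUnmanaged(u8), n: usize) !void {
    const scratch = try gpa.alloc(u8, n); // extra heap allocation
    defer gpa.free(scratch);
    std.mem.set(u8, scratch, 0);
    try list.appendSlice(gpa, scratch);
}

// After: grow the list directly and zero the new bytes in place.
fn fillZeroesNew(gpa: *std.mem.Allocator, list: *std.ArrayListUnmanaged(u8), n: usize) !void {
    const old_len = list.items.len;
    try list.resize(gpa, old_len + n);
    std.mem.set(u8, list.items[old_len..], 0);
}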
@@ -1872,9 +1871,10 @@ fn writeAtoms(self: *MachO) !void {
     while (true) {
         if (atom.dirty or self.invalidate_relocs) {
             const atom_sym = self.locals.items[atom.local_sym_index];
-            const padding_size: u64 = if (atom.next) |next| blk: {
+            const padding_size: usize = if (atom.next) |next| blk: {
                 const next_sym = self.locals.items[next.local_sym_index];
-                break :blk next_sym.n_value - (atom_sym.n_value + atom.size);
+                const size = next_sym.n_value - (atom_sym.n_value + atom.size);
+                break :blk try math.cast(usize, size);
             } else 0;
 
             log.debug("  (adding atom {s} to buffer: {})", .{ self.getString(atom_sym.n_strx), atom_sym });
@@ -71,9 +71,9 @@ entry_val: ?u64 = null,
 got_len: usize = 0,
 // A list of all the free got indexes, so when making a new decl
 // don't make a new one, just use one from here.
-got_index_free_list: std.ArrayListUnmanaged(u64) = .{},
+got_index_free_list: std.ArrayListUnmanaged(usize) = .{},
 
-syms_index_free_list: std.ArrayListUnmanaged(u64) = .{},
+syms_index_free_list: std.ArrayListUnmanaged(usize) = .{},
 
 const Bases = struct {
     text: u64,
@@ -356,8 +356,8 @@ pub fn changeLine(l: *std.ArrayList(u8), delta_line: i32) !void {
     }
 }
 
-fn declCount(self: *Plan9) u64 {
-    var fn_decl_count: u64 = 0;
+fn declCount(self: *Plan9) usize {
+    var fn_decl_count: usize = 0;
     var itf_files = self.fn_decl_table.iterator();
     while (itf_files.next()) |ent| {
         // get the submap
@@ -995,24 +995,6 @@ pub const Value = extern union {
         };
     }
 
-    pub fn bitCast(
-        val: Value,
-        old_ty: Type,
-        new_ty: Type,
-        target: Target,
-        gpa: *Allocator,
-        arena: *Allocator,
-    ) !Value {
-        if (old_ty.eql(new_ty)) return val;
-
-        // For types with well-defined memory layouts, we serialize them a byte buffer,
-        // then deserialize to the new type.
-        const buffer = try gpa.alloc(u8, old_ty.abiSize(target));
-        defer gpa.free(buffer);
-        val.writeToMemory(old_ty, target, buffer);
-        return Value.readFromMemory(new_ty, target, buffer, arena);
-    }
-
     pub fn writeToMemory(val: Value, ty: Type, target: Target, buffer: []u8) void {
         switch (ty.zigTypeTag()) {
             .Int => {
@@ -1039,7 +1021,7 @@ pub const Value = extern union {
             .Array, .Vector => {
                 const len = ty.arrayLen();
                 const elem_ty = ty.childType();
-                const elem_size = elem_ty.abiSize(target);
+                const elem_size = @intCast(usize, elem_ty.abiSize(target));
                 var elem_i: usize = 0;
                 var elem_value_buf: ElemValueBuffer = undefined;
                 var buf_off: usize = 0;
@@ -2494,7 +2476,7 @@ pub const Value = extern union {
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space);
-        const shift = rhs.toUnsignedInt();
+        const shift = @intCast(usize, rhs.toUnsignedInt());
         const limbs = try allocator.alloc(
             std.math.big.Limb,
             lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@@ -2521,7 +2503,7 @@ pub const Value = extern union {
 
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space);
-        const shift = rhs.toUnsignedInt();
+        const shift = @intCast(usize, rhs.toUnsignedInt());
         const limbs = try arena.alloc(
             std.math.big.Limb,
             std.math.big.int.calcTwosCompLimbCount(info.bits),
@@ -2540,7 +2522,7 @@ pub const Value = extern union {
         // resorting to BigInt first.
         var lhs_space: Value.BigIntSpace = undefined;
         const lhs_bigint = lhs.toBigInt(&lhs_space);
-        const shift = rhs.toUnsignedInt();
+        const shift = @intCast(usize, rhs.toUnsignedInt());
         const limbs = try allocator.alloc(
             std.math.big.Limb,
             lhs_bigint.limbs.len - (shift / (@sizeOf(std.math.big.Limb) * 8)),