mirror of https://github.com/ziglang/zig.git
synced 2024-11-27 07:32:44 +00:00

commit 343987cd05
parent ef83358eb6

    remove @inlineCall from zig
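This commit removes the @inlineCall builtin in favor of the more general @call, which takes a std.builtin.CallOptions value plus an argument tuple. A minimal sketch of the mechanical rewrite applied throughout the tree (the add helper is the one from the langref example removed below):

    const expect = @import("std").testing.expect;

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }

    test "guaranteed-inline call, before and after" {
        // Old builtin, removed by this commit:
        //   expect(@inlineCall(add, 3, 9) == 12);
        // New form: the modifier requests inlining; arguments travel as a tuple.
        expect(@call(.{ .modifier = .always_inline }, add, .{ 3, 9 }) == 12);
    }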
@@ -7492,27 +7492,6 @@ test "@hasDecl" {
     {#see_also|Compile Variables|@embedFile#}
     {#header_close#}
 
-    {#header_open|@inlineCall#}
-    <pre>{#syntax#}@inlineCall(function: X, args: ...) Y{#endsyntax#}</pre>
-    <p>
-    This calls a function, in the same way that invoking an expression with parentheses does:
-    </p>
-    {#code_begin|test#}
-    const assert = @import("std").debug.assert;
-
-    test "inline function call" {
-        assert(@inlineCall(add, 3, 9) == 12);
-    }
-
-    fn add(a: i32, b: i32) i32 { return a + b; }
-    {#code_end#}
-    <p>
-    Unlike a normal function call, however, {#syntax#}@inlineCall{#endsyntax#} guarantees that the call
-    will be inlined. If the call cannot be inlined, a compile error is emitted.
-    </p>
-    {#see_also|@call#}
-    {#header_close#}
-
     {#header_open|@intCast#}
     <pre>{#syntax#}@intCast(comptime DestType: type, int: var) DestType{#endsyntax#}</pre>
     <p>
@@ -92,7 +92,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
 
         // Help the optimizer see that hashing an int is easy by inlining!
         // TODO Check if the situation is better after #561 is resolved.
-        .Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),
+        .Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),
 
         .Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),

@@ -101,7 +101,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
         .ErrorSet => hash(hasher, @errorToInt(key), strat),
         .AnyFrame, .Fn => hash(hasher, @ptrToInt(key), strat),
 
-        .Pointer => @inlineCall(hashPointer, hasher, key, strat),
+        .Pointer => @call(.{ .modifier = .always_inline }, hashPointer, .{ hasher, key, strat }),
 
         .Optional => if (key) |k| hash(hasher, k, strat),
@@ -197,7 +197,7 @@ pub const CityHash64 = struct {
     }
 
     fn hashLen16(u: u64, v: u64) u64 {
-        return @inlineCall(hash128To64, u, v);
+        return @call(.{ .modifier = .always_inline }, hash128To64, .{ u, v });
     }
 
     fn hashLen16Mul(low: u64, high: u64, mul: u64) u64 {

@@ -210,7 +210,7 @@ pub const CityHash64 = struct {
     }
 
     fn hash128To64(low: u64, high: u64) u64 {
-        return @inlineCall(hashLen16Mul, low, high, 0x9ddfea08eb382d69);
+        return @call(.{ .modifier = .always_inline }, hashLen16Mul, .{ low, high, 0x9ddfea08eb382d69 });
     }
 
     fn hashLen0To16(str: []const u8) u64 {

@@ -291,7 +291,14 @@ pub const CityHash64 = struct {
     }
 
     fn weakHashLen32WithSeeds(ptr: [*]const u8, a: u64, b: u64) WeakPair {
-        return @inlineCall(weakHashLen32WithSeedsHelper, fetch64(ptr), fetch64(ptr + 8), fetch64(ptr + 16), fetch64(ptr + 24), a, b);
+        return @call(.{ .modifier = .always_inline }, weakHashLen32WithSeedsHelper, .{
+            fetch64(ptr),
+            fetch64(ptr + 8),
+            fetch64(ptr + 16),
+            fetch64(ptr + 24),
+            a,
+            b,
+        });
     }
 
     pub fn hash(str: []const u8) u64 {

@@ -339,7 +346,7 @@ pub const CityHash64 = struct {
     }
 
     pub fn hashWithSeed(str: []const u8, seed: u64) u64 {
-        return @inlineCall(Self.hashWithSeeds, str, k2, seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeeds, .{ str, k2, seed });
     }
 
     pub fn hashWithSeeds(str: []const u8, seed0: u64, seed1: u64) u64 {
@@ -8,7 +8,7 @@ pub const Murmur2_32 = struct {
     const Self = @This();
 
     pub fn hash(str: []const u8) u32 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
     }
 
     pub fn hashWithSeed(str: []const u8, seed: u32) u32 {

@@ -44,7 +44,7 @@ pub const Murmur2_32 = struct {
     }
 
     pub fn hashUint32(v: u32) u32 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
     }
 
     pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {

@@ -64,7 +64,7 @@ pub const Murmur2_32 = struct {
     }
 
     pub fn hashUint64(v: u64) u32 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
     }
 
     pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {

@@ -93,7 +93,7 @@ pub const Murmur2_64 = struct {
     const Self = @This();
 
     pub fn hash(str: []const u8) u64 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
     }
 
     pub fn hashWithSeed(str: []const u8, seed: u64) u64 {

@@ -127,7 +127,7 @@ pub const Murmur2_64 = struct {
     }
 
     pub fn hashUint32(v: u32) u64 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
     }
 
     pub fn hashUint32WithSeed(v: u32, seed: u32) u64 {

@@ -144,7 +144,7 @@ pub const Murmur2_64 = struct {
     }
 
     pub fn hashUint64(v: u64) u64 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
     }
 
     pub fn hashUint64WithSeed(v: u64, seed: u32) u64 {

@@ -172,7 +172,7 @@ pub const Murmur3_32 = struct {
     }
 
     pub fn hash(str: []const u8) u32 {
-        return @inlineCall(Self.hashWithSeed, str, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashWithSeed, .{ str, default_seed });
     }
 
     pub fn hashWithSeed(str: []const u8, seed: u32) u32 {

@@ -220,7 +220,7 @@ pub const Murmur3_32 = struct {
     }
 
     pub fn hashUint32(v: u32) u32 {
-        return @inlineCall(Self.hashUint32WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint32WithSeed, .{ v, default_seed });
    }
 
     pub fn hashUint32WithSeed(v: u32, seed: u32) u32 {

@@ -246,7 +246,7 @@ pub const Murmur3_32 = struct {
     }
 
     pub fn hashUint64(v: u64) u32 {
-        return @inlineCall(Self.hashUint64WithSeed, v, default_seed);
+        return @call(.{ .modifier = .always_inline }, Self.hashUint64WithSeed, .{ v, default_seed });
     }
 
     pub fn hashUint64WithSeed(v: u64, seed: u32) u32 {
@@ -11,7 +11,7 @@ const testing = std.testing;
 const math = std.math;
 const mem = std.mem;
 
-const Endian = @import("builtin").Endian;
+const Endian = std.builtin.Endian;
 
 pub fn SipHash64(comptime c_rounds: usize, comptime d_rounds: usize) type {
     return SipHash(u64, c_rounds, d_rounds);

@@ -62,7 +62,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
 
         var off: usize = 0;
         while (off < b.len) : (off += 8) {
-            @inlineCall(self.round, b[off .. off + 8]);
+            @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 8]});
         }
 
         self.msg_len +%= @truncate(u8, b.len);

@@ -84,9 +84,12 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
             self.v2 ^= 0xff;
         }
 
+        // TODO this is a workaround, should be able to supply the value without a separate variable
+        const inl = std.builtin.CallOptions{ .modifier = .always_inline };
+
         comptime var i: usize = 0;
         inline while (i < d_rounds) : (i += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
         }
 
         const b1 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;

@@ -98,7 +101,7 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
 
         comptime var j: usize = 0;
         inline while (j < d_rounds) : (j += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
         }
 
         const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;

@@ -111,9 +114,11 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
         const m = mem.readIntSliceLittle(u64, b[0..]);
         self.v3 ^= m;
 
+        // TODO this is a workaround, should be able to supply the value without a separate variable
+        const inl = std.builtin.CallOptions{ .modifier = .always_inline };
         comptime var i: usize = 0;
         inline while (i < c_rounds) : (i += 1) {
-            @inlineCall(sipRound, self);
+            @call(inl, sipRound, .{self});
         }
 
         self.v0 ^= m;

@@ -140,8 +145,8 @@ fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_round
         const aligned_len = input.len - (input.len % 8);
 
         var c = Self.init(key);
-        @inlineCall(c.update, input[0..aligned_len]);
-        return @inlineCall(c.final, input[aligned_len..]);
+        @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+        return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
     }
 };
}
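A note on the "const inl" lines in the siphash hunks above: the TODO comments mark them as a workaround. At this point the options argument of @call could not always be supplied as an inline literal at these call sites, so the std.builtin.CallOptions value is bound to a constant once and reused inside the unrolled rounds. A reduced sketch of the pattern, with a trivial stand-in round function:

    const std = @import("std");

    fn round(x: *u32) void {
        x.* +%= 1;
    }

    test "hoisted CallOptions constant" {
        // Workaround from the siphash change: bind the options once...
        const inl = std.builtin.CallOptions{ .modifier = .always_inline };
        var v: u32 = 0;
        comptime var i: usize = 0;
        inline while (i < 4) : (i += 1) {
            // ...then reuse the constant at each unrolled call site.
            @call(inl, round, .{&v});
        }
        std.testing.expect(v == 4);
    }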
@@ -65,7 +65,7 @@ const WyhashStateless = struct {
 
        var off: usize = 0;
        while (off < b.len) : (off += 32) {
-           @inlineCall(self.round, b[off .. off + 32]);
+           @call(.{ .modifier = .always_inline }, self.round, .{b[off .. off + 32]});
        }
 
        self.msg_len += b.len;

@@ -121,8 +121,8 @@ const WyhashStateless = struct {
        const aligned_len = input.len - (input.len % 32);
 
        var c = WyhashStateless.init(seed);
-       @inlineCall(c.update, input[0..aligned_len]);
-       return @inlineCall(c.final, input[aligned_len..]);
+       @call(.{ .modifier = .always_inline }, c.update, .{input[0..aligned_len]});
+       return @call(.{ .modifier = .always_inline }, c.final, .{input[aligned_len..]});
    }
 };
@@ -811,7 +811,7 @@ pub const Int = struct {
 
         var j: usize = 0;
         while (j < a_lo.len) : (j += 1) {
-            a_lo[j] = @inlineCall(addMulLimbWithCarry, a_lo[j], y[j], xi, &carry);
+            a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry });
         }
 
         j = 0;

@@ -1214,7 +1214,11 @@ pub const Int = struct {
             const dst_i = src_i + limb_shift;
 
             const src_digit = a[src_i];
-            r[dst_i] = carry | @inlineCall(math.shr, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+            r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
+                Limb,
+                src_digit,
+                Limb.bit_count - @intCast(Limb, interior_limb_shift),
+            });
             carry = (src_digit << interior_limb_shift);
         }

@@ -1254,7 +1258,11 @@ pub const Int = struct {
 
             const src_digit = a[src_i];
             r[dst_i] = carry | (src_digit >> interior_limb_shift);
-            carry = @inlineCall(math.shl, Limb, src_digit, Limb.bit_count - @intCast(Limb, interior_limb_shift));
+            carry = @call(.{ .modifier = .always_inline }, math.shl, .{
+                Limb,
+                src_digit,
+                Limb.bit_count - @intCast(Limb, interior_limb_shift),
+            });
         }
     }
@@ -94,7 +94,7 @@ pub fn fork() usize {
 /// the compiler is not aware of how vfork affects control flow and you may
 /// see different results in optimized builds.
 pub inline fn vfork() usize {
-    return @inlineCall(syscall0, SYS_vfork);
+    return @call(.{ .modifier = .always_inline }, syscall0, .{SYS_vfork});
 }
 
 pub fn futimens(fd: i32, times: *const [2]timespec) usize {
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
 
 pub nakedcc fn __aeabi_dcmpeq() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Eq);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Eq});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_dcmplt() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Lt);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Lt});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_dcmple() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Le);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Le});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_dcmpge() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Ge);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Ge});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_dcmpgt() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_dcmp, .Gt);
+    @call(.{ .modifier = .always_inline }, aeabi_dcmp, .{.Gt});
     unreachable;
 }
@@ -14,31 +14,31 @@ const ConditionalOperator = enum {
 
 pub nakedcc fn __aeabi_fcmpeq() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Eq);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Eq});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_fcmplt() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Lt);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Lt});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_fcmple() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Le);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Le});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_fcmpge() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Ge);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Ge});
     unreachable;
 }
 
 pub nakedcc fn __aeabi_fcmpgt() noreturn {
     @setRuntimeSafety(false);
-    @inlineCall(aeabi_fcmp, .Gt);
+    @call(.{ .modifier = .always_inline }, aeabi_fcmp, .{.Gt});
     unreachable;
 }
@@ -17,7 +17,10 @@ pub extern fn __divti3(a: i128, b: i128) i128 {
 
 const v128 = @Vector(2, u64);
 pub extern fn __divti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__divti3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __divti3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
 }
 
 test "import divti3" {
@@ -3,19 +3,19 @@ const builtin = @import("builtin");
 const is_test = builtin.is_test;
 
 pub extern fn __extendsfdf2(a: f32) f64 {
-    return @inlineCall(extendXfYf2, f64, f32, @bitCast(u32, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, a) });
 }
 
 pub extern fn __extenddftf2(a: f64) f128 {
-    return @inlineCall(extendXfYf2, f128, f64, @bitCast(u64, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f64, @bitCast(u64, a) });
 }
 
 pub extern fn __extendsftf2(a: f32) f128 {
-    return @inlineCall(extendXfYf2, f128, f32, @bitCast(u32, a));
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f128, f32, @bitCast(u32, a) });
 }
 
 pub extern fn __extendhfsf2(a: u16) f32 {
-    return @inlineCall(extendXfYf2, f32, f16, a);
+    return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, a });
 }
 
 const CHAR_BIT = 8;
@@ -55,17 +55,17 @@ fn floatsiXf(comptime T: type, a: i32) T {
 
 pub extern fn __floatsisf(arg: i32) f32 {
     @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f32, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f32, arg });
 }
 
 pub extern fn __floatsidf(arg: i32) f64 {
     @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f64, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f64, arg });
 }
 
 pub extern fn __floatsitf(arg: i32) f128 {
     @setRuntimeSafety(builtin.is_test);
-    return @inlineCall(floatsiXf, f128, arg);
+    return @call(.{ .modifier = .always_inline }, floatsiXf, .{ f128, arg });
 }
 
 fn test_one_floatsitf(a: i32, expected: u128) void {
@@ -22,7 +22,10 @@ pub extern fn __modti3(a: i128, b: i128) i128 {
 
 const v128 = @Vector(2, u64);
 pub extern fn __modti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__modti3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __modti3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
 }
 
 test "import modti3" {
@@ -16,7 +16,10 @@ pub extern fn __multi3(a: i128, b: i128) i128 {
 
 const v128 = @Vector(2, u64);
 pub extern fn __multi3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__multi3, @bitCast(i128, a), @bitCast(i128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __multi3, .{
+        @bitCast(i128, a),
+        @bitCast(i128, b),
+    }));
 }
 
 fn __mulddi3(a: u64, b: u64) i128 {
@@ -182,25 +182,25 @@ fn win_probe_stack_adjust_sp() void {
 
 pub nakedcc fn _chkstk() void {
     @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_adjust_sp);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
 }
 pub nakedcc fn __chkstk() void {
     @setRuntimeSafety(false);
     switch (builtin.arch) {
-        .i386 => @inlineCall(win_probe_stack_adjust_sp),
-        .x86_64 => @inlineCall(win_probe_stack_only),
+        .i386 => @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{}),
+        .x86_64 => @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{}),
         else => unreachable,
     }
 }
 pub nakedcc fn ___chkstk() void {
     @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_adjust_sp);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_adjust_sp, .{});
 }
 pub nakedcc fn __chkstk_ms() void {
     @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_only);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
 }
 pub nakedcc fn ___chkstk_ms() void {
     @setRuntimeSafety(false);
-    @inlineCall(win_probe_stack_only);
+    @call(.{ .modifier = .always_inline }, win_probe_stack_only, .{});
 }
@@ -11,5 +11,8 @@ pub extern fn __umodti3(a: u128, b: u128) u128 {
 
 const v128 = @Vector(2, u64);
 pub extern fn __umodti3_windows_x86_64(a: v128, b: v128) v128 {
-    return @bitCast(v128, @inlineCall(__umodti3, @bitCast(u128, a), @bitCast(u128, b)));
+    return @bitCast(v128, @call(.{ .modifier = .always_inline }, __umodti3, .{
+        @bitCast(u128, a),
+        @bitCast(u128, b),
+    }));
 }
@@ -59,7 +59,7 @@ stdcallcc fn _DllMainCRTStartup(
 extern fn wasm_freestanding_start() void {
     // This is marked inline because for some reason LLVM in release mode fails to inline it,
     // and we want fewer call frames in stack traces.
-    _ = @inlineCall(callMain);
+    _ = @call(.{ .modifier = .always_inline }, callMain, .{});
 }
 
 extern fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) usize {

@@ -89,7 +89,7 @@ nakedcc fn _start() noreturn {
     if (builtin.os == builtin.Os.wasi) {
         // This is marked inline because for some reason LLVM in release mode fails to inline it,
         // and we want fewer call frames in stack traces.
-        std.os.wasi.proc_exit(@inlineCall(callMain));
+        std.os.wasi.proc_exit(@call(.{ .modifier = .always_inline }, callMain, .{}));
     }
 
     switch (builtin.arch) {

@@ -187,7 +187,7 @@ fn posixCallMainAndExit() noreturn {
         //std.os.exit(@newStackCall(new_stack, callMainWithArgs, argc, argv, envp));
     }
 
-    std.os.exit(@inlineCall(callMainWithArgs, argc, argv, envp));
+    std.os.exit(@call(.{ .modifier = .always_inline }, callMainWithArgs, .{ argc, argv, envp }));
 }
 
 fn callMainWithArgs(argc: usize, argv: [*][*:0]u8, envp: [][*:0]u8) u8 {

@@ -203,7 +203,7 @@ extern fn main(c_argc: i32, c_argv: [*][*:0]u8, c_envp: [*:null]?[*:0]u8) i32 {
     var env_count: usize = 0;
     while (c_envp[env_count] != null) : (env_count += 1) {}
     const envp = @ptrCast([*][*:0]u8, c_envp)[0..env_count];
-    return @inlineCall(callMainWithArgs, @intCast(usize, c_argc), c_argv, envp);
+    return @call(.{ .modifier = .always_inline }, callMainWithArgs, .{ @intCast(usize, c_argc), c_argv, envp });
 }
 
 // General error message for a malformed return type

@@ -233,7 +233,7 @@ inline fn initEventLoopAndCallMain() u8 {
 
     // This is marked inline because for some reason LLVM in release mode fails to inline it,
     // and we want fewer call frames in stack traces.
-    return @inlineCall(callMain);
+    return @call(.{ .modifier = .always_inline }, callMain, .{});
 }
 
 async fn callMainAsync(loop: *std.event.Loop) u8 {
@@ -1700,7 +1700,6 @@ enum BuiltinFnId {
     BuiltinFnIdFieldParentPtr,
     BuiltinFnIdByteOffsetOf,
     BuiltinFnIdBitOffsetOf,
-    BuiltinFnIdInlineCall,
     BuiltinFnIdNewStackCall,
     BuiltinFnIdAsyncCall,
     BuiltinFnIdTypeId,

@@ -2487,6 +2486,7 @@ enum IrInstructionId {
     IrInstructionIdVarPtr,
     IrInstructionIdReturnPtr,
     IrInstructionIdCallSrc,
+    IrInstructionIdCallSrcArgs,
     IrInstructionIdCallExtra,
     IrInstructionIdCallGen,
     IrInstructionIdConst,

@@ -2904,8 +2904,21 @@ struct IrInstructionCallSrc {
     bool is_async_call_builtin;
 };
 
-/// This is a pass1 instruction, used by @call.
-/// `args` is expected to be either a struct or a tuple.
+// This is a pass1 instruction, used by @call when the args node is
+// a tuple or struct literal.
+struct IrInstructionCallSrcArgs {
+    IrInstruction base;
+
+    IrInstruction *options;
+    IrInstruction *fn_ref;
+    IrInstruction **args_ptr;
+    size_t args_len;
+    ResultLoc *result_loc;
+};
+
+// This is a pass1 instruction, used by @call, when the args node
+// is not a literal.
+// `args` is expected to be either a struct or a tuple.
 struct IrInstructionCallExtra {
     IrInstruction base;
 
@@ -594,8 +594,11 @@ ZigType *get_pointer_to_type_extra2(CodeGen *g, ZigType *child_type, bool is_con
             break;
     }
 
-    if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
+    if (inferred_struct_field != nullptr) {
+        entry->abi_size = g->builtin_types.entry_usize->abi_size;
+        entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
+        entry->abi_align = g->builtin_types.entry_usize->abi_align;
+    } else if (type_is_resolved(child_type, ResolveStatusZeroBitsKnown)) {
         if (type_has_bits(child_type)) {
             entry->abi_size = g->builtin_types.entry_usize->abi_size;
             entry->size_in_bits = g->builtin_types.entry_usize->size_in_bits;
@@ -3062,7 +3062,7 @@ static LLVMValueRef ir_render_cast(CodeGen *g, IrExecutable *executable,
     ZigType *actual_type = cast_instruction->value->value->type;
     ZigType *wanted_type = cast_instruction->base.value->type;
     LLVMValueRef expr_val = ir_llvm_value(g, cast_instruction->value);
-    assert(expr_val);
+    ir_assert(expr_val, &cast_instruction->base);
 
     switch (cast_instruction->cast_op) {
         case CastOpNoCast:

@@ -4330,8 +4330,17 @@ static LLVMValueRef ir_render_struct_field_ptr(CodeGen *g, IrExecutable *executa
         return struct_ptr;
     }
 
-    ZigType *struct_type = (struct_ptr_type->id == ZigTypeIdPointer) ?
-        struct_ptr_type->data.pointer.child_type : struct_ptr_type;
+    ZigType *struct_type;
+    if (struct_ptr_type->id == ZigTypeIdPointer) {
+        if (struct_ptr_type->data.pointer.inferred_struct_field != nullptr) {
+            struct_type = struct_ptr_type->data.pointer.inferred_struct_field->inferred_struct_type;
+        } else {
+            struct_type = struct_ptr_type->data.pointer.child_type;
+        }
+    } else {
+        struct_type = struct_ptr_type;
+    }
 
     if ((err = type_resolve(g, struct_type, ResolveStatusLLVMFull)))
         codegen_report_errors_and_exit(g);

@@ -6152,6 +6161,7 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
         case IrInstructionIdUndeclaredIdent:
         case IrInstructionIdCallExtra:
         case IrInstructionIdCallSrc:
+        case IrInstructionIdCallSrcArgs:
         case IrInstructionIdAllocaSrc:
         case IrInstructionIdEndExpr:
         case IrInstructionIdImplicitCast:

@@ -8132,7 +8142,6 @@ static void define_builtin_fns(CodeGen *g) {
     create_builtin_fn(g, BuiltinFnIdNearbyInt, "nearbyInt", 2);
     create_builtin_fn(g, BuiltinFnIdRound, "round", 2);
     create_builtin_fn(g, BuiltinFnIdMulAdd, "mulAdd", 4);
-    create_builtin_fn(g, BuiltinFnIdInlineCall, "inlineCall", SIZE_MAX);
     create_builtin_fn(g, BuiltinFnIdNewStackCall, "newStackCall", SIZE_MAX);
     create_builtin_fn(g, BuiltinFnIdAsyncCall, "asyncCall", SIZE_MAX);
     create_builtin_fn(g, BuiltinFnIdTypeId, "typeId", 1);
src/ir.cpp: 354 changed lines
@@ -290,6 +290,8 @@ static void destroy_instruction(IrInstruction *inst) {
             return destroy(reinterpret_cast<IrInstructionCast *>(inst), name);
         case IrInstructionIdCallSrc:
             return destroy(reinterpret_cast<IrInstructionCallSrc *>(inst), name);
+        case IrInstructionIdCallSrcArgs:
+            return destroy(reinterpret_cast<IrInstructionCallSrcArgs *>(inst), name);
         case IrInstructionIdCallExtra:
             return destroy(reinterpret_cast<IrInstructionCallExtra *>(inst), name);
         case IrInstructionIdCallGen:

@@ -649,6 +651,15 @@ static ZigValue *const_ptr_pointee_unchecked(CodeGen *g, ZigValue *const_val) {
     assert(const_val->special == ConstValSpecialStatic);
     ZigValue *result;
 
+    InferredStructField *isf = const_val->type->data.pointer.inferred_struct_field;
+    if (isf != nullptr) {
+        TypeStructField *field = find_struct_type_field(isf->inferred_struct_type, isf->field_name);
+        assert(field != nullptr);
+        assert(const_val->data.x_ptr.special == ConstPtrSpecialRef);
+        ZigValue *struct_val = const_val->data.x_ptr.data.ref.pointee;
+        return struct_val->data.x_struct.fields[field->src_index];
+    }
+
     switch (type_has_one_possible_value(g, const_val->type->data.pointer.child_type)) {
         case OnePossibleValueInvalid:
             zig_unreachable();

@@ -978,6 +989,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionCallSrc *) {
     return IrInstructionIdCallSrc;
 }
 
+static constexpr IrInstructionId ir_instruction_id(IrInstructionCallSrcArgs *) {
+    return IrInstructionIdCallSrcArgs;
+}
+
 static constexpr IrInstructionId ir_instruction_id(IrInstructionCallExtra *) {
     return IrInstructionIdCallExtra;
 }

@@ -1921,6 +1936,25 @@ static IrInstruction *ir_build_call_extra(IrBuilder *irb, Scope *scope, AstNode
     return &call_instruction->base;
 }
 
+static IrInstruction *ir_build_call_src_args(IrBuilder *irb, Scope *scope, AstNode *source_node,
+    IrInstruction *options, IrInstruction *fn_ref, IrInstruction **args_ptr, size_t args_len,
+    ResultLoc *result_loc)
+{
+    IrInstructionCallSrcArgs *call_instruction = ir_build_instruction<IrInstructionCallSrcArgs>(irb, scope, source_node);
+    call_instruction->options = options;
+    call_instruction->fn_ref = fn_ref;
+    call_instruction->args_ptr = args_ptr;
+    call_instruction->args_len = args_len;
+    call_instruction->result_loc = result_loc;
+
+    ir_ref_instruction(options, irb->current_basic_block);
+    ir_ref_instruction(fn_ref, irb->current_basic_block);
+    for (size_t i = 0; i < args_len; i += 1)
+        ir_ref_instruction(args_ptr[i], irb->current_basic_block);
+
+    return &call_instruction->base;
+}
+
 static IrInstruction *ir_build_call_src(IrBuilder *irb, Scope *scope, AstNode *source_node,
     ZigFn *fn_entry, IrInstruction *fn_ref, size_t arg_count, IrInstruction **args,
     IrInstruction *ret_ptr, CallModifier modifier, bool is_async_call_builtin,
@@ -5095,6 +5129,43 @@ static IrInstruction *ir_gen_async_call(IrBuilder *irb, Scope *scope, AstNode *a
     return ir_lval_wrap(irb, scope, call, lval, result_loc);
 }
 
+static IrInstruction *ir_gen_fn_call_with_args(IrBuilder *irb, Scope *scope, AstNode *source_node,
+    AstNode *fn_ref_node, CallModifier modifier, IrInstruction *options,
+    AstNode **args_ptr, size_t args_len, LVal lval, ResultLoc *result_loc)
+{
+    IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+    if (fn_ref == irb->codegen->invalid_instruction)
+        return fn_ref;
+
+    IrInstruction *fn_type = ir_build_typeof(irb, scope, source_node, fn_ref);
+
+    IrInstruction **args = allocate<IrInstruction*>(args_len);
+    for (size_t i = 0; i < args_len; i += 1) {
+        AstNode *arg_node = args_ptr[i];
+
+        IrInstruction *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
+        IrInstruction *arg_type = ir_build_arg_type(irb, scope, source_node, fn_type, arg_index, true);
+        ResultLoc *no_result = no_result_loc();
+        ir_build_reset_result(irb, scope, source_node, no_result);
+        ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);
+
+        IrInstruction *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
+        if (arg == irb->codegen->invalid_instruction)
+            return arg;
+
+        args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
+    }
+
+    IrInstruction *fn_call;
+    if (options != nullptr) {
+        fn_call = ir_build_call_src_args(irb, scope, source_node, options, fn_ref, args, args_len, result_loc);
+    } else {
+        fn_call = ir_build_call_src(irb, scope, source_node, nullptr, fn_ref, args_len, args, nullptr,
+            modifier, false, nullptr, result_loc);
+    }
+    return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
+}
+
 static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
     ResultLoc *result_loc)
 {

@@ -6013,32 +6084,6 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
             IrInstruction *offset_of = ir_build_bit_offset_of(irb, scope, node, arg0_value, arg1_value);
             return ir_lval_wrap(irb, scope, offset_of, lval, result_loc);
         }
-        case BuiltinFnIdInlineCall:
-        {
-            if (node->data.fn_call_expr.params.length == 0) {
-                add_node_error(irb->codegen, node, buf_sprintf("expected at least 1 argument, found 0"));
-                return irb->codegen->invalid_instruction;
-            }
-
-            AstNode *fn_ref_node = node->data.fn_call_expr.params.at(0);
-            IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
-            if (fn_ref == irb->codegen->invalid_instruction)
-                return fn_ref;
-
-            size_t arg_count = node->data.fn_call_expr.params.length - 1;
-
-            IrInstruction **args = allocate<IrInstruction*>(arg_count);
-            for (size_t i = 0; i < arg_count; i += 1) {
-                AstNode *arg_node = node->data.fn_call_expr.params.at(i + 1);
-                args[i] = ir_gen_node(irb, arg_node, scope);
-                if (args[i] == irb->codegen->invalid_instruction)
-                    return args[i];
-            }
-
-            IrInstruction *call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args,
-                nullptr, CallModifierAlwaysInline, false, nullptr, result_loc);
-            return ir_lval_wrap(irb, scope, call, lval, result_loc);
-        }
         case BuiltinFnIdNewStackCall:
         {
             if (node->data.fn_call_expr.params.length < 2) {
@@ -6086,17 +6131,33 @@
             IrInstruction *options = ir_build_implicit_cast(irb, scope, options_node, options_inner, result_loc_cast);
 
             AstNode *fn_ref_node = node->data.fn_call_expr.params.at(1);
-            IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
-            if (fn_ref == irb->codegen->invalid_instruction)
-                return fn_ref;
-
             AstNode *args_node = node->data.fn_call_expr.params.at(2);
-            IrInstruction *args = ir_gen_node(irb, args_node, scope);
-            if (args == irb->codegen->invalid_instruction)
-                return args;
-
-            IrInstruction *call = ir_build_call_extra(irb, scope, node, options, fn_ref, args, result_loc);
-            return ir_lval_wrap(irb, scope, call, lval, result_loc);
+            if (args_node->type == NodeTypeContainerInitExpr) {
+                if (args_node->data.container_init_expr.kind == ContainerInitKindArray ||
+                    args_node->data.container_init_expr.entries.length == 0)
+                {
+                    return ir_gen_fn_call_with_args(irb, scope, node,
+                        fn_ref_node, CallModifierNone, options,
+                        args_node->data.container_init_expr.entries.items,
+                        args_node->data.container_init_expr.entries.length,
+                        lval, result_loc);
+                } else {
+                    exec_add_error_node(irb->codegen, irb->exec, args_node,
+                        buf_sprintf("TODO: @call with anon struct literal"));
+                    return irb->codegen->invalid_instruction;
+                }
+            } else {
+                IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
+                if (fn_ref == irb->codegen->invalid_instruction)
+                    return fn_ref;
+
+                IrInstruction *args = ir_gen_node(irb, args_node, scope);
+                if (args == irb->codegen->invalid_instruction)
+                    return args;
+
+                IrInstruction *call = ir_build_call_extra(irb, scope, node, options, fn_ref, args, result_loc);
+                return ir_lval_wrap(irb, scope, call, lval, result_loc);
+            }
         }
         case BuiltinFnIdAsyncCall:
             return ir_gen_async_call(irb, scope, nullptr, node, lval, result_loc);

@@ -6415,33 +6476,8 @@ static IrInstruction *ir_gen_fn_call(IrBuilder *irb, Scope *scope, AstNode *node
         return ir_gen_builtin_fn_call(irb, scope, node, lval, result_loc);
 
     AstNode *fn_ref_node = node->data.fn_call_expr.fn_ref_expr;
-    IrInstruction *fn_ref = ir_gen_node(irb, fn_ref_node, scope);
-    if (fn_ref == irb->codegen->invalid_instruction)
-        return fn_ref;
-
-    IrInstruction *fn_type = ir_build_typeof(irb, scope, node, fn_ref);
-
-    size_t arg_count = node->data.fn_call_expr.params.length;
-    IrInstruction **args = allocate<IrInstruction*>(arg_count);
-    for (size_t i = 0; i < arg_count; i += 1) {
-        AstNode *arg_node = node->data.fn_call_expr.params.at(i);
-
-        IrInstruction *arg_index = ir_build_const_usize(irb, scope, arg_node, i);
-        IrInstruction *arg_type = ir_build_arg_type(irb, scope, node, fn_type, arg_index, true);
-        ResultLoc *no_result = no_result_loc();
-        ir_build_reset_result(irb, scope, node, no_result);
-        ResultLocCast *result_loc_cast = ir_build_cast_result_loc(irb, arg_type, no_result);
-
-        IrInstruction *arg = ir_gen_node_extra(irb, arg_node, scope, LValNone, &result_loc_cast->base);
-        if (arg == irb->codegen->invalid_instruction)
-            return arg;
-
-        args[i] = ir_build_implicit_cast(irb, scope, arg_node, arg, result_loc_cast);
-    }
-
-    IrInstruction *fn_call = ir_build_call_src(irb, scope, node, nullptr, fn_ref, arg_count, args, nullptr,
-        node->data.fn_call_expr.modifier, false, nullptr, result_loc);
-    return ir_lval_wrap(irb, scope, fn_call, lval, result_loc);
+    return ir_gen_fn_call_with_args(irb, scope, node, fn_ref_node, node->data.fn_call_expr.modifier,
+        nullptr, node->data.fn_call_expr.params.items, node->data.fn_call_expr.params.length, lval, result_loc);
 }
 
 static IrInstruction *ir_gen_if_bool_expr(IrBuilder *irb, Scope *scope, AstNode *node, LVal lval,
@@ -13955,6 +13991,20 @@ static IrInstruction *ir_implicit_cast(IrAnalyze *ira, IrInstruction *value, Zig
     return ir_implicit_cast2(ira, value, value, expected_type);
 }
 
+static ZigType *get_ptr_elem_type(CodeGen *g, IrInstruction *ptr) {
+    ir_assert(ptr->value->type->id == ZigTypeIdPointer, ptr);
+    ZigType *elem_type = ptr->value->type->data.pointer.child_type;
+    if (elem_type != g->builtin_types.entry_var)
+        return elem_type;
+
+    if (ir_resolve_lazy(g, ptr->source_node, ptr->value))
+        return g->builtin_types.entry_invalid;
+
+    assert(value_is_comptime(ptr->value));
+    ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
+    return pointee->type;
+}
+
 static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruction, IrInstruction *ptr,
     ResultLoc *result_loc)
 {

@@ -13971,6 +14021,8 @@ static IrInstruction *ir_get_deref(IrAnalyze *ira, IrInstruction *source_instruc
     }
 
     ZigType *child_type = ptr_type->data.pointer.child_type;
+    if (type_is_invalid(child_type))
+        return ira->codegen->invalid_instruction;
     // if the child type has one possible value, the deref is comptime
     switch (type_has_one_possible_value(ira->codegen, child_type)) {
         case OnePossibleValueInvalid:

@@ -17326,9 +17378,7 @@ static IrInstruction *ir_analyze_store_ptr(IrAnalyze *ira, IrInstruction *source
         copy_const_val(casted_ptr->value, ptr->value);
         casted_ptr->value->type = struct_ptr_type;
     } else {
-        casted_ptr = ir_build_cast(&ira->new_irb, source_instr->scope,
-            source_instr->source_node, struct_ptr_type, ptr, CastOpNoop);
-        casted_ptr->value->type = struct_ptr_type;
+        casted_ptr = ptr;
     }
     if (instr_is_comptime(casted_ptr)) {
         ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad);

@@ -17409,6 +17459,12 @@
         }
     }
 
+    if (ptr->value->type->data.pointer.inferred_struct_field != nullptr &&
+        child_type == ira->codegen->builtin_types.entry_var)
+    {
+        child_type = ptr->value->type->data.pointer.inferred_struct_field->inferred_struct_type;
+    }
+
     switch (type_requires_comptime(ira->codegen, child_type)) {
         case ReqCompTimeInvalid:
             return ira->codegen->invalid_instruction;
@@ -18105,17 +18161,81 @@ static IrInstruction *ir_analyze_fn_call_src(IrAnalyze *ira, IrInstructionCallSr
     return result;
 }
 
-static IrInstruction *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstructionCallExtra *instruction) {
-    IrInstruction *options = instruction->options->child;
+static IrInstruction *ir_analyze_call_extra(IrAnalyze *ira, IrInstruction *source_instr,
+    IrInstruction *pass1_options, IrInstruction *pass1_fn_ref, IrInstruction **args_ptr, size_t args_len,
+    ResultLoc *result_loc)
+{
+    IrInstruction *options = pass1_options->child;
     if (type_is_invalid(options->value->type))
         return ira->codegen->invalid_instruction;
 
-    IrInstruction *fn_ref = instruction->fn_ref->child;
+    IrInstruction *fn_ref = pass1_fn_ref->child;
     if (type_is_invalid(fn_ref->value->type))
         return ira->codegen->invalid_instruction;
-    ZigFn *fn = ir_resolve_fn(ira, fn_ref);
+    IrInstruction *first_arg_ptr = nullptr;
+    ZigFn *fn = nullptr;
+    if (fn_ref->value->type->id == ZigTypeIdBoundFn) {
+        assert(fn_ref->value->special == ConstValSpecialStatic);
+        fn = fn_ref->value->data.x_bound_fn.fn;
+        first_arg_ptr = fn_ref->value->data.x_bound_fn.first_arg;
+        if (type_is_invalid(first_arg_ptr->value->type))
+            return ira->codegen->invalid_instruction;
+    } else {
+        fn = ir_resolve_fn(ira, fn_ref);
+    }
     ZigType *fn_type = (fn != nullptr) ? fn->type_entry : fn_ref->value->type;
 
+    TypeStructField *modifier_field = find_struct_type_field(options->value->type, buf_create_from_str("modifier"));
+    ir_assert(modifier_field != nullptr, source_instr);
+    IrInstruction *modifier_inst = ir_analyze_struct_value_field_value(ira, source_instr, options, modifier_field);
+    ZigValue *modifier_val = ir_resolve_const(ira, modifier_inst, UndefBad);
+    if (modifier_val == nullptr)
+        return ira->codegen->invalid_instruction;
+    CallModifier modifier = (CallModifier)bigint_as_u32(&modifier_val->data.x_enum_tag);
+    if (modifier == CallModifierAsync) {
+        ir_add_error(ira, source_instr, buf_sprintf("TODO: @call with async modifier"));
+        return ira->codegen->invalid_instruction;
+    }
+    if (ir_should_inline(ira->new_irb.exec, source_instr->scope)) {
+        switch (modifier) {
+            case CallModifierBuiltin:
+                zig_unreachable();
+            case CallModifierAsync:
+                ir_add_error(ira, source_instr, buf_sprintf("TODO: comptime @call with async modifier"));
+                return ira->codegen->invalid_instruction;
+            case CallModifierCompileTime:
+            case CallModifierNone:
+            case CallModifierAlwaysInline:
+            case CallModifierAlwaysTail:
+            case CallModifierNoAsync:
+                modifier = CallModifierCompileTime;
+                break;
+            case CallModifierNeverInline:
+                ir_add_error(ira, source_instr,
+                    buf_sprintf("unable to perform 'never_inline' call at compile-time"));
+                return ira->codegen->invalid_instruction;
+            case CallModifierNeverTail:
+                ir_add_error(ira, source_instr,
+                    buf_sprintf("unable to perform 'never_tail' call at compile-time"));
+                return ira->codegen->invalid_instruction;
+        }
+    }
+
+    TypeStructField *stack_field = find_struct_type_field(options->value->type, buf_create_from_str("stack"));
+    ir_assert(stack_field != nullptr, source_instr);
+    IrInstruction *stack = ir_analyze_struct_value_field_value(ira, source_instr, options, stack_field);
+    IrInstruction *stack_is_non_null_inst = ir_analyze_test_non_null(ira, source_instr, stack);
+    bool stack_is_non_null;
+    if (!ir_resolve_bool(ira, stack_is_non_null_inst, &stack_is_non_null))
+        return ira->codegen->invalid_instruction;
+    if (!stack_is_non_null)
+        stack = nullptr;
+
+    return ir_analyze_fn_call(ira, source_instr, fn, fn_type, fn_ref, first_arg_ptr,
+        modifier, stack, false, args_ptr, args_len, nullptr, result_loc);
+}
+
+static IrInstruction *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstructionCallExtra *instruction) {
     IrInstruction *args = instruction->args->child;
     ZigType *args_type = args->value->type;
     if (type_is_invalid(args_type))
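The new ZigTypeIdBoundFn branch in ir_analyze_call_extra above is what lets @call accept a bound function such as hasher.update (which the std.hash changes earlier in this commit rely on): the receiver is threaded through as an implicit first argument (first_arg_ptr). A minimal Zig sketch of the pattern, using a hypothetical Hasher type:

    const Hasher = struct {
        sum: u64,

        fn update(self: *Hasher, bytes: []const u8) void {
            for (bytes) |b| self.sum +%= b;
        }
    };

    test "bound function through @call" {
        var h = Hasher{ .sum = 0 };
        // h.update is a bound function: &h is passed implicitly.
        @call(.{ .modifier = .always_inline }, h.update, .{"abc"});
        @import("std").testing.expect(h.sum == 'a' + 'b' + 'c');
    }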
@@ -18143,59 +18263,26 @@ static IrInstruction *ir_analyze_instruction_call_extra(IrAnalyze *ira, IrInstru
         ir_add_error(ira, args, buf_sprintf("TODO: struct args"));
         return ira->codegen->invalid_instruction;
     }
 
-    TypeStructField *modifier_field = find_struct_type_field(options->value->type, buf_create_from_str("modifier"));
-    ir_assert(modifier_field != nullptr, &instruction->base);
-    IrInstruction *modifier_inst = ir_analyze_struct_value_field_value(ira, &instruction->base, options, modifier_field);
-    ZigValue *modifier_val = ir_resolve_const(ira, modifier_inst, UndefBad);
-    if (modifier_val == nullptr)
-        return ira->codegen->invalid_instruction;
-    CallModifier modifier = (CallModifier)bigint_as_u32(&modifier_val->data.x_enum_tag);
-    if (modifier == CallModifierAsync) {
-        ir_add_error(ira, args, buf_sprintf("TODO: @call with async modifier"));
-        return ira->codegen->invalid_instruction;
-    }
-    if (ir_should_inline(ira->new_irb.exec, instruction->base.scope)) {
-        switch (modifier) {
-            case CallModifierBuiltin:
-                zig_unreachable();
-            case CallModifierAsync:
-                ir_add_error(ira, args, buf_sprintf("TODO: comptime @call with async modifier"));
-                return ira->codegen->invalid_instruction;
-            case CallModifierCompileTime:
-            case CallModifierNone:
-            case CallModifierAlwaysInline:
-            case CallModifierAlwaysTail:
-            case CallModifierNoAsync:
-                modifier = CallModifierCompileTime;
-                break;
-            case CallModifierNeverInline:
-                ir_add_error(ira, args,
-                    buf_sprintf("unable to perform 'never_inline' call at compile-time"));
-                return ira->codegen->invalid_instruction;
-            case CallModifierNeverTail:
-                ir_add_error(ira, args,
-                    buf_sprintf("unable to perform 'never_tail' call at compile-time"));
-                return ira->codegen->invalid_instruction;
-        }
-    }
-
-    TypeStructField *stack_field = find_struct_type_field(options->value->type, buf_create_from_str("stack"));
-    ir_assert(stack_field != nullptr, &instruction->base);
-    IrInstruction *stack = ir_analyze_struct_value_field_value(ira, &instruction->base, options, stack_field);
-    IrInstruction *stack_is_non_null_inst = ir_analyze_test_non_null(ira, &instruction->base, stack);
-    bool stack_is_non_null;
-    if (!ir_resolve_bool(ira, stack_is_non_null_inst, &stack_is_non_null))
-        return ira->codegen->invalid_instruction;
-    if (!stack_is_non_null)
-        stack = nullptr;
-
-    IrInstruction *result = ir_analyze_fn_call(ira, &instruction->base, fn, fn_type, fn_ref, nullptr,
-        modifier, stack, false, args_ptr, args_len, nullptr, instruction->result_loc);
+    IrInstruction *result = ir_analyze_call_extra(ira, &instruction->base, instruction->options,
+        instruction->fn_ref, args_ptr, args_len, instruction->result_loc);
     deallocate(args_ptr, args_len, "IrInstruction *");
     return result;
 }
 
+static IrInstruction *ir_analyze_instruction_call_args(IrAnalyze *ira, IrInstructionCallSrcArgs *instruction) {
+    IrInstruction **args_ptr = allocate<IrInstruction *>(instruction->args_len, "IrInstruction *");
+    for (size_t i = 0; i < instruction->args_len; i += 1) {
+        args_ptr[i] = instruction->args_ptr[i]->child;
+        if (type_is_invalid(args_ptr[i]->value->type))
+            return ira->codegen->invalid_instruction;
+    }
+
+    IrInstruction *result = ir_analyze_call_extra(ira, &instruction->base, instruction->options,
+        instruction->fn_ref, args_ptr, instruction->args_len, instruction->result_loc);
+    deallocate(args_ptr, instruction->args_len, "IrInstruction *");
+    return result;
+}
+
 static IrInstruction *ir_analyze_instruction_call(IrAnalyze *ira, IrInstructionCallSrc *call_instruction) {
     IrInstruction *fn_ref = call_instruction->fn_ref->child;
     if (type_is_invalid(fn_ref->value->type))
@@ -19513,8 +19600,18 @@ static IrInstruction *ir_analyze_inferred_field_ptr(IrAnalyze *ira, Buf *field_n
         PtrLenSingle, 0, 0, 0, false, VECTOR_INDEX_NONE, inferred_struct_field, nullptr);
 
     if (instr_is_comptime(container_ptr)) {
-        IrInstruction *result = ir_const(ira, source_instr, field_ptr_type);
-        copy_const_val(result->value, container_ptr->value);
+        ZigValue *ptr_val = ir_resolve_const(ira, container_ptr, UndefBad);
+        if (ptr_val == nullptr)
+            return ira->codegen->invalid_instruction;
+
+        IrInstruction *result;
+        if (ptr_val->data.x_ptr.mut == ConstPtrMutInfer) {
+            result = ir_build_cast(&ira->new_irb, source_instr->scope,
+                source_instr->source_node, container_ptr_type, container_ptr, CastOpNoop);
+        } else {
+            result = ir_const(ira, source_instr, field_ptr_type);
+        }
+        copy_const_val(result->value, ptr_val);
         result->value->type = field_ptr_type;
         return result;
     }

@@ -20531,20 +20628,6 @@ static IrInstruction *ir_analyze_instruction_test_non_null(IrAnalyze *ira, IrIns
     return ir_analyze_test_non_null(ira, &instruction->base, value);
 }
 
-static ZigType *get_ptr_elem_type(CodeGen *g, IrInstruction *ptr) {
-    ir_assert(ptr->value->type->id == ZigTypeIdPointer, ptr);
-    ZigType *elem_type = ptr->value->type->data.pointer.child_type;
-    if (elem_type != g->builtin_types.entry_var)
-        return elem_type;
-
-    if (ir_resolve_lazy(g, ptr->source_node, ptr->value))
-        return g->builtin_types.entry_invalid;
-
-    assert(value_is_comptime(ptr->value));
-    ZigValue *pointee = const_ptr_pointee_unchecked(g, ptr->value);
-    return pointee->type;
-}
-
 static IrInstruction *ir_analyze_unwrap_optional_payload(IrAnalyze *ira, IrInstruction *source_instr,
     IrInstruction *base_ptr, bool safety_check_on, bool initializing)
 {
@@ -27932,6 +28015,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
         return ir_analyze_instruction_field_ptr(ira, (IrInstructionFieldPtr *)instruction);
     case IrInstructionIdCallSrc:
         return ir_analyze_instruction_call(ira, (IrInstructionCallSrc *)instruction);
+    case IrInstructionIdCallSrcArgs:
+        return ir_analyze_instruction_call_args(ira, (IrInstructionCallSrcArgs *)instruction);
     case IrInstructionIdCallExtra:
         return ir_analyze_instruction_call_extra(ira, (IrInstructionCallExtra *)instruction);
     case IrInstructionIdBr:

@@ -28333,6 +28418,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
         case IrInstructionIdVectorStoreElem:
         case IrInstructionIdCallExtra:
         case IrInstructionIdCallSrc:
+        case IrInstructionIdCallSrcArgs:
        case IrInstructionIdCallGen:
        case IrInstructionIdReturn:
        case IrInstructionIdUnreachable:
@@ -96,6 +96,8 @@ const char* ir_instruction_type_str(IrInstructionId id) {
             return "CallExtra";
         case IrInstructionIdCallSrc:
             return "CallSrc";
+        case IrInstructionIdCallSrcArgs:
+            return "CallSrcArgs";
         case IrInstructionIdCallGen:
             return "CallGen";
         case IrInstructionIdConst:

@@ -649,6 +651,22 @@ static void ir_print_call_extra(IrPrint *irp, IrInstructionCallExtra *instructio
     ir_print_result_loc(irp, instruction->result_loc);
 }
 
+static void ir_print_call_src_args(IrPrint *irp, IrInstructionCallSrcArgs *instruction) {
+    fprintf(irp->f, "opts=");
+    ir_print_other_instruction(irp, instruction->options);
+    fprintf(irp->f, ", fn=");
+    ir_print_other_instruction(irp, instruction->fn_ref);
+    fprintf(irp->f, ", args=(");
+    for (size_t i = 0; i < instruction->args_len; i += 1) {
+        IrInstruction *arg = instruction->args_ptr[i];
+        if (i != 0)
+            fprintf(irp->f, ", ");
+        ir_print_other_instruction(irp, arg);
+    }
+    fprintf(irp->f, "), result=");
+    ir_print_result_loc(irp, instruction->result_loc);
+}
+
 static void ir_print_call_src(IrPrint *irp, IrInstructionCallSrc *call_instruction) {
     switch (call_instruction->modifier) {
         case CallModifierNone:

@@ -2131,6 +2149,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
         case IrInstructionIdCallSrc:
             ir_print_call_src(irp, (IrInstructionCallSrc *)instruction);
             break;
+        case IrInstructionIdCallSrcArgs:
+            ir_print_call_src_args(irp, (IrInstructionCallSrcArgs *)instruction);
+            break;
         case IrInstructionIdCallGen:
             ir_print_call_gen(irp, (IrInstructionCallGen *)instruction);
             break;
@@ -28,10 +28,21 @@ test "tuple parameters" {
             return a + b;
         }
     }.add;
+    var a: i32 = 12;
+    var b: i32 = 34;
+    expect(@call(.{}, add, .{ a, 34 }) == 46);
+    expect(@call(.{}, add, .{ 12, b }) == 46);
+    expect(@call(.{}, add, .{ a, b }) == 46);
     expect(@call(.{}, add, .{ 12, 34 }) == 46);
     comptime expect(@call(.{}, add, .{ 12, 34 }) == 46);
     {
-        const separate_args = .{ 12, 34 };
-        expect(@call(.{ .modifier = .always_inline }, add, separate_args) == 46);
+        const separate_args0 = .{ a, b };
+        //TODO const separate_args1 = .{ a, 34 };
+        const separate_args2 = .{ 12, 34 };
+        //TODO const separate_args3 = .{ 12, b };
+        expect(@call(.{ .modifier = .always_inline }, add, separate_args0) == 46);
+        // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args1) == 46);
+        expect(@call(.{ .modifier = .always_inline }, add, separate_args2) == 46);
+        // TODO expect(@call(.{ .modifier = .always_inline }, add, separate_args3) == 46);
    }
 }
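The commented-out TODO cases above pin down the current limitation: a named argument tuple works with an always_inline @call when its elements are all runtime values (separate_args0) or all comptime-known values (separate_args2), but tuples mixing the two (.{ a, 34 } and .{ 12, b }) are not handled yet. A reduced sketch of the forms that pass today, assuming the same add helper and expect import as the surrounding test file:

    test "named tuple arguments, reduced" {
        var a: i32 = 12;
        var b: i32 = 34;
        const runtime_args = .{ a, b }; // all runtime: works
        const comptime_args = .{ 12, 34 }; // all comptime-known: works
        expect(@call(.{ .modifier = .always_inline }, add, runtime_args) == 46);
        expect(@call(.{ .modifier = .always_inline }, add, comptime_args) == 46);
        // Mixed tuples such as .{ a, 34 } remain TODO in this commit.
    }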
@@ -96,14 +96,6 @@ fn fn4() u32 {
     return 8;
 }
 
-test "inline function call" {
-    expect(@inlineCall(add, 3, 9) == 12);
-}
-
 fn add(a: i32, b: i32) i32 {
     return a + b;
 }
 
 test "number literal as an argument" {
     numberLiteralArg(3);
     comptime numberLiteralArg(3);

@@ -251,7 +243,7 @@ test "discard the result of a function that returns a struct" {
 test "function call with anon list literal" {
     const S = struct {
         fn doTheTest() void {
-            consumeVec(.{9, 8, 7});
+            consumeVec(.{ 9, 8, 7 });
         }
 
         fn consumeVec(vec: [3]f32) void {