diff --git a/src/Air.zig b/src/Air.zig index d030cd3c22..762e8afa37 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -8,7 +8,7 @@ const builtin = @import("builtin"); const assert = std.debug.assert; const Air = @This(); -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Type = @import("type.zig").Type; const InternPool = @import("InternPool.zig"); const Module = @import("Module.zig"); @@ -986,6 +986,12 @@ pub const Inst = struct { empty_struct = @intFromEnum(InternPool.Index.empty_struct), generic_poison = @intFromEnum(InternPool.Index.generic_poison), + /// This Ref does not correspond to any AIR instruction. + /// It is a special value recognized only by Sema. + /// It indicates the value is mutable comptime memory, and represented + /// via the comptime_memory field of Sema. This value never occurs + /// in AIR which is emitted to backends. + mutable_comptime = @intFromEnum(InternPool.Index.mutable_comptime), /// This Ref does not correspond to any AIR instruction or constant /// value. It is used to handle argument types of var args functions. var_args_param_type = @intFromEnum(InternPool.Index.var_args_param_type), @@ -1095,7 +1101,7 @@ pub const Inst = struct { inferred_alloc: InferredAlloc, pub const InferredAllocComptime = struct { - decl_index: InternPool.DeclIndex, + comptime_memory_value_index: @import("Sema/ComptimeMemory.zig").Value.Index, alignment: InternPool.Alignment, is_const: bool, }; diff --git a/src/Compilation.zig b/src/Compilation.zig index e837004779..b735b15f79 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -11,7 +11,7 @@ const ThreadPool = std.Thread.Pool; const WaitGroup = std.Thread.WaitGroup; const ErrorBundle = std.zig.ErrorBundle; -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Type = @import("type.zig").Type; const target_util = @import("target.zig"); const Package = @import("Package.zig"); diff --git a/src/InternPool.zig b/src/InternPool.zig index 01cd41942d..b62e579902 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -98,6 +98,7 @@ const InternPool = @This(); const Module = @import("Module.zig"); const Zcu = Module; const Zir = @import("Zir.zig"); +const Air = @import("Air.zig"); const KeyAdapter = struct { intern_pool: *const InternPool, @@ -132,16 +133,6 @@ pub const MapIndex = enum(u32) { } }; -pub const RuntimeIndex = enum(u32) { - zero = 0, - comptime_field_ptr = std.math.maxInt(u32), - _, - - pub fn increment(ri: *RuntimeIndex) void { - ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1)); - } -}; - pub const DeclIndex = enum(u32) { _, @@ -1203,7 +1194,6 @@ pub const Key = union(enum) { const Tag = @typeInfo(Addr).Union.tag_type.?; decl: DeclIndex, - mut_decl: MutDecl, anon_decl: AnonDecl, comptime_field: Index, int: Index, @@ -1212,10 +1202,6 @@ pub const Key = union(enum) { elem: BaseIndex, field: BaseIndex, - pub const MutDecl = struct { - decl: DeclIndex, - runtime_index: RuntimeIndex, - }; pub const BaseIndex = struct { base: Index, index: u64, @@ -1373,11 +1359,6 @@ pub const Key = union(enum) { return switch (ptr.addr) { .decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), - .mut_decl => |x| Hash.hash( - seed2, - common ++ asBytes(&x.decl) ++ asBytes(&x.runtime_index), - ), - .anon_decl => |x| Hash.hash(seed2, common ++ asBytes(&x)), .int, @@ -1651,7 +1632,6 @@ pub const Key = union(enum) { return switch (a_info.addr) { .decl => |a_decl| a_decl == b_info.addr.decl, - .mut_decl => |a_mut_decl| std.meta.eql(a_mut_decl, 
b_info.addr.mut_decl), .anon_decl => |ad| ad.val == b_info.addr.anon_decl.val and ad.orig_ty == b_info.addr.anon_decl.orig_ty, .int => |a_int| a_int == b_info.addr.int, @@ -2171,6 +2151,7 @@ pub const Index = enum(u32) { generic_poison, /// Used by Air/Sema only. + mutable_comptime = std.math.maxInt(u32) - 2, var_args_param_type = std.math.maxInt(u32) - 1, none = std.math.maxInt(u32), @@ -2280,7 +2261,6 @@ pub const Index = enum(u32) { undef: DataIsIndex, simple_value: struct { data: SimpleValue }, ptr_decl: struct { data: *PtrDecl }, - ptr_mut_decl: struct { data: *PtrMutDecl }, ptr_anon_decl: struct { data: *PtrAnonDecl }, ptr_anon_decl_aligned: struct { data: *PtrAnonDeclAligned }, ptr_comptime_field: struct { data: *PtrComptimeField }, @@ -2732,9 +2712,6 @@ pub const Tag = enum(u8) { /// A pointer to a decl. /// data is extra index of `PtrDecl`, which contains the type and address. ptr_decl, - /// A pointer to a decl that can be mutated at comptime. - /// data is extra index of `PtrMutDecl`, which contains the type and address. - ptr_mut_decl, /// A pointer to an anonymous decl. /// data is extra index of `PtrAnonDecl`, which contains the pointer type and decl value. /// The alignment of the anonymous decl is communicated via the pointer type. @@ -2939,7 +2916,6 @@ pub const Tag = enum(u8) { .undef => unreachable, .simple_value => unreachable, .ptr_decl => PtrDecl, - .ptr_mut_decl => PtrMutDecl, .ptr_anon_decl => PtrAnonDecl, .ptr_anon_decl_aligned => PtrAnonDeclAligned, .ptr_comptime_field => PtrComptimeField, @@ -3570,12 +3546,6 @@ pub const PtrAnonDeclAligned = struct { orig_ty: Index, }; -pub const PtrMutDecl = struct { - ty: Index, - decl: DeclIndex, - runtime_index: RuntimeIndex, -}; - pub const PtrComptimeField = struct { ty: Index, field_val: Index, @@ -3910,16 +3880,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key { .addr = .{ .decl = info.decl }, } }; }, - .ptr_mut_decl => { - const info = ip.extraData(PtrMutDecl, data); - return .{ .ptr = .{ - .ty = info.ty, - .addr = .{ .mut_decl = .{ - .decl = info.decl, - .runtime_index = info.runtime_index, - } }, - } }; - }, .ptr_anon_decl => { const info = ip.extraData(PtrAnonDecl, data); return .{ .ptr = .{ @@ -4712,14 +4672,6 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index { .decl = decl, }), }), - .mut_decl => |mut_decl| ip.items.appendAssumeCapacity(.{ - .tag = .ptr_mut_decl, - .data = try ip.addExtra(gpa, PtrMutDecl{ - .ty = ptr.ty, - .decl = mut_decl.decl, - .runtime_index = mut_decl.runtime_index, - }), - }), .anon_decl => |anon_decl| ip.items.appendAssumeCapacity( if (ptrsHaveSameAlignment(ip, ptr.ty, ptr_type, anon_decl.orig_ty)) .{ .tag = .ptr_anon_decl, @@ -6147,7 +6099,7 @@ fn finishFuncInstance( .has_tv = true, .owns_tv = true, .ty = @import("type.zig").Type.fromInterned(func_ty), - .val = @import("value.zig").Value.fromInterned(func_index), + .val = @import("Value.zig").fromInterned(func_index), .alignment = .none, .@"linksection" = section, .@"addrspace" = fn_owner_decl.@"addrspace", @@ -6501,7 +6453,6 @@ fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 { OptionalNamespaceIndex, MapIndex, OptionalMapIndex, - RuntimeIndex, String, NullTerminatedString, OptionalNullTerminatedString, @@ -6577,7 +6528,6 @@ fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct OptionalNamespaceIndex, MapIndex, OptionalMapIndex, - RuntimeIndex, String, NullTerminatedString, OptionalNullTerminatedString, @@ -7344,7 +7294,6 @@ fn dumpStatsFallible(ip: *const 
InternPool, arena: Allocator) anyerror!void { .simple_type => 0, .simple_value => 0, .ptr_decl => @sizeOf(PtrDecl), - .ptr_mut_decl => @sizeOf(PtrMutDecl), .ptr_anon_decl => @sizeOf(PtrAnonDecl), .ptr_anon_decl_aligned => @sizeOf(PtrAnonDeclAligned), .ptr_comptime_field => @sizeOf(PtrComptimeField), @@ -7474,7 +7423,6 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void { .type_function, .undef, .ptr_decl, - .ptr_mut_decl, .ptr_anon_decl, .ptr_anon_decl_aligned, .ptr_comptime_field, @@ -7887,7 +7835,6 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .simple_value => unreachable, // handled via Index above inline .ptr_decl, - .ptr_mut_decl, .ptr_anon_decl, .ptr_anon_decl_aligned, .ptr_comptime_field, @@ -7951,6 +7898,7 @@ pub fn typeOf(ip: *const InternPool, index: Index) Index { .memoized_call => unreachable, }, + .mutable_comptime => unreachable, .var_args_param_type => unreachable, .none => unreachable, }; @@ -8019,9 +7967,7 @@ pub fn getBackingDecl(ip: *const InternPool, val: Index) OptionalDeclIndex { var base = @intFromEnum(val); while (true) { switch (ip.items.items(.tag)[base]) { - inline .ptr_decl, - .ptr_mut_decl, - => |tag| return @enumFromInt(ip.extra.items[ + .ptr_decl => |tag| return @enumFromInt(ip.extra.items[ ip.items.items(.data)[base] + std.meta.fieldIndex(tag.Payload(), "decl").? ]), inline .ptr_eu_payload, @@ -8044,7 +7990,6 @@ pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.Addr.Tag { while (true) { switch (ip.items.items(.tag)[base]) { .ptr_decl => return .decl, - .ptr_mut_decl => return .mut_decl, .ptr_anon_decl, .ptr_anon_decl_aligned => return .anon_decl, .ptr_comptime_field => return .comptime_field, .ptr_int => return .int, @@ -8219,7 +8164,6 @@ pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPois .undef, .simple_value, .ptr_decl, - .ptr_mut_decl, .ptr_anon_decl, .ptr_anon_decl_aligned, .ptr_comptime_field, diff --git a/src/Module.zig b/src/Module.zig index 1b3342f775..37072848cd 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -19,7 +19,7 @@ const Module = Zcu; const Zcu = @This(); const Compilation = @import("Compilation.zig"); const Cache = std.Build.Cache; -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Package = @import("Package.zig"); @@ -3416,8 +3416,8 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { defer sema_arena.deinit(); const sema_arena_allocator = sema_arena.allocator(); - var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: Sema.ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -3434,7 +3434,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -3448,10 +3448,6 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void { }; // TODO: figure out InternPool removals for incremental compilation //errdefer ip.remove(struct_ty); - for (comptime_mutable_decls.items) |decl_index| { - const decl = mod.declPtr(decl_index); - _ = try decl.internValue(mod); - } new_namespace.ty = Type.fromInterned(struct_ty); 
new_decl.val = Value.fromInterned(struct_ty); @@ -3540,8 +3536,8 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: Sema.ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -3558,7 +3554,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, .builtin_type_target_index = builtin_type_target_index, }; @@ -3584,10 +3580,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool { // We'll do some other bits with the Sema. Clear the type target index just // in case they analyze any type. sema.builtin_type_target_index = .none; - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 }; const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 }; const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 }; @@ -4362,8 +4354,8 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato const decl_index = func.owner_decl; const decl = mod.declPtr(decl_index); - var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: Sema.ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -4389,7 +4381,7 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato .fn_ret_ty_ies = null, .owner_func_index = func_index, .branch_quota = @max(func.branchQuota(ip).*, Sema.default_branch_quota), - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -4522,11 +4514,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato }; } - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } - // Copy the block into place and mark that as the main block. 
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + inner_block.instructions.items.len); @@ -5213,6 +5200,7 @@ pub fn populateTestFunctions( mod: *Module, main_progress_node: *std.Progress.Node, ) !void { + if (true) @panic("TODO implement populateTestFunctions"); const gpa = mod.gpa; const ip = &mod.intern_pool; const builtin_mod = mod.root_mod.getBuiltinDependency(); @@ -5436,7 +5424,6 @@ pub fn markReferencedDeclsAlive(mod: *Module, val: Value) Allocator.Error!void { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| try mod.markDeclIndexAlive(decl), .anon_decl => {}, - .mut_decl => |mut_decl| try mod.markDeclIndexAlive(mut_decl.decl), .int, .comptime_field => {}, .eu_payload, .opt_payload => |parent| try mod.markReferencedDeclsAlive(Value.fromInterned(parent)), .elem, .field => |base_index| try mod.markReferencedDeclsAlive(Value.fromInterned(base_index.base)), diff --git a/src/RangeSet.zig b/src/RangeSet.zig index 158f816129..00dc257d1a 100644 --- a/src/RangeSet.zig +++ b/src/RangeSet.zig @@ -4,7 +4,7 @@ const Order = std.math.Order; const InternPool = @import("InternPool.zig"); const Type = @import("type.zig").Type; -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Module = @import("Module.zig"); const RangeSet = @This(); const SwitchProngSrc = @import("Module.zig").SwitchProngSrc; diff --git a/src/Sema.zig b/src/Sema.zig index dc4acae039..b8b1fdfa70 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -16,6 +16,8 @@ air_instructions: std.MultiArrayList(Air.Inst) = .{}, air_extra: std.ArrayListUnmanaged(u32) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, +/// Comptime-mutable memory. This is inherited by child Sema instances. +comptime_memory: *ComptimeMemory, /// When analyzing an inline function call, owner_decl is the Decl of the caller /// and `src_decl` of `Block` is the `Decl` of the callee. /// This `Decl` owns the arena memory of this `Sema`. @@ -96,14 +98,6 @@ no_partial_func_ty: bool = false, /// here so the values can be dropped without any cleanup. unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{}, -/// Indices of comptime-mutable decls created by this Sema. These decls' values -/// should be interned after analysis completes, as they may refer to memory in -/// the Sema arena. -/// TODO: this is a workaround for memory bugs triggered by the removal of -/// Decl.value_arena. A better solution needs to be found. Probably this will -/// involve transitioning comptime-mutable memory away from using Decls at all. -comptime_mutable_decls: *std.ArrayList(InternPool.DeclIndex), - /// This is populated when `@setAlignStack` occurs so that if there is a duplicate /// one encountered, the conflicting source location can be shown. prev_stack_alignment_src: ?LazySrcLoc = null, @@ -128,9 +122,11 @@ base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{}, /// Backed by gpa. maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .{}, +const ComptimeMemory = @import("Sema/ComptimeMemory.zig"); + const MaybeComptimeAlloc = struct { /// The runtime index of the `alloc` instruction. - runtime_index: Value.RuntimeIndex, + runtime_index: ComptimeMemory.RuntimeIndex, /// Backed by sema.arena. Tracks all comptime-known stores to this `alloc`. Due to /// RLS, a single comptime-known allocation may have arbitrarily many stores. /// This may also contain `set_union_tag` instructions. 
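Note on the new `ComptimeMemory` API: the Module.zig hunks above call `comptime_memory.deinit(gpa)`, and later Sema.zig hunks call `comptime_memory.allocate(...)`, `comptime_memory.addValue(...)`, and `comptime_memory.store(...)`, yet the new `src/Sema/ComptimeMemory.zig` file added at the end of this diff does not define any of these methods yet. As a rough illustration only, a `deinit` matching the existing call sites might look like the sketch below; the method body and the decision not to free the `memory_island` buffers are assumptions of this note, not part of the change.

/// Hypothetical sketch (not part of this diff). It would live in
/// src/Sema/ComptimeMemory.zig, which already imports `std`.
pub fn deinit(cm: *@This(), gpa: std.mem.Allocator) void {
    cm.value_map.deinit(gpa);
    cm.value_map_values.deinit(gpa);
    cm.value_list.deinit(gpa);
    cm.slice_list.deinit(gpa);
    cm.bytes_list.deinit(gpa);
    cm.aggregate_list.deinit(gpa);
    cm.union_list.deinit(gpa);
    // Ownership of the `memory_island` buffers referenced by `bytes_list` is
    // not specified in this diff (they may be arena-backed), so they are
    // deliberately not freed here.
    cm.* = undefined;
}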
@@ -149,7 +145,8 @@ const assert = std.debug.assert; const log = std.log.scoped(.sema); const Sema = @This(); -const Value = @import("value.zig").Value; +const ConstValue = @import("Value.zig"); +const MutValue = ComptimeMemory.Value; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); @@ -349,7 +346,7 @@ pub const Block = struct { src_decl: InternPool.DeclIndex, /// Non zero if a non-inline loop or a runtime conditional have been encountered. /// Stores to comptime variables are only allowed when var.runtime_index <= runtime_index. - runtime_index: Value.RuntimeIndex = .zero, + runtime_index: ComptimeMemory.RuntimeIndex = .zero, inline_block: Zir.Inst.OptionalIndex = .none, comptime_reason: ?*const ComptimeReason = null, @@ -784,45 +781,6 @@ pub const Block = struct { _ = try block.addNoOp(.unreach); } } - - pub fn ownerModule(block: Block) *Package.Module { - const zcu = block.sema.mod; - return zcu.namespacePtr(block.namespace).file_scope.mod; - } - - pub fn startAnonDecl(block: *Block) !WipAnonDecl { - return WipAnonDecl{ - .block = block, - .finished = false, - }; - } - - pub const WipAnonDecl = struct { - block: *Block, - finished: bool, - - pub fn deinit(wad: *WipAnonDecl) void { - wad.* = undefined; - } - - /// `alignment` value of 0 means to use ABI alignment. - pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value, alignment: Alignment) !InternPool.DeclIndex { - const sema = wad.block.sema; - // Do this ahead of time because `createAnonymousDecl` depends on calling - // `type.hasRuntimeBits()`. - _ = try sema.typeHasRuntimeBits(ty); - const new_decl_index = try sema.mod.createAnonymousDecl(wad.block, .{ - .ty = ty, - .val = val, - }); - const new_decl = sema.mod.declPtr(new_decl_index); - new_decl.alignment = alignment; - errdefer sema.mod.abortAnonDecl(new_decl_index); - wad.finished = true; - try sema.mod.finalizeAnonDecl(new_decl_index); - return new_decl_index; - } - }; }; const LabeledBlock = struct { @@ -2117,7 +2075,7 @@ pub fn setupErrorReturnTrace(sema: *Sema, block: *Block, last_arg_index: usize) /// Return the Value corresponding to a given AIR ref, or `null` if it refers to a runtime value. /// InternPool key `variable` is considered a runtime value. /// Generic poison causes `error.GenericPoison` to be returned. -fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { +fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?MutValue { const val = (try sema.resolveValueAllowVariables(inst)) orelse return null; if (val.isGenericPoison()) return error.GenericPoison; if (sema.mod.intern_pool.isVariable(val.toIntern())) return null; @@ -2176,7 +2134,7 @@ fn resolveValueResolveLazy(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value fn resolveValueIntable(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value { const val = (try sema.resolveValue(inst)) orelse return null; if (sema.mod.intern_pool.getBackingAddrTag(val.toIntern())) |addr| switch (addr) { - .decl, .anon_decl, .mut_decl, .comptime_field => return null, + .decl, .anon_decl, .comptime_field => return null, .int => {}, .eu_payload, .opt_payload, .elem, .field => unreachable, }; @@ -3699,6 +3657,7 @@ fn zirMakePtrConst(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro /// type. Otherwise, it may be `null`, and the type will be inferred from `alloc`. 
fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Ref, resolved_alloc_ty: ?Type) CompileError!?InternPool.Index { const mod = sema.mod; + const gpa = sema.gpa; const alloc_ty = resolved_alloc_ty orelse sema.typeOf(alloc); const ptr_info = alloc_ty.ptrInfo(mod); @@ -3734,23 +3693,12 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re // The simple strategy failed: we must create a mutable comptime alloc and // perform all of the runtime store operations at comptime. - - var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl - defer anon_decl.deinit(); - const decl_index = try anon_decl.finish(elem_ty, try mod.undefValue(elem_ty), ptr_info.flags.alignment); - - const decl_ptr = try mod.intern(.{ .ptr = .{ - .ty = alloc_ty.toIntern(), - .addr = .{ .mut_decl = .{ - .decl = decl_index, - .runtime_index = block.runtime_index, - } }, - } }); + const comptime_ptr = try sema.comptime_memory.allocate(gpa, alloc_ty, block.runtime_index); // Maps from pointers into the runtime allocs, to comptime-mutable pointers into the mut decl. - var ptr_mapping = std.AutoHashMap(Air.Inst.Index, InternPool.Index).init(sema.arena); + var ptr_mapping = std.AutoHashMap(Air.Inst.Index, ComptimeMemory.Value).init(sema.arena); try ptr_mapping.ensureTotalCapacity(@intCast(stores.len)); - ptr_mapping.putAssumeCapacity(alloc_inst, decl_ptr); + ptr_mapping.putAssumeCapacity(alloc_inst, comptime_ptr); var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len); for (stores) |store_inst| { @@ -3888,7 +3836,7 @@ fn resolveComptimeKnownAllocValue(sema: *Sema, block: *Block, alloc: Air.Inst.Re } // The value is finalized - load it! - const val = (try sema.pointerDeref(block, .unneeded, Value.fromInterned(decl_ptr), alloc_ty)).?.toIntern(); + const val = (try sema.pointerDeref(block, .unneeded, comptime_ptr.toValue(), alloc_ty)).?.toIntern(); return sema.finishResolveComptimeKnownAllocValue(val, alloc_inst, comptime_info.value); } @@ -5446,10 +5394,18 @@ fn storeToInferredAllocComptime( // There will be only one store_to_inferred_ptr because we are running at comptime. // The alloc will turn into a Decl. 
if (try sema.resolveValue(operand)) |operand_val| { - var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl - defer anon_decl.deinit(); - iac.decl_index = try anon_decl.finish(operand_ty, operand_val, iac.alignment); - try sema.comptime_mutable_decls.append(iac.decl_index); + const gpa = sema.gpa; + const ptr_ty = try sema.ptrType(.{ + .child = operand_ty.toIntern(), + .flags = .{ + .alignment = iac.alignment, + .is_const = iac.is_const, + .address_space = .generic, + }, + }); + const comptime_ptr = try sema.comptime_memory.allocate(gpa, ptr_ty, block.runtime_index); + iac.comptime_memory_value_index = try sema.comptime_memory.addValue(gpa, comptime_ptr); + sema.comptime_memory.store(comptime_ptr, operand_val); return; } @@ -7938,7 +7894,7 @@ fn instantiateGenericCall( .generic_call_decl = block.src_decl.toOptional(), .branch_quota = sema.branch_quota, .branch_count = sema.branch_count, - .comptime_mutable_decls = sema.comptime_mutable_decls, + .comptime_memory = sema.comptime_memory, .comptime_err_ret_trace = sema.comptime_err_ret_trace, }; defer child_sema.deinit(); @@ -30465,7 +30421,6 @@ fn storePtrVal( } const ComptimePtrMutationKit = struct { - mut_decl: InternPool.Key.Ptr.Addr.MutDecl, pointee: union(enum) { opv, /// The pointer type matches the actual comptime Value so a direct @@ -30500,588 +30455,19 @@ fn beginComptimePtrMutation( ptr_val: Value, ptr_elem_ty: Type, ) CompileError!ComptimePtrMutationKit { - const mod = sema.mod; - const ptr = mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr; - switch (ptr.addr) { - .decl, .anon_decl, .int => unreachable, // isComptimeMutablePtr has been checked already - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - return sema.beginComptimePtrMutationInner(block, src, decl.ty, &decl.val, ptr_elem_ty, mut_decl); - }, - .comptime_field => |comptime_field| { - const duped = try sema.arena.create(Value); - duped.* = Value.fromInterned(comptime_field); - return sema.beginComptimePtrMutationInner(block, src, Type.fromInterned(mod.intern_pool.typeOf(comptime_field)), duped, ptr_elem_ty, .{ - .decl = undefined, - .runtime_index = .comptime_field_ptr, - }); - }, - .eu_payload => |eu_ptr| { - const eu_ty = Type.fromInterned(mod.intern_pool.typeOf(eu_ptr)).childType(mod); - var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(eu_ptr), eu_ty); - switch (parent.pointee) { - .opv => unreachable, - .direct => |val_ptr| { - const payload_ty = parent.ty.errorUnionPayload(mod); - if (val_ptr.ip_index == .none and val_ptr.tag() == .eu_payload) { - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &val_ptr.castTag(.eu_payload).?.data }, - .ty = payload_ty, - }; - } else { - // An error union has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the error union from `undef` to `opt_payload`. - - const payload = try sema.arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .eu_payload }, - .data = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))), - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. 
- .reinterpret => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .bad_ptr_ty, - .ty = eu_ty, - }, - } - }, - .opt_payload => |opt_ptr| { - const opt_ty = Type.fromInterned(mod.intern_pool.typeOf(opt_ptr)).childType(mod); - var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(opt_ptr), opt_ty); - switch (parent.pointee) { - .opv => unreachable, - .direct => |val_ptr| { - const payload_ty = parent.ty.optionalChild(mod); - switch (val_ptr.ip_index) { - .none => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &val_ptr.castTag(.opt_payload).?.data }, - .ty = payload_ty, - }, - else => { - const payload_val = switch (mod.intern_pool.indexToKey(val_ptr.ip_index)) { - .undef => try mod.intern(.{ .undef = payload_ty.toIntern() }), - .opt => |opt| switch (opt.val) { - .none => try mod.intern(.{ .undef = payload_ty.toIntern() }), - else => |payload| payload, - }, - else => unreachable, - }; - - // An optional has been initialized to undefined at comptime and now we - // are for the first time setting the payload. We must change the - // representation of the optional from `undef` to `opt_payload`. - - const payload = try sema.arena.create(Value.Payload.SubValue); - payload.* = .{ - .base = .{ .tag = .opt_payload }, - .data = Value.fromInterned(payload_val), - }; - - val_ptr.* = Value.initPayload(&payload.base); - - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .direct = &payload.data }, - .ty = payload_ty, - }; - }, - } - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - .reinterpret => return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .bad_ptr_ty, - .ty = opt_ty, - }, - } - }, - .elem => |elem_ptr| { - const base_elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem_ptr.base)).elemType2(mod); - var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(elem_ptr.base), base_elem_ty); - - switch (parent.pointee) { - .opv => unreachable, - .direct => |val_ptr| switch (parent.ty.zigTypeTag(mod)) { - .Array, .Vector => { - const elem_ty = parent.ty.childType(mod); - const check_len = parent.ty.arrayLenIncludingSentinel(mod); - if ((try sema.typeHasOnePossibleValue(ptr_elem_ty)) != null) { - if (elem_ptr.index > check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - return .{ - .mut_decl = parent.mut_decl, - .pointee = .opv, - .ty = elem_ty, - }; - } - if (elem_ptr.index >= check_len) { - // TODO have the parent include the decl so we can say "declared here" - return sema.fail(block, src, "comptime store of index {d} out of bounds of array length {d}", .{ - elem_ptr.index, check_len, - }); - } - - // We might have a pointer to multiple elements of the array (e.g. a pointer - // to a sub-array). In this case, we just have to reinterpret the relevant - // bytes of the whole array rather than any single element. 
- reinterp_multi_elem: { - if (try sema.typeRequiresComptime(base_elem_ty)) break :reinterp_multi_elem; - if (try sema.typeRequiresComptime(ptr_elem_ty)) break :reinterp_multi_elem; - - const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); - if (elem_abi_size_u64 >= try sema.typeAbiSize(ptr_elem_ty)) break :reinterp_multi_elem; - - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); - return .{ - .mut_decl = parent.mut_decl, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = elem_abi_size * elem_idx, - } }, - .ty = parent.ty, - }; - } - - switch (val_ptr.ip_index) { - .none => switch (val_ptr.tag()) { - .bytes => { - // An array is memory-optimized to store a slice of bytes, but we are about - // to modify an individual field and the representation has to change. - // If we wanted to avoid this, there would need to be special detection - // elsewhere to identify when writing a value to an array element that is stored - // using the `bytes` tag, and handle it without making a call to this function. - const arena = mod.tmp_hack_arena.allocator(); - - const bytes = val_ptr.castTag(.bytes).?.data; - const dest_len = parent.ty.arrayLenIncludingSentinel(mod); - // bytes.len may be one greater than dest_len because of the case when - // assigning `[N:S]T` to `[N]T`. This is allowed; the sentinel is omitted. - assert(bytes.len >= dest_len); - const elems = try arena.alloc(Value, @intCast(dest_len)); - for (elems, 0..) |*elem, i| { - elem.* = try mod.intValue(elem_ty, bytes[i]); - } - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[@intCast(elem_ptr.index)], - ptr_elem_ty, - parent.mut_decl, - ); - }, - .repeated => { - // An array is memory-optimized to store only a single element value, and - // that value is understood to be the same for the entire length of the array. - // However, now we want to modify an individual field and so the - // representation has to change. If we wanted to avoid this, there would - // need to be special detection elsewhere to identify when writing a value to an - // array element that is stored using the `repeated` tag, and handle it - // without making a call to this function. - const arena = mod.tmp_hack_arena.allocator(); - - const repeated_val = try val_ptr.castTag(.repeated).?.data.intern(parent.ty.childType(mod), mod); - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.fromInterned(repeated_val)); - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[@intCast(elem_ptr.index)], - ptr_elem_ty, - parent.mut_decl, - ); - }, - - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &val_ptr.castTag(.aggregate).?.data[@intCast(elem_ptr.index)], - ptr_elem_ty, - parent.mut_decl, - ), - - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { - .undef => { - // An array has been initialized to undefined at comptime and now we - // are for the first time setting an element. We must change the representation - // of the array from `undef` to `array`. 
- const arena = mod.tmp_hack_arena.allocator(); - - const array_len_including_sentinel = - try sema.usizeCast(block, src, parent.ty.arrayLenIncludingSentinel(mod)); - const elems = try arena.alloc(Value, array_len_including_sentinel); - @memset(elems, Value.fromInterned((try mod.intern(.{ .undef = elem_ty.toIntern() })))); - - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - elem_ty, - &elems[@intCast(elem_ptr.index)], - ptr_elem_ty, - parent.mut_decl, - ); - }, - else => unreachable, - }, - } - }, - else => { - if (elem_ptr.index != 0) { - // TODO include a "declared here" note for the decl - return sema.fail(block, src, "out of bounds comptime store of index {d}", .{ - elem_ptr.index, - }); - } - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty, - val_ptr, - ptr_elem_ty, - parent.mut_decl, - ); - }, - }, - .reinterpret => |reinterpret| { - if (!base_elem_ty.hasWellDefinedLayout(mod)) { - // Even though the parent value type has well-defined memory layout, our - // pointer type does not. - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .bad_ptr_ty, - .ty = base_elem_ty, - }; - } - - const elem_abi_size_u64 = try sema.typeAbiSize(base_elem_ty); - const elem_abi_size = try sema.usizeCast(block, src, elem_abi_size_u64); - const elem_idx = try sema.usizeCast(block, src, elem_ptr.index); - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + elem_abi_size * elem_idx, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, - .field => |field_ptr| { - const base_child_ty = Type.fromInterned(mod.intern_pool.typeOf(field_ptr.base)).childType(mod); - const field_index: u32 = @intCast(field_ptr.index); - - var parent = try sema.beginComptimePtrMutation(block, src, Value.fromInterned(field_ptr.base), base_child_ty); - switch (parent.pointee) { - .opv => unreachable, - .direct => |val_ptr| switch (val_ptr.ip_index) { - .empty_struct => { - const duped = try sema.arena.create(Value); - duped.* = val_ptr.*; - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - duped, - ptr_elem_ty, - parent.mut_decl, - ); - }, - .none => switch (val_ptr.tag()) { - .aggregate => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &val_ptr.castTag(.aggregate).?.data[field_index], - ptr_elem_ty, - parent.mut_decl, - ), - .repeated => { - const arena = mod.tmp_hack_arena.allocator(); - - const elems = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - @memset(elems, val_ptr.castTag(.repeated).?.data); - val_ptr.* = try Value.Tag.aggregate.create(arena, elems); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &elems[field_index], - ptr_elem_ty, - parent.mut_decl, - ); - }, - .@"union" => { - const payload = &val_ptr.castTag(.@"union").?.data; - const layout = base_child_ty.containerLayout(mod); - - const tag_type = base_child_ty.unionTagTypeHypothetical(mod); - const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index); - if (layout == .Auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) { - // We need to set the active field of the union. 
- payload.tag = hypothetical_tag; - - const field_ty = parent.ty.structFieldType(field_index, mod); - return beginComptimePtrMutationInner( - sema, - block, - src, - field_ty, - &payload.val, - ptr_elem_ty, - parent.mut_decl, - ); - } else { - // Writing to a different field (a different or unknown tag is active) requires reinterpreting - // memory of the entire union, which requires knowing its abiSize. - try sema.resolveTypeLayout(parent.ty); - - // This union value no longer has a well-defined tag type. - // The reinterpretation will read it back out as .none. - payload.val = try payload.val.unintern(sema.arena, mod); - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .reinterpret = .{ - .val_ptr = val_ptr, - .byte_offset = 0, - .write_packed = layout == .Packed, - } }, - .ty = parent.ty, - }; - } - }, - .slice => switch (field_index) { - Value.slice_ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.slicePtrFieldType(mod), - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.mut_decl, - ), - - Value.slice_len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.mut_decl, - ), - - else => unreachable, - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val_ptr.toIntern())) { - .undef => { - // A struct or union has been initialized to undefined at comptime and now we - // are for the first time setting a field. We must change the representation - // of the struct/union from `undef` to `struct`/`union`. - const arena = mod.tmp_hack_arena.allocator(); - - switch (parent.ty.zigTypeTag(mod)) { - .Struct => { - const fields = try arena.alloc(Value, parent.ty.structFieldCount(mod)); - for (fields, 0..) 
|*field, i| field.* = Value.fromInterned((try mod.intern(.{ - .undef = parent.ty.structFieldType(i, mod).toIntern(), - }))); - - val_ptr.* = try Value.Tag.aggregate.create(arena, fields); - - return beginComptimePtrMutationInner( - sema, - block, - src, - parent.ty.structFieldType(field_index, mod), - &fields[field_index], - ptr_elem_ty, - parent.mut_decl, - ); - }, - .Union => { - const payload = try arena.create(Value.Payload.Union); - const tag_ty = parent.ty.unionTagTypeHypothetical(mod); - const payload_ty = parent.ty.structFieldType(field_index, mod); - payload.* = .{ .data = .{ - .tag = try mod.enumValueFieldIndex(tag_ty, field_index), - .val = Value.fromInterned((try mod.intern(.{ .undef = payload_ty.toIntern() }))), - } }; - - val_ptr.* = Value.initPayload(&payload.base); - - return beginComptimePtrMutationInner( - sema, - block, - src, - payload_ty, - &payload.data.val, - ptr_elem_ty, - parent.mut_decl, - ); - }, - .Pointer => { - assert(parent.ty.isSlice(mod)); - const ptr_ty = parent.ty.slicePtrFieldType(mod); - val_ptr.* = try Value.Tag.slice.create(arena, .{ - .ptr = Value.fromInterned((try mod.intern(.{ .undef = ptr_ty.toIntern() }))), - .len = Value.fromInterned((try mod.intern(.{ .undef = .usize_type }))), - }); - - switch (field_index) { - Value.slice_ptr_index => return beginComptimePtrMutationInner( - sema, - block, - src, - ptr_ty, - &val_ptr.castTag(.slice).?.data.ptr, - ptr_elem_ty, - parent.mut_decl, - ), - Value.slice_len_index => return beginComptimePtrMutationInner( - sema, - block, - src, - Type.usize, - &val_ptr.castTag(.slice).?.data.len, - ptr_elem_ty, - parent.mut_decl, - ), - - else => unreachable, - } - }, - else => unreachable, - } - }, - else => unreachable, - }, - }, - .reinterpret => |reinterpret| { - const field_offset_u64 = base_child_ty.structFieldOffset(field_index, mod); - const field_offset = try sema.usizeCast(block, src, field_offset_u64); - return ComptimePtrMutationKit{ - .mut_decl = parent.mut_decl, - .pointee = .{ .reinterpret = .{ - .val_ptr = reinterpret.val_ptr, - .byte_offset = reinterpret.byte_offset + field_offset, - } }, - .ty = parent.ty, - }; - }, - .bad_decl_ty, .bad_ptr_ty => return parent, - } - }, + if (true) { + // The previous implementation operated on the InternPool pointer value representation, + // which is an immutable data structure. Instead, the new implementation needs to + // operate on ComptimeMemory, which is a mutable data structure. + _ = sema; + _ = block; + _ = src; + _ = ptr_val; + _ = ptr_elem_ty; + @panic("TODO implement beginComptimePtrMutation"); } } -fn beginComptimePtrMutationInner( - sema: *Sema, - block: *Block, - src: LazySrcLoc, - decl_ty: Type, - decl_val: *Value, - ptr_elem_ty: Type, - mut_decl: InternPool.Key.Ptr.Addr.MutDecl, -) CompileError!ComptimePtrMutationKit { - const mod = sema.mod; - const target = mod.getTarget(); - const coerce_ok = (try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_ty, true, target, src, src)) == .ok; - - decl_val.* = try decl_val.unintern(sema.arena, mod); - - if (coerce_ok) { - return ComptimePtrMutationKit{ - .mut_decl = mut_decl, - .pointee = .{ .direct = decl_val }, - .ty = decl_ty, - }; - } - - // Handle the case that the decl is an array and we're actually trying to point to an element. 
- if (decl_ty.isArrayOrVector(mod)) { - const decl_elem_ty = decl_ty.childType(mod); - if ((try sema.coerceInMemoryAllowed(block, ptr_elem_ty, decl_elem_ty, true, target, src, src)) == .ok) { - return ComptimePtrMutationKit{ - .mut_decl = mut_decl, - .pointee = .{ .direct = decl_val }, - .ty = decl_ty, - }; - } - } - - if (!decl_ty.hasWellDefinedLayout(mod)) { - return ComptimePtrMutationKit{ - .mut_decl = mut_decl, - .pointee = .bad_decl_ty, - .ty = decl_ty, - }; - } - if (!ptr_elem_ty.hasWellDefinedLayout(mod)) { - return ComptimePtrMutationKit{ - .mut_decl = mut_decl, - .pointee = .bad_ptr_ty, - .ty = ptr_elem_ty, - }; - } - return ComptimePtrMutationKit{ - .mut_decl = mut_decl, - .pointee = .{ .reinterpret = .{ - .val_ptr = decl_val, - .byte_offset = 0, - } }, - .ty = decl_ty, - }; -} - const TypedValueAndOffset = struct { tv: TypedValue, byte_offset: usize, @@ -31121,13 +30507,11 @@ fn beginComptimePtrLoad( var deref: ComptimePtrLoadKit = switch (ip.indexToKey(ptr_val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { - .decl, .mut_decl => blk: { + .decl => blk: { const decl_index = switch (ptr.addr) { .decl => |decl| decl, - .mut_decl => |mut_decl| mut_decl.decl, else => unreachable, }; - const is_mutable = ptr.addr == .mut_decl; const decl = mod.declPtr(decl_index); const decl_tv = try decl.typedValue(); if (decl.val.getVariable(mod) != null) return error.RuntimeLoad; @@ -31136,7 +30520,7 @@ fn beginComptimePtrLoad( break :blk ComptimePtrLoadKit{ .parent = if (layout_defined) .{ .tv = decl_tv, .byte_offset = 0 } else null, .pointee = decl_tv, - .is_mutable = is_mutable, + .is_mutable = false, .ty_without_well_defined_layout = if (!layout_defined) decl.ty else null, }; }, @@ -35280,7 +34664,7 @@ fn resolveLazyValue(sema: *Sema, val: Value) CompileError!Value { }, .ptr => |ptr| { switch (ptr.addr) { - .decl, .mut_decl, .anon_decl => return val, + .decl, .anon_decl => return val, .comptime_field => |field_val| { const resolved_field_val = (try sema.resolveLazyValue(Value.fromInterned(field_val))).toIntern(); @@ -35635,8 +35019,8 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp var analysis_arena = std.heap.ArenaAllocator.init(gpa); defer analysis_arena.deinit(); - var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -35653,7 +35037,7 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -35714,11 +35098,6 @@ fn semaBackingIntType(mod: *Module, struct_type: InternPool.Key.StructType) Comp const backing_int_ty = try mod.intType(.unsigned, @intCast(fields_bit_sum)); struct_type.backingIntType(ip).* = backing_int_ty.toIntern(); } - - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } } fn checkBackingIntType(sema: *Sema, block: *Block, src: LazySrcLoc, backing_int_ty: Type, fields_bit_sum: u64) CompileError!void { @@ -36460,8 +35839,8 @@ fn semaStructFields( }, }; - var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa); - defer 
comptime_mutable_decls.deinit(); + var comptime_memory: ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -36478,7 +35857,7 @@ fn semaStructFields( .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36693,11 +36072,6 @@ fn semaStructFields( struct_type.clearTypesWip(ip); if (!any_inits) struct_type.setHaveFieldInits(ip); - - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } } // This logic must be kept in sync with `semaStructFields` @@ -36718,8 +36092,8 @@ fn semaStructFieldInits( const zir_index = struct_type.zir_index.resolve(ip); const fields_len, const small, var extra_index = structZirInfo(zir, zir_index); - var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -36736,7 +36110,7 @@ fn semaStructFieldInits( .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36849,11 +36223,6 @@ fn semaStructFieldInits( struct_type.field_inits.get(ip)[field_i] = field_init; } } - - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } } fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.UnionType) CompileError!void { @@ -36905,8 +36274,8 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un const decl = mod.declPtr(decl_index); - var comptime_mutable_decls = std.ArrayList(InternPool.DeclIndex).init(gpa); - defer comptime_mutable_decls.deinit(); + var comptime_memory: ComptimeMemory = .{}; + defer comptime_memory.deinit(gpa); var comptime_err_ret_trace = std.ArrayList(Module.SrcLoc).init(gpa); defer comptime_err_ret_trace.deinit(); @@ -36923,7 +36292,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un .fn_ret_ty = Type.void, .fn_ret_ty_ies = null, .owner_func_index = .none, - .comptime_mutable_decls = &comptime_mutable_decls, + .comptime_memory = &comptime_memory, .comptime_err_ret_trace = &comptime_err_ret_trace, }; defer sema.deinit(); @@ -36944,11 +36313,6 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Key.Un try sema.analyzeBody(&block_scope, body); } - for (comptime_mutable_decls.items) |ct_decl_index| { - const ct_decl = mod.declPtr(ct_decl_index); - _ = try ct_decl.internValue(mod); - } - var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; @@ -37567,7 +36931,6 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value { .ptr_decl, .ptr_anon_decl, .ptr_anon_decl_aligned, - .ptr_mut_decl, .ptr_comptime_field, .ptr_int, .ptr_eu_payload, @@ -37831,10 +37194,12 @@ fn isComptimeKnown( fn analyzeComptimeAlloc( sema: 
*Sema, block: *Block, + inst: Zir.Inst.Index, var_type: Type, alignment: Alignment, ) CompileError!Air.Inst.Ref { const mod = sema.mod; + const gpa = sema.gpa; // Needed to make an anon decl with type `var_type` (the `finish()` call below). _ = try sema.typeHasOnePossibleValue(var_type); @@ -37847,28 +37212,10 @@ fn analyzeComptimeAlloc( }, }); - var anon_decl = try block.startAnonDecl(); // TODO: comptime value mutation without Decl - defer anon_decl.deinit(); - - const decl_index = try anon_decl.finish( - var_type, - // There will be stores before the first load, but they may be to sub-elements or - // sub-fields. So we need to initialize with undef to allow the mechanism to expand - // into fields/elements and have those overridden with stored values. - Value.fromInterned((try mod.intern(.{ .undef = var_type.toIntern() }))), - alignment, - ); - const decl = mod.declPtr(decl_index); - decl.alignment = alignment; - - try sema.comptime_mutable_decls.append(decl_index); - return Air.internedToRef((try mod.intern(.{ .ptr = .{ - .ty = ptr_type.toIntern(), - .addr = .{ .mut_decl = .{ - .decl = decl_index, - .runtime_index = block.runtime_index, - } }, - } }))); + const comptime_ptr = try sema.comptime_memory.allocate(gpa, ptr_type, block.runtime_index); + try sema.value_map_values.append(gpa, comptime_ptr); + try sema.comptime_memory.value_map.put(gpa, inst, {}); + return .mutable_comptime; } /// The places where a user can specify an address space attribute @@ -38871,3 +38218,24 @@ fn ptrType(sema: *Sema, info: InternPool.Key.PtrType) CompileError!Type { } return sema.mod.ptrType(info); } + +fn fieldValue(sema: *Sema, val: MutValue, index: usize) !Value { + const zcu = sema.mod; + const cm = sema.comptime_memory; + return switch (val.tag) { + .interned => return ConstValue.fromInterned(val.repr.ip_index).fieldValue(zcu, index), + .aggregate => { + const agg = cm.aggregate_list.items[val.repr.aggregate]; + assert(index < agg.start + agg.len); + return cm.value_list.get(agg.start + index); + }, + .@"union" => { + const un = cm.union_list.items[val.repr.@"union"]; + // TODO assert the tag is correct + return cm.value_list.get(un.val); + }, + else => unreachable, + }; +} + + diff --git a/src/Sema/ComptimeMemory.zig b/src/Sema/ComptimeMemory.zig new file mode 100644 index 0000000000..059703b7d2 --- /dev/null +++ b/src/Sema/ComptimeMemory.zig @@ -0,0 +1,63 @@ +/// The index points into `value_map_values`. +value_map: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, void) = .{}, +value_map_values: std.MultiArrayList(Value) = .{}, + +// The following fields are used by the untagged union of Value: + +/// Corresponds to `Value.Index` +value_list: std.MultiArrayList(Value) = .{}, +/// Corresponds to `Slice.Index` +slice_list: std.ArrayListUnmanaged(Slice) = .{}, +/// Corresponds to `Bytes.Index` +bytes_list: std.ArrayListUnmanaged(Bytes) = .{}, +/// Corresponds to `Aggregate.Index` +aggregate_list: std.ArrayListUnmanaged(Aggregate) = .{}, +/// Corresponds to `Union.Index` +union_list: std.ArrayListUnmanaged(Union) = .{}, + +pub const Value = @import("ComptimeMemory/Value.zig"); + +pub const Bytes = struct { + /// The full slice of data owned by the allocation backing this value. + memory_island: []u8, + start: usize, + /// Includes the sentinel, if any. 
+    len: usize,
+
+    pub const Index = enum(u32) { _ };
+};
+
+pub const Slice = struct {
+    ptr: Value,
+    len: Value,
+
+    pub const Index = enum(u32) { _ };
+};
+
+pub const Aggregate = struct {
+    start: Value.Index,
+    len: u32,
+
+    pub const Index = enum(u32) { _ };
+};
+
+pub const Union = struct {
+    /// none means undefined tag.
+    tag: Value.OptionalIndex,
+    val: Value,
+
+    pub const Index = enum(u32) { _ };
+};
+
+pub const RuntimeIndex = enum(u32) {
+    zero = 0,
+    comptime_field_ptr = std.math.maxInt(u32),
+    _,
+
+    pub fn increment(ri: *RuntimeIndex) void {
+        ri.* = @enumFromInt(@intFromEnum(ri.*) + 1);
+    }
+};
+
+const std = @import("std");
+const Zir = @import("../Zir.zig");
diff --git a/src/Sema/ComptimeMemory/Value.zig b/src/Sema/ComptimeMemory/Value.zig
new file mode 100644
index 0000000000..2971ad63b1
--- /dev/null
+++ b/src/Sema/ComptimeMemory/Value.zig
@@ -0,0 +1,66 @@
+ty: InternPool.Index,
+tag: Tag,
+repr: Repr,
+
+comptime {
+    switch (builtin.mode) {
+        .ReleaseFast, .ReleaseSmall => {
+            assert(@sizeOf(InternPool.Index) == 4);
+            assert(@sizeOf(Repr) == 4);
+            assert(@sizeOf(Tag) == 1);
+        },
+        .Debug, .ReleaseSafe => {},
+    }
+}
+
+pub const Tag = enum(u8) {
+    /// Represents a value stored in `InternPool`.
+    interned,
+    /// Represents an error union value that is not an error.
+    /// The value is the payload value.
+    eu_payload,
+    /// Represents an optional value that is not null.
+    /// The value is the payload value.
+    opt_payload,
+    /// The type must be an array, vector, or tuple. The element is this sub
+    /// value repeated according to the length provided by the type.
+    repeated,
+    /// The type must be a slice pointer type.
+    slice,
+    /// The value is an index into the `ComptimeMemory` bytes list.
+    bytes,
+    /// An instance of a struct, array, or vector.
+    /// Each element/field is stored as a `Value`.
+    /// In the case of sentinel-terminated arrays, the sentinel value *is* stored,
+    /// so the slice length will be one more than the type's array length.
+    aggregate,
+    /// An instance of a union.
+ @"union", +}; + +pub const Repr = union { + ip_index: InternPool.Index, + eu_payload: Index, + opt_payload: Index, + repeated: Index, + slice: ComptimeMemory.Slice.Index, + bytes: ComptimeMemory.Bytes.Index, + aggregate: ComptimeMemory.Aggregate.Index, + @"union": ComptimeMemory.Union.Index, +}; + +pub const Index = enum(u32) { _ }; + +pub const OptionalIndex = enum(u32) { + none = std.math.maxInt(u32), + _, +}; + +const builtin = @import("builtin"); +const std = @import("std"); +const assert = std.debug.assert; +const Value = @This(); +const ConstValue = @import("../../Value.zig"); + +const InternPool = @import("../../InternPool.zig"); +const ComptimeMemory = @import("../ComptimeMemory.zig"); diff --git a/src/TypedValue.zig b/src/TypedValue.zig index 54c9bb791f..2690068c10 100644 --- a/src/TypedValue.zig +++ b/src/TypedValue.zig @@ -1,6 +1,6 @@ const std = @import("std"); const Type = @import("type.zig").Type; -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Module = @import("Module.zig"); const Allocator = std.mem.Allocator; const TypedValue = @This(); @@ -329,14 +329,6 @@ pub fn print( .val = Value.fromInterned(decl_val), }, writer, level - 1, mod); }, - .mut_decl => |mut_decl| { - const decl = mod.declPtr(mut_decl.decl); - if (level == 0) return writer.print("(mut decl '{}')", .{decl.name.fmt(ip)}); - return print(.{ - .ty = decl.ty, - .val = decl.val, - }, writer, level - 1, mod); - }, .comptime_field => |field_val_ip| { return print(.{ .ty = Type.fromInterned(ip.typeOf(field_val_ip)), diff --git a/src/Value.zig b/src/Value.zig new file mode 100644 index 0000000000..316a757662 --- /dev/null +++ b/src/Value.zig @@ -0,0 +1,3787 @@ +const std = @import("std"); +const builtin = @import("builtin"); +const Type = @import("type.zig").Type; +const assert = std.debug.assert; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; +const Target = std.Target; +const Allocator = std.mem.Allocator; +const Module = @import("Module.zig"); +const TypedValue = @import("TypedValue.zig"); +const Sema = @import("Sema.zig"); +const InternPool = @import("InternPool.zig"); +const Value = @This(); + +ip_index: InternPool.Index, + +pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { + _ = val; + _ = fmt; + _ = options; + _ = writer; + @compileError("do not use format values directly; use either fmtDebug or fmtValue"); +} + +/// This is a debug function. In order to print values in a meaningful way +/// we also need access to the type. 
+pub fn dump( + start_val: Value, + comptime fmt: []const u8, + _: std.fmt.FormatOptions, + out_stream: anytype, +) !void { + comptime assert(fmt.len == 0); + if (start_val.ip_index != .none) { + try out_stream.print("(interned: {})", .{start_val.toIntern()}); + return; + } + var val = start_val; + while (true) switch (val.tag()) { + .aggregate => { + return out_stream.writeAll("(aggregate)"); + }, + .@"union" => { + return out_stream.writeAll("(union value)"); + }, + .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), + .repeated => { + try out_stream.writeAll("(repeated) "); + val = val.castTag(.repeated).?.data; + }, + .eu_payload => { + try out_stream.writeAll("(eu_payload) "); + val = val.castTag(.repeated).?.data; + }, + .opt_payload => { + try out_stream.writeAll("(opt_payload) "); + val = val.castTag(.repeated).?.data; + }, + .slice => return out_stream.writeAll("(slice)"), + }; +} + +pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) { + return .{ .data = val }; +} + +pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) { + return .{ .data = .{ + .tv = .{ .ty = ty, .val = val }, + .mod = mod, + } }; +} + +/// Asserts that the value is representable as an array of bytes. +/// Returns the value as a null-terminated string stored in the InternPool. +pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { + const ip = &mod.intern_pool; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| enum_literal, + .slice => |slice| try arrayToIpString(val, Value.fromInterned(slice.len).toUnsignedInt(mod), mod), + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), + .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), + .repeated_elem => |elem| { + const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); + try ip.string_bytes.appendNTimes(mod.gpa, byte, len); + return ip.getOrPutTrailingString(mod.gpa, len); + }, + }, + else => unreachable, + }; +} + +/// Asserts that the value is representable as an array of bytes. +/// Copies the value into a freshly allocated slice of memory, which is owned by the caller. +pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), + .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(mod), allocator, mod), + .aggregate => |aggregate| switch (aggregate.storage) { + .bytes => |bytes| try allocator.dupe(u8, bytes), + .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), + .repeated_elem => |elem| { + const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod))); + const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod)))); + @memset(result, byte); + return result; + }, + }, + else => unreachable, + }; +} + +fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { + const result = try allocator.alloc(u8, @as(usize, @intCast(len))); + for (result, 0..) 
|*elem, i| { + const elem_val = try val.elemValue(mod, i); + elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); + } + return result; +} + +fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { + const gpa = mod.gpa; + const ip = &mod.intern_pool; + const len = @as(usize, @intCast(len_u64)); + try ip.string_bytes.ensureUnusedCapacity(gpa, len); + for (0..len) |i| { + // I don't think elemValue has the possibility to affect ip.string_bytes. Let's + // assert just to be sure. + const prev = ip.string_bytes.items.len; + const elem_val = try val.elemValue(mod, i); + assert(ip.string_bytes.items.len == prev); + const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); + ip.string_bytes.appendAssumeCapacity(byte); + } + return ip.getOrPutTrailingString(gpa, len); +} + +pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return val.ip_index; + return intern(val, ty, mod); +} + +pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { + if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); + const ip = &mod.intern_pool; + switch (val.tag()) { + .eu_payload => { + const pl = val.castTag(.eu_payload).?.data; + return mod.intern(.{ .error_union = .{ + .ty = ty.toIntern(), + .val = .{ .payload = try pl.intern(ty.errorUnionPayload(mod), mod) }, + } }); + }, + .opt_payload => { + const pl = val.castTag(.opt_payload).?.data; + return mod.intern(.{ .opt = .{ + .ty = ty.toIntern(), + .val = try pl.intern(ty.optionalChild(mod), mod), + } }); + }, + .slice => { + const pl = val.castTag(.slice).?.data; + return mod.intern(.{ .slice = .{ + .ty = ty.toIntern(), + .len = try pl.len.intern(Type.usize, mod), + .ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod), + } }); + }, + .bytes => { + const pl = val.castTag(.bytes).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .bytes = pl }, + } }); + }, + .repeated => { + const pl = val.castTag(.repeated).?.data; + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .repeated_elem = try pl.intern(ty.childType(mod), mod) }, + } }); + }, + .aggregate => { + const len = @as(usize, @intCast(ty.arrayLen(mod))); + const old_elems = val.castTag(.aggregate).?.data[0..len]; + const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); + defer mod.gpa.free(new_elems); + const ty_key = ip.indexToKey(ty.toIntern()); + for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| + new_elem.* = try old_elem.intern(switch (ty_key) { + .struct_type => ty.structFieldType(field_i, mod), + .anon_struct_type => |info| Type.fromInterned(info.types.get(ip)[field_i]), + inline .array_type, .vector_type => |info| Type.fromInterned(info.child), + else => unreachable, + }, mod); + return mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = new_elems }, + } }); + }, + .@"union" => { + const pl = val.castTag(.@"union").?.data; + if (pl.tag) |pl_tag| { + return mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = try pl_tag.intern(ty.unionTagTypeHypothetical(mod), mod), + .val = try pl.val.intern(ty.unionFieldType(pl_tag, mod).?, mod), + } }); + } else { + return mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = .none, + .val = try pl.val.intern(try ty.unionBackingType(mod), mod), + } }); + } + }, + } +} + +pub fn fromInterned(i: InternPool.Index) Value { + assert(i != .none); + return .{ .ip_index = i }; +} + +pub fn toIntern(val: Value) InternPool.Index { + assert(val.ip_index != .none); + return val.ip_index; +} + +/// Asserts that the value is representable as a type. +pub fn toType(self: Value) Type { + return Type.fromInterned(self.toIntern()); +} + +pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { + const ip = &mod.intern_pool; + return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_literal => |enum_literal| { + const field_index = ty.enumFieldIndex(enum_literal, mod).?; + return switch (ip.indexToKey(ty.toIntern())) { + // Assume it is already an integer and return it directly. + .simple_type, .int_type => val, + .enum_type => |enum_type| if (enum_type.values.len != 0) + Value.fromInterned(enum_type.values.get(ip)[field_index]) + else // Field index and integer values are the same. + mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index), + else => unreachable, + }; + }, + .enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)), + else => unreachable, + }; +} + +/// Asserts the value is an integer. +pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { + return val.toBigIntAdvanced(space, mod, null) catch unreachable; +} + +/// Asserts the value is an integer. 
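+/// When `opt_sema` is non-null, lazy alignment/size values are resolved through it;
+/// otherwise they must already be resolvable without a `Sema`.
+/// Illustrative call (assumes `val` is an integer value and a `sema: *Sema` is in scope):
+///
+///     var space: Value.BigIntSpace = undefined;
+///     const big = try val.toBigIntAdvanced(&space, sema.mod, sema);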
+pub fn toBigIntAdvanced( + val: Value, + space: *BigIntSpace, + mod: *Module, + opt_sema: ?*Sema, +) Module.CompileError!BigIntConst { + return switch (val.toIntern()) { + .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), + .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), + .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .u64, .i64, .big_int => int.storage.toBigInt(space), + .lazy_align, .lazy_size => |ty| { + if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty)); + const x = switch (int.storage) { + else => unreachable, + .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), + .lazy_size => Type.fromInterned(ty).abiSize(mod), + }; + return BigIntMutable.init(&space.limbs, x).toConst(); + }, + }, + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema), + .opt, .ptr => BigIntMutable.init( + &space.limbs, + (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, + ).toConst(), + else => unreachable, + }, + }; +} + +pub fn isFuncBody(val: Value, mod: *Module) bool { + return mod.intern_pool.isFuncBody(val.toIntern()); +} + +pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { + .func => |x| x, + else => null, + } else null; +} + +pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { + .extern_func => |extern_func| extern_func, + else => null, + } else null; +} + +pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { + return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { + .variable => |variable| variable, + else => null, + } else null; +} + +/// If the value fits in a u64, return it, otherwise null. +/// Asserts not undefined. +pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { + return getUnsignedIntAdvanced(val, mod, null) catch unreachable; +} + +/// If the value fits in a u64, return it, otherwise null. +/// Asserts not undefined. 
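+/// Pointer values with `elem` or `field` addresses are followed, adding the
+/// corresponding element or field byte offset to the base address.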
+pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { + return switch (val.toIntern()) { + .undef => unreachable, + .bool_false => 0, + .bool_true => 1, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(u64) catch null, + .u64 => |x| x, + .i64 => |x| std.math.cast(u64, x), + .lazy_align => |ty| if (opt_sema) |sema| + (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0) + else + Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), + .lazy_size => |ty| if (opt_sema) |sema| + (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar + else + Type.fromInterned(ty).abiSize(mod), + }, + .ptr => |ptr| switch (ptr.addr) { + .int => |int| Value.fromInterned(int).getUnsignedIntAdvanced(mod, opt_sema), + .elem => |elem| { + const base_addr = (try Value.fromInterned(elem.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod); + return base_addr + elem.index * elem_ty.abiSize(mod); + }, + .field => |field| { + const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; + const struct_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base)).childType(mod); + if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); + return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod); + }, + else => null, + }, + .opt => |opt| switch (opt.val) { + .none => 0, + else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), + }, + else => null, + }, + }; +} + +/// Asserts the value is an integer and it fits in a u64 +pub fn toUnsignedInt(val: Value, mod: *Module) u64 { + return getUnsignedInt(val, mod).?; +} + +/// Asserts the value is an integer and it fits in a u64 +pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { + return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; +} + +/// Asserts the value is an integer and it fits in a i64 +pub fn toSignedInt(val: Value, mod: *Module) i64 { + return switch (val.toIntern()) { + .bool_false => 0, + .bool_true => 1, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.to(i64) catch unreachable, + .i64 => |x| x, + .u64 => |x| @intCast(x), + .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)), + .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)), + }, + else => unreachable, + }, + }; +} + +pub fn toBool(val: Value) bool { + return switch (val.toIntern()) { + .bool_true => true, + .bool_false => false, + else => unreachable, + }; +} + +fn isDeclRef(val: Value, mod: *Module) bool { + var check = val; + while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .comptime_field, .anon_decl => return true, + .eu_payload, .opt_payload => |base| check = Value.fromInterned(base), + .elem, .field => |base_index| check = Value.fromInterned(base_index.base), + .int => return false, + }, + else => return false, + }; +} + +/// Write a Value's contents to `buffer`. +/// +/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past +/// the end of the value in memory. 
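+/// Returns `error.ReinterpretDeclRef` when the value contains a pointer to a decl
+/// (its runtime address is unknown) and `error.IllDefinedMemoryLayout` for types
+/// without a well-defined in-memory layout, such as auto-layout structs and slices.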
+pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ + ReinterpretDeclRef, + IllDefinedMemoryLayout, + Unimplemented, + OutOfMemory, +}!void { + const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); + if (val.isUndef(mod)) { + const size: usize = @intCast(ty.abiSize(mod)); + @memset(buffer[0..size], 0xaa); + return; + } + const ip = &mod.intern_pool; + switch (ty.zigTypeTag(mod)) { + .Void => {}, + .Bool => { + buffer[0] = @intFromBool(val.toBool()); + }, + .Int, .Enum => { + const int_info = ty.intInfo(mod); + const bits = int_info.bits; + const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); + + var bigint_buffer: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buffer, mod); + bigint.writeTwosComplement(buffer[0..byte_count], endian); + }, + .Float => switch (ty.floatBits(target)) { + 16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian), + else => unreachable, + }, + .Array => { + const len = ty.arrayLen(mod); + const elem_ty = ty.childType(mod); + const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod))); + var elem_i: usize = 0; + var buf_off: usize = 0; + while (elem_i < len) : (elem_i += 1) { + const elem_val = try val.elemValue(mod, elem_i); + try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); + buf_off += elem_size; + } + }, + .Vector => { + // We use byte_count instead of abi_size here, so that any padding bytes + // follow the data bytes, on both big- and little-endian systems. 
+ const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + }, + .Struct => { + const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout; + switch (struct_type.layout) { + .Auto => return error.IllDefinedMemoryLayout, + .Extern => for (0..struct_type.field_types.len) |i| { + const off: usize = @intCast(ty.structFieldOffset(i, mod)); + const field_val = switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => { + buffer[off] = val.castTag(.bytes).?.data[i]; + continue; + }, + .aggregate => val.castTag(.aggregate).?.data[i], + .repeated => val.castTag(.repeated).?.data, + else => unreachable, + }, + else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) { + .bytes => |bytes| { + buffer[off] = bytes[i]; + continue; + }, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }), + }; + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + try writeToMemory(field_val, field_ty, mod, buffer[off..]); + }, + .Packed => { + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + }, + } + }, + .ErrorSet => { + const bits = mod.errorSetBits(); + const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); + + const name = switch (ip.indexToKey(val.toIntern())) { + .err => |err| err.name, + .error_union => |error_union| error_union.val.err_name, + else => unreachable, + }; + var bigint_buffer: BigIntSpace = undefined; + const bigint = BigIntMutable.init( + &bigint_buffer.limbs, + mod.global_error_set.getIndex(name).?, + ).toConst(); + bigint.writeTwosComplement(buffer[0..byte_count], endian); + }, + .Union => switch (ty.containerLayout(mod)) { + .Auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already + .Extern => { + if (val.unionTag(mod)) |union_tag| { + const union_obj = mod.typeToUnion(ty).?; + const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; + const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]); + const field_val = try val.fieldValue(mod, field_index); + const byte_count = @as(usize, @intCast(field_type.abiSize(mod))); + return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]); + } else { + const backing_ty = try ty.unionBackingType(mod); + const byte_count: usize = @intCast(backing_ty.abiSize(mod)); + return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]); + } + }, + .Packed => { + const backing_ty = try ty.unionBackingType(mod); + const byte_count: usize = @intCast(backing_ty.abiSize(mod)); + return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); + }, + }, + .Pointer => { + if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; + return val.writeToMemory(Type.usize, mod, buffer); + }, + .Optional => { + if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; + const child = ty.optionalChild(mod); + const opt_val = val.optionalValue(mod); + if (opt_val) |some| { + return some.writeToMemory(child, mod, buffer); + } else { + return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer); + } + }, + else => return error.Unimplemented, + } +} + +/// Write a Value's contents to `buffer`. 
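+/// The value is written starting `bit_offset` bits into the packed representation.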
+/// +/// Both the start and the end of the provided buffer must be tight, since +/// big-endian packed memory layouts start at the end of the buffer. +pub fn writeToPackedMemory( + val: Value, + ty: Type, + mod: *Module, + buffer: []u8, + bit_offset: usize, +) error{ ReinterpretDeclRef, OutOfMemory }!void { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); + if (val.isUndef(mod)) { + const bit_size = @as(usize, @intCast(ty.bitSize(mod))); + std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); + return; + } + switch (ty.zigTypeTag(mod)) { + .Void => {}, + .Bool => { + const byte_index = switch (endian) { + .little => bit_offset / 8, + .big => buffer.len - bit_offset / 8 - 1, + }; + if (val.toBool()) { + buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); + } else { + buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); + } + }, + .Int, .Enum => { + if (buffer.len == 0) return; + const bits = ty.intInfo(mod).bits; + if (bits == 0) return; + + switch (ip.indexToKey((try val.intFromEnum(ty, mod)).toIntern()).int.storage) { + inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian), + .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian), + .lazy_align => |lazy_align| { + const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits(0); + std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); + }, + .lazy_size => |lazy_size| { + const num = Type.fromInterned(lazy_size).abiSize(mod); + std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); + }, + } + }, + .Float => switch (ty.floatBits(target)) { + 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian), + 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian), + 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian), + 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian), + 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian), + else => unreachable, + }, + .Vector => { + const elem_ty = ty.childType(mod); + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); + const len = @as(usize, @intCast(ty.arrayLen(mod))); + + var bits: u16 = 0; + var elem_i: usize = 0; + while (elem_i < len) : (elem_i += 1) { + // On big-endian systems, LLVM reverses the element order of vectors by default + const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i; + const elem_val = try val.elemValue(mod, tgt_elem_i); + try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits); + bits += elem_bit_size; + } + }, + .Struct => { + const struct_type = ip.indexToKey(ty.toIntern()).struct_type; + // Sema is supposed to have emitted a compile error already in the case of Auto, + // and Extern is handled in non-packed writeToMemory. 
+ assert(struct_type.layout == .Packed); + var bits: u16 = 0; + for (0..struct_type.field_types.len) |i| { + const field_val = switch (val.ip_index) { + .none => switch (val.tag()) { + .bytes => unreachable, + .aggregate => val.castTag(.aggregate).?.data[i], + .repeated => val.castTag(.repeated).?.data, + else => unreachable, + }, + else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) { + .bytes => unreachable, + .elems => |elems| elems[i], + .repeated_elem => |elem| elem, + }), + }; + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_bits: u16 = @intCast(field_ty.bitSize(mod)); + try field_val.writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits); + bits += field_bits; + } + }, + .Union => { + const union_obj = mod.typeToUnion(ty).?; + switch (union_obj.getLayout(ip)) { + .Auto, .Extern => unreachable, // Handled in non-packed writeToMemory + .Packed => { + if (val.unionTag(mod)) |union_tag| { + const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; + const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); + const field_val = try val.fieldValue(mod, field_index); + return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); + } else { + const backing_ty = try ty.unionBackingType(mod); + return val.unionValue(mod).writeToPackedMemory(backing_ty, mod, buffer, bit_offset); + } + }, + } + }, + .Pointer => { + assert(!ty.isSlice(mod)); // No well defined layout. + if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; + return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); + }, + .Optional => { + assert(ty.isPtrLikeOptional(mod)); + const child = ty.optionalChild(mod); + const opt_val = val.optionalValue(mod); + if (opt_val) |some| { + return some.writeToPackedMemory(child, mod, buffer, bit_offset); + } else { + return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset); + } + }, + else => @panic("TODO implement writeToPackedMemory for more types"), + } +} + +/// Load a Value from the contents of `buffer`. +/// +/// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past +/// the end of the value in memory. 
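+/// Scratch memory (big-int limbs, element lists) is allocated from `arena`; the
+/// resulting value itself is interned.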
+pub fn readFromMemory( + ty: Type, + mod: *Module, + buffer: []const u8, + arena: Allocator, +) error{ + IllDefinedMemoryLayout, + Unimplemented, + OutOfMemory, +}!Value { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); + switch (ty.zigTypeTag(mod)) { + .Void => return Value.void, + .Bool => { + if (buffer[0] == 0) { + return Value.false; + } else { + return Value.true; + } + }, + .Int, .Enum => |ty_tag| { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + const int_info = int_ty.intInfo(mod); + const bits = int_info.bits; + const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); + if (bits == 0 or buffer.len == 0) return mod.getCoerced(try mod.intValue(int_ty, 0), ty); + + if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 + .signed => { + const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); + }, + .unsigned => { + const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); + const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); + return mod.getCoerced(try mod.intValue(int_ty, result), ty); + }, + } else { // Slow path, we have to construct a big-int + const Limb = std.math.big.Limb; + const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness); + return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty); + } + }, + .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = ty.toIntern(), + .storage = switch (ty.floatBits(target)) { + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) }, + 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) }, + else => unreachable, + }, + } }))), + .Array => { + const elem_ty = ty.childType(mod); + const elem_size = elem_ty.abiSize(mod); + const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); + var offset: usize = 0; + for (elems) |*elem| { + elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); + offset += @as(usize, @intCast(elem_size)); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } }))); + }, + .Vector => { + // We use byte_count instead of abi_size here, so that any padding bytes + // follow the data bytes, on both big- and little-endian systems. 
+ const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); + }, + .Struct => { + const struct_type = mod.typeToStruct(ty).?; + switch (struct_type.layout) { + .Auto => unreachable, // Sema is supposed to have emitted a compile error already + .Extern => { + const field_types = struct_type.field_types; + const field_vals = try arena.alloc(InternPool.Index, field_types.len); + for (field_vals, 0..) |*field_val, i| { + const field_ty = Type.fromInterned(field_types.get(ip)[i]); + const off: usize = @intCast(ty.structFieldOffset(i, mod)); + const sz: usize = @intCast(field_ty.abiSize(mod)); + field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }))); + }, + .Packed => { + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); + }, + } + }, + .ErrorSet => { + const bits = mod.errorSetBits(); + const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); + const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian); + const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); + const name = mod.global_error_set.keys()[@intCast(index)]; + + return Value.fromInterned((try mod.intern(.{ .err = .{ + .ty = ty.toIntern(), + .name = name, + } }))); + }, + .Union => switch (ty.containerLayout(mod)) { + .Auto => return error.IllDefinedMemoryLayout, + .Extern => { + const union_size = ty.abiSize(mod); + const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type }); + const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod); + return Value.fromInterned((try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = .none, + .val = val, + } }))); + }, + .Packed => { + const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; + return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); + }, + }, + .Pointer => { + assert(!ty.isSlice(mod)); // No well defined layout. + const int_val = try readFromMemory(Type.usize, mod, buffer, arena); + return Value.fromInterned((try mod.intern(.{ .ptr = .{ + .ty = ty.toIntern(), + .addr = .{ .int = int_val.toIntern() }, + } }))); + }, + .Optional => { + assert(ty.isPtrLikeOptional(mod)); + const child_ty = ty.optionalChild(mod); + const child_val = try readFromMemory(child_ty, mod, buffer, arena); + return Value.fromInterned((try mod.intern(.{ .opt = .{ + .ty = ty.toIntern(), + .val = switch (child_val.orderAgainstZero(mod)) { + .lt => unreachable, + .eq => .none, + .gt => child_val.toIntern(), + }, + } }))); + }, + else => return error.Unimplemented, + } +} + +/// Load a Value from the contents of `buffer`. +/// +/// Both the start and the end of the provided buffer must be tight, since +/// big-endian packed memory layouts start at the end of the buffer. 
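+/// `bit_offset` is interpreted the same way as in `writeToPackedMemory`.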
+pub fn readFromPackedMemory( + ty: Type, + mod: *Module, + buffer: []const u8, + bit_offset: usize, + arena: Allocator, +) error{ + IllDefinedMemoryLayout, + OutOfMemory, +}!Value { + const ip = &mod.intern_pool; + const target = mod.getTarget(); + const endian = target.cpu.arch.endian(); + switch (ty.zigTypeTag(mod)) { + .Void => return Value.void, + .Bool => { + const byte = switch (endian) { + .big => buffer[buffer.len - bit_offset / 8 - 1], + .little => buffer[bit_offset / 8], + }; + if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) { + return Value.false; + } else { + return Value.true; + } + }, + .Int, .Enum => |ty_tag| { + if (buffer.len == 0) return mod.intValue(ty, 0); + const int_info = ty.intInfo(mod); + const bits = int_info.bits; + if (bits == 0) return mod.intValue(ty, 0); + + // Fast path for integers <= u64 + if (bits <= 64) { + const int_ty = switch (ty_tag) { + .Int => ty, + .Enum => ty.intTagType(mod), + else => unreachable, + }; + return mod.getCoerced(switch (int_info.signedness) { + .signed => return mod.intValue( + int_ty, + std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed), + ), + .unsigned => return mod.intValue( + int_ty, + std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned), + ), + }, ty); + } + + // Slow path, we have to construct a big-int + const abi_size = @as(usize, @intCast(ty.abiSize(mod))); + const Limb = std.math.big.Limb; + const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); + const limbs_buffer = try arena.alloc(Limb, limb_count); + + var bigint = BigIntMutable.init(limbs_buffer, 0); + bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); + return mod.intValue_big(ty, bigint.toConst()); + }, + .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = ty.toIntern(), + .storage = switch (ty.floatBits(target)) { + 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) }, + 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) }, + 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) }, + 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) }, + 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) }, + else => unreachable, + }, + } }))), + .Vector => { + const elem_ty = ty.childType(mod); + const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); + + var bits: u16 = 0; + const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); + for (elems, 0..) |_, i| { + // On big-endian systems, LLVM reverses the element order of vectors by default + const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i; + elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod); + bits += elem_bit_size; + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = elems }, + } }))); + }, + .Struct => { + // Sema is supposed to have emitted a compile error already for Auto layout structs, + // and Extern is handled by non-packed readFromMemory. + const struct_type = mod.typeToPackedStruct(ty).?; + var bits: u16 = 0; + const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len); + for (field_vals, 0..) 
|*field_val, i| { + const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); + const field_bits: u16 = @intCast(field_ty.bitSize(mod)); + field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod); + bits += field_bits; + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = field_vals }, + } }))); + }, + .Union => switch (ty.containerLayout(mod)) { + .Auto, .Extern => unreachable, // Handled by non-packed readFromMemory + .Packed => { + const backing_ty = try ty.unionBackingType(mod); + const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern(); + return Value.fromInterned((try mod.intern(.{ .un = .{ + .ty = ty.toIntern(), + .tag = .none, + .val = val, + } }))); + }, + }, + .Pointer => { + assert(!ty.isSlice(mod)); // No well defined layout. + return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); + }, + .Optional => { + assert(ty.isPtrLikeOptional(mod)); + const child = ty.optionalChild(mod); + return readFromPackedMemory(child, mod, buffer, bit_offset, arena); + }, + else => @panic("TODO implement readFromPackedMemory for more types"), + } +} + +/// Asserts that the value is a float or an integer. +pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| switch (int.storage) { + .big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)), + inline .u64, .i64 => |x| { + if (T == f80) { + @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); + } + return @floatFromInt(x); + }, + .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)), + .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)), + }, + .float => |float| switch (float.storage) { + inline else => |x| @floatCast(x), + }, + else => unreachable, + }; +} + +/// TODO move this to std lib big int code +fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 { + if (limbs.len == 0) return 0; + + const base = std.math.maxInt(std.math.big.Limb) + 1; + var result: f128 = 0; + var i: usize = limbs.len; + while (i != 0) { + i -= 1; + const limb: f128 = @as(f128, @floatFromInt(limbs[i])); + result = @mulAdd(f128, base, result, limb); + } + if (positive) { + return result; + } else { + return -result; + } +} + +pub fn clz(val: Value, ty: Type, mod: *Module) u64 { + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.clz(ty.intInfo(mod).bits); +} + +pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return bigint.ctz(ty.intInfo(mod).bits); +} + +pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { + var bigint_buf: BigIntSpace = undefined; + const bigint = val.toBigInt(&bigint_buf, mod); + return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits))); +} + +pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { + const info = ty.intInfo(mod); + + var buffer: Value.BigIntSpace = undefined; + const operand_bigint = val.toBigInt(&buffer, mod); + + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitReverse(operand_bigint, 
info.signedness, info.bits); + + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { + const info = ty.intInfo(mod); + + // Bit count must be evenly divisible by 8 + assert(info.bits % 8 == 0); + + var buffer: Value.BigIntSpace = undefined; + const operand_bigint = val.toBigInt(&buffer, mod); + + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8); + + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// Asserts the value is an integer and not undefined. +/// Returns the number of bits the value requires to represent stored in twos complement form. +pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { + var buffer: BigIntSpace = undefined; + const big_int = self.toBigInt(&buffer, mod); + return big_int.bitCountTwosComp(); +} + +/// Converts an integer or a float to a float. May result in a loss of information. +/// Caller can find out by equality checking the result against the operand. +pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { + const target = mod.getTarget(); + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = dest_ty.toIntern(), + .storage = switch (dest_ty.floatBits(target)) { + 16 => .{ .f16 = self.toFloat(f16, mod) }, + 32 => .{ .f32 = self.toFloat(f32, mod) }, + 64 => .{ .f64 = self.toFloat(f64, mod) }, + 80 => .{ .f80 = self.toFloat(f80, mod) }, + 128 => .{ .f128 = self.toFloat(f128, mod) }, + else => unreachable, + }, + } }))); +} + +/// Asserts the value is a float +pub fn floatHasFraction(self: Value, mod: *const Module) bool { + return switch (mod.intern_pool.indexToKey(self.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| @rem(x, 1) != 0, + }, + else => unreachable, + }; +} + +pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { + return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; +} + +pub fn orderAgainstZeroAdvanced( + lhs: Value, + mod: *Module, + opt_sema: ?*Sema, +) Module.CompileError!std.math.Order { + return switch (lhs.toIntern()) { + .bool_false => .eq, + .bool_true => .gt, + else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl, .comptime_field => .gt, + .int => |int| Value.fromInterned(int).orderAgainstZeroAdvanced(mod, opt_sema), + .elem => |elem| switch (try Value.fromInterned(elem.base).orderAgainstZeroAdvanced(mod, opt_sema)) { + .lt => unreachable, + .gt => .gt, + .eq => if (elem.index == 0) .eq else .gt, + }, + else => unreachable, + }, + .int => |int| switch (int.storage) { + .big_int => |big_int| big_int.orderAgainstScalar(0), + inline .u64, .i64 => |x| std.math.order(x, 0), + .lazy_align => .gt, // alignment is never 0 + .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( + mod, + false, + if (opt_sema) |sema| .{ .sema = sema } else .eager, + ) catch |err| switch (err) { + error.NeedLazy => unreachable, + else => |e| return e, + }) .gt else .eq, + }, + .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), + .float => |float| switch (float.storage) { + inline else => |x| std.math.order(x, 0), + }, + else => unreachable, + }, + }; +} + +/// Asserts the value is comparable. 
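+/// Lazy values must already be resolved; during semantic analysis use
+/// `orderAdvanced` with a `Sema` instead.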
+pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { + return orderAdvanced(lhs, rhs, mod, null) catch unreachable; +} + +/// Asserts the value is comparable. +/// If opt_sema is null then this function asserts things are resolved and cannot fail. +pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { + const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); + const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); + switch (lhs_against_zero) { + .lt => if (rhs_against_zero != .lt) return .lt, + .eq => return rhs_against_zero.invert(), + .gt => {}, + } + switch (rhs_against_zero) { + .lt => if (lhs_against_zero != .lt) return .gt, + .eq => return lhs_against_zero, + .gt => {}, + } + + if (lhs.isFloat(mod) or rhs.isFloat(mod)) { + const lhs_f128 = lhs.toFloat(f128, mod); + const rhs_f128 = rhs.toFloat(f128, mod); + return std.math.order(lhs_f128, rhs_f128); + } + + var lhs_bigint_space: BigIntSpace = undefined; + var rhs_bigint_space: BigIntSpace = undefined; + const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); + const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); + return lhs_bigint.order(rhs_bigint); +} + +/// Asserts the value is comparable. Does not take a type parameter because it supports +/// comparisons between heterogeneous types. +pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { + return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; +} + +pub fn compareHeteroAdvanced( + lhs: Value, + op: std.math.CompareOperator, + rhs: Value, + mod: *Module, + opt_sema: ?*Sema, +) !bool { + if (lhs.pointerDecl(mod)) |lhs_decl| { + if (rhs.pointerDecl(mod)) |rhs_decl| { + switch (op) { + .eq => return lhs_decl == rhs_decl, + .neq => return lhs_decl != rhs_decl, + else => {}, + } + } else { + switch (op) { + .eq => return false, + .neq => return true, + else => {}, + } + } + } else if (rhs.pointerDecl(mod)) |_| { + switch (op) { + .eq => return false, + .neq => return true, + else => {}, + } + } + return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); +} + +/// Asserts the values are comparable. Both operands have type `ty`. +/// For vectors, returns true if comparison is true for ALL elements. +pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool { + if (ty.zigTypeTag(mod) == .Vector) { + const scalar_ty = ty.scalarType(mod); + for (0..ty.vectorLen(mod)) |i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) { + return false; + } + } + return true; + } + return compareScalar(lhs, op, rhs, ty, mod); +} + +/// Asserts the values are comparable. Both operands have type `ty`. +pub fn compareScalar( + lhs: Value, + op: std.math.CompareOperator, + rhs: Value, + ty: Type, + mod: *Module, +) bool { + return switch (op) { + .eq => lhs.eql(rhs, ty, mod), + .neq => !lhs.eql(rhs, ty, mod), + else => compareHetero(lhs, op, rhs, mod), + }; +} + +/// Asserts the value is comparable. +/// For vectors, returns true if comparison is true for ALL elements. +/// +/// Note that `!compareAllWithZero(.eq, ...) 
!= compareAllWithZero(.neq, ...)` +pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { + return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; +} + +pub fn compareAllWithZeroAdvanced( + lhs: Value, + op: std.math.CompareOperator, + sema: *Sema, +) Module.CompileError!bool { + return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); +} + +pub fn compareAllWithZeroAdvancedExtra( + lhs: Value, + op: std.math.CompareOperator, + mod: *Module, + opt_sema: ?*Sema, +) Module.CompileError!bool { + if (lhs.isInf(mod)) { + switch (op) { + .neq => return true, + .eq => return false, + .gt, .gte => return !lhs.isNegativeInf(mod), + .lt, .lte => return lhs.isNegativeInf(mod), + } + } + + switch (mod.intern_pool.indexToKey(lhs.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| if (std.math.isNan(x)) return op == .neq, + }, + .aggregate => |aggregate| return switch (aggregate.storage) { + .bytes => |bytes| for (bytes) |byte| { + if (!std.math.order(byte, 0).compare(op)) break false; + } else true, + .elems => |elems| for (elems) |elem| { + if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; + } else true, + .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), + }, + else => {}, + } + return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); +} + +pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { + assert(mod.intern_pool.typeOf(a.toIntern()) == ty.toIntern()); + assert(mod.intern_pool.typeOf(b.toIntern()) == ty.toIntern()); + return a.toIntern() == b.toIntern(); +} + +pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .slice => |slice| return Value.fromInterned(slice.ptr).isComptimeMutablePtr(mod), + .ptr => |ptr| switch (ptr.addr) { + .comptime_field => true, + .eu_payload, .opt_payload => |base_ptr| Value.fromInterned(base_ptr).isComptimeMutablePtr(mod), + .elem, .field => |base_index| Value.fromInterned(base_index.base).isComptimeMutablePtr(mod), + else => false, + }, + else => false, + }; +} + +pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { + return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) { + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .error_union => |error_union| switch (error_union.val) { + .err_name => false, + .payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod), + }, + .ptr => |ptr| switch (ptr.addr) { + .eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(mod), + .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).canMutateComptimeVarState(mod), + .elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(mod), + else => false, + }, + .opt => |opt| switch (opt.val) { + .none => false, + else => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod), + }, + .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { + if (Value.fromInterned(elem).canMutateComptimeVarState(mod)) break true; + } else false, + .un => |un| Value.fromInterned(un.val).canMutateComptimeVarState(mod), + else => false, + }, + }; +} + +/// Gets the decl referenced by this pointer. If the pointer does not point +/// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), +/// this function returns null. 
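+/// Variables, extern functions and function bodies are also resolved to their
+/// owner decl.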
+pub fn pointerDecl(val: Value, mod: *Module) ?InternPool.DeclIndex {
+    return switch (mod.intern_pool.indexToKey(val.toIntern())) {
+        .variable => |variable| variable.decl,
+        .extern_func => |extern_func| extern_func.decl,
+        .func => |func| func.owner_decl,
+        .ptr => |ptr| switch (ptr.addr) {
+            .decl => |decl| decl,
+            else => null,
+        },
+        else => null,
+    };
+}
+
+pub const slice_ptr_index = 0;
+pub const slice_len_index = 1;
+
+pub fn slicePtr(val: Value, mod: *Module) Value {
+    return Value.fromInterned(mod.intern_pool.slicePtr(val.toIntern()));
+}
+
+pub fn sliceLen(val: Value, mod: *Module) u64 {
+    const ip = &mod.intern_pool;
+    return switch (ip.indexToKey(val.toIntern())) {
+        .ptr => |ptr| switch (ip.indexToKey(switch (ptr.addr) {
+            .decl => |decl| mod.declPtr(decl).ty.toIntern(),
+            .anon_decl => |anon_decl| ip.typeOf(anon_decl.val),
+            .comptime_field => |comptime_field| ip.typeOf(comptime_field),
+            else => unreachable,
+        })) {
+            .array_type => |array_type| array_type.len,
+            else => 1,
+        },
+        .slice => |slice| Value.fromInterned(slice.len).toUnsignedInt(mod),
+        else => unreachable,
+    };
+}
+
+/// Asserts the value is a single-item pointer to an array, or an array,
+/// or an unknown-length pointer, and returns the element value at the index.
+pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value {
+    return (try val.maybeElemValue(mod, index)).?;
+}
+
+/// Like `elemValue`, but returns `null` instead of asserting on failure.
+pub fn maybeElemValue(val: Value, mod: *Module, index: usize) Allocator.Error!?Value {
+    return switch (val.ip_index) {
+        .none => switch (val.tag()) {
+            .bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]),
+            .repeated => val.castTag(.repeated).?.data,
+            .aggregate => val.castTag(.aggregate).?.data[index],
+            .slice => val.castTag(.slice).?.data.ptr.maybeElemValue(mod, index),
+            else => null,
+        },
+        else => switch (mod.intern_pool.indexToKey(val.toIntern())) {
+            .undef => |ty| Value.fromInterned((try mod.intern(.{
+                .undef = Type.fromInterned(ty).elemType2(mod).toIntern(),
+            }))),
+            .slice => |slice| return Value.fromInterned(slice.ptr).maybeElemValue(mod, index),
+            .ptr => |ptr| switch (ptr.addr) {
+                .decl => |decl| mod.declPtr(decl).val.maybeElemValue(mod, index),
+                .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).maybeElemValue(mod, index),
+                .int, .eu_payload => null,
+                .opt_payload => |base| Value.fromInterned(base).maybeElemValue(mod, index),
+                .comptime_field => |field_val| Value.fromInterned(field_val).maybeElemValue(mod, index),
+                .elem => |elem| Value.fromInterned(elem.base).maybeElemValue(mod, index + @as(usize, @intCast(elem.index))),
+                .field => |field| if (Value.fromInterned(field.base).pointerDecl(mod)) |decl_index| {
+                    const base_decl = mod.declPtr(decl_index);
+                    const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index)));
+                    return field_val.maybeElemValue(mod, index);
+                } else null,
+            },
+            .opt => |opt| Value.fromInterned(opt.val).maybeElemValue(mod, index),
+            .aggregate => |aggregate| {
+                const len = mod.intern_pool.aggregateTypeLen(aggregate.ty);
+                if (index < len) return Value.fromInterned(switch (aggregate.storage) {
+                    .bytes => |bytes| try mod.intern(.{ .int = .{
+                        .ty = .u8_type,
+                        .storage = .{ .u64 = bytes[index] },
+                    } }),
+                    .elems => |elems| elems[index],
+                    .repeated_elem => |elem| elem,
+                });
+                assert(index == len);
+                return
Value.fromInterned(mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel); + }, + else => null, + }, + }; +} + +pub fn isLazyAlign(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| int.storage == .lazy_align, + else => false, + }; +} + +pub fn isLazySize(val: Value, mod: *Module) bool { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .int => |int| int.storage == .lazy_size, + else => false, + }; +} + +pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { + const backing_decl = mod.intern_pool.getBackingDecl(val.toIntern()).unwrap() orelse return false; + const variable = mod.declPtr(backing_decl).getOwnedVariable(mod) orelse return false; + return variable.is_threadlocal; +} + +// Asserts that the provided start/end are in-bounds. +pub fn sliceArray( + val: Value, + mod: *Module, + arena: Allocator, + start: usize, + end: usize, +) error{OutOfMemory}!Value { + // TODO: write something like getCoercedInts to avoid needing to dupe + assert(val.ip_index != .none); + switch (mod.intern_pool.indexToKey(val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), + .comptime_field => |comptime_field| Value.fromInterned(comptime_field) + .sliceArray(mod, arena, start, end), + .elem => |elem| Value.fromInterned(elem.base) + .sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))), + else => unreachable, + }, + .aggregate => |aggregate| Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { + .array_type => |array_type| try mod.arrayType(.{ + .len = @as(u32, @intCast(end - start)), + .child = array_type.child, + .sentinel = if (end == array_type.len) array_type.sentinel else .none, + }), + .vector_type => |vector_type| try mod.vectorType(.{ + .len = @as(u32, @intCast(end - start)), + .child = vector_type.child, + }), + else => unreachable, + }.toIntern(), + .storage = switch (aggregate.storage) { + .bytes => .{ .bytes = try arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) }, + .elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) }, + .repeated_elem => |elem| .{ .repeated_elem = elem }, + }, + } }))), + else => unreachable, + } +} + +pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => |ty| Value.fromInterned((try mod.intern(.{ + .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(), + }))), + .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) { + .bytes => |bytes| try mod.intern(.{ .int = .{ + .ty = .u8_type, + .storage = .{ .u64 = bytes[index] }, + } }), + .elems => |elems| elems[index], + .repeated_elem => |elem| elem, + }), + // TODO assert the tag is correct + .un => |un| Value.fromInterned(un.val), + else => unreachable, + }; +} + +pub fn unionTag(val: Value, mod: *Module) ?Value { + if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef, .enum_tag => val, + .un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null, + else => unreachable, + }; +} + +pub fn unionValue(val: Value, mod: *Module) Value { + if (val.ip_index == .none) return 
val.castTag(.@"union").?.data.val; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .un => |un| Value.fromInterned(un.val), + else => unreachable, + }; +} + +/// Returns a pointer to the element value at the index. +pub fn elemPtr( + val: Value, + elem_ptr_ty: Type, + index: usize, + mod: *Module, +) Allocator.Error!Value { + const elem_ty = elem_ptr_ty.childType(mod); + const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { + .slice => |slice| Value.fromInterned(slice.ptr), + else => val, + }; + switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { + .ptr => |ptr| switch (ptr.addr) { + .elem => |elem| if (Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).eql(elem_ty, mod)) + return Value.fromInterned((try mod.intern(.{ .ptr = .{ + .ty = elem_ptr_ty.toIntern(), + .addr = .{ .elem = .{ + .base = elem.base, + .index = elem.index + index, + } }, + } }))), + else => {}, + }, + else => {}, + } + var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type; + assert(ptr_ty_key.flags.size != .Slice); + ptr_ty_key.flags.size = .Many; + return Value.fromInterned((try mod.intern(.{ .ptr = .{ + .ty = elem_ptr_ty.toIntern(), + .addr = .{ .elem = .{ + .base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(), + .index = index, + } }, + } }))); +} + +pub fn isUndef(val: Value, mod: *Module) bool { + return val.ip_index != .none and mod.intern_pool.isUndef(val.toIntern()); +} + +/// TODO: check for cases such as array that is not marked undef but all the element +/// values are marked undef, or struct that is not marked undef but all fields are marked +/// undef, etc. +pub fn isUndefDeep(val: Value, mod: *Module) bool { + return val.isUndef(mod); +} + +/// Returns true if any value contained in `self` is undefined. +pub fn anyUndef(val: Value, mod: *Module) !bool { + if (val.ip_index == .none) return false; + return switch (val.toIntern()) { + .undef => true, + else => switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => true, + .simple_value => |v| v == .undefined, + .ptr => |ptr| switch (ptr.len) { + .none => false, + else => for (0..@as(usize, @intCast(Value.fromInterned(ptr.len).toUnsignedInt(mod)))) |index| { + if (try (try val.elemValue(mod, index)).anyUndef(mod)) break true; + } else false, + }, + .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| { + const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i]; + if (try anyUndef(Value.fromInterned(elem), mod)) break true; + } else false, + else => false, + }, + }; +} + +/// Asserts the value is not undefined and not unreachable. +/// C pointers with an integer value of 0 are also considered null. +pub fn isNull(val: Value, mod: *Module) bool { + return switch (val.toIntern()) { + .undef => unreachable, + .unreachable_value => unreachable, + .null_value => true, + else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => unreachable, + .slice => |slice| for (0..@intCast(Value.fromInterned(slice.len).toUnsignedInt(mod))) |idx| { + if (try (try val.elemValue(mod, idx)).anyUndef(mod)) break true; + } else false, + .opt => |opt| opt.val == .none, + else => false, + }, + }; +} + +/// Valid only for error (union) types. Asserts the value is not undefined and not unreachable. 
+pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTerminatedString { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .err => |err| err.name.toOptional(), + .error_union => |error_union| switch (error_union.val) { + .err_name => |err_name| err_name.toOptional(), + .payload => .none, + }, + else => unreachable, + }; +} + +pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { + return if (getErrorName(val, mod).unwrap()) |err_name| + @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?)) + else + 0; +} + +/// Assumes the type is an error union. Returns true if and only if the value is +/// the error union payload, not an error. +pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { + return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload; +} + +/// Value of the optional, null if optional has no payload. +pub fn optionalValue(val: Value, mod: *const Module) ?Value { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .opt => |opt| switch (opt.val) { + .none => null, + else => |payload| Value.fromInterned(payload), + }, + .ptr => val, + else => unreachable, + }; +} + +/// Valid for all types. Asserts the value is not undefined. +pub fn isFloat(self: Value, mod: *const Module) bool { + return switch (self.toIntern()) { + .undef => unreachable, + else => switch (mod.intern_pool.indexToKey(self.toIntern())) { + .undef => unreachable, + .float => true, + else => false, + }, + }; +} + +pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { + return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => unreachable, + }; +} + +pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + if (int_ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); + const scalar_ty = float_ty.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatFromIntScalar(val, float_ty, mod, opt_sema); +} + +pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .undef => try mod.undefValue(float_ty), + .int => |int| switch (int.storage) { + .big_int => |big_int| { + const float = bigIntToFloat(big_int.limbs, big_int.positive); + return mod.floatValue(float_ty, float); + }, + inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), + .lazy_align => |ty| if (opt_sema) |sema| { + return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod); + } else { + return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod); + }, + .lazy_size => |ty| if (opt_sema) |sema| { + return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); + } else { + return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod); + }, + }, + else => unreachable, + }; +} + +fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { + 16 => .{ .f16 = @floatFromInt(x) }, + 32 => .{ .f32 = @floatFromInt(x) }, + 64 => .{ .f64 = @floatFromInt(x) }, + 80 => .{ .f80 = @floatFromInt(x) }, + 128 => .{ .f128 = @floatFromInt(x) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = dest_ty.toIntern(), + .storage = storage, + } }))); +} + +fn calcLimbLenFloat(scalar: anytype) usize { + if (scalar == 0) { + return 1; + } + + const w_value = @abs(scalar); + return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1; +} + +pub const OverflowArithmeticResult = struct { + overflow_bit: Value, + wrapped_result: Value, +}; + +/// Supports (vectors of) integers only; asserts neither operand is undefined. +pub fn intAddSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intAddSatScalar(lhs, rhs, ty, arena, mod); +} + +/// Supports integers only; asserts neither operand is undefined. 
+pub fn intAddSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); + + const info = ty.intInfo(mod); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// Supports (vectors of) integers only; asserts neither operand is undefined. +pub fn intSubSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intSubSatScalar(lhs, rhs, ty, arena, mod); +} + +/// Supports integers only; asserts neither operand is undefined. +pub fn intSubSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); + + const info = ty.intInfo(mod); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn intMulWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !OverflowArithmeticResult { + if (ty.zigTypeTag(mod) == .Vector) { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try arena.alloc(InternPool.Index, vec_len); + const result_data = try arena.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); + } + return OverflowArithmeticResult{ + .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } }))), + .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))), + }; + } + return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); +} + +pub fn intMulWithOverflowScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !OverflowArithmeticResult { + const info = ty.intInfo(mod); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + const limbs_buffer = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), + ); + result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); + + const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); + if (overflowed) { + result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); + } + + return OverflowArithmeticResult{ + .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), + }; +} + +/// Supports both (vectors of) floats and ints; handles undefined scalars. +pub fn numberMulWrap( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return numberMulWrapScalar(lhs, rhs, ty, arena, mod); +} + +/// Supports both floats and ints; handles undefined. +pub fn numberMulWrapScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; + + if (ty.zigTypeTag(mod) == .ComptimeInt) { + return intMul(lhs, rhs, ty, undefined, arena, mod); + } + + if (ty.isAnyFloat()) { + return floatMul(lhs, rhs, ty, arena, mod); + } + + const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod); + return overflow_result.wrapped_result; +} + +/// Supports (vectors of) integers only; asserts neither operand is undefined. 
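The wrapping-multiply path above boils down to: multiply at full precision, check fitsInTwosComp, and truncate on overflow. A standalone sketch of that pattern directly against std.math.big.int, outside the compiler's Value/Module machinery (the test and its variable names are illustrative, not part of this patch):

const std = @import("std");
const big = std.math.big;

test "wrapping multiply: overflow check then truncate" {
    const gpa = std.testing.allocator;
    var lhs = try big.int.Managed.initSet(gpa, 100);
    defer lhs.deinit();
    var rhs = try big.int.Managed.initSet(gpa, 3);
    defer rhs.deinit();
    var product = try big.int.Managed.init(gpa);
    defer product.deinit();

    // 300 does not fit in 8 signed bits, so the wrapping path truncates it.
    try product.mul(&lhs, &rhs);
    const overflowed = !product.toConst().fitsInTwosComp(.signed, 8);
    try std.testing.expect(overflowed);

    var wrapped = try big.int.Managed.init(gpa);
    defer wrapped.deinit();
    try wrapped.truncate(&product, .signed, 8);
    // Only the low 8 bits of 300 survive, giving 44.
    try std.testing.expectEqual(@as(i32, 44), try wrapped.toConst().to(i32));
}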
+pub fn intMulSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intMulSatScalar(lhs, rhs, ty, arena, mod); +} + +/// Supports (vectors of) integers only; asserts neither operand is undefined. +pub fn intMulSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + assert(!lhs.isUndef(mod)); + assert(!rhs.isUndef(mod)); + + const info = ty.intInfo(mod); + + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + @max( + // For the saturate + std.math.big.int.calcTwosCompLimbCount(info.bits), + lhs_bigint.limbs.len + rhs_bigint.limbs.len, + ), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + const limbs_buffer = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), + ); + result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); + result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// Supports both floats and ints; handles undefined. +pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; + if (lhs.isNan(mod)) return rhs; + if (rhs.isNan(mod)) return lhs; + + return switch (order(lhs, rhs, mod)) { + .lt => rhs, + .gt, .eq => lhs, + }; +} + +/// Supports both floats and ints; handles undefined. +pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; + if (lhs.isNan(mod)) return rhs; + if (rhs.isNan(mod)) return lhs; + + return switch (order(lhs, rhs, mod)) { + .lt => lhs, + .gt, .eq => rhs, + }; +} + +/// operands must be (vectors of) integers; handles undefined scalars. +pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return bitwiseNotScalar(val, ty, arena, mod); +} + +/// operands must be integers; handles undefined. 
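For reference, the saturating semantics that intAddSatScalar, intSubSatScalar, and intMulSatScalar reproduce with big integers are the same as Zig's native saturating operators on fixed-width types; a minimal standalone check (not part of this patch):

const std = @import("std");

test "saturating arithmetic reference behavior" {
    // Results clamp to the bounds of the operand type instead of wrapping.
    try std.testing.expectEqual(@as(i8, 127), @as(i8, 100) +| @as(i8, 100));
    try std.testing.expectEqual(@as(i8, -128), @as(i8, -100) -| @as(i8, 100));
    try std.testing.expectEqual(@as(i8, 127), @as(i8, 64) *| @as(i8, 2));
}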
+pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (val.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); + if (ty.toIntern() == .bool_type) return makeBool(!val.toBool()); + + const info = ty.intInfo(mod); + + if (info.bits == 0) { + return val; + } + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var val_space: Value.BigIntSpace = undefined; + const val_bigint = val.toBigInt(&val_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits), + ); + + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// operands must be (vectors of) integers; handles undefined scalars. +pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); +} + +/// operands must be integers; handles undefined. +pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool()); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + // + 1 for negatives + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitAnd(lhs_bigint, rhs_bigint); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// operands must be (vectors of) integers; handles undefined scalars. +pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return bitwiseNandScalar(lhs, rhs, ty, arena, mod); +} + +/// operands must be integers; handles undefined. 
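bitwiseNandScalar, defined just below, needs no dedicated big-int routine: it computes ~(a & b) as (a & b) XOR all-ones, where all-ones is -1 for signed types and maxInt for unsigned ones. The identity it relies on can be checked on a native integer (standalone sketch, not part of this patch):

const std = @import("std");

test "nand as xor with all ones" {
    const a: u8 = 0b1100_1010;
    const b: u8 = 0b1010_0110;
    const all_ones: u8 = std.math.maxInt(u8);
    // ~(a & b) and (a & b) ^ all_ones are the same bit pattern.
    try std.testing.expectEqual(~(a & b), (a & b) ^ all_ones);
}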
+pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); + if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool())); + + const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); + const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); + return bitwiseXor(anded, all_ones, ty, arena, mod); +} + +/// operands must be (vectors of) integers; handles undefined scalars. +pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); +} + +/// operands must be integers; handles undefined. +pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool()); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitOr(lhs_bigint, rhs_bigint); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// operands must be (vectors of) integers; handles undefined scalars. +pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); +} + +/// operands must be integers; handles undefined. +pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); + if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool()); + + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try arena.alloc( + std.math.big.Limb, + // + 1 for negatives + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.bitXor(lhs_bigint, rhs_bigint); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). +pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; +} + +fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intDivScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
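+    // For example, with an i8 operand, -128 / -1 produces 128, which fails the
+    // fitsInTwosComp check below and surfaces as error.Overflow.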
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs_q = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len, + ); + const limbs_r = try allocator.alloc( + std.math.big.Limb, + rhs_bigint.limbs.len, + ); + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; + var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; + result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); + if (ty.toIntern() != .comptime_int_type) { + const info = ty.intInfo(mod); + if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) { + return error.Overflow; + } + } + return mod.intValue_big(ty, result_q.toConst()); +} + +pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intDivFloorScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs_q = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len, + ); + const limbs_r = try allocator.alloc( + std.math.big.Limb, + rhs_bigint.limbs.len, + ); + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; + var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; + result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); + return mod.intValue_big(ty, result_q.toConst()); +} + +pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intModScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs_q = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len, + ); + const limbs_r = try allocator.alloc( + std.math.big.Limb, + rhs_bigint.limbs.len, + ); + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + ); + var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; + var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; + result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); + return mod.intValue_big(ty, result_r.toConst()); +} + +/// Returns true if the value is a floating point type and is NaN. Returns false otherwise. +pub fn isNan(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNan(x), + }, + else => false, + }; +} + +/// Returns true if the value is a floating point type and is infinite. Returns false otherwise. +pub fn isInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isInf(x), + }, + else => false, + }; +} + +pub fn isNegativeInf(val: Value, mod: *const Module) bool { + if (val.ip_index == .none) return false; + return switch (mod.intern_pool.indexToKey(val.toIntern())) { + .float => |float| switch (float.storage) { + inline else => |x| std.math.isNegativeInf(x), + }, + else => false, + }; +} + +pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatRemScalar(lhs, rhs, float_type, mod); +} + +pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatModScalar(lhs, rhs, float_type, mod); +} + +pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +/// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting +/// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
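Before the overflow-checked multiplication below, a standalone reminder of the @rem/@mod distinction used by floatRemScalar and floatModScalar above (and by intModScalar, which returns the remainder of a floored division); not part of this patch:

const std = @import("std");

test "@rem vs @mod sign conventions" {
    // @rem follows the sign of the dividend (truncated division);
    // @mod follows the sign of the divisor (floored division).
    try std.testing.expectEqual(@as(i32, -1), @rem(@as(i32, -7), @as(i32, 3)));
    try std.testing.expectEqual(@as(i32, 2), @mod(@as(i32, -7), @as(i32, 3)));
    try std.testing.expectEqual(@as(f64, -1.0), @rem(@as(f64, -7.0), @as(f64, 3.0)));
    try std.testing.expectEqual(@as(f64, 2.0), @mod(@as(f64, -7.0), @as(f64, 3.0)));
}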
+pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { + var overflow: usize = undefined; + return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { + error.Overflow => { + const is_vec = ty.isVector(mod); + overflow_idx.* = if (is_vec) overflow else 0; + const safe_ty = if (is_vec) try mod.vectorType(.{ + .len = ty.vectorLen(mod), + .child = .comptime_int_type, + }) else Type.comptime_int; + return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { + error.Overflow => unreachable, + else => |e| return e, + }; + }, + else => |e| return e, + }; +} + +fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { + error.Overflow => { + overflow_idx.* = i; + return error.Overflow; + }, + else => |e| return e, + }; + scalar.* = try val.intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intMulScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.toIntern() != .comptime_int_type) { + const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod); + if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; + return res.wrapped_result; + } + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const rhs_bigint = rhs.toBigInt(&rhs_space, mod); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + rhs_bigint.limbs.len, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + const limbs_buffer = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), + ); + defer allocator.free(limbs_buffer); + result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intTruncScalar(val, ty, allocator, signedness, bits, mod); +} + +/// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. 
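intTruncScalar, below, keeps only the low `bits` bits of the operand and reinterprets them under the requested signedness. The same effect on native fixed-width integers, as a standalone reference (not part of this patch):

const std = @import("std");

test "two's complement truncation" {
    // 300 truncated to 8 bits keeps 0b0010_1100 = 44.
    try std.testing.expectEqual(@as(u8, 44), @as(u8, @truncate(@as(u32, 300))));
    // 200 truncated to 8 bits and read as signed becomes -56.
    const signed: i8 = @bitCast(@as(u8, @truncate(@as(u32, 200))));
    try std.testing.expectEqual(@as(i8, -56), signed);
}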
+pub fn intTruncBitsAsValue( + val: Value, + ty: Type, + allocator: Allocator, + signedness: std.builtin.Signedness, + bits: Value, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + const bits_elem = try bits.elemValue(mod, i); + scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod); +} + +pub fn intTruncScalar( + val: Value, + ty: Type, + allocator: Allocator, + signedness: std.builtin.Signedness, + bits: u16, + mod: *Module, +) !Value { + if (bits == 0) return mod.intValue(ty, 0); + + var val_space: Value.BigIntSpace = undefined; + const val_bigint = val.toBigInt(&val_space, mod); + + const limbs = try allocator.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(bits), + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + + result_bigint.truncate(val_bigint, signedness, bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return shlScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftLeft(lhs_bigint, shift); + if (ty.toIntern() != .comptime_int_type) { + const int_info = ty.intInfo(mod); + result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); + } + + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn shlWithOverflow( + lhs: Value, + rhs: Value, + ty: Type, + allocator: Allocator, + mod: *Module, +) !OverflowArithmeticResult { + if (ty.zigTypeTag(mod) == .Vector) { + const vec_len = ty.vectorLen(mod); + const overflowed_data = try allocator.alloc(InternPool.Index, vec_len); + const result_data = try allocator.alloc(InternPool.Index, vec_len); + const scalar_ty = ty.scalarType(mod); + for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); + of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); + scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); + } + return OverflowArithmeticResult{ + .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), + .storage = .{ .elems = overflowed_data }, + } }))), + .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))), + }; + } + return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn shlWithOverflowScalar( + lhs: Value, + rhs: Value, + ty: Type, + allocator: Allocator, + mod: *Module, +) !OverflowArithmeticResult { + const info = ty.intInfo(mod); + var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); + const limbs = try allocator.alloc( + std.math.big.Limb, + lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftLeft(lhs_bigint, shift); + const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); + if (overflowed) { + result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); + } + return OverflowArithmeticResult{ + .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), + .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), + }; +} + +pub fn shlSat( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return shlSatScalar(lhs, rhs, ty, arena, mod); +} + +pub fn shlSatScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
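+    // For example, with an i8 operand, 96 << 1 would be 192, so shiftLeftSat
+    // clamps the result to maxInt(i8) = 127; likewise -96 << 2 clamps to -128.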
+ const info = ty.intInfo(mod); + + var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); + const limbs = try arena.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn shlTrunc( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return shlTruncScalar(lhs, rhs, ty, arena, mod); +} + +pub fn shlTruncScalar( + lhs: Value, + rhs: Value, + ty: Type, + arena: Allocator, + mod: *Module, +) !Value { + const shifted = try lhs.shl(rhs, ty, arena, mod); + const int_info = ty.intInfo(mod); + const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); + return truncated; +} + +pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return shrScalar(lhs, rhs, ty, allocator, mod); +} + +pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. + var lhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs.toBigInt(&lhs_space, mod); + const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); + + const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); + if (result_limbs == 0) { + // The shift is enough to remove all the bits from the number, which means the + // result is 0 or -1 depending on the sign. 
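+        // For example, an arithmetic right shift of -3 by 64 bits yields -1,
+        // while shifting 3 right by 64 bits yields 0.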
+ if (lhs_bigint.positive) { + return mod.intValue(ty, 0); + } else { + return mod.intValue(ty, -1); + } + } + + const limbs = try allocator.alloc( + std.math.big.Limb, + result_limbs, + ); + var result_bigint = BigIntMutable{ + .limbs = limbs, + .positive = undefined, + .len = undefined, + }; + result_bigint.shiftRight(lhs_bigint, shift); + return mod.intValue_big(ty, result_bigint.toConst()); +} + +pub fn floatNeg( + val: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatNegScalar(val, float_type, mod); +} + +pub fn floatNegScalar( + val: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = -val.toFloat(f16, mod) }, + 32 => .{ .f32 = -val.toFloat(f32, mod) }, + 64 => .{ .f64 = -val.toFloat(f64, mod) }, + 80 => .{ .f80 = -val.toFloat(f80, mod) }, + 128 => .{ .f128 = -val.toFloat(f128, mod) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatAdd( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatAddScalar(lhs, rhs, float_type, mod); +} + +pub fn floatAddScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatSub( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatSubScalar(lhs, rhs, float_type, mod); +} + +pub fn floatSubScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatDiv( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatDivScalar(lhs, rhs, float_type, mod); +} + +pub fn floatDivScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) / rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatDivFloor( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatDivFloorScalar(lhs, rhs, float_type, mod); +} + +pub fn floatDivFloorScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatDivTrunc( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatDivTruncScalar(lhs, rhs, float_type, mod); +} + +pub fn floatDivTruncScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, + 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, + 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, + 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, + 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn floatMul( + lhs: Value, + rhs: Value, + float_type: Type, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const lhs_elem = try lhs.elemValue(mod, i); + const rhs_elem = try rhs.elemValue(mod, i); + scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floatMulScalar(lhs, rhs, float_type, mod); +} + +pub fn floatMulScalar( + lhs: Value, + rhs: Value, + float_type: Type, + mod: *Module, +) !Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) }, + 32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) }, + 64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) }, + 80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) }, + 128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return sqrtScalar(val, float_type, mod); +} + +pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return sinScalar(val, float_type, mod); +} + +pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @sin(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @sin(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @sin(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @sin(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @sin(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return cosScalar(val, float_type, mod); +} + +pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @cos(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @cos(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @cos(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @cos(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @cos(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return tanScalar(val, float_type, mod); +} + +pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @tan(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @tan(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @tan(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @tan(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @tan(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return expScalar(val, float_type, mod); +} + +pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @exp(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @exp(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @exp(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @exp(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @exp(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return exp2Scalar(val, float_type, mod); +} + +pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return logScalar(val, float_type, mod); +} + +pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return log2Scalar(val, float_type, mod); +} + +pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log2(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log2(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log2(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log2(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log2(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return log10Scalar(val, float_type, mod); +} + +pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @log10(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @log10(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @log10(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @log10(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @log10(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { + if (ty.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); + const scalar_ty = ty.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try absScalar(elem_val, scalar_ty, mod, arena)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = ty.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return absScalar(val, ty, mod, arena); +} + +pub fn absScalar(val: Value, ty: Type, mod: *Module, arena: Allocator) Allocator.Error!Value { + switch (ty.zigTypeTag(mod)) { + .Int => { + var buffer: Value.BigIntSpace = undefined; + var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); + operand_bigint.abs(); + + return mod.intValue_big(try ty.toUnsigned(mod), operand_bigint.toConst()); + }, + .ComptimeInt => { + var buffer: Value.BigIntSpace = undefined; + var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); + operand_bigint.abs(); + + return mod.intValue_big(ty, operand_bigint.toConst()); + }, + .ComptimeFloat, .Float => { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) { + 16 => .{ .f16 = @abs(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @abs(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @abs(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @abs(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @abs(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = ty.toIntern(), + .storage = storage, + } }))); + }, + else => unreachable, + } +} + +pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return floorScalar(val, float_type, mod); +} + +pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @floor(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @floor(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @floor(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @floor(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @floor(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return ceilScalar(val, float_type, mod); +} + +pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return roundScalar(val, float_type, mod); +} + +pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @round(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @round(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @round(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @round(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @round(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) 
|*scalar, i| { + const elem_val = try val.elemValue(mod, i); + scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return truncScalar(val, float_type, mod); +} + +pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) }, + 32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) }, + 64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) }, + 80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) }, + 128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +pub fn mulAdd( + float_type: Type, + mulend1: Value, + mulend2: Value, + addend: Value, + arena: Allocator, + mod: *Module, +) !Value { + if (float_type.zigTypeTag(mod) == .Vector) { + const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); + const scalar_ty = float_type.scalarType(mod); + for (result_data, 0..) |*scalar, i| { + const mulend1_elem = try mulend1.elemValue(mod, i); + const mulend2_elem = try mulend2.elemValue(mod, i); + const addend_elem = try addend.elemValue(mod, i); + scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod); + } + return Value.fromInterned((try mod.intern(.{ .aggregate = .{ + .ty = float_type.toIntern(), + .storage = .{ .elems = result_data }, + } }))); + } + return mulAddScalar(float_type, mulend1, mulend2, addend, mod); +} + +pub fn mulAddScalar( + float_type: Type, + mulend1: Value, + mulend2: Value, + addend: Value, + mod: *Module, +) Allocator.Error!Value { + const target = mod.getTarget(); + const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { + 16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) }, + 32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), addend.toFloat(f32, mod)) }, + 64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) }, + 80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) }, + 128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) }, + else => unreachable, + }; + return Value.fromInterned((try mod.intern(.{ .float = .{ + .ty = float_type.toIntern(), + .storage = storage, + } }))); +} + +/// If the value is represented in-memory as a series of bytes that all +/// have the same value, return that byte value, otherwise null. +pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?u8 { + const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null; + assert(abi_size >= 1); + const byte_buffer = try mod.gpa.alloc(u8, abi_size); + defer mod.gpa.free(byte_buffer); + + writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.ReinterpretDeclRef => return null, + // TODO: The writeToMemory function was originally created for the purpose + // of comptime pointer casting. 
However, it is now additionally being used + // for checking the actual memory layout that will be generated by machine + // code late in compilation. So, this error handling is too aggressive and + // causes some false negatives, causing less-than-ideal code generation. + error.IllDefinedMemoryLayout => return null, + error.Unimplemented => return null, + }; + const first_byte = byte_buffer[0]; + for (byte_buffer[1..]) |byte| { + if (byte != first_byte) return null; + } + return first_byte; +} + +pub fn isGenericPoison(val: Value) bool { + return val.toIntern() == .generic_poison; +} + +/// For an integer (comptime or fixed-width) `val`, returns the comptime-known bounds of the value. +/// If `val` is not undef, the bounds are both `val`. +/// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type. +/// If `val` is undef and is a `comptime_int`, returns null. +pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value { + if (!val.isUndef(mod)) return .{ val, val }; + const ty = mod.intern_pool.typeOf(val.toIntern()); + if (ty == .comptime_int_type) return null; + return .{ + try Type.fromInterned(ty).minInt(mod, Type.fromInterned(ty)), + try Type.fromInterned(ty).maxInt(mod, Type.fromInterned(ty)), + }; +} + +pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace; + +pub const zero_usize: Value = .{ .ip_index = .zero_usize }; +pub const zero_u8: Value = .{ .ip_index = .zero_u8 }; +pub const zero_comptime_int: Value = .{ .ip_index = .zero }; +pub const one_comptime_int: Value = .{ .ip_index = .one }; +pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one }; +pub const undef: Value = .{ .ip_index = .undef }; +pub const @"void": Value = .{ .ip_index = .void_value }; +pub const @"null": Value = .{ .ip_index = .null_value }; +pub const @"false": Value = .{ .ip_index = .bool_false }; +pub const @"true": Value = .{ .ip_index = .bool_true }; +pub const @"unreachable": Value = .{ .ip_index = .unreachable_value }; + +pub const generic_poison: Value = .{ .ip_index = .generic_poison }; +pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type }; +pub const empty_struct: Value = .{ .ip_index = .empty_struct }; + +pub fn makeBool(x: bool) Value { + return if (x) Value.true else Value.false; +} diff --git a/src/Zir.zig b/src/Zir.zig index 4462083b1f..0513f6df4e 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2211,6 +2211,11 @@ pub const Inst = struct { empty_struct = @intFromEnum(InternPool.Index.empty_struct), generic_poison = @intFromEnum(InternPool.Index.generic_poison), + /// This Ref does not correspond to any ZIR instruction. + /// It is a special value recognized only by Sema. + /// It indicates the value is mutable comptime memory, and represented + /// via the comptime_memory field of Sema. This value never occurs in ZIR. + mutable_comptime = @intFromEnum(InternPool.Index.mutable_comptime), /// This tag is here to match Air and InternPool, however it is unused /// for ZIR purposes. 
var_args_param_type = @intFromEnum(InternPool.Index.var_args_param_type), diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index fa44ede117..750edb45a8 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -9,7 +9,7 @@ const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index fc690bdec1..ed3434106f 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -9,7 +9,7 @@ const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index e00bed5ada..fe46d0ddb6 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -8,7 +8,7 @@ const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const TypedValue = @import("../../TypedValue.zig"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 6237a8af23..e797a3bec7 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -14,7 +14,7 @@ const Module = @import("../../Module.zig"); const InternPool = @import("../../InternPool.zig"); const Decl = Module.Decl; const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const Compilation = @import("../../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../../link.zig"); @@ -3082,10 +3082,6 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue return func.lowerParentPtrDecl(ptr_val, decl_index, offset); }, .anon_decl => |ad| return func.lowerAnonDeclRef(ad, offset), - .mut_decl => |mut_decl| { - const decl_index = mut_decl.decl; - return func.lowerParentPtrDecl(ptr_val, decl_index, offset); - }, .eu_payload => |tag| return func.fail("TODO: Implement lowerParentPtr for {}", .{tag}), .int => |base| return func.lowerConstant(Value.fromInterned(base), Type.usize), .opt_payload => |base_ptr| return func.lowerParentPtr(Value.fromInterned(base_ptr), offset), @@ -3346,7 +3342,6 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue { }, .ptr => |ptr| switch (ptr.addr) { .decl => |decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl, 0), - .mut_decl => |mut_decl| return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, mut_decl.decl, 0), .int => |int| return func.lowerConstant(Value.fromInterned(int), Type.fromInterned(ip.typeOf(int))), .opt_payload, .elem, .field => return func.lowerParentPtr(val, 0), .anon_decl => |ad| return func.lowerAnonDeclRef(ad, 0), diff 
--git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 212a30bbf4..8ebb861398 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -33,7 +33,7 @@ const Alignment = InternPool.Alignment; const Target = std.Target; const Type = @import("../../type.zig").Type; const TypedValue = @import("../../TypedValue.zig"); -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const Instruction = @import("encoder.zig").Instruction; const abi = @import("abi.zig"); diff --git a/src/arch/x86_64/abi.zig b/src/arch/x86_64/abi.zig index 91ac0ce63a..1046e73fb6 100644 --- a/src/arch/x86_64/abi.zig +++ b/src/arch/x86_64/abi.zig @@ -570,4 +570,4 @@ const Module = @import("../../Module.zig"); const Register = @import("bits.zig").Register; const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); diff --git a/src/codegen.zig b/src/codegen.zig index dd851eeb03..d39d94201d 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -20,7 +20,7 @@ const Module = @import("Module.zig"); const Target = std.Target; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Zir = @import("Zir.zig"); const Alignment = InternPool.Alignment; @@ -678,7 +678,6 @@ fn lowerParentPtr( const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr; return switch (ptr.addr) { .decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info), - .mut_decl => |md| try lowerDeclRef(bin_file, src_loc, md.decl, code, debug_output, reloc_info), .anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info), .int => |int| try generateSymbol(bin_file, src_loc, .{ .ty = Type.usize, @@ -1087,7 +1086,6 @@ pub fn genTypedValue( if (!typed_value.ty.isSlice(zcu)) switch (zcu.intern_pool.indexToKey(typed_value.val.toIntern())) { .ptr => |ptr| switch (ptr.addr) { .decl => |decl| return genDeclRef(lf, src_loc, typed_value, decl), - .mut_decl => |mut_decl| return genDeclRef(lf, src_loc, typed_value, mut_decl.decl), else => {}, }, else => {}, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 9907a95fef..a44db22f9d 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -7,7 +7,7 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); const C = link.File.C; @@ -691,7 +691,6 @@ pub const DeclGen = struct { const ptr = mod.intern_pool.indexToKey(ptr_val).ptr; switch (ptr.addr) { .decl => |d| try dg.renderDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), d, location), - .mut_decl => |md| try dg.renderDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), md.decl, location), .anon_decl => |anon_decl| try dg.renderAnonDeclValue(writer, ptr_ty, Value.fromInterned(ptr_val), anon_decl, location), .int => |int| { try writer.writeByte('('); @@ -1221,7 +1220,6 @@ pub const DeclGen = struct { }, .ptr => |ptr| switch (ptr.addr) { .decl => |d| try dg.renderDeclValue(writer, ty, val, d, location), - .mut_decl => |md| try dg.renderDeclValue(writer, ty, val, md.decl, location), .anon_decl => 
|decl_val| try dg.renderAnonDeclValue(writer, ty, val, decl_val, location), .int => |int| { try writer.writeAll("(("); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 1e63361048..be26d4c79c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -21,7 +21,7 @@ const Package = @import("../Package.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const Type = @import("../type.zig").Type; const LazySrcLoc = Module.LazySrcLoc; const x86_64_abi = @import("../arch/x86_64/abi.zig"); @@ -3875,7 +3875,6 @@ pub const Object = struct { }, .ptr => |ptr| return switch (ptr.addr) { .decl => |decl| try o.lowerDeclRefValue(ty, decl), - .mut_decl => |mut_decl| try o.lowerDeclRefValue(ty, mut_decl.decl), .anon_decl => |anon_decl| try o.lowerAnonDeclRef(ty, anon_decl), .int => |int| try o.lowerIntAsPtr(int), .eu_payload, @@ -4340,7 +4339,6 @@ pub const Object = struct { const ptr = ip.indexToKey(ptr_val.toIntern()).ptr; return switch (ptr.addr) { .decl => |decl| try o.lowerParentPtrDecl(decl), - .mut_decl => |mut_decl| try o.lowerParentPtrDecl(mut_decl.decl), .anon_decl => |ad| try o.lowerAnonDeclRef(Type.fromInterned(ad.orig_ty), ad), .int => |int| try o.lowerIntAsPtr(int), .eu_payload => |eu_ptr| { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index a499f3d8ed..ecb7c42664 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -7,7 +7,7 @@ const assert = std.debug.assert; const Module = @import("../Module.zig"); const Decl = Module.Decl; const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); const Zir = @import("../Zir.zig"); @@ -992,7 +992,6 @@ const DeclGen = struct { const mod = self.module; switch (mod.intern_pool.indexToKey(ptr_val.toIntern()).ptr.addr) { .decl => |decl| return try self.constantDeclRef(ptr_ty, decl), - .mut_decl => |decl_mut| return try self.constantDeclRef(ptr_ty, decl_mut.decl), .anon_decl => |anon_decl| return try self.constantAnonDeclRef(ptr_ty, anon_decl), .int => |int| { const ptr_id = self.spv.allocId(); diff --git a/src/link/C.zig b/src/link/C.zig index 68facb374b..636436eebb 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -14,7 +14,7 @@ const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 7d825ef4d1..eb39ac9044 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -2753,7 +2753,7 @@ const Relocation = @import("Coff/Relocation.zig"); const TableSection = @import("table_section.zig").TableSection; const StringTable = @import("StringTable.zig"); const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const TypedValue = @import("../TypedValue.zig"); pub const base_tag: link.File.Tag = .coff; diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 17afe328fe..6607cdb119 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2847,4 +2847,4 @@ const Module = @import("../Module.zig"); const InternPool = 
@import("../InternPool.zig"); const StringTable = @import("StringTable.zig"); const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index ef8509e915..1437f4926a 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1667,6 +1667,6 @@ const Object = @import("Object.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const TypedValue = @import("../../TypedValue.zig"); const ZigObject = @This(); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index df2cac41db..bfa76627e4 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -1462,6 +1462,6 @@ const Relocation = @import("Relocation.zig"); const Symbol = @import("Symbol.zig"); const StringTable = @import("../StringTable.zig"); const Type = @import("../../type.zig").Type; -const Value = @import("../../value.zig").Value; +const Value = @import("../../Value.zig"); const TypedValue = @import("../../TypedValue.zig"); const ZigObject = @This(); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index a635b0bf5b..8b4287dcfd 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -14,7 +14,7 @@ const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const TypedValue = @import("../TypedValue.zig"); const std = @import("std"); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 7b66d914bf..51b68f7c3f 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,7 +36,7 @@ const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const SpvModule = @import("../codegen/spirv/Module.zig"); const spec = @import("../codegen/spirv/spec.zig"); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 9fe8ce417e..42179ac5e8 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -23,7 +23,7 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = std.Build.Cache; const Type = @import("../type.zig").Type; -const Value = @import("../value.zig").Value; +const Value = @import("../Value.zig"); const TypedValue = @import("../TypedValue.zig"); const LlvmObject = @import("../codegen/llvm.zig").Object; const Air = @import("../Air.zig"); diff --git a/src/print_air.zig b/src/print_air.zig index c79238e07c..6f9ac1f771 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -3,7 +3,7 @@ const Allocator = std.mem.Allocator; const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Module = @import("Module.zig"); -const Value = @import("value.zig").Value; +const Value = @import("Value.zig"); const Type = @import("type.zig").Type; const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); diff --git a/src/type.zig b/src/type.zig index 9ca2822204..4c7452ed08 100644 --- a/src/type.zig +++ b/src/type.zig @@ -1,6 +1,6 @@ const std = @import("std"); const builtin = @import("builtin"); -const Value = 
@import("value.zig").Value; +const Value = @import("Value.zig"); const assert = std.debug.assert; const Target = std.Target; const Module = @import("Module.zig"); diff --git a/src/value.zig b/src/value.zig deleted file mode 100644 index 4276b09c68..0000000000 --- a/src/value.zig +++ /dev/null @@ -1,4077 +0,0 @@ -const std = @import("std"); -const builtin = @import("builtin"); -const Type = @import("type.zig").Type; -const log2 = std.math.log2; -const assert = std.debug.assert; -const BigIntConst = std.math.big.int.Const; -const BigIntMutable = std.math.big.int.Mutable; -const Target = std.Target; -const Allocator = std.mem.Allocator; -const Module = @import("Module.zig"); -const TypedValue = @import("TypedValue.zig"); -const Sema = @import("Sema.zig"); -const InternPool = @import("InternPool.zig"); - -pub const Value = struct { - /// We are migrating towards using this for every Value object. However, many - /// values are still represented the legacy way. This is indicated by using - /// InternPool.Index.none. - ip_index: InternPool.Index, - - /// This is the raw data, with no bookkeeping, no memory awareness, - /// no de-duplication, and no type system awareness. - /// This union takes advantage of the fact that the first page of memory - /// is unmapped, giving us 4096 possible enum tags that have no payload. - legacy: extern union { - ptr_otherwise: *Payload, - }, - - // Keep in sync with tools/stage2_pretty_printers_common.py - pub const Tag = enum(usize) { - // The first section of this enum are tags that require no payload. - // After this, the tag requires a payload. - - /// When the type is error union: - /// * If the tag is `.@"error"`, the error union is an error. - /// * If the tag is `.eu_payload`, the error union is a payload. - /// * A nested error such as `anyerror!(anyerror!T)` in which the the outer error union - /// is non-error, but the inner error union is an error, is represented as - /// a tag of `.eu_payload`, with a sub-tag of `.@"error"`. - eu_payload, - /// When the type is optional: - /// * If the tag is `.null_value`, the optional is null. - /// * If the tag is `.opt_payload`, the optional is a payload. - /// * A nested optional such as `??T` in which the the outer optional - /// is non-null, but the inner optional is null, is represented as - /// a tag of `.opt_payload`, with a sub-tag of `.null_value`. - opt_payload, - /// Pointer and length as sub `Value` objects. - slice, - /// A slice of u8 whose memory is managed externally. - bytes, - /// This value is repeated some number of times. The amount of times to repeat - /// is stored externally. - repeated, - /// An instance of a struct, array, or vector. - /// Each element/field stored as a `Value`. - /// In the case of sentinel-terminated arrays, the sentinel value *is* stored, - /// so the slice length will be one more than the type's array length. - aggregate, - /// An instance of a union. 
- @"union", - - pub fn Type(comptime t: Tag) type { - return switch (t) { - .eu_payload, - .opt_payload, - .repeated, - => Payload.SubValue, - .slice => Payload.Slice, - .bytes => Payload.Bytes, - .aggregate => Payload.Aggregate, - .@"union" => Payload.Union, - }; - } - - pub fn create(comptime t: Tag, ally: Allocator, data: Data(t)) error{OutOfMemory}!Value { - const ptr = try ally.create(t.Type()); - ptr.* = .{ - .base = .{ .tag = t }, - .data = data, - }; - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = &ptr.base }, - }; - } - - pub fn Data(comptime t: Tag) type { - return std.meta.fieldInfo(t.Type(), .data).type; - } - }; - - pub fn initPayload(payload: *Payload) Value { - return Value{ - .ip_index = .none, - .legacy = .{ .ptr_otherwise = payload }, - }; - } - - pub fn tag(self: Value) Tag { - assert(self.ip_index == .none); - return self.legacy.ptr_otherwise.tag; - } - - /// Prefer `castTag` to this. - pub fn cast(self: Value, comptime T: type) ?*T { - if (self.ip_index != .none) { - return null; - } - if (@hasField(T, "base_tag")) { - return self.castTag(T.base_tag); - } - inline for (@typeInfo(Tag).Enum.fields) |field| { - const t = @as(Tag, @enumFromInt(field.value)); - if (self.legacy.ptr_otherwise.tag == t) { - if (T == t.Type()) { - return @fieldParentPtr(T, "base", self.legacy.ptr_otherwise); - } - return null; - } - } - unreachable; - } - - pub fn castTag(self: Value, comptime t: Tag) ?*t.Type() { - if (self.ip_index != .none) return null; - - if (self.legacy.ptr_otherwise.tag == t) - return @fieldParentPtr(t.Type(), "base", self.legacy.ptr_otherwise); - - return null; - } - - pub fn format(val: Value, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void { - _ = val; - _ = fmt; - _ = options; - _ = writer; - @compileError("do not use format values directly; use either fmtDebug or fmtValue"); - } - - /// This is a debug function. In order to print values in a meaningful way - /// we also need access to the type. - pub fn dump( - start_val: Value, - comptime fmt: []const u8, - _: std.fmt.FormatOptions, - out_stream: anytype, - ) !void { - comptime assert(fmt.len == 0); - if (start_val.ip_index != .none) { - try out_stream.print("(interned: {})", .{start_val.toIntern()}); - return; - } - var val = start_val; - while (true) switch (val.tag()) { - .aggregate => { - return out_stream.writeAll("(aggregate)"); - }, - .@"union" => { - return out_stream.writeAll("(union value)"); - }, - .bytes => return out_stream.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}), - .repeated => { - try out_stream.writeAll("(repeated) "); - val = val.castTag(.repeated).?.data; - }, - .eu_payload => { - try out_stream.writeAll("(eu_payload) "); - val = val.castTag(.repeated).?.data; - }, - .opt_payload => { - try out_stream.writeAll("(opt_payload) "); - val = val.castTag(.repeated).?.data; - }, - .slice => return out_stream.writeAll("(slice)"), - }; - } - - pub fn fmtDebug(val: Value) std.fmt.Formatter(dump) { - return .{ .data = val }; - } - - pub fn fmtValue(val: Value, ty: Type, mod: *Module) std.fmt.Formatter(TypedValue.format) { - return .{ .data = .{ - .tv = .{ .ty = ty, .val = val }, - .mod = mod, - } }; - } - - /// Asserts that the value is representable as an array of bytes. - /// Returns the value as a null-terminated string stored in the InternPool. 
- pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminatedString { - const ip = &mod.intern_pool; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .enum_literal => |enum_literal| enum_literal, - .slice => |slice| try arrayToIpString(val, Value.fromInterned(slice.len).toUnsignedInt(mod), mod), - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes), - .elems => try arrayToIpString(val, ty.arrayLen(mod), mod), - .repeated_elem => |elem| { - const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod))); - const len = @as(usize, @intCast(ty.arrayLen(mod))); - try ip.string_bytes.appendNTimes(mod.gpa, byte, len); - return ip.getOrPutTrailingString(mod.gpa, len); - }, - }, - else => unreachable, - }; - } - - /// Asserts that the value is representable as an array of bytes. - /// Copies the value into a freshly allocated slice of memory, which is owned by the caller. - pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)), - .slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(mod), allocator, mod), - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| try allocator.dupe(u8, bytes), - .elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod), - .repeated_elem => |elem| { - const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod))); - const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod)))); - @memset(result, byte); - return result; - }, - }, - else => unreachable, - }; - } - - fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 { - const result = try allocator.alloc(u8, @as(usize, @intCast(len))); - for (result, 0..) |*elem, i| { - const elem_val = try val.elemValue(mod, i); - elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); - } - return result; - } - - fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString { - const gpa = mod.gpa; - const ip = &mod.intern_pool; - const len = @as(usize, @intCast(len_u64)); - try ip.string_bytes.ensureUnusedCapacity(gpa, len); - for (0..len) |i| { - // I don't think elemValue has the possibility to affect ip.string_bytes. Let's - // assert just to be sure. 
- const prev = ip.string_bytes.items.len; - const elem_val = try val.elemValue(mod, i); - assert(ip.string_bytes.items.len == prev); - const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod))); - ip.string_bytes.appendAssumeCapacity(byte); - } - return ip.getOrPutTrailingString(gpa, len); - } - - pub fn intern2(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return val.ip_index; - return intern(val, ty, mod); - } - - pub fn intern(val: Value, ty: Type, mod: *Module) Allocator.Error!InternPool.Index { - if (val.ip_index != .none) return (try mod.getCoerced(val, ty)).toIntern(); - const ip = &mod.intern_pool; - switch (val.tag()) { - .eu_payload => { - const pl = val.castTag(.eu_payload).?.data; - return mod.intern(.{ .error_union = .{ - .ty = ty.toIntern(), - .val = .{ .payload = try pl.intern(ty.errorUnionPayload(mod), mod) }, - } }); - }, - .opt_payload => { - const pl = val.castTag(.opt_payload).?.data; - return mod.intern(.{ .opt = .{ - .ty = ty.toIntern(), - .val = try pl.intern(ty.optionalChild(mod), mod), - } }); - }, - .slice => { - const pl = val.castTag(.slice).?.data; - return mod.intern(.{ .slice = .{ - .ty = ty.toIntern(), - .len = try pl.len.intern(Type.usize, mod), - .ptr = try pl.ptr.intern(ty.slicePtrFieldType(mod), mod), - } }); - }, - .bytes => { - const pl = val.castTag(.bytes).?.data; - return mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .bytes = pl }, - } }); - }, - .repeated => { - const pl = val.castTag(.repeated).?.data; - return mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .repeated_elem = try pl.intern(ty.childType(mod), mod) }, - } }); - }, - .aggregate => { - const len = @as(usize, @intCast(ty.arrayLen(mod))); - const old_elems = val.castTag(.aggregate).?.data[0..len]; - const new_elems = try mod.gpa.alloc(InternPool.Index, old_elems.len); - defer mod.gpa.free(new_elems); - const ty_key = ip.indexToKey(ty.toIntern()); - for (new_elems, old_elems, 0..) 
|*new_elem, old_elem, field_i| - new_elem.* = try old_elem.intern(switch (ty_key) { - .struct_type => ty.structFieldType(field_i, mod), - .anon_struct_type => |info| Type.fromInterned(info.types.get(ip)[field_i]), - inline .array_type, .vector_type => |info| Type.fromInterned(info.child), - else => unreachable, - }, mod); - return mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = new_elems }, - } }); - }, - .@"union" => { - const pl = val.castTag(.@"union").?.data; - if (pl.tag) |pl_tag| { - return mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = try pl_tag.intern(ty.unionTagTypeHypothetical(mod), mod), - .val = try pl.val.intern(ty.unionFieldType(pl_tag, mod).?, mod), - } }); - } else { - return mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = .none, - .val = try pl.val.intern(try ty.unionBackingType(mod), mod), - } }); - } - }, - } - } - - pub fn unintern(val: Value, arena: Allocator, mod: *Module) Allocator.Error!Value { - return if (val.ip_index == .none) val else switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int_type, - .ptr_type, - .array_type, - .vector_type, - .opt_type, - .anyframe_type, - .error_union_type, - .simple_type, - .struct_type, - .anon_struct_type, - .union_type, - .opaque_type, - .enum_type, - .func_type, - .error_set_type, - .inferred_error_set_type, - - .undef, - .simple_value, - .variable, - .extern_func, - .func, - .int, - .err, - .enum_literal, - .enum_tag, - .empty_enum_value, - .float, - .ptr, - => val, - - .error_union => |error_union| switch (error_union.val) { - .err_name => val, - .payload => |payload| Tag.eu_payload.create(arena, Value.fromInterned(payload)), - }, - - .slice => |slice| Tag.slice.create(arena, .{ - .ptr = Value.fromInterned(slice.ptr), - .len = Value.fromInterned(slice.len), - }), - - .opt => |opt| switch (opt.val) { - .none => val, - else => |payload| Tag.opt_payload.create(arena, Value.fromInterned(payload)), - }, - - .aggregate => |aggregate| switch (aggregate.storage) { - .bytes => |bytes| Tag.bytes.create(arena, try arena.dupe(u8, bytes)), - .elems => |old_elems| { - const new_elems = try arena.alloc(Value, old_elems.len); - for (new_elems, old_elems) |*new_elem, old_elem| new_elem.* = Value.fromInterned(old_elem); - return Tag.aggregate.create(arena, new_elems); - }, - .repeated_elem => |elem| Tag.repeated.create(arena, Value.fromInterned(elem)), - }, - - .un => |un| Tag.@"union".create(arena, .{ - // toValue asserts that the value cannot be .none which is valid on unions. - .tag = if (un.tag == .none) null else Value.fromInterned(un.tag), - .val = Value.fromInterned(un.val), - }), - - .memoized_call => unreachable, - }; - } - - pub fn fromInterned(i: InternPool.Index) Value { - assert(i != .none); - return .{ - .ip_index = i, - .legacy = undefined, - }; - } - - pub fn toIntern(val: Value) InternPool.Index { - assert(val.ip_index != .none); - return val.ip_index; - } - - /// Asserts that the value is representable as a type. - pub fn toType(self: Value) Type { - return Type.fromInterned(self.toIntern()); - } - - pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) { - // Assume it is already an integer and return it directly. - .simple_type, .int_type => val, - .enum_literal => |enum_literal| { - const field_index = ty.enumFieldIndex(enum_literal, mod).?; - return switch (ip.indexToKey(ty.toIntern())) { - // Assume it is already an integer and return it directly. 
- .simple_type, .int_type => val, - .enum_type => |enum_type| if (enum_type.values.len != 0) - Value.fromInterned(enum_type.values.get(ip)[field_index]) - else // Field index and integer values are the same. - mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index), - else => unreachable, - }; - }, - .enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)), - else => unreachable, - }; - } - - /// Asserts the value is an integer. - pub fn toBigInt(val: Value, space: *BigIntSpace, mod: *Module) BigIntConst { - return val.toBigIntAdvanced(space, mod, null) catch unreachable; - } - - /// Asserts the value is an integer. - pub fn toBigIntAdvanced( - val: Value, - space: *BigIntSpace, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!BigIntConst { - return switch (val.toIntern()) { - .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), - .bool_true => BigIntMutable.init(&space.limbs, 1).toConst(), - .null_value => BigIntMutable.init(&space.limbs, 0).toConst(), - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .u64, .i64, .big_int => int.storage.toBigInt(space), - .lazy_align, .lazy_size => |ty| { - if (opt_sema) |sema| try sema.resolveTypeLayout(Type.fromInterned(ty)); - const x = switch (int.storage) { - else => unreachable, - .lazy_align => Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), - .lazy_size => Type.fromInterned(ty).abiSize(mod), - }; - return BigIntMutable.init(&space.limbs, x).toConst(); - }, - }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).toBigIntAdvanced(space, mod, opt_sema), - .opt, .ptr => BigIntMutable.init( - &space.limbs, - (try val.getUnsignedIntAdvanced(mod, opt_sema)).?, - ).toConst(), - else => unreachable, - }, - }; - } - - pub fn isFuncBody(val: Value, mod: *Module) bool { - return mod.intern_pool.isFuncBody(val.toIntern()); - } - - pub fn getFunction(val: Value, mod: *Module) ?InternPool.Key.Func { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { - .func => |x| x, - else => null, - } else null; - } - - pub fn getExternFunc(val: Value, mod: *Module) ?InternPool.Key.ExternFunc { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { - .extern_func => |extern_func| extern_func, - else => null, - } else null; - } - - pub fn getVariable(val: Value, mod: *Module) ?InternPool.Key.Variable { - return if (val.ip_index != .none) switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable, - else => null, - } else null; - } - - /// If the value fits in a u64, return it, otherwise null. - /// Asserts not undefined. - pub fn getUnsignedInt(val: Value, mod: *Module) ?u64 { - return getUnsignedIntAdvanced(val, mod, null) catch unreachable; - } - - /// If the value fits in a u64, return it, otherwise null. - /// Asserts not undefined. 
- pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64 { - return switch (val.toIntern()) { - .undef => unreachable, - .bool_false => 0, - .bool_true => 1, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => unreachable, - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.to(u64) catch null, - .u64 => |x| x, - .i64 => |x| std.math.cast(u64, x), - .lazy_align => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0) - else - Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), - .lazy_size => |ty| if (opt_sema) |sema| - (try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar - else - Type.fromInterned(ty).abiSize(mod), - }, - .ptr => |ptr| switch (ptr.addr) { - .int => |int| Value.fromInterned(int).getUnsignedIntAdvanced(mod, opt_sema), - .elem => |elem| { - const base_addr = (try Value.fromInterned(elem.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; - const elem_ty = Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod); - return base_addr + elem.index * elem_ty.abiSize(mod); - }, - .field => |field| { - const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null; - const struct_ty = Type.fromInterned(mod.intern_pool.typeOf(field.base)).childType(mod); - if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty); - return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod); - }, - else => null, - }, - .opt => |opt| switch (opt.val) { - .none => 0, - else => |payload| Value.fromInterned(payload).getUnsignedIntAdvanced(mod, opt_sema), - }, - else => null, - }, - }; - } - - /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedInt(val: Value, mod: *Module) u64 { - return getUnsignedInt(val, mod).?; - } - - /// Asserts the value is an integer and it fits in a u64 - pub fn toUnsignedIntAdvanced(val: Value, sema: *Sema) !u64 { - return (try getUnsignedIntAdvanced(val, sema.mod, sema)).?; - } - - /// Asserts the value is an integer and it fits in a i64 - pub fn toSignedInt(val: Value, mod: *Module) i64 { - return switch (val.toIntern()) { - .bool_false => 0, - .bool_true => 1, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.to(i64) catch unreachable, - .i64 => |x| x, - .u64 => |x| @intCast(x), - .lazy_align => |ty| @intCast(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)), - .lazy_size => |ty| @intCast(Type.fromInterned(ty).abiSize(mod)), - }, - else => unreachable, - }, - }; - } - - pub fn toBool(val: Value) bool { - return switch (val.toIntern()) { - .bool_true => true, - .bool_false => false, - else => unreachable, - }; - } - - fn isDeclRef(val: Value, mod: *Module) bool { - var check = val; - while (true) switch (mod.intern_pool.indexToKey(check.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { - .decl, .mut_decl, .comptime_field, .anon_decl => return true, - .eu_payload, .opt_payload => |base| check = Value.fromInterned(base), - .elem, .field => |base_index| check = Value.fromInterned(base_index.base), - .int => return false, - }, - else => return false, - }; - } - - /// Write a Value's contents to `buffer`. - /// - /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past - /// the end of the value in memory. 
- pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{ - ReinterpretDeclRef, - IllDefinedMemoryLayout, - Unimplemented, - OutOfMemory, - }!void { - const target = mod.getTarget(); - const endian = target.cpu.arch.endian(); - if (val.isUndef(mod)) { - const size: usize = @intCast(ty.abiSize(mod)); - @memset(buffer[0..size], 0xaa); - return; - } - const ip = &mod.intern_pool; - switch (ty.zigTypeTag(mod)) { - .Void => {}, - .Bool => { - buffer[0] = @intFromBool(val.toBool()); - }, - .Int, .Enum => { - const int_info = ty.intInfo(mod); - const bits = int_info.bits; - const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); - - var bigint_buffer: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buffer, mod); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - }, - .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian), - 32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian), - 64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian), - 80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian), - 128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian), - else => unreachable, - }, - .Array => { - const len = ty.arrayLen(mod); - const elem_ty = ty.childType(mod); - const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod))); - var elem_i: usize = 0; - var buf_off: usize = 0; - while (elem_i < len) : (elem_i += 1) { - const elem_val = try val.elemValue(mod, elem_i); - try elem_val.writeToMemory(elem_ty, mod, buffer[buf_off..]); - buf_off += elem_size; - } - }, - .Vector => { - // We use byte_count instead of abi_size here, so that any padding bytes - // follow the data bytes, on both big- and little-endian systems. 
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); - }, - .Struct => { - const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout; - switch (struct_type.layout) { - .Auto => return error.IllDefinedMemoryLayout, - .Extern => for (0..struct_type.field_types.len) |i| { - const off: usize = @intCast(ty.structFieldOffset(i, mod)); - const field_val = switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => { - buffer[off] = val.castTag(.bytes).?.data[i]; - continue; - }, - .aggregate => val.castTag(.aggregate).?.data[i], - .repeated => val.castTag(.repeated).?.data, - else => unreachable, - }, - else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => |bytes| { - buffer[off] = bytes[i]; - continue; - }, - .elems => |elems| elems[i], - .repeated_elem => |elem| elem, - }), - }; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - try writeToMemory(field_val, field_ty, mod, buffer[off..]); - }, - .Packed => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); - }, - } - }, - .ErrorSet => { - const bits = mod.errorSetBits(); - const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); - - const name = switch (ip.indexToKey(val.toIntern())) { - .err => |err| err.name, - .error_union => |error_union| error_union.val.err_name, - else => unreachable, - }; - var bigint_buffer: BigIntSpace = undefined; - const bigint = BigIntMutable.init( - &bigint_buffer.limbs, - mod.global_error_set.getIndex(name).?, - ).toConst(); - bigint.writeTwosComplement(buffer[0..byte_count], endian); - }, - .Union => switch (ty.containerLayout(mod)) { - .Auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already - .Extern => { - if (val.unionTag(mod)) |union_tag| { - const union_obj = mod.typeToUnion(ty).?; - const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; - const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]); - const field_val = try val.fieldValue(mod, field_index); - const byte_count = @as(usize, @intCast(field_type.abiSize(mod))); - return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]); - } else { - const backing_ty = try ty.unionBackingType(mod); - const byte_count: usize = @intCast(backing_ty.abiSize(mod)); - return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]); - } - }, - .Packed => { - const backing_ty = try ty.unionBackingType(mod); - const byte_count: usize = @intCast(backing_ty.abiSize(mod)); - return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0); - }, - }, - .Pointer => { - if (ty.isSlice(mod)) return error.IllDefinedMemoryLayout; - if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; - return val.writeToMemory(Type.usize, mod, buffer); - }, - .Optional => { - if (!ty.isPtrLikeOptional(mod)) return error.IllDefinedMemoryLayout; - const child = ty.optionalChild(mod); - const opt_val = val.optionalValue(mod); - if (opt_val) |some| { - return some.writeToMemory(child, mod, buffer); - } else { - return writeToMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer); - } - }, - else => return error.Unimplemented, - } - } - - /// Write a Value's contents to `buffer`. 
- /// - /// Both the start and the end of the provided buffer must be tight, since - /// big-endian packed memory layouts start at the end of the buffer. - pub fn writeToPackedMemory( - val: Value, - ty: Type, - mod: *Module, - buffer: []u8, - bit_offset: usize, - ) error{ ReinterpretDeclRef, OutOfMemory }!void { - const ip = &mod.intern_pool; - const target = mod.getTarget(); - const endian = target.cpu.arch.endian(); - if (val.isUndef(mod)) { - const bit_size = @as(usize, @intCast(ty.bitSize(mod))); - std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian); - return; - } - switch (ty.zigTypeTag(mod)) { - .Void => {}, - .Bool => { - const byte_index = switch (endian) { - .little => bit_offset / 8, - .big => buffer.len - bit_offset / 8 - 1, - }; - if (val.toBool()) { - buffer[byte_index] |= (@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); - } else { - buffer[byte_index] &= ~(@as(u8, 1) << @as(u3, @intCast(bit_offset % 8))); - } - }, - .Int, .Enum => { - if (buffer.len == 0) return; - const bits = ty.intInfo(mod).bits; - if (bits == 0) return; - - switch (ip.indexToKey((try val.intFromEnum(ty, mod)).toIntern()).int.storage) { - inline .u64, .i64 => |int| std.mem.writeVarPackedInt(buffer, bit_offset, bits, int, endian), - .big_int => |bigint| bigint.writePackedTwosComplement(buffer, bit_offset, bits, endian), - .lazy_align => |lazy_align| { - const num = Type.fromInterned(lazy_align).abiAlignment(mod).toByteUnits(0); - std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); - }, - .lazy_size => |lazy_size| { - const num = Type.fromInterned(lazy_size).abiSize(mod); - std.mem.writeVarPackedInt(buffer, bit_offset, bits, num, endian); - }, - } - }, - .Float => switch (ty.floatBits(target)) { - 16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian), - 32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian), - 64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian), - 80 => std.mem.writePackedInt(u80, buffer, bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian), - 128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian), - else => unreachable, - }, - .Vector => { - const elem_ty = ty.childType(mod); - const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); - const len = @as(usize, @intCast(ty.arrayLen(mod))); - - var bits: u16 = 0; - var elem_i: usize = 0; - while (elem_i < len) : (elem_i += 1) { - // On big-endian systems, LLVM reverses the element order of vectors by default - const tgt_elem_i = if (endian == .big) len - elem_i - 1 else elem_i; - const elem_val = try val.elemValue(mod, tgt_elem_i); - try elem_val.writeToPackedMemory(elem_ty, mod, buffer, bit_offset + bits); - bits += elem_bit_size; - } - }, - .Struct => { - const struct_type = ip.indexToKey(ty.toIntern()).struct_type; - // Sema is supposed to have emitted a compile error already in the case of Auto, - // and Extern is handled in non-packed writeToMemory. 
- assert(struct_type.layout == .Packed); - var bits: u16 = 0; - for (0..struct_type.field_types.len) |i| { - const field_val = switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => unreachable, - .aggregate => val.castTag(.aggregate).?.data[i], - .repeated => val.castTag(.repeated).?.data, - else => unreachable, - }, - else => Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) { - .bytes => unreachable, - .elems => |elems| elems[i], - .repeated_elem => |elem| elem, - }), - }; - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - const field_bits: u16 = @intCast(field_ty.bitSize(mod)); - try field_val.writeToPackedMemory(field_ty, mod, buffer, bit_offset + bits); - bits += field_bits; - } - }, - .Union => { - const union_obj = mod.typeToUnion(ty).?; - switch (union_obj.getLayout(ip)) { - .Auto, .Extern => unreachable, // Handled in non-packed writeToMemory - .Packed => { - if (val.unionTag(mod)) |union_tag| { - const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?; - const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]); - const field_val = try val.fieldValue(mod, field_index); - return field_val.writeToPackedMemory(field_type, mod, buffer, bit_offset); - } else { - const backing_ty = try ty.unionBackingType(mod); - return val.unionValue(mod).writeToPackedMemory(backing_ty, mod, buffer, bit_offset); - } - }, - } - }, - .Pointer => { - assert(!ty.isSlice(mod)); // No well defined layout. - if (val.isDeclRef(mod)) return error.ReinterpretDeclRef; - return val.writeToPackedMemory(Type.usize, mod, buffer, bit_offset); - }, - .Optional => { - assert(ty.isPtrLikeOptional(mod)); - const child = ty.optionalChild(mod); - const opt_val = val.optionalValue(mod); - if (opt_val) |some| { - return some.writeToPackedMemory(child, mod, buffer, bit_offset); - } else { - return writeToPackedMemory(try mod.intValue(Type.usize, 0), Type.usize, mod, buffer, bit_offset); - } - }, - else => @panic("TODO implement writeToPackedMemory for more types"), - } - } - - /// Load a Value from the contents of `buffer`. - /// - /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past - /// the end of the value in memory. 
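The removed writeToMemory above sizes integer buffers as (bits + 7) / 8 and emits two's-complement bytes, while writeToPackedMemory addresses individual bits inside a tight buffer. A minimal standalone sketch of both ideas using only `std`; the test names and values are illustrative and are not compiler API, and it assumes `writeTwosComplement` fills the whole destination buffer:

const std = @import("std");

test "tight two's-complement byte layout" {
    // (bits + 7) / 8, as in the Int/Enum case of writeToMemory: a 12-bit
    // integer occupies two bytes, with the padding bits in the final byte.
    const byte_count = (12 + 7) / 8;
    try std.testing.expectEqual(@as(usize, 2), byte_count);

    var limbs: [std.math.big.int.calcTwosCompLimbCount(12)]std.math.big.Limb = undefined;
    const big = std.math.big.int.Mutable.init(&limbs, 300).toConst();
    var buffer: [byte_count]u8 = undefined;
    big.writeTwosComplement(&buffer, .little);
    // 300 == 0x012C, written little-endian into the tight buffer.
    try std.testing.expectEqualSlices(u8, &.{ 0x2c, 0x01 }, &buffer);
}

test "packed writes address bits, not bytes" {
    // Mirrors writeToPackedMemory: a 5-bit field written at bit offset 3 of a
    // tight two-byte buffer, then read back with the matching std.mem helper.
    var buffer = [_]u8{0} ** 2;
    std.mem.writeVarPackedInt(&buffer, 3, 5, @as(u5, 0b10110), .little);
    const back = std.mem.readVarPackedInt(u5, &buffer, 3, 5, .little, .unsigned);
    try std.testing.expectEqual(@as(u5, 0b10110), back);
}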
- pub fn readFromMemory( - ty: Type, - mod: *Module, - buffer: []const u8, - arena: Allocator, - ) error{ - IllDefinedMemoryLayout, - Unimplemented, - OutOfMemory, - }!Value { - const ip = &mod.intern_pool; - const target = mod.getTarget(); - const endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag(mod)) { - .Void => return Value.void, - .Bool => { - if (buffer[0] == 0) { - return Value.false; - } else { - return Value.true; - } - }, - .Int, .Enum => |ty_tag| { - const int_ty = switch (ty_tag) { - .Int => ty, - .Enum => ty.intTagType(mod), - else => unreachable, - }; - const int_info = int_ty.intInfo(mod); - const bits = int_info.bits; - const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); - if (bits == 0 or buffer.len == 0) return mod.getCoerced(try mod.intValue(int_ty, 0), ty); - - if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64 - .signed => { - const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian); - const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); - return mod.getCoerced(try mod.intValue(int_ty, result), ty); - }, - .unsigned => { - const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian); - const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); - return mod.getCoerced(try mod.intValue(int_ty, result), ty); - }, - } else { // Slow path, we have to construct a big-int - const Limb = std.math.big.Limb; - const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness); - return mod.getCoerced(try mod.intValue_big(int_ty, bigint.toConst()), ty); - } - }, - .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = ty.toIntern(), - .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) }, - 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) }, - 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) }, - 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) }, - 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) }, - else => unreachable, - }, - } }))), - .Array => { - const elem_ty = ty.childType(mod); - const elem_size = elem_ty.abiSize(mod); - const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); - var offset: usize = 0; - for (elems) |*elem| { - elem.* = try (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).intern(elem_ty, mod); - offset += @as(usize, @intCast(elem_size)); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = elems }, - } }))); - }, - .Vector => { - // We use byte_count instead of abi_size here, so that any padding bytes - // follow the data bytes, on both big- and little-endian systems. 
- const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); - }, - .Struct => { - const struct_type = mod.typeToStruct(ty).?; - switch (struct_type.layout) { - .Auto => unreachable, // Sema is supposed to have emitted a compile error already - .Extern => { - const field_types = struct_type.field_types; - const field_vals = try arena.alloc(InternPool.Index, field_types.len); - for (field_vals, 0..) |*field_val, i| { - const field_ty = Type.fromInterned(field_types.get(ip)[i]); - const off: usize = @intCast(ty.structFieldOffset(i, mod)); - const sz: usize = @intCast(field_ty.abiSize(mod)); - field_val.* = try (try readFromMemory(field_ty, mod, buffer[off..(off + sz)], arena)).intern(field_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = field_vals }, - } }))); - }, - .Packed => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); - }, - } - }, - .ErrorSet => { - const bits = mod.errorSetBits(); - const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8); - const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian); - const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits)); - const name = mod.global_error_set.keys()[@intCast(index)]; - - return Value.fromInterned((try mod.intern(.{ .err = .{ - .ty = ty.toIntern(), - .name = name, - } }))); - }, - .Union => switch (ty.containerLayout(mod)) { - .Auto => return error.IllDefinedMemoryLayout, - .Extern => { - const union_size = ty.abiSize(mod); - const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type }); - const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod); - return Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = .none, - .val = val, - } }))); - }, - .Packed => { - const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8; - return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena); - }, - }, - .Pointer => { - assert(!ty.isSlice(mod)); // No well defined layout. - const int_val = try readFromMemory(Type.usize, mod, buffer, arena); - return Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = ty.toIntern(), - .addr = .{ .int = int_val.toIntern() }, - } }))); - }, - .Optional => { - assert(ty.isPtrLikeOptional(mod)); - const child_ty = ty.optionalChild(mod); - const child_val = try readFromMemory(child_ty, mod, buffer, arena); - return Value.fromInterned((try mod.intern(.{ .opt = .{ - .ty = ty.toIntern(), - .val = switch (child_val.orderAgainstZero(mod)) { - .lt => unreachable, - .eq => .none, - .gt => child_val.toIntern(), - }, - } }))); - }, - else => return error.Unimplemented, - } - } - - /// Load a Value from the contents of `buffer`. - /// - /// Both the start and the end of the provided buffer must be tight, since - /// big-endian packed memory layouts start at the end of the buffer. 
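The readFromMemory fast path above recovers an N-bit signed integer from at most eight bytes by shifting left and then right by 64 - bits, which discards the padding bits and propagates the sign bit. A worked standalone example of that trick; the value is illustrative:

const std = @import("std");

test "fast-path sign extension for a 12-bit load" {
    const bits = 12;
    // The low 12 bits hold -5 as an i12 (0xFFB); the upper bits are padding.
    const raw: i64 = 0x0FFB;
    const result = (raw << (64 - bits)) >> (64 - bits);
    try std.testing.expectEqual(@as(i64, -5), result);
}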
- pub fn readFromPackedMemory( - ty: Type, - mod: *Module, - buffer: []const u8, - bit_offset: usize, - arena: Allocator, - ) error{ - IllDefinedMemoryLayout, - OutOfMemory, - }!Value { - const ip = &mod.intern_pool; - const target = mod.getTarget(); - const endian = target.cpu.arch.endian(); - switch (ty.zigTypeTag(mod)) { - .Void => return Value.void, - .Bool => { - const byte = switch (endian) { - .big => buffer[buffer.len - bit_offset / 8 - 1], - .little => buffer[bit_offset / 8], - }; - if (((byte >> @as(u3, @intCast(bit_offset % 8))) & 1) == 0) { - return Value.false; - } else { - return Value.true; - } - }, - .Int, .Enum => |ty_tag| { - if (buffer.len == 0) return mod.intValue(ty, 0); - const int_info = ty.intInfo(mod); - const bits = int_info.bits; - if (bits == 0) return mod.intValue(ty, 0); - - // Fast path for integers <= u64 - if (bits <= 64) { - const int_ty = switch (ty_tag) { - .Int => ty, - .Enum => ty.intTagType(mod), - else => unreachable, - }; - return mod.getCoerced(switch (int_info.signedness) { - .signed => return mod.intValue( - int_ty, - std.mem.readVarPackedInt(i64, buffer, bit_offset, bits, endian, .signed), - ), - .unsigned => return mod.intValue( - int_ty, - std.mem.readVarPackedInt(u64, buffer, bit_offset, bits, endian, .unsigned), - ), - }, ty); - } - - // Slow path, we have to construct a big-int - const abi_size = @as(usize, @intCast(ty.abiSize(mod))); - const Limb = std.math.big.Limb; - const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb); - const limbs_buffer = try arena.alloc(Limb, limb_count); - - var bigint = BigIntMutable.init(limbs_buffer, 0); - bigint.readPackedTwosComplement(buffer, bit_offset, bits, endian, int_info.signedness); - return mod.intValue_big(ty, bigint.toConst()); - }, - .Float => return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = ty.toIntern(), - .storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) }, - 32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) }, - 64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) }, - 80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) }, - 128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) }, - else => unreachable, - }, - } }))), - .Vector => { - const elem_ty = ty.childType(mod); - const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod)))); - - var bits: u16 = 0; - const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod))); - for (elems, 0..) |_, i| { - // On big-endian systems, LLVM reverses the element order of vectors by default - const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i; - elems[tgt_elem_i] = try (try readFromPackedMemory(elem_ty, mod, buffer, bit_offset + bits, arena)).intern(elem_ty, mod); - bits += elem_bit_size; - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = elems }, - } }))); - }, - .Struct => { - // Sema is supposed to have emitted a compile error already for Auto layout structs, - // and Extern is handled by non-packed readFromMemory. - const struct_type = mod.typeToPackedStruct(ty).?; - var bits: u16 = 0; - const field_vals = try arena.alloc(InternPool.Index, struct_type.field_types.len); - for (field_vals, 0..) 
|*field_val, i| { - const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]); - const field_bits: u16 = @intCast(field_ty.bitSize(mod)); - field_val.* = try (try readFromPackedMemory(field_ty, mod, buffer, bit_offset + bits, arena)).intern(field_ty, mod); - bits += field_bits; - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = field_vals }, - } }))); - }, - .Union => switch (ty.containerLayout(mod)) { - .Auto, .Extern => unreachable, // Handled by non-packed readFromMemory - .Packed => { - const backing_ty = try ty.unionBackingType(mod); - const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern(); - return Value.fromInterned((try mod.intern(.{ .un = .{ - .ty = ty.toIntern(), - .tag = .none, - .val = val, - } }))); - }, - }, - .Pointer => { - assert(!ty.isSlice(mod)); // No well defined layout. - return readFromPackedMemory(Type.usize, mod, buffer, bit_offset, arena); - }, - .Optional => { - assert(ty.isPtrLikeOptional(mod)); - const child = ty.optionalChild(mod); - return readFromPackedMemory(child, mod, buffer, bit_offset, arena); - }, - else => @panic("TODO implement readFromPackedMemory for more types"), - } - } - - /// Asserts that the value is a float or an integer. - pub fn toFloat(val: Value, comptime T: type, mod: *Module) T { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| switch (int.storage) { - .big_int => |big_int| @floatCast(bigIntToFloat(big_int.limbs, big_int.positive)), - inline .u64, .i64 => |x| { - if (T == f80) { - @panic("TODO we can't lower this properly on non-x86 llvm backend yet"); - } - return @floatFromInt(x); - }, - .lazy_align => |ty| @floatFromInt(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0)), - .lazy_size => |ty| @floatFromInt(Type.fromInterned(ty).abiSize(mod)), - }, - .float => |float| switch (float.storage) { - inline else => |x| @floatCast(x), - }, - else => unreachable, - }; - } - - /// TODO move this to std lib big int code - fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 { - if (limbs.len == 0) return 0; - - const base = std.math.maxInt(std.math.big.Limb) + 1; - var result: f128 = 0; - var i: usize = limbs.len; - while (i != 0) { - i -= 1; - const limb: f128 = @as(f128, @floatFromInt(limbs[i])); - result = @mulAdd(f128, base, result, limb); - } - if (positive) { - return result; - } else { - return -result; - } - } - - pub fn clz(val: Value, ty: Type, mod: *Module) u64 { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return bigint.clz(ty.intInfo(mod).bits); - } - - pub fn ctz(val: Value, ty: Type, mod: *Module) u64 { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return bigint.ctz(ty.intInfo(mod).bits); - } - - pub fn popCount(val: Value, ty: Type, mod: *Module) u64 { - var bigint_buf: BigIntSpace = undefined; - const bigint = val.toBigInt(&bigint_buf, mod); - return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits))); - } - - pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - const info = ty.intInfo(mod); - - var buffer: Value.BigIntSpace = undefined; - const operand_bigint = val.toBigInt(&buffer, mod); - - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - 
result_bigint.bitReverse(operand_bigint, info.signedness, info.bits); - - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn byteSwap(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value { - const info = ty.intInfo(mod); - - // Bit count must be evenly divisible by 8 - assert(info.bits % 8 == 0); - - var buffer: Value.BigIntSpace = undefined; - const operand_bigint = val.toBigInt(&buffer, mod); - - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.byteSwap(operand_bigint, info.signedness, info.bits / 8); - - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// Asserts the value is an integer and not undefined. - /// Returns the number of bits the value requires to represent stored in twos complement form. - pub fn intBitCountTwosComp(self: Value, mod: *Module) usize { - var buffer: BigIntSpace = undefined; - const big_int = self.toBigInt(&buffer, mod); - return big_int.bitCountTwosComp(); - } - - /// Converts an integer or a float to a float. May result in a loss of information. - /// Caller can find out by equality checking the result against the operand. - pub fn floatCast(self: Value, dest_ty: Type, mod: *Module) !Value { - const target = mod.getTarget(); - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = dest_ty.toIntern(), - .storage = switch (dest_ty.floatBits(target)) { - 16 => .{ .f16 = self.toFloat(f16, mod) }, - 32 => .{ .f32 = self.toFloat(f32, mod) }, - 64 => .{ .f64 = self.toFloat(f64, mod) }, - 80 => .{ .f80 = self.toFloat(f80, mod) }, - 128 => .{ .f128 = self.toFloat(f128, mod) }, - else => unreachable, - }, - } }))); - } - - /// Asserts the value is a float - pub fn floatHasFraction(self: Value, mod: *const Module) bool { - return switch (mod.intern_pool.indexToKey(self.toIntern())) { - .float => |float| switch (float.storage) { - inline else => |x| @rem(x, 1) != 0, - }, - else => unreachable, - }; - } - - pub fn orderAgainstZero(lhs: Value, mod: *Module) std.math.Order { - return orderAgainstZeroAdvanced(lhs, mod, null) catch unreachable; - } - - pub fn orderAgainstZeroAdvanced( - lhs: Value, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!std.math.Order { - return switch (lhs.toIntern()) { - .bool_false => .eq, - .bool_true => .gt, - else => switch (mod.intern_pool.indexToKey(lhs.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { - .decl, .mut_decl, .comptime_field => .gt, - .int => |int| Value.fromInterned(int).orderAgainstZeroAdvanced(mod, opt_sema), - .elem => |elem| switch (try Value.fromInterned(elem.base).orderAgainstZeroAdvanced(mod, opt_sema)) { - .lt => unreachable, - .gt => .gt, - .eq => if (elem.index == 0) .eq else .gt, - }, - else => unreachable, - }, - .int => |int| switch (int.storage) { - .big_int => |big_int| big_int.orderAgainstScalar(0), - inline .u64, .i64 => |x| std.math.order(x, 0), - .lazy_align => .gt, // alignment is never 0 - .lazy_size => |ty| return if (Type.fromInterned(ty).hasRuntimeBitsAdvanced( - mod, - false, - if (opt_sema) |sema| .{ .sema = sema } else .eager, - ) catch |err| switch (err) { - error.NeedLazy => unreachable, - else => |e| return e, - }) .gt else .eq, - }, - .enum_tag => |enum_tag| Value.fromInterned(enum_tag.int).orderAgainstZeroAdvanced(mod, opt_sema), - .float => |float| switch (float.storage) { - inline else => |x| std.math.order(x, 0), - }, - else => unreachable, - }, - }; - } - - /// 
Asserts the value is comparable. - pub fn order(lhs: Value, rhs: Value, mod: *Module) std.math.Order { - return orderAdvanced(lhs, rhs, mod, null) catch unreachable; - } - - /// Asserts the value is comparable. - /// If opt_sema is null then this function asserts things are resolved and cannot fail. - pub fn orderAdvanced(lhs: Value, rhs: Value, mod: *Module, opt_sema: ?*Sema) !std.math.Order { - const lhs_against_zero = try lhs.orderAgainstZeroAdvanced(mod, opt_sema); - const rhs_against_zero = try rhs.orderAgainstZeroAdvanced(mod, opt_sema); - switch (lhs_against_zero) { - .lt => if (rhs_against_zero != .lt) return .lt, - .eq => return rhs_against_zero.invert(), - .gt => {}, - } - switch (rhs_against_zero) { - .lt => if (lhs_against_zero != .lt) return .gt, - .eq => return lhs_against_zero, - .gt => {}, - } - - if (lhs.isFloat(mod) or rhs.isFloat(mod)) { - const lhs_f128 = lhs.toFloat(f128, mod); - const rhs_f128 = rhs.toFloat(f128, mod); - return std.math.order(lhs_f128, rhs_f128); - } - - var lhs_bigint_space: BigIntSpace = undefined; - var rhs_bigint_space: BigIntSpace = undefined; - const lhs_bigint = try lhs.toBigIntAdvanced(&lhs_bigint_space, mod, opt_sema); - const rhs_bigint = try rhs.toBigIntAdvanced(&rhs_bigint_space, mod, opt_sema); - return lhs_bigint.order(rhs_bigint); - } - - /// Asserts the value is comparable. Does not take a type parameter because it supports - /// comparisons between heterogeneous types. - pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value, mod: *Module) bool { - return compareHeteroAdvanced(lhs, op, rhs, mod, null) catch unreachable; - } - - pub fn compareHeteroAdvanced( - lhs: Value, - op: std.math.CompareOperator, - rhs: Value, - mod: *Module, - opt_sema: ?*Sema, - ) !bool { - if (lhs.pointerDecl(mod)) |lhs_decl| { - if (rhs.pointerDecl(mod)) |rhs_decl| { - switch (op) { - .eq => return lhs_decl == rhs_decl, - .neq => return lhs_decl != rhs_decl, - else => {}, - } - } else { - switch (op) { - .eq => return false, - .neq => return true, - else => {}, - } - } - } else if (rhs.pointerDecl(mod)) |_| { - switch (op) { - .eq => return false, - .neq => return true, - else => {}, - } - } - return (try orderAdvanced(lhs, rhs, mod, opt_sema)).compare(op); - } - - /// Asserts the values are comparable. Both operands have type `ty`. - /// For vectors, returns true if comparison is true for ALL elements. - pub fn compareAll(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type, mod: *Module) !bool { - if (ty.zigTypeTag(mod) == .Vector) { - const scalar_ty = ty.scalarType(mod); - for (0..ty.vectorLen(mod)) |i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - if (!compareScalar(lhs_elem, op, rhs_elem, scalar_ty, mod)) { - return false; - } - } - return true; - } - return compareScalar(lhs, op, rhs, ty, mod); - } - - /// Asserts the values are comparable. Both operands have type `ty`. - pub fn compareScalar( - lhs: Value, - op: std.math.CompareOperator, - rhs: Value, - ty: Type, - mod: *Module, - ) bool { - return switch (op) { - .eq => lhs.eql(rhs, ty, mod), - .neq => !lhs.eql(rhs, ty, mod), - else => compareHetero(lhs, op, rhs, mod), - }; - } - - /// Asserts the value is comparable. - /// For vectors, returns true if comparison is true for ALL elements. - /// - /// Note that `!compareAllWithZero(.eq, ...) 
!= compareAllWithZero(.neq, ...)` - pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool { - return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable; - } - - pub fn compareAllWithZeroAdvanced( - lhs: Value, - op: std.math.CompareOperator, - sema: *Sema, - ) Module.CompileError!bool { - return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema); - } - - pub fn compareAllWithZeroAdvancedExtra( - lhs: Value, - op: std.math.CompareOperator, - mod: *Module, - opt_sema: ?*Sema, - ) Module.CompileError!bool { - if (lhs.isInf(mod)) { - switch (op) { - .neq => return true, - .eq => return false, - .gt, .gte => return !lhs.isNegativeInf(mod), - .lt, .lte => return lhs.isNegativeInf(mod), - } - } - - switch (mod.intern_pool.indexToKey(lhs.toIntern())) { - .float => |float| switch (float.storage) { - inline else => |x| if (std.math.isNan(x)) return op == .neq, - }, - .aggregate => |aggregate| return switch (aggregate.storage) { - .bytes => |bytes| for (bytes) |byte| { - if (!std.math.order(byte, 0).compare(op)) break false; - } else true, - .elems => |elems| for (elems) |elem| { - if (!try Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema)) break false; - } else true, - .repeated_elem => |elem| Value.fromInterned(elem).compareAllWithZeroAdvancedExtra(op, mod, opt_sema), - }, - else => {}, - } - return (try orderAgainstZeroAdvanced(lhs, mod, opt_sema)).compare(op); - } - - pub fn eql(a: Value, b: Value, ty: Type, mod: *Module) bool { - assert(mod.intern_pool.typeOf(a.toIntern()) == ty.toIntern()); - assert(mod.intern_pool.typeOf(b.toIntern()) == ty.toIntern()); - return a.toIntern() == b.toIntern(); - } - - pub fn isComptimeMutablePtr(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .slice => |slice| return Value.fromInterned(slice.ptr).isComptimeMutablePtr(mod), - .ptr => |ptr| switch (ptr.addr) { - .mut_decl, .comptime_field => true, - .eu_payload, .opt_payload => |base_ptr| Value.fromInterned(base_ptr).isComptimeMutablePtr(mod), - .elem, .field => |base_index| Value.fromInterned(base_index.base).isComptimeMutablePtr(mod), - else => false, - }, - else => false, - }; - } - - pub fn canMutateComptimeVarState(val: Value, mod: *Module) bool { - return val.isComptimeMutablePtr(mod) or switch (val.toIntern()) { - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .error_union => |error_union| switch (error_union.val) { - .err_name => false, - .payload => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod), - }, - .ptr => |ptr| switch (ptr.addr) { - .eu_payload, .opt_payload => |base| Value.fromInterned(base).canMutateComptimeVarState(mod), - .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).canMutateComptimeVarState(mod), - .elem, .field => |base_index| Value.fromInterned(base_index.base).canMutateComptimeVarState(mod), - else => false, - }, - .opt => |opt| switch (opt.val) { - .none => false, - else => |payload| Value.fromInterned(payload).canMutateComptimeVarState(mod), - }, - .aggregate => |aggregate| for (aggregate.storage.values()) |elem| { - if (Value.fromInterned(elem).canMutateComptimeVarState(mod)) break true; - } else false, - .un => |un| Value.fromInterned(un.val).canMutateComptimeVarState(mod), - else => false, - }, - }; - } - - /// Gets the decl referenced by this pointer. If the pointer does not point - /// to a decl, or if it points to some part of a decl (like field_ptr or element_ptr), - /// this function returns null. 
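The note above about compareAllWithZero follows from the all-elements rule used by compareAll: for a vector such as { 0, 1 }, .eq fails on the second element and .neq fails on the first, so both calls return false. A minimal sketch of that rule over plain slices, reusing the same std.math.Order.compare call the compiler uses; `allCompareZero` is a hypothetical helper, not compiler API:

const std = @import("std");

fn allCompareZero(op: std.math.CompareOperator, elems: []const i64) bool {
    for (elems) |elem| {
        // True only if every element satisfies the comparison against zero.
        if (!std.math.order(elem, 0).compare(op)) return false;
    }
    return true;
}

test "ALL-elements semantics of compareAllWithZero" {
    const v = [_]i64{ 0, 1 };
    try std.testing.expect(!allCompareZero(.eq, &v));
    try std.testing.expect(!allCompareZero(.neq, &v));
}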
- pub fn pointerDecl(val: Value, mod: *Module) ?InternPool.DeclIndex { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .variable => |variable| variable.decl, - .extern_func => |extern_func| extern_func.decl, - .func => |func| func.owner_decl, - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| decl, - .mut_decl => |mut_decl| mut_decl.decl, - else => null, - }, - else => null, - }; - } - - pub const slice_ptr_index = 0; - pub const slice_len_index = 1; - - pub fn slicePtr(val: Value, mod: *Module) Value { - return Value.fromInterned(mod.intern_pool.slicePtr(val.toIntern())); - } - - pub fn sliceLen(val: Value, mod: *Module) u64 { - const ip = &mod.intern_pool; - return switch (ip.indexToKey(val.toIntern())) { - .ptr => |ptr| switch (ip.indexToKey(switch (ptr.addr) { - .decl => |decl| mod.declPtr(decl).ty.toIntern(), - .mut_decl => |mut_decl| mod.declPtr(mut_decl.decl).ty.toIntern(), - .anon_decl => |anon_decl| ip.typeOf(anon_decl.val), - .comptime_field => |comptime_field| ip.typeOf(comptime_field), - else => unreachable, - })) { - .array_type => |array_type| array_type.len, - else => 1, - }, - .slice => |slice| Value.fromInterned(slice.len).toUnsignedInt(mod), - else => unreachable, - }; - } - - /// Asserts the value is a single-item pointer to an array, or an array, - /// or an unknown-length pointer, and returns the element value at the index. - pub fn elemValue(val: Value, mod: *Module, index: usize) Allocator.Error!Value { - return (try val.maybeElemValue(mod, index)).?; - } - - /// Like `elemValue`, but returns `null` instead of asserting on failure. - pub fn maybeElemValue(val: Value, mod: *Module, index: usize) Allocator.Error!?Value { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .bytes => try mod.intValue(Type.u8, val.castTag(.bytes).?.data[index]), - .repeated => val.castTag(.repeated).?.data, - .aggregate => val.castTag(.aggregate).?.data[index], - .slice => val.castTag(.slice).?.data.ptr.maybeElemValue(mod, index), - else => null, - }, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => |ty| Value.fromInterned((try mod.intern(.{ - .undef = Type.fromInterned(ty).elemType2(mod).toIntern(), - }))), - .slice => |slice| return Value.fromInterned(slice.ptr).maybeElemValue(mod, index), - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| mod.declPtr(decl).val.maybeElemValue(mod, index), - .anon_decl => |anon_decl| Value.fromInterned(anon_decl.val).maybeElemValue(mod, index), - .mut_decl => |mut_decl| Value.fromInterned((try mod.declPtr(mut_decl.decl).internValue(mod))).maybeElemValue(mod, index), - .int, .eu_payload => null, - .opt_payload => |base| Value.fromInterned(base).maybeElemValue(mod, index), - .comptime_field => |field_val| Value.fromInterned(field_val).maybeElemValue(mod, index), - .elem => |elem| Value.fromInterned(elem.base).maybeElemValue(mod, index + @as(usize, @intCast(elem.index))), - .field => |field| if (Value.fromInterned(field.base).pointerDecl(mod)) |decl_index| { - const base_decl = mod.declPtr(decl_index); - const field_val = try base_decl.val.fieldValue(mod, @as(usize, @intCast(field.index))); - return field_val.maybeElemValue(mod, index); - } else null, - }, - .opt => |opt| Value.fromInterned(opt.val).maybeElemValue(mod, index), - .aggregate => |aggregate| { - const len = mod.intern_pool.aggregateTypeLen(aggregate.ty); - if (index < len) return Value.fromInterned(switch (aggregate.storage) { - .bytes => |bytes| try mod.intern(.{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = bytes[index] }, 
- } }), - .elems => |elems| elems[index], - .repeated_elem => |elem| elem, - }); - assert(index == len); - return Value.fromInterned(mod.intern_pool.indexToKey(aggregate.ty).array_type.sentinel); - }, - else => null, - }, - }; - } - - pub fn isLazyAlign(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| int.storage == .lazy_align, - else => false, - }; - } - - pub fn isLazySize(val: Value, mod: *Module) bool { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .int => |int| int.storage == .lazy_size, - else => false, - }; - } - - pub fn isPtrToThreadLocal(val: Value, mod: *Module) bool { - const backing_decl = mod.intern_pool.getBackingDecl(val.toIntern()).unwrap() orelse return false; - const variable = mod.declPtr(backing_decl).getOwnedVariable(mod) orelse return false; - return variable.is_threadlocal; - } - - // Asserts that the provided start/end are in-bounds. - pub fn sliceArray( - val: Value, - mod: *Module, - arena: Allocator, - start: usize, - end: usize, - ) error{OutOfMemory}!Value { - // TODO: write something like getCoercedInts to avoid needing to dupe - return switch (val.ip_index) { - .none => switch (val.tag()) { - .slice => val.castTag(.slice).?.data.ptr.sliceArray(mod, arena, start, end), - .bytes => Tag.bytes.create(arena, val.castTag(.bytes).?.data[start..end]), - .repeated => val, - .aggregate => Tag.aggregate.create(arena, val.castTag(.aggregate).?.data[start..end]), - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { - .decl => |decl| try mod.declPtr(decl).val.sliceArray(mod, arena, start, end), - .mut_decl => |mut_decl| Value.fromInterned((try mod.declPtr(mut_decl.decl).internValue(mod))) - .sliceArray(mod, arena, start, end), - .comptime_field => |comptime_field| Value.fromInterned(comptime_field) - .sliceArray(mod, arena, start, end), - .elem => |elem| Value.fromInterned(elem.base) - .sliceArray(mod, arena, start + @as(usize, @intCast(elem.index)), end + @as(usize, @intCast(elem.index))), - else => unreachable, - }, - .aggregate => |aggregate| Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) { - .array_type => |array_type| try mod.arrayType(.{ - .len = @as(u32, @intCast(end - start)), - .child = array_type.child, - .sentinel = if (end == array_type.len) array_type.sentinel else .none, - }), - .vector_type => |vector_type| try mod.vectorType(.{ - .len = @as(u32, @intCast(end - start)), - .child = vector_type.child, - }), - else => unreachable, - }.toIntern(), - .storage = switch (aggregate.storage) { - .bytes => .{ .bytes = try arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) }, - .elems => .{ .elems = try arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) }, - .repeated_elem => |elem| .{ .repeated_elem = elem }, - }, - } }))), - else => unreachable, - }, - }; - } - - pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value { - return switch (val.ip_index) { - .none => switch (val.tag()) { - .aggregate => { - const field_values = val.castTag(.aggregate).?.data; - return field_values[index]; - }, - .@"union" => { - const payload = val.castTag(.@"union").?.data; - // TODO assert the tag is correct - return payload.val; - }, - else => unreachable, - }, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - 
.undef => |ty| Value.fromInterned((try mod.intern(.{ - .undef = Type.fromInterned(ty).structFieldType(index, mod).toIntern(), - }))), - .aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) { - .bytes => |bytes| try mod.intern(.{ .int = .{ - .ty = .u8_type, - .storage = .{ .u64 = bytes[index] }, - } }), - .elems => |elems| elems[index], - .repeated_elem => |elem| elem, - }), - // TODO assert the tag is correct - .un => |un| Value.fromInterned(un.val), - else => unreachable, - }, - }; - } - - pub fn unionTag(val: Value, mod: *Module) ?Value { - if (val.ip_index == .none) return val.castTag(.@"union").?.data.tag; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef, .enum_tag => val, - .un => |un| if (un.tag != .none) Value.fromInterned(un.tag) else return null, - else => unreachable, - }; - } - - pub fn unionValue(val: Value, mod: *Module) Value { - if (val.ip_index == .none) return val.castTag(.@"union").?.data.val; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .un => |un| Value.fromInterned(un.val), - else => unreachable, - }; - } - - /// Returns a pointer to the element value at the index. - pub fn elemPtr( - val: Value, - elem_ptr_ty: Type, - index: usize, - mod: *Module, - ) Allocator.Error!Value { - const elem_ty = elem_ptr_ty.childType(mod); - const ptr_val = switch (mod.intern_pool.indexToKey(val.toIntern())) { - .slice => |slice| Value.fromInterned(slice.ptr), - else => val, - }; - switch (mod.intern_pool.indexToKey(ptr_val.toIntern())) { - .ptr => |ptr| switch (ptr.addr) { - .elem => |elem| if (Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).eql(elem_ty, mod)) - return Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = elem_ptr_ty.toIntern(), - .addr = .{ .elem = .{ - .base = elem.base, - .index = elem.index + index, - } }, - } }))), - else => {}, - }, - else => {}, - } - var ptr_ty_key = mod.intern_pool.indexToKey(elem_ptr_ty.toIntern()).ptr_type; - assert(ptr_ty_key.flags.size != .Slice); - ptr_ty_key.flags.size = .Many; - return Value.fromInterned((try mod.intern(.{ .ptr = .{ - .ty = elem_ptr_ty.toIntern(), - .addr = .{ .elem = .{ - .base = (try mod.getCoerced(ptr_val, try mod.ptrType(ptr_ty_key))).toIntern(), - .index = index, - } }, - } }))); - } - - pub fn isUndef(val: Value, mod: *Module) bool { - return val.ip_index != .none and mod.intern_pool.isUndef(val.toIntern()); - } - - /// TODO: check for cases such as array that is not marked undef but all the element - /// values are marked undef, or struct that is not marked undef but all fields are marked - /// undef, etc. - pub fn isUndefDeep(val: Value, mod: *Module) bool { - return val.isUndef(mod); - } - - /// Returns true if any value contained in `self` is undefined. - pub fn anyUndef(val: Value, mod: *Module) !bool { - if (val.ip_index == .none) return false; - return switch (val.toIntern()) { - .undef => true, - else => switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => true, - .simple_value => |v| v == .undefined, - .slice => |slice| for (0..@intCast(Value.fromInterned(slice.len).toUnsignedInt(mod))) |idx| { - if (try (try val.elemValue(mod, idx)).anyUndef(mod)) break true; - } else false, - .aggregate => |aggregate| for (0..aggregate.storage.values().len) |i| { - const elem = mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.values()[i]; - if (try anyUndef(Value.fromInterned(elem), mod)) break true; - } else false, - else => false, - }, - }; - } - - /// Asserts the value is not undefined and not unreachable. 
- /// C pointers with an integer value of 0 are also considered null. - pub fn isNull(val: Value, mod: *Module) bool { - return switch (val.toIntern()) { - .undef => unreachable, - .unreachable_value => unreachable, - .null_value => true, - else => return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => unreachable, - .ptr => |ptr| switch (ptr.addr) { - .int => { - var buf: BigIntSpace = undefined; - return val.toBigInt(&buf, mod).eqlZero(); - }, - else => false, - }, - .opt => |opt| opt.val == .none, - else => false, - }, - }; - } - - /// Valid only for error (union) types. Asserts the value is not undefined and not unreachable. - pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTerminatedString { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .err => |err| err.name.toOptional(), - .error_union => |error_union| switch (error_union.val) { - .err_name => |err_name| err_name.toOptional(), - .payload => .none, - }, - else => unreachable, - }; - } - - pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt { - return if (getErrorName(val, mod).unwrap()) |err_name| - @as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?)) - else - 0; - } - - /// Assumes the type is an error union. Returns true if and only if the value is - /// the error union payload, not an error. - pub fn errorUnionIsPayload(val: Value, mod: *const Module) bool { - return mod.intern_pool.indexToKey(val.toIntern()).error_union.val == .payload; - } - - /// Value of the optional, null if optional has no payload. - pub fn optionalValue(val: Value, mod: *const Module) ?Value { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .opt => |opt| switch (opt.val) { - .none => null, - else => |payload| Value.fromInterned(payload), - }, - .ptr => val, - else => unreachable, - }; - } - - /// Valid for all types. Asserts the value is not undefined. - pub fn isFloat(self: Value, mod: *const Module) bool { - return switch (self.toIntern()) { - .undef => unreachable, - else => switch (mod.intern_pool.indexToKey(self.toIntern())) { - .undef => unreachable, - .float => true, - else => false, - }, - }; - } - - pub fn floatFromInt(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module) !Value { - return floatFromIntAdvanced(val, arena, int_ty, float_ty, mod, null) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - else => unreachable, - }; - } - - pub fn floatFromIntAdvanced(val: Value, arena: Allocator, int_ty: Type, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - if (int_ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, int_ty.vectorLen(mod)); - const scalar_ty = float_ty.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try floatFromIntScalar(elem_val, scalar_ty, mod, opt_sema)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatFromIntScalar(val, float_ty, mod, opt_sema); - } - - pub fn floatFromIntScalar(val: Value, float_ty: Type, mod: *Module, opt_sema: ?*Sema) !Value { - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .undef => try mod.undefValue(float_ty), - .int => |int| switch (int.storage) { - .big_int => |big_int| { - const float = bigIntToFloat(big_int.limbs, big_int.positive); - return mod.floatValue(float_ty, float); - }, - inline .u64, .i64 => |x| floatFromIntInner(x, float_ty, mod), - .lazy_align => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiAlignmentAdvanced(mod, .{ .sema = sema })).scalar.toByteUnits(0), float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiAlignment(mod).toByteUnits(0), float_ty, mod); - }, - .lazy_size => |ty| if (opt_sema) |sema| { - return floatFromIntInner((try Type.fromInterned(ty).abiSizeAdvanced(mod, .{ .sema = sema })).scalar, float_ty, mod); - } else { - return floatFromIntInner(Type.fromInterned(ty).abiSize(mod), float_ty, mod); - }, - }, - else => unreachable, - }; - } - - fn floatFromIntInner(x: anytype, dest_ty: Type, mod: *Module) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (dest_ty.floatBits(target)) { - 16 => .{ .f16 = @floatFromInt(x) }, - 32 => .{ .f32 = @floatFromInt(x) }, - 64 => .{ .f64 = @floatFromInt(x) }, - 80 => .{ .f80 = @floatFromInt(x) }, - 128 => .{ .f128 = @floatFromInt(x) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = dest_ty.toIntern(), - .storage = storage, - } }))); - } - - fn calcLimbLenFloat(scalar: anytype) usize { - if (scalar == 0) { - return 1; - } - - const w_value = @abs(scalar); - return @divFloor(@as(std.math.big.Limb, @intFromFloat(std.math.log2(w_value))), @typeInfo(std.math.big.Limb).Int.bits) + 1; - } - - pub const OverflowArithmeticResult = struct { - overflow_bit: Value, - wrapped_result: Value, - }; - - /// Supports (vectors of) integers only; asserts neither operand is undefined. - pub fn intAddSat( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intAddSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intAddSatScalar(lhs, rhs, ty, arena, mod); - } - - /// Supports integers only; asserts neither operand is undefined. 
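For fixed-width integers, the saturating semantics implemented by intAddSat above and by the scalar helpers that follow (addSat/subSat on big integers) match Zig's saturating operators, which read as a compact specification of the intended behavior:

const std = @import("std");

test "saturating add and subtract clamp at the type bounds" {
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 250) +| 10);
    try std.testing.expectEqual(@as(i8, 127), @as(i8, 120) +| 10);
    try std.testing.expectEqual(@as(i8, -128), @as(i8, -120) -| 20);
}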
- pub fn intAddSatScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); - - const info = ty.intInfo(mod); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.addSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// Supports (vectors of) integers only; asserts neither operand is undefined. - pub fn intSubSat( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intSubSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intSubSatScalar(lhs, rhs, ty, arena, mod); - } - - /// Supports integers only; asserts neither operand is undefined. - pub fn intSubSatScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); - - const info = ty.intInfo(mod); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.subSat(lhs_bigint, rhs_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn intMulWithOverflow( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !OverflowArithmeticResult { - if (ty.zigTypeTag(mod) == .Vector) { - const vec_len = ty.vectorLen(mod); - const overflowed_data = try arena.alloc(InternPool.Index, vec_len); - const result_data = try arena.alloc(InternPool.Index, vec_len); - const scalar_ty = ty.scalarType(mod); - for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const of_math_result = try intMulWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod); - of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); - scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); - } - return OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), - .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))), - }; - } - return intMulWithOverflowScalar(lhs, rhs, ty, arena, mod); - } - - pub fn intMulWithOverflowScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !OverflowArithmeticResult { - const info = ty.intInfo(mod); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len + rhs_bigint.limbs.len, - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - const limbs_buffer = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), - ); - result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); - - const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); - if (overflowed) { - result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); - } - - return OverflowArithmeticResult{ - .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), - .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), - }; - } - - /// Supports both (vectors of) floats and ints; handles undefined scalars. - pub fn numberMulWrap( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try numberMulWrapScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return numberMulWrapScalar(lhs, rhs, ty, arena, mod); - } - - /// Supports both floats and ints; handles undefined. - pub fn numberMulWrapScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.undef; - - if (ty.zigTypeTag(mod) == .ComptimeInt) { - return intMul(lhs, rhs, ty, undefined, arena, mod); - } - - if (ty.isAnyFloat()) { - return floatMul(lhs, rhs, ty, arena, mod); - } - - const overflow_result = try intMulWithOverflow(lhs, rhs, ty, arena, mod); - return overflow_result.wrapped_result; - } - - /// Supports (vectors of) integers only; asserts neither operand is undefined. 
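intMulWithOverflowScalar above returns the wrapped product together with a u1 overflow bit; on fixed-width integers this is the same shape Zig's @mulWithOverflow builtin produces. A small standalone illustration with illustrative operands:

const std = @import("std");

test "wrapped product plus overflow bit" {
    const ov = @mulWithOverflow(@as(u8, 20), @as(u8, 13));
    try std.testing.expectEqual(@as(u8, 4), ov[0]); // 260 truncated to 8 bits
    try std.testing.expectEqual(@as(u1, 1), ov[1]); // overflow occurred
}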
- pub fn intMulSat( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intMulSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intMulSatScalar(lhs, rhs, ty, arena, mod); - } - - /// Supports (vectors of) integers only; asserts neither operand is undefined. - pub fn intMulSatScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - assert(!lhs.isUndef(mod)); - assert(!rhs.isUndef(mod)); - - const info = ty.intInfo(mod); - - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - @max( - // For the saturate - std.math.big.int.calcTwosCompLimbCount(info.bits), - lhs_bigint.limbs.len + rhs_bigint.limbs.len, - ), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - const limbs_buffer = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), - ); - result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, arena); - result_bigint.saturate(result_bigint.toConst(), info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// Supports both floats and ints; handles undefined. - pub fn numberMax(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; - if (lhs.isNan(mod)) return rhs; - if (rhs.isNan(mod)) return lhs; - - return switch (order(lhs, rhs, mod)) { - .lt => rhs, - .gt, .eq => lhs, - }; - } - - /// Supports both floats and ints; handles undefined. - pub fn numberMin(lhs: Value, rhs: Value, mod: *Module) Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return undef; - if (lhs.isNan(mod)) return rhs; - if (rhs.isNan(mod)) return lhs; - - return switch (order(lhs, rhs, mod)) { - .lt => lhs, - .gt, .eq => rhs, - }; - } - - /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseNot(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try bitwiseNotScalar(elem_val, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return bitwiseNotScalar(val, ty, arena, mod); - } - - /// operands must be integers; handles undefined. 
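bitNotWrap, as used by bitwiseNotScalar below, is an ordinary complement confined to the operand's bit width, with boolean operands special-cased to logical not. For fixed-width integers the result is the same as Zig's ~ operator; a quick standalone check:

const std = @import("std");

test "complement stays within the operand's bit width" {
    try std.testing.expectEqual(@as(u4, 0b0101), ~@as(u4, 0b1010));
    try std.testing.expectEqual(@as(i8, -1), ~@as(i8, 0));
}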
- pub fn bitwiseNotScalar(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (val.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); - if (ty.toIntern() == .bool_type) return makeBool(!val.toBool()); - - const info = ty.intInfo(mod); - - if (info.bits == 0) { - return val; - } - - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits), - ); - - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.bitNotWrap(val_bigint, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseAnd(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try bitwiseAndScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return bitwiseAndScalar(lhs, rhs, ty, allocator, mod); - } - - /// operands must be integers; handles undefined. - pub fn bitwiseAndScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); - if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() and rhs.toBool()); - - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - // + 1 for negatives - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.bitAnd(lhs_bigint, rhs_bigint); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseNand(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try bitwiseNandScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return bitwiseNandScalar(lhs, rhs, ty, arena, mod); - } - - /// operands must be integers; handles undefined. 
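bitwiseNandScalar below builds nand as (a & b) ^ all_ones, where all_ones is -1 for signed types and the maximum value for unsigned types; that xor is simply the complement of the and. A standalone check of the identity with illustrative operands:

const std = @import("std");

test "nand as xor of the and with all ones" {
    const a: u8 = 0b1100_1010;
    const b: u8 = 0b1010_0110;
    const all_ones: u8 = std.math.maxInt(u8);
    try std.testing.expectEqual(~(a & b), (a & b) ^ all_ones);
}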
- pub fn bitwiseNandScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); - if (ty.toIntern() == .bool_type) return makeBool(!(lhs.toBool() and rhs.toBool())); - - const anded = try bitwiseAnd(lhs, rhs, ty, arena, mod); - const all_ones = if (ty.isSignedInt(mod)) try mod.intValue(ty, -1) else try ty.maxIntScalar(mod, ty); - return bitwiseXor(anded, all_ones, ty, arena, mod); - } - - /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseOr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try bitwiseOrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return bitwiseOrScalar(lhs, rhs, ty, allocator, mod); - } - - /// operands must be integers; handles undefined. - pub fn bitwiseOrScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); - if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() or rhs.toBool()); - - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.bitOr(lhs_bigint, rhs_bigint); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// operands must be (vectors of) integers; handles undefined scalars. - pub fn bitwiseXor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try bitwiseXorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return bitwiseXorScalar(lhs, rhs, ty, allocator, mod); - } - - /// operands must be integers; handles undefined. - pub fn bitwiseXorScalar(lhs: Value, rhs: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (lhs.isUndef(mod) or rhs.isUndef(mod)) return Value.fromInterned((try mod.intern(.{ .undef = ty.toIntern() }))); - if (ty.toIntern() == .bool_type) return makeBool(lhs.toBool() != rhs.toBool()); - - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. 
- var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try arena.alloc( - std.math.big.Limb, - // + 1 for negatives - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.bitXor(lhs_bigint, rhs_bigint); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting - /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). - pub fn intDiv(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { - var overflow: usize = undefined; - return intDivInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { - error.Overflow => { - const is_vec = ty.isVector(mod); - overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try mod.vectorType(.{ - .len = ty.vectorLen(mod), - .child = .comptime_int_type, - }) else Type.comptime_int; - return intDivInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { - error.Overflow => unreachable, - else => |e| return e, - }; - }, - else => |e| return e, - }; - } - - fn intDivInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const val = intDivScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { - error.Overflow => { - overflow_idx.* = i; - return error.Overflow; - }, - else => |e| return e, - }; - scalar.* = try val.intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intDivScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn intDivScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. 
- var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs_q = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len, - ); - const limbs_r = try allocator.alloc( - std.math.big.Limb, - rhs_bigint.limbs.len, - ); - const limbs_buffer = try allocator.alloc( - std.math.big.Limb, - std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); - var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; - var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; - result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - if (ty.toIntern() != .comptime_int_type) { - const info = ty.intInfo(mod); - if (!result_q.toConst().fitsInTwosComp(info.signedness, info.bits)) { - return error.Overflow; - } - } - return mod.intValue_big(ty, result_q.toConst()); - } - - pub fn intDivFloor(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intDivFloorScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn intDivFloorScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs_q = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len, - ); - const limbs_r = try allocator.alloc( - std.math.big.Limb, - rhs_bigint.limbs.len, - ); - const limbs_buffer = try allocator.alloc( - std.math.big.Limb, - std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); - var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; - var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; - result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return mod.intValue_big(ty, result_q.toConst()); - } - - pub fn intMod(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try intModScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intModScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn intModScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs_q = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len, - ); - const limbs_r = try allocator.alloc( - std.math.big.Limb, - rhs_bigint.limbs.len, - ); - const limbs_buffer = try allocator.alloc( - std.math.big.Limb, - std.math.big.int.calcDivLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); - var result_q = BigIntMutable{ .limbs = limbs_q, .positive = undefined, .len = undefined }; - var result_r = BigIntMutable{ .limbs = limbs_r, .positive = undefined, .len = undefined }; - result_q.divFloor(&result_r, lhs_bigint, rhs_bigint, limbs_buffer); - return mod.intValue_big(ty, result_r.toConst()); - } - - /// Returns true if the value is a floating point type and is NaN. Returns false otherwise. - pub fn isNan(val: Value, mod: *const Module) bool { - if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .float => |float| switch (float.storage) { - inline else => |x| std.math.isNan(x), - }, - else => false, - }; - } - - /// Returns true if the value is a floating point type and is infinite. Returns false otherwise. - pub fn isInf(val: Value, mod: *const Module) bool { - if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .float => |float| switch (float.storage) { - inline else => |x| std.math.isInf(x), - }, - else => false, - }; - } - - pub fn isNegativeInf(val: Value, mod: *const Module) bool { - if (val.ip_index == .none) return false; - return switch (mod.intern_pool.indexToKey(val.toIntern())) { - .float => |float| switch (float.storage) { - inline else => |x| std.math.isNegativeInf(x), - }, - else => false, - }; - } - - pub fn floatRem(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatRemScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatRemScalar(lhs, rhs, float_type, mod); - } - - pub fn floatRemScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @rem(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @rem(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @rem(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @rem(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @rem(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatMod(lhs: Value, rhs: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatModScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatModScalar(lhs, rhs, float_type, mod); - } - - pub fn floatModScalar(lhs: Value, rhs: Value, float_type: Type, mod: *Module) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @mod(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @mod(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @mod(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @mod(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @mod(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - /// If the value overflowed the type, returns a comptime_int (or vector thereof) instead, setting - /// overflow_idx to the vector index the overflow was at (or 0 for a scalar). 
- pub fn intMul(lhs: Value, rhs: Value, ty: Type, overflow_idx: *?usize, allocator: Allocator, mod: *Module) !Value { - var overflow: usize = undefined; - return intMulInner(lhs, rhs, ty, &overflow, allocator, mod) catch |err| switch (err) { - error.Overflow => { - const is_vec = ty.isVector(mod); - overflow_idx.* = if (is_vec) overflow else 0; - const safe_ty = if (is_vec) try mod.vectorType(.{ - .len = ty.vectorLen(mod), - .child = .comptime_int_type, - }) else Type.comptime_int; - return intMulInner(lhs, rhs, safe_ty, undefined, allocator, mod) catch |err1| switch (err1) { - error.Overflow => unreachable, - else => |e| return e, - }; - }, - else => |e| return e, - }; - } - - fn intMulInner(lhs: Value, rhs: Value, ty: Type, overflow_idx: *usize, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const val = intMulScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod) catch |err| switch (err) { - error.Overflow => { - overflow_idx.* = i; - return error.Overflow; - }, - else => |e| return e, - }; - scalar.* = try val.intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intMulScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn intMulScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.toIntern() != .comptime_int_type) { - const res = try intMulWithOverflowScalar(lhs, rhs, ty, allocator, mod); - if (res.overflow_bit.compareAllWithZero(.neq, mod)) return error.Overflow; - return res.wrapped_result; - } - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - var rhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const rhs_bigint = rhs.toBigInt(&rhs_space, mod); - const limbs = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len + rhs_bigint.limbs.len, - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - const limbs_buffer = try allocator.alloc( - std.math.big.Limb, - std.math.big.int.calcMulLimbsBufferLen(lhs_bigint.limbs.len, rhs_bigint.limbs.len, 1), - ); - defer allocator.free(limbs_buffer); - result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, allocator); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn intTrunc(val: Value, ty: Type, allocator: Allocator, signedness: std.builtin.Signedness, bits: u16, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, bits, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intTruncScalar(val, ty, allocator, signedness, bits, mod); - } - - /// This variant may vectorize on `bits`. Asserts that `bits` is a (vector of) `u16`. 
- pub fn intTruncBitsAsValue( - val: Value, - ty: Type, - allocator: Allocator, - signedness: std.builtin.Signedness, - bits: Value, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - const bits_elem = try bits.elemValue(mod, i); - scalar.* = try (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod); - } - - pub fn intTruncScalar( - val: Value, - ty: Type, - allocator: Allocator, - signedness: std.builtin.Signedness, - bits: u16, - mod: *Module, - ) !Value { - if (bits == 0) return mod.intValue(ty, 0); - - var val_space: Value.BigIntSpace = undefined; - const val_bigint = val.toBigInt(&val_space, mod); - - const limbs = try allocator.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(bits), - ); - var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - - result_bigint.truncate(val_bigint, signedness, bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn shl(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try shlScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return shlScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); - const limbs = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, - ); - var result_bigint = BigIntMutable{ - .limbs = limbs, - .positive = undefined, - .len = undefined, - }; - result_bigint.shiftLeft(lhs_bigint, shift); - if (ty.toIntern() != .comptime_int_type) { - const int_info = ty.intInfo(mod); - result_bigint.truncate(result_bigint.toConst(), int_info.signedness, int_info.bits); - } - - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn shlWithOverflow( - lhs: Value, - rhs: Value, - ty: Type, - allocator: Allocator, - mod: *Module, - ) !OverflowArithmeticResult { - if (ty.zigTypeTag(mod) == .Vector) { - const vec_len = ty.vectorLen(mod); - const overflowed_data = try allocator.alloc(InternPool.Index, vec_len); - const result_data = try allocator.alloc(InternPool.Index, vec_len); - const scalar_ty = ty.scalarType(mod); - for (overflowed_data, result_data, 0..) 
|*of, *scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - const of_math_result = try shlWithOverflowScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod); - of.* = try of_math_result.overflow_bit.intern(Type.u1, mod); - scalar.* = try of_math_result.wrapped_result.intern(scalar_ty, mod); - } - return OverflowArithmeticResult{ - .overflow_bit = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = (try mod.vectorType(.{ .len = vec_len, .child = .u1_type })).toIntern(), - .storage = .{ .elems = overflowed_data }, - } }))), - .wrapped_result = Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))), - }; - } - return shlWithOverflowScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn shlWithOverflowScalar( - lhs: Value, - rhs: Value, - ty: Type, - allocator: Allocator, - mod: *Module, - ) !OverflowArithmeticResult { - const info = ty.intInfo(mod); - var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); - const limbs = try allocator.alloc( - std.math.big.Limb, - lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, - ); - var result_bigint = BigIntMutable{ - .limbs = limbs, - .positive = undefined, - .len = undefined, - }; - result_bigint.shiftLeft(lhs_bigint, shift); - const overflowed = !result_bigint.toConst().fitsInTwosComp(info.signedness, info.bits); - if (overflowed) { - result_bigint.truncate(result_bigint.toConst(), info.signedness, info.bits); - } - return OverflowArithmeticResult{ - .overflow_bit = try mod.intValue(Type.u1, @intFromBool(overflowed)), - .wrapped_result = try mod.intValue_big(ty, result_bigint.toConst()), - }; - } - - pub fn shlSat( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try shlSatScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return shlSatScalar(lhs, rhs, ty, arena, mod); - } - - pub fn shlSatScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. 
- const info = ty.intInfo(mod); - - var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); - const limbs = try arena.alloc( - std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(info.bits) + 1, - ); - var result_bigint = BigIntMutable{ - .limbs = limbs, - .positive = undefined, - .len = undefined, - }; - result_bigint.shiftLeftSat(lhs_bigint, shift, info.signedness, info.bits); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn shlTrunc( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try shlTruncScalar(lhs_elem, rhs_elem, scalar_ty, arena, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return shlTruncScalar(lhs, rhs, ty, arena, mod); - } - - pub fn shlTruncScalar( - lhs: Value, - rhs: Value, - ty: Type, - arena: Allocator, - mod: *Module, - ) !Value { - const shifted = try lhs.shl(rhs, ty, arena, mod); - const int_info = ty.intInfo(mod); - const truncated = try shifted.intTrunc(ty, arena, int_info.signedness, int_info.bits, mod); - return truncated; - } - - pub fn shr(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try allocator.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try shrScalar(lhs_elem, rhs_elem, scalar_ty, allocator, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return shrScalar(lhs, rhs, ty, allocator, mod); - } - - pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *Module) !Value { - // TODO is this a performance issue? maybe we should try the operation without - // resorting to BigInt first. - var lhs_space: Value.BigIntSpace = undefined; - const lhs_bigint = lhs.toBigInt(&lhs_space, mod); - const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod))); - - const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8)); - if (result_limbs == 0) { - // The shift is enough to remove all the bits from the number, which means the - // result is 0 or -1 depending on the sign. 
- if (lhs_bigint.positive) { - return mod.intValue(ty, 0); - } else { - return mod.intValue(ty, -1); - } - } - - const limbs = try allocator.alloc( - std.math.big.Limb, - result_limbs, - ); - var result_bigint = BigIntMutable{ - .limbs = limbs, - .positive = undefined, - .len = undefined, - }; - result_bigint.shiftRight(lhs_bigint, shift); - return mod.intValue_big(ty, result_bigint.toConst()); - } - - pub fn floatNeg( - val: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try floatNegScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatNegScalar(val, float_type, mod); - } - - pub fn floatNegScalar( - val: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = -val.toFloat(f16, mod) }, - 32 => .{ .f32 = -val.toFloat(f32, mod) }, - 64 => .{ .f64 = -val.toFloat(f64, mod) }, - 80 => .{ .f80 = -val.toFloat(f80, mod) }, - 128 => .{ .f128 = -val.toFloat(f128, mod) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatAdd( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatAddScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatAddScalar(lhs, rhs, float_type, mod); - } - - pub fn floatAddScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) + rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) + rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) + rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) + rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) + rhs.toFloat(f128, mod) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatSub( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatSubScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatSubScalar(lhs, rhs, float_type, mod); - } - - pub fn floatSubScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) - rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) - rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) - rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) - rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) - rhs.toFloat(f128, mod) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatDiv( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatDivScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatDivScalar(lhs, rhs, float_type, mod); - } - - pub fn floatDivScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) / rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) / rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) / rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) / rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) / rhs.toFloat(f128, mod) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatDivFloor( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatDivFloorScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatDivFloorScalar(lhs, rhs, float_type, mod); - } - - pub fn floatDivFloorScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @divFloor(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @divFloor(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @divFloor(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @divFloor(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @divFloor(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatDivTrunc( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatDivTruncScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatDivTruncScalar(lhs, rhs, float_type, mod); - } - - pub fn floatDivTruncScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @divTrunc(lhs.toFloat(f16, mod), rhs.toFloat(f16, mod)) }, - 32 => .{ .f32 = @divTrunc(lhs.toFloat(f32, mod), rhs.toFloat(f32, mod)) }, - 64 => .{ .f64 = @divTrunc(lhs.toFloat(f64, mod), rhs.toFloat(f64, mod)) }, - 80 => .{ .f80 = @divTrunc(lhs.toFloat(f80, mod), rhs.toFloat(f80, mod)) }, - 128 => .{ .f128 = @divTrunc(lhs.toFloat(f128, mod), rhs.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn floatMul( - lhs: Value, - rhs: Value, - float_type: Type, - arena: Allocator, - mod: *Module, - ) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const lhs_elem = try lhs.elemValue(mod, i); - const rhs_elem = try rhs.elemValue(mod, i); - scalar.* = try (try floatMulScalar(lhs_elem, rhs_elem, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floatMulScalar(lhs, rhs, float_type, mod); - } - - pub fn floatMulScalar( - lhs: Value, - rhs: Value, - float_type: Type, - mod: *Module, - ) !Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = lhs.toFloat(f16, mod) * rhs.toFloat(f16, mod) }, - 32 => .{ .f32 = lhs.toFloat(f32, mod) * rhs.toFloat(f32, mod) }, - 64 => .{ .f64 = lhs.toFloat(f64, mod) * rhs.toFloat(f64, mod) }, - 80 => .{ .f80 = lhs.toFloat(f80, mod) * rhs.toFloat(f80, mod) }, - 128 => .{ .f128 = lhs.toFloat(f128, mod) * rhs.toFloat(f128, mod) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn sqrt(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try sqrtScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return sqrtScalar(val, float_type, mod); - } - - pub fn sqrtScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @sqrt(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @sqrt(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @sqrt(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @sqrt(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @sqrt(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn sin(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try sinScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return sinScalar(val, float_type, mod); - } - - pub fn sinScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @sin(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @sin(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @sin(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @sin(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @sin(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn cos(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try cosScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return cosScalar(val, float_type, mod); - } - - pub fn cosScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @cos(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @cos(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @cos(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @cos(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @cos(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn tan(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try tanScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return tanScalar(val, float_type, mod); - } - - pub fn tanScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @tan(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @tan(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @tan(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @tan(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @tan(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn exp(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try expScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return expScalar(val, float_type, mod); - } - - pub fn expScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @exp(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @exp(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @exp(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @exp(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @exp(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn exp2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try exp2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return exp2Scalar(val, float_type, mod); - } - - pub fn exp2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @exp2(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @exp2(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @exp2(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @exp2(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @exp2(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn log(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try logScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return logScalar(val, float_type, mod); - } - - pub fn logScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn log2(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try log2Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return log2Scalar(val, float_type, mod); - } - - pub fn log2Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log2(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log2(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log2(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log2(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log2(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn log10(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try log10Scalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return log10Scalar(val, float_type, mod); - } - - pub fn log10Scalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @log10(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @log10(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @log10(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @log10(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @log10(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn abs(val: Value, ty: Type, arena: Allocator, mod: *Module) !Value { - if (ty.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, ty.vectorLen(mod)); - const scalar_ty = ty.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try absScalar(elem_val, scalar_ty, mod, arena)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = ty.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return absScalar(val, ty, mod, arena); - } - - pub fn absScalar(val: Value, ty: Type, mod: *Module, arena: Allocator) Allocator.Error!Value { - switch (ty.zigTypeTag(mod)) { - .Int => { - var buffer: Value.BigIntSpace = undefined; - var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); - operand_bigint.abs(); - - return mod.intValue_big(try ty.toUnsigned(mod), operand_bigint.toConst()); - }, - .ComptimeInt => { - var buffer: Value.BigIntSpace = undefined; - var operand_bigint = try val.toBigInt(&buffer, mod).toManaged(arena); - operand_bigint.abs(); - - return mod.intValue_big(ty, operand_bigint.toConst()); - }, - .ComptimeFloat, .Float => { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (ty.floatBits(target)) { - 16 => .{ .f16 = @abs(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @abs(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @abs(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @abs(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @abs(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = ty.toIntern(), - .storage = storage, - } }))); - }, - else => unreachable, - } - } - - pub fn floor(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) |*scalar, i| { - const elem_val = try val.elemValue(mod, i); - scalar.* = try (try floorScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod); - } - return Value.fromInterned((try mod.intern(.{ .aggregate = .{ - .ty = float_type.toIntern(), - .storage = .{ .elems = result_data }, - } }))); - } - return floorScalar(val, float_type, mod); - } - - pub fn floorScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value { - const target = mod.getTarget(); - const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) { - 16 => .{ .f16 = @floor(val.toFloat(f16, mod)) }, - 32 => .{ .f32 = @floor(val.toFloat(f32, mod)) }, - 64 => .{ .f64 = @floor(val.toFloat(f64, mod)) }, - 80 => .{ .f80 = @floor(val.toFloat(f80, mod)) }, - 128 => .{ .f128 = @floor(val.toFloat(f128, mod)) }, - else => unreachable, - }; - return Value.fromInterned((try mod.intern(.{ .float = .{ - .ty = float_type.toIntern(), - .storage = storage, - } }))); - } - - pub fn ceil(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value { - if (float_type.zigTypeTag(mod) == .Vector) { - const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod)); - const scalar_ty = float_type.scalarType(mod); - for (result_data, 0..) 
|*scalar, i| {
-                const elem_val = try val.elemValue(mod, i);
-                scalar.* = try (try ceilScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
-            }
-            return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                .ty = float_type.toIntern(),
-                .storage = .{ .elems = result_data },
-            } })));
-        }
-        return ceilScalar(val, float_type, mod);
-    }
-
-    pub fn ceilScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
-        const target = mod.getTarget();
-        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-            16 => .{ .f16 = @ceil(val.toFloat(f16, mod)) },
-            32 => .{ .f32 = @ceil(val.toFloat(f32, mod)) },
-            64 => .{ .f64 = @ceil(val.toFloat(f64, mod)) },
-            80 => .{ .f80 = @ceil(val.toFloat(f80, mod)) },
-            128 => .{ .f128 = @ceil(val.toFloat(f128, mod)) },
-            else => unreachable,
-        };
-        return Value.fromInterned((try mod.intern(.{ .float = .{
-            .ty = float_type.toIntern(),
-            .storage = storage,
-        } })));
-    }
-
-    pub fn round(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-            const scalar_ty = float_type.scalarType(mod);
-            for (result_data, 0..) |*scalar, i| {
-                const elem_val = try val.elemValue(mod, i);
-                scalar.* = try (try roundScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
-            }
-            return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                .ty = float_type.toIntern(),
-                .storage = .{ .elems = result_data },
-            } })));
-        }
-        return roundScalar(val, float_type, mod);
-    }
-
-    pub fn roundScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
-        const target = mod.getTarget();
-        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-            16 => .{ .f16 = @round(val.toFloat(f16, mod)) },
-            32 => .{ .f32 = @round(val.toFloat(f32, mod)) },
-            64 => .{ .f64 = @round(val.toFloat(f64, mod)) },
-            80 => .{ .f80 = @round(val.toFloat(f80, mod)) },
-            128 => .{ .f128 = @round(val.toFloat(f128, mod)) },
-            else => unreachable,
-        };
-        return Value.fromInterned((try mod.intern(.{ .float = .{
-            .ty = float_type.toIntern(),
-            .storage = storage,
-        } })));
-    }
-
-    pub fn trunc(val: Value, float_type: Type, arena: Allocator, mod: *Module) !Value {
-        if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-            const scalar_ty = float_type.scalarType(mod);
-            for (result_data, 0..) |*scalar, i| {
-                const elem_val = try val.elemValue(mod, i);
-                scalar.* = try (try truncScalar(elem_val, scalar_ty, mod)).intern(scalar_ty, mod);
-            }
-            return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                .ty = float_type.toIntern(),
-                .storage = .{ .elems = result_data },
-            } })));
-        }
-        return truncScalar(val, float_type, mod);
-    }
-
-    pub fn truncScalar(val: Value, float_type: Type, mod: *Module) Allocator.Error!Value {
-        const target = mod.getTarget();
-        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-            16 => .{ .f16 = @trunc(val.toFloat(f16, mod)) },
-            32 => .{ .f32 = @trunc(val.toFloat(f32, mod)) },
-            64 => .{ .f64 = @trunc(val.toFloat(f64, mod)) },
-            80 => .{ .f80 = @trunc(val.toFloat(f80, mod)) },
-            128 => .{ .f128 = @trunc(val.toFloat(f128, mod)) },
-            else => unreachable,
-        };
-        return Value.fromInterned((try mod.intern(.{ .float = .{
-            .ty = float_type.toIntern(),
-            .storage = storage,
-        } })));
-    }
-
-    pub fn mulAdd(
-        float_type: Type,
-        mulend1: Value,
-        mulend2: Value,
-        addend: Value,
-        arena: Allocator,
-        mod: *Module,
-    ) !Value {
-        if (float_type.zigTypeTag(mod) == .Vector) {
-            const result_data = try arena.alloc(InternPool.Index, float_type.vectorLen(mod));
-            const scalar_ty = float_type.scalarType(mod);
-            for (result_data, 0..) |*scalar, i| {
-                const mulend1_elem = try mulend1.elemValue(mod, i);
-                const mulend2_elem = try mulend2.elemValue(mod, i);
-                const addend_elem = try addend.elemValue(mod, i);
-                scalar.* = try (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, mod)).intern(scalar_ty, mod);
-            }
-            return Value.fromInterned((try mod.intern(.{ .aggregate = .{
-                .ty = float_type.toIntern(),
-                .storage = .{ .elems = result_data },
-            } })));
-        }
-        return mulAddScalar(float_type, mulend1, mulend2, addend, mod);
-    }
-
-    pub fn mulAddScalar(
-        float_type: Type,
-        mulend1: Value,
-        mulend2: Value,
-        addend: Value,
-        mod: *Module,
-    ) Allocator.Error!Value {
-        const target = mod.getTarget();
-        const storage: InternPool.Key.Float.Storage = switch (float_type.floatBits(target)) {
-            16 => .{ .f16 = @mulAdd(f16, mulend1.toFloat(f16, mod), mulend2.toFloat(f16, mod), addend.toFloat(f16, mod)) },
-            32 => .{ .f32 = @mulAdd(f32, mulend1.toFloat(f32, mod), mulend2.toFloat(f32, mod), addend.toFloat(f32, mod)) },
-            64 => .{ .f64 = @mulAdd(f64, mulend1.toFloat(f64, mod), mulend2.toFloat(f64, mod), addend.toFloat(f64, mod)) },
-            80 => .{ .f80 = @mulAdd(f80, mulend1.toFloat(f80, mod), mulend2.toFloat(f80, mod), addend.toFloat(f80, mod)) },
-            128 => .{ .f128 = @mulAdd(f128, mulend1.toFloat(f128, mod), mulend2.toFloat(f128, mod), addend.toFloat(f128, mod)) },
-            else => unreachable,
-        };
-        return Value.fromInterned((try mod.intern(.{ .float = .{
-            .ty = float_type.toIntern(),
-            .storage = storage,
-        } })));
-    }
-
-    /// If the value is represented in-memory as a series of bytes that all
-    /// have the same value, return that byte value, otherwise null.
-    pub fn hasRepeatedByteRepr(val: Value, ty: Type, mod: *Module) !?u8 {
-        const abi_size = std.math.cast(usize, ty.abiSize(mod)) orelse return null;
-        assert(abi_size >= 1);
-        const byte_buffer = try mod.gpa.alloc(u8, abi_size);
-        defer mod.gpa.free(byte_buffer);
-
-        writeToMemory(val, ty, mod, byte_buffer) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            error.ReinterpretDeclRef => return null,
-            // TODO: The writeToMemory function was originally created for the purpose
-            // of comptime pointer casting. However, it is now additionally being used
-            // for checking the actual memory layout that will be generated by machine
-            // code late in compilation. So, this error handling is too aggressive and
-            // causes some false negatives, causing less-than-ideal code generation.
-            error.IllDefinedMemoryLayout => return null,
-            error.Unimplemented => return null,
-        };
-        const first_byte = byte_buffer[0];
-        for (byte_buffer[1..]) |byte| {
-            if (byte != first_byte) return null;
-        }
-        return first_byte;
-    }
-
-    pub fn isGenericPoison(val: Value) bool {
-        return val.toIntern() == .generic_poison;
-    }
-
-    /// For an integer (comptime or fixed-width) `val`, returns the comptime-known bounds of the value.
-    /// If `val` is not undef, the bounds are both `val`.
-    /// If `val` is undef and has a fixed-width type, the bounds are the bounds of the type.
-    /// If `val` is undef and is a `comptime_int`, returns null.
-    pub fn intValueBounds(val: Value, mod: *Module) !?[2]Value {
-        if (!val.isUndef(mod)) return .{ val, val };
-        const ty = mod.intern_pool.typeOf(val.toIntern());
-        if (ty == .comptime_int_type) return null;
-        return .{
-            try Type.fromInterned(ty).minInt(mod, Type.fromInterned(ty)),
-            try Type.fromInterned(ty).maxInt(mod, Type.fromInterned(ty)),
-        };
-    }
-
-    /// This type is not copyable since it may contain pointers to its inner data.
-    pub const Payload = struct {
-        tag: Tag,
-
-        pub const Slice = struct {
-            base: Payload,
-            data: struct {
-                ptr: Value,
-                len: Value,
-            },
-        };
-
-        pub const Bytes = struct {
-            base: Payload,
-            /// Includes the sentinel, if any.
-            data: []const u8,
-        };
-
-        pub const SubValue = struct {
-            base: Payload,
-            data: Value,
-        };
-
-        pub const Aggregate = struct {
-            base: Payload,
-            /// Field values. The types are according to the struct or array type.
-            /// The length is provided here so that copying a Value does not depend on the Type.
-            data: []Value,
-        };
-
-        pub const Union = struct {
-            pub const base_tag = Tag.@"union";
-
-            base: Payload = .{ .tag = base_tag },
-            data: Data,
-
-            pub const Data = struct {
-                tag: ?Value,
-                val: Value,
-            };
-        };
-    };
-
-    pub const BigIntSpace = InternPool.Key.Int.Storage.BigIntSpace;
-
-    pub const zero_usize: Value = .{ .ip_index = .zero_usize, .legacy = undefined };
-    pub const zero_u8: Value = .{ .ip_index = .zero_u8, .legacy = undefined };
-    pub const zero_comptime_int: Value = .{ .ip_index = .zero, .legacy = undefined };
-    pub const one_comptime_int: Value = .{ .ip_index = .one, .legacy = undefined };
-    pub const negative_one_comptime_int: Value = .{ .ip_index = .negative_one, .legacy = undefined };
-    pub const undef: Value = .{ .ip_index = .undef, .legacy = undefined };
-    pub const @"void": Value = .{ .ip_index = .void_value, .legacy = undefined };
-    pub const @"null": Value = .{ .ip_index = .null_value, .legacy = undefined };
-    pub const @"false": Value = .{ .ip_index = .bool_false, .legacy = undefined };
-    pub const @"true": Value = .{ .ip_index = .bool_true, .legacy = undefined };
-    pub const @"unreachable": Value = .{ .ip_index = .unreachable_value, .legacy = undefined };
-
-    pub const generic_poison: Value = .{ .ip_index = .generic_poison, .legacy = undefined };
-    pub const generic_poison_type: Value = .{ .ip_index = .generic_poison_type, .legacy = undefined };
-    pub const empty_struct: Value = .{ .ip_index = .empty_struct, .legacy = undefined };
-
-    pub fn makeBool(x: bool) Value {
-        return if (x) Value.true else Value.false;
-    }
-
-    pub const RuntimeIndex = InternPool.RuntimeIndex;
-
-    /// This function is used in the debugger pretty formatters in tools/ to fetch the
-    /// Tag to Payload mapping to facilitate fancy debug printing for this type.
-    fn dbHelper(self: *Value, tag_to_payload_map: *map: {
-        const tags = @typeInfo(Tag).Enum.fields;
-        var fields: [tags.len]std.builtin.Type.StructField = undefined;
-        for (&fields, tags) |*field, t| field.* = .{
-            .name = t.name ++ "",
-            .type = *@field(Tag, t.name).Type(),
-            .default_value = null,
-            .is_comptime = false,
-            .alignment = 0,
-        };
-        break :map @Type(.{ .Struct = .{
-            .layout = .Extern,
-            .fields = &fields,
-            .decls = &.{},
-            .is_tuple = false,
-        } });
-    }) void {
-        _ = self;
-        _ = tag_to_payload_map;
-    }
-
-    comptime {
-        if (builtin.mode == .Debug) {
-            _ = &dbHelper;
-        }
-    }
-};