Compilation: cleanup hashmap usage

commit 3f2a65594e
parent 4129996211
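The hunks below swap several `std.AutoHashMapUnmanaged`/`std.StringHashMapUnmanaged` fields for their array-hash-map variants, whose entries live in contiguous `keys()`/`values()` slices that a plain `for` loop can walk in insertion order, replacing the iterator boilerplate. A minimal standalone sketch of the difference (not part of the commit, values illustrative):

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var map: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
        defer map.deinit(gpa);

        try map.put(gpa, 1, "one");
        try map.put(gpa, 2, "two");

        // Before (hash map): var it = map.valueIterator(); while (it.next()) |v| ...
        // After (array hash map): entries are slices, friendly to multi-object for.
        for (map.keys(), map.values()) |key, value| {
            std.debug.print("{d} => {s}\n", .{ key, value });
        }
    }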
@@ -410,22 +410,15 @@ pub const CObject = struct {
         }
 
     pub const Bundle = struct {
-        file_names: std.AutoHashMapUnmanaged(u32, []const u8) = .{},
-        category_names: std.AutoHashMapUnmanaged(u32, []const u8) = .{},
+        file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{},
+        category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{},
         diags: []Diag = &.{},
 
         pub fn destroy(bundle: *Bundle, gpa: Allocator) void {
-            var file_name_it = bundle.file_names.valueIterator();
-            while (file_name_it.next()) |file_name| gpa.free(file_name.*);
-            bundle.file_names.deinit(gpa);
-
-            var category_name_it = bundle.category_names.valueIterator();
-            while (category_name_it.next()) |category_name| gpa.free(category_name.*);
-            bundle.category_names.deinit(gpa);
-
+            for (bundle.file_names.values()) |file_name| gpa.free(file_name);
+            for (bundle.category_names.values()) |category_name| gpa.free(category_name);
             for (bundle.diags) |*diag| diag.deinit(gpa);
             gpa.free(bundle.diags);
-
             gpa.destroy(bundle);
         }
 
@@ -470,17 +463,15 @@ pub const CObject = struct {
         var bc = BitcodeReader.init(gpa, .{ .reader = reader.any() });
         defer bc.deinit();
 
-        var file_names: std.AutoHashMapUnmanaged(u32, []const u8) = .{};
+        var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
         errdefer {
-            var file_name_it = file_names.valueIterator();
-            while (file_name_it.next()) |file_name| gpa.free(file_name.*);
+            for (file_names.values()) |file_name| gpa.free(file_name);
             file_names.deinit(gpa);
         }
 
-        var category_names: std.AutoHashMapUnmanaged(u32, []const u8) = .{};
+        var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
         errdefer {
-            var category_name_it = category_names.valueIterator();
-            while (category_name_it.next()) |category_name| gpa.free(category_name.*);
+            for (category_names.values()) |category_name| gpa.free(category_name);
             category_names.deinit(gpa);
         }
 
@@ -1014,46 +1005,39 @@ fn addModuleTableToCacheHash(
 ) (error{OutOfMemory} || std.os.GetCwdError)!void {
     const allocator = arena.allocator();
 
-    const modules = try allocator.alloc(Package.Module.Deps.KV, mod_table.count());
-    {
-        // Copy over the hashmap entries to our slice
-        var table_it = mod_table.iterator();
-        var idx: usize = 0;
-        while (table_it.next()) |entry| : (idx += 1) {
-            modules[idx] = .{
-                .key = entry.key_ptr.*,
-                .value = entry.value_ptr.*,
-            };
-        }
-    }
+    const module_indices = try allocator.alloc(u32, mod_table.count());
+    // Copy over the hashmap entries to our slice
+    for (module_indices, 0..) |*module_index, index| module_index.* = @intCast(index);
     // Sort the slice by package name
-    mem.sortUnstable(Package.Module.Deps.KV, modules, {}, struct {
-        fn lessThan(_: void, lhs: Package.Module.Deps.KV, rhs: Package.Module.Deps.KV) bool {
-            return std.mem.lessThan(u8, lhs.key, rhs.key);
+    mem.sortUnstable(u32, module_indices, &mod_table, struct {
+        fn lessThan(deps: *const Package.Module.Deps, lhs: u32, rhs: u32) bool {
+            const keys = deps.keys();
+            return std.mem.lessThan(u8, keys[lhs], keys[rhs]);
         }
     }.lessThan);
 
-    for (modules) |mod| {
-        if ((try seen_table.getOrPut(mod.value)).found_existing) continue;
+    for (module_indices) |module_index| {
+        const module = mod_table.values()[module_index];
+        if ((try seen_table.getOrPut(module)).found_existing) continue;
 
         // Finally insert the package name and path to the cache hash.
-        hash.addBytes(mod.key);
+        hash.addBytes(mod_table.keys()[module_index]);
         switch (hash_type) {
             .path_bytes => {
-                hash.addBytes(mod.value.root_src_path);
-                hash.addOptionalBytes(mod.value.root.root_dir.path);
-                hash.addBytes(mod.value.root.sub_path);
+                hash.addBytes(module.root_src_path);
+                hash.addOptionalBytes(module.root.root_dir.path);
+                hash.addBytes(module.root.sub_path);
             },
             .files => |man| {
-                const pkg_zig_file = try mod.value.root.joinString(
+                const pkg_zig_file = try module.root.joinString(
                     allocator,
-                    mod.value.root_src_path,
+                    module.root_src_path,
                 );
                 _ = try man.addFile(pkg_zig_file, null);
             },
         }
         // Recurse to handle the module's dependencies
-        try addModuleTableToCacheHash(hash, arena, mod.value.deps, seen_table, hash_type);
+        try addModuleTableToCacheHash(hash, arena, module.deps, seen_table, hash_type);
     }
 }
 
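The hunk above stops copying key/value pairs out of the dependency table and instead sorts a slice of indices, passing the table itself as the comparator context; the table stays authoritative and nothing is duplicated. A self-contained sketch of the same pattern with invented names and contents:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var table: std.StringArrayHashMapUnmanaged(u32) = .{};
        defer table.deinit(gpa);
        try table.put(gpa, "zig", 1);
        try table.put(gpa, "build", 2);
        try table.put(gpa, "ast", 3);

        // Sort indices into the map rather than copies of its entries.
        const indices = try gpa.alloc(u32, table.count());
        defer gpa.free(indices);
        for (indices, 0..) |*index, i| index.* = @intCast(i);

        std.mem.sortUnstable(u32, indices, &table, struct {
            fn lessThan(ctx: *const std.StringArrayHashMapUnmanaged(u32), lhs: u32, rhs: u32) bool {
                const keys = ctx.keys();
                return std.mem.lessThan(u8, keys[lhs], keys[rhs]);
            }
        }.lessThan);

        // Visit entries in key order without having disturbed the map itself.
        for (indices) |i| std.debug.print("{s}\n", .{table.keys()[i]});
    }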
@@ -2260,8 +2244,8 @@ pub fn destroy(self: *Compilation) void {
     }
     self.c_object_table.deinit(gpa);
 
-    for (self.failed_c_objects.values()) |value| {
-        value.destroy(gpa);
+    for (self.failed_c_objects.values()) |bundle| {
+        bundle.destroy(gpa);
     }
     self.failed_c_objects.deinit(gpa);
 
@@ -2483,13 +2467,9 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
         }
 
         // Put a work item in for checking if any files used with `@embedFile` changed.
-        {
-            try comp.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
-            var it = module.embed_table.iterator();
-            while (it.next()) |entry| {
-                const embed_file = entry.value_ptr.*;
-                comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
-            }
-        }
+        try comp.embed_file_work_queue.ensureUnusedCapacity(module.embed_table.count());
+        for (module.embed_table.values()) |embed_file| {
+            comp.embed_file_work_queue.writeItemAssumeCapacity(embed_file);
+        }
 
         try comp.work_queue.writeItem(.{ .analyze_mod = std_mod });
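The new loop keeps the existing reserve-then-write pattern on the work queue: grow capacity once up front, then push with the non-failing `*AssumeCapacity` variant. A standalone sketch using `std.fifo.LinearFifo` with illustrative values (not from the commit):

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var queue = std.fifo.LinearFifo(u32, .Dynamic).init(gpa);
        defer queue.deinit();

        const items = [_]u32{ 1, 2, 3 };
        // One fallible allocation up front ...
        try queue.ensureUnusedCapacity(items.len);
        // ... then the writes themselves cannot fail.
        for (items) |item| queue.writeItemAssumeCapacity(item);

        while (queue.readItem()) |item| std.debug.print("{d}\n", .{item});
    }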
@@ -3083,9 +3063,8 @@ pub fn totalErrorCount(self: *Compilation) u32 {
         @intFromBool(self.alloc_failure_occurred) +
         self.lld_errors.items.len;
 
-    {
-        var it = self.failed_c_objects.iterator();
-        while (it.next()) |entry| total += entry.value_ptr.*.diags.len;
+    for (self.failed_c_objects.values()) |bundle| {
+        total += bundle.diags.len;
     }
 
     if (!build_options.only_core_functionality) {
@@ -3098,19 +3077,15 @@ pub fn totalErrorCount(self: *Compilation) u32 {
         total += module.failed_exports.count();
         total += module.failed_embed_files.count();
 
-        {
-            var it = module.failed_files.iterator();
-            while (it.next()) |entry| {
-                if (entry.value_ptr.*) |_| {
-                    total += 1;
-                } else {
-                    const file = entry.key_ptr.*;
-                    assert(file.zir_loaded);
-                    const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)];
-                    assert(payload_index != 0);
-                    const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
-                    total += header.data.items_len;
-                }
-            }
+        for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| {
+            if (error_msg) |_| {
+                total += 1;
+            } else {
+                assert(file.zir_loaded);
+                const payload_index = file.zir.extra[@intFromEnum(Zir.ExtraIndex.compile_errors)];
+                assert(payload_index != 0);
+                const header = file.zir.extraData(Zir.Inst.CompileErrors, payload_index);
+                total += header.data.items_len;
+            }
         }
 
@@ -3166,15 +3141,13 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
     try bundle.init(gpa);
     defer bundle.deinit();
 
-    {
-        var it = self.failed_c_objects.iterator();
-        while (it.next()) |entry| try entry.value_ptr.*.addToErrorBundle(&bundle);
+    for (self.failed_c_objects.values()) |diag_bundle| {
+        try diag_bundle.addToErrorBundle(&bundle);
     }
 
     if (!build_options.only_core_functionality) {
-        var it = self.failed_win32_resources.iterator();
-        while (it.next()) |entry| {
-            try bundle.addBundleAsRoots(entry.value_ptr.*);
+        for (self.failed_win32_resources.values()) |error_bundle| {
+            try bundle.addBundleAsRoots(error_bundle);
         }
     }
 
@@ -3205,65 +3178,52 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
         });
     }
     if (self.bin_file.options.module) |module| {
-        {
-            var it = module.failed_files.iterator();
-            while (it.next()) |entry| {
-                if (entry.value_ptr.*) |msg| {
-                    try addModuleErrorMsg(module, &bundle, msg.*);
-                } else {
-                    // Must be ZIR errors. Note that this may include AST errors.
-                    // addZirErrorMessages asserts that the tree is loaded.
-                    _ = try entry.key_ptr.*.getTree(gpa);
-                    try addZirErrorMessages(&bundle, entry.key_ptr.*);
-                }
-            }
-        }
-        {
-            var it = module.failed_embed_files.iterator();
-            while (it.next()) |entry| {
-                const msg = entry.value_ptr.*;
-                try addModuleErrorMsg(module, &bundle, msg.*);
-            }
-        }
-        {
-            var it = module.failed_decls.iterator();
-            while (it.next()) |entry| {
-                const decl_index = entry.key_ptr.*;
-                // Skip errors for Decls within files that had a parse failure.
-                // We'll try again once parsing succeeds.
-                if (module.declFileScope(decl_index).okToReportErrors()) {
-                    try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*);
-                    if (module.cimport_errors.get(entry.key_ptr.*)) |errors| {
-                        for (errors.getMessages()) |err_msg_index| {
-                            const err_msg = errors.getErrorMessage(err_msg_index);
-                            try bundle.addRootErrorMessage(.{
-                                .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)),
-                                .src_loc = if (err_msg.src_loc != .none) blk: {
-                                    const src_loc = errors.getSourceLocation(err_msg.src_loc);
-                                    break :blk try bundle.addSourceLocation(.{
-                                        .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)),
-                                        .span_start = src_loc.span_start,
-                                        .span_main = src_loc.span_main,
-                                        .span_end = src_loc.span_end,
-                                        .line = src_loc.line,
-                                        .column = src_loc.column,
-                                        .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0,
-                                    });
-                                } else .none,
-                            });
-                        }
-                    }
-                }
-            }
-        }
+        for (module.failed_files.keys(), module.failed_files.values()) |file, error_msg| {
+            if (error_msg) |msg| {
+                try addModuleErrorMsg(module, &bundle, msg.*);
+            } else {
+                // Must be ZIR errors. Note that this may include AST errors.
+                // addZirErrorMessages asserts that the tree is loaded.
+                _ = try file.getTree(gpa);
+                try addZirErrorMessages(&bundle, file);
+            }
+        }
+        for (module.failed_embed_files.values()) |error_msg| {
+            try addModuleErrorMsg(module, &bundle, error_msg.*);
+        }
+        for (module.failed_decls.keys(), module.failed_decls.values()) |decl_index, error_msg| {
+            // Skip errors for Decls within files that had a parse failure.
+            // We'll try again once parsing succeeds.
+            if (module.declFileScope(decl_index).okToReportErrors()) {
+                try addModuleErrorMsg(module, &bundle, error_msg.*);
+                if (module.cimport_errors.get(decl_index)) |errors| {
+                    for (errors.getMessages()) |err_msg_index| {
+                        const err_msg = errors.getErrorMessage(err_msg_index);
+                        try bundle.addRootErrorMessage(.{
+                            .msg = try bundle.addString(errors.nullTerminatedString(err_msg.msg)),
+                            .src_loc = if (err_msg.src_loc != .none) blk: {
+                                const src_loc = errors.getSourceLocation(err_msg.src_loc);
+                                break :blk try bundle.addSourceLocation(.{
+                                    .src_path = try bundle.addString(errors.nullTerminatedString(src_loc.src_path)),
+                                    .span_start = src_loc.span_start,
+                                    .span_main = src_loc.span_main,
+                                    .span_end = src_loc.span_end,
+                                    .line = src_loc.line,
+                                    .column = src_loc.column,
+                                    .source_line = if (src_loc.source_line != 0) try bundle.addString(errors.nullTerminatedString(src_loc.source_line)) else 0,
+                                });
+                            } else .none,
+                        });
+                    }
+                }
+            }
+        }
         if (module.emit_h) |emit_h| {
-            var it = emit_h.failed_decls.iterator();
-            while (it.next()) |entry| {
-                const decl_index = entry.key_ptr.*;
+            for (emit_h.failed_decls.keys(), emit_h.failed_decls.values()) |decl_index, error_msg| {
                 // Skip errors for Decls within files that had a parse failure.
                 // We'll try again once parsing succeeds.
                 if (module.declFileScope(decl_index).okToReportErrors()) {
-                    try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*);
+                    try addModuleErrorMsg(module, &bundle, error_msg.*);
                 }
             }
         }
@@ -87,7 +87,7 @@ import_table: std.StringArrayHashMapUnmanaged(*File) = .{},
 /// modified on the file system when an update is requested, as well as to cache
 /// `@embedFile` results.
 /// Keys are fully resolved file paths. This table owns the keys and values.
-embed_table: std.StringHashMapUnmanaged(*EmbedFile) = .{},
+embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{},
 
 /// Stores all Type and Value objects.
 /// The idea is that this will be periodically garbage-collected, but such logic
@@ -2482,15 +2482,11 @@ pub fn deinit(mod: *Module) void {
     }
     mod.import_table.deinit(gpa);
 
-    {
-        var it = mod.embed_table.iterator();
-        while (it.next()) |entry| {
-            gpa.free(entry.key_ptr.*);
-            const ef: *EmbedFile = entry.value_ptr.*;
-            gpa.destroy(ef);
-        }
-        mod.embed_table.deinit(gpa);
+    for (mod.embed_table.keys(), mod.embed_table.values()) |path, embed_file| {
+        gpa.free(path);
+        gpa.destroy(embed_file);
     }
+    mod.embed_table.deinit(gpa);
 
     mod.compile_log_text.deinit(gpa);
 
@@ -4035,7 +4031,7 @@ pub fn embedFile(
 
     const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
     errdefer {
-        assert(mod.embed_table.remove(resolved_path));
+        assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
         keep_resolved_path = false;
     }
    if (gop.found_existing) return gop.value_ptr.*.val;
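The reworked errdefer relies on an array-hash-map property: `getOrPut` appends a newly inserted entry at the end, so a failed initialization can be rolled back by popping the last entry and asserting it is the key that was just added. A hypothetical standalone sketch (`addEntry` and `fallibleInit` are invented for illustration):

    const std = @import("std");

    fn addEntry(gpa: std.mem.Allocator, table: *std.StringArrayHashMapUnmanaged(u32), key: []const u8) !void {
        const gop = try table.getOrPut(gpa, key);
        errdefer {
            // On error, the entry we just appended must still be the last one,
            // so pop() undoes the insertion exactly.
            std.debug.assert(std.mem.eql(u8, table.pop().key, key));
        }
        if (gop.found_existing) return;
        gop.value_ptr.* = try fallibleInit(); // may fail; errdefer pops the entry
    }

    fn fallibleInit() !u32 {
        return 42;
    }

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var table: std.StringArrayHashMapUnmanaged(u32) = .{};
        defer table.deinit(gpa);
        try addEntry(gpa, &table, "example");
    }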
@@ -4044,7 +4040,7 @@ pub fn embedFile(
     const sub_file_path = try gpa.dupe(u8, pkg.root_src_path);
     errdefer gpa.free(sub_file_path);
 
-    return newEmbedFile(mod, pkg, sub_file_path, resolved_path, gop, src_loc);
+    return newEmbedFile(mod, pkg, sub_file_path, resolved_path, gop.value_ptr, src_loc);
 }
 
 // The resolved path is used as the key in the table, to detect if a file
@@ -4062,7 +4058,7 @@ pub fn embedFile(
 
     const gop = try mod.embed_table.getOrPut(gpa, resolved_path);
     errdefer {
-        assert(mod.embed_table.remove(resolved_path));
+        assert(std.mem.eql(u8, mod.embed_table.pop().key, resolved_path));
         keep_resolved_path = false;
     }
     if (gop.found_existing) return gop.value_ptr.*.val;
@@ -4089,7 +4085,7 @@ pub fn embedFile(
     };
     defer gpa.free(sub_file_path);
 
-    return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop, src_loc);
+    return newEmbedFile(mod, cur_file.mod, sub_file_path, resolved_path, gop.value_ptr, src_loc);
 }
 
 /// https://github.com/ziglang/zig/issues/14307
@@ -4098,7 +4094,7 @@ fn newEmbedFile(
     pkg: *Package.Module,
     sub_file_path: []const u8,
     resolved_path: []const u8,
-    gop: std.StringHashMapUnmanaged(*EmbedFile).GetOrPutResult,
+    result: **EmbedFile,
     src_loc: SrcLoc,
 ) !InternPool.Index {
     const gpa = mod.gpa;
@@ -4154,7 +4150,7 @@ fn newEmbedFile(
         } },
     } });
 
-    gop.value_ptr.* = new_file;
+    result.* = new_file;
     new_file.* = .{
         .sub_file_path = try ip.getOrPutString(gpa, sub_file_path),
         .owner = pkg,
@@ -4621,16 +4617,13 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocator
         else => |e| return e,
     };
 
-    {
-        var it = sema.unresolved_inferred_allocs.keyIterator();
-        while (it.next()) |ptr_inst| {
-            // The lack of a resolve_inferred_alloc means that this instruction
-            // is unused so it just has to be a no-op.
-            sema.air_instructions.set(@intFromEnum(ptr_inst.*), .{
-                .tag = .alloc,
-                .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
-            });
-        }
+    for (sema.unresolved_inferred_allocs.keys()) |ptr_inst| {
+        // The lack of a resolve_inferred_alloc means that this instruction
+        // is unused so it just has to be a no-op.
+        sema.air_instructions.set(@intFromEnum(ptr_inst), .{
+            .tag = .alloc,
+            .data = .{ .ty = Type.single_const_pointer_to_comptime_int },
+        });
     }
 
     // If we don't get an error return trace from a caller, create our own.
@@ -14,7 +14,7 @@ fully_qualified_name: []const u8,
 /// responsible for detecting these names and using the correct package.
 deps: Deps = .{},
 
-pub const Deps = std.StringHashMapUnmanaged(*Module);
+pub const Deps = std.StringArrayHashMapUnmanaged(*Module);
 
 pub const Tree = struct {
     /// Each `Package` exposes a `Module` with build.zig as its root source file.
@@ -93,7 +93,7 @@ no_partial_func_ty: bool = false,
 
 /// The temporary arena is used for the memory of the `InferredAlloc` values
 /// here so the values can be dropped without any cleanup.
-unresolved_inferred_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
+unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
 
 /// Indices of comptime-mutable decls created by this Sema. These decls' values
 /// should be interned after analysis completes, as they may refer to memory in
@@ -4040,7 +4040,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void {
         },
         .inferred_alloc => {
             const ia1 = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].inferred_alloc;
-            const ia2 = sema.unresolved_inferred_allocs.fetchRemove(ptr_inst).?.value;
+            const ia2 = sema.unresolved_inferred_allocs.fetchSwapRemove(ptr_inst).?.value;
             const peer_vals = try sema.arena.alloc(Air.Inst.Ref, ia2.prongs.items.len);
             for (peer_vals, ia2.prongs.items) |*peer_val, store_inst| {
                 assert(sema.air_instructions.items(.tag)[@intFromEnum(store_inst)] == .store);
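`fetchRemove` becomes `fetchSwapRemove` because array hash maps distinguish removal strategies: swap removal is O(1) but moves the last entry into the vacated slot, perturbing insertion order (the order-preserving alternative is `fetchOrderedRemove`, which shifts entries and costs O(n)). A small illustrative sketch, not from the commit:

    const std = @import("std");

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();

        var map: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
        defer map.deinit(gpa);
        try map.put(gpa, 1, "a");
        try map.put(gpa, 2, "b");
        try map.put(gpa, 3, "c");

        // Removing key 1 swaps the last entry ("c") into its slot:
        // keys() is now { 3, 2 } rather than { 2, 3 }.
        const kv = map.fetchSwapRemove(1).?;
        std.debug.print("removed {s}; keys now: {any}\n", .{ kv.value, map.keys() });
    }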
@@ -9597,9 +9597,9 @@ pub const FuncGen = struct {
         try wip.@"switch"(tag_int_value, bad_value_block, @intCast(enum_type.names.len));
         defer wip_switch.finish(&wip);
 
-        for (enum_type.names.get(ip), 0..) |name, field_index| {
-            const name_string = try o.builder.string(ip.stringToSlice(name));
-            const name_init = try o.builder.stringNullConst(name_string);
+        for (0..enum_type.names.len) |field_index| {
+            const name = try o.builder.string(ip.stringToSlice(enum_type.names.get(ip)[field_index]));
+            const name_init = try o.builder.stringNullConst(name);
             const name_variable_index =
                 try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
             try name_variable_index.setInitializer(name_init, &o.builder);
@@ -9610,7 +9610,7 @@ pub const FuncGen = struct {
 
             const name_val = try o.builder.structValue(ret_ty, &.{
                 name_variable_index.toConst(&o.builder),
-                try o.builder.intConst(usize_ty, name_string.slice(&o.builder).?.len),
+                try o.builder.intConst(usize_ty, name.slice(&o.builder).?.len),
             });
 
             const return_block = try wip.block(1, "Name");