mirror of https://github.com/ziglang/zig.git, synced 2024-11-26 15:12:31 +00:00

Replace deprecated default initializations with decl literals

This commit is contained in:
parent eccd06f5d0
commit 8588964972
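The pattern applied throughout this commit, as a minimal sketch (assuming a Zig version with decl literals, where `std.ArrayListUnmanaged(T).empty` and `std.heap.GeneralPurposeAllocator(config).init` declarations exist, as the changes below rely on):

    const std = @import("std");

    pub fn main() !void {
        // Before: var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        // After: the `.init` decl literal infers its type from the annotation.
        var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        // Before: var list: std.ArrayListUnmanaged(u8) = .{};
        // After: the `.empty` decl literal replaces the empty struct literal.
        var list: std.ArrayListUnmanaged(u8) = .empty;
        defer list.deinit(allocator);

        try list.append(allocator, 42);
        std.debug.print("{d}\n", .{list.items[0]});
    }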
@@ -1,7 +1,7 @@
const std = @import("std");

pub fn main() !void {
-var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();
const args = try std.process.argsAlloc(gpa);
defer std.process.argsFree(gpa, args);
@@ -2,7 +2,7 @@ const std = @import("std");
const fs = std.fs;

pub fn main() !void {
-var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
+var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();

var arena_instance = std.heap.ArenaAllocator.init(gpa);
lib/compiler/aro/aro/CodeGen.zig (vendored): 10 changed lines

@@ -42,11 +42,11 @@ node_tag: []const Tree.Tag,
node_data: []const Tree.Node.Data,
node_ty: []const Type,
wip_switch: *WipSwitch = undefined,
-symbols: std.ArrayListUnmanaged(Symbol) = .{},
-ret_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{},
-phi_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{},
-record_elem_buf: std.ArrayListUnmanaged(Interner.Ref) = .{},
-record_cache: std.AutoHashMapUnmanaged(*Type.Record, Interner.Ref) = .{},
+symbols: std.ArrayListUnmanaged(Symbol) = .empty,
+ret_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .empty,
+phi_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .empty,
+record_elem_buf: std.ArrayListUnmanaged(Interner.Ref) = .empty,
+record_cache: std.AutoHashMapUnmanaged(*Type.Record, Interner.Ref) = .empty,
cond_dummy_ty: ?Interner.Ref = null,
bool_invert: bool = false,
bool_end_label: Ir.Ref = .none,
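For context, a minimal sketch of why `= .empty` can replace `= .{}` in field defaults like the ones above (an assumed, simplified container, not the std implementation): an enum-literal-style initializer resolves to a declaration of the same name on the expected type.

    pub fn List(comptime T: type) type {
        return struct {
            items: []T,
            capacity: usize,

            /// Decl literal: `.empty` resolves to this declaration on the type.
            pub const empty: @This() = .{ .items = &.{}, .capacity = 0 };
        };
    }

    // A field default can now use the decl literal instead of an empty struct literal.
    const Holder = struct {
        symbols: List(u8) = .empty, // same as List(u8).empty
    };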
lib/compiler/aro/aro/Compilation.zig (vendored): 10 changed lines

@@ -93,13 +93,13 @@ gpa: Allocator,
diagnostics: Diagnostics,

environment: Environment = .{},
-sources: std.StringArrayHashMapUnmanaged(Source) = .{},
-include_dirs: std.ArrayListUnmanaged([]const u8) = .{},
-system_include_dirs: std.ArrayListUnmanaged([]const u8) = .{},
+sources: std.StringArrayHashMapUnmanaged(Source) = .empty,
+include_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
+system_include_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
target: std.Target = @import("builtin").target,
-pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .{},
+pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .empty,
langopts: LangOpts = .{},
-generated_buf: std.ArrayListUnmanaged(u8) = .{},
+generated_buf: std.ArrayListUnmanaged(u8) = .empty,
builtins: Builtins = .{},
types: struct {
wchar: Type = undefined,
lib/compiler/aro/aro/Diagnostics.zig (vendored): 2 changed lines

@@ -221,7 +221,7 @@ pub const Options = struct {

const Diagnostics = @This();

-list: std.ArrayListUnmanaged(Message) = .{},
+list: std.ArrayListUnmanaged(Message) = .empty,
arena: std.heap.ArenaAllocator,
fatal_errors: bool = false,
options: Options = .{},
lib/compiler/aro/aro/Driver.zig (vendored): 4 changed lines

@@ -25,8 +25,8 @@ pub const Linker = enum {
const Driver = @This();

comp: *Compilation,
-inputs: std.ArrayListUnmanaged(Source) = .{},
-link_objects: std.ArrayListUnmanaged([]const u8) = .{},
+inputs: std.ArrayListUnmanaged(Source) = .empty,
+link_objects: std.ArrayListUnmanaged([]const u8) = .empty,
output_name: ?[]const u8 = null,
sysroot: ?[]const u8 = null,
system_defines: Compilation.SystemDefinesMode = .include_system_defines,
lib/compiler/aro/aro/Hideset.zig (vendored): 4 changed lines

@@ -51,10 +51,10 @@ pub const Index = enum(u32) {
_,
};

-map: std.AutoHashMapUnmanaged(Identifier, Index) = .{},
+map: std.AutoHashMapUnmanaged(Identifier, Index) = .empty,
/// Used for computing union/intersection of two lists; stored here so that allocations can be retained
/// until hideset is deinit'ed
-tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
+tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .empty,
linked_list: Item.List = .{},
comp: *const Compilation,
lib/compiler/aro/aro/InitList.zig (vendored): 2 changed lines

@@ -23,7 +23,7 @@ const Item = struct {

const InitList = @This();

-list: std.ArrayListUnmanaged(Item) = .{},
+list: std.ArrayListUnmanaged(Item) = .empty,
node: NodeIndex = .none,
tok: TokenIndex = 0,
lib/compiler/aro/aro/Parser.zig (vendored): 6 changed lines

@@ -109,7 +109,7 @@ param_buf: std.ArrayList(Type.Func.Param),
enum_buf: std.ArrayList(Type.Enum.Field),
record_buf: std.ArrayList(Type.Record.Field),
attr_buf: std.MultiArrayList(TentativeAttribute) = .{},
-attr_application_buf: std.ArrayListUnmanaged(Attribute) = .{},
+attr_application_buf: std.ArrayListUnmanaged(Attribute) = .empty,
field_attr_buf: std.ArrayList([]const Attribute),
/// type name -> variable name location for tentative definitions (top-level defs with thus-far-incomplete types)
/// e.g. `struct Foo bar;` where `struct Foo` is not defined yet.

@@ -117,7 +117,7 @@ field_attr_buf: std.ArrayList([]const Attribute),
/// Items are removed if the type is subsequently completed with a definition.
/// We only store the first tentative definition that uses a given type because this map is only used
/// for issuing an error message, and correcting the first error for a type will fix all of them for that type.
-tentative_defs: std.AutoHashMapUnmanaged(StringId, TokenIndex) = .{},
+tentative_defs: std.AutoHashMapUnmanaged(StringId, TokenIndex) = .empty,

// configuration and miscellaneous info
no_eval: bool = false,

@@ -174,7 +174,7 @@ record: struct {
}
}
} = .{},
-record_members: std.ArrayListUnmanaged(struct { tok: TokenIndex, name: StringId }) = .{},
+record_members: std.ArrayListUnmanaged(struct { tok: TokenIndex, name: StringId }) = .empty,
@"switch": ?*Switch = null,
in_loop: bool = false,
pragma_pack: ?u8 = null,
lib/compiler/aro/aro/Preprocessor.zig (vendored): 2 changed lines

@@ -95,7 +95,7 @@ counter: u32 = 0,
expansion_source_loc: Source.Location = undefined,
poisoned_identifiers: std.StringHashMap(void),
/// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any
-include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{},
+include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .empty,

/// Store `keyword_define` and `keyword_undef` tokens.
/// Used to implement preprocessor debug dump options
lib/compiler/aro/aro/SymbolStack.zig (vendored): 6 changed lines

@@ -33,14 +33,14 @@ pub const Kind = enum {
constexpr,
};

-scopes: std.ArrayListUnmanaged(Scope) = .{},
+scopes: std.ArrayListUnmanaged(Scope) = .empty,
/// allocations from nested scopes are retained after popping; `active_len` is the number
/// of currently-active items in `scopes`.
active_len: usize = 0,

const Scope = struct {
-vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
-tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
+vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty,
+tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty,

fn deinit(self: *Scope, allocator: Allocator) void {
self.vars.deinit(allocator);
lib/compiler/aro/aro/pragmas/gcc.zig (vendored): 2 changed lines

@@ -19,7 +19,7 @@ pragma: Pragma = .{
.preserveTokens = preserveTokens,
},
original_options: Diagnostics.Options = .{},
-options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .{},
+options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .empty,

const Directive = enum {
warning,
lib/compiler/aro/aro/pragmas/pack.zig (vendored): 2 changed lines

@@ -15,7 +15,7 @@ pragma: Pragma = .{
.parserHandler = parserHandler,
.preserveTokens = preserveTokens,
},
-stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .{},
+stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .empty,

pub fn init(allocator: mem.Allocator) !*Pragma {
var pack = try allocator.create(Pack);
lib/compiler/aro/aro/toolchains/Linux.zig (vendored): 2 changed lines

@@ -11,7 +11,7 @@ const system_defaults = @import("system_defaults");
const Linux = @This();

distro: Distro.Tag = .unknown,
-extra_opts: std.ArrayListUnmanaged([]const u8) = .{},
+extra_opts: std.ArrayListUnmanaged([]const u8) = .empty,
gcc_detector: GCCDetector = .{},

pub fn discover(self: *Linux, tc: *Toolchain) !void {
lib/compiler/aro/backend/Interner.zig (vendored): 8 changed lines

@@ -8,14 +8,14 @@ const Limb = std.math.big.Limb;

const Interner = @This();

-map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
+map: std.AutoArrayHashMapUnmanaged(void, void) = .empty,
items: std.MultiArrayList(struct {
tag: Tag,
data: u32,
}) = .{},
-extra: std.ArrayListUnmanaged(u32) = .{},
-limbs: std.ArrayListUnmanaged(Limb) = .{},
-strings: std.ArrayListUnmanaged(u8) = .{},
+extra: std.ArrayListUnmanaged(u32) = .empty,
+limbs: std.ArrayListUnmanaged(Limb) = .empty,
+strings: std.ArrayListUnmanaged(u8) = .empty,

const KeyAdapter = struct {
interner: *const Interner,
lib/compiler/aro/backend/Ir.zig (vendored): 4 changed lines

@@ -26,9 +26,9 @@ pub const Builder = struct {
arena: std.heap.ArenaAllocator,
interner: *Interner,

-decls: std.StringArrayHashMapUnmanaged(Decl) = .{},
+decls: std.StringArrayHashMapUnmanaged(Decl) = .empty,
instructions: std.MultiArrayList(Ir.Inst) = .{},
-body: std.ArrayListUnmanaged(Ref) = .{},
+body: std.ArrayListUnmanaged(Ref) = .empty,
alloc_count: u32 = 0,
arg_count: u32 = 0,
current_label: Ref = undefined,
lib/compiler/aro/backend/Object/Elf.zig (vendored): 8 changed lines

@@ -5,7 +5,7 @@ const Object = @import("../Object.zig");

const Section = struct {
data: std.ArrayList(u8),
-relocations: std.ArrayListUnmanaged(Relocation) = .{},
+relocations: std.ArrayListUnmanaged(Relocation) = .empty,
flags: u64,
type: u32,
index: u16 = undefined,

@@ -37,9 +37,9 @@ const Elf = @This();

obj: Object,
/// The keys are owned by the Codegen.tree
-sections: std.StringHashMapUnmanaged(*Section) = .{},
-local_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
-global_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
+sections: std.StringHashMapUnmanaged(*Section) = .empty,
+local_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty,
+global_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty,
unnamed_symbol_mangle: u32 = 0,
strtab_len: u64 = strtab_default.len,
arena: std.heap.ArenaAllocator,
@@ -16,22 +16,22 @@ const Context = @This();

gpa: mem.Allocator,
arena: mem.Allocator,
-decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{},
+decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .empty,
alias_list: AliasList,
global_scope: *Scope.Root,
mangle_count: u32 = 0,
/// Table of record decls that have been demoted to opaques.
-opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{},
+opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .empty,
/// Table of unnamed enums and records that are child types of typedefs.
-unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{},
+unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .empty,
/// Needed to decide if we are parsing a typename
-typedefs: std.StringArrayHashMapUnmanaged(void) = .{},
+typedefs: std.StringArrayHashMapUnmanaged(void) = .empty,

/// This one is different than the root scope's name table. This contains
/// a list of names that we found by visiting all the top level decls without
/// translating them. The other maps are updated as we translate; this one is updated
/// up front in a pre-processing step.
-global_names: std.StringArrayHashMapUnmanaged(void) = .{},
+global_names: std.StringArrayHashMapUnmanaged(void) = .empty,

/// This is similar to `global_names`, but contains names which we would
/// *like* to use, but do not strictly *have* to if they are unavailable.

@@ -40,7 +40,7 @@ global_names: std.StringArrayHashMapUnmanaged(void) = .{},
/// may be mangled.
/// This is distinct from `global_names` so we can detect at a type
/// declaration whether or not the name is available.
-weak_global_names: std.StringArrayHashMapUnmanaged(void) = .{},
+weak_global_names: std.StringArrayHashMapUnmanaged(void) = .empty,

pattern_list: PatternList,
tree: Tree,

@@ -697,7 +697,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const Type.Enum, field_
}

fn getTypeStr(c: *Context, ty: Type) ![]const u8 {
-var buf: std.ArrayListUnmanaged(u8) = .{};
+var buf: std.ArrayListUnmanaged(u8) = .empty;
defer buf.deinit(c.gpa);
const w = buf.writer(c.gpa);
try ty.print(c.mapper, c.comp.langopts, w);

@@ -1793,7 +1793,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();

-var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
+var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();

const args = try std.process.argsAlloc(arena);
@@ -808,7 +808,7 @@ const Context = struct {
gpa: Allocator,
buf: std.ArrayList(u8),
nodes: std.zig.Ast.NodeList = .{},
-extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .{},
+extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .empty,
tokens: std.zig.Ast.TokenList = .{},

fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) Allocator.Error!TokenIndex {
@@ -336,7 +336,7 @@ pub fn main() !void {
}

if (graph.needed_lazy_dependencies.entries.len != 0) {
-var buffer: std.ArrayListUnmanaged(u8) = .{};
+var buffer: std.ArrayListUnmanaged(u8) = .empty;
for (graph.needed_lazy_dependencies.keys()) |k| {
try buffer.appendSlice(arena, k);
try buffer.append(arena, '\n');

@@ -1173,7 +1173,7 @@ pub fn printErrorMessages(
// Provide context for where these error messages are coming from by
// printing the corresponding Step subtree.

-var step_stack: std.ArrayListUnmanaged(*Step) = .{};
+var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
defer step_stack.deinit(gpa);
try step_stack.append(gpa, failing_step);
while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {
@@ -15,7 +15,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();

-var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
+var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();

const args = try std.process.argsAlloc(arena);
@@ -51,7 +51,7 @@ pub fn main() !void {
defer arena_instance.deinit();
const arena = arena_instance.allocator();

-var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
+var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
const gpa = general_purpose_allocator.allocator();

const args = try std.process.argsAlloc(arena);

@@ -109,7 +109,7 @@ pub fn main() !void {
const root_source_file_path = opt_root_source_file_path orelse
fatal("missing root source file path argument; see -h for usage", .{});

-var interestingness_argv: std.ArrayListUnmanaged([]const u8) = .{};
+var interestingness_argv: std.ArrayListUnmanaged([]const u8) = .empty;
try interestingness_argv.ensureUnusedCapacity(arena, argv.len + 1);
interestingness_argv.appendAssumeCapacity(checker_path);
interestingness_argv.appendSliceAssumeCapacity(argv);
@@ -28,7 +28,7 @@ pub const Tree = struct {
};

pub const CodePageLookup = struct {
-lookup: std.ArrayListUnmanaged(CodePage) = .{},
+lookup: std.ArrayListUnmanaged(CodePage) = .empty,
allocator: Allocator,
default_code_page: CodePage,
@@ -70,13 +70,13 @@ pub fn writeUsage(writer: anytype, command_name: []const u8) !void {
}

pub const Diagnostics = struct {
-errors: std.ArrayListUnmanaged(ErrorDetails) = .{},
+errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
allocator: Allocator,

pub const ErrorDetails = struct {
arg_index: usize,
arg_span: ArgSpan = .{},
-msg: std.ArrayListUnmanaged(u8) = .{},
+msg: std.ArrayListUnmanaged(u8) = .empty,
type: Type = .err,
print_args: bool = true,

@@ -132,13 +132,13 @@ pub const Options = struct {
allocator: Allocator,
input_filename: []const u8 = &[_]u8{},
output_filename: []const u8 = &[_]u8{},
-extra_include_paths: std.ArrayListUnmanaged([]const u8) = .{},
+extra_include_paths: std.ArrayListUnmanaged([]const u8) = .empty,
ignore_include_env_var: bool = false,
preprocess: Preprocess = .yes,
default_language_id: ?u16 = null,
default_code_page: ?CodePage = null,
verbose: bool = false,
-symbols: std.StringArrayHashMapUnmanaged(SymbolValue) = .{},
+symbols: std.StringArrayHashMapUnmanaged(SymbolValue) = .empty,
null_terminate_string_table_strings: bool = false,
max_string_literal_codepoints: u15 = lex.default_max_string_literal_codepoints,
silent_duplicate_control_ids: bool = false,
@ -3004,9 +3004,9 @@ test "limitedWriter basic usage" {
|
||||
}
|
||||
|
||||
pub const FontDir = struct {
|
||||
fonts: std.ArrayListUnmanaged(Font) = .{},
|
||||
fonts: std.ArrayListUnmanaged(Font) = .empty,
|
||||
/// To keep track of which ids are set and where they were set from
|
||||
ids: std.AutoHashMapUnmanaged(u16, Token) = .{},
|
||||
ids: std.AutoHashMapUnmanaged(u16, Token) = .empty,
|
||||
|
||||
pub const Font = struct {
|
||||
id: u16,
|
||||
@ -3112,7 +3112,7 @@ pub const StringTablesByLanguage = struct {
|
||||
/// when the first STRINGTABLE for the language was defined, and all blocks for a given
|
||||
/// language are written contiguously.
|
||||
/// Using an ArrayHashMap here gives us this property for free.
|
||||
tables: std.AutoArrayHashMapUnmanaged(res.Language, StringTable) = .{},
|
||||
tables: std.AutoArrayHashMapUnmanaged(res.Language, StringTable) = .empty,
|
||||
|
||||
pub fn deinit(self: *StringTablesByLanguage, allocator: Allocator) void {
|
||||
self.tables.deinit(allocator);
|
||||
@ -3143,10 +3143,10 @@ pub const StringTable = struct {
|
||||
/// was added to the block (i.e. `STRINGTABLE { 16 "b" 0 "a" }` would then get written
|
||||
/// with block ID 2 (the one with "b") first and block ID 1 (the one with "a") second).
|
||||
/// Using an ArrayHashMap here gives us this property for free.
|
||||
blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .{},
|
||||
blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .empty,
|
||||
|
||||
pub const Block = struct {
|
||||
strings: std.ArrayListUnmanaged(Token) = .{},
|
||||
strings: std.ArrayListUnmanaged(Token) = .empty,
|
||||
set_indexes: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 },
|
||||
memory_flags: MemoryFlags = MemoryFlags.defaults(res.RT.STRING),
|
||||
characteristics: u32,
|
||||
|
@ -13,10 +13,10 @@ const builtin = @import("builtin");
|
||||
const native_endian = builtin.cpu.arch.endian();
|
||||
|
||||
pub const Diagnostics = struct {
|
||||
errors: std.ArrayListUnmanaged(ErrorDetails) = .{},
|
||||
errors: std.ArrayListUnmanaged(ErrorDetails) = .empty,
|
||||
/// Append-only, cannot handle removing strings.
|
||||
/// Expects to own all strings within the list.
|
||||
strings: std.ArrayListUnmanaged([]const u8) = .{},
|
||||
strings: std.ArrayListUnmanaged([]const u8) = .empty,
|
||||
allocator: std.mem.Allocator,
|
||||
|
||||
pub fn init(allocator: std.mem.Allocator) Diagnostics {
|
||||
@ -968,7 +968,7 @@ pub fn renderErrorMessage(allocator: std.mem.Allocator, writer: anytype, tty_con
|
||||
const CorrespondingLines = struct {
|
||||
worth_printing_note: bool = true,
|
||||
worth_printing_lines: bool = true,
|
||||
lines: std.ArrayListUnmanaged(u8) = .{},
|
||||
lines: std.ArrayListUnmanaged(u8) = .empty,
|
||||
lines_is_error_message: bool = false,
|
||||
|
||||
pub fn init(allocator: std.mem.Allocator, cwd: std.fs.Dir, err_details: ErrorDetails, lines_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines {
|
||||
|
@ -10,7 +10,7 @@ const renderErrorMessage = @import("utils.zig").renderErrorMessage;
|
||||
const aro = @import("aro");
|
||||
|
||||
pub fn main() !void {
|
||||
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
defer std.debug.assert(gpa.deinit() == .ok);
|
||||
const allocator = gpa.allocator();
|
||||
|
||||
@ -432,7 +432,7 @@ fn cliDiagnosticsToErrorBundle(
|
||||
});
|
||||
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{};
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (diagnostics.errors.items) |err_details| {
|
||||
switch (err_details.type) {
|
||||
@ -474,10 +474,10 @@ fn diagnosticsToErrorBundle(
|
||||
try bundle.init(gpa);
|
||||
errdefer bundle.deinit();
|
||||
|
||||
var msg_buf: std.ArrayListUnmanaged(u8) = .{};
|
||||
var msg_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer msg_buf.deinit(gpa);
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{};
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (diagnostics.errors.items) |err_details| {
|
||||
switch (err_details.type) {
|
||||
@ -587,7 +587,7 @@ fn aroDiagnosticsToErrorBundle(
|
||||
var msg_writer = MsgWriter.init(gpa);
|
||||
defer msg_writer.deinit();
|
||||
var cur_err: ?ErrorBundle.ErrorMessage = null;
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{};
|
||||
var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty;
|
||||
defer cur_notes.deinit(gpa);
|
||||
for (comp.diagnostics.list.items) |msg| {
|
||||
switch (msg.kind) {
|
||||
|
@ -111,7 +111,7 @@ pub const Parser = struct {
|
||||
/// current token is unchanged.
|
||||
/// The returned slice is allocated by the parser's arena
|
||||
fn parseCommonResourceAttributes(self: *Self) ![]Token {
|
||||
var common_resource_attributes = std.ArrayListUnmanaged(Token){};
|
||||
var common_resource_attributes: std.ArrayListUnmanaged(Token) = .empty;
|
||||
while (true) {
|
||||
const maybe_common_resource_attribute = try self.lookaheadToken(.normal);
|
||||
if (maybe_common_resource_attribute.id == .literal and rc.CommonResourceAttributes.map.has(maybe_common_resource_attribute.slice(self.lexer.buffer))) {
|
||||
@ -131,7 +131,7 @@ pub const Parser = struct {
|
||||
/// current token is unchanged.
|
||||
/// The returned slice is allocated by the parser's arena
|
||||
fn parseOptionalStatements(self: *Self, resource: Resource) ![]*Node {
|
||||
var optional_statements = std.ArrayListUnmanaged(*Node){};
|
||||
var optional_statements: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (true) {
|
||||
const lookahead_token = try self.lookaheadToken(.normal);
|
||||
if (lookahead_token.id != .literal) break;
|
||||
@ -445,7 +445,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var accelerators = std.ArrayListUnmanaged(*Node){};
|
||||
var accelerators: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
|
||||
while (true) {
|
||||
const lookahead = try self.lookaheadToken(.normal);
|
||||
@ -463,7 +463,7 @@ pub const Parser = struct {
|
||||
|
||||
const idvalue = try self.parseExpression(.{ .allowed_types = .{ .number = true } });
|
||||
|
||||
var type_and_options = std.ArrayListUnmanaged(Token){};
|
||||
var type_and_options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
while (true) {
|
||||
if (!(try self.parseOptionalToken(.comma))) break;
|
||||
|
||||
@ -528,7 +528,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var controls = std.ArrayListUnmanaged(*Node){};
|
||||
var controls: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
defer controls.deinit(self.state.allocator);
|
||||
while (try self.parseControlStatement(resource)) |control_node| {
|
||||
// The number of controls must fit in a u16 in order for it to
|
||||
@ -587,7 +587,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var buttons = std.ArrayListUnmanaged(*Node){};
|
||||
var buttons: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
defer buttons.deinit(self.state.allocator);
|
||||
while (try self.parseToolbarButtonStatement()) |button_node| {
|
||||
// The number of buttons must fit in a u16 in order for it to
|
||||
@ -645,7 +645,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items = std.ArrayListUnmanaged(*Node){};
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
defer items.deinit(self.state.allocator);
|
||||
while (try self.parseMenuItemStatement(resource, id_token, 1)) |item_node| {
|
||||
try items.append(self.state.allocator, item_node);
|
||||
@ -679,7 +679,7 @@ pub const Parser = struct {
|
||||
// common resource attributes must all be contiguous and come before optional-statements
|
||||
const common_resource_attributes = try self.parseCommonResourceAttributes();
|
||||
|
||||
var fixed_info = std.ArrayListUnmanaged(*Node){};
|
||||
var fixed_info: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (try self.parseVersionStatement()) |version_statement| {
|
||||
try fixed_info.append(self.state.arena, version_statement);
|
||||
}
|
||||
@ -688,7 +688,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var block_statements = std.ArrayListUnmanaged(*Node){};
|
||||
var block_statements: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (try self.parseVersionBlockOrValue(id_token, 1)) |block_node| {
|
||||
try block_statements.append(self.state.arena, block_node);
|
||||
}
|
||||
@ -1064,7 +1064,7 @@ pub const Parser = struct {
|
||||
|
||||
_ = try self.parseOptionalToken(.comma);
|
||||
|
||||
var options = std.ArrayListUnmanaged(Token){};
|
||||
var options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
while (true) {
|
||||
const option_token = try self.lookaheadToken(.normal);
|
||||
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
|
||||
@ -1099,7 +1099,7 @@ pub const Parser = struct {
|
||||
}
|
||||
try self.skipAnyCommas();
|
||||
|
||||
var options = std.ArrayListUnmanaged(Token){};
|
||||
var options: std.ArrayListUnmanaged(Token) = .empty;
|
||||
while (true) {
|
||||
const option_token = try self.lookaheadToken(.normal);
|
||||
if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) {
|
||||
@ -1114,7 +1114,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items = std.ArrayListUnmanaged(*Node){};
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
|
||||
try items.append(self.state.arena, item_node);
|
||||
}
|
||||
@ -1184,7 +1184,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var items = std.ArrayListUnmanaged(*Node){};
|
||||
var items: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| {
|
||||
try items.append(self.state.arena, item_node);
|
||||
}
|
||||
@ -1341,7 +1341,7 @@ pub const Parser = struct {
|
||||
const begin_token = self.state.token;
|
||||
try self.check(.begin);
|
||||
|
||||
var children = std.ArrayListUnmanaged(*Node){};
|
||||
var children: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
while (try self.parseVersionBlockOrValue(top_level_version_id_token, nesting_level + 1)) |value_node| {
|
||||
try children.append(self.state.arena, value_node);
|
||||
}
|
||||
@ -1374,7 +1374,7 @@ pub const Parser = struct {
|
||||
}
|
||||
|
||||
fn parseBlockValuesList(self: *Self, had_comma_before_first_value: bool) Error![]*Node {
|
||||
var values = std.ArrayListUnmanaged(*Node){};
|
||||
var values: std.ArrayListUnmanaged(*Node) = .empty;
|
||||
var seen_number: bool = false;
|
||||
var first_string_value: ?*Node = null;
|
||||
while (true) {
|
||||
|
@ -10,7 +10,7 @@ pub const ParseLineCommandsResult = struct {
|
||||
|
||||
const CurrentMapping = struct {
|
||||
line_num: usize = 1,
|
||||
filename: std.ArrayListUnmanaged(u8) = .{},
|
||||
filename: std.ArrayListUnmanaged(u8) = .empty,
|
||||
pending: bool = true,
|
||||
ignore_contents: bool = false,
|
||||
};
|
||||
@ -626,8 +626,8 @@ test "SourceMappings collapse" {
|
||||
|
||||
/// Same thing as StringTable in Zig's src/Wasm.zig
|
||||
pub const StringTable = struct {
|
||||
data: std.ArrayListUnmanaged(u8) = .{},
|
||||
map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
|
||||
data: std.ArrayListUnmanaged(u8) = .empty,
|
||||
map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .empty,
|
||||
|
||||
pub fn deinit(self: *StringTable, allocator: Allocator) void {
|
||||
self.data.deinit(allocator);
|
||||
|
@ -25,7 +25,7 @@ pub fn main() !void {
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
const gpa = general_purpose_allocator.allocator();
|
||||
|
||||
var argv = try std.process.argsWithAllocator(arena);
|
||||
@ -265,7 +265,7 @@ fn buildWasmBinary(
|
||||
) !Cache.Path {
|
||||
const gpa = context.gpa;
|
||||
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .{};
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
|
||||
try argv.appendSlice(arena, &.{
|
||||
context.zig_exe_path, //
|
||||
|
@ -85,7 +85,7 @@ fn mainServer() !void {
|
||||
@panic("internal test runner memory leak");
|
||||
};
|
||||
|
||||
var string_bytes: std.ArrayListUnmanaged(u8) = .{};
|
||||
var string_bytes: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer string_bytes.deinit(testing.allocator);
|
||||
try string_bytes.append(testing.allocator, 0); // Reserve 0 for null.
|
||||
|
||||
|
@ -10,9 +10,9 @@ const Oom = error{OutOfMemory};
|
||||
|
||||
pub const Decl = @import("Decl.zig");
|
||||
|
||||
pub var files: std.StringArrayHashMapUnmanaged(File) = .{};
|
||||
pub var decls: std.ArrayListUnmanaged(Decl) = .{};
|
||||
pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .{};
|
||||
pub var files: std.StringArrayHashMapUnmanaged(File) = .empty;
|
||||
pub var decls: std.ArrayListUnmanaged(Decl) = .empty;
|
||||
pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .empty;
|
||||
|
||||
file: File.Index,
|
||||
|
||||
@ -42,17 +42,17 @@ pub const Category = union(enum(u8)) {
|
||||
pub const File = struct {
|
||||
ast: Ast,
|
||||
/// Maps identifiers to the declarations they point to.
|
||||
ident_decls: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .{},
|
||||
ident_decls: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .empty,
|
||||
/// Maps field access identifiers to the containing field access node.
|
||||
token_parents: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .{},
|
||||
token_parents: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .empty,
|
||||
/// Maps declarations to their global index.
|
||||
node_decls: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Decl.Index) = .{},
|
||||
node_decls: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Decl.Index) = .empty,
|
||||
/// Maps function declarations to doctests.
|
||||
doctests: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .{},
|
||||
doctests: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .empty,
|
||||
/// root node => its namespace scope
|
||||
/// struct/union/enum/opaque decl node => its namespace scope
|
||||
/// local var decl node => its local variable scope
|
||||
scopes: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, *Scope) = .{},
|
||||
scopes: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, *Scope) = .empty,
|
||||
|
||||
pub fn lookup_token(file: *File, token: Ast.TokenIndex) Decl.Index {
|
||||
const decl_node = file.ident_decls.get(token) orelse return .none;
|
||||
@ -464,8 +464,8 @@ pub const Scope = struct {
|
||||
const Namespace = struct {
|
||||
base: Scope = .{ .tag = .namespace },
|
||||
parent: *Scope,
|
||||
names: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{},
|
||||
doctests: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{},
|
||||
names: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .empty,
|
||||
doctests: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .empty,
|
||||
decl_index: Decl.Index,
|
||||
};
|
||||
|
||||
|
@ -38,7 +38,7 @@ pub fn fileSourceHtml(
|
||||
const file = file_index.get();
|
||||
|
||||
const g = struct {
|
||||
var field_access_buffer: std.ArrayListUnmanaged(u8) = .{};
|
||||
var field_access_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
};
|
||||
|
||||
const token_tags = ast.tokens.items(.tag);
|
||||
|
@ -60,8 +60,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
|
||||
};
|
||||
}
|
||||
|
||||
var query_string: std.ArrayListUnmanaged(u8) = .{};
|
||||
var query_results: std.ArrayListUnmanaged(Decl.Index) = .{};
|
||||
var query_string: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var query_results: std.ArrayListUnmanaged(Decl.Index) = .empty;
|
||||
|
||||
/// Resizes the query string to be the correct length; returns the pointer to
|
||||
/// the query string.
|
||||
@ -93,11 +93,11 @@ fn query_exec_fallible(query: []const u8, ignore_case: bool) !void {
|
||||
segments: u16,
|
||||
};
|
||||
const g = struct {
|
||||
var full_path_search_text: std.ArrayListUnmanaged(u8) = .{};
|
||||
var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .{};
|
||||
var doc_search_text: std.ArrayListUnmanaged(u8) = .{};
|
||||
var full_path_search_text: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var doc_search_text: std.ArrayListUnmanaged(u8) = .empty;
|
||||
/// Each element matches a corresponding query_results element.
|
||||
var scores: std.ArrayListUnmanaged(Score) = .{};
|
||||
var scores: std.ArrayListUnmanaged(Score) = .empty;
|
||||
};
|
||||
|
||||
// First element stores the size of the list.
|
||||
@ -255,8 +255,8 @@ const ErrorIdentifier = packed struct(u64) {
|
||||
}
|
||||
};
|
||||
|
||||
var string_result: std.ArrayListUnmanaged(u8) = .{};
|
||||
var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .{};
|
||||
var string_result: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .empty;
|
||||
|
||||
export fn decl_error_set(decl_index: Decl.Index) Slice(ErrorIdentifier) {
|
||||
return Slice(ErrorIdentifier).init(decl_error_set_fallible(decl_index) catch @panic("OOM"));
|
||||
@ -381,7 +381,7 @@ export fn decl_params(decl_index: Decl.Index) Slice(Ast.Node.Index) {
|
||||
|
||||
fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
|
||||
const g = struct {
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .{};
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
|
||||
};
|
||||
g.result.clearRetainingCapacity();
|
||||
const decl = decl_index.get();
|
||||
@ -403,7 +403,7 @@ fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
|
||||
|
||||
fn decl_params_fallible(decl_index: Decl.Index) ![]Ast.Node.Index {
|
||||
const g = struct {
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .{};
|
||||
var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty;
|
||||
};
|
||||
g.result.clearRetainingCapacity();
|
||||
const decl = decl_index.get();
|
||||
@ -672,7 +672,7 @@ fn render_docs(
|
||||
defer parsed_doc.deinit(gpa);
|
||||
|
||||
const g = struct {
|
||||
var link_buffer: std.ArrayListUnmanaged(u8) = .{};
|
||||
var link_buffer: std.ArrayListUnmanaged(u8) = .empty;
|
||||
};
|
||||
|
||||
const Writer = std.ArrayListUnmanaged(u8).Writer;
|
||||
@ -817,7 +817,7 @@ export fn find_module_root(pkg: Walk.ModuleIndex) Decl.Index {
|
||||
}
|
||||
|
||||
/// Set by `set_input_string`.
|
||||
var input_string: std.ArrayListUnmanaged(u8) = .{};
|
||||
var input_string: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
export fn set_input_string(len: usize) [*]u8 {
|
||||
input_string.resize(gpa, len) catch @panic("OOM");
|
||||
@ -839,7 +839,7 @@ export fn find_decl() Decl.Index {
|
||||
if (result != .none) return result;
|
||||
|
||||
const g = struct {
|
||||
var match_fqn: std.ArrayListUnmanaged(u8) = .{};
|
||||
var match_fqn: std.ArrayListUnmanaged(u8) = .empty;
|
||||
};
|
||||
for (Walk.decls.items, 0..) |*decl, decl_index| {
|
||||
g.match_fqn.clearRetainingCapacity();
|
||||
@ -888,7 +888,7 @@ export fn type_fn_members(parent: Decl.Index, include_private: bool) Slice(Decl.
|
||||
|
||||
export fn namespace_members(parent: Decl.Index, include_private: bool) Slice(Decl.Index) {
|
||||
const g = struct {
|
||||
var members: std.ArrayListUnmanaged(Decl.Index) = .{};
|
||||
var members: std.ArrayListUnmanaged(Decl.Index) = .empty;
|
||||
};
|
||||
|
||||
g.members.clearRetainingCapacity();
|
||||
|
@ -31,11 +31,11 @@ const ExtraData = Document.ExtraData;
|
||||
const StringIndex = Document.StringIndex;
|
||||
|
||||
nodes: Node.List = .{},
|
||||
extra: std.ArrayListUnmanaged(u32) = .{},
|
||||
scratch_extra: std.ArrayListUnmanaged(u32) = .{},
|
||||
string_bytes: std.ArrayListUnmanaged(u8) = .{},
|
||||
scratch_string: std.ArrayListUnmanaged(u8) = .{},
|
||||
pending_blocks: std.ArrayListUnmanaged(Block) = .{},
|
||||
extra: std.ArrayListUnmanaged(u32) = .empty,
|
||||
scratch_extra: std.ArrayListUnmanaged(u32) = .empty,
|
||||
string_bytes: std.ArrayListUnmanaged(u8) = .empty,
|
||||
scratch_string: std.ArrayListUnmanaged(u8) = .empty,
|
||||
pending_blocks: std.ArrayListUnmanaged(Block) = .empty,
|
||||
allocator: Allocator,
|
||||
|
||||
const Parser = @This();
|
||||
@ -928,8 +928,8 @@ const InlineParser = struct {
|
||||
parent: *Parser,
|
||||
content: []const u8,
|
||||
pos: usize = 0,
|
||||
pending_inlines: std.ArrayListUnmanaged(PendingInline) = .{},
|
||||
completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .{},
|
||||
pending_inlines: std.ArrayListUnmanaged(PendingInline) = .empty,
|
||||
completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .empty,
|
||||
|
||||
const PendingInline = struct {
|
||||
tag: Tag,
|
||||
|
@ -402,7 +402,7 @@ fn oom(err: anytype) noreturn {
|
||||
}
|
||||
}
|
||||
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{};
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
|
||||
var fuzzer: Fuzzer = .{
|
||||
.gpa = general_purpose_allocator.allocator(),
|
||||
|
@ -58,7 +58,7 @@ export fn alloc(n: usize) [*]u8 {
|
||||
return slice.ptr;
|
||||
}
|
||||
|
||||
var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{};
|
||||
var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .empty;
|
||||
|
||||
/// Resizes the message buffer to be the correct length; returns the pointer to
|
||||
/// the query string.
|
||||
@ -90,8 +90,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void {
|
||||
}
|
||||
|
||||
/// Set by `set_input_string`.
|
||||
var input_string: std.ArrayListUnmanaged(u8) = .{};
|
||||
var string_result: std.ArrayListUnmanaged(u8) = .{};
|
||||
var input_string: std.ArrayListUnmanaged(u8) = .empty;
|
||||
var string_result: std.ArrayListUnmanaged(u8) = .empty;
|
||||
|
||||
export fn set_input_string(len: usize) [*]u8 {
|
||||
input_string.resize(gpa, len) catch @panic("OOM");
|
||||
@ -249,7 +249,7 @@ fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
|
||||
js.emitCoverageUpdate();
|
||||
}
|
||||
|
||||
var entry_points: std.ArrayListUnmanaged(u32) = .{};
|
||||
var entry_points: std.ArrayListUnmanaged(u32) = .empty;
|
||||
|
||||
fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
|
||||
const header: abi.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.EntryPointHeader)].*);
|
||||
@ -295,7 +295,7 @@ const SourceLocationIndex = enum(u32) {
|
||||
}
|
||||
|
||||
fn toWalkFile(sli: SourceLocationIndex) ?Walk.File.Index {
|
||||
var buf: std.ArrayListUnmanaged(u8) = .{};
|
||||
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer buf.deinit(gpa);
|
||||
sli.appendPath(&buf) catch @panic("OOM");
|
||||
return @enumFromInt(Walk.files.getIndex(buf.items) orelse return null);
|
||||
@ -307,7 +307,7 @@ const SourceLocationIndex = enum(u32) {
|
||||
) error{ OutOfMemory, SourceUnavailable }!void {
|
||||
const walk_file_index = sli.toWalkFile() orelse return error.SourceUnavailable;
|
||||
const root_node = walk_file_index.findRootDecl().get().ast_node;
|
||||
var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .{};
|
||||
var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .empty;
|
||||
defer annotations.deinit(gpa);
|
||||
try computeSourceAnnotations(sli.ptr().file, walk_file_index, &annotations, coverage_source_locations.items);
|
||||
html_render.fileSourceHtml(walk_file_index, out, root_node, .{
|
||||
@ -327,7 +327,7 @@ fn computeSourceAnnotations(
|
||||
// Collect all the source locations from only this file into this array
|
||||
// first, then sort by line, col, so that we can collect annotations with
|
||||
// O(N) time complexity.
|
||||
var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .{};
|
||||
var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
|
||||
defer locs.deinit(gpa);
|
||||
|
||||
for (source_locations, 0..) |sl, sli_usize| {
|
||||
@ -374,9 +374,9 @@ fn computeSourceAnnotations(
|
||||
|
||||
var coverage = Coverage.init;
|
||||
/// Index of type `SourceLocationIndex`.
|
||||
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .{};
|
||||
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
|
||||
/// Contains the most recent coverage update message, unmodified.
|
||||
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{};
|
||||
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .empty;
|
||||
|
||||
fn updateCoverage(
|
||||
directories: []const Coverage.String,
|
||||
@ -425,7 +425,7 @@ export fn sourceLocationFileHtml(sli: SourceLocationIndex) String {
|
||||
|
||||
export fn sourceLocationFileCoveredList(sli_file: SourceLocationIndex) Slice(SourceLocationIndex) {
|
||||
const global = struct {
|
||||
var result: std.ArrayListUnmanaged(SourceLocationIndex) = .{};
|
||||
var result: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
|
||||
fn add(i: u32, want_file: Coverage.File.Index) void {
|
||||
const src_loc_index: SourceLocationIndex = @enumFromInt(i);
|
||||
if (src_loc_index.ptr().file == want_file) result.appendAssumeCapacity(src_loc_index);
|
||||
|
@ -111,7 +111,7 @@ pub const ReleaseMode = enum {
|
||||
/// Settings that are here rather than in Build are not configurable per-package.
|
||||
pub const Graph = struct {
|
||||
arena: Allocator,
|
||||
system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .{},
|
||||
system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .empty,
|
||||
system_package_mode: bool = false,
|
||||
debug_compiler_runtime_libs: bool = false,
|
||||
cache: Cache,
|
||||
@ -119,7 +119,7 @@ pub const Graph = struct {
|
||||
env_map: EnvMap,
|
||||
global_cache_root: Cache.Directory,
|
||||
zig_lib_directory: Cache.Directory,
|
||||
needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{},
|
||||
needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .empty,
|
||||
/// Information about the native target. Computed before build() is invoked.
|
||||
host: ResolvedTarget,
|
||||
incremental: ?bool = null,
|
||||
|
@ -30,7 +30,7 @@ pub fn start(
|
||||
defer rebuild_node.end();
|
||||
var wait_group: std.Thread.WaitGroup = .{};
|
||||
defer wait_group.wait();
|
||||
var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .{};
|
||||
var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
|
||||
defer fuzz_run_steps.deinit(gpa);
|
||||
for (all_steps) |step| {
|
||||
const run = step.cast(Step.Run) orelse continue;
|
||||
|
@ -236,7 +236,7 @@ fn buildWasmBinary(
|
||||
.sub_path = "docs/wasm/html_render.zig",
|
||||
};
|
||||
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .{};
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
|
||||
try argv.appendSlice(arena, &.{
|
||||
ws.zig_exe_path, "build-exe", //
|
||||
|
@ -714,7 +714,7 @@ pub fn allocPrintCmd2(
|
||||
opt_env: ?*const std.process.EnvMap,
|
||||
argv: []const []const u8,
|
||||
) Allocator.Error![]u8 {
|
||||
var buf: std.ArrayListUnmanaged(u8) = .{};
|
||||
var buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd});
|
||||
if (opt_env) |env| {
|
||||
const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena);
|
||||
|
@ -713,12 +713,12 @@ const MachODumper = struct {
|
||||
gpa: Allocator,
|
||||
data: []const u8,
|
||||
header: macho.mach_header_64,
|
||||
segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{},
|
||||
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
|
||||
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
|
||||
strtab: std.ArrayListUnmanaged(u8) = .{},
|
||||
indsymtab: std.ArrayListUnmanaged(u32) = .{},
|
||||
imports: std.ArrayListUnmanaged([]const u8) = .{},
|
||||
segments: std.ArrayListUnmanaged(macho.segment_command_64) = .empty,
|
||||
sections: std.ArrayListUnmanaged(macho.section_64) = .empty,
|
||||
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty,
|
||||
strtab: std.ArrayListUnmanaged(u8) = .empty,
|
||||
indsymtab: std.ArrayListUnmanaged(u32) = .empty,
|
||||
imports: std.ArrayListUnmanaged([]const u8) = .empty,
|
||||
|
||||
fn parse(ctx: *ObjectContext) !void {
|
||||
var it = ctx.getLoadCommandIterator();
|
||||
@ -1797,9 +1797,9 @@ const ElfDumper = struct {
|
||||
const ArchiveContext = struct {
|
||||
gpa: Allocator,
|
||||
data: []const u8,
|
||||
symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .{},
|
||||
symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .empty,
|
||||
strtab: []const u8,
|
||||
objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .{},
|
||||
objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .empty,
|
||||
|
||||
fn parseSymtab(ctx: *ArchiveContext, raw: []const u8, ptr_width: enum { p32, p64 }) !void {
|
||||
var stream = std.io.fixedBufferStream(raw);
|
||||
|
@ -1070,8 +1070,8 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
|
||||
// Stores system libraries that have already been seen for at least one
|
||||
// module, along with any arguments that need to be passed to the
|
||||
// compiler for each module individually.
|
||||
var seen_system_libs: std.StringHashMapUnmanaged([]const []const u8) = .{};
|
||||
var frameworks: std.StringArrayHashMapUnmanaged(Module.LinkFrameworkOptions) = .{};
|
||||
var seen_system_libs: std.StringHashMapUnmanaged([]const []const u8) = .empty;
|
||||
var frameworks: std.StringArrayHashMapUnmanaged(Module.LinkFrameworkOptions) = .empty;
|
||||
|
||||
var prev_has_cflags = false;
|
||||
var prev_has_rcflags = false;
|
||||
|
@ -48,7 +48,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
|
||||
const arena = b.allocator;
|
||||
const fmt: *Fmt = @fieldParentPtr("step", step);
|
||||
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .{};
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
try argv.ensureUnusedCapacity(arena, 2 + 1 + fmt.paths.len + 2 * fmt.exclude_paths.len);
|
||||
|
||||
argv.appendAssumeCapacity(b.graph.zig_exe);
|
||||
|
@ -856,7 +856,7 @@ pub fn rerunInFuzzMode(
|
||||
const step = &run.step;
|
||||
const b = step.owner;
|
||||
const arena = b.allocator;
|
||||
var argv_list: std.ArrayListUnmanaged([]const u8) = .{};
|
||||
var argv_list: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
for (run.argv.items) |arg| {
|
||||
switch (arg) {
|
||||
.bytes => |bytes| {
|
||||
|
@ -130,7 +130,7 @@ pub fn ArrayHashMap(
|
||||
}
|
||||
pub fn initContext(allocator: Allocator, ctx: Context) Self {
|
||||
return .{
|
||||
.unmanaged = .{},
|
||||
.unmanaged = .empty,
|
||||
.allocator = allocator,
|
||||
.ctx = ctx,
|
||||
};
|
||||
@ -429,7 +429,7 @@ pub fn ArrayHashMap(
|
||||
pub fn move(self: *Self) Self {
|
||||
self.unmanaged.pointer_stability.assertUnlocked();
|
||||
const result = self.*;
|
||||
self.unmanaged = .{};
|
||||
self.unmanaged = .empty;
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1290,7 +1290,7 @@ pub fn ArrayHashMapUnmanaged(
|
||||
pub fn move(self: *Self) Self {
|
||||
self.pointer_stability.assertUnlocked();
|
||||
const result = self.*;
|
||||
self.* = .{};
|
||||
self.* = .empty;
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -710,7 +710,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
|
||||
const old_memory = self.allocatedSlice();
|
||||
if (allocator.resize(old_memory, self.items.len)) {
|
||||
const result = self.items;
|
||||
self.* = .{};
|
||||
self.* = .empty;
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1267,7 +1267,7 @@ test "init" {
|
||||
}
|
||||
|
||||
{
|
||||
const list = ArrayListUnmanaged(i32){};
|
||||
const list: ArrayListUnmanaged(i32) = .empty;
|
||||
|
||||
try testing.expect(list.items.len == 0);
|
||||
try testing.expect(list.capacity == 0);
|
||||
@ -1312,7 +1312,7 @@ test "clone" {
|
||||
try testing.expectEqual(@as(i32, 5), cloned.items[2]);
|
||||
}
|
||||
{
|
||||
var array = ArrayListUnmanaged(i32){};
|
||||
var array: ArrayListUnmanaged(i32) = .empty;
|
||||
try array.append(a, -1);
|
||||
try array.append(a, 3);
|
||||
try array.append(a, 5);
|
||||
@ -1384,7 +1384,7 @@ test "basic" {
|
||||
try testing.expect(list.pop() == 33);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

{
@ -1448,7 +1448,7 @@ test "appendNTimes" {
}
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.appendNTimes(a, 2, 10);
@ -1467,7 +1467,7 @@ test "appendNTimes with failing allocator" {
try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10));
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10));
}
@ -1502,7 +1502,7 @@ test "orderedRemove" {
try testing.expectEqual(@as(usize, 4), list.items.len);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.append(a, 1);
@ -1537,7 +1537,7 @@ test "orderedRemove" {
}
{
// remove last item
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.append(a, 1);
try testing.expectEqual(@as(i32, 1), list.orderedRemove(0));
@ -1574,7 +1574,7 @@ test "swapRemove" {
try testing.expect(list.items.len == 4);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.append(a, 1);
@ -1617,7 +1617,7 @@ test "insert" {
try testing.expect(list.items[3] == 3);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.insert(a, 0, 1);
@ -1655,7 +1655,7 @@ test "insertSlice" {
try testing.expect(list.items[0] == 1);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.append(a, 1);
@ -1789,7 +1789,7 @@ test "ArrayListUnmanaged.replaceRange" {
const a = testing.allocator;

{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1798,7 +1798,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1811,7 +1811,7 @@ test "ArrayListUnmanaged.replaceRange" {
);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1820,7 +1820,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1829,7 +1829,7 @@ test "ArrayListUnmanaged.replaceRange" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1843,7 +1843,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
const a = testing.allocator;

{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1852,7 +1852,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1865,7 +1865,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1874,7 +1874,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1883,7 +1883,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" {
try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);
try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 });

@ -1906,15 +1906,15 @@ const ItemUnmanaged = struct {
test "ArrayList(T) of struct T" {
const a = std.testing.allocator;
{
var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(a) };
var root = Item{ .integer = 1, .sub_items = .init(a) };
defer root.sub_items.deinit();
try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(a) });
try root.sub_items.append(Item{ .integer = 42, .sub_items = .init(a) });
try testing.expect(root.sub_items.items[0].integer == 42);
}
{
var root = ItemUnmanaged{ .integer = 1, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} };
var root = ItemUnmanaged{ .integer = 1, .sub_items = .empty };
defer root.sub_items.deinit(a);
try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} });
try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = .empty });
try testing.expect(root.sub_items.items[0].integer == 42);
}
}
@ -1950,7 +1950,7 @@ test "ArrayListUnmanaged(u8) implements writer" {
const a = testing.allocator;

{
var buffer: ArrayListUnmanaged(u8) = .{};
var buffer: ArrayListUnmanaged(u8) = .empty;
defer buffer.deinit(a);

const x: i32 = 42;
@ -1960,7 +1960,7 @@ test "ArrayListUnmanaged(u8) implements writer" {
try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items);
}
{
var list: ArrayListAlignedUnmanaged(u8, 2) = .{};
var list: ArrayListAlignedUnmanaged(u8, 2) = .empty;
defer list.deinit(a);

const writer = list.writer(a);
@ -1989,7 +1989,7 @@ test "shrink still sets length when resizing is disabled" {
try testing.expect(list.items.len == 1);
}
{
var list = ArrayListUnmanaged(i32){};
var list: ArrayListUnmanaged(i32) = .empty;
defer list.deinit(a);

try list.append(a, 1);
@ -2026,7 +2026,7 @@ test "addManyAsArray" {
try testing.expectEqualSlices(u8, list.items, "aoeuasdf");
}
{
var list = ArrayListUnmanaged(u8){};
var list: ArrayListUnmanaged(u8) = .empty;
defer list.deinit(a);

(try list.addManyAsArray(a, 4)).* = "aoeu".*;
@ -2056,7 +2056,7 @@ test "growing memory preserves contents" {
try testing.expectEqualSlices(u8, list.items, "abcdijklefgh");
}
{
var list = ArrayListUnmanaged(u8){};
var list: ArrayListUnmanaged(u8) = .empty;
defer list.deinit(a);

(try list.addManyAsArray(a, 4)).* = "abcd".*;
@ -2132,7 +2132,7 @@ test "toOwnedSliceSentinel" {
try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0));
}
{
var list = ArrayListUnmanaged(u8){};
var list: ArrayListUnmanaged(u8) = .empty;
defer list.deinit(a);

try list.appendSlice(a, "foobar");
@ -2156,7 +2156,7 @@ test "accepts unaligned slices" {
try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 });
}
{
var list = std.ArrayListAlignedUnmanaged(u8, 8){};
var list: std.ArrayListAlignedUnmanaged(u8, 8) = .empty;
defer list.deinit(a);

try list.appendSlice(a, &.{ 0, 1, 2, 3 });

@ -6,8 +6,8 @@
//! certificate within `bytes`.

/// The key is the contents slice of the subject.
map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .{},
bytes: std.ArrayListUnmanaged(u8) = .{},
map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .empty,
bytes: std.ArrayListUnmanaged(u8) = .empty,

pub const VerifyError = Certificate.Parsed.VerifyError || error{
CertificateIssuerNotFound,

@ -42,20 +42,20 @@ sections: SectionArray = null_section_array,
is_macho: bool,

/// Filled later by the initializer
abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .{},
abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .empty,
/// Filled later by the initializer
compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .{},
compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .empty,
/// Filled later by the initializer
func_list: std.ArrayListUnmanaged(Func) = .{},
func_list: std.ArrayListUnmanaged(Func) = .empty,

eh_frame_hdr: ?ExceptionFrameHeader = null,
/// These lookup tables are only used if `eh_frame_hdr` is null
cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .{},
cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .empty,
/// Sorted by start_pc
fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .{},
fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .empty,

/// Populated by `populateRanges`.
ranges: std.ArrayListUnmanaged(Range) = .{},
ranges: std.ArrayListUnmanaged(Range) = .empty,

pub const Range = struct {
start: u64,
@ -1464,9 +1464,9 @@ fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) !

const standard_opcode_lengths = try fbr.readBytes(opcode_base - 1);

var directories: std.ArrayListUnmanaged(FileEntry) = .{};
var directories: std.ArrayListUnmanaged(FileEntry) = .empty;
defer directories.deinit(gpa);
var file_entries: std.ArrayListUnmanaged(FileEntry) = .{};
var file_entries: std.ArrayListUnmanaged(FileEntry) = .empty;
defer file_entries.deinit(gpa);

if (version < 5) {

@ -153,7 +153,7 @@ pub fn StackMachine(comptime options: Options) type {
}
};

stack: std.ArrayListUnmanaged(Value) = .{},
stack: std.ArrayListUnmanaged(Value) = .empty,

pub fn reset(self: *Self) void {
self.stack.clearRetainingCapacity();

@ -1933,8 +1933,8 @@ pub const VirtualMachine = struct {
len: u8 = 0,
};

columns: std.ArrayListUnmanaged(Column) = .{},
stack: std.ArrayListUnmanaged(ColumnRange) = .{},
columns: std.ArrayListUnmanaged(Column) = .empty,
stack: std.ArrayListUnmanaged(ColumnRange) = .empty,
current_row: Row = .{},

/// The result of executing the CIE's initial_instructions

@ -750,7 +750,7 @@ pub const Walker = struct {
///
/// `self` will not be closed after walking it.
pub fn walk(self: Dir, allocator: Allocator) Allocator.Error!Walker {
var stack: std.ArrayListUnmanaged(Walker.StackItem) = .{};
var stack: std.ArrayListUnmanaged(Walker.StackItem) = .empty;

try stack.append(allocator, .{
.iter = self.iterate(),

@ -24,7 +24,7 @@ pub const Preopens = struct {
};

pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens {
var names: std.ArrayListUnmanaged([]const u8) = .{};
var names: std.ArrayListUnmanaged([]const u8) = .empty;
defer names.deinit(gpa);

try names.ensureUnusedCapacity(gpa, 3);

@ -410,7 +410,7 @@ pub fn main() !void {
}
}

var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init;
defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak");
const allocator = gpa.allocator();

@ -401,7 +401,7 @@ pub fn HashMap(
@compileError("Context must be specified! Call initContext(allocator, ctx) instead.");
}
return .{
.unmanaged = .{},
.unmanaged = .empty,
.allocator = allocator,
.ctx = undefined, // ctx is zero-sized so this is safe.
};
@ -410,7 +410,7 @@ pub fn HashMap(
/// Create a managed hash map with a context
pub fn initContext(allocator: Allocator, ctx: Context) Self {
return .{
.unmanaged = .{},
.unmanaged = .empty,
.allocator = allocator,
.ctx = ctx,
};
@ -691,7 +691,7 @@ pub fn HashMap(
pub fn move(self: *Self) Self {
self.unmanaged.pointer_stability.assertUnlocked();
const result = self.*;
self.unmanaged = .{};
self.unmanaged = .empty;
return result;
}

@ -1543,7 +1543,7 @@ pub fn HashMapUnmanaged(
return self.cloneContext(allocator, @as(Context, undefined));
}
pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) Allocator.Error!HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) {
var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){};
var other: HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) = .empty;
if (self.size == 0)
return other;

@ -1572,7 +1572,7 @@ pub fn HashMapUnmanaged(
pub fn move(self: *Self) Self {
self.pointer_stability.assertUnlocked();
const result = self.*;
self.* = .{};
self.* = .empty;
return result;
}

@ -2360,7 +2360,7 @@ test "removeByPtr 0 sized key" {
}

test "repeat fetchRemove" {
var map = AutoHashMapUnmanaged(u64, void){};
var map: AutoHashMapUnmanaged(u64, void) = .empty;
defer map.deinit(testing.allocator);

try map.ensureTotalCapacity(testing.allocator, 4);
@ -2384,7 +2384,7 @@ test "repeat fetchRemove" {
}

test "getOrPut allocation failure" {
var map: std.StringHashMapUnmanaged(void) = .{};
var map: std.StringHashMapUnmanaged(void) = .empty;
try testing.expectError(error.OutOfMemory, map.getOrPut(std.testing.failing_allocator, "hello"));
}

@ -12,14 +12,14 @@ const Value = @import("dynamic.zig").Value;
/// instead of comptime-known struct field names.
pub fn ArrayHashMap(comptime T: type) type {
return struct {
map: std.StringArrayHashMapUnmanaged(T) = .{},
map: std.StringArrayHashMapUnmanaged(T) = .empty,

pub fn deinit(self: *@This(), allocator: Allocator) void {
self.map.deinit(allocator);
}

pub fn jsonParse(allocator: Allocator, source: anytype, options: ParseOptions) !@This() {
var map = std.StringArrayHashMapUnmanaged(T){};
var map: std.StringArrayHashMapUnmanaged(T) = .empty;
errdefer map.deinit(allocator);

if (.object_begin != try source.next()) return error.UnexpectedToken;
@ -52,7 +52,7 @@ pub fn ArrayHashMap(comptime T: type) type {
pub fn jsonParseFromValue(allocator: Allocator, source: Value, options: ParseOptions) !@This() {
if (source != .object) return error.UnexpectedToken;

var map = std.StringArrayHashMapUnmanaged(T){};
var map: std.StringArrayHashMapUnmanaged(T) = .empty;
errdefer map.deinit(allocator);

var it = source.object.iterator();

@ -907,12 +907,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv);
defer cmd_line_cache.deinit();

var app_buf = std.ArrayListUnmanaged(u16){};
var app_buf: std.ArrayListUnmanaged(u16) = .empty;
defer app_buf.deinit(self.allocator);

try app_buf.appendSlice(self.allocator, app_name_w);

var dir_buf = std.ArrayListUnmanaged(u16){};
var dir_buf: std.ArrayListUnmanaged(u16) = .empty;
defer dir_buf.deinit(self.allocator);

if (cwd_path_w.len > 0) {

@ -27,7 +27,7 @@ pub const writer = @import("tar/writer.zig").writer;
/// the errors in diagnostics to know whether the operation succeeded or failed.
pub const Diagnostics = struct {
allocator: std.mem.Allocator,
errors: std.ArrayListUnmanaged(Error) = .{},
errors: std.ArrayListUnmanaged(Error) = .empty,

entries: usize = 0,
root_dir: []const u8 = "",

@ -11,10 +11,10 @@ pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAll

/// This should only be used in temporary test programs.
pub const allocator = allocator_instance.allocator();
pub var allocator_instance = b: {
pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = b: {
if (!builtin.is_test)
@compileError("Cannot use testing allocator outside of test block");
break :b std.heap.GeneralPurposeAllocator(.{}){};
break :b .init;
};

pub const failing_allocator = failing_allocator_instance.allocator();

@ -22,8 +22,8 @@ tree: *const Ast,
/// sub-expressions. See `AstRlAnnotate` for details.
nodes_need_rl: *const AstRlAnnotate.RlNeededSet,
instructions: std.MultiArrayList(Zir.Inst) = .{},
extra: ArrayListUnmanaged(u32) = .{},
string_bytes: ArrayListUnmanaged(u8) = .{},
extra: ArrayListUnmanaged(u32) = .empty,
string_bytes: ArrayListUnmanaged(u8) = .empty,
/// Tracks the current byte offset within the source file.
/// Used to populate line deltas in the ZIR. AstGen maintains
/// this "cursor" throughout the entire AST lowering process in order
@ -39,8 +39,8 @@ source_column: u32 = 0,
/// Used for temporary allocations; freed after AstGen is complete.
/// The resulting ZIR code has no references to anything in this arena.
arena: Allocator,
string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{},
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .empty,
compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .empty,
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
fn_var_args: bool = false,
@ -52,9 +52,9 @@ within_fn: bool = false,
fn_ret_ty: Zir.Inst.Ref = .none,
/// Maps string table indexes to the first `@import` ZIR instruction
/// that uses this string as the operand.
imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{},
imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .empty,
/// Used for temporary storage when building payloads.
scratch: std.ArrayListUnmanaged(u32) = .{},
scratch: std.ArrayListUnmanaged(u32) = .empty,
/// Whenever a `ref` instruction is needed, it is created and saved in this
/// table instead of being immediately appended to the current block body.
/// Then, when the instruction is being added to the parent block (typically from
@ -65,7 +65,7 @@ scratch: std.ArrayListUnmanaged(u32) = .{},
/// 2. `ref` instructions will dominate their uses. This is a required property
/// of ZIR.
/// The key is the ref operand; the value is the ref instruction.
ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty,
/// Any information which should trigger invalidation of incremental compilation
/// data should be used to update this hasher. The result is the final source
/// hash of the enclosing declaration/etc.
@ -159,7 +159,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {

var top_scope: Scope.Top = .{};

var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
var gen_scope: GenZir = .{
.is_comptime = true,
.parent = &top_scope.base,
@ -5854,7 +5854,7 @@ fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zi
const payload_index = try reserveExtra(astgen, @typeInfo(Zir.Inst.ErrorSetDecl).@"struct".fields.len);
var fields_len: usize = 0;
{
var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{};
var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .empty;
defer idents.deinit(gpa);

const error_token = main_tokens[node];
@ -11259,7 +11259,7 @@ fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]co
if (!mem.startsWith(u8, ident_name, "@")) {
return ident_name;
}
var buf: ArrayListUnmanaged(u8) = .{};
var buf: ArrayListUnmanaged(u8) = .empty;
defer buf.deinit(astgen.gpa);
try astgen.parseStrLit(token, &buf, ident_name, 1);
if (mem.indexOfScalar(u8, buf.items, 0) != null) {
@ -11881,7 +11881,7 @@ const Scope = struct {
parent: *Scope,
/// Maps string table index to the source location of declaration,
/// for the purposes of reporting name shadowing compile errors.
decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{},
decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .empty,
node: Ast.Node.Index,
inst: Zir.Inst.Index,
maybe_generic: bool,
@ -11891,7 +11891,7 @@ const Scope = struct {
declaring_gz: ?*GenZir,

/// Set of captures used by this namespace.
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .{},
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .empty,

fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);
@ -13607,9 +13607,9 @@ fn scanContainer(
var sfba_state = std.heap.stackFallback(512, astgen.gpa);
const sfba = sfba_state.get();

var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{};
var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{};
var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{};
var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty;
var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty;
var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty;
defer {
names.deinit(sfba);
test_names.deinit(sfba);
@ -13796,7 +13796,7 @@ fn scanContainer(

for (names.keys(), names.values()) |name, first| {
if (first.next == null) continue;
var notes: std.ArrayListUnmanaged(u32) = .{};
var notes: std.ArrayListUnmanaged(u32) = .empty;
var prev: NameEntry = first;
while (prev.next) |cur| : (prev = cur.*) {
try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate name here", .{}));
@ -13808,7 +13808,7 @@ fn scanContainer(

for (test_names.keys(), test_names.values()) |name, first| {
if (first.next == null) continue;
var notes: std.ArrayListUnmanaged(u32) = .{};
var notes: std.ArrayListUnmanaged(u32) = .empty;
var prev: NameEntry = first;
while (prev.next) |cur| : (prev = cur.*) {
try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate test here", .{}));
@ -13820,7 +13820,7 @@ fn scanContainer(

for (decltest_names.keys(), decltest_names.values()) |name, first| {
if (first.next == null) continue;
var notes: std.ArrayListUnmanaged(u32) = .{};
var notes: std.ArrayListUnmanaged(u32) = .empty;
var prev: NameEntry = first;
while (prev.next) |cur| : (prev = cur.*) {
try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate decltest here", .{}));
@ -13949,10 +13949,10 @@ fn lowerAstErrors(astgen: *AstGen) !void {
const gpa = astgen.gpa;
const parse_err = tree.errors[0];

var msg: std.ArrayListUnmanaged(u8) = .{};
var msg: std.ArrayListUnmanaged(u8) = .empty;
defer msg.deinit(gpa);

var notes: std.ArrayListUnmanaged(u32) = .{};
var notes: std.ArrayListUnmanaged(u32) = .empty;
defer notes.deinit(gpa);

for (tree.errors[1..]) |note| {

@ -571,7 +571,7 @@ pub const Wip = struct {
if (index == .none) return .none;
const other_sl = other.getSourceLocation(index);

var ref_traces: std.ArrayListUnmanaged(ReferenceTrace) = .{};
var ref_traces: std.ArrayListUnmanaged(ReferenceTrace) = .empty;
defer ref_traces.deinit(wip.gpa);

if (other_sl.reference_trace_len > 0) {

@ -751,7 +751,7 @@ const MsvcLibDir = struct {
defer instances_dir.close();

var state_subpath_buf: [std.fs.max_name_bytes + 32]u8 = undefined;
var latest_version_lib_dir = std.ArrayListUnmanaged(u8){};
var latest_version_lib_dir: std.ArrayListUnmanaged(u8) = .empty;
errdefer latest_version_lib_dir.deinit(allocator);

var latest_version: u64 = 0;

@ -3711,7 +3711,7 @@ pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.In

// `defer` instructions duplicate the same body arbitrarily many times, but we only want to traverse
// their contents once per defer. So, we store the extra index of the body here to deduplicate.
var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{};
var found_defers: std.AutoHashMapUnmanaged(u32, void) = .empty;
defer found_defers.deinit(gpa);

try zir.findDeclsBody(gpa, list, &found_defers, bodies.value_body);
@ -3725,7 +3725,7 @@ pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.In
pub fn findDeclsRoot(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.Index)) !void {
list.clearRetainingCapacity();

var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{};
var found_defers: std.AutoHashMapUnmanaged(u32, void) = .empty;
defer found_defers.deinit(gpa);

try zir.findDeclsInner(gpa, list, &found_defers, .main_struct_inst);

@ -17,21 +17,21 @@ const Ais = AutoIndentingStream(std.ArrayList(u8).Writer);
pub const Fixups = struct {
/// The key is the mut token (`var`/`const`) of the variable declaration
/// that should have a `_ = foo;` inserted afterwards.
unused_var_decls: std.AutoHashMapUnmanaged(Ast.TokenIndex, void) = .{},
unused_var_decls: std.AutoHashMapUnmanaged(Ast.TokenIndex, void) = .empty,
/// The functions in this unordered set of AST fn decl nodes will render
/// with a function body of `@trap()` instead, with all parameters
/// discarded.
gut_functions: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .{},
gut_functions: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .empty,
/// These global declarations will be omitted.
omit_nodes: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .{},
omit_nodes: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .empty,
/// These expressions will be replaced with the string value.
replace_nodes_with_string: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .{},
replace_nodes_with_string: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .empty,
/// The string value will be inserted directly after the node.
append_string_after_node: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .{},
append_string_after_node: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .empty,
/// These nodes will be replaced with a different node.
replace_nodes_with_node: std.AutoHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .{},
replace_nodes_with_node: std.AutoHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .empty,
/// Change all identifier names matching the key to be value instead.
rename_identifiers: std.StringArrayHashMapUnmanaged([]const u8) = .{},
rename_identifiers: std.StringArrayHashMapUnmanaged([]const u8) = .empty,

/// All `@import` builtin calls which refer to a file path will be prefixed
/// with this path.

@ -7,11 +7,11 @@ const mem = std.mem;
const NativePaths = @This();

arena: Allocator,
include_dirs: std.ArrayListUnmanaged([]const u8) = .{},
lib_dirs: std.ArrayListUnmanaged([]const u8) = .{},
framework_dirs: std.ArrayListUnmanaged([]const u8) = .{},
rpaths: std.ArrayListUnmanaged([]const u8) = .{},
warnings: std.ArrayListUnmanaged([]const u8) = .{},
include_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
lib_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
framework_dirs: std.ArrayListUnmanaged([]const u8) = .empty,
rpaths: std.ArrayListUnmanaged([]const u8) = .empty,
warnings: std.ArrayListUnmanaged([]const u8) = .empty,

pub fn detect(arena: Allocator, native_target: std.Target) !NativePaths {
var self: NativePaths = .{ .arena = arena };

@ -95,7 +95,7 @@ native_system_include_paths: []const []const u8,
/// Corresponds to `-u <symbol>` for ELF/MachO and `/include:<symbol>` for COFF/PE.
force_undefined_symbols: std.StringArrayHashMapUnmanaged(void),

c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{},
c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .empty,
win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMapUnmanaged(*Win32Resource, void) else struct {
pub fn keys(_: @This()) [0]void {
return .{};
@ -106,10 +106,10 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa
pub fn deinit(_: @This(), _: Allocator) void {}
} = .{},

link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{},
link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .empty,
link_errors_mutex: std.Thread.Mutex = .{},
link_error_flags: link.File.ErrorFlags = .{},
lld_errors: std.ArrayListUnmanaged(LldError) = .{},
lld_errors: std.ArrayListUnmanaged(LldError) = .empty,

work_queues: [
len: {
@ -154,7 +154,7 @@ embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic),

/// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
/// This data is accessed by multiple threads and is protected by `mutex`.
failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .{},
failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .empty,

/// The ErrorBundle memory is owned by the `Win32Resource`, using Compilation's general purpose allocator.
/// This data is accessed by multiple threads and is protected by `mutex`.
@ -166,7 +166,7 @@ failed_win32_resources: if (dev.env.supports(.win32_resource)) std.AutoArrayHash
} = .{},

/// Miscellaneous things that can fail.
misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .{},
misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .empty,

/// When this is `true` it means invoking clang as a sub-process is expected to inherit
/// stdin, stdout, stderr, and if it returns non success, to forward the exit code.
@ -248,7 +248,7 @@ wasi_emulated_libs: []const wasi_libc.CRTFile,
/// For example `Scrt1.o` and `libc_nonshared.a`. These are populated after building libc from source,
/// The set of needed CRT (C runtime) files differs depending on the target and compilation settings.
/// The key is the basename, and the value is the absolute path to the completed build artifact.
crt_files: std.StringHashMapUnmanaged(CRTFile) = .{},
crt_files: std.StringHashMapUnmanaged(CRTFile) = .empty,

/// How many lines of reference trace should be included per compile error.
/// Null means only show snippet on first error.
@ -527,8 +527,8 @@ pub const CObject = struct {
}

pub const Bundle = struct {
file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{},
category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{},
file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty,
category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty,
diags: []Diag = &.{},

pub fn destroy(bundle: *Bundle, gpa: Allocator) void {
@ -561,8 +561,8 @@ pub const CObject = struct {
category: u32 = 0,
msg: []const u8 = &.{},
src_loc: SrcLoc = .{},
src_ranges: std.ArrayListUnmanaged(SrcRange) = .{},
sub_diags: std.ArrayListUnmanaged(Diag) = .{},
src_ranges: std.ArrayListUnmanaged(SrcRange) = .empty,
sub_diags: std.ArrayListUnmanaged(Diag) = .empty,

fn deinit(wip_diag: *@This(), allocator: Allocator) void {
allocator.free(wip_diag.msg);
@ -580,19 +580,19 @@ pub const CObject = struct {
var bc = BitcodeReader.init(gpa, .{ .reader = reader.any() });
defer bc.deinit();

var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;
errdefer {
for (file_names.values()) |file_name| gpa.free(file_name);
file_names.deinit(gpa);
}

var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{};
var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty;
errdefer {
for (category_names.values()) |category_name| gpa.free(category_name);
category_names.deinit(gpa);
}

var stack: std.ArrayListUnmanaged(WipDiag) = .{};
var stack: std.ArrayListUnmanaged(WipDiag) = .empty;
defer {
for (stack.items) |*wip_diag| wip_diag.deinit(gpa);
stack.deinit(gpa);
@ -1067,7 +1067,7 @@ pub const CreateOptions = struct {
cache_mode: CacheMode = .incremental,
lib_dirs: []const []const u8 = &[0][]const u8{},
rpath_list: []const []const u8 = &[0][]const u8{},
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .{},
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .empty,
c_source_files: []const CSourceFile = &.{},
rc_source_files: []const RcSourceFile = &.{},
manifest_file: ?[]const u8 = null,
@ -1155,7 +1155,7 @@ pub const CreateOptions = struct {
skip_linker_dependencies: bool = false,
hash_style: link.File.Elf.HashStyle = .both,
entry: Entry = .default,
force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{},
force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .empty,
stack_size: ?u64 = null,
image_base: ?u64 = null,
version: ?std.SemanticVersion = null,
@ -1210,7 +1210,7 @@ fn addModuleTableToCacheHash(
main_mod: *Package.Module,
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
) (error{OutOfMemory} || std.process.GetCwdError)!void {
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{};
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty;
defer seen_table.deinit(gpa);

// root_mod and main_mod may be the same pointer. In fact they usually are.
@ -3362,7 +3362,7 @@ pub fn addModuleErrorMsg(
const file_path = try err_src_loc.file_scope.fullPath(gpa);
defer gpa.free(file_path);

var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{};
var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .empty;
defer ref_traces.deinit(gpa);

if (module_err_msg.reference_trace_root.unwrap()) |rt_root| {
@ -3370,7 +3370,7 @@ pub fn addModuleErrorMsg(
all_references.* = try mod.resolveReferences();
}

var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{};
var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty;
defer seen.deinit(gpa);

const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len;
@ -3439,7 +3439,7 @@ pub fn addModuleErrorMsg(

// De-duplicate error notes. The main use case in mind for this is
// too many "note: called from here" notes when eval branch quota is reached.
var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .{};
var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .empty;
defer notes.deinit(gpa);

for (module_err_msg.notes) |module_note| {
@ -3544,7 +3544,7 @@ fn performAllTheWorkInner(
comp.job_queued_update_builtin_zig = false;
if (comp.zcu == null) break :b;
// TODO put all the modules in a flat array to make them easy to iterate.
var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{};
var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty;
defer seen.deinit(comp.gpa);
try seen.put(comp.gpa, comp.root_mod, {});
var i: usize = 0;
@ -4026,7 +4026,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
};
defer tar_file.close();

var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .{};
var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty;
defer seen_table.deinit(comp.gpa);

try seen_table.put(comp.gpa, zcu.main_mod, comp.root_name);
@ -5221,7 +5221,7 @@ fn spawnZigRc(
argv: []const []const u8,
child_progress_node: std.Progress.Node,
) !void {
var node_name: std.ArrayListUnmanaged(u8) = .{};
var node_name: std.ArrayListUnmanaged(u8) = .empty;
defer node_name.deinit(arena);

var child = std.process.Child.init(argv, arena);
@ -5540,7 +5540,7 @@ pub fn addCCArgs(
}

{
var san_arg: std.ArrayListUnmanaged(u8) = .{};
var san_arg: std.ArrayListUnmanaged(u8) = .empty;
const prefix = "-fsanitize=";
if (mod.sanitize_c) {
if (san_arg.items.len == 0) try san_arg.appendSlice(arena, prefix);

@ -23,36 +23,36 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th
/// * For a `func`, this is the source of the full function signature.
/// These are also invalidated if tracking fails for this instruction.
/// Value is index into `dep_entries` of the first dependency on this hash.
src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty,
/// Dependencies on the value of a Nav.
/// Value is index into `dep_entries` of the first dependency on this Nav value.
nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .{},
nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .empty,
/// Dependencies on an interned value, either:
/// * a runtime function (invalidated when its IES changes)
/// * a container type requiring resolution (invalidated when the type must be recreated at a new index)
/// Value is index into `dep_entries` of the first dependency on this interned value.
interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .{},
interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .empty,
/// Dependencies on the full set of names in a ZIR namespace.
/// Key refers to a `struct_decl`, `union_decl`, etc.
/// Value is index into `dep_entries` of the first dependency on this namespace.
namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{},
namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty,
/// Dependencies on the (non-)existence of some name in a namespace.
/// Value is index into `dep_entries` of the first dependency on this name.
namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .{},
namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .empty,

/// Given a `Depender`, points to an entry in `dep_entries` whose `depender`
/// matches. The `next_dependee` field can be used to iterate all such entries
/// and remove them from the corresponding lists.
first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .{},
first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .empty,

/// Stores dependency information. The hashmaps declared above are used to look
/// up entries in this list as required. This is not stored in `extra` so that
/// we can use `free_dep_entries` to track free indices, since dependencies are
/// removed frequently.
dep_entries: std.ArrayListUnmanaged(DepEntry) = .{},
dep_entries: std.ArrayListUnmanaged(DepEntry) = .empty,
/// Stores unused indices in `dep_entries` which can be reused without a full
/// garbage collection pass.
free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{},
free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .empty,

/// Whether a multi-threaded intern pool is useful.
/// Currently `false` until the intern pool is actually accessed
@ -10791,7 +10791,7 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
var bw = std.io.bufferedWriter(std.io.getStdErr().writer());
const w = bw.writer();

var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{};
var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .empty;
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view().slice();
const extra_list = local.shared.extra;

@ -94,10 +94,10 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
/// body and which we are currently within. Also includes `loop`s which are the target
/// of a `repeat` instruction, and `loop_switch_br`s which are the target of a
/// `switch_dispatch` instruction.
breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty,

/// The set of operands for which we have seen at least one usage but not their birth.
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty,

fn deinit(self: *@This(), gpa: Allocator) void {
self.breaks.deinit(gpa);
@ -107,15 +107,15 @@ fn LivenessPassData(comptime pass: LivenessPass) type {

.main_analysis => struct {
/// Every `block` and `loop` currently under analysis.
block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .{},
block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .empty,

/// The set of instructions currently alive in the current control
/// flow branch.
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{},
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty,

/// The extra data initialized by the `loop_analysis` pass for this pass to consume.
/// Owned by this struct during this pass.
old_extra: std.ArrayListUnmanaged(u32) = .{},
old_extra: std.ArrayListUnmanaged(u32) = .empty,

const BlockScope = struct {
/// If this is a `block`, these instructions are alive upon a `br` to this block.
@ -1710,10 +1710,10 @@ fn analyzeInstCondBr(
// Operands which are alive in one branch but not the other need to die at the start of
// the peer branch.

var then_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{};
var then_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .empty;
defer then_mirrored_deaths.deinit(gpa);

var else_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{};
var else_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .empty;
defer else_mirrored_deaths.deinit(gpa);

// Note: this invalidates `else_live`, but expands `then_live` to be their union
@ -1785,10 +1785,10 @@ fn analyzeInstSwitchBr(

switch (pass) {
.loop_analysis => {
var old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
var old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty;
defer old_breaks.deinit(gpa);

var old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{};
var old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty;
defer old_live.deinit(gpa);

if (is_dispatch_loop) {

@ -4,8 +4,8 @@ gpa: std.mem.Allocator,
air: Air,
liveness: Liveness,
live: LiveMap = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{},
loops: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .empty,
loops: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .empty,
intern_pool: *const InternPool,

pub const Error = error{ LivenessInvalid, OutOfMemory };

@ -91,7 +91,7 @@ pub const JobQueue = struct {
/// `table` may be missing some tasks such as ones that failed, so this
/// field contains references to all of them.
/// Protected by `mutex`.
all_fetches: std.ArrayListUnmanaged(*Fetch) = .{},
all_fetches: std.ArrayListUnmanaged(*Fetch) = .empty,

http_client: *std.http.Client,
thread_pool: *ThreadPool,
@ -1439,7 +1439,7 @@ fn computeHash(

// Track directories which had any files deleted from them so that empty directories
// can be deleted.
var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .{};
var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .empty;
defer sus_dirs.deinit(gpa);

var walker = try root_dir.walk(gpa);
@ -1710,7 +1710,7 @@ fn normalizePath(bytes: []u8) void {
}

const Filter = struct {
include_paths: std.StringArrayHashMapUnmanaged(void) = .{},
include_paths: std.StringArrayHashMapUnmanaged(void) = .empty,

/// sub_path is relative to the package root.
pub fn includePath(self: Filter, sub_path: []const u8) bool {
@ -2309,7 +2309,7 @@ const TestFetchBuilder = struct {
var package_dir = try self.packageDir();
defer package_dir.close();

var actual_files: std.ArrayListUnmanaged([]u8) = .{};
var actual_files: std.ArrayListUnmanaged([]u8) = .empty;
defer actual_files.deinit(std.testing.allocator);
defer for (actual_files.items) |file| std.testing.allocator.free(file);
var walker = try package_dir.walk(std.testing.allocator);

@ -38,7 +38,7 @@ test parseOid {

pub const Diagnostics = struct {
allocator: Allocator,
errors: std.ArrayListUnmanaged(Error) = .{},
errors: std.ArrayListUnmanaged(Error) = .empty,

pub const Error = union(enum) {
unable_to_create_sym_link: struct {
@ -263,7 +263,7 @@ const Odb = struct {
fn readObject(odb: *Odb) !Object {
var base_offset = try odb.pack_file.getPos();
var base_header: EntryHeader = undefined;
var delta_offsets = std.ArrayListUnmanaged(u64){};
var delta_offsets: std.ArrayListUnmanaged(u64) = .empty;
defer delta_offsets.deinit(odb.allocator);
const base_object = while (true) {
if (odb.cache.get(base_offset)) |base_object| break base_object;
@ -361,7 +361,7 @@ const Object = struct {
/// freed by the caller at any point after inserting it into the cache. Any
/// objects remaining in the cache will be freed when the cache itself is freed.
const ObjectCache = struct {
objects: std.AutoHashMapUnmanaged(u64, CacheEntry) = .{},
objects: std.AutoHashMapUnmanaged(u64, CacheEntry) = .empty,
lru_nodes: LruList = .{},
byte_size: usize = 0,

@ -660,7 +660,7 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;

var body = std.ArrayListUnmanaged(u8){};
var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
try Packet.write(.{ .data = "command=ls-refs\n" }, body_writer);
@ -767,7 +767,7 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;

var body = std.ArrayListUnmanaged(u8){};
var body: std.ArrayListUnmanaged(u8) = .empty;
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
try Packet.write(.{ .data = "command=fetch\n" }, body_writer);
@ -1044,9 +1044,9 @@ const IndexEntry = struct {
pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype) !void {
try pack.seekTo(0);

var index_entries = std.AutoHashMapUnmanaged(Oid, IndexEntry){};
var index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry) = .empty;
defer index_entries.deinit(allocator);
var pending_deltas = std.ArrayListUnmanaged(IndexEntry){};
var pending_deltas: std.ArrayListUnmanaged(IndexEntry) = .empty;
defer pending_deltas.deinit(allocator);

const pack_checksum = try indexPackFirstPass(allocator, pack, &index_entries, &pending_deltas);
@ -1068,7 +1068,7 @@ pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype)
remaining_deltas = pending_deltas.items.len;
}

var oids = std.ArrayListUnmanaged(Oid){};
var oids: std.ArrayListUnmanaged(Oid) = .empty;
defer oids.deinit(allocator);
try oids.ensureTotalCapacityPrecise(allocator, index_entries.count());
var index_entries_iter = index_entries.iterator();
@ -1109,7 +1109,7 @@ pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype)
try writer.writeInt(u32, index_entries.get(oid).?.crc32, .big);
}

var big_offsets = std.ArrayListUnmanaged(u64){};
var big_offsets: std.ArrayListUnmanaged(u64) = .empty;
defer big_offsets.deinit(allocator);
for (oids.items) |oid| {
const offset = index_entries.get(oid).?.offset;
@ -1213,7 +1213,7 @@ fn indexPackHashDelta(
// Figure out the chain of deltas to resolve
var base_offset = delta.offset;
var base_header: EntryHeader = undefined;
var delta_offsets = std.ArrayListUnmanaged(u64){};
var delta_offsets: std.ArrayListUnmanaged(u64) = .empty;
defer delta_offsets.deinit(allocator);
const base_object = while (true) {
if (cache.get(base_offset)) |base_object| break base_object;
@ -1447,7 +1447,7 @@ test "packfile indexing and checkout" {
"file8",
"file9",
};
var actual_files: std.ArrayListUnmanaged([]u8) = .{};
var actual_files: std.ArrayListUnmanaged([]u8) = .empty;
defer actual_files.deinit(testing.allocator);
defer for (actual_files.items) |file| testing.allocator.free(file);
var walker = try worktree.dir.walk(testing.allocator);

42
src/Sema.zig
42
src/Sema.zig
@ -13,7 +13,7 @@ gpa: Allocator,
arena: Allocator,
code: Zir,
air_instructions: std.MultiArrayList(Air.Inst) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .{},
air_extra: std.ArrayListUnmanaged(u32) = .empty,
/// Maps ZIR to AIR.
inst_map: InstMap = .{},
/// The "owner" of a `Sema` represents the root "thing" that is being analyzed.
@ -65,7 +65,7 @@ generic_call_src: LazySrcLoc = LazySrcLoc.unneeded,
/// They are created when an break_inline passes through a runtime condition, because
/// Sema must convert comptime control flow to runtime control flow, which means
/// breaking from a block.
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{},
post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .empty,
/// Populated with the last compile error created.
err: ?*Zcu.ErrorMsg = null,
/// Set to true when analyzing a func type instruction so that nested generic
@ -74,12 +74,12 @@ no_partial_func_ty: bool = false,

/// The temporary arena is used for the memory of the `InferredAlloc` values
/// here so the values can be dropped without any cleanup.
unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{},
unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .empty,

/// Links every pointer derived from a base `alloc` back to that `alloc`. Used
/// to detect comptime-known `const`s.
/// TODO: ZIR liveness analysis would allow us to remove elements from this map.
base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{},
base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .empty,

/// Runtime `alloc`s are placed in this map to track all comptime-known writes
/// before the corresponding `make_ptr_const` instruction.
@ -90,28 +90,28 @@ base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{},
/// is comptime-known, and all stores to the pointer must be applied at comptime
/// to determine the comptime value.
/// Backed by gpa.
maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .{},
maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .empty,

/// Comptime-mutable allocs, and any comptime allocs which reference it, are
/// stored as elements of this array.
/// Pointers to such memory are represented via an index into this array.
/// Backed by gpa.
comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{},
comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .empty,

/// A list of exports performed by this analysis. After this `Sema` terminates,
/// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`.
exports: std.ArrayListUnmanaged(Zcu.Export) = .{},
exports: std.ArrayListUnmanaged(Zcu.Export) = .empty,

/// All references registered so far by this `Sema`. This is a temporary duplicate
/// of data stored in `Zcu.all_references`. It exists to avoid adding references to
/// a given `AnalUnit` multiple times.
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty,

/// All dependencies registered so far by this `Sema`. This is a temporary duplicate
/// of the main dependency data. It exists to avoid adding dependencies to a given
/// `AnalUnit` multiple times.
dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{},
dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .empty,

/// Whether memoization of this call is permitted. Operations with side effects global
/// to the `Sema`, such as `@setEvalBranchQuota`, set this to `false`. It is observed
@ -208,7 +208,7 @@ pub const InferredErrorSet = struct {
/// are returned from any dependent functions.
errors: NameMap = .{},
/// Other inferred error sets which this inferred error set should include.
inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{},
inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty,
/// The regular error set created by resolving this inferred error set.
resolved: InternPool.Index = .none,

@ -508,9 +508,9 @@ pub const Block = struct {
/// * for a `switch_block[_ref]`, this refers to dummy `br` instructions
/// which correspond to `switch_continue` ZIR. The switch logic will
/// rewrite these to appropriate AIR switch dispatches.
extra_insts: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
extra_insts: std.ArrayListUnmanaged(Air.Inst.Index) = .empty,
/// Same indexes, capacity, length as `extra_insts`.
extra_src_locs: std.ArrayListUnmanaged(LazySrcLoc) = .{},
extra_src_locs: std.ArrayListUnmanaged(LazySrcLoc) = .empty,

pub fn deinit(merges: *@This(), allocator: Allocator) void {
merges.results.deinit(allocator);
@ -871,7 +871,7 @@ const InferredAlloc = struct {
/// is known. These should be rewritten to perform any required coercions
/// when the type is resolved.
/// Allocated from `sema.arena`.
prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .{},
prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .empty,
};

const NeededComptimeReason = struct {
@ -2908,7 +2908,7 @@ fn createTypeName(
const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail);
const zir_tags = sema.code.instructions.items(.tag);

var buf: std.ArrayListUnmanaged(u8) = .{};
var buf: std.ArrayListUnmanaged(u8) = .empty;
defer buf.deinit(gpa);

const writer = buf.writer(gpa);
@ -6851,11 +6851,11 @@ fn lookupInNamespace(

if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) {
const gpa = sema.gpa;
var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{};
var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .empty;
defer checked_namespaces.deinit(gpa);

// Keep track of name conflicts for error notes.
var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{};
var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty;
defer candidates.deinit(gpa);

try checked_namespaces.put(gpa, namespace, {});
@ -22754,7 +22754,7 @@ fn reifyUnion(
break :tag_ty .{ enum_tag_ty.toIntern(), true };
} else tag_ty: {
// We must track field names and set up the tag type ourselves.
var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
try field_names.ensureTotalCapacity(sema.arena, fields_len);

for (field_types, 0..) |*field_ty, field_idx| {
@ -37075,7 +37075,7 @@ fn unionFields(

var int_tag_ty: Type = undefined;
var enum_field_names: []InternPool.NullTerminatedString = &.{};
var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty;
var explicit_tags_seen: []bool = &.{};
if (tag_type_ref != .none) {
const tag_ty_src: LazySrcLoc = .{
@ -37126,8 +37126,8 @@ fn unionFields(
enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len);
}

var field_types: std.ArrayListUnmanaged(InternPool.Index) = .{};
var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .{};
var field_types: std.ArrayListUnmanaged(InternPool.Index) = .empty;
var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .empty;

try field_types.ensureTotalCapacityPrecise(sema.arena, fields_len);
if (small.any_aligned_fields)

88
src/Zcu.zig
88
src/Zcu.zig
@ -76,14 +76,14 @@ local_zir_cache: Compilation.Directory,

/// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
all_exports: std.ArrayListUnmanaged(Export) = .{},
all_exports: std.ArrayListUnmanaged(Export) = .empty,
/// This is a list of free indices in `all_exports`. These indices may be reused by exports from
/// future semantic analysis.
free_exports: std.ArrayListUnmanaged(u32) = .{},
free_exports: std.ArrayListUnmanaged(u32) = .empty,
/// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of
/// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit`
/// whose analysis triggered the export.
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// Like `single_exports`, but for `AnalUnit`s which perform multiple exports.
/// The exports are `all_exports.items[index..][0..len]`.
multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
@ -104,29 +104,29 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
/// `Compilation.update` of the process for a given `Compilation`.
///
/// Indexes correspond 1:1 to `files`.
import_table: std.StringArrayHashMapUnmanaged(File.Index) = .{},
import_table: std.StringArrayHashMapUnmanaged(File.Index) = .empty,

/// The set of all the files which have been loaded with `@embedFile` in the Module.
/// We keep track of this in order to iterate over it and check which files have been
/// modified on the file system when an update is requested, as well as to cache
/// `@embedFile` results.
/// Keys are fully resolved file paths. This table owns the keys and values.
embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{},
embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .empty,

/// Stores all Type and Value objects.
/// The idea is that this will be periodically garbage-collected, but such logic
/// is not yet implemented.
intern_pool: InternPool = .{},

analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{},
failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .empty,
/// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed.
transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// This `Nav` succeeded analysis, but failed codegen.
/// This may be a simple "value" `Nav`, or it may be a function.
/// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator.
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .{},
failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty,
/// Keep track of one `@compileLog` callsite per `AnalUnit`.
/// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`.
compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
@ -141,14 +141,14 @@ compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
}) = .{},
/// Using a map here for consistency with the other fields here.
/// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{},
failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty,
/// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator.
failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{},
failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .empty,
/// Key is index into `all_exports`.
failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{},
failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .empty,
/// If analysis failed due to a cimport error, the corresponding Clang errors
/// are stored here.
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{},
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty,

/// Maximum amount of distinct error values, set by --error-limit
error_limit: ErrorInt,
@ -156,19 +156,19 @@ error_limit: ErrorInt,
/// Value is the number of PO dependencies of this AnalUnit.
/// This value will decrease as we perform semantic analysis to learn what is outdated.
/// If any of these PO deps is outdated, this value will be moved to `outdated`.
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// Value is the number of PO dependencies of this AnalUnit.
/// Once this value drops to 0, the AnalUnit is a candidate for re-analysis.
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
/// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0.
/// Such `AnalUnit`s are ready for immediate re-analysis.
/// See `findOutdatedToAnalyze` for details.
outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{},
outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty,
/// This contains a list of AnalUnit whose analysis or codegen failed, but the
/// failure was something like running out of disk space, and trying again may
/// succeed. On the next update, we will flush this list, marking all members of
/// it as outdated.
retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{},
retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .empty,

/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
@ -184,31 +184,31 @@ stage1_flags: packed struct {
reserved: u2 = 0,
} = .{},

compile_log_text: std.ArrayListUnmanaged(u8) = .{},
compile_log_text: std.ArrayListUnmanaged(u8) = .empty,

test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .{},
test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty,

global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{},
global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .empty,

/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_references` of the first reference triggered by the unit.
/// The `next` field on the `Reference` forms a linked list of all references
/// triggered by the key `AnalUnit`.
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
all_references: std.ArrayListUnmanaged(Reference) = .{},
reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_references: std.ArrayListUnmanaged(Reference) = .empty,
/// Freelist of indices in `all_references`.
free_references: std.ArrayListUnmanaged(u32) = .{},
free_references: std.ArrayListUnmanaged(u32) = .empty,

/// Key is the `AnalUnit` *performing* the reference. This representation allows
/// incremental updates to quickly delete references caused by a specific `AnalUnit`.
/// Value is index into `all_type_reference` of the first reference triggered by the unit.
/// The `next` field on the `TypeReference` forms a linked list of all type references
/// triggered by the key `AnalUnit`.
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{},
all_type_references: std.ArrayListUnmanaged(TypeReference) = .{},
type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty,
all_type_references: std.ArrayListUnmanaged(TypeReference) = .empty,
/// Freelist of indices in `all_type_references`.
free_type_references: std.ArrayListUnmanaged(u32) = .{},
free_type_references: std.ArrayListUnmanaged(u32) = .empty,

panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len,
/// The panic function body.
@ -338,16 +338,16 @@ pub const Namespace = struct {
/// Will be a struct, enum, union, or opaque.
owner_type: InternPool.Index,
/// Members of the namespace which are marked `pub`.
pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{},
pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
/// Members of the namespace which are *not* marked `pub`.
priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{},
priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty,
/// All `usingnamespace` declarations in this namespace which are marked `pub`.
pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{},
pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty,
/// All `usingnamespace` declarations in this namespace which are *not* marked `pub`.
priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{},
priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty,
/// All `comptime` and `test` declarations in this namespace. We store these purely so that
/// incremental compilation can re-use the existing `Cau`s when a namespace changes.
other_decls: std.ArrayListUnmanaged(InternPool.Cau.Index) = .{},
other_decls: std.ArrayListUnmanaged(InternPool.Cau.Index) = .empty,

pub const Index = InternPool.NamespaceIndex;
pub const OptionalIndex = InternPool.OptionalNamespaceIndex;
@ -451,7 +451,7 @@ pub const File = struct {
/// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen.
multi_pkg: bool = false,
/// List of references to this file, used for multi-package errors.
references: std.ArrayListUnmanaged(File.Reference) = .{},
references: std.ArrayListUnmanaged(File.Reference) = .empty,

/// The most recent successful ZIR for this file, with no errors.
/// This is only populated when a previously successful ZIR
@ -2551,13 +2551,13 @@ pub fn mapOldZirToNew(
old_inst: Zir.Inst.Index,
new_inst: Zir.Inst.Index,
};
var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{};
var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .empty;
defer match_stack.deinit(gpa);

// Used as temporary buffers for namespace declaration instructions
var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
defer old_decls.deinit(gpa);
var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
defer new_decls.deinit(gpa);

// Map the main struct inst (and anything in its fields)
@ -2582,19 +2582,19 @@ pub fn mapOldZirToNew(
try inst_map.put(gpa, match_item.old_inst, match_item.new_inst);

// Maps decl name to `declaration` instruction.
var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .{};
var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
defer named_decls.deinit(gpa);
// Maps test name to `declaration` instruction.
var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .{};
var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty;
defer named_tests.deinit(gpa);
// All unnamed tests, in order, for a best-effort match.
var unnamed_tests: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var unnamed_tests: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
defer unnamed_tests.deinit(gpa);
// All comptime declarations, in order, for a best-effort match.
var comptime_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var comptime_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
defer comptime_decls.deinit(gpa);
// All usingnamespace declarations, in order, for a best-effort match.
var usingnamespace_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{};
var usingnamespace_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty;
defer usingnamespace_decls.deinit(gpa);

{
@ -3154,12 +3154,12 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve
const comp = zcu.comp;
const ip = &zcu.intern_pool;

var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
errdefer result.deinit(gpa);

var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{};
var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .{};
var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{};
var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty;
var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .empty;
var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty;
defer {
checked_types.deinit(gpa);
type_queue.deinit(gpa);
@ -320,7 +320,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
const gpa = zcu.gpa;

// We need to visit every updated File for every TrackedInst in InternPool.
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{};
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty;
defer cleanupUpdatedFiles(gpa, &updated_files);
for (zcu.import_table.values()) |file_index| {
const file = zcu.fileByIndex(file_index);
@ -399,7 +399,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
};
if (!has_namespace) continue;

var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
defer old_names.deinit(zcu.gpa);
{
var it = old_zir.declIterator(old_inst);
@ -1721,7 +1721,7 @@ pub fn scanNamespace(
// For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather
// than their name. We'll build an efficient mapping now, then discard the current `decls`.
// We map to the `Cau`, since not every declaration has a `Nav`.
var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index) = .{};
var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index) = .empty;
defer existing_by_inst.deinit(gpa);

try existing_by_inst.ensureTotalCapacity(gpa, @intCast(
@ -1761,7 +1761,7 @@ pub fn scanNamespace(
}
}

var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
defer seen_decls.deinit(gpa);

namespace.pub_decls.clearRetainingCapacity();
@ -2293,8 +2293,8 @@ pub fn processExports(pt: Zcu.PerThread) !void {
const gpa = zcu.gpa;

// First, construct a mapping of every exported value and Nav to the indices of all its different exports.
var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .{};
var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{};
var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .empty;
var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .empty;
defer {
for (nav_exports.values()) |*exports| {
exports.deinit(gpa);
@ -62,7 +62,7 @@ stack_align: u32,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
mir_extra: std.ArrayListUnmanaged(u32) = .empty,

/// Byte offset within the source file of the ending curly.
end_di_line: u32,
@ -71,13 +71,13 @@ end_di_column: u32,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,

/// We postpone the creation of debug info for function args and locals
/// until after all Mir instructions have been generated. Only then we
/// will know saved_regs_stack_space which is necessary in order to
/// calculate the right stack offsest with respect to the `.fp` register.
dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{},
dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .empty,

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@ -89,11 +89,11 @@ dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{},
branch_stack: *std.ArrayList(Branch),

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,

register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty,
/// Tracks the current instruction allocated to the compare flags
compare_flags_inst: ?Air.Inst.Index = null,

@ -247,7 +247,7 @@ const DbgInfoReloc = struct {
};

const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty,

fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
@ -33,18 +33,18 @@ prev_di_pc: usize,
saved_regs_stack_space: u32,

/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty,

/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{},
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty,

/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,

/// The final stack frame size of the function (already aligned to the
/// respective stack alignment). Does not include prologue stack space.
@ -346,7 +346,7 @@ fn lowerBranches(emit: *Emit) !void {
if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
try origin_list.append(gpa, inst);
} else {
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty;
try origin_list.append(gpa, inst);
try emit.branch_forward_origins.put(gpa, target_inst, origin_list);
}
@ -62,7 +62,7 @@ stack_align: u32,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
mir_extra: std.ArrayListUnmanaged(u32) = .empty,

/// Byte offset within the source file of the ending curly.
end_di_line: u32,
@ -71,13 +71,13 @@ end_di_column: u32,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,

/// We postpone the creation of debug info for function args and locals
/// until after all Mir instructions have been generated. Only then we
/// will know saved_regs_stack_space which is necessary in order to
/// calculate the right stack offsest with respect to the `.fp` register.
dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{},
dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .empty,

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@ -89,11 +89,11 @@ dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{},
branch_stack: *std.ArrayList(Branch),

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,

register_manager: RegisterManager = .{},
/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty,
/// Tracks the current instruction allocated to the compare flags
cpsr_flags_inst: ?Air.Inst.Index = null,

@ -168,7 +168,7 @@ const MCValue = union(enum) {
};

const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty,

fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
@ -40,16 +40,16 @@ saved_regs_stack_space: u32,
stack_size: u32,

/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty,
/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{},
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty,
/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,

const InnerError = error{
OutOfMemory,
@ -264,7 +264,7 @@ fn lowerBranches(emit: *Emit) !void {
if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
try origin_list.append(gpa, inst);
} else {
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty;
try origin_list.append(gpa, inst);
try emit.branch_forward_origins.put(gpa, target_inst, origin_list);
}
@ -81,7 +81,7 @@ scope_generation: u32,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@ -97,14 +97,14 @@ avl: ?u64,
vtype: ?bits.VType,

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,
register_manager: RegisterManager = .{},

const_tracking: ConstTrackingMap = .{},
inst_tracking: InstTrackingMap = .{},

frame_allocs: std.MultiArrayList(FrameAlloc) = .{},
free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{},
free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty,
frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{},

loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
@ -342,7 +342,7 @@ const MCValue = union(enum) {
};

const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty,

fn deinit(func: *Branch, gpa: Allocator) void {
func.inst_table.deinit(gpa);
@ -621,7 +621,7 @@ const FrameAlloc = struct {
};

const BlockData = struct {
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
state: State,

fn deinit(bd: *BlockData, gpa: Allocator) void {
@ -6193,7 +6193,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {

const Label = struct {
target: Mir.Inst.Index = undefined,
pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,

const Kind = enum { definition, reference };

@ -6217,7 +6217,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void {
return name.len > 0;
}
};
var labels: std.StringHashMapUnmanaged(Label) = .{};
var labels: std.StringHashMapUnmanaged(Label) = .empty;
defer {
var label_it = labels.valueIterator();
while (label_it.next()) |label| label.pending_relocs.deinit(func.gpa);
@ -10,8 +10,8 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,

code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
relocs: std.ArrayListUnmanaged(Reloc) = .empty,

pub const Error = Lower.Error || error{
EmitFail,
@ -68,7 +68,7 @@ stack_align: Alignment,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
mir_extra: std.ArrayListUnmanaged(u32) = .empty,

/// Byte offset within the source file of the ending curly.
end_di_line: u32,
@ -77,7 +77,7 @@ end_di_column: u32,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty,

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@ -89,12 +89,12 @@ exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},
branch_stack: *std.ArrayList(Branch),

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,

register_manager: RegisterManager = .{},

/// Maps offset to what is stored there.
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{},
stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty,

/// Tracks the current instruction allocated to the condition flags
condition_flags_inst: ?Air.Inst.Index = null,
@ -201,7 +201,7 @@ const MCValue = union(enum) {
};

const Branch = struct {
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{},
inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty,

fn deinit(self: *Branch, gpa: Allocator) void {
self.inst_table.deinit(gpa);
@ -30,16 +30,16 @@ prev_di_column: u32,
prev_di_pc: usize,

/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty,
/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{},
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty,
/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,

const InnerError = error{
OutOfMemory,
@ -571,7 +571,7 @@ fn lowerBranches(emit: *Emit) !void {
if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
try origin_list.append(gpa, inst);
} else {
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty;
try origin_list.append(gpa, inst);
try emit.branch_forward_origins.put(gpa, target_inst, origin_list);
}
@ -654,7 +654,7 @@ func_index: InternPool.Index,
/// When we return from a branch, the branch will be popped from this list,
/// which means branches can only contain references from within its own branch,
/// or a branch higher (lower index) in the tree.
branches: std.ArrayListUnmanaged(Branch) = .{},
branches: std.ArrayListUnmanaged(Branch) = .empty,
/// Table to save `WValue`'s generated by an `Air.Inst`
// values: ValueTable,
/// Mapping from Air.Inst.Index to block ids
@ -663,7 +663,7 @@ blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct {
value: WValue,
}) = .{},
/// Maps `loop` instructions to their label. `br` to here repeats the loop.
loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .{},
loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty,
/// `bytes` contains the wasm bytecode belonging to the 'code' section.
code: *ArrayList(u8),
/// The index the next local generated will have
@ -681,7 +681,7 @@ locals: std.ArrayListUnmanaged(u8),
/// List of simd128 immediates. Each value is stored as an array of bytes.
/// This list will only be populated for 128bit-simd values when the target features
/// are enabled also.
simd_immediates: std.ArrayListUnmanaged([16]u8) = .{},
simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty,
/// The Target we're emitting (used to call intInfo)
target: *const std.Target,
/// Represents the wasm binary file that is being linked.
@ -690,7 +690,7 @@ pt: Zcu.PerThread,
/// List of MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// Contains extra data for MIR
mir_extra: std.ArrayListUnmanaged(u32) = .{},
mir_extra: std.ArrayListUnmanaged(u32) = .empty,
/// When a function is executing, we store the the current stack pointer's value within this local.
/// This value is then used to restore the stack pointer to the original value at the return of the function.
initial_stack_value: WValue = .none,
@ -717,19 +717,19 @@ stack_alignment: Alignment = .@"16",
// allows us to re-use locals that are no longer used. e.g. a temporary local.
/// A list of indexes which represents a local of valtype `i32`.
/// It is illegal to store a non-i32 valtype in this list.
free_locals_i32: std.ArrayListUnmanaged(u32) = .{},
free_locals_i32: std.ArrayListUnmanaged(u32) = .empty,
/// A list of indexes which represents a local of valtype `i64`.
/// It is illegal to store a non-i64 valtype in this list.
free_locals_i64: std.ArrayListUnmanaged(u32) = .{},
free_locals_i64: std.ArrayListUnmanaged(u32) = .empty,
/// A list of indexes which represents a local of valtype `f32`.
/// It is illegal to store a non-f32 valtype in this list.
free_locals_f32: std.ArrayListUnmanaged(u32) = .{},
free_locals_f32: std.ArrayListUnmanaged(u32) = .empty,
/// A list of indexes which represents a local of valtype `f64`.
/// It is illegal to store a non-f64 valtype in this list.
free_locals_f64: std.ArrayListUnmanaged(u32) = .{},
free_locals_f64: std.ArrayListUnmanaged(u32) = .empty,
/// A list of indexes which represents a local of valtype `v127`.
/// It is illegal to store a non-v128 valtype in this list.
free_locals_v128: std.ArrayListUnmanaged(u32) = .{},
free_locals_v128: std.ArrayListUnmanaged(u32) = .empty,

/// When in debug mode, this tracks if no `finishAir` was missed.
/// Forgetting to call `finishAir` will cause the result to not be
@ -78,7 +78,7 @@ eflags_inst: ?Air.Inst.Index = null,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
/// MIR extra data
mir_extra: std.ArrayListUnmanaged(u32) = .{},
mir_extra: std.ArrayListUnmanaged(u32) = .empty,

/// Byte offset within the source file of the ending curly.
end_di_line: u32,
@ -87,13 +87,13 @@ end_di_column: u32,
/// The value is an offset into the `Function` `code` from the beginning.
/// To perform the reloc, write 32-bit signed little-endian integer
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,

const_tracking: ConstTrackingMap = .{},
inst_tracking: InstTrackingMap = .{},

// Key is the block instruction
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,

register_manager: RegisterManager = .{},

@ -101,7 +101,7 @@ register_manager: RegisterManager = .{},
scope_generation: u32 = 0,

frame_allocs: std.MultiArrayList(FrameAlloc) = .{},
free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{},
free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty,
frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{},

loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct {
@ -799,7 +799,7 @@ const StackAllocation = struct {
};

const BlockData = struct {
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,
state: State,

fn deinit(self: *BlockData, gpa: Allocator) void {
@ -14248,7 +14248,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {

const Label = struct {
target: Mir.Inst.Index = undefined,
pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{},
pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty,

const Kind = enum { definition, reference };

@ -14272,7 +14272,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return name.len > 0;
}
};
var labels: std.StringHashMapUnmanaged(Label) = .{};
var labels: std.StringHashMapUnmanaged(Label) = .empty;
defer {
var label_it = labels.valueIterator();
while (label_it.next()) |label| label.pending_relocs.deinit(self.gpa);
@ -11,8 +11,8 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,

code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
relocs: std.ArrayListUnmanaged(Reloc) = .empty,

pub const Error = Lower.Error || error{
EmitFail,
@ -304,14 +304,14 @@ pub const Function = struct {
air: Air,
liveness: Liveness,
value_map: CValueMap,
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty,
next_arg_index: usize = 0,
next_block_index: usize = 0,
object: Object,
lazy_fns: LazyFnMap,
func_index: InternPool.Index,
/// All the locals, to be emitted at the top of the function.
locals: std.ArrayListUnmanaged(Local) = .{},
locals: std.ArrayListUnmanaged(Local) = .empty,
/// Which locals are available for reuse, based on Type.
free_locals_map: LocalsMap = .{},
/// Locals which will not be freed by Liveness. This is used after a
@ -320,10 +320,10 @@ pub const Function = struct {
/// of variable declarations at the top of a function, sorted descending
/// by type alignment.
/// The value is whether the alloc needs to be emitted in the header.
allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .{},
allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .empty,
/// Maps from `loop_switch_br` instructions to the allocated local used
/// for the switch cond. Dispatches should set this local to the new cond.
loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .{},
loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .empty,

fn resolveInst(f: *Function, ref: Air.Inst.Ref) !CValue {
const gop = try f.value_map.getOrPut(ref);
@ -1500,7 +1500,7 @@ pub const Object = struct {
// instructions. Depending on the calling convention, this list is not necessarily
// a bijection with the actual LLVM parameters of the function.
const gpa = o.gpa;
var args: std.ArrayListUnmanaged(Builder.Value) = .{};
var args: std.ArrayListUnmanaged(Builder.Value) = .empty;
defer args.deinit(gpa);

{
@ -2497,7 +2497,7 @@ pub const Object = struct {

switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty;
defer fields.deinit(gpa);

try fields.ensureUnusedCapacity(gpa, tuple.types.len);
@ -2574,7 +2574,7 @@ pub const Object = struct {

const struct_type = zcu.typeToStruct(ty).?;

var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty;
defer fields.deinit(gpa);

try fields.ensureUnusedCapacity(gpa, struct_type.field_types.len);
@ -2667,7 +2667,7 @@ pub const Object = struct {
return debug_union_type;
}

var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty;
defer fields.deinit(gpa);

try fields.ensureUnusedCapacity(gpa, union_type.loadTagType(ip).names.len);
@ -3412,7 +3412,7 @@ pub const Object = struct {
return int_ty;
}

var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty;
defer llvm_field_types.deinit(o.gpa);
// Although we can estimate how much capacity to add, these cannot be
// relied upon because of the recursive calls to lowerType below.
@ -3481,7 +3481,7 @@ pub const Object = struct {
return ty;
},
.anon_struct_type => |anon_struct_type| {
var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .{};
var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty;
defer llvm_field_types.deinit(o.gpa);
// Although we can estimate how much capacity to add, these cannot be
// relied upon because of the recursive calls to lowerType below.
@ -3672,7 +3672,7 @@ pub const Object = struct {
const target = zcu.getTarget();
const ret_ty = try lowerFnRetTy(o, fn_info);

var llvm_params = std.ArrayListUnmanaged(Builder.Type){};
var llvm_params: std.ArrayListUnmanaged(Builder.Type) = .empty;
defer llvm_params.deinit(o.gpa);

if (firstParamSRet(fn_info, zcu, target)) {
@ -7438,7 +7438,7 @@ pub const FuncGen = struct {
const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]);
extra_i += inputs.len;

var llvm_constraints: std.ArrayListUnmanaged(u8) = .{};
var llvm_constraints: std.ArrayListUnmanaged(u8) = .empty;
defer llvm_constraints.deinit(self.gpa);

var arena_allocator = std.heap.ArenaAllocator.init(self.gpa);
@ -7466,7 +7466,7 @@ pub const FuncGen = struct {
var llvm_param_i: usize = 0;
var total_i: u16 = 0;

var name_map: std.StringArrayHashMapUnmanaged(u16) = .{};
var name_map: std.StringArrayHashMapUnmanaged(u16) = .empty;
try name_map.ensureUnusedCapacity(arena, max_param_count);

var rw_extra_i = extra_i;
@ -3994,7 +3994,7 @@ pub const Function = struct {
names: [*]const String = &[0]String{},
value_indices: [*]const u32 = &[0]u32{},
strip: bool,
debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .{},
debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .empty,
debug_values: []const Instruction.Index = &.{},
extra: []const u32 = &.{},

@ -6166,7 +6166,7 @@ pub const WipFunction = struct {
const value_indices = try gpa.alloc(u32, final_instructions_len);
errdefer gpa.free(value_indices);

var debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .{};
var debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .empty;
errdefer debug_locations.deinit(gpa);
try debug_locations.ensureUnusedCapacity(gpa, @intCast(self.debug_locations.count()));

@ -9557,7 +9557,7 @@ pub fn printUnbuffered(
}
}

var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .{};
var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .empty;
defer attribute_groups.deinit(self.gpa);

for (0.., self.functions.items) |function_i, function| {
@ -13133,7 +13133,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co
// Write LLVM IR magic
try bitcode.writeBits(ir.MAGIC, 32);

var record: std.ArrayListUnmanaged(u64) = .{};
var record: std.ArrayListUnmanaged(u64) = .empty;
defer record.deinit(self.gpa);

// IDENTIFICATION_BLOCK
@ -13524,7 +13524,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co
try paramattr_block.end();
}

var globals: std.AutoArrayHashMapUnmanaged(Global.Index, void) = .{};
var globals: std.AutoArrayHashMapUnmanaged(Global.Index, void) = .empty;
defer globals.deinit(self.gpa);
try globals.ensureUnusedCapacity(
self.gpa,
@ -13587,7 +13587,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co

// Globals
{
var section_map: std.AutoArrayHashMapUnmanaged(String, void) = .{};
var section_map: std.AutoArrayHashMapUnmanaged(String, void) = .empty;
defer section_map.deinit(self.gpa);
try section_map.ensureUnusedCapacity(self.gpa, globals.count());
@ -79,7 +79,7 @@ const ControlFlow = union(enum) {
selection: struct {
/// In order to know which merges we still need to do, we need to keep
/// a stack of those.
merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .{},
merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .empty,
},
/// For a `loop` type block, we can early-exit the block by
/// jumping to the loop exit node, and we don't need to generate
@ -87,7 +87,7 @@ const ControlFlow = union(enum) {
loop: struct {
/// The next block to jump to can be determined from any number
/// of conditions that jump to the loop exit.
merges: std.ArrayListUnmanaged(Incoming) = .{},
merges: std.ArrayListUnmanaged(Incoming) = .empty,
/// The label id of the loop's merge block.
merge_block: IdRef,
},
@ -102,10 +102,10 @@ const ControlFlow = union(enum) {
};
/// The stack of (structured) blocks that we are currently in. This determines
/// how exits from the current block must be handled.
block_stack: std.ArrayListUnmanaged(*Structured.Block) = .{},
block_stack: std.ArrayListUnmanaged(*Structured.Block) = .empty,
/// Maps `block` inst indices to the variable that the block's result
/// value must be written to.
block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .{},
block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .empty,
};

const Unstructured = struct {
@ -116,12 +116,12 @@ const ControlFlow = union(enum) {

const Block = struct {
label: ?IdRef = null,
incoming_blocks: std.ArrayListUnmanaged(Incoming) = .{},
incoming_blocks: std.ArrayListUnmanaged(Incoming) = .empty,
};

/// We need to keep track of result ids for block labels, as well as the 'incoming'
/// blocks for a block.
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .empty,
};

structured: Structured,
@ -153,10 +153,10 @@ pub const Object = struct {

/// The Zig module that this object file is generated for.
/// A map of Zig decl indices to SPIR-V decl indices.
nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .{},
nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .empty,

/// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .empty,

/// A map that maps AIR intern pool indices to SPIR-V result-ids.
intern_map: InternMap = .{},
@ -300,7 +300,7 @@ const NavGen = struct {

/// An array of function argument result-ids. Each index corresponds with the
/// function argument of the same index.
args: std.ArrayListUnmanaged(IdRef) = .{},
args: std.ArrayListUnmanaged(IdRef) = .empty,

/// A counter to keep track of how many `arg` instructions we've seen yet.
next_arg_index: u32 = 0,
@ -6270,7 +6270,7 @@ const NavGen = struct {
}
}

var incoming_structured_blocks = std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming){};
var incoming_structured_blocks: std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming) = .empty;
defer incoming_structured_blocks.deinit(self.gpa);

if (self.control_flow == .structured) {
@ -148,7 +148,7 @@ const AsmValueMap = std.StringArrayHashMapUnmanaged(AsmValue);
gpa: Allocator,

/// A list of errors that occured during processing the assembly.
errors: std.ArrayListUnmanaged(ErrorMsg) = .{},
errors: std.ArrayListUnmanaged(ErrorMsg) = .empty,

/// The source code that is being assembled.
src: []const u8,
@ -161,7 +161,7 @@ spv: *SpvModule,
func: *SpvModule.Fn,

/// `self.src` tokenized.
tokens: std.ArrayListUnmanaged(Token) = .{},
tokens: std.ArrayListUnmanaged(Token) = .empty,

/// The token that is next during parsing.
current_token: u32 = 0,
@ -172,9 +172,9 @@ inst: struct {
/// The opcode of the current instruction.
opcode: Opcode = undefined,
/// Operands of the current instruction.
operands: std.ArrayListUnmanaged(Operand) = .{},
operands: std.ArrayListUnmanaged(Operand) = .empty,
/// This is where string data resides. Strings are zero-terminated.
string_bytes: std.ArrayListUnmanaged(u8) = .{},
string_bytes: std.ArrayListUnmanaged(u8) = .empty,

/// Return a reference to the result of this instruction, if any.
fn result(self: @This()) ?AsmValue.Ref {
@ -196,7 +196,7 @@ value_map: AsmValueMap = .{},
/// This set is used to quickly transform from an opcode name to the
/// index in its instruction set. The index of the key is the
/// index in `spec.InstructionSet.core.instructions()`.
instruction_map: std.StringArrayHashMapUnmanaged(void) = .{},
instruction_map: std.StringArrayHashMapUnmanaged(void) = .empty,

/// Free the resources owned by this assembler.
pub fn deinit(self: *Assembler) void {
@ -35,7 +35,7 @@ pub const Fn = struct {
/// the end of this function definition.
body: Section = .{},
/// The decl dependencies that this function depends on.
decl_deps: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{},
decl_deps: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .empty,

/// Reset this function without deallocating resources, so that
/// it may be used to emit code for another function.
@ -141,7 +141,7 @@ sections: struct {
next_result_id: Word,

/// Cache for results of OpString instructions.
strings: std.StringArrayHashMapUnmanaged(IdRef) = .{},
strings: std.StringArrayHashMapUnmanaged(IdRef) = .empty,

/// Some types shouldn't be emitted more than one time, but cannot be caught by
/// the `intern_map` during codegen. Sometimes, IDs are compared to check if
@ -154,27 +154,27 @@ strings: std.StringArrayHashMapUnmanaged(IdRef) = .{},
cache: struct {
bool_type: ?IdRef = null,
void_type: ?IdRef = null,
int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .{},
float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .{},
int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .empty,
float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .empty,
// This cache is required so that @Vector(X, u1) in direct representation has the
// same ID as @Vector(X, bool) in indirect representation.
vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .{},
vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .empty,

builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .{},
builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty,
} = .{},

/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},
decls: std.ArrayListUnmanaged(Decl) = .empty,

/// List of dependencies, per decl. This list holds all the dependencies, sliced by the
/// begin_dep and end_dep in `self.decls`.
decl_deps: std.ArrayListUnmanaged(Decl.Index) = .{},
decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty,

/// The list of entry points that should be exported from this module.
entry_points: std.ArrayListUnmanaged(EntryPoint) = .{},
entry_points: std.ArrayListUnmanaged(EntryPoint) = .empty,

/// The list of extended instruction sets that should be imported.
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .{},
extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty,

pub fn init(gpa: Allocator) Module {
return .{
@ -15,7 +15,7 @@ const Opcode = spec.Opcode;

/// The instructions in this section. Memory is owned by the Module
/// externally associated to this Section.
instructions: std.ArrayListUnmanaged(Word) = .{},
instructions: std.ArrayListUnmanaged(Word) = .empty,

pub fn deinit(section: *Section, allocator: Allocator) void {
section.instructions.deinit(allocator);
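As the Section hunk above shows, a container defaulted to `.empty` owns no memory until something is appended, and its owner still releases it by passing the allocator to `deinit`. A rough sketch of that lifecycle, with invented names (`Scratch`, `words`) rather than code from this commit:

    const std = @import("std");

    const Scratch = struct {
        words: std.ArrayListUnmanaged(u32) = .empty,

        // Same deinit style as above: the caller supplies the allocator.
        fn deinit(self: *Scratch, allocator: std.mem.Allocator) void {
            self.words.deinit(allocator);
            self.* = undefined;
        }
    };

    test "empty default allocates nothing until used" {
        var s: Scratch = .{};
        defer s.deinit(std.testing.allocator);
        try s.words.append(std.testing.allocator, 1);
        try std.testing.expectEqual(@as(usize, 1), s.words.items.len);
    }
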
@ -26,34 +26,34 @@ base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .{},
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .empty,
/// All the string bytes of rendered C code, all squished into one array.
/// While in progress, a separate buffer is used, and then when finished, the
/// buffer is copied into this one.
string_bytes: std.ArrayListUnmanaged(u8) = .{},
string_bytes: std.ArrayListUnmanaged(u8) = .empty,
/// Tracks all the anonymous decls that are used by all the decls so they can
/// be rendered during flush().
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .{},
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .empty,
/// Sparse set of uavs that are overaligned. Underaligned anon decls are
/// lowered the same as ABI-aligned anon decls. The keys here are a subset of
/// the keys of `uavs`.
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{},
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .empty,

exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .{},
exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{},
exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .empty,
exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .empty,

/// Optimization, `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{},
fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty,
/// Optimization, `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
code_buf: std.ArrayListUnmanaged(u8) = .{},
code_buf: std.ArrayListUnmanaged(u8) = .empty,
/// Optimization, `flush` reuses this buffer rather than creating a new
/// one with every call.
lazy_fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{},
lazy_fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty,
/// Optimization, `flush` reuses this buffer rather than creating a new
/// one with every call.
lazy_code_buf: std.ArrayListUnmanaged(u8) = .{},
lazy_code_buf: std.ArrayListUnmanaged(u8) = .empty,

/// A reference into `string_bytes`.
const String = extern struct {
@ -469,7 +469,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
// `CType`s, forward decls, and non-functions first.

{
var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
defer export_names.deinit(gpa);
try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count()));
for (zcu.single_exports.values()) |export_index| {
@ -559,16 +559,16 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:

const Flush = struct {
ctype_pool: codegen.CType.Pool,
ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .{},
ctypes_buf: std.ArrayListUnmanaged(u8) = .{},
ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .empty,
ctypes_buf: std.ArrayListUnmanaged(u8) = .empty,

lazy_ctype_pool: codegen.CType.Pool,
lazy_fns: LazyFns = .{},

asm_buf: std.ArrayListUnmanaged(u8) = .{},
asm_buf: std.ArrayListUnmanaged(u8) = .empty,

/// We collect a list of buffers to write, and write them all at once with pwritev 😎
all_buffers: std.ArrayListUnmanaged(std.posix.iovec_const) = .{},
all_buffers: std.ArrayListUnmanaged(std.posix.iovec_const) = .empty,
/// Keeps track of the total bytes of `all_buffers`.
file_size: u64 = 0,

@ -26,7 +26,7 @@ repro: bool,
ptr_width: PtrWidth,
page_size: u32,

objects: std.ArrayListUnmanaged(Object) = .{},
objects: std.ArrayListUnmanaged(Object) = .empty,

sections: std.MultiArrayList(Section) = .{},
data_directories: [coff.IMAGE_NUMBEROF_DIRECTORY_ENTRIES]coff.ImageDataDirectory,
@ -38,14 +38,14 @@ data_section_index: ?u16 = null,
reloc_section_index: ?u16 = null,
idata_section_index: ?u16 = null,

locals: std.ArrayListUnmanaged(coff.Symbol) = .{},
globals: std.ArrayListUnmanaged(SymbolWithLoc) = .{},
resolver: std.StringHashMapUnmanaged(u32) = .{},
unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .{},
need_got_table: std.AutoHashMapUnmanaged(u32, void) = .{},
locals: std.ArrayListUnmanaged(coff.Symbol) = .empty,
globals: std.ArrayListUnmanaged(SymbolWithLoc) = .empty,
resolver: std.StringHashMapUnmanaged(u32) = .empty,
unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .empty,
need_got_table: std.AutoHashMapUnmanaged(u32, void) = .empty,

locals_free_list: std.ArrayListUnmanaged(u32) = .{},
globals_free_list: std.ArrayListUnmanaged(u32) = .{},
locals_free_list: std.ArrayListUnmanaged(u32) = .empty,
globals_free_list: std.ArrayListUnmanaged(u32) = .empty,

strtab: StringTable = .{},
strtab_offset: ?u32 = null,
@ -56,7 +56,7 @@ got_table: TableSection(SymbolWithLoc) = .{},

/// A table of ImportTables partitioned by the library name.
/// Key is an offset into the interning string table `temp_strtab`.
import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .{},
import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .empty,

got_table_count_dirty: bool = true,
got_table_contents_dirty: bool = true,
@ -69,10 +69,10 @@ lazy_syms: LazySymbolTable = .{},
navs: NavTable = .{},

/// List of atoms that are either synthetic or map directly to the Zig source program.
atoms: std.ArrayListUnmanaged(Atom) = .{},
atoms: std.ArrayListUnmanaged(Atom) = .empty,

/// Table of atoms indexed by the symbol index.
atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{},
atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .empty,

uavs: UavTable = .{},

@ -131,7 +131,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh atom, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
free_list: std.ArrayListUnmanaged(Atom.Index) = .empty,
};

const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
@ -148,7 +148,7 @@ const AvMetadata = struct {
atom: Atom.Index,
section: u16,
/// A list of all exports aliases of this Decl.
exports: std.ArrayListUnmanaged(u32) = .{},
exports: std.ArrayListUnmanaged(u32) = .empty,

fn deinit(m: *AvMetadata, allocator: Allocator) void {
m.exports.deinit(allocator);
@ -26,9 +26,9 @@
//! DLL#2 name
//! --- END

entries: std.ArrayListUnmanaged(SymbolWithLoc) = .{},
free_list: std.ArrayListUnmanaged(u32) = .{},
lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .{},
entries: std.ArrayListUnmanaged(SymbolWithLoc) = .empty,
free_list: std.ArrayListUnmanaged(u32) = .empty,
lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .empty,

pub fn deinit(itab: *ImportTable, allocator: Allocator) void {
itab.entries.deinit(allocator);
@ -39,11 +39,11 @@ files: std.MultiArrayList(File.Entry) = .{},
/// Long-lived list of all file descriptors.
/// We store them globally rather than per actual File so that we can re-use
/// one file handle per every object file within an archive.
file_handles: std.ArrayListUnmanaged(File.Handle) = .{},
file_handles: std.ArrayListUnmanaged(File.Handle) = .empty,
zig_object_index: ?File.Index = null,
linker_defined_index: ?File.Index = null,
objects: std.ArrayListUnmanaged(File.Index) = .{},
shared_objects: std.ArrayListUnmanaged(File.Index) = .{},
objects: std.ArrayListUnmanaged(File.Index) = .empty,
shared_objects: std.ArrayListUnmanaged(File.Index) = .empty,

/// List of all output sections and their associated metadata.
sections: std.MultiArrayList(Section) = .{},
@ -52,7 +52,7 @@ shdr_table_offset: ?u64 = null,

/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .empty,

/// Special program headers
/// PT_PHDR
@ -77,23 +77,23 @@ page_size: u32,
default_sym_version: elf.Elf64_Versym,

/// .shstrtab buffer
shstrtab: std.ArrayListUnmanaged(u8) = .{},
shstrtab: std.ArrayListUnmanaged(u8) = .empty,
/// .symtab buffer
symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty,
/// .strtab buffer
strtab: std.ArrayListUnmanaged(u8) = .{},
strtab: std.ArrayListUnmanaged(u8) = .empty,
/// Dynamic symbol table. Only populated and emitted when linking dynamically.
dynsym: DynsymSection = .{},
/// .dynstrtab buffer
dynstrtab: std.ArrayListUnmanaged(u8) = .{},
dynstrtab: std.ArrayListUnmanaged(u8) = .empty,
/// Version symbol table. Only populated and emitted when linking dynamically.
versym: std.ArrayListUnmanaged(elf.Elf64_Versym) = .{},
versym: std.ArrayListUnmanaged(elf.Elf64_Versym) = .empty,
/// .verneed section
verneed: VerneedSection = .{},
/// .got section
got: GotSection = .{},
/// .rela.dyn section
rela_dyn: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
rela_dyn: std.ArrayListUnmanaged(elf.Elf64_Rela) = .empty,
/// .dynamic section
dynamic: DynamicSection = .{},
/// .hash section
@ -109,10 +109,10 @@ plt_got: PltGotSection = .{},
/// .copyrel section
copy_rel: CopyRelSection = .{},
/// .rela.plt section
rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .empty,
/// SHT_GROUP sections
/// Applies only to a relocatable.
comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{},
comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .empty,

copy_rel_section_index: ?u32 = null,
dynamic_section_index: ?u32 = null,
@ -143,10 +143,10 @@ has_text_reloc: bool = false,
num_ifunc_dynrelocs: usize = 0,

/// List of range extension thunks.
thunks: std.ArrayListUnmanaged(Thunk) = .{},
thunks: std.ArrayListUnmanaged(Thunk) = .empty,

/// List of output merge sections with deduped contents.
merge_sections: std.ArrayListUnmanaged(MergeSection) = .{},
merge_sections: std.ArrayListUnmanaged(MergeSection) = .empty,

first_eflags: ?elf.Elf64_Word = null,

@ -5487,9 +5487,9 @@ pub const Ref = struct {
};

pub const SymbolResolver = struct {
keys: std.ArrayListUnmanaged(Key) = .{},
values: std.ArrayListUnmanaged(Ref) = .{},
table: std.AutoArrayHashMapUnmanaged(void, void) = .{},
keys: std.ArrayListUnmanaged(Key) = .empty,
values: std.ArrayListUnmanaged(Ref) = .empty,
table: std.AutoArrayHashMapUnmanaged(void, void) = .empty,

const Result = struct {
found_existing: bool,
@ -5586,7 +5586,7 @@ const Section = struct {
/// List of atoms contributing to this section.
/// TODO currently this is only used for relocations tracking in relocatable mode
/// but will be merged with atom_list_2.
atom_list: std.ArrayListUnmanaged(Ref) = .{},
atom_list: std.ArrayListUnmanaged(Ref) = .empty,

/// List of atoms contributing to this section.
/// This can be used by sections that require special handling such as init/fini array, etc.
@ -5610,7 +5610,7 @@ const Section = struct {
/// overcapacity can be negative. A simple way to have negative overcapacity is to
/// allocate a fresh text block, which will have ideal capacity, and then grow it
/// by 1 byte. It will then have -1 overcapacity.
free_list: std.ArrayListUnmanaged(Ref) = .{},
free_list: std.ArrayListUnmanaged(Ref) = .empty,
};

fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
@ -1,5 +1,5 @@
objects: std.ArrayListUnmanaged(Object) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
objects: std.ArrayListUnmanaged(Object) = .empty,
strtab: std.ArrayListUnmanaged(u8) = .empty,

pub fn isArchive(path: []const u8) !bool {
const file = try std.fs.cwd().openFile(path, .{});
@ -127,7 +127,7 @@ const strtab_delimiter = '\n';
pub const max_member_name_len = 15;

pub const ArSymtab = struct {
symtab: std.ArrayListUnmanaged(Entry) = .{},
symtab: std.ArrayListUnmanaged(Entry) = .empty,
strtab: StringTable = .{},

pub fn deinit(ar: *ArSymtab, allocator: Allocator) void {
@ -241,7 +241,7 @@ pub const ArSymtab = struct {
};

pub const ArStrtab = struct {
buffer: std.ArrayListUnmanaged(u8) = .{},
buffer: std.ArrayListUnmanaged(u8) = .empty,

pub fn deinit(ar: *ArStrtab, allocator: Allocator) void {
ar.buffer.deinit(allocator);
@ -2,7 +2,7 @@ value: i64 = 0,
size: u64 = 0,
alignment: Atom.Alignment = .@"1",
output_section_index: u32 = 0,
atoms: std.ArrayListUnmanaged(Elf.Ref) = .{},
atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty,

pub fn deinit(list: *AtomList, allocator: Allocator) void {
list.atoms.deinit(allocator);
@ -1,6 +1,6 @@
path: []const u8,
cpu_arch: ?std.Target.Cpu.Arch = null,
args: std.ArrayListUnmanaged(Elf.SystemLib) = .{},
args: std.ArrayListUnmanaged(Elf.SystemLib) = .empty,

pub fn deinit(scr: *LdScript, allocator: Allocator) void {
scr.args.deinit(allocator);
@ -1,11 +1,11 @@
index: File.Index,

symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
strtab: std.ArrayListUnmanaged(u8) = .{},
symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty,
strtab: std.ArrayListUnmanaged(u8) = .empty,

symbols: std.ArrayListUnmanaged(Symbol) = .{},
symbols_extra: std.ArrayListUnmanaged(u32) = .{},
symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .{},
symbols: std.ArrayListUnmanaged(Symbol) = .empty,
symbols_extra: std.ArrayListUnmanaged(u32) = .empty,
symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .empty,

entry_index: ?Symbol.Index = null,
dynamic_index: ?Symbol.Index = null,
@ -24,7 +24,7 @@ dso_handle_index: ?Symbol.Index = null,
rela_iplt_start_index: ?Symbol.Index = null,
rela_iplt_end_index: ?Symbol.Index = null,
global_pointer_index: ?Symbol.Index = null,
start_stop_indexes: std.ArrayListUnmanaged(u32) = .{},
start_stop_indexes: std.ArrayListUnmanaged(u32) = .empty,

output_symtab_ctx: Elf.SymtabCtx = .{},

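The same decl literal applies to the unmanaged hash maps touched throughout these hunks, including the void-valued maps used as membership sets. A small hedged sketch (the `seen` set is hypothetical, not taken from the linker sources), assuming the std these changes target, where `AutoHashMapUnmanaged` also declares `empty`:

    const std = @import("std");

    test "void-valued unmanaged map as a set" {
        const gpa = std.testing.allocator;
        // `.empty` works for AutoHashMapUnmanaged the same way it does for array lists.
        var seen: std.AutoHashMapUnmanaged(u32, void) = .empty;
        defer seen.deinit(gpa);
        try seen.put(gpa, 7, {});
        try std.testing.expect(seen.contains(7));
    }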