fix code broken from previous commit

Jacob G-W 2021-06-19 21:10:22 -04:00 committed by Andrew Kelley
parent b83b3883ba
commit 9fffffb07b
162 changed files with 720 additions and 148 deletions
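Note: the parent commit presumably turned unused parameters, captures, and locals into compile errors, and this commit repairs the files that stopped building under that rule. Nearly every hunk below applies one mechanical fix: discard an unused value with `_ = name;`, turn an unused capture into `|_|`, or drop the capture entirely. A minimal sketch of the discard idiom in the Zig of this era (mid-2021); the names here are hypothetical:

const std = @import("std");

// A callback whose `context` parameter is demanded by an API signature but
// never read; the explicit discard silences the unused-parameter error.
fn onTick(context: void, tick: u64) void {
    _ = context;
    std.debug.print("tick {d}\n", .{tick});
}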

View File

@ -295,7 +295,7 @@ fn refreshWithHeldLock(self: *Progress) void {
end += 1;
}
- _ = file.write(self.output_buffer[0..end]) catch |e| {
+ _ = file.write(self.output_buffer[0..end]) catch {
// Stop trying to write to this file once it errors.
self.terminal = null;
};
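Note: where a `catch` block ignores the error value, the `|e|` capture itself is removed, as in the `refreshWithHeldLock` hunk above. A sketch of the same idiom; the fallback function is illustrative, not from this commit:

const std = @import("std");

// The catch body never inspects the error, so it takes no capture.
fn parseOrZero(text: []const u8) u32 {
    return std.fmt.parseInt(u32, text, 10) catch {
        return 0;
    };
}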

View File

@ -162,6 +162,7 @@ pub fn format(
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = options;
if (fmt.len != 0) @compileError("Unknown format string: '" ++ fmt ++ "'");
try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre});
@ -259,7 +260,7 @@ test "SemanticVersion format" {
// Invalid version string that may overflow.
const big_invalid = "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12";
- if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |err| {}
+ if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |_| {}
}
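Note: the error branch of an `if` over an error union follows the same rule, with an unused `|err|` capture becoming `|_|`. A small sketch, assuming `std.SemanticVersion.parse` as in the file above:

const std = @import("std");

// Only success versus failure matters, so both captures are discarded.
fn isValidVersion(text: []const u8) bool {
    if (std.SemanticVersion.parse(text)) |_| {
        return true;
    } else |_| {
        return false;
    }
}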
test "SemanticVersion precedence" {

View File

@ -40,12 +40,18 @@ else
pub const SingleThreadedCondition = struct {
pub fn wait(cond: *SingleThreadedCondition, mutex: *Mutex) void {
_ = cond;
_ = mutex;
unreachable; // deadlock detected
}
- pub fn signal(cond: *SingleThreadedCondition) void {}
+ pub fn signal(cond: *SingleThreadedCondition) void {
+ _ = cond;
+ }
- pub fn broadcast(cond: *SingleThreadedCondition) void {}
+ pub fn broadcast(cond: *SingleThreadedCondition) void {
+ _ = cond;
+ }
};
pub const WindowsCondition = struct {

View File

@ -105,6 +105,7 @@ pub const DebugEvent = struct {
}
pub fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
_ = timeout;
switch (ev.state) {
.unset => return .timed_out,
.set => return .event_set,
@ -174,7 +175,10 @@ pub const AtomicEvent = struct {
};
pub const SpinFutex = struct {
- fn wake(waiters: *u32, wake_count: u32) void {}
+ fn wake(waiters: *u32, wake_count: u32) void {
+ _ = waiters;
+ _ = wake_count;
+ }
fn wait(waiters: *u32, timeout: ?u64) !void {
var timer: time.Timer = undefined;
@ -193,6 +197,7 @@ pub const AtomicEvent = struct {
pub const LinuxFutex = struct {
fn wake(waiters: *u32, wake_count: u32) void {
_ = wake_count;
const waiting = std.math.maxInt(i32); // wake_count
const ptr = @ptrCast(*const i32, waiters);
const rc = linux.futex_wake(ptr, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, waiting);

View File

@ -40,9 +40,11 @@ pub fn StringArrayHashMapUnmanaged(comptime V: type) type {
pub const StringContext = struct {
pub fn hash(self: @This(), s: []const u8) u32 {
_ = self;
return hashString(s);
}
pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
_ = self;
return eqlString(a, b);
}
};
@ -1335,6 +1337,7 @@ pub fn ArrayHashMapUnmanaged(
}
fn removeSlot(self: *Self, removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
_ = self;
const start_index = removed_slot +% 1;
const end_index = start_index +% indexes.len;
@ -1626,6 +1629,7 @@ pub fn ArrayHashMapUnmanaged(
}
}
fn dumpIndex(self: Self, header: *IndexHeader, comptime I: type) void {
_ = self;
const p = std.debug.print;
p(" index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() });
const indexes = header.indexes(I);
@ -1918,7 +1922,7 @@ test "iterator hash map" {
try testing.expect(count == 3);
try testing.expect(it.next() == null);
- for (buffer) |v, i| {
+ for (buffer) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
@ -1930,7 +1934,7 @@ test "iterator hash map" {
if (count >= 2) break;
}
- for (buffer[0..2]) |v, i| {
+ for (buffer[0..2]) |_, i| {
try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
}
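Note: in this era's Zig, `for (slice) |value, index|` binds up to two captures, so a loop that needs only the index discards the value as `|_, i|`, and a loop that never touches the index drops the second capture, as the hash-map test hunks above show. A sketch of both forms:

const std = @import("std");

test "for-capture discards" {
    const values = [_]u32{ 10, 20, 30 };
    var sum: usize = 0;
    // Only the index is needed: discard the value capture.
    for (values) |_, i| {
        sum += i;
    }
    // Only the value is needed: omit the index capture.
    for (values) |v| {
        sum += v;
    }
    try std.testing.expect(sum == 63);
}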
@ -2154,6 +2158,7 @@ test "compile everything" {
pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
_ = ctx;
return getAutoHashFn(usize, void)({}, @ptrToInt(key));
}
}.hash;
@ -2162,6 +2167,7 @@ pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context,
pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(ctx: Context, a: K, b: K) bool {
_ = ctx;
return a == b;
}
}.eql;
@ -2177,6 +2183,7 @@ pub fn AutoContext(comptime K: type) type {
pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
_ = ctx;
if (comptime trait.hasUniqueRepresentation(K)) {
return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
} else {
@ -2191,6 +2198,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(ctx: Context, a: K, b: K) bool {
_ = ctx;
return meta.eql(a, b);
}
}.eql;
@ -2217,6 +2225,7 @@ pub fn autoEqlIsCheap(comptime K: type) bool {
pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) {
return struct {
fn hash(ctx: Context, key: K) u32 {
_ = ctx;
var hasher = Wyhash.init(0);
std.hash.autoHashStrat(&hasher, key, strategy);
return @truncate(u32, hasher.final());

View File

@ -232,6 +232,7 @@ test "Atomic.loadUnchecked" {
test "Atomic.storeUnchecked" {
inline for (atomicIntTypes()) |Int| {
_ = Int;
var x = Atomic(usize).init(5);
x.storeUnchecked(10);
try testing.expectEqual(x.loadUnchecked(), 10);
@ -250,6 +251,7 @@ test "Atomic.load" {
test "Atomic.store" {
inline for (atomicIntTypes()) |Int| {
inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
_ = Int;
var x = Atomic(usize).init(5);
x.store(10, ordering);
try testing.expectEqual(x.load(.SeqCst), 10);
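Note: comptime captures of an `inline for` are covered too; the Atomic tests above keep iterating over the integer types but discard `Int` once the body no longer refers to it. A sketch:

const std = @import("std");

test "discard a comptime capture" {
    inline for (.{ u8, u16, u32 }) |Int| {
        _ = Int; // the type capture is unused in this body
        try std.testing.expect(true);
    }
}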

View File

@ -84,6 +84,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
/// Returns the number of bits in this bit set
pub inline fn capacity(self: Self) usize {
_ = self;
return bit_length;
}
@ -311,6 +312,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// Returns the number of bits in this bit set
pub inline fn capacity(self: Self) usize {
_ = self;
return bit_length;
}
@ -373,7 +375,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// Flips every bit in the bit set.
pub fn toggleAll(self: *Self) void {
- for (self.masks) |*mask, i| {
+ for (self.masks) |*mask| {
mask.* = ~mask.*;
}
@ -642,7 +644,7 @@ pub const DynamicBitSetUnmanaged = struct {
if (bit_length == 0) return;
const num_masks = numMasks(self.bit_length);
- for (self.masks[0..num_masks]) |*mask, i| {
+ for (self.masks[0..num_masks]) |*mask| {
mask.* = ~mask.*;
}

View File

@ -390,6 +390,7 @@ pub const Builder = struct {
}
pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) LibExeObjStep.SharedLibKind {
_ = self;
return .{
.versioned = .{
.major = major,
@ -543,7 +544,7 @@ pub const Builder = struct {
return null;
},
.scalar => |s| {
- const n = std.fmt.parseFloat(T, s) catch |err| {
+ const n = std.fmt.parseFloat(T, s) catch {
warn("Expected -D{s} to be a float of type {s}.\n\n", .{ name, @typeName(T) });
self.markInvalidUserInput();
return null;
@ -3129,7 +3130,9 @@ pub const Step = struct {
self.dependencies.append(other) catch unreachable;
}
- fn makeNoOp(self: *Step) anyerror!void {}
+ fn makeNoOp(self: *Step) anyerror!void {
+ _ = self;
+ }
pub fn cast(step: *Step, comptime T: type) ?*T {
if (step.id == T.base_id) {

View File

@ -139,6 +139,7 @@ const BinaryElfOutput = struct {
}
fn segmentSortCompare(context: void, left: *BinaryElfSegment, right: *BinaryElfSegment) bool {
_ = context;
if (left.physicalAddress < right.physicalAddress) {
return true;
}
@ -149,6 +150,7 @@ const BinaryElfOutput = struct {
}
fn sectionSortCompare(context: void, left: *BinaryElfSection, right: *BinaryElfSection) bool {
_ = context;
return left.binaryOffset < right.binaryOffset;
}
};

View File

@ -65,6 +65,8 @@ pub const StackTrace = struct {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
@ -521,6 +523,7 @@ pub const Version = struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = options;
if (fmt.len == 0) {
if (self.patch == 0) {
if (self.minor == 0) {
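Note: custom `format` implementations are the most common case in this commit; most of them ignore the `fmt` string, the `options`, or both, and now must say so. A sketch of the resulting shape, using the standard format-method signature of this era; the `Point` type is hypothetical:

const std = @import("std");

const Point = struct {
    x: i32,
    y: i32,

    pub fn format(
        self: Point,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = fmt;
        _ = options;
        try writer.print("({d},{d})", .{ self.x, self.y });
    }
};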

View File

@ -23,6 +23,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type {
var sorted_kvs: [kvs.len]KV = undefined;
const lenAsc = (struct {
fn lenAsc(context: void, a: KV, b: KV) bool {
_ = context;
return a.key.len < b.key.len;
}
}).lenAsc;

View File

@ -346,7 +346,7 @@ test "ed25519 test vectors" {
.expected = error.IdentityElement, // 11 - small-order A
},
};
- for (entries) |entry, i| {
+ for (entries) |entry| {
var msg: [entry.msg_hex.len / 2]u8 = undefined;
_ = try fmt.hexToBytes(&msg, entry.msg_hex);
var public_key: [32]u8 = undefined;

View File

@ -394,6 +394,7 @@ pub const Blake3 = struct {
/// Construct a new `Blake3` for the key derivation function. The context
/// string should be hardcoded, globally unique, and application-specific.
pub fn initKdf(context: []const u8, options: KdfOptions) Blake3 {
_ = options;
var context_hasher = Blake3.init_internal(IV, DERIVE_KEY_CONTEXT);
context_hasher.update(context);
var context_key: [KEY_LEN]u8 = undefined;

View File

@ -219,6 +219,7 @@ pub const Hash = struct {
const Self = @This();
pub fn init(options: Options) Self {
_ = options;
return Self{
.state = State{ .data = [_]u32{0} ** (State.BLOCKBYTES / 4) },
.buf_off = 0,

View File

@ -45,6 +45,7 @@ pub const Md5 = struct {
total_len: u64,
pub fn init(options: Options) Self {
_ = options;
return Self{
.s = [_]u32{
0x67452301,

View File

@ -63,6 +63,7 @@ pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) Non
/// Return -s (mod L)
pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
_ = s;
return (try Scalar.fromBytes(a, endian)).neg().toBytes(endian);
}

View File

@ -43,6 +43,7 @@ pub const Sha1 = struct {
total_len: u64 = 0,
pub fn init(options: Options) Self {
_ = options;
return Self{
.s = [_]u32{
0x67452301,

View File

@ -95,6 +95,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
total_len: u64 = 0,
pub fn init(options: Options) Self {
_ = options;
return Self{
.s = [_]u32{
params.iv0,
@ -462,6 +463,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
total_len: u128 = 0,
pub fn init(options: Options) Self {
_ = options;
return Self{
.s = [_]u64{
params.iv0,

View File

@ -28,6 +28,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
rate: usize,
pub fn init(options: Options) Self {
_ = options;
return Self{ .s = [_]u8{0} ** 200, .offset = 0, .rate = 200 - (bits / 4) };
}

View File

@ -84,7 +84,7 @@ fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void {
os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
0,
- ) catch |err| {
+ ) catch {
// Could not allocate memory for the local state, fall back to
// the OS syscall.
return fillWithOsEntropy(buffer);

View File

@ -325,6 +325,7 @@ pub fn writeStackTrace(
debug_info: *DebugInfo,
tty_config: TTY.Config,
) !void {
_ = allocator;
if (builtin.strip_debug_info) return error.MissingDebugInfo;
var frame_index: usize = 0;
var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
@ -930,6 +931,7 @@ const MachoSymbol = struct {
}
fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool {
_ = context;
return lhs.address() < rhs.address();
}
};
@ -1134,6 +1136,7 @@ pub const DebugInfo = struct {
if (os.dl_iterate_phdr(&ctx, anyerror, struct {
fn callback(info: *os.dl_phdr_info, size: usize, context: *CtxTy) !void {
_ = size;
// The base address is too high
if (context.address < info.dlpi_addr)
return;
@ -1189,6 +1192,8 @@ pub const DebugInfo = struct {
}
fn lookupModuleHaiku(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
_ = self;
_ = address;
@panic("TODO implement lookup module for Haiku");
}
};

View File

@ -283,6 +283,7 @@ fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: bu
}
fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
_ = allocator;
// TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
// `nosuspend` should be removed from all the function calls once it is fixed.
return FormValue{
@ -310,6 +311,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
// TODO the nosuspends here are workarounds
fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
_ = allocator;
return FormValue{
.Ref = switch (size) {
1 => try nosuspend in_stream.readInt(u8, endian),
@ -453,13 +455,13 @@ pub const DwarfInfo = struct {
if (this_die_obj.getAttr(AT_name)) |_| {
const name = try this_die_obj.getAttrString(di, AT_name);
break :x name;
- } else if (this_die_obj.getAttr(AT_abstract_origin)) |ref| {
+ } else if (this_die_obj.getAttr(AT_abstract_origin)) |_| {
// Follow the DIE it points to and repeat
const ref_offset = try this_die_obj.getAttrRef(AT_abstract_origin);
if (ref_offset > next_offset) return error.InvalidDebugInfo;
try seekable.seekTo(this_unit_offset + ref_offset);
this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
- } else if (this_die_obj.getAttr(AT_specification)) |ref| {
+ } else if (this_die_obj.getAttr(AT_specification)) |_| {
// Follow the DIE it points to and repeat
const ref_offset = try this_die_obj.getAttrRef(AT_specification);
if (ref_offset > next_offset) return error.InvalidDebugInfo;

View File

@ -66,6 +66,7 @@ pub fn get_DYNAMIC() ?[*]elf.Dyn {
}
pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
_ = phdrs;
const _DYNAMIC = get_DYNAMIC() orelse {
// No PT_DYNAMIC means this is either a statically-linked program or a
// badly corrupted dynamically-linked one.

View File

@ -18,7 +18,7 @@ const EnumField = std.builtin.TypeInfo.EnumField;
pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type {
const StructField = std.builtin.TypeInfo.StructField;
var fields: []const StructField = &[_]StructField{};
- for (std.meta.fields(E)) |field, i| {
+ for (std.meta.fields(E)) |field| {
fields = fields ++ &[_]StructField{.{
.name = field.name,
.field_type = Data,
@ -144,7 +144,7 @@ pub fn directEnumArrayDefault(
) [directEnumArrayLen(E, max_unused_slots)]Data {
const len = comptime directEnumArrayLen(E, max_unused_slots);
var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined;
- inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f, i| {
+ inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| {
const enum_value = @field(E, f.name);
const index = @intCast(usize, @enumToInt(enum_value));
result[index] = @field(init_values, f.name);
@ -334,6 +334,7 @@ pub fn EnumArray(comptime E: type, comptime V: type) type {
/// TODO: Once #8169 is fixed, consider switching this param
/// back to an optional.
pub fn NoExtension(comptime Self: type) type {
_ = Self;
return NoExt;
}
const NoExt = struct {};
@ -729,6 +730,7 @@ test "std.enums.ensureIndexer" {
}
fn ascByValue(ctx: void, comptime a: EnumField, comptime b: EnumField) bool {
_ = ctx;
return a.value < b.value;
}
pub fn EnumIndexer(comptime E: type) type {
@ -743,9 +745,11 @@ pub fn EnumIndexer(comptime E: type) type {
pub const Key = E;
pub const count: usize = 0;
pub fn indexOf(e: E) usize {
_ = e;
unreachable;
}
pub fn keyForIndex(i: usize) E {
_ = i;
unreachable;
}
};

View File

@ -345,7 +345,7 @@ pub const Loop = struct {
);
errdefer windows.CloseHandle(self.os_data.io_port);
- for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+ for (self.eventfd_resume_nodes) |*eventfd_node| {
eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
.data = ResumeNode.EventFd{
.base = ResumeNode{

View File

@ -369,6 +369,7 @@ pub fn format(
}
pub fn formatAddress(value: anytype, options: FormatOptions, writer: anytype) @TypeOf(writer).Error!void {
_ = options;
const T = @TypeOf(value);
switch (@typeInfo(T)) {
@ -553,7 +554,7 @@ pub fn formatType(
.Many, .C => {
if (actual_fmt.len == 0)
@compileError("cannot format pointer without a specifier (i.e. {s} or {*})");
- if (ptr_info.sentinel) |sentinel| {
+ if (ptr_info.sentinel) |_| {
return formatType(mem.span(value), actual_fmt, options, writer, max_depth);
}
if (ptr_info.child == u8) {
@ -741,6 +742,8 @@ fn formatSliceHexImpl(comptime case: Case) type {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
var buf: [2]u8 = undefined;
for (bytes) |c| {
@ -777,6 +780,8 @@ fn formatSliceEscapeImpl(comptime case: Case) type {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
var buf: [4]u8 = undefined;
buf[0] = '\\';
@ -820,6 +825,7 @@ fn formatSizeImpl(comptime radix: comptime_int) type {
options: FormatOptions,
writer: anytype,
) !void {
_ = fmt;
if (value == 0) {
return writer.writeAll("0B");
}
@ -903,6 +909,7 @@ pub fn formatAsciiChar(
options: FormatOptions,
writer: anytype,
) !void {
_ = options;
return writer.writeAll(@as(*const [1]u8, &c));
}
@ -1362,6 +1369,8 @@ pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, case: Case, options
}
fn formatDuration(ns: u64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
var ns_remaining = ns;
inline for (.{
.{ .ns = 365 * std.time.ns_per_day, .sep = 'y' },
@ -2152,6 +2161,7 @@ test "custom" {
options: FormatOptions,
writer: anytype,
) !void {
_ = options;
if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) {
return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
} else if (comptime std.mem.eql(u8, fmt, "d")) {
@ -2340,6 +2350,7 @@ test "formatType max_depth" {
options: FormatOptions,
writer: anytype,
) !void {
_ = options;
if (fmt.len == 0) {
return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
} else {

View File

@ -1541,7 +1541,7 @@ pub const Dir = struct {
self: Dir,
target_path: []const u8,
sym_link_path: []const u8,
- flags: SymLinkFlags,
+ _: SymLinkFlags,
) !void {
return os.symlinkatWasi(target_path, self.fd, sym_link_path);
}
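Note: an alternative to `_ = x;`, used for the WASI `symlinkat` wrapper above, is to leave the parameter unnamed by declaring it `_: T`. A sketch contrasting the two spellings (the function is hypothetical):

// The unused flags argument is anonymous; the unused path is named and
// then discarded. Either spelling satisfies the unused-parameter rule.
fn chmodStub(path: []const u8, _: u32) void {
    _ = path;
}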
@ -1879,6 +1879,7 @@ pub const Dir = struct {
/// * NtDll prefixed
/// TODO currently this ignores `flags`.
pub fn accessW(self: Dir, sub_path_w: [*:0]const u16, flags: File.OpenFlags) AccessError!void {
_ = flags;
return os.faccessatW(self.fd, sub_path_w, 0, 0);
}

View File

@ -579,7 +579,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
// Now we know the disk designator to use, if any, and what kind it is. And our result
// is big enough to append all the paths to.
var correct_disk_designator = true;
- for (paths[first_index..]) |p, i| {
+ for (paths[first_index..]) |p| {
const parsed = windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
@ -660,7 +660,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
}
errdefer allocator.free(result);
- for (paths[first_index..]) |p, i| {
+ for (paths[first_index..]) |p| {
var it = mem.tokenize(p, "/");
while (it.next()) |component| {
if (mem.eql(u8, component, ".")) {

View File

@ -541,6 +541,7 @@ test "makePath, put some files in it, deleteTree" {
try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
try tmp.dir.deleteTree("os_test_tmp");
if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
_ = dir;
@panic("expected error");
} else |err| {
try testing.expect(err == error.FileNotFound);
@ -638,6 +639,7 @@ test "access file" {
try tmp.dir.makePath("os_test_tmp");
if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
_ = ok;
@panic("expected error");
} else |err| {
try testing.expect(err == error.FileNotFound);

View File

@ -36,6 +36,8 @@ pub const PreopenType = union(PreopenTypeTag) {
}
pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void {
_ = fmt;
_ = options;
try out_stream.print("PreopenType{{ ", .{});
switch (self) {
PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{std.zig.fmtId(path)}),

View File

@ -375,6 +375,7 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {
}
fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 {
_ = seed;
return CityHash32.hash(str);
}

View File

@ -29,6 +29,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
return struct {
fn hash(ctx: Context, key: K) u64 {
_ = ctx;
if (comptime trait.hasUniqueRepresentation(K)) {
return Wyhash.hash(0, std.mem.asBytes(&key));
} else {
@ -43,6 +44,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
return struct {
fn eql(ctx: Context, a: K, b: K) bool {
_ = ctx;
return meta.eql(a, b);
}
}.eql;
@ -78,9 +80,11 @@ pub fn StringHashMapUnmanaged(comptime V: type) type {
pub const StringContext = struct {
pub fn hash(self: @This(), s: []const u8) u64 {
_ = self;
return hashString(s);
}
pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
_ = self;
return eqlString(a, b);
}
};
@ -1887,9 +1891,11 @@ test "std.hash_map clone" {
test "std.hash_map getOrPutAdapted" {
const AdaptedContext = struct {
fn eql(self: @This(), adapted_key: []const u8, test_key: u64) bool {
_ = self;
return std.fmt.parseInt(u64, adapted_key, 10) catch unreachable == test_key;
}
fn hash(self: @This(), adapted_key: []const u8) u64 {
_ = self;
const key = std.fmt.parseInt(u64, adapted_key, 10) catch unreachable;
return (AutoContext(u64){}).hash(key);
}

View File

@ -108,6 +108,8 @@ const CAllocator = struct {
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = allocator;
_ = return_address;
assert(len > 0);
assert(std.math.isPowerOfTwo(alignment));
@ -134,6 +136,9 @@ const CAllocator = struct {
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
_ = allocator;
_ = buf_align;
_ = return_address;
if (new_len == 0) {
alignedFree(buf.ptr);
return 0;
@ -178,6 +183,9 @@ fn rawCAlloc(
len_align: u29,
ret_addr: usize,
) Allocator.Error![]u8 {
_ = self;
_ = len_align;
_ = ret_addr;
assert(ptr_align <= @alignOf(std.c.max_align_t));
const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
return ptr[0..len];
@ -191,6 +199,9 @@ fn rawCResize(
len_align: u29,
ret_addr: usize,
) Allocator.Error!usize {
_ = self;
_ = old_align;
_ = ret_addr;
if (new_len == 0) {
c.free(buf.ptr);
return 0;
@ -231,6 +242,8 @@ pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
const PageAllocator = struct {
fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = allocator;
_ = ra;
assert(n > 0);
const aligned_len = mem.alignForward(n, mem.page_size);
@ -334,6 +347,9 @@ const PageAllocator = struct {
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
_ = allocator;
_ = buf_align;
_ = return_address;
const new_size_aligned = mem.alignForward(new_size, mem.page_size);
if (builtin.os.tag == .windows) {
@ -482,6 +498,8 @@ const WasmPageAllocator = struct {
}
fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
_ = allocator;
_ = ra;
const page_count = nPages(len);
const page_idx = try allocPages(page_count, alignment);
return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
@ -542,6 +560,9 @@ const WasmPageAllocator = struct {
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
_ = allocator;
_ = buf_align;
_ = return_address;
const aligned_len = mem.alignForward(buf.len, mem.page_size);
if (new_len > aligned_len) return error.OutOfMemory;
const current_n = nPages(aligned_len);
@ -588,6 +609,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
len_align: u29,
return_address: usize,
) error{OutOfMemory}![]u8 {
_ = return_address;
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
const amt = n + ptr_align - 1 + @sizeOf(usize);
@ -622,6 +644,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
len_align: u29,
return_address: usize,
) error{OutOfMemory}!usize {
_ = buf_align;
_ = return_address;
const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
if (new_size == 0) {
os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
@ -694,6 +718,8 @@ pub const FixedBufferAllocator = struct {
}
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
return error.OutOfMemory;
@ -716,6 +742,8 @@ pub const FixedBufferAllocator = struct {
len_align: u29,
return_address: usize,
) Allocator.Error!usize {
_ = buf_align;
_ = return_address;
const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
assert(self.ownsSlice(buf)); // sanity check
@ -766,6 +794,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
}
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
while (true) {

View File

@ -66,6 +66,8 @@ pub const ArenaAllocator = struct {
}
fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
_ = len_align;
_ = ra;
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
@ -95,6 +97,9 @@ pub const ArenaAllocator = struct {
}
fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
_ = buf_align;
_ = len_align;
_ = ret_addr;
const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;

View File

@ -37,9 +37,9 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
const self = @fieldParentPtr(Self, "allocator", allocator);
self.writer.print("alloc : {}", .{len}) catch {};
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
- if (result) |buff| {
+ if (result) |_| {
self.writer.print(" success!\n", .{}) catch {};
- } else |err| {
+ } else |_| {
self.writer.print(" failure!\n", .{}) catch {};
}
return result;

View File

@ -65,7 +65,7 @@ pub fn ScopedLoggingAllocator(
) error{OutOfMemory}![]u8 {
const self = @fieldParentPtr(Self, "allocator", allocator);
const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
- if (result) |buff| {
+ if (result) |_| {
logHelper(
success_log_level,
"alloc - success - len: {}, ptr_align: {}, len_align: {}",

View File

@ -161,6 +161,7 @@ pub const null_writer = @as(NullWriter, .{ .context = {} });
const NullWriter = Writer(void, error{}, dummyWrite);
fn dummyWrite(context: void, data: []const u8) error{}!usize {
_ = context;
return data.len;
}

View File

@ -149,7 +149,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
var out_bits_total = @as(usize, 0);
//@NOTE: I'm not sure this is a good idea, maybe alignToByte should be forced
if (self.bit_count > 0) {
- for (buffer) |*b, i| {
+ for (buffer) |*b| {
b.* = try self.readBits(u8, u8_bit_count, &out_bits);
out_bits_total += out_bits;
}

View File

@ -128,7 +128,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
pub fn write(self: *Self, buffer: []const u8) Error!usize {
// TODO: I'm not sure this is a good idea, maybe flushBits should be forced
if (self.bit_count > 0) {
- for (buffer) |b, i|
+ for (buffer) |b|
try self.writeBits(b, u8_bit_count);
return buffer.len;
}

View File

@ -1221,11 +1221,11 @@ test "json.token premature object close" {
pub fn validate(s: []const u8) bool {
var p = StreamingParser.init();
- for (s) |c, i| {
+ for (s) |c| {
var token1: ?Token = undefined;
var token2: ?Token = undefined;
- p.feed(c, &token1, &token2) catch |err| {
+ p.feed(c, &token1, &token2) catch {
return false;
};
}
@ -1410,7 +1410,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
if (a == null or b == null) return false;
return parsedEqual(a.?, b.?);
},
- .Union => |unionInfo| {
+ .Union => {
if (info.tag_type) |UnionTag| {
const tag_a = std.meta.activeTag(a);
const tag_b = std.meta.activeTag(b);
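Note: switch prongs shed their payload captures the same way; `.Union => |unionInfo| {` above and the `.Some => |some_escapes| {` prongs below become plain `.Union => {` and `.Some => {` once the payload goes unread. A sketch:

const std = @import("std");

const Escapes = union(enum) {
    None,
    Some: u32,
};

test "drop an unused switch capture" {
    const e = Escapes{ .Some = 3 };
    switch (e) {
        .None => return error.TestUnexpectedResult,
        // The payload is intentionally ignored, so the prong takes no capture.
        .Some => try std.testing.expect(true),
    }
}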
@ -1771,7 +1771,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
switch (stringToken.escapes) {
.None => return allocator.dupe(u8, source_slice),
- .Some => |some_escapes| {
+ .Some => {
const output = try allocator.alloc(u8, stringToken.decodedLength());
errdefer allocator.free(output);
try unescapeValidString(output, source_slice);
@ -2391,7 +2391,7 @@ pub const Parser = struct {
const slice = s.slice(input, i);
switch (s.escapes) {
.None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
- .Some => |some_escapes| {
+ .Some => {
const output = try allocator.alloc(u8, s.decodedLength());
errdefer allocator.free(output);
try unescapeValidString(output, slice);
@ -2401,6 +2401,7 @@ pub const Parser = struct {
}
fn parseNumber(p: *Parser, n: std.meta.TagPayload(Token, Token.Number), input: []const u8, i: usize) !Value {
_ = p;
return if (n.is_integer)
Value{
.Integer = std.fmt.parseInt(i64, n.slice(input, i), 10) catch |e| switch (e) {
@ -2815,7 +2816,7 @@ pub fn stringify(
if (child_options.whitespace) |*child_whitespace| {
child_whitespace.indent_level += 1;
}
- inline for (S.fields) |Field, field_i| {
+ inline for (S.fields) |Field| {
// don't include void fields
if (Field.field_type == void) continue;
@ -3114,6 +3115,7 @@ test "stringify struct with custom stringifier" {
options: StringifyOptions,
out_stream: anytype,
) !void {
_ = value;
try out_stream.writeAll("[\"something special\",");
try stringify(42, options, out_stream);
try out_stream.writeByte(']');

View File

@ -63,7 +63,7 @@ pub fn SinglyLinkedList(comptime T: type) type {
pub fn countChildren(node: *const Node) usize {
var count: usize = 0;
var it: ?*const Node = node.next;
- while (it) |n| : (it = n.next) {
+ while (it) |_| : (it = n.next) {
count += 1;
}
return count;

View File

@ -458,6 +458,7 @@ pub const Mutable = struct {
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
_ = opt_allocator;
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
mem.set(Limb, rma.limbs, 0);
@ -676,6 +677,7 @@ pub const Mutable = struct {
///
/// `limbs_buffer` is used for temporary storage during the operation.
pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
_ = limbs_buffer;
assert(rma.limbs.ptr != x.limbs.ptr); // illegal aliasing
assert(rma.limbs.ptr != y.limbs.ptr); // illegal aliasing
return gcdLehmer(rma, x, y, allocator);
@ -1141,6 +1143,7 @@ pub const Const = struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = options;
comptime var radix = 10;
comptime var case: std.fmt.Case = .lower;
@ -1618,6 +1621,7 @@ pub const Managed = struct {
/// Converts self to a string in the requested base. Memory is allocated from the provided
/// allocator and not the one present in self.
pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
_ = allocator;
if (base < 2 or base > 16) return error.InvalidBase;
return self.toConst().toStringAlloc(self.allocator, base, case);
}

View File

@ -139,6 +139,11 @@ var failAllocator = Allocator{
.resizeFn = Allocator.noResize,
};
fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
_ = self;
_ = n;
_ = alignment;
_ = len_align;
_ = ra;
return error.OutOfMemory;
}
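Note: `failAllocator` above is the extreme case, a stub that uses none of its arguments and so discards every one individually, which is the pattern this commit applies to all the allocator shims. A reduced sketch (the stub is hypothetical):

// Each parameter of a deliberate always-fail stub gets its own discard.
fn alwaysFail(n: usize, alignment: u29, ra: usize) error{OutOfMemory}![]u8 {
    _ = n;
    _ = alignment;
    _ = ra;
    return error.OutOfMemory;
}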

View File

@ -55,6 +55,10 @@ pub fn noResize(
len_align: u29,
ret_addr: usize,
) Error!usize {
_ = self;
_ = buf_align;
_ = len_align;
_ = ret_addr;
if (new_len > buf.len)
return error.OutOfMemory;
return new_len;

View File

@ -843,6 +843,7 @@ pub const refAllDecls = @compileError("refAllDecls has been moved from std.meta
pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const Decl {
const S = struct {
fn declNameLessThan(context: void, lhs: *const Decl, rhs: *const Decl) bool {
_ = context;
return mem.lessThan(u8, lhs.name, rhs.name);
}
};

View File

@ -108,6 +108,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
}
pub fn offset(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) usize {
_ = p;
var off: usize = 0;
inline for (@typeInfo(Fields).Struct.fields) |field_info, i| {
const active = (self.bits & (1 << i)) != 0;

View File

@ -92,6 +92,7 @@ pub fn MultiArrayList(comptime S: type) type {
}
const Sort = struct {
fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool {
_ = trash;
return lhs.alignment > rhs.alignment;
}
};
@ -221,7 +222,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// retain list ordering.
pub fn swapRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |field_info, i| {
+ inline for (fields) |_, i| {
const field_slice = slices.items(@intToEnum(Field, i));
field_slice[index] = field_slice[self.len - 1];
field_slice[self.len - 1] = undefined;
@ -233,7 +234,7 @@ pub fn MultiArrayList(comptime S: type) type {
/// after it to preserve order.
pub fn orderedRemove(self: *Self, index: usize) void {
const slices = self.slice();
- inline for (fields) |field_info, field_index| {
+ inline for (fields) |_, field_index| {
const field_slice = slices.items(@intToEnum(Field, field_index));
var i = index;
while (i < self.len - 1) : (i += 1) {

View File

@ -270,6 +270,8 @@ pub const Ip4Address = extern struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
const bytes = @ptrCast(*const [4]u8, &self.sa.addr);
try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{
bytes[0],
@ -281,6 +283,7 @@ pub const Ip4Address = extern struct {
}
pub fn getOsSockLen(self: Ip4Address) os.socklen_t {
_ = self;
return @sizeOf(os.sockaddr_in);
}
};
@ -556,6 +559,8 @@ pub const Ip6Address = extern struct {
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
const port = mem.bigToNative(u16, self.sa.port);
if (mem.eql(u8, self.sa.addr[0..12], &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff })) {
try std.fmt.format(out_stream, "[::ffff:{}.{}.{}.{}]:{}", .{
@ -598,6 +603,7 @@ pub const Ip6Address = extern struct {
}
pub fn getOsSockLen(self: Ip6Address) os.socklen_t {
_ = self;
return @sizeOf(os.sockaddr_in6);
}
};
@ -1062,6 +1068,7 @@ fn IN6_IS_ADDR_SITELOCAL(a: [16]u8) bool {
// Parameters `b` and `a` swapped to make this descending.
fn addrCmpLessThan(context: void, b: LookupAddr, a: LookupAddr) bool {
_ = context;
return a.sortkey < b.sortkey;
}

View File

@ -61,6 +61,7 @@ test "Once executes its function just once" {
for (threads) |*handle| {
handle.* = try std.Thread.spawn(struct {
fn thread_fn(x: u8) void {
_ = x;
global_once.call();
}
}.thread_fn, 0);

View File

@ -1164,6 +1164,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
/// TODO currently, this function does not handle all flag combinations
/// or makes use of perm argument.
pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t {
_ = perm;
var options = openOptionsFromFlags(flags);
options.dir = std.fs.cwd().fd;
return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@ -1273,6 +1274,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t)
/// TODO currently, this function does not handle all flag combinations
/// or makes use of perm argument.
pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t {
_ = mode;
var options = openOptionsFromFlags(flags);
options.dir = dir_fd;
return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@ -2169,6 +2171,7 @@ pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!v
pub const mkdiratC = @compileError("deprecated: renamed to mkdiratZ");
pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
_ = mode;
switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) {
wasi.ESUCCESS => return,
wasi.EACCES => return error.AccessDenied,
@ -2216,6 +2219,7 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirErr
}
pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void {
_ = mode;
const sub_dir_handle = windows.OpenFile(sub_path_w, .{
.dir = dir_fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@ -2291,6 +2295,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void {
/// Windows-only. Same as `mkdir` but the parameters is WTF16 encoded.
pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
_ = mode;
const sub_dir_handle = windows.OpenFile(dir_path_w, .{
.dir = std.fs.cwd().fd,
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@ -3868,6 +3873,7 @@ pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
/// Otherwise use `access` or `accessC`.
/// TODO currently this ignores `mode`.
pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void {
_ = mode;
const ret = try windows.GetFileAttributesW(path);
if (ret != windows.INVALID_FILE_ATTRIBUTES) {
return;
@ -3918,6 +3924,8 @@ pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) Acces
/// is NtDll-prefixed, null-terminated, WTF-16 encoded.
/// TODO currently this ignores `mode` and `flags`
pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
_ = mode;
_ = flags;
if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
return;
}
@ -4895,6 +4903,8 @@ pub fn res_mkquery(
newrr: ?[*]const u8,
buf: []u8,
) usize {
_ = data;
_ = newrr;
// This implementation is ported from musl libc.
// A more idiomatic "ziggy" implementation would be welcome.
var name = dname;

View File

@ -1286,7 +1286,7 @@ pub const CAP_BLOCK_SUSPEND = 36;
pub const CAP_AUDIT_READ = 37;
pub const CAP_LAST_CAP = CAP_AUDIT_READ;
- pub fn cap_valid(u8: x) bool {
+ pub fn cap_valid(x: u8) bool {
return x >= 0 and x <= CAP_LAST_CAP;
}
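Note: unlike the rest of the commit, the `cap_valid` hunk is a genuine syntax repair. Zig parameter lists read `name: type`, so the old `u8: x` could never have compiled. With `x` an unsigned `u8`, the `x >= 0` half of the check is always true, so the effective test is the upper bound:

// Equivalent check once the parameter is declared correctly; 37 is
// CAP_LAST_CAP (CAP_AUDIT_READ) per the constants above.
fn capValid(x: u8) bool {
    return x <= 37;
}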

View File

@ -70,6 +70,7 @@ fn splitValueLE64(val: i64) [2]u32 {
};
}
fn splitValueBE64(val: i64) [2]u32 {
_ = val;
return [2]u32{
@truncate(u32, u >> 32),
@truncate(u32, u),
@ -1022,7 +1023,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
for (msgvec[0..kvlen]) |*msg, i| {
var size: i32 = 0;
const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
- for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov, j| {
+ for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(i32, size, @intCast(i32, iov.iov_len), &size)) {
// batch-send all messages up to the current message
if (next_unsent < i) {

View File

@ -1513,7 +1513,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
EINVAL => error.MapTypeOrAttrInvalid,
ENOMEM => error.SystemResources,
EPERM => error.AccessDenied,
- else => |err| unexpectedErrno(rc),
+ else => unexpectedErrno(rc),
};
}
@ -1539,7 +1539,7 @@ pub fn map_lookup_elem(fd: fd_t, key: []const u8, value: []u8) !void {
EINVAL => return error.FieldInAttrNeedsZeroing,
ENOENT => return error.NotFound,
EPERM => return error.AccessDenied,
- else => |err| return unexpectedErrno(rc),
+ else => return unexpectedErrno(rc),
}
}

View File

@ -284,6 +284,7 @@ pub const IO_Uring = struct {
}
fn copy_cqes_ready(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) u32 {
_ = wait_nr;
const ready = self.cq_ready();
const count = std.math.min(cqes.len, ready);
var head = self.cq.head.*;
@ -320,6 +321,7 @@ pub const IO_Uring = struct {
/// Not idempotent, calling more than once will result in other CQEs being lost.
/// Matches the implementation of cqe_seen() in liburing.
pub fn cqe_seen(self: *IO_Uring, cqe: *io_uring_cqe) void {
_ = cqe;
self.cq_advance(1);
}
@ -728,6 +730,7 @@ pub const CompletionQueue = struct {
}
pub fn deinit(self: *CompletionQueue) void {
_ = self;
// A no-op since we now share the mmap with the submission queue.
// Here for symmetry with the submission queue, and for any future feature support.
}

View File

@ -18,6 +18,7 @@ pub fn syscall0(number: SYS) usize {
}
pub fn syscall_pipe(fd: *[2]i32) usize {
_ = fd;
return asm volatile (
\\ .set noat
\\ .set noreorder

View File

@ -353,6 +353,7 @@ test "spawn threads" {
}
fn start1(ctx: void) u8 {
_ = ctx;
return 0;
}
@ -379,6 +380,7 @@ test "thread local storage" {
threadlocal var x: i32 = 1234;
fn testTls(context: void) !void {
_ = context;
if (x != 1234) return error.TlsBadStartValue;
x += 1;
if (x != 1235) return error.TlsBadEndValue;
@ -425,6 +427,7 @@ const IterFnError = error{
};
fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
_ = size;
// Count how many libraries are loaded
counter.* += @as(usize, 1);
@ -731,6 +734,7 @@ test "sigaction" {
const S = struct {
fn handler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) void {
_ = ctx_ptr;
// Check that we received the correct signal.
switch (native_os) {
.netbsd => {

View File

@ -37,6 +37,7 @@ pub const Guid = extern struct {
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
if (f.len == 0) {
return std.fmt.format(writer, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{
self.time_low,

View File

@ -35,6 +35,7 @@ pub const ManagedNetworkProtocol = extern struct {
/// Translates an IP multicast address to a hardware (MAC) multicast address.
/// This function may be unsupported in some MNP implementations.
pub fn mcastIpToMac(self: *const ManagedNetworkProtocol, ipv6flag: bool, ipaddress: *const c_void, mac_address: *MacAddress) Status {
_ = mac_address;
return self._mcast_ip_to_mac(self, ipv6flag, ipaddress);
}

View File

@ -194,6 +194,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
///Returns the number of elements in the packed array
pub fn len(self: Self) usize {
_ = self;
return int_count;
}

View File

@ -675,6 +675,7 @@ pub const Pdb = struct {
}
pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
_ = self;
std.debug.assert(module.populated);
var symbol_i: usize = 0;
@ -906,7 +907,7 @@ const Msf = struct {
// These streams are not used, but still participate in the file
// and must be taken into account when resolving stream indices.
const Nil = 0xFFFFFFFF;
- for (stream_sizes) |*s, i| {
+ for (stream_sizes) |*s| {
const size = try directory.reader().readIntLittle(u32);
s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.BlockSize);
}

View File

@ -428,7 +428,7 @@ pub fn PriorityDequeue(comptime T: type) type {
warn("{}, ", .{e});
}
warn("array: ", .{});
- for (self.items) |e, i| {
+ for (self.items) |e| {
warn("{}, ", .{e});
}
warn("len: {} ", .{self.len});

View File

@ -249,7 +249,7 @@ pub fn PriorityQueue(comptime T: type) type {
warn("{}, ", .{e});
}
warn("array: ", .{});
- for (self.items) |e, i| {
+ for (self.items) |e| {
warn("{}, ", .{e});
}
warn("len: {} ", .{self.len});

View File

@ -419,6 +419,7 @@ pub const ArgIteratorWindows = struct {
};
}
fn emitBackslashes(self: *ArgIteratorWindows, buf: *std.ArrayList(u16), emit_count: usize) !void {
_ = self;
var i: usize = 0;
while (i < emit_count) : (i += 1) {
try buf.append(std.mem.nativeToLittle(u16, '\\'));
@ -748,6 +749,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
}
try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct {
fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
_ = size;
const name = info.dlpi_name orelse return;
if (name[0] == '/') {
const item = try list.allocator.dupeZ(u8, mem.spanZ(name));

View File

@ -37,9 +37,11 @@ pub fn binarySearch(
test "binarySearch" {
const S = struct {
fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
_ = context;
return math.order(lhs, rhs);
}
fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
_ = context;
return math.order(lhs, rhs);
}
};
@ -1133,6 +1135,7 @@ fn swap(
pub fn asc(comptime T: type) fn (void, T, T) bool {
const impl = struct {
fn inner(context: void, a: T, b: T) bool {
_ = context;
return a < b;
}
};
@ -1144,6 +1147,7 @@ pub fn asc(comptime T: type) fn (void, T, T) bool {
pub fn desc(comptime T: type) fn (void, T, T) bool {
const impl = struct {
fn inner(context: void, a: T, b: T) bool {
_ = context;
return a > b;
}
};

View File

@ -160,6 +160,7 @@ fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
}
fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
_ = errnum;
return "TODO strerror implementation";
}
@ -173,6 +174,7 @@ test "strncmp" {
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = error_return_trace;
if (builtin.is_test) {
@setCold(true);
std.debug.panic("{s}", .{msg});

View File

@ -602,6 +602,7 @@ pub usingnamespace @import("compiler_rt/atomics.zig");
// Avoid dragging in the runtime safety mechanisms into this .o file,
// unless we're trying to test this file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = error_return_trace;
@setCold(true);
if (is_test) {
std.debug.panic("{s}", .{msg});

View File

@ -80,18 +80,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};
// Those work on any object no matter the pointer alignment nor its size.
fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
@memcpy(dest, src, size);
}
fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(dest));
defer sl.release();
@memcpy(dest, src, size);
}
fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
_ = model;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@memcpy(old, ptr, size);
@ -106,6 +109,8 @@ fn __atomic_compare_exchange(
success: i32,
failure: i32,
) callconv(.C) i32 {
_ = success;
_ = failure;
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
for (ptr[0..size]) |b, i| {
@ -135,6 +140,7 @@ comptime {
fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
return struct {
fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(src));
defer sl.release();
@ -162,6 +168,7 @@ comptime {
fn atomicStoreFn(comptime T: type) fn (*T, T, i32) callconv(.C) void {
return struct {
fn atomic_store_N(dst: *T, value: T, model: i32) callconv(.C) void {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(dst));
defer sl.release();
@ -189,6 +196,7 @@ comptime {
fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
return struct {
fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@ -218,6 +226,8 @@ comptime {
fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
return struct {
fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
_ = success;
_ = failure;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();
@ -255,6 +265,7 @@ comptime {
fn fetchFn(comptime T: type, comptime op: builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
return struct {
pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
_ = model;
if (@sizeOf(T) > largest_atomic_size) {
var sl = spinlocks.get(@ptrToInt(ptr));
defer sl.release();

View File

@ -101,6 +101,7 @@ const test_vectors = init: {
test "compare f64" {
for (test_vectors) |vector, i| {
_ = i;
try std.testing.expect(test__cmpdf2(vector));
}
}

View File

@ -101,6 +101,7 @@ const test_vectors = init: {
test "compare f32" {
for (test_vectors) |vector, i| {
_ = i;
try std.testing.expect(test__cmpsf2(vector));
}
}

View File

@ -27,6 +27,8 @@ extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8
// Avoid dragging in the runtime safety mechanisms into this .o file.
pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
_ = msg;
_ = error_return_trace;
@setCold(true);
if (@hasDecl(std.os, "abort"))
std.os.abort();

View File

@ -157,7 +157,7 @@ pub const Target = struct {
pub fn format(
self: WindowsVersion,
comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
+ _: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
if (fmt.len > 0 and fmt[0] == 's') {

View File

@ -210,7 +210,7 @@ pub fn utf8ValidateSlice(s: []const u8) bool {
return false;
}
i += cp_len;
- } else |err| {
+ } else |_| {
return false;
}
}

View File

@ -53,6 +53,8 @@ pub const Address = union(enum) {
opts: fmt.FormatOptions,
writer: anytype,
) !void {
_ = opts;
_ = layout;
switch (self) {
.ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
.ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),

View File

@ -143,6 +143,7 @@ pub const IPv4 = extern struct {
opts: fmt.FormatOptions,
writer: anytype,
) !void {
_ = opts;
if (comptime layout.len != 0 and layout[0] != 's') {
@compileError("Unsupported format specifier for IPv4 type '" ++ layout ++ "'.");
}
@ -352,6 +353,7 @@ pub const IPv6 = extern struct {
opts: fmt.FormatOptions,
writer: anytype,
) !void {
_ = opts;
const specifier = comptime &[_]u8{if (layout.len == 0) 'x' else switch (layout[0]) {
'x', 'X' => |specifier| specifier,
's' => 'x',

View File

@ -117,7 +117,7 @@ pub const Socket = struct {
};
}
/// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
pub fn getNativeSize(self: Socket.Address) u32 {
return switch (self) {
.ipv4 => @sizeOf(os.sockaddr_in),
@ -132,6 +132,8 @@ pub const Socket = struct {
opts: fmt.FormatOptions,
writer: anytype,
) !void {
_ = opts;
_ = layout;
switch (self) {
.ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
.ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
@ -280,7 +282,7 @@ pub const Socket = struct {
///
/// Microsoft's documentation and glibc denote the fields to be unsigned
/// short's on Windows, whereas glibc and musl denote the fields to be
/// int's on every other platform.
pub const Linger = extern struct {
pub const Field = switch (native_os.tag) {
.windows => c_ushort,

View File

@ -292,6 +292,7 @@ pub fn Mixin(comptime Socket: type) type {
/// with a set of flags specified. It returns the number of bytes that were
/// read into the buffer provided.
pub fn readMessage(self: Socket, msg: *Socket.Message, flags: u32) !usize {
_ = flags;
const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSARECVMSG, self.fd, ws2_32.WSAID_WSARECVMSG);
var num_bytes: u32 = undefined;
@ -367,16 +368,19 @@ pub fn Mixin(comptime Socket: type) type {
/// Query and return the latest cached error on the socket.
pub fn getError(self: Socket) !void {
_ = self;
return {};
}
/// Query the read buffer size of the socket.
pub fn getReadBufferSize(self: Socket) !u32 {
_ = self;
return 0;
}
/// Query the write buffer size of the socket.
pub fn getWriteBufferSize(self: Socket) !u32 {
_ = self;
return 0;
}
@ -406,7 +410,7 @@ pub fn Mixin(comptime Socket: type) type {
/// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive
/// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if
/// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
pub fn setKeepAlive(self: Socket, enabled: bool) !void {
return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
}
@ -438,7 +442,7 @@ pub fn Mixin(comptime Socket: type) type {
/// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
/// set on a non-blocking socket.
///
/// Set a timeout on the socket that is to occur if no messages are successfully written
/// to its bound destination after a specified number of milliseconds. A subsequent write
/// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
@ -448,7 +452,7 @@ pub fn Mixin(comptime Socket: type) type {
/// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
/// set on a non-blocking socket.
///
/// Set a timeout on the socket that is to occur if no messages are successfully read
/// from its bound destination after a specified number of milliseconds. A subsequent
/// read from the socket will thereafter return `error.WouldBlock` should the timeout be

View File

@ -1866,6 +1866,7 @@ pub const Tree = struct {
}
fn fullStructInit(tree: Tree, info: full.StructInit.Ast) full.StructInit {
_ = tree;
var result: full.StructInit = .{
.ast = info,
};

View File

@ -136,6 +136,7 @@ pub inline fn __builtin_strcmp(s1: [*c]const u8, s2: [*c]const u8) c_int {
}
pub inline fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) usize {
_ = ptr;
// clang semantics match gcc's: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
// If it is not possible to determine which objects ptr points to at compile time,
// __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
@ -186,6 +187,7 @@ pub inline fn __builtin_memcpy(
/// The return value of __builtin_expect is `expr`. `c` is the expected value
/// of `expr` and is used as a hint to the compiler in C. Here it is unused.
pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long {
_ = c;
return expr;
}

View File

@ -8,6 +8,7 @@ pub fn formatId(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
if (isValidId(bytes)) {
return writer.writeAll(bytes);
}
@ -41,6 +42,7 @@ pub fn formatEscapes(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
for (bytes) |byte| switch (byte) {
'\n' => try writer.writeAll("\\n"),
'\r' => try writer.writeAll("\\r"),

View File

@ -2104,7 +2104,7 @@ const Parser = struct {
/// FnCallArguments <- LPAREN ExprList RPAREN
/// ExprList <- (Expr COMMA)* Expr?
fn parseSuffixExpr(p: *Parser) !Node.Index {
- if (p.eatToken(.keyword_async)) |async_token| {
+ if (p.eatToken(.keyword_async)) |_| {
var res = try p.expectPrimaryTypeExpr();
while (true) {
const node = try p.parseSuffixOp(res);

View File

@ -200,6 +200,7 @@ pub const NativePaths = struct {
}
fn appendArray(self: *NativePaths, array: *ArrayList([:0]u8), s: []const u8) !void {
_ = self;
const item = try array.allocator.dupeZ(u8, s);
errdefer array.allocator.free(item);
try array.append(item);
@ -332,7 +333,7 @@ pub const NativeTargetInfo = struct {
if (std.builtin.Version.parse(buf[0 .. len - 1])) |ver| {
os.version_range.semver.min = ver;
os.version_range.semver.max = ver;
- } else |err| {
+ } else |_| {
return error.OSVersionDetectionFail;
}
},

View File

@ -68,10 +68,10 @@ pub fn detect(target_os: *Target.Os) !void {
return;
}
continue;
- } else |err| {
+ } else |_| {
return error.OSVersionDetectionFail;
}
- } else |err| {
+ } else |_| {
return error.OSVersionDetectionFail;
}
}

View File

@ -28,6 +28,7 @@ inline fn hasMask(input: u32, mask: u32) bool {
}
pub fn detectNativeCpuAndFeatures(arch: Target.Cpu.Arch, os: Target.Os, cross_target: CrossTarget) Target.Cpu {
_ = cross_target;
var cpu = Target.Cpu{
.arch = arch,
.model = Target.Cpu.Model.generic(arch),
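`_ = cross_target;` is the commit's idiom for a parameter an interface requires but the body does not use. A sketch with a stand-in comparator, assuming this era's `std.sort.sort`, which threads a caller-supplied context through to it:

const std = @import("std");

fn asc(context: void, a: u32, b: u32) bool {
    _ = context; // required by the sort API; unused by this comparator
    return a < b;
}

pub fn demoSort() void {
    var items = [_]u32{ 3, 1, 2 };
    std.sort.sort(u32, &items, {}, asc);
    std.debug.assert(items[0] == 1);
}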

View File

@ -925,6 +925,7 @@ fn suspendExpr(
rl: ResultLoc,
node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@ -1208,6 +1209,7 @@ fn arrayInitExprRlNone(
elements: []const ast.Node.Index,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
const elem_list = try gpa.alloc(Zir.Inst.Ref, elements.len);
@ -1233,6 +1235,9 @@ fn arrayInitExprRlTy(
elem_ty_inst: Zir.Inst.Ref,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = rl;
_ = array_ty_inst;
_ = elem_ty_inst;
const astgen = gz.astgen;
const gpa = astgen.gpa;
@ -1259,6 +1264,7 @@ fn arrayInitExprRlPtr(
elements: []const ast.Node.Index,
result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
@ -1368,6 +1374,7 @@ fn structInitExprRlNone(
struct_init: ast.full.StructInit,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@ -1403,6 +1410,7 @@ fn structInitExprRlPtr(
struct_init: ast.full.StructInit,
result_ptr: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@ -1439,6 +1447,7 @@ fn structInitExprRlTy(
ty_inst: Zir.Inst.Ref,
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = rl;
const astgen = gz.astgen;
const gpa = astgen.gpa;
const tree = astgen.tree;
@ -1781,6 +1790,7 @@ fn blockExprStmts(
node: ast.Node.Index,
statements: []const ast.Node.Index,
) !void {
_ = node;
const astgen = gz.astgen;
const tree = astgen.tree;
const node_tags = tree.nodes.items(.tag);
@ -2117,6 +2127,7 @@ fn genDefers(
inner_scope: *Scope,
err_code: Zir.Inst.Ref,
) InnerError!void {
_ = err_code;
const astgen = gz.astgen;
const tree = astgen.tree;
const node_datas = tree.nodes.items(.data);
@ -2201,6 +2212,7 @@ fn deferStmt(
block_arena: *Allocator,
scope_tag: Scope.Tag,
) InnerError!*Scope {
_ = gz;
const defer_scope = try block_arena.create(Scope.Defer);
defer_scope.* = .{
.base = .{ .tag = scope_tag },
@ -4703,6 +4715,8 @@ fn finishThenElseBlock(
then_break_block: Zir.Inst.Index,
break_tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
_ = then_src;
_ = else_src;
// We now have enough information to decide whether the result instruction should
// be communicated via result location pointer or break instructions.
const strat = rl.strategy(block_scope);
@ -4886,7 +4900,7 @@ fn ifExpr(
inst: Zir.Inst.Ref,
bool_bit: Zir.Inst.Ref,
} = c: {
if (if_full.error_token) |error_token| {
if (if_full.error_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@ -4894,7 +4908,7 @@ fn ifExpr(
.inst = err_union,
.bool_bit = try block_scope.addUnNode(tag, err_union, node),
};
} else if (if_full.payload_token) |payload_token| {
} else if (if_full.payload_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const optional = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@ -5146,7 +5160,7 @@ fn whileExpr(
inst: Zir.Inst.Ref,
bool_bit: Zir.Inst.Ref,
} = c: {
if (while_full.error_token) |error_token| {
if (while_full.error_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@ -5154,7 +5168,7 @@ fn whileExpr(
.inst = err_union,
.bool_bit = try continue_scope.addUnNode(tag, err_union, node),
};
} else if (while_full.payload_token) |payload_token| {
} else if (while_full.payload_token) |_| {
const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
const optional = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@ -6665,6 +6679,7 @@ fn unionInitRlPtr(
union_type: Zir.Inst.Ref,
field_name: Zir.Inst.Ref,
) InnerError!Zir.Inst.Ref {
_ = rl;
const union_init_ptr = try parent_gz.addPlNode(.union_init_ptr, node, Zir.Inst.UnionInitPtr{
.result_ptr = result_ptr,
.union_type = union_type,
@ -6753,6 +6768,8 @@ fn bitCastRlPtr(
result_ptr: Zir.Inst.Ref,
rhs: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
_ = rl;
_ = scope;
const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
.lhs = dest_type,
.rhs = result_ptr,
@ -8013,6 +8030,7 @@ fn rvalue(
result: Zir.Inst.Ref,
src_node: ast.Node.Index,
) InnerError!Zir.Inst.Ref {
_ = scope;
switch (rl) {
.none, .none_or_ref => return result,
.discard => {

View File

@ -523,6 +523,7 @@ pub const AllErrors = struct {
errors: *std.ArrayList(Message),
msg: []const u8,
) !void {
_ = arena;
try errors.append(.{ .plain = .{ .msg = msg } });
}

View File

@ -774,7 +774,10 @@ pub const Fn = struct {
ir.dumpFn(mod, func);
}
pub fn deinit(func: *Fn, gpa: *Allocator) void {}
pub fn deinit(func: *Fn, gpa: *Allocator) void {
_ = func;
_ = gpa;
}
};
pub const Var = struct {
@ -2209,6 +2212,7 @@ comptime {
}
pub fn astGenFile(mod: *Module, file: *Scope.File, prog_node: *std.Progress.Node) !void {
_ = prog_node;
const tracy = trace(@src());
defer tracy.end();
@ -3128,6 +3132,7 @@ pub const ImportFileResult = struct {
};
pub fn importPkg(mod: *Module, cur_pkg: *Package, pkg: *Package) !ImportFileResult {
_ = cur_pkg;
const gpa = mod.gpa;
// The resolved path is used as the key in the import table, to detect if
@ -3384,7 +3389,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
decl.has_align = has_align;
decl.has_linksection = has_linksection;
decl.zir_decl_index = @intCast(u32, decl_sub_index);
if (decl.getFunction()) |func| {
if (decl.getFunction()) |_| {
switch (mod.comp.bin_file.tag) {
.coff => {
// TODO Implement for COFF
@ -3753,6 +3758,7 @@ pub fn analyzeExport(
errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
}
pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
_ = mod;
const const_inst = try arena.create(ir.Inst.Constant);
const_inst.* = .{
.base = .{
@ -4121,6 +4127,7 @@ pub fn floatAdd(
lhs: Value,
rhs: Value,
) !Value {
_ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@ -4154,6 +4161,7 @@ pub fn floatSub(
lhs: Value,
rhs: Value,
) !Value {
_ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@ -4187,6 +4195,7 @@ pub fn floatDiv(
lhs: Value,
rhs: Value,
) !Value {
_ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@ -4220,6 +4229,7 @@ pub fn floatMul(
lhs: Value,
rhs: Value,
) !Value {
_ = src;
switch (float_type.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
@ -4253,6 +4263,7 @@ pub fn simplePtrType(
mutable: bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
_ = mod;
if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
return Type.initTag(.const_slice_u8);
}
@ -4287,6 +4298,7 @@ pub fn ptrType(
@"volatile": bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
_ = mod;
assert(host_size == 0 or bit_offset < host_size * 8);
// TODO check if type can be represented by simplePtrType
@ -4304,6 +4316,7 @@ pub fn ptrType(
}
pub fn optionalType(mod: *Module, arena: *Allocator, child_type: Type) Allocator.Error!Type {
_ = mod;
switch (child_type.tag()) {
.single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
arena,
@ -4324,6 +4337,7 @@ pub fn arrayType(
sentinel: ?Value,
elem_type: Type,
) Allocator.Error!Type {
_ = mod;
if (elem_type.eql(Type.initTag(.u8))) {
if (sentinel) |some| {
if (some.eql(Value.initTag(.zero))) {
@ -4354,6 +4368,7 @@ pub fn errorUnionType(
error_set: Type,
payload: Type,
) Allocator.Error!Type {
_ = mod;
assert(error_set.zigTypeTag() == .ErrorSet);
if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
return Type.initTag(.anyerror_void_error_union);

View File

@ -702,6 +702,7 @@ fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I
}
fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
@ -776,6 +777,7 @@ fn zirStructDecl(
}
fn createTypeName(sema: *Sema, block: *Scope.Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 {
_ = block;
switch (name_strategy) {
.anon => {
// It would be neat to have "struct:line:column" but this name has
@ -1067,6 +1069,7 @@ fn zirOpaqueDecl(
inst: Zir.Inst.Index,
name_strategy: Zir.Inst.NameStrategy,
) InnerError!*Inst {
_ = name_strategy;
const tracy = trace(@src());
defer tracy.end();
@ -1242,6 +1245,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
// TODO check if arg_name shadows a Decl
if (block.inlining) |inlining| {
_ = inlining;
return sema.param_inst_list[arg_index];
}
@ -1640,6 +1644,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
}
fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const tracy = trace(@src());
defer tracy.end();
@ -1648,6 +1653,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
}
fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const tracy = trace(@src());
defer tracy.end();
@ -1665,6 +1671,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
}
fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].float;
const src = inst_data.src();
@ -1677,6 +1684,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*
}
fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const arena = sema.arena;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
@ -2358,6 +2366,7 @@ fn analyzeCall(
}
fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const tracy = trace(@src());
defer tracy.end();
@ -2466,6 +2475,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
}
fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const tracy = trace(@src());
defer tracy.end();
@ -2626,6 +2636,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
}
fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const tracy = trace(@src());
defer tracy.end();
@ -3056,6 +3067,7 @@ fn funcCommon(
src_locs: Zir.Inst.Func.SrcLocs,
opt_lib_name: ?[]const u8,
) InnerError!*Inst {
_ = inferred_error_set;
const src: LazySrcLoc = .{ .node_offset = src_node_offset };
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
const return_type = try sema.resolveType(block, ret_ty_src, zir_return_type);
@ -3492,6 +3504,8 @@ fn zirSwitchCapture(
is_multi: bool,
is_ref: bool,
) InnerError!*Inst {
_ = is_ref;
_ = is_multi;
const tracy = trace(@src());
defer tracy.end();
@ -3509,6 +3523,7 @@ fn zirSwitchCaptureElse(
inst: Zir.Inst.Index,
is_ref: bool,
) InnerError!*Inst {
_ = is_ref;
const tracy = trace(@src());
defer tracy.end();
@ -4511,12 +4526,15 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
}
fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
}
fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirShr", .{});
@ -4586,18 +4604,21 @@ fn zirBitwise(
}
fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
}
fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
}
fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = inst;
const tracy = trace(@src());
defer tracy.end();
return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayMul", .{});
@ -5059,6 +5080,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
}
fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const zir_datas = sema.code.instructions.items(.data);
const inst_data = zir_datas[inst].un_node;
const src = inst_data.src();
@ -5067,6 +5089,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
}
fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
_ = block;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const operand_ptr = try sema.resolveInst(inst_data.operand);
@ -5504,6 +5527,7 @@ fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
}
fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
_ = is_ref;
const mod = sema.mod;
const gpa = sema.gpa;
const zir_datas = sema.code.instructions.items(.data);
@ -5613,18 +5637,21 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
}
fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
_ = is_ref;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
}
fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
_ = is_ref;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
}
fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
_ = is_ref;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
@ -6021,6 +6048,7 @@ fn zirAwait(
inst: Zir.Inst.Index,
is_nosuspend: bool,
) InnerError!*Inst {
_ = is_nosuspend;
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
return sema.mod.fail(&block.base, src, "TODO: Sema.zirAwait", .{});
@ -6302,6 +6330,8 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
}
fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !Zir.Inst.Index {
_ = sema;
_ = panic_id;
// TODO Once we have a panic function to call, call it here instead of breakpoint.
_ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
_ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach);
@ -6600,6 +6630,8 @@ fn elemPtrArray(
elem_index: *Inst,
elem_index_src: LazySrcLoc,
) InnerError!*Inst {
_ = elem_index;
_ = elem_index_src;
if (array_ptr.value()) |array_ptr_val| {
if (elem_index.value()) |index_val| {
// Both array pointer and index are compile-time known.
@ -7510,6 +7542,8 @@ fn resolveBuiltinTypeFields(
ty: Type,
name: []const u8,
) InnerError!Type {
_ = ty;
_ = name;
const resolved_ty = try sema.getBuiltinType(block, src, name);
return sema.resolveTypeFields(block, src, resolved_ty);
}

View File

@ -4433,6 +4433,7 @@ const Writer = struct {
}
fn writeInstIndex(self: *Writer, stream: anytype, inst: Inst.Index) !void {
_ = self;
return stream.print("%{d}", .{inst});
}
@ -4453,6 +4454,7 @@ const Writer = struct {
name: []const u8,
flag: bool,
) !void {
_ = self;
if (!flag) return;
try stream.writeAll(name);
}

View File

@ -304,9 +304,12 @@ pub const Inst = struct {
base: Inst,
pub fn operandCount(self: *const NoOp) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const NoOp, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -316,6 +319,7 @@ pub const Inst = struct {
operand: *Inst,
pub fn operandCount(self: *const UnOp) usize {
_ = self;
return 1;
}
pub fn getOperand(self: *const UnOp, index: usize) ?*Inst {
@ -331,6 +335,7 @@ pub const Inst = struct {
rhs: *Inst,
pub fn operandCount(self: *const BinOp) usize {
_ = self;
return 2;
}
pub fn getOperand(self: *const BinOp, index: usize) ?*Inst {
@ -356,9 +361,12 @@ pub const Inst = struct {
name: [*:0]const u8,
pub fn operandCount(self: *const Arg) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const Arg, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -391,9 +399,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const Block) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const Block, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -412,9 +423,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const BrBlockFlat) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -427,9 +441,11 @@ pub const Inst = struct {
operand: *Inst,
pub fn operandCount(self: *const Br) usize {
_ = self;
return 1;
}
pub fn getOperand(self: *const Br, index: usize) ?*Inst {
_ = self;
if (index == 0)
return self.operand;
return null;
@ -443,9 +459,12 @@ pub const Inst = struct {
block: *Block,
pub fn operandCount(self: *const BrVoid) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -490,6 +509,7 @@ pub const Inst = struct {
else_death_count: u32 = 0,
pub fn operandCount(self: *const CondBr) usize {
_ = self;
return 1;
}
pub fn getOperand(self: *const CondBr, index: usize) ?*Inst {
@ -516,9 +536,12 @@ pub const Inst = struct {
val: Value,
pub fn operandCount(self: *const Constant) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const Constant, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -530,9 +553,12 @@ pub const Inst = struct {
body: Body,
pub fn operandCount(self: *const Loop) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const Loop, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -544,9 +570,12 @@ pub const Inst = struct {
variable: *Module.Var,
pub fn operandCount(self: *const VarPtr) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const VarPtr, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};
@ -559,9 +588,12 @@ pub const Inst = struct {
field_index: usize,
pub fn operandCount(self: *const StructFieldPtr) usize {
_ = self;
return 1;
}
pub fn getOperand(self: *const StructFieldPtr, index: usize) ?*Inst {
_ = self;
_ = index;
var i = index;
if (i < 1)
@ -593,6 +625,7 @@ pub const Inst = struct {
};
pub fn operandCount(self: *const SwitchBr) usize {
_ = self;
return 1;
}
pub fn getOperand(self: *const SwitchBr, index: usize) ?*Inst {
@ -621,9 +654,12 @@ pub const Inst = struct {
column: u32,
pub fn operandCount(self: *const DbgStmt) usize {
_ = self;
return 0;
}
pub fn getOperand(self: *const DbgStmt, index: usize) ?*Inst {
_ = self;
_ = index;
return null;
}
};

View File

@ -564,7 +564,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.r11 = true, // fp
.r14 = true, // lr
};
inline for (callee_preserved_regs) |reg, i| {
inline for (callee_preserved_regs) |reg| {
if (self.register_manager.isRegAllocated(reg)) {
@field(saved_regs, @tagName(reg)) = true;
}
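Loop captures get the same treatment: the index `i` was unused, so the capture list shrinks to `|reg|`. A minimal sketch:

const regs = [_]u8{ 1, 2, 4 };

fn regMask() u8 {
    var mask: u8 = 0;
    inline for (regs) |reg| { // previously `|reg, i|` with `i` unused
        mask |= reg;
    }
    return mask; // 0b111
}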
@ -602,7 +602,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
} else {
if (math.cast(i26, amt)) |offset| {
writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(.al, offset).toU32());
} else |err| {
} else |_| {
return self.failSymbol("exitlude jump is too large", .{});
}
}
@ -675,7 +675,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
} else {
if (math.cast(i28, amt)) |offset| {
writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32());
} else |err| {
} else |_| {
return self.failSymbol("exitlude jump is too large", .{});
}
}
@ -1497,6 +1497,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
swap_lhs_and_rhs: bool,
op: ir.Inst.Tag,
) !void {
_ = src;
assert(lhs_mcv == .register or rhs_mcv == .register);
const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
@ -1905,6 +1906,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1);
},
.immediate => |imm| {
_ = imm;
return self.fail(src, "TODO implement x86 ADD/SUB/CMP source immediate", .{});
},
.embedded_in_code, .memory, .stack_offset => {
@ -2054,6 +2056,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg });
},
.immediate => |imm| {
_ = imm;
return self.fail(src, "TODO implement x86 multiply source immediate", .{});
},
.embedded_in_code, .memory, .stack_offset => {
@ -2982,14 +2985,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.arm, .armeb => {
if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32());
} else |err| {
} else |_| {
return self.fail(src, "TODO: enable larger branch offset", .{});
}
},
.aarch64, .aarch64_be, .aarch64_32 => {
if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32());
} else |err| {
} else |_| {
return self.fail(src, "TODO: enable larger branch offset", .{});
}
},
@ -3307,16 +3310,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
},
.compare_flags_unsigned => |op| {
_ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
_ = op;
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => {
const reg = try self.copyToTmpRegister(src, ty, mcv);
return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
},
.embedded_in_code => |code_offset| {
.embedded_in_code => {
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
@ -3352,7 +3357,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}),
}
},
.memory => |vaddr| {
.memory => {
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@ -3380,10 +3385,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else => return self.fail(src, "TODO implement memset", .{}),
}
},
.compare_flags_unsigned => |op| {
.compare_flags_unsigned => {
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
.compare_flags_signed => {
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => |x_big| {
@ -3435,13 +3440,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
}
},
.embedded_in_code => |code_offset| {
.embedded_in_code => {
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89);
},
.memory => |vaddr| {
.memory => {
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@ -3469,17 +3474,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else => return self.fail(src, "TODO implement memset", .{}),
}
},
.compare_flags_unsigned => |op| {
.compare_flags_unsigned => {
return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
},
.compare_flags_signed => |op| {
.compare_flags_signed => {
return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
},
.immediate => {
const reg = try self.copyToTmpRegister(src, ty, mcv);
return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
},
.embedded_in_code => |code_offset| {
.embedded_in_code => {
return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
},
.register => |reg| {
@ -3511,7 +3516,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}),
}
},
.memory => |vaddr| {
.memory => {
return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
},
.stack_offset => |off| {
@ -3842,6 +3847,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
);
},
.compare_flags_signed => |op| {
_ = op;
return self.fail(src, "TODO set register with compare flags value (signed)", .{});
},
.immediate => |x| {
@ -4460,6 +4466,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
dummy,
pub fn allocIndex(self: Register) ?u4 {
_ = self;
return null;
}
};

View File

@ -674,7 +674,7 @@ pub const Instruction = union(enum) {
};
const imm4h: u4 = switch (offset) {
.immediate => |imm| @truncate(u4, imm >> 4),
.register => |reg| 0b0000,
.register => 0b0000,
};
return Instruction{
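The hunk above is the cleanest example of the unused switch-prong capture fix: `.register => |reg| 0b0000` bound a payload it never read. A self-contained sketch mirroring it, using this era's two-argument `@truncate`:

const Offset = union(enum) {
    immediate: u8,
    register: u5,
};

fn imm4h(offset: Offset) u4 {
    return switch (offset) {
        .immediate => |imm| @truncate(u4, imm >> 4),
        .register => 0b0000, // payload never read, so no capture
    };
}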

View File

@ -47,6 +47,8 @@ fn formatTypeAsCIdentifier(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
var buffer = [1]u8{0} ** 128;
// We don't care if it gets cut off, it's still more unique than a number
var buf = std.fmt.bufPrint(&buffer, "{}", .{data}) catch &buffer;
@ -63,6 +65,8 @@ fn formatIdent(
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
for (ident) |c, i| {
switch (c) {
'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
@ -747,6 +751,7 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
}
fn genVarPtr(o: *Object, inst: *Inst.VarPtr) !CValue {
_ = o;
return CValue{ .decl_ref = inst.variable.owner_decl };
}
@ -937,6 +942,8 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
}
fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
_ = o;
_ = inst;
// TODO emit #line directive here with line number and filename
return CValue.none;
}
@ -1016,11 +1023,13 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
}
fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
_ = inst;
try o.writer().writeAll("zig_breakpoint();\n");
return CValue.none;
}
fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
_ = inst;
try o.writer().writeAll("zig_unreachable();\n");
return CValue.none;
}

View File

@ -154,6 +154,7 @@ pub const Object = struct {
object_pathZ: [:0]const u8,
pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
_ = sub_path;
const self = try allocator.create(Object);
errdefer allocator.destroy(self);
@ -742,6 +743,7 @@ pub const FuncGen = struct {
}
fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = inst;
_ = self.builder.buildRetVoid();
return null;
}
@ -873,6 +875,7 @@ pub const FuncGen = struct {
}
fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
_ = inst;
_ = self.builder.buildUnreachable();
return null;
}
@ -1013,6 +1016,7 @@ pub const FuncGen = struct {
}
fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
_ = inst;
const llvn_fn = self.getIntrinsic("llvm.debugtrap");
_ = self.builder.buildCall(llvn_fn, null, 0, "");
return null;

View File

@ -702,7 +702,7 @@ pub const Context = struct {
try writer.writeByte(wasm.valtype(.i32)); // error code is always an i32 integer.
try writer.writeByte(val_type);
},
else => |ret_type| {
else => {
try leb.writeULEB128(writer, @as(u32, 1));
// Can we maybe get the source index of the return type?
const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
@ -721,7 +721,7 @@ pub const Context = struct {
// TODO: check for and handle death of instructions
const mod_fn = blk: {
if (typed_value.val.castTag(.function)) |func| break :blk func.data;
if (typed_value.val.castTag(.extern_fn)) |ext_fn| return Result.appended; // don't need code body for extern functions
if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
unreachable;
};
@ -910,7 +910,7 @@ pub const Context = struct {
},
else => unreachable,
},
.local => |local| {
.local => {
try self.emitWValue(rhs);
try writer.writeByte(wasm.opcode(.local_set));
try leb.writeULEB128(writer, lhs.local);
@ -925,6 +925,7 @@ pub const Context = struct {
}
fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue {
_ = inst;
// arguments share the index with locals
defer self.local_index += 1;
return WValue{ .local = self.local_index };
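`genArg` also shows the defer-bump idiom: hand out the current index, then increment it on function exit, after the return value has been computed. A sketch:

const std = @import("std");

fn nextLocalIndex(counter: *u32) u32 {
    defer counter.* += 1; // runs after the return value below is evaluated
    return counter.*;
}

pub fn demoDefer() void {
    var local_index: u32 = 0;
    std.debug.assert(nextLocalIndex(&local_index) == 0);
    std.debug.assert(local_index == 1);
}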
@ -1213,12 +1214,15 @@ pub const Context = struct {
}
fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue {
_ = self;
_ = breakpoint;
// unsupported by wasm itself. Can be implemented once we support DWARF
// for wasm
return .none;
}
fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue {
_ = unreach;
try self.code.append(wasm.opcode(.@"unreachable"));
return .none;
}

View File

@ -517,7 +517,7 @@ pub const File = struct {
.target = base.options.target,
.output_mode = .Obj,
});
const o_directory = base.options.module.?.zig_cache_artifact_directory;
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
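This one-line change (repeated below in Coff, Elf, and MachO) swaps a fresh unwrap of `self.base.options.module.?` for the `module` local that the enclosing block evidently already bound; presumably that local would otherwise have tripped the new unused-local error. A generic sketch of the shape, with a hypothetical stand-in type:

const Module = struct {
    zig_cache_artifact_directory: []const u8,
};

fn artifactDir(module_opt: ?*const Module) ![]const u8 {
    const module = module_opt orelse return error.NoModule;
    // Reuse the unwrapped local instead of re-unwrapping with `module_opt.?`.
    return module.zig_cache_artifact_directory;
}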

View File

@ -76,7 +76,12 @@ pub fn deinit(self: *C) void {
self.decl_table.deinit(self.base.allocator);
}
pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {
if (false) {
self;
decl;
}
}
pub fn freeDecl(self: *C, decl: *Module.Decl) void {
_ = self.decl_table.swapRemove(decl);
@ -307,4 +312,11 @@ pub fn updateDeclExports(
module: *Module,
decl: *Module.Decl,
exports: []const *Module.Export,
) !void {}
) !void {
if (false) {
exports;
decl;
module;
self;
}
}
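This file uses a different trick from the `_ =` discards: referencing each parameter inside `if (false)` so the use check is satisfied while the comptime-false branch produces no code. A reproduction with stand-in names; the bare expression statements like `a;` presumably passed only because the dead branch was not semantically analyzed by the compiler of this era, and later compilers reject them, which is why the explicit-discard form won out:

fn stub(a: u32, b: u32) void {
    if (false) {
        a; // reference satisfies the unused-parameter check
        b;
    }
}

fn stub2(a: u32, b: u32) void {
    _ = a; // the surviving equivalent
    _ = b;
}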

View File

@ -831,7 +831,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@ -1340,6 +1340,9 @@ pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
_ = self;
_ = module;
_ = decl;
// TODO Implement this
}

View File

@ -1262,7 +1262,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@ -1938,6 +1938,11 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock) void {
}
fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64) void {
if (false) {
self;
text_block;
new_block_size;
}
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}
@ -2706,6 +2711,7 @@ pub fn updateDeclExports(
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Decl) !void {
_ = module;
const tracy = trace(@src());
defer tracy.end();
@ -2979,6 +2985,7 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
}
fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
_ = self;
return 120;
}
@ -3372,7 +3379,7 @@ const CsuObjects = struct {
if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* });
},
else => {
inline for (std.meta.fields(@TypeOf(result))) |f, i| {
inline for (std.meta.fields(@TypeOf(result))) |f| {
if (@field(result, f.name)) |*obj| {
obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* });
}
@ -3380,7 +3387,7 @@ const CsuObjects = struct {
},
}
} else {
inline for (std.meta.fields(@TypeOf(result))) |f, i| {
inline for (std.meta.fields(@TypeOf(result))) |f| {
if (@field(result, f.name)) |*obj| {
if (comp.crt_files.get(obj.*)) |crtf| {
obj.* = crtf.full_object_path;

View File

@ -441,6 +441,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
}
pub fn flushModule(self: *MachO, comp: *Compilation) !void {
_ = comp;
const tracy = trace(@src());
defer tracy.end();
@ -533,7 +534,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
.target = self.base.options.target,
.output_mode = .Obj,
});
const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
const o_directory = module.zig_cache_artifact_directory;
const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
break :blk full_obj_path;
}
@ -1254,6 +1255,9 @@ fn freeTextBlock(self: *MachO, text_block: *TextBlock) void {
}
fn shrinkTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64) void {
_ = self;
_ = text_block;
_ = new_block_size;
// TODO check the new capacity, and if it crosses the size threshold into a big enough
// capacity, insert a free list node for it.
}

View File

@ -899,6 +899,7 @@ fn writeStringTable(self: *DebugSymbols) !void {
}
pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const Module.Decl) !void {
_ = module;
const tracy = trace(@src());
defer tracy.end();
@ -926,6 +927,8 @@ pub fn initDeclDebugBuffers(
module: *Module,
decl: *Module.Decl,
) !DeclDebugBuffers {
_ = self;
_ = module;
const tracy = trace(@src());
defer tracy.end();
@ -1188,6 +1191,7 @@ fn addDbgInfoType(
dbg_info_buffer: *std.ArrayList(u8),
target: std.Target,
) !void {
_ = self;
switch (ty.zigTypeTag()) {
.Void => unreachable,
.NoReturn => unreachable,
@ -1364,6 +1368,7 @@ fn getRelocDbgInfoSubprogramHighPC() u32 {
}
fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
_ = self;
const directory_entry_format_count = 1;
const file_name_entry_format_count = 1;
const directory_count = 1;
@ -1378,6 +1383,7 @@ fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
}
fn dbgInfoNeededHeaderBytes(self: DebugSymbols) u32 {
_ = self;
return 120;
}

Some files were not shown because too many files have changed in this diff.