Commit 7e30e83900 ("small fixes and zig fmt"), parent be71994fb1.
Mirror of https://github.com/ziglang/zig.git, synced 2024-11-27 15:42:49 +00:00.
@@ -30,10 +30,8 @@ pub const AutoResetEvent = struct {
// std.ResetEvent.wait() |
// | std.ResetEvent.set()
// | std.ResetEvent.set()
// std.ResetEvent.reset() |
// std.ResetEvent.reset() |
// std.ResetEvent.wait() | (missed the second .set() notification above)

state: usize = UNSET,

const UNSET = 0;

@@ -70,7 +68,7 @@ pub const AutoResetEvent = struct {
if (state != UNSET) {
unreachable; // multiple waiting threads on the same AutoResetEvent
}

// lazily initialize the ResetEvent if it hasn't been already
if (!has_reset_event) {
has_reset_event = true;

@@ -78,7 +76,7 @@ pub const AutoResetEvent = struct {
}

// Since the AutoResetEvent currently isnt set,
// try to register our ResetEvent on it to wait
// for a set() call from another thread.
if (@cmpxchgWeak(
usize,

@@ -121,7 +119,7 @@ pub const AutoResetEvent = struct {
unreachable; // multiple waiting threads on the same AutoResetEvent observed when timing out
}

// This menas a set() thread saw our ResetEvent pointer, acquired it, and is trying to wake it up.
// We need to wait for it to wake up our ResetEvent before we can return and invalidate it.
// We don't return error.TimedOut here as it technically notified us while we were "timing out".
reset_event.wait();

@@ -137,7 +135,7 @@ pub const AutoResetEvent = struct {
return;
}

// If the AutoResetEvent isn't set,
// then try to leave a notification for the wait() thread that we set() it.
if (state == UNSET) {
state = @cmpxchgWeak(

@@ -226,4 +224,4 @@ test "std.AutoResetEvent" {

send_thread.wait();
recv_thread.wait();
}
}
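Note: the interleaving sketched in the comment above is the motivation for AutoResetEvent: a set() that arrives while no waiter is registered is stored as a single pending notification instead of being lost. A minimal usage sketch, assuming the std.AutoResetEvent API of this vintage (default initialization plus set()/wait()):

const std = @import("std");

test "AutoResetEvent sketch: a set() before wait() is not lost" {
    var event = std.AutoResetEvent{};
    event.set(); // no waiter yet, so one notification is remembered
    event.wait(); // consumes the stored notification and returns immediately
    // another wait() here would block until some other thread calls set()
}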
@@ -34,4 +34,3 @@ pub const pthread_attr_t = extern struct {
};

pub extern "c" fn posix_memalign(memptr: *?*c_void, alignment: usize, size: usize) c_int;
@@ -316,7 +316,7 @@ pub fn InflateStream(comptime ReaderType: type) type {
comptime {
@setEvalBranchQuota(100000);

-const len_lengths = //
+const len_lengths =
[_]u16{8} ** 144 ++
[_]u16{9} ** 112 ++
[_]u16{7} ** 24 ++
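For reference, the lengths being concatenated above are the fixed literal/length code lengths of RFC 1951 §3.2.6; the hunk cuts off before the last group, which the RFC defines as eight more 8-bit codes. A sketch under that assumption:

const std = @import("std");

// symbols 0..143 -> 8 bits, 144..255 -> 9 bits, 256..279 -> 7 bits, 280..287 -> 8 bits
const fixed_lit_lengths = [_]u16{8} ** 144 ++ [_]u16{9} ** 112 ++ [_]u16{7} ** 24 ++ [_]u16{8} ** 8;

comptime {
    std.debug.assert(fixed_lit_lengths.len == 288); // 144 + 112 + 24 + 8
}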
@@ -1127,7 +1127,7 @@ test "std.hash_map put" {
test "std.hash_map putAssumeCapacity" {
var map = AutoHashMap(u32, u32).init(std.testing.allocator);
defer map.deinit();

try map.ensureCapacity(20);
var i: u32 = 0;
while (i < 20) : (i += 1) {
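Note: the test relies on the ensureCapacity/putAssumeCapacity contract: capacity is reserved once up front, after which inserts may skip the growth check and cannot fail. A rough sketch of that pairing; the key/value numbers are made up:

const std = @import("std");

test "putAssumeCapacity sketch" {
    var map = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer map.deinit();

    try map.ensureCapacity(20); // the only step that can allocate or fail
    var i: u32 = 0;
    while (i < 20) : (i += 1) {
        map.putAssumeCapacity(i, i * 10); // must stay within the reserved capacity
    }
    std.testing.expectEqual(@as(u32, 50), map.get(5).?);
}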
@@ -184,8 +184,12 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
const total_requested_bytes_init = if (config.enable_memory_limit) @as(usize, 0) else {};
const requested_memory_limit_init = if (config.enable_memory_limit) @as(usize, math.maxInt(usize)) else {};

-const mutex_init = if (config.MutexType) |T| T{} else
-if (config.thread_safe) std.Mutex{} else std.mutex.Dummy{};
+const mutex_init = if (config.MutexType) |T|
+T{}
+else if (config.thread_safe)
+std.Mutex{}
+else
+std.mutex.Dummy{};

const stack_n = config.stack_trace_frames;
const one_trace_size = @sizeOf(usize) * stack_n;

@@ -865,9 +869,9 @@ test "realloc large object to small object" {
}

test "overrideable mutexes" {
-var gpa = GeneralPurposeAllocator(.{.MutexType = std.Mutex}){
+var gpa = GeneralPurposeAllocator(.{ .MutexType = std.Mutex }){
.backing_allocator = std.testing.allocator,
-.mutex = std.Mutex{}
+.mutex = std.Mutex{},
};
defer std.testing.expect(!gpa.deinit());
const allocator = &gpa.allocator;
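For context, the reformatted mutex_init above picks one of three locks. A hypothetical illustration of what each Config combination resolves to, using only the field names visible in this diff (MutexType, thread_safe):

const std = @import("std");
const GeneralPurposeAllocator = std.heap.GeneralPurposeAllocator;

// explicit override, as exercised by the "overrideable mutexes" test above
const WithOverride = GeneralPurposeAllocator(.{ .MutexType = std.Mutex });
// no override: thread_safe selects std.Mutex ...
const ThreadSafe = GeneralPurposeAllocator(.{ .thread_safe = true });
// ... while a non-thread-safe build gets the no-op std.mutex.Dummy
const NotThreadSafe = GeneralPurposeAllocator(.{ .thread_safe = false });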
@@ -42,7 +42,6 @@ pub const uuid_command = extern struct {
uuid: [16]u8,
};

/// The version_min_command contains the min OS version on which this
/// binary was built to run.
pub const version_min_command = extern struct {
@@ -226,43 +226,55 @@ pub fn Sentinel(comptime T: type, comptime sentinel_val: Elem(T)) type {
switch (@typeInfo(T)) {
.Pointer => |info| switch (info.size) {
.One => switch (@typeInfo(info.child)) {
.Array => |array_info| return @Type(.{ .Pointer = .{
.Array => |array_info| return @Type(.{
.Pointer = .{
.size = info.size,
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
.child = @Type(.{
.Array = .{
.len = array_info.len,
.child = array_info.child,
.sentinel = sentinel_val,
},
}),
.is_allowzero = info.is_allowzero,
.sentinel = info.sentinel,
},
}),
else => {},
},
.Many, .Slice => return @Type(.{
.Pointer = .{
.size = info.size,
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
.child = @Type(.{ .Array = .{
.len = array_info.len,
.child = array_info.child,
.sentinel = sentinel_val,
}}),
.child = info.child,
.is_allowzero = info.is_allowzero,
.sentinel = info.sentinel,
}}),
else => {},
},
.Many, .Slice => return @Type(.{ .Pointer = .{
.size = info.size,
.is_const = info.is_const,
.is_volatile = info.is_volatile,
.alignment = info.alignment,
.child = info.child,
.is_allowzero = info.is_allowzero,
.sentinel = sentinel_val,
}}),
.sentinel = sentinel_val,
},
}),
else => {},
},
.Optional => |info| switch (@typeInfo(info.child)) {
.Pointer => |ptr_info| switch (ptr_info.size) {
.Many => return @Type(.{ .Optional = .{ .child = @Type(.{ .Pointer = .{
.size = ptr_info.size,
.is_const = ptr_info.is_const,
.is_volatile = ptr_info.is_volatile,
.alignment = ptr_info.alignment,
.child = ptr_info.child,
.is_allowzero = ptr_info.is_allowzero,
.sentinel = sentinel_val,
}})}}),
.Many => return @Type(.{
.Optional = .{
.child = @Type(.{
.Pointer = .{
.size = ptr_info.size,
.is_const = ptr_info.is_const,
.is_volatile = ptr_info.is_volatile,
.alignment = ptr_info.alignment,
.child = ptr_info.child,
.is_allowzero = ptr_info.is_allowzero,
.sentinel = sentinel_val,
},
}),
},
}),
else => {},
},
else => {},

@@ -296,17 +308,17 @@ pub fn assumeSentinel(p: anytype, comptime sentinel_val: Elem(@TypeOf(p))) Senti
}

test "std.meta.assumeSentinel" {
-testing.expect([*:0]u8 == @TypeOf(assumeSentinel(@as([*]u8 , undefined), 0)));
-testing.expect([:0]u8 == @TypeOf(assumeSentinel(@as([]u8 , undefined), 0)));
-testing.expect([*:0]const u8 == @TypeOf(assumeSentinel(@as([*]const u8, undefined), 0)));
-testing.expect([:0]const u8 == @TypeOf(assumeSentinel(@as([]const u8 , undefined), 0)));
-testing.expect([*:0]u16 == @TypeOf(assumeSentinel(@as([*]u16 , undefined), 0)));
-testing.expect([:0]const u16 == @TypeOf(assumeSentinel(@as([]const u16, undefined), 0)));
-testing.expect([*:3]u8 == @TypeOf(assumeSentinel(@as([*:1]u8 , undefined), 3)));
-testing.expect([:null]?[*]u8 == @TypeOf(assumeSentinel(@as([]?[*]u8 , undefined), null)));
-testing.expect([*:null]?[*]u8 == @TypeOf(assumeSentinel(@as([*]?[*]u8 , undefined), null)));
-testing.expect(*[10:0]u8 == @TypeOf(assumeSentinel(@as(*[10]u8 , undefined), 0)));
-testing.expect(?[*:0]u8 == @TypeOf(assumeSentinel(@as(?[*]u8 , undefined), 0)));
+testing.expect([*:0]u8 == @TypeOf(assumeSentinel(@as([*]u8, undefined), 0)));
+testing.expect([:0]u8 == @TypeOf(assumeSentinel(@as([]u8, undefined), 0)));
+testing.expect([*:0]const u8 == @TypeOf(assumeSentinel(@as([*]const u8, undefined), 0)));
+testing.expect([:0]const u8 == @TypeOf(assumeSentinel(@as([]const u8, undefined), 0)));
+testing.expect([*:0]u16 == @TypeOf(assumeSentinel(@as([*]u16, undefined), 0)));
+testing.expect([:0]const u16 == @TypeOf(assumeSentinel(@as([]const u16, undefined), 0)));
+testing.expect([*:3]u8 == @TypeOf(assumeSentinel(@as([*:1]u8, undefined), 3)));
+testing.expect([:null]?[*]u8 == @TypeOf(assumeSentinel(@as([]?[*]u8, undefined), null)));
+testing.expect([*:null]?[*]u8 == @TypeOf(assumeSentinel(@as([*]?[*]u8, undefined), null)));
+testing.expect(*[10:0]u8 == @TypeOf(assumeSentinel(@as(*[10]u8, undefined), 0)));
+testing.expect(?[*:0]u8 == @TypeOf(assumeSentinel(@as(?[*]u8, undefined), 0)));
}

pub fn containerLayout(comptime T: type) TypeInfo.ContainerLayout {
@@ -38,7 +38,7 @@ pub const Mutex = if (builtin.single_threaded)
else if (builtin.os.tag == .windows)
WindowsMutex
else if (builtin.link_libc or builtin.os.tag == .linux)
// stack-based version of https://github.com/Amanieu/parking_lot/blob/master/core/src/word_lock.rs
struct {
state: usize = 0,
@@ -50,7 +50,7 @@ pub fn getauxval(index: usize) usize {

// Some architectures (and some syscalls) require 64bit parameters to be passed
// in a even-aligned register pair.
-const require_aligned_register_pair = //
+const require_aligned_register_pair =
std.Target.current.cpu.arch.isMIPS() or
std.Target.current.cpu.arch.isARM() or
std.Target.current.cpu.arch.isThumb();
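Note: where such an even-aligned pair is required, a 64-bit argument is split into 32-bit halves before being loaded into consecutive registers. A rough sketch of just the split, using this Zig version's @truncate form and a made-up value:

const example_offset: u64 = 0x0000_0001_0000_002a; // hypothetical value, not from the diff
const lo = @truncate(u32, example_offset); // 0x0000002a
const hi = @truncate(u32, example_offset >> 32); // 0x00000001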
@@ -31,7 +31,7 @@ pub const IO_Uring = struct {
pub fn init(entries: u12, flags: u32) !IO_Uring {
var params = mem.zeroInit(io_uring_params, .{
.flags = flags,
-.sq_thread_idle = 1000
+.sq_thread_idle = 1000,
});
return try IO_Uring.init_params(entries, &params);
}
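Note: most of the changes in this file are like the one above: a trailing comma is added after the last field or argument, which is what makes zig fmt keep the list in multi-line form. A small self-contained illustration with a hypothetical helper:

const std = @import("std");

fn sum3(a: u32, b: u32, c: u32) u32 {
    return a + b + c;
}

test "trailing comma formatting sketch" {
    // no trailing comma: zig fmt renders this call on a single line
    const x = sum3(1, 2, 3);
    // trailing comma after the last argument: zig fmt keeps one argument per line
    const y = sum3(
        1,
        2,
        3,
    );
    std.testing.expectEqual(x, y);
}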
@@ -69,7 +69,7 @@ pub const IO_Uring = struct {
// or a container seccomp policy prohibits io_uring syscalls:
linux.EPERM => return error.PermissionDenied,
linux.ENOSYS => return error.SystemOutdated,
-else => |errno| return os.unexpectedErrno(errno)
+else => |errno| return os.unexpectedErrno(errno),
}
const fd = @intCast(os.fd_t, res);
assert(fd >= 0);

@@ -117,12 +117,12 @@ pub const IO_Uring = struct {
assert(cq.overflow.* == 0);
assert(cq.cqes.len == p.cq_entries);

-return IO_Uring {
+return IO_Uring{
.fd = fd,
.sq = sq,
.cq = cq,
.flags = p.flags,
-.features = p.features
+.features = p.features,
};
}

@@ -207,7 +207,7 @@ pub const IO_Uring = struct {
// The operation was interrupted by a delivery of a signal before it could complete.
// This can happen while waiting for events with IORING_ENTER_GETEVENTS:
linux.EINTR => return error.SignalInterrupt,
-else => |errno| return os.unexpectedErrno(errno)
+else => |errno| return os.unexpectedErrno(errno),
}
return @intCast(u32, res);
}

@@ -369,7 +369,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
buffer: []u8,
-offset: u64
+offset: u64,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_read(sqe, fd, buffer, offset);

@@ -384,7 +384,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
buffer: []const u8,
-offset: u64
+offset: u64,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_write(sqe, fd, buffer, offset);

@@ -401,7 +401,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
iovecs: []const os.iovec,
-offset: u64
+offset: u64,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_readv(sqe, fd, iovecs, offset);

@@ -418,7 +418,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
iovecs: []const os.iovec_const,
-offset: u64
+offset: u64,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_writev(sqe, fd, iovecs, offset);

@@ -434,7 +434,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
addr: *os.sockaddr,
addrlen: *os.socklen_t,
-flags: u32
+flags: u32,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_accept(sqe, fd, addr, addrlen, flags);

@@ -449,7 +449,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
addr: *const os.sockaddr,
-addrlen: os.socklen_t
+addrlen: os.socklen_t,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_connect(sqe, fd, addr, addrlen);

@@ -464,7 +464,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
buffer: []u8,
-flags: u32
+flags: u32,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_recv(sqe, fd, buffer, flags);

@@ -479,7 +479,7 @@ pub const IO_Uring = struct {
user_data: u64,
fd: os.fd_t,
buffer: []const u8,
-flags: u32
+flags: u32,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_send(sqe, fd, buffer, flags);

@@ -495,7 +495,7 @@ pub const IO_Uring = struct {
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
-mode: os.mode_t
+mode: os.mode_t,
) !*io_uring_sqe {
const sqe = try self.get_sqe();
io_uring_prep_openat(sqe, fd, path, flags, mode);

@@ -529,7 +529,7 @@ pub const IO_Uring = struct {
self.fd,
.REGISTER_FILES,
@ptrCast(*const c_void, fds.ptr),
-@intCast(u32, fds.len)
+@intCast(u32, fds.len),
);
switch (linux.getErrno(res)) {
0 => {},

@@ -548,7 +548,7 @@ pub const IO_Uring = struct {
linux.ENOMEM => return error.SystemResources,
// Attempt to register files on a ring already registering files or being torn down:
linux.ENXIO => return error.RingShuttingDownOrAlreadyRegisteringFiles,
-else => |errno| return os.unexpectedErrno(errno)
+else => |errno| return os.unexpectedErrno(errno),
}
}

@@ -559,7 +559,7 @@ pub const IO_Uring = struct {
switch (linux.getErrno(res)) {
0 => {},
linux.ENXIO => return error.FilesNotRegistered,
-else => |errno| return os.unexpectedErrno(errno)
+else => |errno| return os.unexpectedErrno(errno),
}
}
};
@@ -581,13 +581,13 @@ pub const SubmissionQueue = struct {
// This allows us to amortize the cost of the @atomicStore to `tail` across multiple SQEs.
sqe_head: u32 = 0,
sqe_tail: u32 = 0,

pub fn init(fd: os.fd_t, p: io_uring_params) !SubmissionQueue {
assert(fd >= 0);
assert((p.features & linux.IORING_FEAT_SINGLE_MMAP) != 0);
const size = std.math.max(
p.sq_off.array + p.sq_entries * @sizeOf(u32),
-p.cq_off.cqes + p.cq_entries * @sizeOf(io_uring_cqe)
+p.cq_off.cqes + p.cq_entries * @sizeOf(io_uring_cqe),
);
const mmap = try os.mmap(
null,

@@ -620,9 +620,9 @@ pub const SubmissionQueue = struct {
// see https://github.com/torvalds/linux/blob/v5.8/fs/io_uring.c#L7843-L7844.
assert(
p.sq_entries ==
-@ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*
+@ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_entries])).*,
);
-return SubmissionQueue {
+return SubmissionQueue{
.head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.head])),
.tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.tail])),
.mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.sq_off.ring_mask])).*,

@@ -631,7 +631,7 @@ pub const SubmissionQueue = struct {
.array = array[0..p.sq_entries],
.sqes = sqes[0..p.sq_entries],
.mmap = mmap,
-.mmap_sqes = mmap_sqes
+.mmap_sqes = mmap_sqes,
};
}

@@ -654,18 +654,16 @@ pub const CompletionQueue = struct {
const mmap = sq.mmap;
const cqes = @ptrCast(
[*]io_uring_cqe,
-@alignCast(@alignOf(io_uring_cqe), &mmap[p.cq_off.cqes])
+@alignCast(@alignOf(io_uring_cqe), &mmap[p.cq_off.cqes]),
);
-assert(
-p.cq_entries ==
-@ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*
-);
-return CompletionQueue {
+assert(p.cq_entries ==
+@ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_entries])).*);
+return CompletionQueue{
.head = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.head])),
.tail = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.tail])),
.mask = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.ring_mask])).*,
.overflow = @ptrCast(*u32, @alignCast(@alignOf(u32), &mmap[p.cq_off.overflow])),
-.cqes = cqes[0..p.cq_entries]
+.cqes = cqes[0..p.cq_entries],
};
}
@@ -689,7 +687,7 @@ pub fn io_uring_prep_nop(sqe: *io_uring_sqe) void {
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
};
}

@@ -707,7 +705,7 @@ pub fn io_uring_prep_fsync(sqe: *io_uring_sqe, fd: os.fd_t, flags: u32) void {
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
};
}

@@ -717,7 +715,7 @@ pub fn io_uring_prep_rw(
fd: os.fd_t,
addr: anytype,
len: usize,
-offset: u64
+offset: u64,
) void {
sqe.* = .{
.opcode = op,

@@ -732,7 +730,7 @@ pub fn io_uring_prep_rw(
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
};
}

@@ -748,7 +746,7 @@ pub fn io_uring_prep_readv(
sqe: *io_uring_sqe,
fd: os.fd_t,
iovecs: []const os.iovec,
-offset: u64
+offset: u64,
) void {
io_uring_prep_rw(.READV, sqe, fd, iovecs.ptr, iovecs.len, offset);
}

@@ -757,7 +755,7 @@ pub fn io_uring_prep_writev(
sqe: *io_uring_sqe,
fd: os.fd_t,
iovecs: []const os.iovec_const,
-offset: u64
+offset: u64,
) void {
io_uring_prep_rw(.WRITEV, sqe, fd, iovecs.ptr, iovecs.len, offset);
}

@@ -767,7 +765,7 @@ pub fn io_uring_prep_accept(
fd: os.fd_t,
addr: *os.sockaddr,
addrlen: *os.socklen_t,
-flags: u32
+flags: u32,
) void {
// `addr` holds a pointer to `sockaddr`, and `addr2` holds a pointer to socklen_t`.
// `addr2` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).

@@ -779,7 +777,7 @@ pub fn io_uring_prep_connect(
sqe: *io_uring_sqe,
fd: os.fd_t,
addr: *const os.sockaddr,
-addrlen: os.socklen_t
+addrlen: os.socklen_t,
) void {
// `addrlen` maps to `sqe.off` (u64) instead of `sqe.len` (which is only a u32).
io_uring_prep_rw(.CONNECT, sqe, fd, addr, 0, addrlen);

@@ -800,7 +798,7 @@ pub fn io_uring_prep_openat(
fd: os.fd_t,
path: [*:0]const u8,
flags: u32,
-mode: os.mode_t
+mode: os.mode_t,
) void {
io_uring_prep_rw(.OPENAT, sqe, fd, path, mode, 0);
sqe.rw_flags = flags;

@@ -820,7 +818,7 @@ pub fn io_uring_prep_close(sqe: *io_uring_sqe, fd: os.fd_t) void {
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
};
}
@@ -845,7 +843,7 @@ test "nop" {
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer {
ring.deinit();

@@ -853,7 +851,7 @@ test "nop" {
}

const sqe = try ring.nop(0xaaaaaaaa);
-testing.expectEqual(io_uring_sqe {
+testing.expectEqual(io_uring_sqe{
.opcode = .NOP,
.flags = 0,
.ioprio = 0,

@@ -866,7 +864,7 @@ test "nop" {
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
}, sqe.*);

testing.expectEqual(@as(u32, 0), ring.sq.sqe_head);

@@ -883,10 +881,10 @@ test "nop" {
testing.expectEqual(@as(u32, 0), ring.cq.head.*);
testing.expectEqual(@as(u32, 0), ring.sq_ready());

-testing.expectEqual(io_uring_cqe {
+testing.expectEqual(io_uring_cqe{
.user_data = 0xaaaaaaaa,
.res = 0,
-.flags = 0
+.flags = 0,
}, try ring.copy_cqe());
testing.expectEqual(@as(u32, 1), ring.cq.head.*);
testing.expectEqual(@as(u32, 0), ring.cq_ready());

@@ -894,10 +892,10 @@ test "nop" {
const sqe_barrier = try ring.nop(0xbbbbbbbb);
sqe_barrier.flags |= linux.IOSQE_IO_DRAIN;
testing.expectEqual(@as(u32, 1), try ring.submit());
-testing.expectEqual(io_uring_cqe {
+testing.expectEqual(io_uring_cqe{
.user_data = 0xbbbbbbbb,
.res = 0,
-.flags = 0
+.flags = 0,
}, try ring.copy_cqe());
testing.expectEqual(@as(u32, 2), ring.sq.sqe_head);
testing.expectEqual(@as(u32, 2), ring.sq.sqe_tail);

@@ -911,7 +909,7 @@ test "readv" {
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

@@ -930,14 +928,14 @@ test "readv" {
try ring.register_files(registered_fds[0..]);

var buffer = [_]u8{42} ** 128;
-var iovecs = [_]os.iovec{ os.iovec { .iov_base = &buffer, .iov_len = buffer.len } };
+var iovecs = [_]os.iovec{os.iovec{ .iov_base = &buffer, .iov_len = buffer.len }};
const sqe = try ring.readv(0xcccccccc, fd_index, iovecs[0..], 0);
testing.expectEqual(linux.IORING_OP.READV, sqe.opcode);
sqe.flags |= linux.IOSQE_FIXED_FILE;

testing.expectError(error.SubmissionQueueFull, ring.nop(0));
testing.expectEqual(@as(u32, 1), try ring.submit());
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xcccccccc,
.res = buffer.len,
.flags = 0,

@@ -953,10 +951,10 @@ test "writev/fsync/readv" {
var ring = IO_Uring.init(4, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

const path = "test_io_uring_writev_fsync_readv";
const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
defer file.close();

@@ -964,19 +962,19 @@ test "writev/fsync/readv" {
const fd = file.handle;

const buffer_write = [_]u8{42} ** 128;
-const iovecs_write = [_]os.iovec_const {
-os.iovec_const { .iov_base = &buffer_write, .iov_len = buffer_write.len }
+const iovecs_write = [_]os.iovec_const{
+os.iovec_const{ .iov_base = &buffer_write, .iov_len = buffer_write.len },
};
var buffer_read = [_]u8{0} ** 128;
-var iovecs_read = [_]os.iovec {
-os.iovec { .iov_base = &buffer_read, .iov_len = buffer_read.len }
+var iovecs_read = [_]os.iovec{
+os.iovec{ .iov_base = &buffer_read, .iov_len = buffer_read.len },
};

const sqe_writev = try ring.writev(0xdddddddd, fd, iovecs_write[0..], 17);
testing.expectEqual(linux.IORING_OP.WRITEV, sqe_writev.opcode);
testing.expectEqual(@as(u64, 17), sqe_writev.off);
sqe_writev.flags |= linux.IOSQE_IO_LINK;

const sqe_fsync = try ring.fsync(0xeeeeeeee, fd, 0);
testing.expectEqual(linux.IORING_OP.FSYNC, sqe_fsync.opcode);
testing.expectEqual(fd, sqe_fsync.fd);

@@ -991,21 +989,21 @@ test "writev/fsync/readv" {
testing.expectEqual(@as(u32, 0), ring.sq_ready());
testing.expectEqual(@as(u32, 3), ring.cq_ready());

-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xdddddddd,
.res = buffer_write.len,
.flags = 0,
}, try ring.copy_cqe());
testing.expectEqual(@as(u32, 2), ring.cq_ready());

-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xeeeeeeee,
.res = 0,
.flags = 0,
}, try ring.copy_cqe());
testing.expectEqual(@as(u32, 1), ring.cq_ready());

-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xffffffff,
.res = buffer_read.len,
.flags = 0,

@@ -1021,10 +1019,10 @@ test "write/read" {
var ring = IO_Uring.init(2, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

const path = "test_io_uring_write_read";
const file = try std.fs.cwd().createFile(path, .{ .read = true, .truncate = true });
defer file.close();

@@ -1048,12 +1046,12 @@ test "write/read" {
// https://lwn.net/Articles/809820/
if (cqe_write.res == -linux.EINVAL) return error.SkipZigTest;
if (cqe_read.res == -linux.EINVAL) return error.SkipZigTest;
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x11111111,
.res = buffer_write.len,
.flags = 0,
}, cqe_write);
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x22222222,
.res = buffer_read.len,
.flags = 0,

@@ -1067,7 +1065,7 @@ test "openat" {
var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

@@ -1077,7 +1075,7 @@ test "openat" {
const flags: u32 = os.O_CLOEXEC | os.O_RDWR | os.O_CREAT;
const mode: os.mode_t = 0o666;
const sqe_openat = try ring.openat(0x33333333, linux.AT_FDCWD, path, flags, mode);
-testing.expectEqual(io_uring_sqe {
+testing.expectEqual(io_uring_sqe{
.opcode = .OPENAT,
.flags = 0,
.ioprio = 0,

@@ -1090,7 +1088,7 @@ test "openat" {
.buf_index = 0,
.personality = 0,
.splice_fd_in = 0,
-.__pad2 = [2]u64{ 0, 0 }
+.__pad2 = [2]u64{ 0, 0 },
}, sqe_openat.*);
testing.expectEqual(@as(u32, 1), try ring.submit());

@@ -1103,7 +1101,7 @@ test "openat" {
if (cqe_openat.res == -linux.EBADF and (ring.features & linux.IORING_FEAT_RW_CUR_POS) == 0) {
return error.SkipZigTest;
}
-if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{ cqe_openat.res });
+if (cqe_openat.res <= 0) std.debug.print("\ncqe_openat.res={}\n", .{cqe_openat.res});
testing.expect(cqe_openat.res > 0);
testing.expectEqual(@as(u32, 0), cqe_openat.flags);

@@ -1112,14 +1110,14 @@ test "openat" {

test "close" {
if (builtin.os.tag != .linux) return error.SkipZigTest;

var ring = IO_Uring.init(1, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

const path = "test_io_uring_close";
const file = try std.fs.cwd().createFile(path, .{});
errdefer file.close();

@@ -1132,7 +1130,7 @@ test "close" {

const cqe_close = try ring.copy_cqe();
if (cqe_close.res == -linux.EINVAL) return error.SkipZigTest;
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0x44444444,
.res = 0,
.flags = 0,

@@ -1145,7 +1143,7 @@ test "accept/connect/send/recv" {
var ring = IO_Uring.init(16, 0) catch |err| switch (err) {
error.SystemOutdated => return error.SkipZigTest,
error.PermissionDenied => return error.SkipZigTest,
-else => return err
+else => return err,
};
defer ring.deinit();

@@ -1157,8 +1155,8 @@ test "accept/connect/send/recv" {
try os.bind(server, &address.any, address.getOsSockLen());
try os.listen(server, kernel_backlog);

-const buffer_send = [_]u8{ 1,0,1,0,1,0,1,0,1,0 };
-var buffer_recv = [_]u8{ 0,1,0,1,0 };
+const buffer_send = [_]u8{ 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
+var buffer_recv = [_]u8{ 0, 1, 0, 1, 0 };

var accept_addr: os.sockaddr = undefined;
var accept_addr_len: os.socklen_t = @sizeOf(@TypeOf(accept_addr));

@@ -1184,10 +1182,10 @@ test "accept/connect/send/recv" {
}

testing.expectEqual(@as(u64, 0xaaaaaaaa), cqe_accept.user_data);
-if (cqe_accept.res <= 0) std.debug.print("\ncqe_accept.res={}\n", .{ cqe_accept.res });
+if (cqe_accept.res <= 0) std.debug.print("\ncqe_accept.res={}\n", .{cqe_accept.res});
testing.expect(cqe_accept.res > 0);
testing.expectEqual(@as(u32, 0), cqe_accept.flags);
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xcccccccc,
.res = 0,
.flags = 0,

@@ -1200,7 +1198,7 @@ test "accept/connect/send/recv" {

const cqe_send = try ring.copy_cqe();
if (cqe_send.res == -linux.EINVAL) return error.SkipZigTest;
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xeeeeeeee,
.res = buffer_send.len,
.flags = 0,

@@ -1208,7 +1206,7 @@ test "accept/connect/send/recv" {

const cqe_recv = try ring.copy_cqe();
if (cqe_recv.res == -linux.EINVAL) return error.SkipZigTest;
-testing.expectEqual(linux.io_uring_cqe {
+testing.expectEqual(linux.io_uring_cqe{
.user_data = 0xffffffff,
.res = buffer_recv.len,
.flags = 0,
@@ -24,7 +24,7 @@ test "fallocate" {
0 => {},
linux.ENOSYS => return error.SkipZigTest,
linux.EOPNOTSUPP => return error.SkipZigTest,
-else => |errno| std.debug.panic("unhandled errno: {}", .{ errno }),
+else => |errno| std.debug.panic("unhandled errno: {}", .{errno}),
}

expect((try file.stat()).size == len);
@@ -570,13 +570,13 @@ pub fn SetCurrentDirectory(path_name: []const u16) SetCurrentDirectoryError!void
const path_len_bytes = math.cast(u16, path_name.len * 2) catch |err| switch (err) {
error.Overflow => return error.NameTooLong,
};

var nt_name = UNICODE_STRING{
.Length = path_len_bytes,
.MaximumLength = path_len_bytes,
.Buffer = @intToPtr([*]u16, @ptrToInt(path_name.ptr)),
};

const rc = ntdll.RtlSetCurrentDirectory_U(&nt_name);
switch (rc) {
.SUCCESS => {},
@@ -112,6 +112,4 @@ pub extern "NtDll" fn NtWaitForKeyedEvent(
Timeout: ?*LARGE_INTEGER,
) callconv(WINAPI) NTSTATUS;

-pub extern "NtDll" fn RtlSetCurrentDirectory_U(
-PathName: *UNICODE_STRING
-) callconv(WINAPI) NTSTATUS;
+pub extern "NtDll" fn RtlSetCurrentDirectory_U(PathName: *UNICODE_STRING) callconv(WINAPI) NTSTATUS;
@@ -468,7 +468,6 @@ test "std.PriorityQueue: update min heap" {
expectEqual(@as(u32, 5), queue.remove());
}

test "std.PriorityQueue: update same min heap" {
var queue = PQ.init(testing.allocator, lessThan);
defer queue.deinit();

@@ -514,4 +513,4 @@ test "std.PriorityQueue: update same max heap" {
expectEqual(@as(u32, 4), queue.remove());
expectEqual(@as(u32, 2), queue.remove());
expectEqual(@as(u32, 1), queue.remove());
}
}
@@ -212,7 +212,7 @@ pub const NativeTargetInfo = struct {
const uts = std.os.uname();
const release = mem.spanZ(&uts.release);
// The release field sometimes has a weird format,
// `Version.parse` will attempt to find some meaningful interpretation.
if (std.builtin.Version.parse(release)) |ver| {
os.version_range.linux.range.min = ver;
os.version_range.linux.range.max = ver;

@@ -237,7 +237,7 @@ pub const NativeTargetInfo = struct {
// `---` `` ``--> Sub-version (Starting from Windows 10 onwards)
// \ `--> Service pack (Always zero in the constants defined)
// `--> OS version (Major & minor)
-const os_ver: u16 = //
+const os_ver: u16 =
@intCast(u16, version_info.dwMajorVersion & 0xff) << 8 |
@intCast(u16, version_info.dwMinorVersion & 0xff);
const sp_ver: u8 = 0;
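A worked instance of the packing above, with illustrative values: Windows 10 reports dwMajorVersion = 10 and dwMinorVersion = 0, which packs to 0x0A00.

const std = @import("std");

test "os_ver packing sketch" {
    const major: u32 = 10; // stands in for version_info.dwMajorVersion
    const minor: u32 = 0; // stands in for version_info.dwMinorVersion
    const os_ver: u16 = @intCast(u16, major & 0xff) << 8 | @intCast(u16, minor & 0xff);
    std.testing.expectEqual(@as(u16, 0x0A00), os_ver);
}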
@@ -572,11 +572,11 @@ pub const File = struct {

if (!base.options.disable_lld_caching) {
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
-std.log.warn("failed to save archive hash digest file: {}", .{@errorName(err)});
+log.warn("failed to save archive hash digest file: {}", .{@errorName(err)});
};

man.writeManifest() catch |err| {
-std.log.warn("failed to write cache manifest when archiving: {}", .{@errorName(err)});
+log.warn("failed to write cache manifest when archiving: {}", .{@errorName(err)});
};

base.lock = man.toOwnedLock();
@@ -1205,7 +1205,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}

if (stderr.len != 0) {
-std.log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}
}

@@ -1214,11 +1214,11 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
-std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
+log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
-std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
+log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
@@ -1684,7 +1684,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}

if (stderr.len != 0) {
-std.log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}

@@ -1692,11 +1692,11 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
-std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
+log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
-std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
+log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
@@ -673,15 +673,15 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
self.base.allocator.free(result.stderr);
}
if (result.stdout.len != 0) {
-std.log.warn("unexpected LD stdout: {}", .{result.stdout});
+log.warn("unexpected LD stdout: {}", .{result.stdout});
}
if (result.stderr.len != 0) {
-std.log.warn("unexpected LD stderr: {}", .{result.stderr});
+log.warn("unexpected LD stderr: {}", .{result.stderr});
}
if (result.term != .Exited or result.term.Exited != 0) {
// TODO parse this output and surface with the Compilation API rather than
// directly outputting to stderr here.
-std.log.err("{}", .{result.stderr});
+log.err("{}", .{result.stderr});
return error.LDReportedFailure;
}
} else {

@@ -738,7 +738,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}

if (stderr.len != 0) {
-std.log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}

@@ -757,10 +757,10 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
// TODO We are in the position to be able to increase the padding by moving all sections
// by the required offset, but this requires a little bit more thinking and bookkeeping.
// For now, return an error informing the user of the problem.
-std.log.err("Not enough padding between load commands and start of __text section:\n", .{});
-std.log.err("Offset after last load command: 0x{x}\n", .{after_last_cmd_offset});
-std.log.err("Beginning of __text section: 0x{x}\n", .{text_section.offset});
-std.log.err("Needed size: 0x{x}\n", .{needed_size});
+log.err("Not enough padding between load commands and start of __text section:\n", .{});
+log.err("Offset after last load command: 0x{x}\n", .{after_last_cmd_offset});
+log.err("Beginning of __text section: 0x{x}\n", .{text_section.offset});
+log.err("Needed size: 0x{x}\n", .{needed_size});
return error.NotEnoughPadding;
}
const linkedit_segment = self.load_commands.items[self.linkedit_segment_cmd_index.?].Segment;

@@ -792,11 +792,11 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
-std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
+log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
-std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
+log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
@@ -455,7 +455,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
}

if (stderr.len != 0) {
-std.log.warn("unexpected LLD stderr:\n{s}", .{stderr});
+log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}

@@ -463,11 +463,11 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
-std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
+log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
-std.log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
+log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
@@ -3936,12 +3936,6 @@ void update_compile_var(CodeGen *g, Buf *name, ZigValue *value) {

void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
switch (node->type) {
-case NodeTypeContainerDecl:
-for (size_t i = 0; i < node->data.container_decl.decls.length; i += 1) {
-AstNode *child = node->data.container_decl.decls.at(i);
-scan_decls(g, decls_scope, child);
-}
-break;
case NodeTypeFnDef:
scan_decls(g, decls_scope, node->data.fn_def.fn_proto);
break;

@@ -3986,6 +3980,7 @@ void scan_decls(CodeGen *g, ScopeDecls *decls_scope, AstNode *node) {
case NodeTypeCompTime:
preview_comptime_decl(g, node, decls_scope);
break;
+case NodeTypeContainerDecl:
case NodeTypeNoSuspend:
case NodeTypeParamDecl:
case NodeTypeReturnExpr:
@@ -25310,24 +25310,6 @@ static Error ir_make_type_info_decls(IrAnalyze *ira, IrInst* source_instr, ZigVa
}

inner_fields[2]->data.x_union.payload = fn_decl_val;
break;
}
-case TldIdContainer:
-{
-ZigType *type_entry = ((TldContainer *)curr_entry->value)->type_entry;
-if ((err = type_resolve(ira->codegen, type_entry, ResolveStatusSizeKnown)))
-return ErrorSemanticAnalyzeFail;
-
-// This is a type.
-bigint_init_unsigned(&inner_fields[2]->data.x_union.tag, 0);
-
-ZigValue *payload = ira->codegen->pass1_arena->create<ZigValue>();
-payload->special = ConstValSpecialStatic;
-payload->type = ira->codegen->builtin_types.entry_type;
-payload->data.x_type = type_entry;
-
-inner_fields[2]->data.x_union.payload = payload;
-
-break;
-}
default:
@@ -550,8 +550,8 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
if (std.builtin.arch != .aarch64) {
doTheTestReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386));
doTheTestReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9));
}

doTheTestReduce(.Min, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, -386));

@@ -568,8 +568,8 @@ test "vector reduce operation" {
// LLVM 11 ERROR: Cannot select type
// https://github.com/ziglang/zig/issues/7138
if (std.builtin.arch != .aarch64) {
doTheTestReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567));
doTheTestReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999));
}

doTheTestReduce(.Max, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, 1234567));