fixups and zig fmt

Andrew Kelley 2019-11-08 01:52:23 -05:00
parent fbbcf2f30d
commit 2723ffc2b2
GPG Key ID: 7C5F548F728501A9
23 changed files with 309 additions and 279 deletions

View File

@@ -8,9 +8,16 @@ pub usingnamespace switch (builtin.os) {
.linux => @import("c/linux.zig"),
.windows => @import("c/windows.zig"),
.macosx, .ios, .tvos, .watchos => @import("c/darwin.zig"),
.freebsd => @import("c/freebsd.zig"),
.freebsd, .kfreebsd => @import("c/freebsd.zig"),
.netbsd => @import("c/netbsd.zig"),
.dragonfly => @import("c/dragonfly.zig"),
.openbsd => @import("c/openbsd.zig"),
.haiku => @import("c/haiku.zig"),
.hermit => @import("c/hermit.zig"),
.solaris => @import("c/solaris.zig"),
.fuchsia => @import("c/fuchsia.zig"),
.minix => @import("c/minix.zig"),
.emscripten => @import("c/emscripten.zig"),
else => struct {},
};
@@ -205,3 +212,16 @@ pub extern "c" fn dn_expand(
) c_int;
pub extern "c" fn sched_yield() c_int;
pub const PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{};
pub extern "c" fn pthread_mutex_lock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_unlock(mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_mutex_destroy(mutex: *pthread_mutex_t) c_int;
pub const PTHREAD_COND_INITIALIZER = pthread_cond_t{};
pub extern "c" fn pthread_cond_wait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t) c_int;
pub extern "c" fn pthread_cond_signal(cond: *pthread_cond_t) c_int;
pub extern "c" fn pthread_cond_destroy(cond: *pthread_cond_t) c_int;
pub const pthread_t = *@OpaqueType();
pub const FILE = @OpaqueType();

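Aside: with pthread_mutex_t and pthread_cond_t now defined per OS with default field values, PTHREAD_MUTEX_INITIALIZER and PTHREAD_COND_INITIALIZER are just default-constructed structs, so a statically initialized mutex/cond pair needs no pthread_*_init call. A minimal sketch, assuming libc is linked and using the 2019-era syntax of this commit (the helper and its ready flag are illustrative):

const std = @import("std");
const c = std.c;
const assert = std.debug.assert;

// Statically initialized, matching C's PTHREAD_*_INITIALIZER macros.
var mutex: c.pthread_mutex_t = c.PTHREAD_MUTEX_INITIALIZER;
var cond: c.pthread_cond_t = c.PTHREAD_COND_INITIALIZER;

// Illustrative helper: block on the condition variable until ready is set
// (the setter is expected to hold the same mutex and signal the cond).
fn waitUntilReady(ready: *const bool) void {
    assert(c.pthread_mutex_lock(&mutex) == 0);
    defer assert(c.pthread_mutex_unlock(&mutex) == 0);
    while (!ready.*) {
        assert(c.pthread_cond_wait(&cond, &mutex) == 0);
    }
}
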
View File

@@ -112,3 +112,19 @@ pub const EAI_PROTOCOL = 13;
/// argument buffer overflow
pub const EAI_OVERFLOW = 14;
pub const EAI_MAX = 15;
pub const pthread_mutex_t = extern struct {
__sig: c_long = 0x32AAABA7,
__opaque: [__PTHREAD_MUTEX_SIZE__]u8 = [_]u8{0} ** __PTHREAD_MUTEX_SIZE__,
};
pub const pthread_cond_t = extern struct {
__sig: c_long = 0x3CB0B1BB,
__opaque: [__PTHREAD_COND_SIZE__]u8 = [_]u8{0} ** __PTHREAD_COND_SIZE__,
};
const __PTHREAD_MUTEX_SIZE__ = if (@sizeOf(usize) == 8) 56 else 40;
const __PTHREAD_COND_SIZE__ = if (@sizeOf(usize) == 8) 40 else 24;
pub const pthread_attr_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
};

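Aside: these Darwin structs mirror Apple's _opaque_pthread_mutex_t and _opaque_pthread_cond_t (a long __sig followed by an opaque byte array). A hypothetical sanity check, only meaningful when targeting 64-bit Darwin, where the C headers report sizes of 64 and 48 bytes:

const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;

test "darwin pthread struct sizes" {
    // Hypothetical check: long __sig (8 bytes) plus the opaque arrays above
    // should give the same sizes as the C headers on 64-bit Darwin.
    if (builtin.os == .macosx and @sizeOf(usize) == 8) {
        assert(@sizeOf(std.c.pthread_mutex_t) == 64);
        assert(@sizeOf(std.c.pthread_cond_t) == 48);
    }
}
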
View File

@@ -1,6 +1,5 @@
const std = @import("../std.zig");
usingnamespace std.c;
extern "c" threadlocal var errno: c_int;
pub fn _errno() *c_int {
return &errno;
@@ -12,3 +11,15 @@ pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize
pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize, data: ?*c_void) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_attr_t = extern struct { // copied from freebsd
__size: [56]u8,
__align: c_long,
};

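Aside: _errno() above returns a pointer to libc's thread-local errno. A hedged sketch of how a caller reads it after a failing libc call (the helper name is illustrative; sched_yield is declared in c.zig earlier in this diff):

const std = @import("std");
const c = std.c;

// Illustrative helper: read the thread-local errno through _errno() after a
// libc call signals failure by returning nonzero.
fn yieldReportingErrno() c_int {
    if (c.sched_yield() != 0) {
        return c._errno().*;
    }
    return 0;
}
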
lib/std/c/emscripten.zig Normal file
View File

@@ -0,0 +1,8 @@
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(4) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = 28;

View File

@@ -10,3 +10,15 @@ pub extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize
pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize, data: ?*c_void) c_int;
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};

lib/std/c/fuchsia.zig Normal file
View File

@@ -0,0 +1,8 @@
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = 40;

lib/std/c/haiku.zig Normal file
View File

@@ -0,0 +1,14 @@
pub const pthread_mutex_t = extern struct {
flags: u32 = 0,
lock: i32 = 0,
unused: i32 = -42,
owner: i32 = -1,
owner_count: i32 = 0,
};
pub const pthread_cond_t = extern struct {
flags: u32 = 0,
unused: i32 = -42,
mutex: ?*c_void = null,
waiter_count: i32 = 0,
lock: i32 = 0,
};

lib/std/c/hermit.zig Normal file
View File

@@ -0,0 +1,6 @@
pub const pthread_mutex_t = extern struct {
inner: usize = ~usize(0),
};
pub const pthread_cond_t = extern struct {
inner: usize = ~usize(0),
};

View File

@@ -75,3 +75,26 @@ pub const dl_iterate_phdr_callback = extern fn (info: *dl_phdr_info, size: usize
pub extern "c" fn dl_iterate_phdr(callback: dl_iterate_phdr_callback, data: ?*c_void) c_int;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = if (builtin.os == .fuchsia) 40 else switch (builtin.abi) {
.musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
.gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
.aarch64 => 48,
.x86_64 => if (builtin.abi == .gnux32) 40 else 32,
.mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
else => if (@sizeOf(usize) == 8) 40 else 24,
},
else => unreachable,
};

lib/std/c/minix.zig Normal file
View File

@@ -0,0 +1,18 @@
const builtin = @import("builtin");
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = switch (builtin.abi) {
.musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
.gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
.aarch64 => 48,
.x86_64 => if (builtin.abi == .gnux32) 40 else 32,
.mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
else => if (@sizeOf(usize) == 8) 40 else 24,
},
else => unreachable,
};

View File

@@ -6,3 +6,32 @@ pub const _errno = __errno;
pub extern "c" fn getdents(fd: c_int, buf_ptr: [*]u8, nbytes: usize) usize;
pub extern "c" fn sigaltstack(ss: ?*stack_t, old_ss: ?*stack_t) c_int;
pub const pthread_mutex_t = extern struct {
ptm_magic: c_uint = 0x33330003,
ptm_errorcheck: padded_spin_t = 0,
ptm_unused: padded_spin_t = 0,
ptm_owner: usize = 0,
ptm_waiters: ?*u8 = null,
ptm_recursed: c_uint = 0,
ptm_spare2: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
ptc_magic: c_uint = 0x55550005,
ptc_lock: pthread_spin_t = 0,
ptc_waiters_first: ?*u8 = null,
ptc_waiters_last: ?*u8 = null,
ptc_mutex: ?*pthread_mutex_t = null,
ptc_private: ?*c_void = null,
};
const pthread_spin_t = if (builtin.arch == .arm or .arch == .powerpc) c_int else u8;
const padded_spin_t = switch (builtin.arch) {
.sparc, .sparcel, .sparcv9, .i386, .x86_64, .le64 => u32,
else => spin_t,
};
pub const pthread_attr_t = extern struct {
pta_magic: u32,
pta_flags: c_int,
pta_private: *c_void,
};

lib/std/c/openbsd.zig Normal file
View File

@@ -0,0 +1,6 @@
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};

lib/std/c/solaris.zig Normal file
View File

@@ -0,0 +1,15 @@
pub const pthread_mutex_t = extern struct {
__pthread_mutex_flag1: u16 = 0,
__pthread_mutex_flag2: u8 = 0,
__pthread_mutex_ceiling: u8 = 0,
__pthread_mutex_type: u16 = 0,
__pthread_mutex_magic: u16 = 0x4d58,
__pthread_mutex_lock: u64 = 0,
__pthread_mutex_data: u64 = 0,
};
pub const pthread_cond_t = extern struct {
__pthread_cond_flag: u32 = 0,
__pthread_cond_type: u16 = 0,
__pthread_cond_magic: u16 = 0x4356,
__pthread_cond_data: u64 = 0,
};

View File

@@ -37,81 +37,84 @@ pub const Mutex = if (builtin.single_threaded)
return Held{ .mutex = self };
}
}
else struct {
state: u32, // TODO: make this an enum
parker: ThreadParker,
else
struct {
state: u32, // TODO: make this an enum
parker: ThreadParker,
const Unlocked = 0;
const Sleeping = 1;
const Locked = 2;
const Unlocked = 0;
const Sleeping = 1;
const Locked = 2;
/// number of iterations to spin yielding the cpu
const SPIN_CPU = 4;
/// number of iterations to perform in the cpu yield loop
const SPIN_CPU_COUNT = 30;
/// number of iterations to spin yielding the thread
const SPIN_THREAD = 1;
/// number of iterations to spin yielding the cpu
const SPIN_CPU = 4;
pub fn init() Mutex {
return Mutex{
.state = Unlocked,
.parker = ThreadParker.init(),
/// number of iterations to perform in the cpu yield loop
const SPIN_CPU_COUNT = 30;
/// number of iterations to spin yielding the thread
const SPIN_THREAD = 1;
pub fn init() Mutex {
return Mutex{
.state = Unlocked,
.parker = ThreadParker.init(),
};
}
pub fn deinit(self: *Mutex) void {
self.parker.deinit();
}
pub const Held = struct {
mutex: *Mutex,
pub fn release(self: Held) void {
switch (@atomicRmw(u32, &self.mutex.state, .Xchg, Unlocked, .Release)) {
Locked => {},
Sleeping => self.mutex.parker.unpark(&self.mutex.state),
Unlocked => unreachable, // unlocking an unlocked mutex
else => unreachable, // should never be anything else
}
}
};
}
pub fn deinit(self: *Mutex) void {
self.parker.deinit();
}
pub fn acquire(self: *Mutex) Held {
// Try and speculatively grab the lock.
// If it fails, the state is either Locked or Sleeping
// depending on if there's a thread stuck sleeping below.
var state = @atomicRmw(u32, &self.state, .Xchg, Locked, .Acquire);
if (state == Unlocked)
return Held{ .mutex = self };
pub const Held = struct {
mutex: *Mutex,
while (true) {
// try and acquire the lock using cpu spinning on failure
var spin: usize = 0;
while (spin < SPIN_CPU) : (spin += 1) {
var value = @atomicLoad(u32, &self.state, .Monotonic);
while (value == Unlocked)
value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
SpinLock.yield(SPIN_CPU_COUNT);
}
pub fn release(self: Held) void {
switch (@atomicRmw(u32, &self.mutex.state, .Xchg, Unlocked, .Release)) {
Locked => {},
Sleeping => self.mutex.parker.unpark(&self.mutex.state),
Unlocked => unreachable, // unlocking an unlocked mutex
else => unreachable, // should never be anything else
// try and acquire the lock using thread rescheduling on failure
spin = 0;
while (spin < SPIN_THREAD) : (spin += 1) {
var value = @atomicLoad(u32, &self.state, .Monotonic);
while (value == Unlocked)
value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
std.os.sched_yield() catch std.time.sleep(1);
}
// failed to acquire the lock, go to sleep until woken up by `Held.release()`
if (@atomicRmw(u32, &self.state, .Xchg, Sleeping, .Acquire) == Unlocked)
return Held{ .mutex = self };
state = Sleeping;
self.parker.park(&self.state, Sleeping);
}
}
};
pub fn acquire(self: *Mutex) Held {
// Try and speculatively grab the lock.
// If it fails, the state is either Locked or Sleeping
// depending on if there's a thread stuck sleeping below.
var state = @atomicRmw(u32, &self.state, .Xchg, Locked, .Acquire);
if (state == Unlocked)
return Held{ .mutex = self };
while (true) {
// try and acquire the lock using cpu spinning on failure
var spin: usize = 0;
while (spin < SPIN_CPU) : (spin += 1) {
var value = @atomicLoad(u32, &self.state, .Monotonic);
while (value == Unlocked)
value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
SpinLock.yield(SPIN_CPU_COUNT);
}
// try and acquire the lock using thread rescheduling on failure
spin = 0;
while (spin < SPIN_THREAD) : (spin += 1) {
var value = @atomicLoad(u32, &self.state, .Monotonic);
while (value == Unlocked)
value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
std.os.sched_yield();
}
// failed to acquire the lock, go to sleep until woken up by `Held.release()`
if (@atomicRmw(u32, &self.state, .Xchg, Sleeping, .Acquire) == Unlocked)
return Held{ .mutex = self };
state = Sleeping;
self.parker.park(&self.state, Sleeping);
}
}
};
const TestContext = struct {
mutex: *Mutex,
data: i128,

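Aside: the Mutex changes above are formatting plus the sched_yield() catch fallback; the public API is unchanged. A minimal usage sketch, assuming std.Mutex is the public re-export of this file's Mutex:

const std = @import("std");

// Illustrative: guard a shared counter with Mutex via acquire/release.
fn incrementLocked(mutex: *std.Mutex, counter: *usize) void {
    const held = mutex.acquire();
    defer held.release();
    counter.* += 1;
}

fn demo() void {
    var mutex = std.Mutex.init();
    defer mutex.deinit();
    var counter: usize = 0;
    incrementLocked(&mutex, &counter);
}
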
View File

@@ -3172,9 +3172,21 @@ pub fn dn_expand(
return error.InvalidDnsPacket;
}
pub fn sched_yield() void {
switch (builtin.os) {
.windows => _ = windows.kernel32.SwitchToThread(),
else => assert(system.sched_yield() == 0),
pub const SchedYieldError = error{
/// The system is not configured to allow yielding
SystemCannotYield,
};
pub fn sched_yield() SchedYieldError!void {
if (builtin.os == .windows) {
// The return value has to do with how many other threads there are; it is not
// an error condition on Windows.
_ = windows.kernel32.SwitchToThread();
return;
}
switch (errno(system.sched_yield())) {
0 => return,
ENOSYS => return error.SystemCannotYield,
else => return error.SystemCannotYield,
}
}

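Aside: because sched_yield can now fail with SystemCannotYield, call sites need a fallback. A short sketch of the pattern this commit uses in Mutex and SpinLock.Backoff:

const std = @import("std");

fn relax() void {
    // If the OS refuses to yield, fall back to a 1 ns sleep, mirroring the
    // catch fallbacks added in the Mutex and SpinLock hunks of this commit.
    std.os.sched_yield() catch std.time.sleep(1);
}
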
View File

@@ -14,9 +14,6 @@ pub usingnamespace switch (builtin.os) {
else => struct {},
};
pub const pthread_t = *@OpaqueType();
pub const FILE = @OpaqueType();
pub const iovec = extern struct {
iov_base: [*]u8,
iov_len: usize,

View File

@@ -133,11 +133,6 @@ pub const dirent = extern struct {
}
};
pub const pthread_attr_t = extern struct {
__sig: c_long,
__opaque: [56]u8,
};
/// Renamed from `kevent` to `Kevent` to avoid conflict with function name.
pub const Kevent = extern struct {
ident: usize,

View File

@@ -360,11 +360,6 @@ pub const Kevent = extern struct {
udata: usize,
};
pub const pthread_attr_t = extern struct { // copied from freebsd
__size: [56]u8,
__align: c_long,
};
pub const EVFILT_FS = -10;
pub const EVFILT_USER = -9;
pub const EVFILT_EXCEPT = -8;
@@ -515,13 +510,13 @@ pub const sigset_t = extern struct {
pub const sig_atomic_t = c_int;
pub const Sigaction = extern struct {
__sigaction_u: extern union {
__sa_handler: ?extern fn(c_int) void,
__sa_sigaction: ?extern fn(c_int, [*c]siginfo_t, ?*c_void) void,
__sa_handler: ?extern fn (c_int) void,
__sa_sigaction: ?extern fn (c_int, [*c]siginfo_t, ?*c_void) void,
},
sa_flags: c_int,
sa_mask: sigset_t,
};
pub const sig_t = [*c]extern fn(c_int) void;
pub const sig_t = [*c]extern fn (c_int) void;
pub const sigvec = extern struct {
sv_handler: [*c]__sighandler_t,

View File

@@ -15,11 +15,6 @@ pub const Kevent = extern struct {
// TODO ext
};
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};
pub const dl_phdr_info = extern struct {
dlpi_addr: usize,
dlpi_name: ?[*]const u8,

View File

@@ -1000,11 +1000,6 @@ pub const dl_phdr_info = extern struct {
dlpi_phnum: u16,
};
pub const pthread_attr_t = extern struct {
__size: [56]u8,
__align: c_long,
};
pub const CPU_SETSIZE = 128;
pub const cpu_set_t = [CPU_SETSIZE / @sizeOf(usize)]usize;
pub const cpu_count_t = @IntType(false, std.math.log2(CPU_SETSIZE * 8));

View File

@@ -14,12 +14,6 @@ pub const Kevent = extern struct {
udata: usize,
};
pub const pthread_attr_t = extern struct {
pta_magic: u32,
pta_flags: c_int,
pta_private: *c_void,
};
pub const dl_phdr_info = extern struct {
dlpi_addr: usize,
dlpi_name: ?[*]const u8,

View File

@@ -8,24 +8,9 @@ const linux = std.os.linux;
const windows = std.os.windows;
pub const ThreadParker = switch (builtin.os) {
.macosx,
.tvos,
.ios,
.watchos,
.netbsd,
.openbsd,
.freebsd,
.kfreebsd,
.dragonfly,
.haiku,
.hermit,
.solaris,
.minix,
.fuchsia,
.emscripten => if (builtin.link_libc) PosixParker else SpinParker,
.linux => if (builtin.link_libc) PosixParker else LinuxParker,
.windows => WindowsParker,
else => SpinParker,
else => if (builtin.link_libc) PosixParker else SpinParker,
};
const SpinParker = struct {
@@ -62,7 +47,7 @@ const LinuxParker = struct {
0, linux.EAGAIN => return,
linux.EINTR => continue,
linux.EINVAL => unreachable,
else => unreachable,
else => continue,
}
}
}
@@ -94,7 +79,7 @@ const WindowsParker = struct {
var spin = SpinLock.Backoff.init();
const ev_handle = getEventHandle();
const key = @ptrCast(*const c_void, ptr);
while (@atomicLoad(u32, ptr, .Monotonic) == expected) {
if (ev_handle) |handle| {
_ = @atomicRmw(u32, &self.waiters, .Add, 1, .Release);
@@ -121,191 +106,64 @@ const WindowsParker = struct {
};
const PosixParker = struct {
cond: pthread_cond_t,
mutex: pthread_mutex_t,
cond: c.pthread_cond_t,
mutex: c.pthread_mutex_t,
const c = std.c;
pub fn init() PosixParker {
return PosixParker{
.cond = PTHREAD_COND_INITIALIZER,
.mutex = PTHREAD_MUTEX_INITIALIZER,
.cond = c.PTHREAD_COND_INITIALIZER,
.mutex = c.PTHREAD_MUTEX_INITIALIZER,
};
}
pub fn deinit(self: *PosixParker) void {
// On dragonfly, the destroy functions return EINVAL if they were initialized statically.
const retm = pthread_mutex_destroy(&self.mutex);
const retm = c.pthread_mutex_destroy(&self.mutex);
assert(retm == 0 or retm == (if (builtin.os == .dragonfly) os.EINVAL else 0));
const retc = pthread_cond_destroy(&self.cond);
const retc = c.pthread_cond_destroy(&self.cond);
assert(retc == 0 or retc == (if (builtin.os == .dragonfly) os.EINVAL else 0));
}
pub fn unpark(self: *PosixParker, ptr: *const u32) void {
assert(pthread_mutex_lock(&self.mutex) == 0);
defer assert(pthread_mutex_unlock(&self.mutex) == 0);
assert(pthread_cond_signal(&self.cond) == 0);
assert(c.pthread_mutex_lock(&self.mutex) == 0);
defer assert(c.pthread_mutex_unlock(&self.mutex) == 0);
assert(c.pthread_cond_signal(&self.cond) == 0);
}
pub fn park(self: *PosixParker, ptr: *const u32, expected: u32) void {
assert(pthread_mutex_lock(&self.mutex) == 0);
defer assert(pthread_mutex_unlock(&self.mutex) == 0);
assert(c.pthread_mutex_lock(&self.mutex) == 0);
defer assert(c.pthread_mutex_unlock(&self.mutex) == 0);
while (@atomicLoad(u32, ptr, .Acquire) == expected)
assert(pthread_cond_wait(&self.cond, &self.mutex) == 0);
assert(c.pthread_cond_wait(&self.cond, &self.mutex) == 0);
}
const PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{};
extern "c" fn pthread_mutex_lock(mutex: *pthread_mutex_t) c_int;
extern "c" fn pthread_mutex_unlock(mutex: *pthread_mutex_t) c_int;
extern "c" fn pthread_mutex_destroy(mutex: *pthread_mutex_t) c_int;
const PTHREAD_COND_INITIALIZER = pthread_cond_t{};
extern "c" fn pthread_cond_wait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t) c_int;
extern "c" fn pthread_cond_signal(cond: *pthread_cond_t) c_int;
extern "c" fn pthread_cond_destroy(cond: *pthread_cond_t) c_int;
// https://github.com/rust-lang/libc
usingnamespace switch (builtin.os) {
.macosx, .tvos, .ios, .watchos => struct {
pub const pthread_mutex_t = extern struct {
__sig: c_long = 0x32AAABA7,
__opaque: [__PTHREAD_MUTEX_SIZE__]u8 = [_]u8{0} ** __PTHREAD_MUTEX_SIZE__,
};
pub const pthread_cond_t = extern struct {
__sig: c_long = 0x3CB0B1BB,
__opaque: [__PTHREAD_COND_SIZE__]u8 = [_]u8{0} ** __PTHREAD_COND_SIZE__,
};
const __PTHREAD_MUTEX_SIZE__ = if (@sizeOf(usize) == 8) 56 else 40;
const __PTHREAD_COND_SIZE__ = if (@sizeOf(usize) == 8) 40 else 24;
},
.netbsd => struct {
pub const pthread_mutex_t = extern struct {
ptm_magic: c_uint = 0x33330003,
ptm_errorcheck: padded_spin_t = 0,
ptm_unused: padded_spin_t = 0,
ptm_owner: usize = 0,
ptm_waiters: ?*u8 = null,
ptm_recursed: c_uint = 0,
ptm_spare2: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
ptc_magic: c_uint = 0x55550005,
ptc_lock: pthread_spin_t = 0,
ptc_waiters_first: ?*u8 = null,
ptc_waiters_last: ?*u8 = null,
ptc_mutex: ?*pthread_mutex_t = null,
ptc_private: ?*c_void = null,
};
const pthread_spin_t = if (builtin.arch == .arm or .arch == .powerpc) c_int else u8;
const padded_spin_t = switch (builtin.arch) {
.sparc, .sparcel, .sparcv9, .i386, .x86_64, .le64 => u32,
else => spin_t,
};
},
.openbsd, .freebsd, .kfreebsd, .dragonfly => struct {
pub const pthread_mutex_t = extern struct {
inner: ?*c_void = null,
};
pub const pthread_cond_t = extern struct {
inner: ?*c_void = null,
};
},
.haiku => struct {
pub const pthread_mutex_t = extern struct {
flags: u32 = 0,
lock: i32 = 0,
unused: i32 = -42,
owner: i32 = -1,
owner_count: i32 = 0,
};
pub const pthread_cond_t = extern struct {
flags: u32 = 0,
unused: i32 = -42,
mutex: ?*c_void = null,
waiter_count: i32 = 0,
lock: i32 = 0,
};
},
.hermit => struct {
pub const pthread_mutex_t = extern struct {
inner: usize = ~usize(0),
};
pub const pthread_cond_t = extern struct {
inner: usize = ~usize(0),
};
},
.solaris => struct {
pub const pthread_mutex_t = extern struct {
__pthread_mutex_flag1: u16 = 0,
__pthread_mutex_flag2: u8 = 0,
__pthread_mutex_ceiling: u8 = 0,
__pthread_mutex_type: u16 = 0,
__pthread_mutex_magic: u16 = 0x4d58,
__pthread_mutex_lock: u64 = 0,
__pthread_mutex_data: u64 = 0,
};
pub const pthread_cond_t = extern struct {
__pthread_cond_flag: u32 = 0,
__pthread_cond_type: u16 = 0,
__pthread_cond_magic: u16 = 0x4356,
__pthread_cond_data: u64 = 0,
};
},
.fuchsia, .minix, .linux => struct {
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = if (builtin.os == .fuchsia) 40 else switch (builtin.abi) {
.musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
.gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
.aarch64 => 48,
.x86_64 => if (builtin.abi == .gnux32) 40 else 32,
.mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
else => if (@sizeOf(usize) == 8) 40 else 24,
},
else => unreachable,
};
},
.emscripten => struct {
pub const pthread_mutex_t = extern struct {
size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(4) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
};
pub const pthread_cond_t = extern struct {
size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
};
const __SIZEOF_PTHREAD_COND_T = 48;
const __SIZEOF_PTHREAD_MUTEX_T = 28;
},
else => unreachable,
};
};
test "std.ThreadParker" {
if (builtin.single_threaded)
return error.SkipZigTest;
const Context = struct {
parker: ThreadParker,
data: u32,
fn receiver(self: *@This()) void {
self.parker.park(&self.data, 0); // receives 1
assert(@atomicRmw(u32, &self.data, .Xchg, 2, .SeqCst) == 1); // sends 2
self.parker.unpark(&self.data); // wakes up waiters on 2
self.parker.park(&self.data, 2); // receives 3
assert(@atomicRmw(u32, &self.data, .Xchg, 4, .SeqCst) == 3); // sends 4
self.parker.unpark(&self.data); // wakes up waiters on 4
self.parker.park(&self.data, 0); // receives 1
assert(@atomicRmw(u32, &self.data, .Xchg, 2, .SeqCst) == 1); // sends 2
self.parker.unpark(&self.data); // wakes up waiters on 2
self.parker.park(&self.data, 2); // receives 3
assert(@atomicRmw(u32, &self.data, .Xchg, 4, .SeqCst) == 3); // sends 4
self.parker.unpark(&self.data); // wakes up waiters on 4
}
fn sender(self: *@This()) void {
assert(@atomicRmw(u32, &self.data, .Xchg, 1, .SeqCst) == 0); // sends 1
self.parker.unpark(&self.data); // wakes up waiters on 1
self.parker.park(&self.data, 1); // receives 2
assert(@atomicRmw(u32, &self.data, .Xchg, 3, .SeqCst) == 2); // sends 3
self.parker.unpark(&self.data); // wakes up waiters on 3
self.parker.park(&self.data, 3); // receives 4
assert(@atomicRmw(u32, &self.data, .Xchg, 1, .SeqCst) == 0); // sends 1
self.parker.unpark(&self.data); // wakes up waiters on 1
self.parker.park(&self.data, 1); // receives 2
assert(@atomicRmw(u32, &self.data, .Xchg, 3, .SeqCst) == 2); // sends 3
self.parker.unpark(&self.data); // wakes up waiters on 3
self.parker.park(&self.data, 3); // receives 4
}
};
@@ -314,9 +172,9 @@ test "std.ThreadParker" {
.data = 0,
};
defer context.parker.deinit();
var receiver = try std.Thread.spawn(&context, Context.receiver);
defer receiver.wait();
context.sender();
}
}

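Aside: the test above exercises the park/unpark handshake directly. As a further illustration, a hypothetical one-shot flag built on the same primitive, assuming the parker is re-exported as std.ThreadParker (as the test name suggests); names other than init/deinit/park/unpark are illustrative:

const std = @import("std");
const assert = std.debug.assert;

// Hypothetical one-shot flag: park() blocks while the watched word still
// equals the expected value; unpark() wakes threads parked on that word.
const OneShot = struct {
    parker: std.ThreadParker,
    state: u32,

    fn init() OneShot {
        return OneShot{ .parker = std.ThreadParker.init(), .state = 0 };
    }

    fn deinit(self: *OneShot) void {
        self.parker.deinit();
    }

    fn wait(self: *OneShot) void {
        self.parker.park(&self.state, 0); // returns once state is no longer 0
    }

    fn set(self: *OneShot) void {
        assert(@atomicRmw(u32, &self.state, .Xchg, 1, .SeqCst) == 0);
        self.parker.unpark(&self.state);
    }
};
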
View File

@@ -31,8 +31,8 @@ pub const SpinLock = struct {
var i = iterations;
while (i != 0) : (i -= 1) {
switch (builtin.arch) {
.i386, .x86_64 => asm volatile("pause"),
.arm, .aarch64 => asm volatile("yield"),
.i386, .x86_64 => asm volatile ("pause"),
.arm, .aarch64 => asm volatile ("yield"),
else => time.sleep(0),
}
}
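
Aside: the pause/yield hints above keep a hardware thread polite while spinning. A hedged sketch of driving the adaptive Backoff (whose escalation appears in the next hunk) from a caller's retry loop; the flag word and helper name are illustrative:

const std = @import("std");

fn spinUntilNonzero(flag: *u32) void {
    var backoff = std.SpinLock.Backoff.init();
    while (@atomicLoad(u32, flag, .Acquire) == 0) {
        // Escalates from pause/yield instructions to sched_yield()/sleep
        // as the iteration count grows.
        backoff.yield();
    }
}
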
@@ -53,7 +53,7 @@ pub const SpinLock = struct {
if (self.iteration < 20) {
SpinLock.yield(self.iteration);
} else if (self.iteration < 24) {
os.sched_yield();
os.sched_yield() catch time.sleep(1);
} else if (self.iteration < 26) {
time.sleep(1 * time.millisecond);
} else {