Merge branch 'adaptive_lock' of https://github.com/kprotty/zig into kprotty-adaptive_lock

commit fbbcf2f30d
lib/std/c.zig:

@@ -203,3 +203,5 @@ pub extern "c" fn dn_expand(
     exp_dn: [*]u8,
     length: c_int,
 ) c_int;
+
+pub extern "c" fn sched_yield() c_int;
lib/std/mutex.zig:

@@ -1,19 +1,13 @@
 const std = @import("std.zig");
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
 const testing = std.testing;
 const SpinLock = std.SpinLock;
-const linux = std.os.linux;
-const windows = std.os.windows;
+const ThreadParker = std.ThreadParker;
 
 /// Lock may be held only once. If the same thread
 /// tries to acquire the same mutex twice, it deadlocks.
-/// This type must be initialized at runtime, and then deinitialized when no
-/// longer needed, to free resources.
-/// If you need static initialization, use std.StaticallyInitializedMutex.
-/// The Linux implementation is based on mutex3 from
-/// https://www.akkadia.org/drepper/futex.pdf
+/// This type supports static initialization and is based off of Golang 1.13 runtime.lock_futex:
+/// https://github.com/golang/go/blob/master/src/runtime/lock_futex.go
 /// When an application is built in single threaded release mode, all the functions are
 /// no-ops. In single threaded debug mode, there is deadlock detection.
 pub const Mutex = if (builtin.single_threaded)
@@ -43,83 +37,79 @@ pub const Mutex = if (builtin.single_threaded)
             return Held{ .mutex = self };
         }
     }
-else switch (builtin.os) {
-    builtin.Os.linux => struct {
-        /// 0: unlocked
-        /// 1: locked, no waiters
-        /// 2: locked, one or more waiters
-        lock: i32,
-
-        pub const Held = struct {
-            mutex: *Mutex,
-
-            pub fn release(self: Held) void {
-                const c = @atomicRmw(i32, &self.mutex.lock, AtomicRmwOp.Sub, 1, AtomicOrder.Release);
-                if (c != 1) {
-                    _ = @atomicRmw(i32, &self.mutex.lock, AtomicRmwOp.Xchg, 0, AtomicOrder.Release);
-                    const rc = linux.futex_wake(&self.mutex.lock, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, 1);
-                    switch (linux.getErrno(rc)) {
-                        0 => {},
-                        linux.EINVAL => unreachable,
-                        else => unreachable,
-                    }
-                }
-            }
-        };
-
-        pub fn init() Mutex {
-            return Mutex{ .lock = 0 };
-        }
-
-        pub fn deinit(self: *Mutex) void {}
-
-        pub fn acquire(self: *Mutex) Held {
-            var c = @cmpxchgWeak(i32, &self.lock, 0, 1, AtomicOrder.Acquire, AtomicOrder.Monotonic) orelse
-                return Held{ .mutex = self };
-            if (c != 2)
-                c = @atomicRmw(i32, &self.lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
-            while (c != 0) {
-                const rc = linux.futex_wait(&self.lock, linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG, 2, null);
-                switch (linux.getErrno(rc)) {
-                    0, linux.EINTR, linux.EAGAIN => {},
-                    linux.EINVAL => unreachable,
-                    else => unreachable,
-                }
-                c = @atomicRmw(i32, &self.lock, AtomicRmwOp.Xchg, 2, AtomicOrder.Acquire);
-            }
-            return Held{ .mutex = self };
-        }
-    },
-    // TODO once https://github.com/ziglang/zig/issues/287 (copy elision) is solved, we can make a
-    // better implementation of this. The problem is we need the init() function to have access to
-    // the address of the CRITICAL_SECTION, and then have it not move.
-    builtin.Os.windows => std.StaticallyInitializedMutex,
-    else => struct {
-        /// TODO better implementation than spin lock.
-        /// When changing this, one must also change the corresponding
-        /// std.StaticallyInitializedMutex code, since it aliases this type,
-        /// under the assumption that it works both statically and at runtime.
-        lock: SpinLock,
-
-        pub const Held = struct {
-            mutex: *Mutex,
-
-            pub fn release(self: Held) void {
-                SpinLock.Held.release(SpinLock.Held{ .spinlock = &self.mutex.lock });
-            }
-        };
-
-        pub fn init() Mutex {
-            return Mutex{ .lock = SpinLock.init() };
-        }
-
-        pub fn deinit(self: *Mutex) void {}
-
-        pub fn acquire(self: *Mutex) Held {
-            _ = self.lock.acquire();
-            return Held{ .mutex = self };
-        }
-    },
-}
+else struct {
+    state: u32, // TODO: make this an enum
+    parker: ThreadParker,
+
+    const Unlocked = 0;
+    const Sleeping = 1;
+    const Locked = 2;
+
+    /// number of iterations to spin yielding the cpu
+    const SPIN_CPU = 4;
+    /// number of iterations to perform in the cpu yield loop
+    const SPIN_CPU_COUNT = 30;
+    /// number of iterations to spin yielding the thread
+    const SPIN_THREAD = 1;
+
+    pub fn init() Mutex {
+        return Mutex{
+            .state = Unlocked,
+            .parker = ThreadParker.init(),
+        };
+    }
+
+    pub fn deinit(self: *Mutex) void {
+        self.parker.deinit();
+    }
+
+    pub const Held = struct {
+        mutex: *Mutex,
+
+        pub fn release(self: Held) void {
+            switch (@atomicRmw(u32, &self.mutex.state, .Xchg, Unlocked, .Release)) {
+                Locked => {},
+                Sleeping => self.mutex.parker.unpark(&self.mutex.state),
+                Unlocked => unreachable, // unlocking an unlocked mutex
+                else => unreachable, // should never be anything else
+            }
+        }
+    };
+
+    pub fn acquire(self: *Mutex) Held {
+        // Try and speculatively grab the lock.
+        // If it fails, the state is either Locked or Sleeping
+        // depending on if there's a thread stuck sleeping below.
+        var state = @atomicRmw(u32, &self.state, .Xchg, Locked, .Acquire);
+        if (state == Unlocked)
+            return Held{ .mutex = self };
+
+        while (true) {
+            // try and acquire the lock using cpu spinning on failure
+            var spin: usize = 0;
+            while (spin < SPIN_CPU) : (spin += 1) {
+                var value = @atomicLoad(u32, &self.state, .Monotonic);
+                while (value == Unlocked)
+                    value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
+                SpinLock.yield(SPIN_CPU_COUNT);
+            }
+
+            // try and acquire the lock using thread rescheduling on failure
+            spin = 0;
+            while (spin < SPIN_THREAD) : (spin += 1) {
+                var value = @atomicLoad(u32, &self.state, .Monotonic);
+                while (value == Unlocked)
+                    value = @cmpxchgWeak(u32, &self.state, Unlocked, state, .Acquire, .Monotonic) orelse return Held{ .mutex = self };
+                std.os.sched_yield();
+            }
+
+            // failed to acquire the lock, go to sleep until woken up by `Held.release()`
+            if (@atomicRmw(u32, &self.state, .Xchg, Sleeping, .Acquire) == Unlocked)
+                return Held{ .mutex = self };
+            state = Sleeping;
+            self.parker.park(&self.state, Sleeping);
+        }
+    }
 };
 
 const TestContext = struct {
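For orientation, a minimal usage sketch of the resulting API (hypothetical caller code, not part of this diff; `bump` and `shared` are illustrative names): acquire() hands back a Held token and unlocking goes through that token, so it pairs naturally with defer.

const std = @import("std");

var shared: usize = 0;

fn bump(mutex: *std.Mutex) void {
    const held = mutex.acquire(); // spins, then sched_yields, then parks
    defer held.release(); // wakes one parked thread if the state was Sleeping
    shared += 1;
}

test "Mutex usage sketch" {
    var mutex = std.Mutex.init();
    defer mutex.deinit();
    bump(&mutex);
    std.testing.expect(shared == 1);
}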
lib/std/os.zig:

@@ -3171,3 +3171,10 @@ pub fn dn_expand(
     }
     return error.InvalidDnsPacket;
 }
+
+pub fn sched_yield() void {
+    switch (builtin.os) {
+        .windows => _ = windows.kernel32.SwitchToThread(),
+        else => assert(system.sched_yield() == 0),
+    }
+}
lib/std/os/linux.zig:

@@ -954,6 +954,10 @@ pub fn fremovexattr(fd: usize, name: [*]const u8) usize {
     return syscall2(SYS_fremovexattr, fd, @ptrToInt(name));
 }
 
+pub fn sched_yield() usize {
+    return syscall0(SYS_sched_yield);
+}
+
 pub fn sched_getaffinity(pid: i32, size: usize, set: *cpu_set_t) usize {
     const rc = syscall3(SYS_sched_getaffinity, @bitCast(usize, isize(pid)), size, @ptrToInt(set));
     if (@bitCast(isize, rc) < 0) return rc;
lib/std/os/windows/kernel32.zig:

@@ -184,6 +184,8 @@ pub extern "kernel32" stdcallcc fn SetHandleInformation(hObject: HANDLE, dwMask:
 
 pub extern "kernel32" stdcallcc fn Sleep(dwMilliseconds: DWORD) void;
 
+pub extern "kernel32" stdcallcc fn SwitchToThread() BOOL;
+
 pub extern "kernel32" stdcallcc fn TerminateProcess(hProcess: HANDLE, uExitCode: UINT) BOOL;
 
 pub extern "kernel32" stdcallcc fn TlsAlloc() DWORD;
lib/std/os/windows/ntdll.zig:

@@ -43,3 +43,21 @@ pub extern "NtDll" stdcallcc fn NtQueryDirectoryFile(
     FileName: ?*UNICODE_STRING,
     RestartScan: BOOLEAN,
 ) NTSTATUS;
+pub extern "NtDll" stdcallcc fn NtCreateKeyedEvent(
+    KeyedEventHandle: *HANDLE,
+    DesiredAccess: ACCESS_MASK,
+    ObjectAttributes: ?PVOID,
+    Flags: ULONG,
+) NTSTATUS;
+pub extern "NtDll" stdcallcc fn NtReleaseKeyedEvent(
+    EventHandle: HANDLE,
+    Key: *const c_void,
+    Alertable: BOOLEAN,
+    Timeout: ?*LARGE_INTEGER,
+) NTSTATUS;
+pub extern "NtDll" stdcallcc fn NtWaitForKeyedEvent(
+    EventHandle: HANDLE,
+    Key: *const c_void,
+    Alertable: BOOLEAN,
+    Timeout: ?*LARGE_INTEGER,
+) NTSTATUS;
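Background on the three NtDll additions: a keyed event is a single kernel object that multiplexes any number of wait queues, keyed by a pointer-sized value. NtWaitForKeyedEvent blocks until another thread calls NtReleaseKeyedEvent with the same handle and key, and the release call itself blocks until a waiter arrives, so a wakeup cannot be lost; this is why the WindowsParker below tracks a waiter count and only issues a release when a parked thread is known to exist.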
lib/std/parker.zig (new file, 322 lines):

@@ -0,0 +1,322 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const time = std.time;
const testing = std.testing;
const assert = std.debug.assert;
const os = std.os; // PosixParker.deinit compares destroy() results against os.EINVAL
const SpinLock = std.SpinLock;
const linux = std.os.linux;
const windows = std.os.windows;

pub const ThreadParker = switch (builtin.os) {
    .macosx,
    .tvos,
    .ios,
    .watchos,
    .netbsd,
    .openbsd,
    .freebsd,
    .kfreebsd,
    .dragonfly,
    .haiku,
    .hermit,
    .solaris,
    .minix,
    .fuchsia,
    .emscripten => if (builtin.link_libc) PosixParker else SpinParker,
    .linux => if (builtin.link_libc) PosixParker else LinuxParker,
    .windows => WindowsParker,
    else => SpinParker,
};

const SpinParker = struct {
    pub fn init() SpinParker {
        return SpinParker{};
    }
    pub fn deinit(self: *SpinParker) void {}

    pub fn unpark(self: *SpinParker, ptr: *const u32) void {}

    pub fn park(self: *SpinParker, ptr: *const u32, expected: u32) void {
        var backoff = SpinLock.Backoff.init();
        while (@atomicLoad(u32, ptr, .Acquire) == expected)
            backoff.yield();
    }
};

const LinuxParker = struct {
    pub fn init() LinuxParker {
        return LinuxParker{};
    }
    pub fn deinit(self: *LinuxParker) void {}

    pub fn unpark(self: *LinuxParker, ptr: *const u32) void {
        const rc = linux.futex_wake(@ptrCast(*const i32, ptr), linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, 1);
        assert(linux.getErrno(rc) == 0);
    }

    pub fn park(self: *LinuxParker, ptr: *const u32, expected: u32) void {
        const value = @intCast(i32, expected);
        while (@atomicLoad(u32, ptr, .Acquire) == expected) {
            const rc = linux.futex_wait(@ptrCast(*const i32, ptr), linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG, value, null);
            switch (linux.getErrno(rc)) {
                0, linux.EAGAIN => return,
                linux.EINTR => continue,
                linux.EINVAL => unreachable,
                else => unreachable,
            }
        }
    }
};

const WindowsParker = struct {
    waiters: u32,

    pub fn init() WindowsParker {
        return WindowsParker{ .waiters = 0 };
    }
    pub fn deinit(self: *WindowsParker) void {}

    pub fn unpark(self: *WindowsParker, ptr: *const u32) void {
        const key = @ptrCast(*const c_void, ptr);
        const handle = getEventHandle() orelse return;

        var waiting = @atomicLoad(u32, &self.waiters, .Monotonic);
        while (waiting != 0) {
            waiting = @cmpxchgWeak(u32, &self.waiters, waiting, waiting - 1, .Acquire, .Monotonic) orelse {
                const rc = windows.ntdll.NtReleaseKeyedEvent(handle, key, windows.FALSE, null);
                assert(rc == 0);
                return;
            };
        }
    }

    pub fn park(self: *WindowsParker, ptr: *const u32, expected: u32) void {
        var spin = SpinLock.Backoff.init();
        const ev_handle = getEventHandle();
        const key = @ptrCast(*const c_void, ptr);

        while (@atomicLoad(u32, ptr, .Monotonic) == expected) {
            if (ev_handle) |handle| {
                _ = @atomicRmw(u32, &self.waiters, .Add, 1, .Release);
                const rc = windows.ntdll.NtWaitForKeyedEvent(handle, key, windows.FALSE, null);
                assert(rc == 0);
            } else {
                spin.yield();
            }
        }
    }

    var event_handle = std.lazyInit(windows.HANDLE);

    fn getEventHandle() ?windows.HANDLE {
        if (event_handle.get()) |handle_ptr|
            return handle_ptr.*;
        defer event_handle.resolve();

        const access_mask = windows.GENERIC_READ | windows.GENERIC_WRITE;
        if (windows.ntdll.NtCreateKeyedEvent(&event_handle.data, access_mask, null, 0) != 0)
            return null;
        return event_handle.data;
    }
};

const PosixParker = struct {
    cond: pthread_cond_t,
    mutex: pthread_mutex_t,

    pub fn init() PosixParker {
        return PosixParker{
            .cond = PTHREAD_COND_INITIALIZER,
            .mutex = PTHREAD_MUTEX_INITIALIZER,
        };
    }

    pub fn deinit(self: *PosixParker) void {
        // On dragonfly, the destroy functions return EINVAL if they were initialized statically.
        const retm = pthread_mutex_destroy(&self.mutex);
        assert(retm == 0 or retm == (if (builtin.os == .dragonfly) os.EINVAL else 0));
        const retc = pthread_cond_destroy(&self.cond);
        assert(retc == 0 or retc == (if (builtin.os == .dragonfly) os.EINVAL else 0));
    }

    pub fn unpark(self: *PosixParker, ptr: *const u32) void {
        assert(pthread_mutex_lock(&self.mutex) == 0);
        defer assert(pthread_mutex_unlock(&self.mutex) == 0);
        assert(pthread_cond_signal(&self.cond) == 0);
    }

    pub fn park(self: *PosixParker, ptr: *const u32, expected: u32) void {
        assert(pthread_mutex_lock(&self.mutex) == 0);
        defer assert(pthread_mutex_unlock(&self.mutex) == 0);
        while (@atomicLoad(u32, ptr, .Acquire) == expected)
            assert(pthread_cond_wait(&self.cond, &self.mutex) == 0);
    }

    const PTHREAD_MUTEX_INITIALIZER = pthread_mutex_t{};
    extern "c" fn pthread_mutex_lock(mutex: *pthread_mutex_t) c_int;
    extern "c" fn pthread_mutex_unlock(mutex: *pthread_mutex_t) c_int;
    extern "c" fn pthread_mutex_destroy(mutex: *pthread_mutex_t) c_int;

    const PTHREAD_COND_INITIALIZER = pthread_cond_t{};
    extern "c" fn pthread_cond_wait(noalias cond: *pthread_cond_t, noalias mutex: *pthread_mutex_t) c_int;
    extern "c" fn pthread_cond_signal(cond: *pthread_cond_t) c_int;
    extern "c" fn pthread_cond_destroy(cond: *pthread_cond_t) c_int;

    // https://github.com/rust-lang/libc
    usingnamespace switch (builtin.os) {
        .macosx, .tvos, .ios, .watchos => struct {
            pub const pthread_mutex_t = extern struct {
                __sig: c_long = 0x32AAABA7,
                __opaque: [__PTHREAD_MUTEX_SIZE__]u8 = [_]u8{0} ** __PTHREAD_MUTEX_SIZE__,
            };
            pub const pthread_cond_t = extern struct {
                __sig: c_long = 0x3CB0B1BB,
                __opaque: [__PTHREAD_COND_SIZE__]u8 = [_]u8{0} ** __PTHREAD_COND_SIZE__,
            };
            const __PTHREAD_MUTEX_SIZE__ = if (@sizeOf(usize) == 8) 56 else 40;
            const __PTHREAD_COND_SIZE__ = if (@sizeOf(usize) == 8) 40 else 24;
        },
        .netbsd => struct {
            pub const pthread_mutex_t = extern struct {
                ptm_magic: c_uint = 0x33330003,
                ptm_errorcheck: padded_spin_t = 0,
                ptm_unused: padded_spin_t = 0,
                ptm_owner: usize = 0,
                ptm_waiters: ?*u8 = null,
                ptm_recursed: c_uint = 0,
                ptm_spare2: ?*c_void = null,
            };
            pub const pthread_cond_t = extern struct {
                ptc_magic: c_uint = 0x55550005,
                ptc_lock: pthread_spin_t = 0,
                ptc_waiters_first: ?*u8 = null,
                ptc_waiters_last: ?*u8 = null,
                ptc_mutex: ?*pthread_mutex_t = null,
                ptc_private: ?*c_void = null,
            };
            const pthread_spin_t = if (builtin.arch == .arm or builtin.arch == .powerpc) c_int else u8;
            const padded_spin_t = switch (builtin.arch) {
                .sparc, .sparcel, .sparcv9, .i386, .x86_64, .le64 => u32,
                else => pthread_spin_t,
            };
        },
        .openbsd, .freebsd, .kfreebsd, .dragonfly => struct {
            pub const pthread_mutex_t = extern struct {
                inner: ?*c_void = null,
            };
            pub const pthread_cond_t = extern struct {
                inner: ?*c_void = null,
            };
        },
        .haiku => struct {
            pub const pthread_mutex_t = extern struct {
                flags: u32 = 0,
                lock: i32 = 0,
                unused: i32 = -42,
                owner: i32 = -1,
                owner_count: i32 = 0,
            };
            pub const pthread_cond_t = extern struct {
                flags: u32 = 0,
                unused: i32 = -42,
                mutex: ?*c_void = null,
                waiter_count: i32 = 0,
                lock: i32 = 0,
            };
        },
        .hermit => struct {
            pub const pthread_mutex_t = extern struct {
                inner: usize = ~usize(0),
            };
            pub const pthread_cond_t = extern struct {
                inner: usize = ~usize(0),
            };
        },
        .solaris => struct {
            pub const pthread_mutex_t = extern struct {
                __pthread_mutex_flag1: u16 = 0,
                __pthread_mutex_flag2: u8 = 0,
                __pthread_mutex_ceiling: u8 = 0,
                __pthread_mutex_type: u16 = 0,
                __pthread_mutex_magic: u16 = 0x4d58,
                __pthread_mutex_lock: u64 = 0,
                __pthread_mutex_data: u64 = 0,
            };
            pub const pthread_cond_t = extern struct {
                __pthread_cond_flag: u32 = 0,
                __pthread_cond_type: u16 = 0,
                __pthread_cond_magic: u16 = 0x4356,
                __pthread_cond_data: u64 = 0,
            };
        },
        .fuchsia, .minix, .linux => struct {
            pub const pthread_mutex_t = extern struct {
                size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
            };
            pub const pthread_cond_t = extern struct {
                size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
            };
            const __SIZEOF_PTHREAD_COND_T = 48;
            const __SIZEOF_PTHREAD_MUTEX_T = if (builtin.os == .fuchsia) 40 else switch (builtin.abi) {
                .musl, .musleabi, .musleabihf => if (@sizeOf(usize) == 8) 40 else 24,
                .gnu, .gnuabin32, .gnuabi64, .gnueabi, .gnueabihf, .gnux32 => switch (builtin.arch) {
                    .aarch64 => 48,
                    .x86_64 => if (builtin.abi == .gnux32) 40 else 32,
                    .mips64, .powerpc64, .powerpc64le, .sparcv9 => 40,
                    else => if (@sizeOf(usize) == 8) 40 else 24,
                },
                else => unreachable,
            };
        },
        .emscripten => struct {
            pub const pthread_mutex_t = extern struct {
                size: [__SIZEOF_PTHREAD_MUTEX_T]u8 align(4) = [_]u8{0} ** __SIZEOF_PTHREAD_MUTEX_T,
            };
            pub const pthread_cond_t = extern struct {
                size: [__SIZEOF_PTHREAD_COND_T]u8 align(@alignOf(usize)) = [_]u8{0} ** __SIZEOF_PTHREAD_COND_T,
            };
            const __SIZEOF_PTHREAD_COND_T = 48;
            const __SIZEOF_PTHREAD_MUTEX_T = 28;
        },
        else => unreachable,
    };
};

test "std.ThreadParker" {
|
||||
if (builtin.single_threaded)
|
||||
return error.SkipZigTest;
|
||||
|
||||
const Context = struct {
|
||||
parker: ThreadParker,
|
||||
data: u32,
|
||||
|
||||
fn receiver(self: *@This()) void {
|
||||
self.parker.park(&self.data, 0); // receives 1
|
||||
assert(@atomicRmw(u32, &self.data, .Xchg, 2, .SeqCst) == 1); // sends 2
|
||||
self.parker.unpark(&self.data); // wakes up waiters on 2
|
||||
self.parker.park(&self.data, 2); // receives 3
|
||||
assert(@atomicRmw(u32, &self.data, .Xchg, 4, .SeqCst) == 3); // sends 4
|
||||
self.parker.unpark(&self.data); // wakes up waiters on 4
|
||||
}
|
||||
|
||||
fn sender(self: *@This()) void {
|
||||
assert(@atomicRmw(u32, &self.data, .Xchg, 1, .SeqCst) == 0); // sends 1
|
||||
self.parker.unpark(&self.data); // wakes up waiters on 1
|
||||
self.parker.park(&self.data, 1); // receives 2
|
||||
assert(@atomicRmw(u32, &self.data, .Xchg, 3, .SeqCst) == 2); // sends 3
|
||||
self.parker.unpark(&self.data); // wakes up waiters on 3
|
||||
self.parker.park(&self.data, 3); // receives 4
|
||||
}
|
||||
};
|
||||
|
||||
var context = Context{
|
||||
.parker = ThreadParker.init(),
|
||||
.data = 0,
|
||||
};
|
||||
defer context.parker.deinit();
|
||||
|
||||
var receiver = try std.Thread.spawn(&context, Context.receiver);
|
||||
defer receiver.wait();
|
||||
|
||||
context.sender();
|
||||
}
|
||||
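All four parkers implement the same futex-style contract: park(ptr, expected) returns only once *ptr stops holding expected, re-checking the word before every sleep so an unpark that lands between the check and the wait cannot be lost, while unpark(ptr) wakes a thread parked on that address (in the SpinParker fallback unpark is a no-op and park simply polls until the store becomes visible). The test above drives this handshake in both directions across two threads.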
lib/std/spinlock.zig:

@@ -1,8 +1,8 @@
 const std = @import("std.zig");
 const builtin = @import("builtin");
-const AtomicOrder = builtin.AtomicOrder;
-const AtomicRmwOp = builtin.AtomicRmwOp;
 const assert = std.debug.assert;
+const time = std.time;
+const os = std.os;
 
 pub const SpinLock = struct {
     lock: u8, // TODO use a bool or enum
@@ -11,7 +11,8 @@ pub const SpinLock = struct {
         spinlock: *SpinLock,
 
         pub fn release(self: Held) void {
-            assert(@atomicRmw(u8, &self.spinlock.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1);
+            // TODO: @atomicStore() https://github.com/ziglang/zig/issues/2995
+            assert(@atomicRmw(u8, &self.spinlock.lock, .Xchg, 0, .Release) == 1);
         }
     };
 
@@ -20,9 +21,46 @@ pub const SpinLock = struct {
     }
 
     pub fn acquire(self: *SpinLock) Held {
-        while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {}
+        var backoff = Backoff.init();
+        while (@atomicRmw(u8, &self.lock, .Xchg, 1, .Acquire) != 0)
+            backoff.yield();
         return Held{ .spinlock = self };
     }
 
+    pub fn yield(iterations: usize) void {
+        var i = iterations;
+        while (i != 0) : (i -= 1) {
+            switch (builtin.arch) {
+                .i386, .x86_64 => asm volatile ("pause"),
+                .arm, .aarch64 => asm volatile ("yield"),
+                else => time.sleep(0),
+            }
+        }
+    }
+
+    /// Provides a method to incrementally yield longer each time it's called.
+    pub const Backoff = struct {
+        iteration: usize,
+
+        pub fn init() @This() {
+            return @This(){ .iteration = 0 };
+        }
+
+        /// Modified hybrid yielding from
+        /// http://www.1024cores.net/home/lock-free-algorithms/tricks/spinning
+        pub fn yield(self: *@This()) void {
+            defer self.iteration +%= 1;
+            if (self.iteration < 20) {
+                SpinLock.yield(self.iteration);
+            } else if (self.iteration < 24) {
+                os.sched_yield();
+            } else if (self.iteration < 26) {
+                time.sleep(1 * time.millisecond);
+            } else {
+                time.sleep(10 * time.millisecond);
+            }
+        }
+    };
 };
 
 test "spinlock" {
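To illustrate how callers are expected to use the new Backoff (hypothetical helper, not part of this diff; `atomicIncrement` is an illustrative name): a CAS retry loop yields a little longer after each failed attempt, escalating from pause/yield instructions to os.sched_yield() and finally to millisecond sleeps.

const std = @import("std");

// Hypothetical CAS retry loop built on SpinLock.Backoff:
fn atomicIncrement(value: *u32) void {
    var backoff = std.SpinLock.Backoff.init();
    var current = @atomicLoad(u32, value, .Monotonic);
    while (true) {
        current = @cmpxchgWeak(u32, value, current, current + 1, .Monotonic, .Monotonic) orelse return;
        backoff.yield(); // iterations 0..19 spin, 20..23 sched_yield, 24+ sleep
    }
}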
lib/std/statically_initialized_mutex.zig (deleted, 105 lines):

@@ -1,105 +0,0 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const AtomicOrder = builtin.AtomicOrder;
const AtomicRmwOp = builtin.AtomicRmwOp;
const assert = std.debug.assert;
const expect = std.testing.expect;
const windows = std.os.windows;

/// Lock may be held only once. If the same thread
/// tries to acquire the same mutex twice, it deadlocks.
/// This type is intended to be initialized statically. If you don't
/// require static initialization, use std.Mutex.
/// On Windows, this mutex allocates resources when it is
/// first used, and the resources cannot be freed.
/// On Linux, this is an alias of std.Mutex.
pub const StaticallyInitializedMutex = switch (builtin.os) {
    builtin.Os.linux => std.Mutex,
    builtin.Os.windows => struct {
        lock: windows.CRITICAL_SECTION,
        init_once: windows.RTL_RUN_ONCE,

        pub const Held = struct {
            mutex: *StaticallyInitializedMutex,

            pub fn release(self: Held) void {
                windows.kernel32.LeaveCriticalSection(&self.mutex.lock);
            }
        };

        pub fn init() StaticallyInitializedMutex {
            return StaticallyInitializedMutex{
                .lock = undefined,
                .init_once = windows.INIT_ONCE_STATIC_INIT,
            };
        }

        extern fn initCriticalSection(
            InitOnce: *windows.RTL_RUN_ONCE,
            Parameter: ?*c_void,
            Context: ?*c_void,
        ) windows.BOOL {
            const lock = @ptrCast(*windows.CRITICAL_SECTION, @alignCast(@alignOf(windows.CRITICAL_SECTION), Parameter));
            windows.kernel32.InitializeCriticalSection(lock);
            return windows.TRUE;
        }

        /// TODO: once https://github.com/ziglang/zig/issues/287 is solved and std.Mutex has a better
        /// implementation of a runtime initialized mutex, remove this function.
        pub fn deinit(self: *StaticallyInitializedMutex) void {
            windows.InitOnceExecuteOnce(&self.init_once, initCriticalSection, &self.lock, null);
            windows.kernel32.DeleteCriticalSection(&self.lock);
        }

        pub fn acquire(self: *StaticallyInitializedMutex) Held {
            windows.InitOnceExecuteOnce(&self.init_once, initCriticalSection, &self.lock, null);
            windows.kernel32.EnterCriticalSection(&self.lock);
            return Held{ .mutex = self };
        }
    },
    else => std.Mutex,
};

test "std.StaticallyInitializedMutex" {
    const TestContext = struct {
        data: i128,

        const TestContext = @This();
        const incr_count = 10000;

        var mutex = StaticallyInitializedMutex.init();

        fn worker(ctx: *TestContext) void {
            var i: usize = 0;
            while (i != TestContext.incr_count) : (i += 1) {
                const held = mutex.acquire();
                defer held.release();

                ctx.data += 1;
            }
        }
    };

    var plenty_of_memory = try std.heap.direct_allocator.alloc(u8, 300 * 1024);
    defer std.heap.direct_allocator.free(plenty_of_memory);

    var fixed_buffer_allocator = std.heap.ThreadSafeFixedBufferAllocator.init(plenty_of_memory);
    var a = &fixed_buffer_allocator.allocator;

    var context = TestContext{ .data = 0 };

    if (builtin.single_threaded) {
        TestContext.worker(&context);
        expect(context.data == TestContext.incr_count);
    } else {
        const thread_count = 10;
        var threads: [thread_count]*std.Thread = undefined;
        for (threads) |*t| {
            t.* = try std.Thread.spawn(&context, TestContext.worker);
        }
        for (threads) |t|
            t.wait();

        expect(context.data == thread_count * TestContext.incr_count);
    }
}
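The file above is deleted rather than ported: the new std.Mutex is a plain value whose init() allocates no OS resources up front (the Windows keyed-event handle is created lazily on first park), so, per its updated doc comment, it supports static initialization and subsumes StaticallyInitializedMutex.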
lib/std/std.zig:

@@ -19,11 +19,11 @@ pub const Progress = @import("progress.zig").Progress;
 pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
 pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
 pub const SpinLock = @import("spinlock.zig").SpinLock;
-pub const StaticallyInitializedMutex = @import("statically_initialized_mutex.zig").StaticallyInitializedMutex;
 pub const StringHashMap = @import("hash_map.zig").StringHashMap;
 pub const TailQueue = @import("linked_list.zig").TailQueue;
 pub const Target = @import("target.zig").Target;
 pub const Thread = @import("thread.zig").Thread;
+pub const ThreadParker = @import("parker.zig").ThreadParker;
 
 pub const atomic = @import("atomic.zig");
 pub const base64 = @import("base64.zig");