Mutex deadlock detection in debug

Add a debug implementation to Mutex that detects deadlocks caused by calling lock twice from the same thread.
IntegratedQuantum 2023-01-19 15:57:29 +01:00 committed by GitHub
parent 116b770809
commit f38fd388f8
2 changed files with 64 additions and 22 deletions
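
In a Debug, non-single-threaded build, locking a Mutex the current thread already holds now panics with "Deadlock detected" instead of hanging. A minimal sketch of the new behavior (a hypothetical test, not part of this diff):

    const std = @import("std");

    test "relocking from the same thread is caught in Debug builds" {
        var m = std.Thread.Mutex{};
        m.lock();
        defer m.unlock();

        // With this commit, uncommenting the next line in a Debug,
        // multi-threaded build hits @panic("Deadlock detected") in DebugImpl
        // rather than blocking the thread forever.
        // m.lock();
    }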

lib/std/Thread/Condition.zig

@@ -161,12 +161,20 @@ const WindowsImpl = struct {
             }
         }
 
+        if (comptime builtin.mode == .Debug) {
+            // The internal state of the DebugMutex needs to be handled here as well.
+            mutex.impl.locking_thread.store(0, .Unordered);
+        }
         const rc = os.windows.kernel32.SleepConditionVariableSRW(
             &self.condition,
-            &mutex.impl.srwlock,
+            if (comptime builtin.mode == .Debug) &mutex.impl.impl.srwlock else &mutex.impl.srwlock,
             timeout_ms,
             0, // the srwlock was assumed to acquired in exclusive mode not shared
         );
+        if (comptime builtin.mode == .Debug) {
+            // The internal state of the DebugMutex needs to be handled here as well.
+            mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .Unordered);
+        }
 
         // Return error.Timeout if we know the timeout elapsed correctly.
        if (rc == os.windows.FALSE) {
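
The reset/restore around SleepConditionVariableSRW is needed because the kernel releases the SRW lock while the thread waits: from the DebugMutex's point of view the mutex is unlocked for the duration of the wait and re-acquired on wakeup. A small usage sketch (not from this diff) of the Condition/Mutex pairing this bookkeeping has to stay consistent with:

    const std = @import("std");

    // While a thread is blocked in cond.wait(mutex), the mutex is released by
    // the OS and re-acquired before wait() returns, so the DebugMutex state is
    // set to 0 ("unlocked") before the sleep and back to the waiter's thread
    // id afterwards. `flag` is only touched while `mutex` is held.
    fn waitForFlag(cond: *std.Thread.Condition, mutex: *std.Thread.Mutex, flag: *bool) void {
        mutex.lock();
        defer mutex.unlock();
        while (!flag.*) {
            cond.wait(mutex);
        }
    }
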

lib/std/Thread/Mutex.zig

@@ -27,7 +27,8 @@ const os = std.os;
 const assert = std.debug.assert;
 const testing = std.testing;
 const Atomic = std.atomic.Atomic;
-const Futex = std.Thread.Futex;
+const Thread = std.Thread;
+const Futex = Thread.Futex;
 
 impl: Impl = .{},
 
@@ -51,7 +52,12 @@ pub fn unlock(self: *Mutex) void {
     self.impl.unlock();
 }
 
-const Impl = if (builtin.single_threaded)
+const Impl = if (builtin.mode == .Debug and !builtin.single_threaded)
+    DebugImpl
+else
+    ReleaseImpl;
+
+const ReleaseImpl = if (builtin.single_threaded)
     SingleThreadedImpl
 else if (builtin.os.tag == .windows)
     WindowsImpl
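
The selection is resolved entirely at comptime, so release builds keep using the platform implementation directly and pay nothing for the debug wrapper. The same pattern in isolation (hypothetical names, not from the diff):

    const std = @import("std");
    const builtin = @import("builtin");

    // Pick a checked implementation in Debug builds and a plain one otherwise;
    // the branch not taken is never referenced or codegen'd.
    const CounterImpl = if (builtin.mode == .Debug) DebugCounter else PlainCounter;

    const PlainCounter = struct {
        n: u32 = 0,
        fn inc(self: *@This()) void {
            self.n +%= 1;
        }
    };

    const DebugCounter = struct {
        n: u32 = 0,
        fn inc(self: *@This()) void {
            self.n = std.math.add(u32, self.n, 1) catch @panic("counter overflow");
        }
    };
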
@@ -60,22 +66,50 @@ else if (builtin.os.tag.isDarwin())
 else
     FutexImpl;
 
+const DebugImpl = struct {
+    locking_thread: Atomic(Thread.Id) = Atomic(Thread.Id).init(0), // 0 means it's not locked.
+    impl: ReleaseImpl = .{},
+
+    inline fn tryLock(self: *@This()) bool {
+        const locking = self.impl.tryLock();
+        if (locking) {
+            self.locking_thread.store(Thread.getCurrentId(), .Unordered);
+        }
+        return locking;
+    }
+
+    inline fn lock(self: *@This()) void {
+        const current_id = Thread.getCurrentId();
+        if (self.locking_thread.load(.Unordered) == current_id and current_id != 0) {
+            @panic("Deadlock detected");
+        }
+        self.impl.lock();
+        self.locking_thread.store(current_id, .Unordered);
+    }
+
+    inline fn unlock(self: *@This()) void {
+        assert(self.locking_thread.load(.Unordered) == Thread.getCurrentId());
+        self.locking_thread.store(0, .Unordered);
+        self.impl.unlock();
+    }
+};
+
 const SingleThreadedImpl = struct {
     is_locked: bool = false,
 
-    fn tryLock(self: *Impl) bool {
+    fn tryLock(self: *@This()) bool {
         if (self.is_locked) return false;
         self.is_locked = true;
         return true;
     }
 
-    fn lock(self: *Impl) void {
+    fn lock(self: *@This()) void {
         if (!self.tryLock()) {
             unreachable; // deadlock detected
         }
     }
 
-    fn unlock(self: *Impl) void {
+    fn unlock(self: *@This()) void {
         assert(self.is_locked);
         self.is_locked = false;
     }
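
Besides catching re-entrant locking, the wrapper's unlock asserts that the calling thread is the one recorded by lock, so releasing a mutex from a different thread also fails fast in Debug builds. A sketch of a pattern that would now trip that assert (hypothetical helper, not part of the diff):

    const std = @import("std");

    fn releaseFromHelperThread(m: *std.Thread.Mutex) void {
        // In a Debug build this hits the new
        // assert(locking_thread == Thread.getCurrentId()) in DebugImpl.unlock,
        // because the mutex was locked by the spawning thread.
        m.unlock();
    }

    pub fn main() !void {
        var m = std.Thread.Mutex{};
        m.lock();
        const t = try std.Thread.spawn(.{}, releaseFromHelperThread, .{&m});
        t.join();
    }
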
@@ -86,15 +120,15 @@ const SingleThreadedImpl = struct {
 const WindowsImpl = struct {
     srwlock: os.windows.SRWLOCK = .{},
 
-    fn tryLock(self: *Impl) bool {
+    fn tryLock(self: *@This()) bool {
         return os.windows.kernel32.TryAcquireSRWLockExclusive(&self.srwlock) != os.windows.FALSE;
     }
 
-    fn lock(self: *Impl) void {
+    fn lock(self: *@This()) void {
         os.windows.kernel32.AcquireSRWLockExclusive(&self.srwlock);
     }
 
-    fn unlock(self: *Impl) void {
+    fn unlock(self: *@This()) void {
         os.windows.kernel32.ReleaseSRWLockExclusive(&self.srwlock);
     }
 };
@@ -103,15 +137,15 @@ const WindowsImpl = struct {
 const DarwinImpl = struct {
     oul: os.darwin.os_unfair_lock = .{},
 
-    fn tryLock(self: *Impl) bool {
+    fn tryLock(self: *@This()) bool {
         return os.darwin.os_unfair_lock_trylock(&self.oul);
     }
 
-    fn lock(self: *Impl) void {
+    fn lock(self: *@This()) void {
         os.darwin.os_unfair_lock_lock(&self.oul);
     }
 
-    fn unlock(self: *Impl) void {
+    fn unlock(self: *@This()) void {
         os.darwin.os_unfair_lock_unlock(&self.oul);
     }
 };
@@ -123,19 +157,19 @@ const FutexImpl = struct {
     const locked = 0b01;
     const contended = 0b11; // must contain the `locked` bit for x86 optimization below
 
-    fn tryLock(self: *Impl) bool {
+    fn tryLock(self: *@This()) bool {
         // Lock with compareAndSwap instead of tryCompareAndSwap to avoid reporting spurious CAS failure.
         return self.lockFast("compareAndSwap");
     }
 
-    fn lock(self: *Impl) void {
+    fn lock(self: *@This()) void {
         // Lock with tryCompareAndSwap instead of compareAndSwap due to being more inline-able on LL/SC archs like ARM.
         if (!self.lockFast("tryCompareAndSwap")) {
             self.lockSlow();
         }
     }
 
-    inline fn lockFast(self: *Impl, comptime casFn: []const u8) bool {
+    inline fn lockFast(self: *@This(), comptime casFn: []const u8) bool {
         // On x86, use `lock bts` instead of `lock cmpxchg` as:
         // - they both seem to mark the cache-line as modified regardless: https://stackoverflow.com/a/63350048
         // - `lock bts` is smaller instruction-wise which makes it better for inlining
@@ -149,7 +183,7 @@ const FutexImpl = struct {
         return @field(self.state, casFn)(unlocked, locked, .Acquire, .Monotonic) == null;
     }
 
-    fn lockSlow(self: *Impl) void {
+    fn lockSlow(self: *@This()) void {
         @setCold(true);
 
         // Avoid doing an atomic swap below if we already know the state is contended.
@@ -172,7 +206,7 @@ const FutexImpl = struct {
         }
     }
 
-    fn unlock(self: *Impl) void {
+    fn unlock(self: *@This()) void {
         // Unlock the mutex and wake up a waiting thread if any.
         //
         // A waiting thread will acquire with `contended` instead of `locked`
@@ -228,7 +262,7 @@ test "Mutex - many uncontended" {
     const Runner = struct {
         mutex: Mutex = .{},
-        thread: std.Thread = undefined,
+        thread: Thread = undefined,
         counter: NonAtomicCounter = .{},
 
         fn run(self: *@This()) void {
@@ -243,7 +277,7 @@ test "Mutex - many uncontended" {
     };
 
     var runners = [_]Runner{.{}} ** num_threads;
-    for (runners) |*r| r.thread = try std.Thread.spawn(.{}, Runner.run, .{r});
+    for (runners) |*r| r.thread = try Thread.spawn(.{}, Runner.run, .{r});
     for (runners) |r| r.thread.join();
     for (runners) |r| try testing.expectEqual(r.counter.get(), num_increments);
 }
@@ -265,7 +299,7 @@ test "Mutex - many contended" {
             var i: usize = num_increments;
             while (i > 0) : (i -= 1) {
                 // Occasionally hint to let another thread run.
-                defer if (i % 100 == 0) std.Thread.yield() catch {};
+                defer if (i % 100 == 0) Thread.yield() catch {};
 
                 self.mutex.lock();
                 defer self.mutex.unlock();
@@ -277,8 +311,8 @@ test "Mutex - many contended" {
     var runner = Runner{};
 
-    var threads: [num_threads]std.Thread = undefined;
-    for (threads) |*t| t.* = try std.Thread.spawn(.{}, Runner.run, .{&runner});
+    var threads: [num_threads]Thread = undefined;
+    for (threads) |*t| t.* = try Thread.spawn(.{}, Runner.run, .{&runner});
     for (threads) |t| t.join();
 
     try testing.expectEqual(runner.counter.get(), num_increments * num_threads);