Andrew Kelley 2025-11-21 08:04:52 -08:00
parent c91dec3b6f
commit c6e7c2cb80
3 changed files with 818 additions and 557 deletions


@@ -633,11 +633,6 @@ pub const VTable = struct {
result: []u8,
result_alignment: std.mem.Alignment,
) void,
-/// Returns whether the current thread of execution is known to have
-/// been requested to cancel.
-///
-/// Thread-safe.
-cancelRequested: *const fn (?*anyopaque) bool,
/// Executes `start` asynchronously in a manner such that it cleans itself
/// up. This mode does not support results, await, or cancel.
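
For reference, the removed `cancelRequested` entry was a plain function pointer that an `Io` implementation supplied and that callers polled for cooperative cancellation. A minimal sketch of an implementation with that shape (the `State` type and its field are illustrative only, not taken from the standard library):

    const std = @import("std");

    // Illustrative state an implementation might hang off the userdata pointer.
    const State = struct {
        cancel_requested: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
    };

    // Matches the removed vtable signature: `*const fn (?*anyopaque) bool`.
    fn cancelRequested(userdata: ?*anyopaque) bool {
        const state: *State = @ptrCast(@alignCast(userdata orelse return false));
        // Read atomically; another thread may set the flag at any time.
        return state.cancel_requested.load(.acquire);
    }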

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
-//! This struct represents a kernel thread, and acts as a namespace for concurrency
-//! primitives that operate on kernel threads. For concurrency primitives that support
-//! both evented I/O and async I/O, see the respective names in the top level std namespace.
+//! This struct represents a kernel thread, and acts as a namespace for
+//! concurrency primitives that operate on kernel threads. For concurrency
+//! primitives that interact with the I/O interface, see `std.Io`.
const std = @import("std.zig");
const builtin = @import("builtin");
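
The reworded doc comment points I/O-aware code at `std.Io`; `std.Thread` itself keeps the plain kernel-thread API. A small sketch of that API for orientation (nothing here is introduced by this commit):

    const std = @import("std");

    fn worker(counter: *std.atomic.Value(u32)) void {
        // Runs on its own kernel thread.
        _ = counter.fetchAdd(1, .monotonic);
    }

    pub fn main() !void {
        var counter = std.atomic.Value(u32).init(0);
        // Spawn a kernel thread and block until it finishes.
        const t = try std.Thread.spawn(.{}, worker, .{&counter});
        t.join();
        std.debug.assert(counter.load(.monotonic) == 1);
    }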
@@ -20,7 +20,7 @@ pub const RwLock = @import("Thread/RwLock.zig");
pub const Pool = @import("Thread/Pool.zig");
pub const WaitGroup = @import("Thread/WaitGroup.zig");
-pub const use_pthreads = native_os != .windows and native_os != .wasi and builtin.link_libc;
+pub const use_pthreads = std.Io.Threaded.use_pthreads;
/// A thread-safe logical boolean value which can be `set` and `unset`.
///
@@ -422,12 +422,7 @@ pub fn getCurrentId() Id {
return Impl.getCurrentId();
}
-pub const CpuCountError = error{
-    PermissionDenied,
-    SystemResources,
-    Unsupported,
-    Unexpected,
-};
+pub const CpuCountError = std.Io.Threaded.CpuCountError;
/// Returns the platforms view on the number of logical CPU cores available.
///
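
`CpuCountError` becomes a re-export of `std.Io.Threaded.CpuCountError` rather than a locally declared error set; call sites of `getCpuCount` are unaffected. For example:

    const std = @import("std");

    pub fn main() !void {
        // getCpuCount reports the number of logical CPU cores; only the
        // location of its error set changed in this commit.
        const n = try std.Thread.getCpuCount();
        std.debug.print("logical cpus: {d}\n", .{n});
    }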
@@ -446,7 +441,7 @@ pub const SpawnConfig = struct {
/// The allocator to be used to allocate memory for the to-be-spawned thread
allocator: ?std.mem.Allocator = null,
-pub const default_stack_size = 16 * 1024 * 1024;
+pub const default_stack_size = std.Io.Threaded.default_stack_size;
};
pub const SpawnError = error{
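
`default_stack_size` likewise becomes an alias for `std.Io.Threaded.default_stack_size`; `SpawnConfig` still lets callers override the stack size per spawn. A quick sketch:

    const std = @import("std");

    fn busyWork() void {}

    pub fn main() !void {
        // Override the default stack size (now sourced from
        // std.Io.Threaded.default_stack_size) for this one thread.
        const t = try std.Thread.spawn(.{ .stack_size = 1 * 1024 * 1024 }, busyWork, .{});
        t.join();
    }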
@@ -1215,308 +1210,6 @@ const LinuxThreadImpl = struct {
thread: *ThreadCompletion,
const ThreadCompletion = struct {
completion: Completion = Completion.init(.running),
child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
parent_tid: i32 = undefined,
mapped: []align(std.heap.page_size_min) u8,
/// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
/// Ported over from musl libc's pthread detached implementation:
/// https://github.com/ifduyue/musl/search?q=__unmapself
fn freeAndExit(self: *ThreadCompletion) noreturn {
switch (target.cpu.arch) {
.x86 => asm volatile (
\\ movl $91, %%eax # SYS_munmap
\\ movl %[ptr], %%ebx
\\ movl %[len], %%ecx
\\ int $128
\\ movl $1, %%eax # SYS_exit
\\ movl $0, %%ebx
\\ int $128
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.x86_64 => asm volatile (switch (target.abi) {
.gnux32, .muslx32 =>
\\ movl $0x4000000b, %%eax # SYS_munmap
\\ syscall
\\ movl $0x4000003c, %%eax # SYS_exit
\\ xor %%rdi, %%rdi
\\ syscall
,
else =>
\\ movl $11, %%eax # SYS_munmap
\\ syscall
\\ movl $60, %%eax # SYS_exit
\\ xor %%rdi, %%rdi
\\ syscall
,
}
:
: [ptr] "{rdi}" (@intFromPtr(self.mapped.ptr)),
[len] "{rsi}" (self.mapped.len),
),
.arm, .armeb, .thumb, .thumbeb => asm volatile (
\\ mov r7, #91 // SYS_munmap
\\ mov r0, %[ptr]
\\ mov r1, %[len]
\\ svc 0
\\ mov r7, #1 // SYS_exit
\\ mov r0, #0
\\ svc 0
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.aarch64, .aarch64_be => asm volatile (
\\ mov x8, #215 // SYS_munmap
\\ mov x0, %[ptr]
\\ mov x1, %[len]
\\ svc 0
\\ mov x8, #93 // SYS_exit
\\ mov x0, #0
\\ svc 0
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.alpha => asm volatile (
\\ ldi $0, 73 # SYS_munmap
\\ mov %[ptr], $16
\\ mov %[len], $17
\\ callsys
\\ ldi $0, 1 # SYS_exit
\\ ldi $16, 0
\\ callsys
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.hexagon => asm volatile (
\\ r6 = #215 // SYS_munmap
\\ r0 = %[ptr]
\\ r1 = %[len]
\\ trap0(#1)
\\ r6 = #93 // SYS_exit
\\ r0 = #0
\\ trap0(#1)
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.hppa => asm volatile (
\\ ldi 91, %%r20 /* SYS_munmap */
\\ copy %[ptr], %%r26
\\ copy %[len], %%r25
\\ ble 0x100(%%sr2, %%r0)
\\ ldi 1, %%r20 /* SYS_exit */
\\ ldi 0, %%r26
\\ ble 0x100(%%sr2, %%r0)
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.m68k => asm volatile (
\\ move.l #91, %%d0 // SYS_munmap
\\ move.l %[ptr], %%d1
\\ move.l %[len], %%d2
\\ trap #0
\\ move.l #1, %%d0 // SYS_exit
\\ move.l #0, %%d1
\\ trap #0
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.microblaze, .microblazeel => asm volatile (
\\ ori r12, r0, 91 # SYS_munmap
\\ ori r5, %[ptr], 0
\\ ori r6, %[len], 0
\\ brki r14, 0x8
\\ ori r12, r0, 1 # SYS_exit
\\ or r5, r0, r0
\\ brki r14, 0x8
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
// We set `sp` to the address of the current function as a workaround for a Linux
// kernel bug that caused syscalls to return EFAULT if the stack pointer is invalid.
// The bug was introduced in 46e12c07b3b9603c60fc1d421ff18618241cb081 and fixed in
// 7928eb0370d1133d0d8cd2f5ddfca19c309079d5.
.mips, .mipsel => asm volatile (
\\ move $sp, $t9
\\ li $v0, 4091 # SYS_munmap
\\ move $a0, %[ptr]
\\ move $a1, %[len]
\\ syscall
\\ li $v0, 4001 # SYS_exit
\\ li $a0, 0
\\ syscall
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.mips64, .mips64el => asm volatile (switch (target.abi) {
.gnuabin32, .muslabin32 =>
\\ li $v0, 6011 # SYS_munmap
\\ move $a0, %[ptr]
\\ move $a1, %[len]
\\ syscall
\\ li $v0, 6058 # SYS_exit
\\ li $a0, 0
\\ syscall
,
else =>
\\ li $v0, 5011 # SYS_munmap
\\ move $a0, %[ptr]
\\ move $a1, %[len]
\\ syscall
\\ li $v0, 5058 # SYS_exit
\\ li $a0, 0
\\ syscall
,
}
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.or1k => asm volatile (
\\ l.ori r11, r0, 215 # SYS_munmap
\\ l.ori r3, %[ptr]
\\ l.ori r4, %[len]
\\ l.sys 1
\\ l.ori r11, r0, 93 # SYS_exit
\\ l.ori r3, r0, r0
\\ l.sys 1
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.powerpc, .powerpcle, .powerpc64, .powerpc64le => asm volatile (
\\ li 0, 91 # SYS_munmap
\\ mr 3, %[ptr]
\\ mr 4, %[len]
\\ sc
\\ li 0, 1 # SYS_exit
\\ li 3, 0
\\ sc
\\ blr
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.riscv32, .riscv64 => asm volatile (
\\ li a7, 215 # SYS_munmap
\\ mv a0, %[ptr]
\\ mv a1, %[len]
\\ ecall
\\ li a7, 93 # SYS_exit
\\ mv a0, zero
\\ ecall
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.s390x => asm volatile (
\\ lgr %%r2, %[ptr]
\\ lgr %%r3, %[len]
\\ svc 91 # SYS_munmap
\\ lghi %%r2, 0
\\ svc 1 # SYS_exit
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.sh, .sheb => asm volatile (
\\ mov #91, r3 ! SYS_munmap
\\ mov %[ptr], r4
\\ mov %[len], r5
\\ trapa #31
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
\\ mov #1, r3 ! SYS_exit
\\ mov #0, r4
\\ trapa #31
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
\\ or r0, r0
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.sparc => asm volatile (
\\ # See sparc64 comments below.
\\ 1:
\\ cmp %%fp, 0
\\ beq 2f
\\ nop
\\ ba 1b
\\ restore
\\ 2:
\\ mov 73, %%g1 // SYS_munmap
\\ mov %[ptr], %%o0
\\ mov %[len], %%o1
\\ t 0x3 # ST_FLUSH_WINDOWS
\\ t 0x10
\\ mov 1, %%g1 // SYS_exit
\\ mov 0, %%o0
\\ t 0x10
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.sparc64 => asm volatile (
\\ # SPARCs really don't like it when active stack frames
\\ # is unmapped (it will result in a segfault), so we
\\ # force-deactivate it by running `restore` until
\\ # all frames are cleared.
\\ 1:
\\ cmp %%fp, 0
\\ beq 2f
\\ nop
\\ ba 1b
\\ restore
\\ 2:
\\ mov 73, %%g1 // SYS_munmap
\\ mov %[ptr], %%o0
\\ mov %[len], %%o1
\\ # Flush register window contents to prevent background
\\ # memory access before unmapping the stack.
\\ flushw
\\ t 0x6d
\\ mov 1, %%g1 // SYS_exit
\\ mov 0, %%o0
\\ t 0x6d
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
.loongarch32, .loongarch64 => asm volatile (
\\ or $a0, $zero, %[ptr]
\\ or $a1, $zero, %[len]
\\ ori $a7, $zero, 215 # SYS_munmap
\\ syscall 0 # call munmap
\\ ori $a0, $zero, 0
\\ ori $a7, $zero, 93 # SYS_exit
\\ syscall 0 # call exit
:
: [ptr] "r" (@intFromPtr(self.mapped.ptr)),
[len] "r" (self.mapped.len),
: .{ .memory = true }),
else => |cpu_arch| @compileError("Unsupported linux arch: " ++ @tagName(cpu_arch)),
}
unreachable;
}
};
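
The per-architecture assembly above exists because the final `munmap`/`exit` pair must not touch the thread's own stack, which lives inside `mapped`. A naive Zig version makes the problem visible (illustrative sketch only, not a safe replacement):

    const std = @import("std");
    const linux = std.os.linux;

    // NOT safe: after munmap returns, this function is still executing on the
    // stack it just unmapped, so even the call to exit below can fault. That
    // is exactly why freeAndExit is written as raw assembly per architecture.
    fn naiveFreeAndExit(mapped: []align(std.heap.page_size_min) u8) noreturn {
        _ = linux.munmap(mapped.ptr, mapped.len);
        linux.exit(0);
    }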
fn spawn(config: SpawnConfig, comptime f: anytype, args: anytype) !Impl {
const page_size = std.heap.pageSize();
const Args = @TypeOf(args);