std: rework/remove ucontext_t

Our usage of `ucontext_t` in the standard library was kind of
problematic. We unnecessarily mimicked libc-specific structures, and our
`getcontext` implementation was overkill for our use case of stack
tracing.

This commit introduces a new namespace, `std.debug.cpu_context`, which
contains "context" types for various architectures (currently x86,
x86_64, ARM, and AARCH64) containing the general-purpose CPU registers;
the ones needed in practice for stack unwinding. Each implementation has
a function `current` which populates the structure using inline
assembly. The structure is user-overrideable, though that should only be
necessary if the standard library does not have an implementation for
the *architecture*: that is to say, none of this is OS-dependent.

Of course, in POSIX signal handlers, we get a `ucontext_t` from the
kernel. The function `std.debug.cpu_context.fromPosixSignalContext`
converts this to a `std.debug.cpu_context.Native` with a big ol' target
switch.

This functionality is not exposed from `std.c` or `std.posix`, and
neither are `ucontext_t`, `mcontext_t`, or `getcontext`. The rationale
is that these types and functions do not conform to a specific ABI, and
in fact tend to get updated over time based on CPU features and
extensions; in addition, different libcs use different structures which
are "partially compatible" with the kernel structure. Overall, it's a
mess, but all we need is the kernel context, so we can just define a
kernel-compatible structure as long as we don't claim C compatibility by
putting it in `std.c` or `std.posix`.

This change resulted in a few nice `std.debug` simplifications, but
nothing too noteworthy. However, the main benefit of this change is that
DWARF unwinding---sometimes necessary for collecting stack traces
reliably---now requires far less target-specific integration.

Also fix a bug I noticed in `PageAllocator` (I found this due to a bug
in my distro's QEMU distribution; thanks, broken QEMU patch!) and I
think a couple of minor bugs in `std.debug`.

Resolves: #23801
Resolves: #23802
This commit is contained in:
mlugg 2025-09-17 18:38:11 +01:00
parent b578cca022
commit a18fd41064
No known key found for this signature in database
GPG Key ID: 3F5B7DCCBF4AF02E
33 changed files with 1409 additions and 1516 deletions

View File

@ -7035,205 +7035,6 @@ pub const timezone = switch (native_os) {
else => void,
};
/// Kernel/libc `ucontext_t` layout for each supported OS. These are
/// `extern` structs, so field order and types are ABI: do not reorder
/// or retype fields.
/// NOTE(review): each layout presumably mirrors the corresponding
/// platform C headers — verify against upstream before changing.
pub const ucontext_t = switch (native_os) {
    .linux => linux.ucontext_t, // std.os.linux.ucontext_t is currently glibc-compatible, but it should probably not be.
    .emscripten => emscripten.ucontext_t,
    .macos, .ios, .tvos, .watchos, .visionos => extern struct {
        onstack: c_int,
        sigmask: sigset_t,
        stack: stack_t,
        link: ?*ucontext_t,
        mcsize: u64,
        // Darwin stores the machine context out-of-line: `mcontext` points at
        // `__mcontext_data` below, so after copying the struct the pointer
        // must be fixed up to point into the copy.
        mcontext: *mcontext_t,
        __mcontext_data: mcontext_t,
    },
    .freebsd => extern struct {
        sigmask: sigset_t,
        mcontext: mcontext_t,
        link: ?*ucontext_t,
        stack: stack_t,
        flags: c_int,
        __spare__: [4]c_int,
    },
    .solaris, .illumos => extern struct {
        flags: u64,
        link: ?*ucontext_t,
        sigmask: sigset_t,
        stack: stack_t,
        mcontext: mcontext_t,
        brand_data: [3]?*anyopaque,
        filler: [2]i64,
    },
    .netbsd => extern struct {
        flags: u32,
        link: ?*ucontext_t,
        sigmask: sigset_t,
        stack: stack_t,
        mcontext: mcontext_t,
        // Architecture-dependent amount of trailing padding.
        __pad: [
            switch (builtin.cpu.arch) {
                .x86 => 4,
                .mips, .mipsel, .mips64, .mips64el => 14,
                .arm, .armeb, .thumb, .thumbeb => 1,
                .sparc, .sparc64 => if (@sizeOf(usize) == 4) 43 else 8,
                else => 0,
            }
        ]u32,
    },
    .dragonfly => extern struct {
        sigmask: sigset_t,
        mcontext: mcontext_t,
        link: ?*ucontext_t,
        stack: stack_t,
        cofunc: ?*fn (?*ucontext_t, ?*anyopaque) void,
        arg: ?*void,
        _spare: [4]c_int,
    },
    // https://github.com/SerenityOS/serenity/blob/87eac0e424cff4a1f941fb704b9362a08654c24d/Kernel/API/POSIX/ucontext.h#L19-L24
    .haiku, .serenity => extern struct {
        link: ?*ucontext_t,
        sigmask: sigset_t,
        stack: stack_t,
        mcontext: mcontext_t,
    },
    .openbsd => openbsd.ucontext_t,
    // Targets with no (known) ucontext support get `void`.
    else => void,
};
/// Machine (CPU register) context per OS, as embedded in or referenced by
/// `ucontext_t`. `extern` structs whose field order is ABI; do not reorder.
/// NOTE(review): layouts presumably mirror each platform's C headers —
/// verify against upstream before changing.
pub const mcontext_t = switch (native_os) {
    .linux => linux.mcontext_t,
    .emscripten => emscripten.mcontext_t,
    .macos, .ios, .tvos, .watchos, .visionos => darwin.mcontext_t,
    .freebsd => switch (builtin.cpu.arch) {
        .x86_64 => extern struct {
            onstack: u64,
            rdi: u64,
            rsi: u64,
            rdx: u64,
            rcx: u64,
            r8: u64,
            r9: u64,
            rax: u64,
            rbx: u64,
            rbp: u64,
            r10: u64,
            r11: u64,
            r12: u64,
            r13: u64,
            r14: u64,
            r15: u64,
            trapno: u32,
            fs: u16,
            gs: u16,
            addr: u64,
            flags: u32,
            es: u16,
            ds: u16,
            err: u64,
            rip: u64,
            cs: u64,
            rflags: u64,
            rsp: u64,
            ss: u64,
            len: u64,
            fpformat: u64,
            ownedfp: u64,
            fpstate: [64]u64 align(16),
            fsbase: u64,
            gsbase: u64,
            xfpustate: u64,
            xfpustate_len: u64,
            spare: [4]u64,
        },
        .aarch64 => extern struct {
            // General-purpose register state.
            gpregs: extern struct {
                x: [30]u64,
                lr: u64,
                sp: u64,
                elr: u64,
                spsr: u32,
                _pad: u32,
            },
            // Floating-point / SIMD register state.
            fpregs: extern struct {
                q: [32]u128,
                sr: u32,
                cr: u32,
                flags: u32,
                _pad: u32,
            },
            flags: u32,
            _pad: u32,
            _spare: [8]u64,
        },
        else => struct {},
    },
    .solaris, .illumos => extern struct {
        gregs: [28]u64,
        fpregs: solaris.fpregset_t,
    },
    .netbsd => switch (builtin.cpu.arch) {
        .aarch64, .aarch64_be => extern struct {
            gregs: [35]u64,
            fregs: [528]u8 align(16),
            spare: [8]u64,
        },
        .x86 => extern struct {
            gregs: [19]u32,
            fpregs: [161]u32,
            mc_tlsbase: u32,
        },
        .x86_64 => extern struct {
            gregs: [26]u64,
            mc_tlsbase: u64,
            fpregs: [512]u8 align(8),
        },
        else => struct {},
    },
    .dragonfly => dragonfly.mcontext_t,
    .haiku => haiku.mcontext_t,
    .serenity => switch (native_arch) {
        // https://github.com/SerenityOS/serenity/blob/200e91cd7f1ec5453799a2720d4dc114a59cc289/Kernel/Arch/aarch64/mcontext.h#L15-L19
        .aarch64 => extern struct {
            x: [31]u64,
            sp: u64,
            pc: u64,
        },
        // https://github.com/SerenityOS/serenity/blob/66f8d0f031ef25c409dbb4fecaa454800fecae0f/Kernel/Arch/riscv64/mcontext.h#L15-L18
        .riscv64 => extern struct {
            x: [31]u64,
            pc: u64,
        },
        // https://github.com/SerenityOS/serenity/blob/7b9ea3efdec9f86a1042893e8107d0b23aad8727/Kernel/Arch/x86_64/mcontext.h#L15-L40
        .x86_64 => extern struct {
            rax: u64,
            rcx: u64,
            rdx: u64,
            rbx: u64,
            rsp: u64,
            rbp: u64,
            rsi: u64,
            rdi: u64,
            rip: u64,
            r8: u64,
            r9: u64,
            r10: u64,
            r11: u64,
            r12: u64,
            r13: u64,
            r14: u64,
            r15: u64,
            rflags: u64,
            cs: u32,
            ss: u32,
            ds: u32,
            es: u32,
            fs: u32,
            gs: u32,
        },
        else => struct {},
    },
    else => void,
};
pub const user_desc = switch (native_os) {
.linux => linux.user_desc,
else => void,
@ -11238,13 +11039,6 @@ pub const LC = enum(c_int) {
pub extern "c" fn setlocale(category: LC, locale: ?[*:0]const u8) ?[*:0]const u8;
/// libc `getcontext`, where available. Resolves to `{}` (no function) on
/// platforms whose libc does not implement it; on musl-based Linux the
/// std.os.linux implementation is used instead of the (absent) libc symbol.
pub const getcontext = if (builtin.target.abi.isAndroid() or builtin.target.os.tag == .openbsd or builtin.target.os.tag == .haiku)
{} // libc does not implement getcontext
else if (native_os == .linux and builtin.target.abi.isMusl())
    linux.getcontext
else
    private.getcontext;
pub const max_align_t = if (native_abi == .msvc or native_abi == .itanium)
f64
else if (native_os.isDarwin())
@ -11668,7 +11462,6 @@ const private = struct {
extern "c" fn shm_open(name: [*:0]const u8, flag: c_int, mode: mode_t) c_int;
extern "c" fn pthread_setname_np(thread: pthread_t, name: [*:0]const u8) c_int;
extern "c" fn getcontext(ucp: *ucontext_t) c_int;
extern "c" fn getrandom(buf_ptr: [*]u8, buf_len: usize, flags: c_uint) isize;
extern "c" fn getentropy(buffer: [*]u8, size: usize) c_int;

View File

@ -348,107 +348,6 @@ pub const VM = struct {
pub const exception_type_t = c_int;
/// Darwin machine context: exception, thread (general-purpose), and
/// FP/SIMD register state. Field order is ABI; do not reorder.
pub const mcontext_t = switch (native_arch) {
    .aarch64 => extern struct {
        es: exception_state,
        ss: thread_state,
        ns: neon_state,
    },
    .x86_64 => extern struct {
        es: exception_state,
        ss: thread_state,
        fs: float_state,
    },
    else => @compileError("unsupported arch"),
};
/// Darwin per-architecture exception state, embedded in `mcontext_t.es`.
pub const exception_state = switch (native_arch) {
    .aarch64 => extern struct {
        far: u64, // Virtual Fault Address
        esr: u32, // Exception syndrome
        exception: u32, // Number of arm exception taken
    },
    .x86_64 => extern struct {
        trapno: u16,
        cpu: u16,
        err: u32,
        faultvaddr: u64,
    },
    else => @compileError("unsupported arch"),
};
/// Darwin general-purpose register state, embedded in `mcontext_t.ss`.
pub const thread_state = switch (native_arch) {
    .aarch64 => extern struct {
        /// General purpose registers
        regs: [29]u64,
        /// Frame pointer x29
        fp: u64,
        /// Link register x30
        lr: u64,
        /// Stack pointer x31
        sp: u64,
        /// Program counter
        pc: u64,
        /// Current program status register
        cpsr: u32,
        __pad: u32,
    },
    .x86_64 => extern struct {
        rax: u64,
        rbx: u64,
        rcx: u64,
        rdx: u64,
        rdi: u64,
        rsi: u64,
        rbp: u64,
        rsp: u64,
        r8: u64,
        r9: u64,
        r10: u64,
        r11: u64,
        r12: u64,
        r13: u64,
        r14: u64,
        r15: u64,
        rip: u64,
        rflags: u64,
        cs: u64,
        fs: u64,
        gs: u64,
    },
    else => @compileError("unsupported arch"),
};
/// Darwin aarch64 SIMD/FP register state (`mcontext_t.ns`).
pub const neon_state = extern struct {
    q: [32]u128,
    fpsr: u32,
    fpcr: u32,
};
/// Darwin x86_64 floating-point state (`mcontext_t.fs`).
/// NOTE(review): field names/layout presumably mirror Apple's
/// `_STRUCT_X86_FLOAT_STATE64` — verify against the SDK headers.
pub const float_state = extern struct {
    reserved: [2]c_int,
    fcw: u16,
    fsw: u16,
    ftw: u8,
    rsrv1: u8,
    fop: u16,
    ip: u32,
    cs: u16,
    rsrv2: u16,
    dp: u32,
    ds: u16,
    rsrv3: u16,
    mxcsr: u32,
    mxcsrmask: u32,
    stmm: [8]stmm_reg,
    xmm: [16]xmm_reg,
    rsrv4: [96]u8,
    reserved1: c_int,
};
pub const stmm_reg = [16]u8;
pub const xmm_reg = [16]u8;
pub extern "c" fn NSVersionOfRunTimeLibrary(library_name: [*:0]const u8) u32;
pub extern "c" fn _NSGetExecutablePath(buf: [*:0]u8, bufsize: *u32) c_int;
pub extern "c" fn _dyld_image_count() u32;

View File

@ -13,46 +13,6 @@ pub extern "c" fn ptrace(request: c_int, pid: pid_t, addr: caddr_t, data: c_int)
pub extern "c" fn umtx_sleep(ptr: *const volatile c_int, value: c_int, timeout: c_int) c_int;
pub extern "c" fn umtx_wakeup(ptr: *const volatile c_int, count: c_int) c_int;
/// DragonFly BSD x86_64 machine context. `extern` layout is ABI;
/// do not reorder fields.
pub const mcontext_t = extern struct {
    onstack: register_t, // XXX - sigcontext compat.
    rdi: register_t,
    rsi: register_t,
    rdx: register_t,
    rcx: register_t,
    r8: register_t,
    r9: register_t,
    rax: register_t,
    rbx: register_t,
    rbp: register_t,
    r10: register_t,
    r11: register_t,
    r12: register_t,
    r13: register_t,
    r14: register_t,
    r15: register_t,
    xflags: register_t,
    trapno: register_t,
    addr: register_t,
    flags: register_t,
    err: register_t,
    rip: register_t,
    cs: register_t,
    rflags: register_t,
    rsp: register_t, // machine state
    ss: register_t,
    len: c_uint, // sizeof(mcontext_t)
    fpformat: c_uint,
    ownedfp: c_uint,
    reserved: c_uint,
    unused: [8]c_uint,
    // NOTE! 64-byte aligned as of here. Also must match savefpu structure.
    fpregs: [256]c_int align(64),
};
pub const register_t = isize;
pub const E = enum(u16) {
/// No error occurred.
SUCCESS = 0,

View File

@ -273,269 +273,6 @@ pub const E = enum(i32) {
pub const status_t = i32;
/// Haiku per-architecture machine context (register state in signal
/// handlers). `extern` layouts are ABI; do not reorder fields.
/// NOTE(review): layouts presumably mirror Haiku's per-arch `vregs`
/// headers — verify against upstream before changing.
pub const mcontext_t = switch (builtin.cpu.arch) {
    .arm, .thumb => extern struct {
        r0: u32,
        r1: u32,
        r2: u32,
        r3: u32,
        r4: u32,
        r5: u32,
        r6: u32,
        r7: u32,
        r8: u32,
        r9: u32,
        r10: u32,
        r11: u32,
        r12: u32,
        r13: u32,
        r14: u32,
        r15: u32,
        cpsr: u32,
    },
    .aarch64 => extern struct {
        // NOTE(review): only 10 x-registers here — looks intentional
        // (matching the Haiku header), but verify against upstream.
        x: [10]u64,
        lr: u64,
        sp: u64,
        elr: u64,
        spsr: u64,
        fp_q: [32]u128,
        fpsr: u32,
        fpcr: u32,
    },
    .m68k => extern struct {
        pc: u32,
        d0: u32,
        d1: u32,
        d2: u32,
        d3: u32,
        d4: u32,
        d5: u32,
        d6: u32,
        d7: u32,
        a0: u32,
        a1: u32,
        a2: u32,
        a3: u32,
        a4: u32,
        a5: u32,
        a6: u32,
        a7: u32,
        ccr: u8,
        f0: f64,
        f1: f64,
        f2: f64,
        f3: f64,
        f4: f64,
        f5: f64,
        f6: f64,
        f7: f64,
        f8: f64,
        f9: f64,
        f10: f64,
        f11: f64,
        f12: f64,
        f13: f64,
    },
    .mipsel => extern struct {
        // NOTE(review): a single register slot — matches what was here
        // before; verify against the Haiku mips header.
        r0: u32,
    },
    .powerpc => extern struct {
        pc: u32,
        r0: u32,
        r1: u32,
        r2: u32,
        r3: u32,
        r4: u32,
        r5: u32,
        r6: u32,
        r7: u32,
        r8: u32,
        r9: u32,
        r10: u32,
        r11: u32,
        r12: u32,
        f0: f64,
        f1: f64,
        f2: f64,
        f3: f64,
        f4: f64,
        f5: f64,
        f6: f64,
        f7: f64,
        f8: f64,
        f9: f64,
        f10: f64,
        f11: f64,
        f12: f64,
        f13: f64,
        reserved: u32,
        fpscr: u32,
        ctr: u32,
        xer: u32,
        cr: u32,
        msr: u32,
        lr: u32,
    },
    .riscv64 => extern struct {
        x: [31]u64,
        pc: u64,
        f: [32]f64,
        fcsr: u64,
    },
    .sparc64 => extern struct {
        g1: u64,
        g2: u64,
        g3: u64,
        g4: u64,
        g5: u64,
        g6: u64,
        g7: u64,
        o0: u64,
        o1: u64,
        o2: u64,
        o3: u64,
        o4: u64,
        o5: u64,
        sp: u64,
        o7: u64,
        l0: u64,
        l1: u64,
        l2: u64,
        l3: u64,
        l4: u64,
        l5: u64,
        l6: u64,
        l7: u64,
        i0: u64,
        i1: u64,
        i2: u64,
        i3: u64,
        i4: u64,
        i5: u64,
        fp: u64,
        i7: u64,
    },
    .x86 => extern struct {
        // Legacy (pre-SSE) x87/MMX save area layout.
        pub const old_extended_regs = extern struct {
            control: u16,
            reserved1: u16,
            status: u16,
            reserved2: u16,
            tag: u16,
            reserved3: u16,
            eip: u32,
            cs: u16,
            opcode: u16,
            datap: u32,
            ds: u16,
            reserved4: u16,
            fp_mmx: [8][10]u8,
        };
        pub const fp_register = extern struct { value: [10]u8, reserved: [6]u8 };
        pub const xmm_register = extern struct { value: [16]u8 };
        // FXSAVE-style save area layout (with SSE state).
        pub const new_extended_regs = extern struct {
            control: u16,
            status: u16,
            tag: u16,
            opcode: u16,
            eip: u32,
            cs: u16,
            reserved1: u16,
            datap: u32,
            ds: u16,
            reserved2: u16,
            mxcsr: u32,
            reserved3: u32,
            fp_mmx: [8]fp_register,
            xmmx: [8]xmm_register,
            reserved4: [224]u8,
        };
        // Union of old/new FPU save formats, discriminated by `format`.
        pub const extended_regs = extern struct {
            state: extern union {
                old_format: old_extended_regs,
                new_format: new_extended_regs,
            },
            format: u32,
        };
        eip: u32,
        eflags: u32,
        eax: u32,
        ecx: u32,
        edx: u32,
        esp: u32,
        ebp: u32,
        reserved: u32,
        xregs: extended_regs,
        edi: u32,
        esi: u32,
        ebx: u32,
    },
    .x86_64 => extern struct {
        pub const fp_register = extern struct {
            value: [10]u8,
            reserved: [6]u8,
        };
        pub const xmm_register = extern struct {
            value: [16]u8,
        };
        // FXSAVE area layout.
        pub const fpu_state = extern struct {
            control: u16,
            status: u16,
            tag: u16,
            opcode: u16,
            rip: u64,
            rdp: u64,
            mxcsr: u32,
            mscsr_mask: u32,
            fp_mmx: [8]fp_register,
            xmm: [16]xmm_register,
            reserved: [96]u8,
        };
        // XSAVE header.
        pub const xstate_hdr = extern struct {
            bv: u64,
            xcomp_bv: u64,
            reserved: [48]u8,
        };
        pub const savefpu = extern struct {
            fxsave: fpu_state,
            xstate: xstate_hdr,
            ymm: [16]xmm_register,
        };
        rax: u64,
        rbx: u64,
        rcx: u64,
        rdx: u64,
        rdi: u64,
        rsi: u64,
        rbp: u64,
        r8: u64,
        r9: u64,
        r10: u64,
        r11: u64,
        r12: u64,
        r13: u64,
        r14: u64,
        r15: u64,
        rsp: u64,
        rip: u64,
        rflags: u64,
        fpu: savefpu,
    },
    else => void,
};
pub const DirEnt = extern struct {
/// device
dev: dev_t,

View File

@ -144,53 +144,6 @@ pub const TCIO = enum(u32) {
ION = 4,
};
/// OpenBSD `ucontext_t` (a.k.a. `struct sigcontext`) per architecture.
/// `extern` layout is ABI; do not reorder fields.
pub const ucontext_t = switch (builtin.cpu.arch) {
    .x86_64 => extern struct {
        sc_rdi: c_long,
        sc_rsi: c_long,
        sc_rdx: c_long,
        sc_rcx: c_long,
        sc_r8: c_long,
        sc_r9: c_long,
        sc_r10: c_long,
        sc_r11: c_long,
        sc_r12: c_long,
        sc_r13: c_long,
        sc_r14: c_long,
        sc_r15: c_long,
        sc_rbp: c_long,
        sc_rbx: c_long,
        sc_rax: c_long,
        sc_gs: c_long,
        sc_fs: c_long,
        sc_es: c_long,
        sc_ds: c_long,
        sc_trapno: c_long,
        sc_err: c_long,
        sc_rip: c_long,
        sc_cs: c_long,
        sc_rflags: c_long,
        sc_rsp: c_long,
        sc_ss: c_long,
        sc_fpstate: *anyopaque, // struct fxsave64 *
        __sc_unused: c_int,
        sc_mask: c_int,
        sc_cookie: c_long,
    },
    .aarch64 => extern struct {
        __sc_unused: c_int,
        sc_mask: c_int,
        sc_sp: c_ulong,
        sc_lr: c_ulong,
        sc_elr: c_ulong,
        sc_spsr: c_ulong,
        sc_x: [30]c_ulong,
        sc_cookie: c_long,
    },
    else => @compileError("missing ucontext_t type definition"),
};
pub const E = enum(u16) {
/// No error occurred.
SUCCESS = 0,

View File

@ -22,6 +22,7 @@ pub const ElfFile = @import("debug/ElfFile.zig");
pub const SelfInfo = @import("debug/SelfInfo.zig");
pub const Info = @import("debug/Info.zig");
pub const Coverage = @import("debug/Coverage.zig");
pub const cpu_context = @import("debug/cpu_context.zig");
pub const simple_panic = @import("debug/simple_panic.zig");
pub const no_panic = @import("debug/no_panic.zig");
@ -331,66 +332,8 @@ test dumpHexFallible {
try std.testing.expectEqualStrings(expected, aw.written());
}
/// Platform-specific thread state. This contains register state, and on some platforms
/// information about the stack. This is not safe to trivially copy, because some platforms
/// use internal pointers within this structure. After copying, call `relocateContext`.
pub const ThreadContext = ThreadContext: {
// Allow overriding the target's `ThreadContext` by exposing `root.debug.ThreadContext`.
if (@hasDecl(root, "debug") and @hasDecl(root.debug, "ThreadContext")) {
break :ThreadContext root.debug.ThreadContext;
}
if (native_os == .windows) break :ThreadContext windows.CONTEXT;
if (posix.ucontext_t != void) break :ThreadContext posix.ucontext_t;
break :ThreadContext noreturn;
};
/// Updates any internal pointers of a `ThreadContext` after the caller copies it.
pub fn relocateContext(dest: *ThreadContext) void {
switch (native_os) {
.macos => dest.mcontext = &dest.__mcontext_data,
else => {},
}
}
/// The value which is placed on the stack to make a copy of a `ThreadContext`.
const ThreadContextBuf = if (ThreadContext == noreturn) void else ThreadContext;
/// The pointer through which a `ThreadContext` is received from callers of stack tracing logic.
pub const ThreadContextPtr = if (ThreadContext == noreturn) noreturn else *const ThreadContext;
/// Capture the current context. The register values in the context will reflect the
/// state after the platform `getcontext` function returns.
///
/// It is valid to call this if the platform doesn't have context capturing support,
/// in that case `false` will be returned. This function is `inline` so that the `false`
/// is comptime-known at the call site in that case.
pub inline fn getContext(context: *ThreadContextBuf) bool {
// Allow overriding the target's `getContext` by exposing `root.debug.getContext`.
if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getContext")) {
return root.debug.getContext(context);
}
if (native_os == .windows) {
context.* = std.mem.zeroes(windows.CONTEXT);
windows.ntdll.RtlCaptureContext(context);
return true;
}
if (@TypeOf(posix.system.getcontext) != void) {
if (posix.system.getcontext(context) != 0) return false;
if (native_os == .macos) {
assert(context.mcsize == @sizeOf(std.c.mcontext_t));
// On aarch64-macos, the system getcontext doesn't write anything into the pc
// register slot, it only writes lr. This makes the context consistent with
// other aarch64 getcontext implementations which write the current lr
// (where getcontext will return to) into both the lr and pc slot of the context.
if (native_arch == .aarch64) context.mcontext.ss.pc = context.mcontext.ss.lr;
}
return true;
}
return false;
}
/// The pointer through which a `cpu_context.Native` is received from callers of stack tracing logic.
pub const CpuContextPtr = if (cpu_context.Native == noreturn) noreturn else *const cpu_context.Native;
/// Invokes detectable illegal behavior when `ok` is `false`.
///
@ -616,10 +559,10 @@ pub const StackUnwindOptions = struct {
/// used to omit intermediate handling code (for instance, a panic handler and its machinery)
/// from stack traces.
first_address: ?usize = null,
/// If not `null`, we will unwind from this `ThreadContext` instead of the current top of the
/// stack. The main use case here is printing stack traces from signal handlers, where the
/// kernel provides a `*const ThreadContext` of the state before the signal.
context: ?ThreadContextPtr = null,
/// If not `null`, we will unwind from this `cpu_context.Native` instead of the current top of
/// the stack. The main use case here is printing stack traces from signal handlers, where the
/// kernel provides a `*const cpu_context.Native` of the state before the signal.
context: ?CpuContextPtr = null,
/// If `true`, stack unwinding strategies which may cause crashes are used as a last resort.
/// If `false`, only known-safe mechanisms will be attempted.
allow_unsafe_unwind: bool = false,
@ -630,8 +573,7 @@ pub const StackUnwindOptions = struct {
///
/// See `writeCurrentStackTrace` to immediately print the trace instead of capturing it.
pub fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize) std.builtin.StackTrace {
var context_buf: ThreadContextBuf = undefined;
var it = StackIterator.init(options.context, &context_buf) catch {
var it = StackIterator.init(options.context) catch {
return .{ .index = 0, .instruction_addresses = &.{} };
};
defer it.deinit();
@ -670,14 +612,7 @@ pub fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_
return;
},
};
var context_buf: ThreadContextBuf = undefined;
var it = StackIterator.init(options.context, &context_buf) catch |err| switch (err) {
error.OutOfMemory => {
tty_config.setColor(writer, .dim) catch {};
try writer.print("Cannot print stack trace: out of memory\n", .{});
tty_config.setColor(writer, .reset) catch {};
return;
},
var it = StackIterator.init(options.context) catch |err| switch (err) {
error.CannotUnwindFromContext => {
tty_config.setColor(writer, .dim) catch {};
try writer.print("Cannot print stack trace: context unwind unavailable for target\n", .{});
@ -794,9 +729,9 @@ const StackIterator = union(enum) {
fp: usize,
/// It is important that this function is marked `inline` so that it can safely use
/// `@frameAddress` and `getContext` as the caller's stack frame and our own are one
/// and the same.
inline fn init(context_opt: ?ThreadContextPtr, context_buf: *ThreadContextBuf) error{ OutOfMemory, CannotUnwindFromContext }!StackIterator {
/// `@frameAddress` and `cpu_context.Native.current` as the caller's stack frame and
/// our own are one and the same.
inline fn init(opt_context_ptr: ?CpuContextPtr) error{CannotUnwindFromContext}!StackIterator {
if (builtin.cpu.arch.isSPARC()) {
// Flush all the register windows on stack.
if (builtin.cpu.has(.sparc, .v9)) {
@ -805,14 +740,12 @@ const StackIterator = union(enum) {
asm volatile ("ta 3" ::: .{ .memory = true }); // ST_FLUSH_WINDOWS
}
}
if (context_opt) |context| {
if (opt_context_ptr) |context_ptr| {
if (!SelfInfo.supports_unwinding) return error.CannotUnwindFromContext;
context_buf.* = context.*;
relocateContext(context_buf);
return .{ .di = try .init(context_buf, getDebugInfoAllocator()) };
return .{ .di = .init(context_ptr) };
}
if (SelfInfo.supports_unwinding and getContext(context_buf)) {
return .{ .di = try .init(context_buf, getDebugInfoAllocator()) };
if (SelfInfo.supports_unwinding and cpu_context.Native != noreturn) {
return .{ .di = .init(&.current()) };
}
return .{ .fp = @frameAddress() };
}
@ -1212,7 +1145,7 @@ pub const have_segfault_handling_support = switch (native_os) {
.windows,
=> true,
.freebsd, .openbsd => ThreadContext != noreturn,
.freebsd, .openbsd => cpu_context.Native != noreturn,
else => false,
};
@ -1309,33 +1242,8 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
};
break :info .{ addr, name };
};
if (ThreadContext == noreturn) return handleSegfault(addr, name, null);
// Some kernels don't align `ctx_ptr` properly, so we'll copy it into a local buffer.
var copied_ctx: posix.ucontext_t = undefined;
const orig_ctx: *align(1) posix.ucontext_t = @ptrCast(ctx_ptr);
copied_ctx = orig_ctx.*;
if (builtin.os.tag.isDarwin() and builtin.cpu.arch == .aarch64) {
// The kernel incorrectly writes the contents of `__mcontext_data` right after `mcontext`,
// rather than after the 8 bytes of padding that are supposed to sit between the two. Copy the
// contents to the right place so that the `mcontext` pointer will be correct after the
// `relocateContext` call below.
const WrittenContext = extern struct {
onstack: c_int,
sigmask: std.c.sigset_t,
stack: std.c.stack_t,
link: ?*std.c.ucontext_t,
mcsize: u64,
mcontext: *std.c.mcontext_t,
__mcontext_data: std.c.mcontext_t align(@sizeOf(usize)), // Disable padding after `mcontext`.
};
const written_ctx: *align(1) WrittenContext = @ptrCast(ctx_ptr);
copied_ctx.__mcontext_data = written_ctx.__mcontext_data;
}
relocateContext(&copied_ctx);
handleSegfault(addr, name, &copied_ctx);
const opt_cpu_context: ?cpu_context.Native = cpu_context.fromPosixSignalContext(ctx_ptr);
handleSegfault(addr, name, if (opt_cpu_context) |*ctx| ctx else null);
}
fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(.winapi) c_long {
@ -1347,10 +1255,10 @@ fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(.winapi) c_
windows.EXCEPTION_STACK_OVERFLOW => .{ "Stack overflow", null },
else => return windows.EXCEPTION_CONTINUE_SEARCH,
};
handleSegfault(addr, name, info.ContextRecord);
handleSegfault(addr, name, &cpu_context.fromWindowsContext(info.ContextRecord));
}
fn handleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?ThreadContextPtr) noreturn {
fn handleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContextPtr) noreturn {
// Allow overriding the target-agnostic segfault handler by exposing `root.debug.handleSegfault`.
if (@hasDecl(root, "debug") and @hasDecl(root.debug, "handleSegfault")) {
return root.debug.handleSegfault(addr, name, opt_ctx);
@ -1358,7 +1266,7 @@ fn handleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?ThreadContextPtr) no
return defaultHandleSegfault(addr, name, opt_ctx);
}
pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?ThreadContextPtr) noreturn {
pub fn defaultHandleSegfault(addr: ?usize, name: []const u8, opt_ctx: ?CpuContextPtr) noreturn {
// There is very similar logic to the following in `defaultPanic`.
switch (panic_stage) {
0 => {

View File

@ -27,7 +27,6 @@ const Reader = std.Io.Reader;
const Dwarf = @This();
pub const expression = @import("Dwarf/expression.zig");
pub const abi = @import("Dwarf/abi.zig");
pub const call_frame = @import("Dwarf/call_frame.zig");
pub const Unwind = @import("Dwarf/Unwind.zig");
@ -1415,7 +1414,7 @@ pub fn readUnitHeader(r: *Reader, endian: Endian) ScanError!UnitHeader {
}
/// Returns the DWARF register number for an x86_64 register number found in compact unwind info
pub fn compactUnwindToDwarfRegNumber(unwind_reg_number: u3) !u8 {
pub fn compactUnwindToDwarfRegNumber(unwind_reg_number: u3) !u16 {
return switch (unwind_reg_number) {
1 => 3, // RBX
2 => 12, // R12
@ -1427,6 +1426,60 @@ pub fn compactUnwindToDwarfRegNumber(unwind_reg_number: u3) !u8 {
};
}
/// Returns the DWARF register number that holds the instruction pointer
/// on `arch`, or `null` for CPU architectures without one.
pub fn ipRegNum(arch: std.Target.Cpu.Arch) ?u16 {
    switch (arch) {
        .x86 => return 8,
        .x86_64 => return 16,
        .arm, .armeb, .thumb, .thumbeb => return 15,
        .aarch64, .aarch64_be => return 32,
        else => return null,
    }
}
/// Returns the DWARF register number of the frame pointer on `arch`.
/// Asserts that `arch` is one of the supported architectures.
pub fn fpRegNum(arch: std.Target.Cpu.Arch) u16 {
    switch (arch) {
        .x86 => return 5,
        .x86_64 => return 6,
        .arm, .armeb, .thumb, .thumbeb => return 11,
        .aarch64, .aarch64_be => return 29,
        else => unreachable,
    }
}
/// Returns the DWARF register number of the stack pointer on `arch`.
/// Asserts that `arch` is one of the supported architectures.
pub fn spRegNum(arch: std.Target.Cpu.Arch) u16 {
    switch (arch) {
        .x86 => return 4,
        .x86_64 => return 7,
        .arm, .armeb, .thumb, .thumbeb => return 13,
        .aarch64, .aarch64_be => return 31,
        else => unreachable,
    }
}
/// Tells whether unwinding for this target is supported by the Dwarf standard.
///
/// See also `std.debug.SelfInfo.supports_unwinding` which tells whether the Zig
/// standard library has a working implementation of unwinding for this target.
pub fn supportsUnwinding(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        // GPU-style targets with no DWARF unwind support.
        .amdgcn, .nvptx, .nvptx64, .spirv32, .spirv64 => return false,
        // Enabling this causes relocation errors such as:
        // error: invalid relocation type R_RISCV_SUB32 at offset 0x20
        .riscv64, .riscv64be, .riscv32, .riscv32be => return false,
        // Conservative guess. Feel free to update this logic with any targets
        // that are known to not support Dwarf unwinding.
        else => return true,
    }
}
/// This function is to make it handy to comment out the return and make it
/// into a crash when working on this file.
pub fn bad() error{InvalidDebugInfo} {

View File

@ -1,351 +0,0 @@
const builtin = @import("builtin");
const std = @import("../../std.zig");
const mem = std.mem;
const posix = std.posix;
const Arch = std.Target.Cpu.Arch;
/// Tells whether unwinding for this target is supported by the Dwarf standard.
///
/// See also `std.debug.SelfInfo.supports_unwinding` which tells whether the Zig
/// standard library has a working implementation of unwinding for this target.
pub fn supportsUnwinding(target: *const std.Target) bool {
    switch (target.cpu.arch) {
        // GPU-style targets with no DWARF unwind support.
        .amdgcn, .nvptx, .nvptx64, .spirv32, .spirv64 => return false,
        // Enabling this causes relocation errors such as:
        // error: invalid relocation type R_RISCV_SUB32 at offset 0x20
        .riscv64, .riscv64be, .riscv32, .riscv32be => return false,
        // Conservative guess. Feel free to update this logic with any targets
        // that are known to not support Dwarf unwinding.
        else => return true,
    }
}
/// Returns the DWARF register number holding the instruction pointer on
/// `arch`, or `null` for CPU architectures without one.
pub fn ipRegNum(arch: Arch) ?u8 {
    switch (arch) {
        .x86 => return 8,
        .x86_64 => return 16,
        .arm, .armeb, .thumb, .thumbeb => return 15,
        .aarch64, .aarch64_be => return 32,
        else => return null,
    }
}
/// Returns the DWARF register number of the frame pointer on `arch`.
/// `reg_context` is consulted for x86 because MachO `.eh_frame` swaps the
/// FP/SP numbers relative to ELF.
pub fn fpRegNum(arch: Arch, reg_context: RegisterContext) u8 {
    switch (arch) {
        // GCC on OS X historically did the opposite of ELF for these registers
        // (only in .eh_frame), and that is now the convention for MachO
        .x86 => return if (reg_context.eh_frame and reg_context.is_macho) 4 else 5,
        .x86_64 => return 6,
        .arm, .armeb, .thumb, .thumbeb => return 11,
        .aarch64, .aarch64_be => return 29,
        else => unreachable,
    }
}
/// Returns the DWARF register number of the stack pointer on `arch`.
/// `reg_context` is consulted for x86 because MachO `.eh_frame` swaps the
/// FP/SP numbers relative to ELF.
pub fn spRegNum(arch: Arch, reg_context: RegisterContext) u8 {
    switch (arch) {
        .x86 => return if (reg_context.eh_frame and reg_context.is_macho) 5 else 4,
        .x86_64 => return 7,
        .arm, .armeb, .thumb, .thumbeb => return 13,
        .aarch64, .aarch64_be => return 31,
        else => unreachable,
    }
}
/// Describes in what DWARF container a register number appears, since the
/// number's meaning can differ between `.eh_frame` and `.debug_frame`
/// (notably the x86 FP/SP swap on MachO).
pub const RegisterContext = struct {
    // True when the number comes from `.eh_frame` (vs `.debug_frame`).
    eh_frame: bool,
    // True when the binary is MachO.
    is_macho: bool,
};
/// Errors returned by `regBytes` when a register's backing storage
/// cannot be produced.
pub const RegBytesError = error{
    InvalidRegister,
    UnimplementedArch,
    UnimplementedOs,
    // Returned when a register number is ambiguous without a `RegisterContext`.
    RegisterContextRequired,
    // Returned when the target has no thread-context (`ucontext_t`) support.
    ThreadContextNotSupported,
};
/// Returns a slice containing the backing storage for `reg_number`.
///
/// This function assumes the Dwarf information corresponds not necessarily to
/// the current executable, but at least with a matching CPU architecture and
/// OS. It is planned to lift this limitation with a future enhancement.
///
/// `reg_context` describes in what context the register number is used, as it can have different
/// meanings depending on the DWARF container. It is only required when getting the stack or
/// frame pointer register on some architectures.
pub fn regBytes(
thread_context_ptr: *std.debug.ThreadContext,
reg_number: u8,
reg_context: ?RegisterContext,
) RegBytesError![]u8 {
if (builtin.os.tag == .windows) {
return switch (builtin.cpu.arch) {
.x86 => switch (reg_number) {
0 => mem.asBytes(&thread_context_ptr.Eax),
1 => mem.asBytes(&thread_context_ptr.Ecx),
2 => mem.asBytes(&thread_context_ptr.Edx),
3 => mem.asBytes(&thread_context_ptr.Ebx),
4 => mem.asBytes(&thread_context_ptr.Esp),
5 => mem.asBytes(&thread_context_ptr.Ebp),
6 => mem.asBytes(&thread_context_ptr.Esi),
7 => mem.asBytes(&thread_context_ptr.Edi),
8 => mem.asBytes(&thread_context_ptr.Eip),
9 => mem.asBytes(&thread_context_ptr.EFlags),
10 => mem.asBytes(&thread_context_ptr.SegCs),
11 => mem.asBytes(&thread_context_ptr.SegSs),
12 => mem.asBytes(&thread_context_ptr.SegDs),
13 => mem.asBytes(&thread_context_ptr.SegEs),
14 => mem.asBytes(&thread_context_ptr.SegFs),
15 => mem.asBytes(&thread_context_ptr.SegGs),
else => error.InvalidRegister,
},
.x86_64 => switch (reg_number) {
0 => mem.asBytes(&thread_context_ptr.Rax),
1 => mem.asBytes(&thread_context_ptr.Rdx),
2 => mem.asBytes(&thread_context_ptr.Rcx),
3 => mem.asBytes(&thread_context_ptr.Rbx),
4 => mem.asBytes(&thread_context_ptr.Rsi),
5 => mem.asBytes(&thread_context_ptr.Rdi),
6 => mem.asBytes(&thread_context_ptr.Rbp),
7 => mem.asBytes(&thread_context_ptr.Rsp),
8 => mem.asBytes(&thread_context_ptr.R8),
9 => mem.asBytes(&thread_context_ptr.R9),
10 => mem.asBytes(&thread_context_ptr.R10),
11 => mem.asBytes(&thread_context_ptr.R11),
12 => mem.asBytes(&thread_context_ptr.R12),
13 => mem.asBytes(&thread_context_ptr.R13),
14 => mem.asBytes(&thread_context_ptr.R14),
15 => mem.asBytes(&thread_context_ptr.R15),
16 => mem.asBytes(&thread_context_ptr.Rip),
else => error.InvalidRegister,
},
.aarch64, .aarch64_be => switch (reg_number) {
0...30 => mem.asBytes(&thread_context_ptr.DUMMYUNIONNAME.X[reg_number]),
31 => mem.asBytes(&thread_context_ptr.Sp),
32 => mem.asBytes(&thread_context_ptr.Pc),
else => error.InvalidRegister,
},
else => error.UnimplementedArch,
};
}
if (posix.ucontext_t == void) return error.ThreadContextNotSupported;
const ucontext_ptr = thread_context_ptr;
return switch (builtin.cpu.arch) {
.x86 => switch (builtin.os.tag) {
.linux, .netbsd, .solaris, .illumos => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EAX]),
1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ECX]),
2 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EDX]),
3 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EBX]),
4...5 => if (reg_context) |r| bytes: {
if (reg_number == 4) {
break :bytes if (r.eh_frame and r.is_macho)
mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EBP])
else
mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ESP]);
} else {
break :bytes if (r.eh_frame and r.is_macho)
mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ESP])
else
mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EBP]);
}
} else error.RegisterContextRequired,
6 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ESI]),
7 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EDI]),
8 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EIP]),
9 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EFL]),
10 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.CS]),
11 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.SS]),
12 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.DS]),
13 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ES]),
14 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.FS]),
15 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.GS]),
16...23 => error.InvalidRegister, // TODO: Support loading ST0-ST7 from mcontext.fpregs
32...39 => error.InvalidRegister, // TODO: Support loading XMM0-XMM7 from mcontext.fpregs
else => error.InvalidRegister,
},
else => error.UnimplementedOs,
},
.x86_64 => switch (builtin.os.tag) {
.linux, .solaris, .illumos => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RAX]),
1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RDX]),
2 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RCX]),
3 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RBX]),
4 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RSI]),
5 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RDI]),
6 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RBP]),
7 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RSP]),
8 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R8]),
9 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R9]),
10 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R10]),
11 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R11]),
12 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R12]),
13 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R13]),
14 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R14]),
15 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R15]),
16 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RIP]),
17...32 => |i| if (builtin.os.tag.isSolarish())
mem.asBytes(&ucontext_ptr.mcontext.fpregs.chip_state.xmm[i - 17])
else
mem.asBytes(&ucontext_ptr.mcontext.fpregs.xmm[i - 17]),
else => error.InvalidRegister,
},
.freebsd => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.mcontext.rax),
1 => mem.asBytes(&ucontext_ptr.mcontext.rdx),
2 => mem.asBytes(&ucontext_ptr.mcontext.rcx),
3 => mem.asBytes(&ucontext_ptr.mcontext.rbx),
4 => mem.asBytes(&ucontext_ptr.mcontext.rsi),
5 => mem.asBytes(&ucontext_ptr.mcontext.rdi),
6 => mem.asBytes(&ucontext_ptr.mcontext.rbp),
7 => mem.asBytes(&ucontext_ptr.mcontext.rsp),
8 => mem.asBytes(&ucontext_ptr.mcontext.r8),
9 => mem.asBytes(&ucontext_ptr.mcontext.r9),
10 => mem.asBytes(&ucontext_ptr.mcontext.r10),
11 => mem.asBytes(&ucontext_ptr.mcontext.r11),
12 => mem.asBytes(&ucontext_ptr.mcontext.r12),
13 => mem.asBytes(&ucontext_ptr.mcontext.r13),
14 => mem.asBytes(&ucontext_ptr.mcontext.r14),
15 => mem.asBytes(&ucontext_ptr.mcontext.r15),
16 => mem.asBytes(&ucontext_ptr.mcontext.rip),
// TODO: Extract xmm state from mcontext.fpstate?
else => error.InvalidRegister,
},
.openbsd => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.sc_rax),
1 => mem.asBytes(&ucontext_ptr.sc_rdx),
2 => mem.asBytes(&ucontext_ptr.sc_rcx),
3 => mem.asBytes(&ucontext_ptr.sc_rbx),
4 => mem.asBytes(&ucontext_ptr.sc_rsi),
5 => mem.asBytes(&ucontext_ptr.sc_rdi),
6 => mem.asBytes(&ucontext_ptr.sc_rbp),
7 => mem.asBytes(&ucontext_ptr.sc_rsp),
8 => mem.asBytes(&ucontext_ptr.sc_r8),
9 => mem.asBytes(&ucontext_ptr.sc_r9),
10 => mem.asBytes(&ucontext_ptr.sc_r10),
11 => mem.asBytes(&ucontext_ptr.sc_r11),
12 => mem.asBytes(&ucontext_ptr.sc_r12),
13 => mem.asBytes(&ucontext_ptr.sc_r13),
14 => mem.asBytes(&ucontext_ptr.sc_r14),
15 => mem.asBytes(&ucontext_ptr.sc_r15),
16 => mem.asBytes(&ucontext_ptr.sc_rip),
// TODO: Extract xmm state from sc_fpstate?
else => error.InvalidRegister,
},
.macos, .ios => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.mcontext.ss.rax),
1 => mem.asBytes(&ucontext_ptr.mcontext.ss.rdx),
2 => mem.asBytes(&ucontext_ptr.mcontext.ss.rcx),
3 => mem.asBytes(&ucontext_ptr.mcontext.ss.rbx),
4 => mem.asBytes(&ucontext_ptr.mcontext.ss.rsi),
5 => mem.asBytes(&ucontext_ptr.mcontext.ss.rdi),
6 => mem.asBytes(&ucontext_ptr.mcontext.ss.rbp),
7 => mem.asBytes(&ucontext_ptr.mcontext.ss.rsp),
8 => mem.asBytes(&ucontext_ptr.mcontext.ss.r8),
9 => mem.asBytes(&ucontext_ptr.mcontext.ss.r9),
10 => mem.asBytes(&ucontext_ptr.mcontext.ss.r10),
11 => mem.asBytes(&ucontext_ptr.mcontext.ss.r11),
12 => mem.asBytes(&ucontext_ptr.mcontext.ss.r12),
13 => mem.asBytes(&ucontext_ptr.mcontext.ss.r13),
14 => mem.asBytes(&ucontext_ptr.mcontext.ss.r14),
15 => mem.asBytes(&ucontext_ptr.mcontext.ss.r15),
16 => mem.asBytes(&ucontext_ptr.mcontext.ss.rip),
else => error.InvalidRegister,
},
else => error.UnimplementedOs,
},
.arm, .armeb, .thumb, .thumbeb => switch (builtin.os.tag) {
.linux => switch (reg_number) {
0 => mem.asBytes(&ucontext_ptr.mcontext.arm_r0),
1 => mem.asBytes(&ucontext_ptr.mcontext.arm_r1),
2 => mem.asBytes(&ucontext_ptr.mcontext.arm_r2),
3 => mem.asBytes(&ucontext_ptr.mcontext.arm_r3),
4 => mem.asBytes(&ucontext_ptr.mcontext.arm_r4),
5 => mem.asBytes(&ucontext_ptr.mcontext.arm_r5),
6 => mem.asBytes(&ucontext_ptr.mcontext.arm_r6),
7 => mem.asBytes(&ucontext_ptr.mcontext.arm_r7),
8 => mem.asBytes(&ucontext_ptr.mcontext.arm_r8),
9 => mem.asBytes(&ucontext_ptr.mcontext.arm_r9),
10 => mem.asBytes(&ucontext_ptr.mcontext.arm_r10),
11 => mem.asBytes(&ucontext_ptr.mcontext.arm_fp),
12 => mem.asBytes(&ucontext_ptr.mcontext.arm_ip),
13 => mem.asBytes(&ucontext_ptr.mcontext.arm_sp),
14 => mem.asBytes(&ucontext_ptr.mcontext.arm_lr),
15 => mem.asBytes(&ucontext_ptr.mcontext.arm_pc),
// CPSR is not allocated a register number (See: https://github.com/ARM-software/abi-aa/blob/main/aadwarf32/aadwarf32.rst, Section 4.1)
else => error.InvalidRegister,
},
else => error.UnimplementedOs,
},
.aarch64, .aarch64_be => switch (builtin.os.tag) {
.macos, .ios, .watchos => switch (reg_number) {
0...28 => mem.asBytes(&ucontext_ptr.mcontext.ss.regs[reg_number]),
29 => mem.asBytes(&ucontext_ptr.mcontext.ss.fp),
30 => mem.asBytes(&ucontext_ptr.mcontext.ss.lr),
31 => mem.asBytes(&ucontext_ptr.mcontext.ss.sp),
32 => mem.asBytes(&ucontext_ptr.mcontext.ss.pc),
// TODO: Find storage for this state
//34 => mem.asBytes(&ucontext_ptr.ra_sign_state),
// V0-V31
64...95 => mem.asBytes(&ucontext_ptr.mcontext.ns.q[reg_number - 64]),
else => error.InvalidRegister,
},
.netbsd => switch (reg_number) {
0...34 => mem.asBytes(&ucontext_ptr.mcontext.gregs[reg_number]),
else => error.InvalidRegister,
},
.freebsd => switch (reg_number) {
0...29 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.x[reg_number]),
30 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.lr),
31 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.sp),
// TODO: This seems wrong, but it was in the previous debug.zig code for mapping PC, check this
32 => mem.asBytes(&ucontext_ptr.mcontext.gpregs.elr),
else => error.InvalidRegister,
},
.openbsd => switch (reg_number) {
0...30 => mem.asBytes(&ucontext_ptr.sc_x[reg_number]),
31 => mem.asBytes(&ucontext_ptr.sc_sp),
32 => mem.asBytes(&ucontext_ptr.sc_lr),
33 => mem.asBytes(&ucontext_ptr.sc_elr),
34 => mem.asBytes(&ucontext_ptr.sc_spsr),
else => error.InvalidRegister,
},
else => switch (reg_number) {
0...30 => mem.asBytes(&ucontext_ptr.mcontext.regs[reg_number]),
31 => mem.asBytes(&ucontext_ptr.mcontext.sp),
32 => mem.asBytes(&ucontext_ptr.mcontext.pc),
else => error.InvalidRegister,
},
},
else => error.UnimplementedArch,
};
}
/// Returns a pointer to a register stored in a ThreadContext, preserving the
/// pointer attributes of the context.
pub fn regValueNative(
    thread_context_ptr: *std.debug.ThreadContext,
    reg_number: u8,
    reg_context: ?RegisterContext,
) !*align(1) usize {
    // Look up the raw byte storage backing this register inside the context.
    const reg_bytes = try regBytes(thread_context_ptr, reg_number, reg_context);
    // Only a register exactly one `usize` wide can be reinterpreted this way.
    return switch (reg_bytes.len) {
        @sizeOf(usize) => @ptrCast(reg_bytes),
        else => error.IncompatibleRegisterSize,
    };
}

View File

@ -5,12 +5,17 @@ const native_endian = native_arch.endian();
const std = @import("std");
const leb = std.leb;
const OP = std.dwarf.OP;
const abi = std.debug.Dwarf.abi;
const mem = std.mem;
const assert = std.debug.assert;
const testing = std.testing;
const Writer = std.Io.Writer;
const regNative = std.debug.SelfInfo.DwarfUnwindContext.regNative;
const ip_reg_num = std.debug.Dwarf.ipRegNum(native_arch).?;
const fp_reg_num = std.debug.Dwarf.fpRegNum(native_arch);
const sp_reg_num = std.debug.Dwarf.spRegNum(native_arch);
/// Expressions can be evaluated in different contexts, each requiring its own set of inputs.
/// Callers should specify all the fields relevant to their context. If a field is required
/// by the expression and it isn't in the context, error.IncompleteExpressionContext is returned.
@ -23,9 +28,7 @@ pub const Context = struct {
object_address: ?*const anyopaque = null,
/// .debug_addr section
debug_addr: ?[]const u8 = null,
/// Thread context
thread_context: ?*std.debug.ThreadContext = null,
reg_context: ?abi.RegisterContext = null,
cpu_context: ?*std.debug.cpu_context.Native = null,
/// Call frame address, if in a CFI context
cfa: ?usize = null,
/// This expression is a sub-expression from an OP.entry_value instruction
@ -62,7 +65,9 @@ pub const Error = error{
InvalidTypeLength,
TruncatedIntegralType,
} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero, ReadFailed };
IncompatibleRegisterSize,
} || std.debug.cpu_context.DwarfRegisterError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero, ReadFailed };
/// A stack machine that can decode and run DWARF expressions.
/// Expressions can be decoded for non-native address size and endianness,
@ -369,29 +374,20 @@ pub fn StackMachine(comptime options: Options) type {
OP.breg0...OP.breg31,
OP.bregx,
=> {
if (context.thread_context == null) return error.IncompleteExpressionContext;
const cpu_context = context.cpu_context orelse return error.IncompleteExpressionContext;
const base_register = operand.?.base_register;
var value: i64 = @intCast(mem.readInt(usize, (try abi.regBytes(
context.thread_context.?,
base_register.base_register,
context.reg_context,
))[0..@sizeOf(usize)], native_endian));
value += base_register.offset;
try self.stack.append(allocator, .{ .generic = @intCast(value) });
const br = operand.?.base_register;
const value: i64 = @intCast((try regNative(cpu_context, br.base_register)).*);
try self.stack.append(allocator, .{ .generic = @intCast(value + br.offset) });
},
OP.regval_type => {
const register_type = operand.?.register_type;
const value = mem.readInt(usize, (try abi.regBytes(
context.thread_context.?,
register_type.register,
context.reg_context,
))[0..@sizeOf(usize)], native_endian);
const cpu_context = context.cpu_context orelse return error.IncompleteExpressionContext;
const rt = operand.?.register_type;
try self.stack.append(allocator, .{
.regval_type = .{
.type_offset = register_type.type_offset,
.type_offset = rt.type_offset,
.type_size = @sizeOf(addr_type),
.value = value,
.value = (try regNative(cpu_context, rt.register)).*,
},
});
},
@ -734,14 +730,14 @@ pub fn StackMachine(comptime options: Options) type {
// TODO: The spec states that this sub-expression needs to observe the state (ie. registers)
// as it was upon entering the current subprogram. If this isn't being called at the
// end of a frame unwind operation, an additional ThreadContext with this state will be needed.
// end of a frame unwind operation, an additional cpu_context.Native with this state will be needed.
if (isOpcodeRegisterLocation(block[0])) {
if (context.thread_context == null) return error.IncompleteExpressionContext;
const cpu_context = context.cpu_context orelse return error.IncompleteExpressionContext;
var block_stream: std.Io.Reader = .fixed(block);
const register = (try readOperand(&block_stream, block[0], context)).?.register;
const value = mem.readInt(usize, (try abi.regBytes(context.thread_context.?, register, context.reg_context))[0..@sizeOf(usize)], native_endian);
const value = (try regNative(cpu_context, register)).*;
try self.stack.append(allocator, .{ .generic = value });
} else {
var stack_machine: Self = .{};
@ -1149,34 +1145,27 @@ test "basics" {
}
// Register values
if (@sizeOf(std.debug.ThreadContext) != 0) {
if (std.debug.cpu_context.Native != noreturn) {
stack_machine.reset();
program.clearRetainingCapacity();
const reg_context = abi.RegisterContext{
.eh_frame = true,
.is_macho = builtin.os.tag == .macos,
};
var thread_context: std.debug.ThreadContext = undefined;
std.debug.relocateContext(&thread_context);
var cpu_context: std.debug.cpu_context.Native = undefined;
const context = Context{
.thread_context = &thread_context,
.reg_context = reg_context,
.cpu_context = &cpu_context,
};
// Only test register operations on arch / os that have them implemented
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
const reg_bytes = try cpu_context.dwarfRegisterBytes(0);
// TODO: Test fbreg (once implemented): mock a DIE and point compile_unit.frame_base at it
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
(try abi.regValueNative(&thread_context, abi.fpRegNum(native_arch, reg_context), reg_context)).* = 1;
(try abi.regValueNative(&thread_context, abi.spRegNum(native_arch, reg_context), reg_context)).* = 2;
(try abi.regValueNative(&thread_context, abi.ipRegNum(native_arch).?, reg_context)).* = 3;
(try regNative(&cpu_context, fp_reg_num)).* = 1;
(try regNative(&cpu_context, sp_reg_num)).* = 2;
(try regNative(&cpu_context, ip_reg_num)).* = 3;
try b.writeBreg(writer, abi.fpRegNum(native_arch, reg_context), @as(usize, 100));
try b.writeBreg(writer, abi.spRegNum(native_arch, reg_context), @as(usize, 200));
try b.writeBregx(writer, abi.ipRegNum(native_arch).?, @as(usize, 300));
try b.writeBreg(writer, fp_reg_num, @as(usize, 100));
try b.writeBreg(writer, sp_reg_num, @as(usize, 200));
try b.writeBregx(writer, ip_reg_num, @as(usize, 300));
try b.writeRegvalType(writer, @as(u8, 0), @as(usize, 400));
_ = try stack_machine.run(program.written(), allocator, context, 0);
@ -1189,15 +1178,6 @@ test "basics" {
try testing.expectEqual(@as(usize, 303), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 202), stack_machine.stack.pop().?.generic);
try testing.expectEqual(@as(usize, 101), stack_machine.stack.pop().?.generic);
} else |err| {
switch (err) {
error.UnimplementedArch,
error.UnimplementedOs,
error.ThreadContextNotSupported,
=> {},
else => return err,
}
}
}
// Stack operations
@ -1585,18 +1565,13 @@ test "basics" {
}
// Register location description
const reg_context = abi.RegisterContext{
.eh_frame = true,
.is_macho = builtin.os.tag == .macos,
};
var thread_context: std.debug.ThreadContext = undefined;
std.debug.relocateContext(&thread_context);
var cpu_context: std.debug.cpu_context.Native = undefined;
std.debug.relocateContext(&cpu_context);
context = Context{
.thread_context = &thread_context,
.reg_context = reg_context,
.cpu_context = &cpu_context,
};
if (abi.regBytes(&thread_context, 0, reg_context)) |reg_bytes| {
const reg_bytes = try cpu_context.dwarfRegisterBytes(0);
mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
var sub_program: std.Io.Writer.Allocating = .init(allocator);
@ -1609,14 +1584,5 @@ test "basics" {
try b.writeEntryValue(writer, sub_program.written());
_ = try stack_machine.run(program.written(), allocator, context, null);
try testing.expectEqual(@as(usize, 0xee), stack_machine.stack.pop().?.generic);
} else |err| {
switch (err) {
error.UnimplementedArch,
error.UnimplementedOs,
error.ThreadContextNotSupported,
=> {},
else => return err,
}
}
}
}

View File

@ -11,8 +11,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Dwarf = std.debug.Dwarf;
const regBytes = Dwarf.abi.regBytes;
const regValueNative = Dwarf.abi.regValueNative;
const CpuContext = std.debug.cpu_context.Native;
const root = @import("root");
@ -38,8 +37,6 @@ pub const Error = error{
pub const target_supported: bool = Module != void;
/// Indicates whether the `SelfInfo` implementation has support for unwinding on this target.
///
/// For whether DWARF unwinding is *theoretically* possible, see `Dwarf.abi.supportsUnwinding`.
pub const supports_unwinding: bool = target_supported and Module.supports_unwinding;
pub const UnwindContext = if (supports_unwinding) Module.UnwindContext;
@ -120,7 +117,7 @@ pub fn getModuleNameForAddress(self: *SelfInfo, gpa: Allocator, address: usize)
/// pub const UnwindContext = struct {
/// /// A PC value inside the function of the last unwound frame.
/// pc: usize,
/// pub fn init(tc: *std.debug.ThreadContext, gpa: Allocator) Allocator.Error!UnwindContext;
/// pub fn init(ctx: *std.debug.cpu_context.Native, gpa: Allocator) Allocator.Error!UnwindContext;
/// pub fn deinit(uc: *UnwindContext, gpa: Allocator) void;
/// /// Returns the frame pointer associated with the last unwound stack frame. If the frame
/// /// pointer is unknown, 0 may be returned instead.
@ -141,9 +138,26 @@ const Module: type = Module: {
break :Module root.debug.Module;
}
break :Module switch (native_os) {
.linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku, .solaris, .illumos => @import("SelfInfo/ElfModule.zig"),
.macos, .ios, .watchos, .tvos, .visionos => @import("SelfInfo/DarwinModule.zig"),
.uefi, .windows => @import("SelfInfo/WindowsModule.zig"),
.linux,
.netbsd,
.freebsd,
.dragonfly,
.openbsd,
.solaris,
.illumos,
=> @import("SelfInfo/ElfModule.zig"),
.macos,
.ios,
.watchos,
.tvos,
.visionos,
=> @import("SelfInfo/DarwinModule.zig"),
.uefi,
.windows,
=> @import("SelfInfo/WindowsModule.zig"),
else => void,
};
};
@ -153,26 +167,25 @@ const Module: type = Module: {
pub const DwarfUnwindContext = struct {
cfa: ?usize,
pc: usize,
thread_context: *std.debug.ThreadContext,
reg_context: Dwarf.abi.RegisterContext,
cpu_context: CpuContext,
vm: Dwarf.Unwind.VirtualMachine,
stack_machine: Dwarf.expression.StackMachine(.{ .call_frame_context = true }),
pub fn init(thread_context: *std.debug.ThreadContext, gpa: Allocator) error{}!DwarfUnwindContext {
pub fn init(cpu_context: *const CpuContext) DwarfUnwindContext {
comptime assert(supports_unwinding);
_ = gpa;
const ip_reg_num = Dwarf.abi.ipRegNum(native_arch).?;
const raw_pc_ptr = regValueNative(thread_context, ip_reg_num, null) catch {
unreachable; // error means unsupported, in which case `supports_unwinding` should have been `false`
// `@constCast` is safe because we aren't going to store to the resulting pointer.
const raw_pc_ptr = regNative(@constCast(cpu_context), ip_reg_num) catch |err| switch (err) {
error.InvalidRegister => unreachable, // `ip_reg_num` is definitely valid
error.UnsupportedRegister => unreachable, // the implementation needs to support ip
error.IncompatibleRegisterSize => unreachable, // ip is definitely `usize`-sized
};
const pc = stripInstructionPtrAuthCode(raw_pc_ptr.*);
return .{
.cfa = null,
.pc = pc,
.thread_context = thread_context,
.reg_context = undefined,
.cpu_context = cpu_context.*,
.vm = .{},
.stack_machine = .{},
};
@ -185,17 +198,25 @@ pub const DwarfUnwindContext = struct {
}
pub fn getFp(self: *const DwarfUnwindContext) usize {
return (regValueNative(self.thread_context, Dwarf.abi.fpRegNum(native_arch, self.reg_context), self.reg_context) catch return 0).*;
// `@constCast` is safe because we aren't going to store to the resulting pointer.
const ptr = regNative(@constCast(&self.cpu_context), fp_reg_num) catch |err| switch (err) {
error.InvalidRegister => unreachable, // `fp_reg_num` is definitely valid
error.UnsupportedRegister => unreachable, // the implementation needs to support fp
error.IncompatibleRegisterSize => unreachable, // fp is a pointer so is `usize`-sized
};
return ptr.*;
}
/// Resolves the register rule and places the result into `out` (see regBytes)
/// Resolves the register rule and places the result into `out` (see regBytes). Returns `true`
/// iff the rule was undefined. This is *not* the same as `col.rule == .undefined`, because the
/// default rule may be undefined.
pub fn resolveRegisterRule(
context: *DwarfUnwindContext,
gpa: Allocator,
col: Dwarf.Unwind.VirtualMachine.Column,
expression_context: std.debug.Dwarf.expression.Context,
out: []u8,
) !void {
) !bool {
switch (col.rule) {
.default => {
const register = col.register orelse return error.InvalidRegister;
@ -203,58 +224,74 @@ pub const DwarfUnwindContext = struct {
// See the doc comment on `Dwarf.Unwind.VirtualMachine.RegisterRule.default`.
if (builtin.cpu.arch.isAARCH64() and register >= 19 and register <= 28) {
// Callee-saved registers are initialized as if they had the .same_value rule
const src = try regBytes(context.thread_context, register, context.reg_context);
const src = try context.cpu_context.dwarfRegisterBytes(register);
if (src.len != out.len) return error.RegisterSizeMismatch;
@memcpy(out, src);
return;
return false;
}
@memset(out, undefined);
return true;
},
.undefined => {
@memset(out, undefined);
return true;
},
.same_value => {
// TODO: This copy could be eliminated if callers always copy the state then call this function to update it
const register = col.register orelse return error.InvalidRegister;
const src = try regBytes(context.thread_context, register, context.reg_context);
const src = try context.cpu_context.dwarfRegisterBytes(register);
if (src.len != out.len) return error.RegisterSizeMismatch;
@memcpy(out, src);
return false;
},
.offset => |offset| {
if (context.cfa) |cfa| {
const cfa = context.cfa orelse return error.InvalidCFA;
const addr = try applyOffset(cfa, offset);
const ptr: *const usize = @ptrFromInt(addr);
mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
} else return error.InvalidCFA;
return false;
},
.val_offset => |offset| {
if (context.cfa) |cfa| {
const cfa = context.cfa orelse return error.InvalidCFA;
mem.writeInt(usize, out[0..@sizeOf(usize)], try applyOffset(cfa, offset), native_endian);
} else return error.InvalidCFA;
return false;
},
.register => |register| {
const src = try regBytes(context.thread_context, register, context.reg_context);
const src = try context.cpu_context.dwarfRegisterBytes(register);
if (src.len != out.len) return error.RegisterSizeMismatch;
@memcpy(out, src);
return false;
},
.expression => |expression| {
context.stack_machine.reset();
const value = try context.stack_machine.run(expression, gpa, expression_context, context.cfa.?);
const addr = if (value) |v| blk: {
if (v != .generic) return error.InvalidExpressionValue;
break :blk v.generic;
} else return error.NoExpressionValue;
const value = try context.stack_machine.run(
expression,
gpa,
expression_context,
context.cfa.?,
) orelse return error.NoExpressionValue;
const addr = switch (value) {
.generic => |addr| addr,
else => return error.InvalidExpressionValue,
};
const ptr: *usize = @ptrFromInt(addr);
mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
return false;
},
.val_expression => |expression| {
context.stack_machine.reset();
const value = try context.stack_machine.run(expression, gpa, expression_context, context.cfa.?);
if (value) |v| {
if (v != .generic) return error.InvalidExpressionValue;
mem.writeInt(usize, out[0..@sizeOf(usize)], v.generic, native_endian);
} else return error.NoExpressionValue;
const value = try context.stack_machine.run(
expression,
gpa,
expression_context,
context.cfa.?,
) orelse return error.NoExpressionValue;
const val_raw = switch (value) {
.generic => |raw| raw,
else => return error.InvalidExpressionValue,
};
mem.writeInt(usize, out[0..@sizeOf(usize)], val_raw, native_endian);
return false;
},
.architectural => return error.UnimplementedRegisterRule,
}
@ -277,9 +314,6 @@ pub const DwarfUnwindContext = struct {
return unwindFrameInner(context, gpa, unwind, load_offset, explicit_fde_offset) catch |err| switch (err) {
error.InvalidDebugInfo, error.MissingDebugInfo, error.OutOfMemory => |e| return e,
error.UnimplementedArch,
error.UnimplementedOs,
error.ThreadContextNotSupported,
error.UnimplementedRegisterRule,
error.UnsupportedAddrSize,
error.UnsupportedDwarfVersion,
@ -289,10 +323,10 @@ pub const DwarfUnwindContext = struct {
error.UnimplementedTypedComparison,
error.UnimplementedTypeConversion,
error.UnknownExpressionOpcode,
error.UnsupportedRegister,
=> return error.UnsupportedDebugInfo,
error.InvalidRegister,
error.RegisterContextRequired,
error.ReadFailed,
error.EndOfStream,
error.IncompatibleRegisterSize,
@ -346,20 +380,17 @@ pub const DwarfUnwindContext = struct {
// may not reference other debug sections anyway.
var expression_context: Dwarf.expression.Context = .{
.format = format,
.thread_context = context.thread_context,
.reg_context = context.reg_context,
.cpu_context = &context.cpu_context,
.cfa = context.cfa,
};
context.vm.reset();
context.reg_context.eh_frame = cie.version != 4;
context.reg_context.is_macho = native_os.isDarwin();
const row = try context.vm.runTo(gpa, pc_vaddr, cie, fde, @sizeOf(usize), native_endian);
context.cfa = switch (row.cfa.rule) {
.val_offset => |offset| blk: {
const register = row.cfa.register orelse return error.InvalidCFARule;
const value = (try regValueNative(context.thread_context, register, context.reg_context)).*;
const value = (try regNative(&context.cpu_context, register)).*;
break :blk try applyOffset(value, offset);
},
.expression => |expr| blk: {
@ -381,73 +412,41 @@ pub const DwarfUnwindContext = struct {
expression_context.cfa = context.cfa;
// Buffering the modifications is done because copying the thread context is not portable,
// some implementations (ie. darwin) use internal pointers to the mcontext.
var arena: std.heap.ArenaAllocator = .init(gpa);
defer arena.deinit();
const update_arena = arena.allocator();
const RegisterUpdate = struct {
// Backed by thread_context
dest: []u8,
// Backed by arena
src: []const u8,
prev: ?*@This(),
};
var update_tail: ?*RegisterUpdate = null;
var has_return_address = true;
for (context.vm.rowColumns(row)) |column| {
if (column.register) |register| {
if (register == cie.return_address_register) {
has_return_address = column.rule != .undefined;
}
const dest = try regBytes(context.thread_context, register, context.reg_context);
const src = try update_arena.alloc(u8, dest.len);
try context.resolveRegisterRule(gpa, column, expression_context, src);
const new_update = try update_arena.create(RegisterUpdate);
new_update.* = .{
.dest = dest,
.src = src,
.prev = update_tail,
};
update_tail = new_update;
}
}
// Create a copy of the CPU context, to which we will apply the new rules.
var new_cpu_context = context.cpu_context;
// On all implemented architectures, the CFA is defined as being the previous frame's SP
(try regValueNative(context.thread_context, Dwarf.abi.spRegNum(native_arch, context.reg_context), context.reg_context)).* = context.cfa.?;
(try regNative(&new_cpu_context, sp_reg_num)).* = context.cfa.?;
while (update_tail) |tail| {
@memcpy(tail.dest, tail.src);
update_tail = tail.prev;
for (context.vm.rowColumns(row)) |column| {
if (column.register) |register| {
const dest = try new_cpu_context.dwarfRegisterBytes(register);
const rule_undef = try context.resolveRegisterRule(gpa, column, expression_context, dest);
if (register == cie.return_address_register) {
has_return_address = !rule_undef;
}
}
}
if (has_return_address) {
context.pc = stripInstructionPtrAuthCode((try regValueNative(
context.thread_context,
cie.return_address_register,
context.reg_context,
)).*);
} else {
context.pc = 0;
}
const return_address: u64 = if (has_return_address) pc: {
const raw_ptr = try regNative(&new_cpu_context, cie.return_address_register);
break :pc stripInstructionPtrAuthCode(raw_ptr.*);
} else 0;
const ip_reg_num = Dwarf.abi.ipRegNum(native_arch).?;
(try regValueNative(context.thread_context, ip_reg_num, context.reg_context)).* = context.pc;
(try regNative(new_cpu_context, ip_reg_num)).* = return_address;
// The call instruction will have pushed the address of the instruction that follows the call as the return address.
// This next instruction may be past the end of the function if the caller was `noreturn` (ie. the last instruction in
// the function was the call). If we were to look up an FDE entry using the return address directly, it could end up
// either not finding an FDE at all, or using the next FDE in the program, producing incorrect results. To prevent this,
// we subtract one so that the next lookup is guaranteed to land inside the
//
// The exception to this rule is signal frames, where execution would be returned to the instruction
// that triggered the handler.
const return_address = context.pc;
if (context.pc > 0 and !cie.is_signal_frame) context.pc -= 1;
// The new CPU context is complete; flush changes.
context.cpu_context = new_cpu_context;
// Also update the stored pc. However, because `return_address` points to the instruction
// *after* the call, it could (in the case of noreturn functions) actually point outside of
// the caller's address range, meaning an FDE lookup would fail. We can handle this by
// subtracting 1 from `return_address` so that the next lookup is guaranteed to land inside
// the `call` instruction. The exception to this rule is signal frames, where the return
// address is the same instruction that triggered the handler.
context.pc = if (cie.is_signal_frame) return_address else return_address -| 1;
return return_address;
}
@ -479,4 +478,18 @@ pub const DwarfUnwindContext = struct {
return ptr;
}
pub fn regNative(ctx: *CpuContext, num: u16) error{
InvalidRegister,
UnsupportedRegister,
IncompatibleRegisterSize,
}!*align(1) usize {
const bytes = try ctx.dwarfRegisterBytes(num);
if (bytes.len != @sizeOf(usize)) return error.IncompatibleRegisterSize;
return @ptrCast(bytes);
}
const ip_reg_num = Dwarf.ipRegNum(native_arch).?;
const fp_reg_num = Dwarf.fpRegNum(native_arch);
const sp_reg_num = Dwarf.spRegNum(native_arch);
};

View File

@ -265,12 +265,9 @@ pub fn unwindFrame(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
error.OutOfMemory,
error.Unexpected,
=> |e| return e,
error.UnimplementedArch,
error.UnimplementedOs,
error.ThreadContextNotSupported,
error.UnsupportedRegister,
=> return error.UnsupportedDebugInfo,
error.InvalidRegister,
error.RegisterContextRequired,
error.IncompatibleRegisterSize,
=> return error.InvalidDebugInfo,
};
@ -396,7 +393,6 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
};
if (entry.raw_encoding == 0) return error.MissingDebugInfo;
const reg_context: Dwarf.abi.RegisterContext = .{ .eh_frame = false, .is_macho = true };
const encoding: macho.CompactUnwindEncoding = @bitCast(entry.raw_encoding);
const new_ip = switch (builtin.cpu.arch) {
@ -405,16 +401,16 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
.RBP_FRAME => ip: {
const frame = encoding.value.x86_64.frame;
const fp = (try regValueNative(context.thread_context, fpRegNum(reg_context), reg_context)).*;
const fp = (try dwarfRegNative(&context.cpu_context, fp_reg_num)).*;
const new_sp = fp + 2 * @sizeOf(usize);
const ip_ptr = fp + @sizeOf(usize);
const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
const new_fp = @as(*const usize, @ptrFromInt(fp)).*;
(try regValueNative(context.thread_context, fpRegNum(reg_context), reg_context)).* = new_fp;
(try regValueNative(context.thread_context, spRegNum(reg_context), reg_context)).* = new_sp;
(try regValueNative(context.thread_context, ip_reg_num, reg_context)).* = new_ip;
(try dwarfRegNative(&context.cpu_context, fp_reg_num)).* = new_fp;
(try dwarfRegNative(&context.cpu_context, sp_reg_num)).* = new_sp;
(try dwarfRegNative(&context.cpu_context, ip_reg_num)).* = new_ip;
const regs: [5]u3 = .{
frame.reg0,
@ -427,7 +423,7 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
if (reg == 0) continue;
const addr = fp - frame.frame_offset * @sizeOf(usize) + i * @sizeOf(usize);
const reg_number = try Dwarf.compactUnwindToDwarfRegNumber(reg);
(try regValueNative(context.thread_context, reg_number, reg_context)).* = @as(*const usize, @ptrFromInt(addr)).*;
(try dwarfRegNative(&context.cpu_context, reg_number)).* = @as(*const usize, @ptrFromInt(addr)).*;
}
break :ip new_ip;
@ -437,7 +433,7 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
=> ip: {
const frameless = encoding.value.x86_64.frameless;
const sp = (try regValueNative(context.thread_context, spRegNum(reg_context), reg_context)).*;
const sp = (try dwarfRegNative(&context.cpu_context, sp_reg_num)).*;
const stack_size: usize = stack_size: {
if (encoding.mode.x86_64 == .STACK_IMMD) {
break :stack_size @as(usize, frameless.stack.direct.stack_size) * @sizeOf(usize);
@ -487,7 +483,7 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
var reg_addr = sp + stack_size - @sizeOf(usize) * @as(usize, reg_count + 1);
for (0..reg_count) |i| {
const reg_number = try Dwarf.compactUnwindToDwarfRegNumber(registers[i]);
(try regValueNative(context.thread_context, reg_number, reg_context)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
(try dwarfRegNative(&context.cpu_context, reg_number)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
reg_addr += @sizeOf(usize);
}
@ -497,8 +493,8 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
const new_sp = ip_ptr + @sizeOf(usize);
(try regValueNative(context.thread_context, spRegNum(reg_context), reg_context)).* = new_sp;
(try regValueNative(context.thread_context, ip_reg_num, reg_context)).* = new_ip;
(try dwarfRegNative(&context.cpu_context, sp_reg_num)).* = new_sp;
(try dwarfRegNative(&context.cpu_context, ip_reg_num)).* = new_ip;
break :ip new_ip;
},
@ -516,10 +512,10 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
.aarch64, .aarch64_be => switch (encoding.mode.arm64) {
.OLD => return error.UnsupportedDebugInfo,
.FRAMELESS => ip: {
const sp = (try regValueNative(context.thread_context, spRegNum(reg_context), reg_context)).*;
const sp = (try dwarfRegNative(&context.cpu_context, sp_reg_num)).*;
const new_sp = sp + encoding.value.arm64.frameless.stack_size * 16;
const new_ip = (try regValueNative(context.thread_context, 30, reg_context)).*;
(try regValueNative(context.thread_context, spRegNum(reg_context), reg_context)).* = new_sp;
const new_ip = (try dwarfRegNative(&context.cpu_context, 30)).*;
(try dwarfRegNative(&context.cpu_context, sp_reg_num)).* = new_sp;
break :ip new_ip;
},
.DWARF => {
@ -535,15 +531,15 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
.FRAME => ip: {
const frame = encoding.value.arm64.frame;
const fp = (try regValueNative(context.thread_context, fpRegNum(reg_context), reg_context)).*;
const fp = (try dwarfRegNative(&context.cpu_context, fp_reg_num)).*;
const ip_ptr = fp + @sizeOf(usize);
var reg_addr = fp - @sizeOf(usize);
inline for (@typeInfo(@TypeOf(frame.x_reg_pairs)).@"struct".fields, 0..) |field, i| {
if (@field(frame.x_reg_pairs, field.name) != 0) {
(try regValueNative(context.thread_context, 19 + i, reg_context)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
(try dwarfRegNative(&context.cpu_context, 19 + i)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
reg_addr += @sizeOf(usize);
(try regValueNative(context.thread_context, 20 + i, reg_context)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
(try dwarfRegNative(&context.cpu_context, 20 + i)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
reg_addr += @sizeOf(usize);
}
}
@ -552,12 +548,12 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
if (@field(frame.d_reg_pairs, field.name) != 0) {
// Only the lower half of the 128-bit V registers are restored during unwinding
{
const dest: *align(1) usize = @ptrCast(try regBytes(context.thread_context, 64 + 8 + i, context.reg_context));
const dest: *align(1) usize = @ptrCast(try context.cpu_context.dwarfRegisterBytes(64 + 8 + i));
dest.* = @as(*const usize, @ptrFromInt(reg_addr)).*;
}
reg_addr += @sizeOf(usize);
{
const dest: *align(1) usize = @ptrCast(try regBytes(context.thread_context, 64 + 9 + i, context.reg_context));
const dest: *align(1) usize = @ptrCast(try context.cpu_context.dwarfRegisterBytes(64 + 9 + i));
dest.* = @as(*const usize, @ptrFromInt(reg_addr)).*;
}
reg_addr += @sizeOf(usize);
@ -567,8 +563,8 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
const new_fp = @as(*const usize, @ptrFromInt(fp)).*;
(try regValueNative(context.thread_context, fpRegNum(reg_context), reg_context)).* = new_fp;
(try regValueNative(context.thread_context, ip_reg_num, reg_context)).* = new_ip;
(try dwarfRegNative(&context.cpu_context, fp_reg_num)).* = new_fp;
(try dwarfRegNative(&context.cpu_context, ip_reg_num)).* = new_ip;
break :ip new_ip;
},
@ -782,13 +778,9 @@ test {
_ = MachoSymbol;
}
/// DWARF register number of the frame pointer for the architecture this
/// binary targets, as reported by `Dwarf.abi.fpRegNum`.
fn fpRegNum(reg_context: Dwarf.abi.RegisterContext) u8 {
    const arch = builtin.target.cpu.arch;
    return Dwarf.abi.fpRegNum(arch, reg_context);
}
/// DWARF register number of the stack pointer for the architecture this
/// binary targets, as reported by `Dwarf.abi.spRegNum`.
fn spRegNum(reg_context: Dwarf.abi.RegisterContext) u8 {
    const arch = builtin.target.cpu.arch;
    return Dwarf.abi.spRegNum(arch, reg_context);
}
const ip_reg_num = Dwarf.abi.ipRegNum(builtin.target.cpu.arch).?;
const ip_reg_num = Dwarf.ipRegNum(builtin.target.cpu.arch).?;
const fp_reg_num = Dwarf.fpRegNum(builtin.target.cpu.arch);
const sp_reg_num = Dwarf.spRegNum(builtin.target.cpu.arch);
/// Uses `mmap` to map the file at `path` into memory.
fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
@ -821,8 +813,7 @@ const mem = std.mem;
const posix = std.posix;
const testing = std.testing;
const Error = std.debug.SelfInfo.Error;
const regBytes = Dwarf.abi.regBytes;
const regValueNative = Dwarf.abi.regValueNative;
const dwarfRegNative = std.debug.SelfInfo.DwarfUnwindContext.regNative;
const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

View File

@ -26,7 +26,6 @@ pub fn key(m: ElfModule) usize {
pub fn lookup(cache: *LookupCache, gpa: Allocator, address: usize) Error!ElfModule {
_ = cache;
_ = gpa;
if (builtin.target.os.tag == .haiku) @panic("TODO implement lookup module for Haiku");
const DlIterContext = struct {
/// input
address: usize,
@ -261,7 +260,7 @@ pub const supports_unwinding: bool = s: {
};
comptime {
if (supports_unwinding) {
std.debug.assert(Dwarf.abi.supportsUnwinding(&builtin.target));
std.debug.assert(Dwarf.supportsUnwinding(&builtin.target));
}
}

View File

@ -295,11 +295,45 @@ pub const UnwindContext = struct {
pc: usize,
cur: windows.CONTEXT,
history_table: windows.UNWIND_HISTORY_TABLE,
pub fn init(ctx: *const windows.CONTEXT, gpa: Allocator) Allocator.Error!UnwindContext {
_ = gpa;
pub fn init(ctx: *const std.debug.cpu_context.Native) UnwindContext {
return .{
.pc = @returnAddress(),
.cur = ctx.*,
.cur = switch (builtin.cpu.arch) {
.x86_64 => std.mem.zeroInit(windows.CONTEXT, .{
.Rax = ctx.gprs.get(.rax),
.Rcx = ctx.gprs.get(.rcx),
.Rdx = ctx.gprs.get(.rdx),
.Rbx = ctx.gprs.get(.rbx),
.Rsp = ctx.gprs.get(.rsp),
.Rbp = ctx.gprs.get(.rbp),
.Rsi = ctx.gprs.get(.rsi),
.Rdi = ctx.gprs.get(.rdi),
.R8 = ctx.gprs.get(.r8),
.R9 = ctx.gprs.get(.r9),
.R10 = ctx.gprs.get(.r10),
.R11 = ctx.gprs.get(.r11),
.R12 = ctx.gprs.get(.r12),
.R13 = ctx.gprs.get(.r13),
.R14 = ctx.gprs.get(.r14),
.R15 = ctx.gprs.get(.r15),
.Rip = ctx.gprs.get(.rip),
}),
.aarch64, .aarch64_be => .{
.ContextFlags = 0,
.Cpsr = 0,
.DUMMYUNIONNAME = .{ .X = ctx.x },
.Sp = ctx.sp,
.Pc = ctx.pc,
.V = @splat(.{ .B = @splat(0) }),
.Fpcr = 0,
.Fpsr = 0,
.Bcr = @splat(0),
.Bvr = @splat(0),
.Wcr = @splat(0),
.Wvr = @splat(0),
},
else => comptime unreachable,
},
.history_table = std.mem.zeroes(windows.UNWIND_HISTORY_TABLE),
};
}

File diff suppressed because it is too large Load Diff

View File

@ -183,7 +183,7 @@ pub fn realloc(uncasted_memory: []u8, new_len: usize, may_move: bool) ?[*]u8 {
if (posix.MREMAP != void) {
// TODO: if the next_mmap_addr_hint is within the remapped range, update it
const new_memory = posix.mremap(memory.ptr, memory.len, new_len, .{ .MAYMOVE = may_move }, null) catch return null;
const new_memory = posix.mremap(memory.ptr, page_aligned_len, new_size_aligned, .{ .MAYMOVE = may_move }, null) catch return null;
return new_memory.ptr;
}

View File

@ -3,6 +3,7 @@ const fd_t = std.c.fd_t;
const off_t = std.c.off_t;
const unexpectedErrno = std.posix.unexpectedErrno;
const errno = std.posix.errno;
const builtin = @import("builtin");
pub const CopyFileRangeError = std.posix.UnexpectedError || error{
/// If infd is not open for reading or outfd is not open for writing, or
@ -47,3 +48,75 @@ pub fn copy_file_range(fd_in: fd_t, off_in: ?*i64, fd_out: fd_t, off_out: ?*i64,
else => |err| return unexpectedErrno(err),
}
}
/// Kernel-compatible `ucontext_t`, as delivered to POSIX signal handlers.
/// NOTE(review): the field order and per-arch `mcontext_t` layouts appear to
/// mirror FreeBSD's `<sys/ucontext.h>` / `<machine/ucontext.h>` — verify
/// against the target release's kernel headers before relying on exact
/// offsets.
pub const ucontext_t = extern struct {
    sigmask: std.c.sigset_t,
    mcontext: mcontext_t,
    // Presumably the context to resume when this one finishes (`null` if
    // none) — TODO confirm semantics against the kernel headers.
    link: ?*ucontext_t,
    stack: std.c.stack_t,
    flags: c_int,
    __spare__: [4]c_int,

    /// Machine-dependent saved CPU state. Only x86_64 and aarch64 layouts are
    /// defined here; all other architectures get `void`.
    const mcontext_t = switch (builtin.cpu.arch) {
        .x86_64 => extern struct {
            onstack: u64,
            // General-purpose registers.
            rdi: u64,
            rsi: u64,
            rdx: u64,
            rcx: u64,
            r8: u64,
            r9: u64,
            rax: u64,
            rbx: u64,
            rbp: u64,
            r10: u64,
            r11: u64,
            r12: u64,
            r13: u64,
            r14: u64,
            r15: u64,
            // Trap/segment/fault bookkeeping.
            trapno: u32,
            fs: u16,
            gs: u16,
            addr: u64,
            flags: u32,
            es: u16,
            ds: u16,
            err: u64,
            // Instruction pointer, code segment, flags, and stack state.
            rip: u64,
            cs: u64,
            rflags: u64,
            rsp: u64,
            ss: u64,
            len: u64,
            // FPU/SSE state description and storage.
            fpformat: u64,
            ownedfp: u64,
            fpstate: [64]u64 align(16),
            fsbase: u64,
            gsbase: u64,
            xfpustate: u64,
            xfpustate_len: u64,
            spare: [4]u64,
        },
        .aarch64 => extern struct {
            /// General-purpose register state: x0..x29, link register, stack
            /// pointer, exception link register, and saved program status.
            gpregs: extern struct {
                x: [30]u64,
                lr: u64,
                sp: u64,
                elr: u64,
                spsr: u32,
                _pad: u32,
            },
            /// SIMD/floating-point register state (full 128-bit V registers).
            fpregs: extern struct {
                q: [32]u128,
                sr: u32,
                cr: u32,
                flags: u32,
                _pad: u32,
            },
            flags: u32,
            _pad: u32,
            _spare: [8]u64,
        },
        else => void,
    };
};

View File

@ -49,7 +49,6 @@ const arch_bits = switch (native_arch) {
.s390x => @import("linux/s390x.zig"),
else => struct {
pub const ucontext_t = void;
pub const getcontext = {};
},
};
@ -112,7 +111,6 @@ pub const timeval = arch_bits.timeval;
pub const timezone = arch_bits.timezone;
pub const ucontext_t = arch_bits.ucontext_t;
pub const user_desc = arch_bits.user_desc;
pub const getcontext = arch_bits.getcontext;
pub const tls = @import("linux/tls.zig");
pub const BPF = @import("linux/bpf.zig");

View File

@ -260,7 +260,4 @@ pub const ucontext_t = extern struct {
mcontext: mcontext_t,
};
/// TODO
pub const getcontext = {};
pub const Elf_Symndx = u32;

View File

@ -310,7 +310,4 @@ pub const ucontext_t = extern struct {
regspace: [64]u64,
};
/// TODO
pub const getcontext = {};
pub const Elf_Symndx = u32;

View File

@ -237,6 +237,3 @@ pub const VDSO = void;
/// TODO
pub const ucontext_t = void;
/// TODO
pub const getcontext = {};

View File

@ -250,6 +250,3 @@ pub const ucontext_t = extern struct {
};
pub const Elf_Symndx = u32;
/// TODO
pub const getcontext = {};

View File

@ -258,6 +258,3 @@ pub const VDSO = void;
/// TODO
pub const ucontext_t = void;
/// TODO
pub const getcontext = {};

View File

@ -349,6 +349,3 @@ pub const Elf_Symndx = u32;
/// TODO
pub const ucontext_t = void;
/// TODO
pub const getcontext = {};

View File

@ -328,6 +328,3 @@ pub const Elf_Symndx = u32;
/// TODO
pub const ucontext_t = void;
/// TODO
pub const getcontext = {};

View File

@ -381,6 +381,3 @@ pub const ucontext_t = extern struct {
};
pub const Elf_Symndx = u32;
/// TODO
pub const getcontext = {};

View File

@ -376,6 +376,3 @@ pub const ucontext_t = extern struct {
};
pub const Elf_Symndx = u32;
/// TODO
pub const getcontext = {};

View File

@ -255,6 +255,3 @@ pub const ucontext_t = extern struct {
sigmask: [1024 / @bitSizeOf(c_ulong)]c_ulong, // Currently a libc-compatible (1024-bit) sigmask
mcontext: mcontext_t,
};
/// TODO
pub const getcontext = {};

View File

@ -255,6 +255,3 @@ pub const ucontext_t = extern struct {
sigmask: [1024 / @bitSizeOf(c_ulong)]c_ulong, // Currently a libc-compatible (1024-bit) sigmask
mcontext: mcontext_t,
};
/// TODO
pub const getcontext = {};

View File

@ -273,6 +273,3 @@ pub const mcontext_t = extern struct {
__regs2: [18]u32,
__regs3: [16]f64,
};
/// TODO
pub const getcontext = {};

View File

@ -426,6 +426,3 @@ pub const ucontext_t = extern struct {
stack: stack_t,
sigset: [1024 / @bitSizeOf(c_ulong)]c_ulong, // Currently a libc-compatible (1024-bit) sigmask
};
/// TODO
pub const getcontext = {};

View File

@ -436,17 +436,3 @@ pub fn getContextInternal() callconv(.naked) usize {
[sigset_size] "i" (linux.NSIG / 8),
: .{ .cc = true, .memory = true, .eax = true, .ecx = true, .edx = true });
}
/// Captures the calling thread's CPU context into `context`.
/// Returns the value produced by `getContextInternal` (its body is truncated
/// above; presumably zero on success — TODO confirm).
pub inline fn getcontext(context: *ucontext_t) usize {
    // This wrapper is used so that getContextInternal can control its own
    // prologue in order to read ESP from a constant offset.
    // An aligned stack is not needed for getContextInternal.
    var clobber_edx: usize = undefined;
    return asm volatile (
        \\ calll %[getContextInternal:P]
        : [_] "={eax}" (-> usize),
          [_] "={edx}" (clobber_edx),
        : [_] "{edx}" (context),
          [getContextInternal] "X" (&getContextInternal),
        : .{ .cc = true, .memory = true, .ecx = true });
}

View File

@ -352,98 +352,3 @@ pub const ucontext_t = extern struct {
sigmask: [1024 / @bitSizeOf(c_ulong)]c_ulong, // Currently a glibc-compatible (1024-bit) sigmask.
fpregs_mem: [64]usize, // Not part of kernel ABI, only part of glibc ucontext_t
};
/// Byte offset of general-purpose register slot `reg_index` within
/// `ucontext_t`, i.e. the offset of `mcontext.gregs[reg_index]`.
fn gpRegisterOffset(comptime reg_index: comptime_int) usize {
    const gregs_base = @offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "gregs");
    return gregs_base + reg_index * @sizeOf(usize);
}
/// Saves the calling thread's CPU context into the `ucontext_t` whose address
/// is passed in %rdi.
///
/// What the assembly does, in order:
/// * zeroes the `flags` and `link` fields,
/// * stores the general-purpose registers into `mcontext.gregs`,
/// * derives RIP from the return address at (%rsp), and RSP as %rsp + 8
///   (the caller's stack pointer once this call returns),
/// * captures RFLAGS via pushfq/popq,
/// * saves the x87 environment (fnstenv, then fldenv to restore it, since
///   fnstenv masks exceptions) and MXCSR (stmxcsr) into `fpregs_mem`, and
///   points `mcontext.fpregs` at that buffer,
/// * queries the current signal stack via the `sigaltstack` syscall (null
///   new-stack argument, old-stack written to `stack`) and, only if that
///   succeeded, the current signal mask via `rt_sigprocmask` (null new set)
///   into `sigmask`.
///
/// Returns, in %rax, the result of the last syscall executed (zero on
/// success).
fn getContextInternal() callconv(.naked) usize {
    // TODO: Read GS/FS registers?
    asm volatile (
        \\ movq $0, %[flags_offset:c](%%rdi)
        \\ movq $0, %[link_offset:c](%%rdi)
        \\ movq %%r8, %[r8_offset:c](%%rdi)
        \\ movq %%r9, %[r9_offset:c](%%rdi)
        \\ movq %%r10, %[r10_offset:c](%%rdi)
        \\ movq %%r11, %[r11_offset:c](%%rdi)
        \\ movq %%r12, %[r12_offset:c](%%rdi)
        \\ movq %%r13, %[r13_offset:c](%%rdi)
        \\ movq %%r14, %[r14_offset:c](%%rdi)
        \\ movq %%r15, %[r15_offset:c](%%rdi)
        \\ movq %%rdi, %[rdi_offset:c](%%rdi)
        \\ movq %%rsi, %[rsi_offset:c](%%rdi)
        \\ movq %%rbp, %[rbp_offset:c](%%rdi)
        \\ movq %%rbx, %[rbx_offset:c](%%rdi)
        \\ movq %%rdx, %[rdx_offset:c](%%rdi)
        \\ movq %%rax, %[rax_offset:c](%%rdi)
        \\ movq %%rcx, %[rcx_offset:c](%%rdi)
        \\ movq (%%rsp), %%rcx
        \\ movq %%rcx, %[rip_offset:c](%%rdi)
        \\ leaq 8(%%rsp), %%rcx
        \\ movq %%rcx, %[rsp_offset:c](%%rdi)
        \\ pushfq
        \\ popq %[efl_offset:c](%%rdi)
        \\ leaq %[fpmem_offset:c](%%rdi), %%rcx
        \\ movq %%rcx, %[fpstate_offset:c](%%rdi)
        \\ fnstenv (%%rcx)
        \\ fldenv (%%rcx)
        \\ stmxcsr %[mxcsr_offset:c](%%rdi)
        \\ leaq %[stack_offset:c](%%rdi), %%rsi
        \\ movq %%rdi, %%r8
        \\ xorl %%edi, %%edi
        \\ movl %[sigaltstack], %%eax
        \\ syscall
        \\ testq %%rax, %%rax
        \\ jnz 0f
        \\ movl %[sigprocmask], %%eax
        \\ xorl %%esi, %%esi
        \\ leaq %[sigmask_offset:c](%%r8), %%rdx
        \\ movl %[sigset_size], %%r10d
        \\ syscall
        \\0:
        \\ retq
        :
        // Field/register offsets are resolved at comptime so the template
        // above can address the ucontext_t with constant displacements.
        : [flags_offset] "i" (@offsetOf(ucontext_t, "flags")),
          [link_offset] "i" (@offsetOf(ucontext_t, "link")),
          [r8_offset] "i" (comptime gpRegisterOffset(REG.R8)),
          [r9_offset] "i" (comptime gpRegisterOffset(REG.R9)),
          [r10_offset] "i" (comptime gpRegisterOffset(REG.R10)),
          [r11_offset] "i" (comptime gpRegisterOffset(REG.R11)),
          [r12_offset] "i" (comptime gpRegisterOffset(REG.R12)),
          [r13_offset] "i" (comptime gpRegisterOffset(REG.R13)),
          [r14_offset] "i" (comptime gpRegisterOffset(REG.R14)),
          [r15_offset] "i" (comptime gpRegisterOffset(REG.R15)),
          [rdi_offset] "i" (comptime gpRegisterOffset(REG.RDI)),
          [rsi_offset] "i" (comptime gpRegisterOffset(REG.RSI)),
          [rbp_offset] "i" (comptime gpRegisterOffset(REG.RBP)),
          [rbx_offset] "i" (comptime gpRegisterOffset(REG.RBX)),
          [rdx_offset] "i" (comptime gpRegisterOffset(REG.RDX)),
          [rax_offset] "i" (comptime gpRegisterOffset(REG.RAX)),
          [rcx_offset] "i" (comptime gpRegisterOffset(REG.RCX)),
          [rsp_offset] "i" (comptime gpRegisterOffset(REG.RSP)),
          [rip_offset] "i" (comptime gpRegisterOffset(REG.RIP)),
          [efl_offset] "i" (comptime gpRegisterOffset(REG.EFL)),
          [fpstate_offset] "i" (@offsetOf(ucontext_t, "mcontext") + @offsetOf(mcontext_t, "fpregs")),
          [fpmem_offset] "i" (@offsetOf(ucontext_t, "fpregs_mem")),
          [mxcsr_offset] "i" (@offsetOf(ucontext_t, "fpregs_mem") + @offsetOf(fpstate, "mxcsr")),
          [sigaltstack] "i" (@intFromEnum(linux.SYS.sigaltstack)),
          [stack_offset] "i" (@offsetOf(ucontext_t, "stack")),
          [sigprocmask] "i" (@intFromEnum(linux.SYS.rt_sigprocmask)),
          [sigmask_offset] "i" (@offsetOf(ucontext_t, "sigmask")),
          [sigset_size] "i" (@sizeOf(sigset_t)),
        : .{ .cc = true, .memory = true, .rax = true, .rcx = true, .rdx = true, .rdi = true, .rsi = true, .r8 = true, .r10 = true, .r11 = true });
}
/// Captures the calling thread's CPU context into `context`.
/// Returns the value produced by `getContextInternal`: the result of the
/// last syscall it executed, zero on success.
pub inline fn getcontext(context: *ucontext_t) usize {
    // This wrapper is used so that getContextInternal can control its own
    // prologue in order to read RSP from a constant offset.
    // An aligned stack is not needed for getContextInternal.
    var clobber_rdi: usize = undefined;
    return asm volatile (
        \\ callq %[getContextInternal:P]
        : [_] "={rax}" (-> usize),
          [_] "={rdi}" (clobber_rdi),
        : [_] "{rdi}" (context),
          [getContextInternal] "X" (&getContextInternal),
        : .{ .cc = true, .memory = true, .rcx = true, .rdx = true, .rsi = true, .r8 = true, .r10 = true, .r11 = true });
}

View File

@ -47,8 +47,6 @@ else switch (native_os) {
.linux => linux,
.plan9 => std.os.plan9,
else => struct {
pub const getcontext = {};
pub const ucontext_t = void;
pub const pid_t = void;
pub const pollfd = void;
pub const fd_t = void;
@ -142,7 +140,6 @@ pub const in_pktinfo = system.in_pktinfo;
pub const in6_pktinfo = system.in6_pktinfo;
pub const ino_t = system.ino_t;
pub const linger = system.linger;
pub const mcontext_t = system.mcontext_t;
pub const mode_t = system.mode_t;
pub const msghdr = system.msghdr;
pub const msghdr_const = system.msghdr_const;
@ -171,7 +168,6 @@ pub const timespec = system.timespec;
pub const timestamp_t = system.timestamp_t;
pub const timeval = system.timeval;
pub const timezone = system.timezone;
pub const ucontext_t = system.ucontext_t;
pub const uid_t = system.uid_t;
pub const user_desc = system.user_desc;
pub const utsname = system.utsname;