mirror of
https://github.com/ziglang/zig.git
synced 2025-12-16 11:13:08 +00:00
std.debug: reorg and clarify API goals
After this commit: `std.debug.SelfInfo` is a cross-platform abstraction over the current executable's own debug information, with a goal of minimal code bloat and compilation speed penalty. `std.debug.Dwarf` does not assume the current executable is itself the thing being debugged; it does, however, assume the debug info has the same CPU architecture and OS as the current executable. It is planned to remove this limitation.
parent 290966c249
commit 48d584e3a3
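To make the intended split concrete, here is a minimal sketch of post-commit usage. It assumes `std.debug.getSelfDebugInfo()` keeps its existing name; `getModuleForAddress` and `getDwarfInfoForAddress` are taken from the hunks below. Treat it as an illustration of the API shape, not the committed code:

const std = @import("std");

// Sketch: resolve the module containing an address via the self-debug-info
// abstraction this commit carves out.
fn lookupModule(addr: usize) !void {
    // SelfInfo is specifically about the *current* executable's debug info.
    const debug_info = try std.debug.getSelfDebugInfo();
    // getModuleForAddress is used by the unwinder in the hunks below.
    const module = try debug_info.getModuleForAddress(addr);
    // Dwarf no longer assumes it describes the running executable, only a
    // matching CPU architecture and OS.
    _ = try module.getDwarfInfoForAddress(debug_info.allocator, addr);
}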
@@ -13,6 +13,7 @@ const native_arch = builtin.cpu.arch;
 const native_os = builtin.os.tag;
 const native_endian = native_arch.endian();
 
+pub const MemoryAccessor = @import("debug/MemoryAccessor.zig");
 pub const Dwarf = @import("debug/Dwarf.zig");
 pub const Pdb = @import("debug/Pdb.zig");
 pub const SelfInfo = @import("debug/SelfInfo.zig");
@@ -243,7 +244,7 @@ pub inline fn getContext(context: *ThreadContext) bool {
 /// Tries to print the stack trace starting from the supplied base pointer to stderr,
 /// unbuffered, and ignores any error returned.
 /// TODO multithreaded awareness
-pub fn dumpStackTraceFromBase(context: *const ThreadContext) void {
+pub fn dumpStackTraceFromBase(context: *ThreadContext) void {
     nosuspend {
         if (comptime builtin.target.isWasm()) {
             if (native_os == .wasi) {
@@ -545,7 +546,7 @@ pub const StackIterator = struct {
     // using DWARF and MachO unwind info.
     unwind_state: if (have_ucontext) ?struct {
         debug_info: *SelfInfo,
-        dwarf_context: Dwarf.UnwindContext,
+        dwarf_context: SelfInfo.UnwindContext,
         last_error: ?UnwindError = null,
        failed: bool = false,
     } else void = if (have_ucontext) null else {},
@@ -569,16 +570,16 @@ pub const StackIterator = struct {
         };
     }
 
-    pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *const posix.ucontext_t) !StackIterator {
+    pub fn initWithContext(first_address: ?usize, debug_info: *SelfInfo, context: *posix.ucontext_t) !StackIterator {
         // The implementation of DWARF unwinding on aarch64-macos is not complete. However, Apple mandates that
         // the frame pointer register is always used, so on this platform we can safely use the FP-based unwinder.
-        if (comptime builtin.target.isDarwin() and native_arch == .aarch64) {
+        if (builtin.target.isDarwin() and native_arch == .aarch64) {
             return init(first_address, context.mcontext.ss.fp);
         } else {
             var iterator = init(first_address, null);
             iterator.unwind_state = .{
                 .debug_info = debug_info,
-                .dwarf_context = try Dwarf.UnwindContext.init(debug_info.allocator, context),
+                .dwarf_context = try SelfInfo.UnwindContext.init(debug_info.allocator, context),
             };
 
             return iterator;
@@ -644,116 +645,6 @@ pub const StackIterator = struct {
         return address;
     }
 
-    fn isValidMemory(address: usize) bool {
-        // We are unable to determine validity of memory for freestanding targets
-        if (native_os == .freestanding or native_os == .uefi) return true;
-
-        const aligned_address = address & ~@as(usize, @intCast((mem.page_size - 1)));
-        if (aligned_address == 0) return false;
-        const aligned_memory = @as([*]align(mem.page_size) u8, @ptrFromInt(aligned_address))[0..mem.page_size];
-
-        if (native_os == .windows) {
-            var memory_info: windows.MEMORY_BASIC_INFORMATION = undefined;
-
-            // The only error this function can throw is ERROR_INVALID_PARAMETER.
-            // It is thrown when an invalid address is supplied.
-            const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
-                return false;
-            };
-
-            // Result code has to be bigger than zero (number of bytes written)
-            if (rc == 0) {
-                return false;
-            }
-
-            // Free pages cannot be read, they are unmapped
-            if (memory_info.State == windows.MEM_FREE) {
-                return false;
-            }
-
-            return true;
-        } else if (have_msync) {
-            posix.msync(aligned_memory, posix.MSF.ASYNC) catch |err| {
-                switch (err) {
-                    error.UnmappedMemory => return false,
-                    else => unreachable,
-                }
-            };
-
-            return true;
-        } else {
-            // We are unable to determine validity of memory on this target.
-            return true;
-        }
-    }
-
-    pub const MemoryAccessor = struct {
-        var cached_pid: posix.pid_t = -1;
-
-        mem: switch (native_os) {
-            .linux => File,
-            else => void,
-        },
-
-        pub const init: MemoryAccessor = .{
-            .mem = switch (native_os) {
-                .linux => .{ .handle = -1 },
-                else => {},
-            },
-        };
-
-        fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
-            switch (native_os) {
-                .linux => while (true) switch (ma.mem.handle) {
-                    -2 => break,
-                    -1 => {
-                        const linux = std.os.linux;
-                        const pid = switch (@atomicLoad(posix.pid_t, &cached_pid, .monotonic)) {
-                            -1 => pid: {
-                                const pid = linux.getpid();
-                                @atomicStore(posix.pid_t, &cached_pid, pid, .monotonic);
-                                break :pid pid;
-                            },
-                            else => |pid| pid,
-                        };
-                        const bytes_read = linux.process_vm_readv(
-                            pid,
-                            &.{.{ .base = buf.ptr, .len = buf.len }},
-                            &.{.{ .base = @ptrFromInt(address), .len = buf.len }},
-                            0,
-                        );
-                        switch (linux.E.init(bytes_read)) {
-                            .SUCCESS => return bytes_read == buf.len,
-                            .FAULT => return false,
-                            .INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
-                            .NOMEM => {},
-                            .NOSYS => {}, // QEMU is known not to implement this syscall.
-                            else => unreachable, // unexpected
-                        }
-                        var path_buf: [
-                            std.fmt.count("/proc/{d}/mem", .{math.minInt(posix.pid_t)})
-                        ]u8 = undefined;
-                        const path = std.fmt.bufPrint(&path_buf, "/proc/{d}/mem", .{pid}) catch
-                            unreachable;
-                        ma.mem = std.fs.openFileAbsolute(path, .{}) catch {
-                            ma.mem.handle = -2;
-                            break;
-                        };
-                    },
-                    else => return (ma.mem.pread(buf, address) catch return false) == buf.len,
-                },
-                else => {},
-            }
-            if (!isValidMemory(address)) return false;
-            @memcpy(buf, @as([*]const u8, @ptrFromInt(address)));
-            return true;
-        }
-        pub fn load(ma: *MemoryAccessor, comptime Type: type, address: usize) ?Type {
-            var result: Type = undefined;
-            return if (ma.read(address, std.mem.asBytes(&result))) result else null;
-        }
-    };
-
     fn next_unwind(it: *StackIterator) !usize {
         const unwind_state = &it.unwind_state.?;
         const module = try unwind_state.debug_info.getModuleForAddress(unwind_state.dwarf_context.pc);
@@ -762,7 +653,13 @@ pub const StackIterator = struct {
         // __unwind_info is a requirement for unwinding on Darwin. It may fall back to DWARF, but unwinding
         // via DWARF before attempting to use the compact unwind info will produce incorrect results.
         if (module.unwind_info) |unwind_info| {
-            if (Dwarf.unwindFrameMachO(&unwind_state.dwarf_context, &it.ma, unwind_info, module.eh_frame, module.base_address)) |return_address| {
+            if (SelfInfo.unwindFrameMachO(
+                &unwind_state.dwarf_context,
+                &it.ma,
+                unwind_info,
+                module.eh_frame,
+                module.base_address,
+            )) |return_address| {
                 return return_address;
             } else |err| {
                 if (err != error.RequiresDWARFUnwind) return err;
@@ -773,7 +670,7 @@ pub const StackIterator = struct {
            }
 
            if (try module.getDwarfInfoForAddress(unwind_state.debug_info.allocator, unwind_state.dwarf_context.pc)) |di| {
-               return di.unwindFrame(&unwind_state.dwarf_context, &it.ma, null);
+               return SelfInfo.unwindFrameDwarf(di, &unwind_state.dwarf_context, &it.ma, null);
            } else return error.MissingDebugInfo;
        }
 
@@ -822,11 +719,6 @@ pub const StackIterator = struct {
     }
 };
 
-const have_msync = switch (native_os) {
-    .wasi, .emscripten, .windows => false,
-    else => true,
-};
-
 pub fn writeCurrentStackTrace(
     out_stream: anytype,
     debug_info: *SelfInfo,
@@ -1333,7 +1225,7 @@ fn handleSegfaultPosix(sig: i32, info: *const posix.siginfo_t, ctx_ptr: ?*anyopa
     posix.abort();
 }
 
-fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*const anyopaque) void {
+fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*anyopaque) void {
     const stderr = io.getStdErr().writer();
     _ = switch (sig) {
         posix.SIG.SEGV => if (native_arch == .x86_64 and native_os == .linux and code == 128) // SI_KERNEL
@@ -1359,7 +1251,7 @@ fn dumpSegfaultInfoPosix(sig: i32, code: i32, addr: usize, ctx_ptr: ?*const anyo
         .arm,
         .aarch64,
         => {
-            const ctx: *const posix.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
+            const ctx: *posix.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
             dumpStackTraceFromBase(ctx);
         },
         else => {},
@@ -1585,6 +1477,99 @@ pub const SafetyLock = struct {
     }
 };
 
+/// Deprecated. Don't use this, just read from your memory directly.
+///
+/// This only exists because someone was too lazy to rework logic that used to
+/// operate on an open file to operate on a memory buffer instead.
+pub const DeprecatedFixedBufferReader = struct {
+    buf: []const u8,
+    pos: usize = 0,
+    endian: std.builtin.Endian,
+
+    pub const Error = error{ EndOfBuffer, Overflow, InvalidBuffer };
+
+    pub fn seekTo(fbr: *DeprecatedFixedBufferReader, pos: u64) Error!void {
+        if (pos > fbr.buf.len) return error.EndOfBuffer;
+        fbr.pos = @intCast(pos);
+    }
+
+    pub fn seekForward(fbr: *DeprecatedFixedBufferReader, amount: u64) Error!void {
+        if (fbr.buf.len - fbr.pos < amount) return error.EndOfBuffer;
+        fbr.pos += @intCast(amount);
+    }
+
+    pub inline fn readByte(fbr: *DeprecatedFixedBufferReader) Error!u8 {
+        if (fbr.pos >= fbr.buf.len) return error.EndOfBuffer;
+        defer fbr.pos += 1;
+        return fbr.buf[fbr.pos];
+    }
+
+    pub fn readByteSigned(fbr: *DeprecatedFixedBufferReader) Error!i8 {
+        return @bitCast(try fbr.readByte());
+    }
+
+    pub fn readInt(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        const size = @divExact(@typeInfo(T).Int.bits, 8);
+        if (fbr.buf.len - fbr.pos < size) return error.EndOfBuffer;
+        defer fbr.pos += size;
+        return std.mem.readInt(T, fbr.buf[fbr.pos..][0..size], fbr.endian);
+    }
+
+    pub fn readIntChecked(
+        fbr: *DeprecatedFixedBufferReader,
+        comptime T: type,
+        ma: *MemoryAccessor,
+    ) Error!T {
+        if (ma.load(T, @intFromPtr(fbr.buf[fbr.pos..].ptr)) == null)
+            return error.InvalidBuffer;
+
+        return fbr.readInt(T);
+    }
+
+    pub fn readUleb128(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        return std.leb.readUleb128(T, fbr);
+    }
+
+    pub fn readIleb128(fbr: *DeprecatedFixedBufferReader, comptime T: type) Error!T {
+        return std.leb.readIleb128(T, fbr);
+    }
+
+    pub fn readAddress(fbr: *DeprecatedFixedBufferReader, format: std.dwarf.Format) Error!u64 {
+        return switch (format) {
+            .@"32" => try fbr.readInt(u32),
+            .@"64" => try fbr.readInt(u64),
+        };
+    }
+
+    pub fn readAddressChecked(
+        fbr: *DeprecatedFixedBufferReader,
+        format: std.dwarf.Format,
+        ma: *MemoryAccessor,
+    ) Error!u64 {
+        return switch (format) {
+            .@"32" => try fbr.readIntChecked(u32, ma),
+            .@"64" => try fbr.readIntChecked(u64, ma),
+        };
+    }
+
+    pub fn readBytes(fbr: *DeprecatedFixedBufferReader, len: usize) Error![]const u8 {
+        if (fbr.buf.len - fbr.pos < len) return error.EndOfBuffer;
+        defer fbr.pos += len;
+        return fbr.buf[fbr.pos..][0..len];
+    }
+
+    pub fn readBytesTo(fbr: *DeprecatedFixedBufferReader, comptime sentinel: u8) Error![:sentinel]const u8 {
+        const end = @call(.always_inline, std.mem.indexOfScalarPos, .{
+            u8,
+            fbr.buf,
+            fbr.pos,
+            sentinel,
+        }) orelse return error.EndOfBuffer;
+        defer fbr.pos = end + 1;
+        return fbr.buf[fbr.pos..end :sentinel];
+    }
+};
+
 /// Detect whether the program is being executed in the Valgrind virtual machine.
 ///
 /// When Valgrind integrations are disabled, this returns comptime-known false.
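The struct added above is a plain cursor over a byte slice: `pos` advances, `endian` governs fixed-width reads, and the `*Checked` variants consult a `MemoryAccessor` before touching the underlying memory. A small usage sketch; the buffer layout here is invented purely for illustration:

const std = @import("std");

fn parseHeader(bytes: []const u8) !void {
    var fbr: std.debug.DeprecatedFixedBufferReader = .{
        .buf = bytes,
        .endian = .little,
    };
    const version = try fbr.readInt(u16); // fixed-width read honoring `endian`
    const count = try fbr.readUleb128(u32); // LEB128 read delegating to std.leb
    const name = try fbr.readBytesTo(0); // slice up to (not including) a 0 sentinel
    _ = .{ version, count, name };
}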
(File diff suppressed because it is too large.)
@@ -1,8 +1,9 @@
 const builtin = @import("builtin");
 
 const std = @import("../../std.zig");
 const mem = std.mem;
-const native_os = builtin.os.tag;
 const posix = std.posix;
+const Arch = std.Target.Cpu.Arch;
 
 pub fn supportsUnwinding(target: std.Target) bool {
     return switch (target.cpu.arch) {
@@ -26,8 +27,8 @@ pub fn supportsUnwinding(target: std.Target) bool {
     };
 }
 
-pub fn ipRegNum() u8 {
-    return switch (builtin.cpu.arch) {
+pub fn ipRegNum(arch: Arch) u8 {
+    return switch (arch) {
         .x86 => 8,
         .x86_64 => 16,
         .arm => 15,
@@ -36,9 +37,10 @@ pub fn ipRegNum() u8 {
     };
 }
 
-pub fn fpRegNum(reg_context: RegisterContext) u8 {
-    return switch (builtin.cpu.arch) {
-        // GCC on OS X historically did the opposite of ELF for these registers (only in .eh_frame), and that is now the convention for MachO
+pub fn fpRegNum(arch: Arch, reg_context: RegisterContext) u8 {
+    return switch (arch) {
+        // GCC on OS X historically did the opposite of ELF for these registers
+        // (only in .eh_frame), and that is now the convention for MachO
         .x86 => if (reg_context.eh_frame and reg_context.is_macho) 4 else 5,
         .x86_64 => 6,
         .arm => 11,
@@ -47,8 +49,8 @@ pub fn fpRegNum(reg_context: RegisterContext) u8 {
     };
 }
 
-pub fn spRegNum(reg_context: RegisterContext) u8 {
-    return switch (builtin.cpu.arch) {
+pub fn spRegNum(arch: Arch, reg_context: RegisterContext) u8 {
+    return switch (arch) {
         .x86 => if (reg_context.eh_frame and reg_context.is_macho) 5 else 4,
         .x86_64 => 7,
         .arm => 13,
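The change rippling through the three helpers above: the register-number lookups no longer hard-code `builtin.cpu.arch` but take the architecture as a parameter, a step toward handling DWARF data for a non-native target. A minimal sketch of the new call shape, mirroring the updated test near the end of this diff (`std.debug.Dwarf.abi` is the import path used there):

const builtin = @import("builtin");
const abi = @import("std").debug.Dwarf.abi;

// Callers acting on the running program now pass the native arch explicitly.
const ip_reg = abi.ipRegNum(builtin.cpu.arch); // 16 on x86_64, 15 on arm
const fp_reg = abi.fpRegNum(builtin.cpu.arch, .{ .eh_frame = true, .is_macho = false });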
@@ -57,33 +59,12 @@ pub fn spRegNum(reg_context: RegisterContext) u8 {
     };
 }
 
-/// Some platforms use pointer authentication - the upper bits of instruction pointers contain a signature.
-/// This function clears these signature bits to make the pointer usable.
-pub inline fn stripInstructionPtrAuthCode(ptr: usize) usize {
-    if (builtin.cpu.arch == .aarch64) {
-        // `hint 0x07` maps to `xpaclri` (or `nop` if the hardware doesn't support it)
-        // The save / restore is because `xpaclri` operates on x30 (LR)
-        return asm (
-            \\mov x16, x30
-            \\mov x30, x15
-            \\hint 0x07
-            \\mov x15, x30
-            \\mov x30, x16
-            : [ret] "={x15}" (-> usize),
-            : [ptr] "{x15}" (ptr),
-            : "x16"
-        );
-    }
-
-    return ptr;
-}
-
 pub const RegisterContext = struct {
     eh_frame: bool,
     is_macho: bool,
 };
 
-pub const AbiError = error{
+pub const RegBytesError = error{
     InvalidRegister,
     UnimplementedArch,
     UnimplementedOs,
@@ -91,55 +72,21 @@ pub const AbiError = error{
     ThreadContextNotSupported,
 };
 
-fn RegValueReturnType(comptime ContextPtrType: type, comptime T: type) type {
-    const reg_bytes_type = comptime RegBytesReturnType(ContextPtrType);
-    const info = @typeInfo(reg_bytes_type).Pointer;
-    return @Type(.{
-        .Pointer = .{
-            .size = .One,
-            .is_const = info.is_const,
-            .is_volatile = info.is_volatile,
-            .is_allowzero = info.is_allowzero,
-            .alignment = info.alignment,
-            .address_space = info.address_space,
-            .child = T,
-            .sentinel = null,
-        },
-    });
-}
-
-/// Returns a pointer to a register stored in a ThreadContext, preserving the pointer attributes of the context.
-pub fn regValueNative(
-    comptime T: type,
-    thread_context_ptr: anytype,
-    reg_number: u8,
-    reg_context: ?RegisterContext,
-) !RegValueReturnType(@TypeOf(thread_context_ptr), T) {
-    const reg_bytes = try regBytes(thread_context_ptr, reg_number, reg_context);
-    if (@sizeOf(T) != reg_bytes.len) return error.IncompatibleRegisterSize;
-    return mem.bytesAsValue(T, reg_bytes[0..@sizeOf(T)]);
-}
-
-fn RegBytesReturnType(comptime ContextPtrType: type) type {
-    const info = @typeInfo(ContextPtrType);
-    if (info != .Pointer or info.Pointer.child != std.debug.ThreadContext) {
-        @compileError("Expected a pointer to std.debug.ThreadContext, got " ++ @typeName(@TypeOf(ContextPtrType)));
-    }
-
-    return if (info.Pointer.is_const) return []const u8 else []u8;
-}
-
 /// Returns a slice containing the backing storage for `reg_number`.
 ///
+/// This function assumes the Dwarf information corresponds not necessarily to
+/// the current executable, but at least with a matching CPU architecture and
+/// OS. It is planned to lift this limitation with a future enhancement.
+///
 /// `reg_context` describes in what context the register number is used, as it can have different
 /// meanings depending on the DWARF container. It is only required when getting the stack or
 /// frame pointer register on some architectures.
 pub fn regBytes(
-    thread_context_ptr: anytype,
+    thread_context_ptr: *std.debug.ThreadContext,
     reg_number: u8,
     reg_context: ?RegisterContext,
-) AbiError!RegBytesReturnType(@TypeOf(thread_context_ptr)) {
-    if (native_os == .windows) {
+) RegBytesError![]u8 {
+    if (builtin.os.tag == .windows) {
         return switch (builtin.cpu.arch) {
             .x86 => switch (reg_number) {
                 0 => mem.asBytes(&thread_context_ptr.Eax),
@@ -194,7 +141,7 @@ pub fn regBytes(
 
     const ucontext_ptr = thread_context_ptr;
     return switch (builtin.cpu.arch) {
-        .x86 => switch (native_os) {
+        .x86 => switch (builtin.os.tag) {
             .linux, .netbsd, .solaris, .illumos => switch (reg_number) {
                 0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.EAX]),
                 1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.ECX]),
@@ -229,7 +176,7 @@ pub fn regBytes(
             },
             else => error.UnimplementedOs,
         },
-        .x86_64 => switch (native_os) {
+        .x86_64 => switch (builtin.os.tag) {
             .linux, .solaris, .illumos => switch (reg_number) {
                 0 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RAX]),
                 1 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RDX]),
@@ -248,7 +195,7 @@ pub fn regBytes(
                 14 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R14]),
                 15 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.R15]),
                 16 => mem.asBytes(&ucontext_ptr.mcontext.gregs[posix.REG.RIP]),
-                17...32 => |i| if (native_os.isSolarish())
+                17...32 => |i| if (builtin.os.tag.isSolarish())
                     mem.asBytes(&ucontext_ptr.mcontext.fpregs.chip_state.xmm[i - 17])
                 else
                     mem.asBytes(&ucontext_ptr.mcontext.fpregs.xmm[i - 17]),
@@ -318,7 +265,7 @@ pub fn regBytes(
             },
             else => error.UnimplementedOs,
         },
-        .arm => switch (native_os) {
+        .arm => switch (builtin.os.tag) {
             .linux => switch (reg_number) {
                 0 => mem.asBytes(&ucontext_ptr.mcontext.arm_r0),
                 1 => mem.asBytes(&ucontext_ptr.mcontext.arm_r1),
@@ -341,7 +288,7 @@ pub fn regBytes(
             },
             else => error.UnimplementedOs,
         },
-        .aarch64 => switch (native_os) {
+        .aarch64 => switch (builtin.os.tag) {
             .macos, .ios => switch (reg_number) {
                 0...28 => mem.asBytes(&ucontext_ptr.mcontext.ss.regs[reg_number]),
                 29 => mem.asBytes(&ucontext_ptr.mcontext.ss.fp),
@@ -389,22 +336,14 @@ pub fn regBytes(
     };
 }
 
-/// Returns the ABI-defined default value this register has in the unwinding table
-/// before running any of the CIE instructions. The DWARF spec defines these as having
-/// the .undefined rule by default, but allows ABI authors to override that.
-pub fn getRegDefaultValue(reg_number: u8, context: *std.debug.Dwarf.UnwindContext, out: []u8) !void {
-    switch (builtin.cpu.arch) {
-        .aarch64 => {
-            // Callee-saved registers are initialized as if they had the .same_value rule
-            if (reg_number >= 19 and reg_number <= 28) {
-                const src = try regBytes(context.thread_context, reg_number, context.reg_context);
-                if (src.len != out.len) return error.RegisterSizeMismatch;
-                @memcpy(out, src);
-                return;
-            }
-        },
-        else => {},
-    }
-
-    @memset(out, undefined);
+/// Returns a pointer to a register stored in a ThreadContext, preserving the
+/// pointer attributes of the context.
+pub fn regValueNative(
+    thread_context_ptr: *std.debug.ThreadContext,
+    reg_number: u8,
+    reg_context: ?RegisterContext,
+) !*align(1) usize {
+    const reg_bytes = try regBytes(thread_context_ptr, reg_number, reg_context);
+    if (@sizeOf(usize) != reg_bytes.len) return error.IncompatibleRegisterSize;
+    return mem.bytesAsValue(usize, reg_bytes[0..@sizeOf(usize)]);
 }
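Taken together, these hunks pin `regBytes` to a concrete `*std.debug.ThreadContext` and shrink `regValueNative` to a usize-only wrapper over it. A sketch of how the pair composes; obtaining the `ThreadContext` (e.g. via `std.debug.getContext`) is assumed to happen elsewhere:

const std = @import("std");
const builtin = @import("builtin");
const abi = std.debug.Dwarf.abi;

// Read the instruction pointer out of a captured thread context.
fn readIp(ctx: *std.debug.ThreadContext) !usize {
    // reg_context may be null: it is only needed for sp/fp on some arches.
    // regValueNative returns *align(1) usize pointing into the context.
    const ip_ptr = try abi.regValueNative(ctx, abi.ipRegNum(builtin.cpu.arch), null);
    return ip_ptr.*;
}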
@@ -297,391 +297,3 @@ pub const Instruction = union(Opcode) {
         }
     }
 };
-
-/// Since register rules are applied (usually) during a panic,
-/// checked addition / subtraction is used so that we can return
-/// an error and fall back to FP-based unwinding.
-pub fn applyOffset(base: usize, offset: i64) !usize {
-    return if (offset >= 0)
-        try std.math.add(usize, base, @as(usize, @intCast(offset)))
-    else
-        try std.math.sub(usize, base, @as(usize, @intCast(-offset)));
-}
-
-/// This is a virtual machine that runs DWARF call frame instructions.
-pub const VirtualMachine = struct {
-    /// See section 6.4.1 of the DWARF5 specification for details on each
-    const RegisterRule = union(enum) {
-        // The spec says that the default rule for each column is the undefined rule.
-        // However, it also allows ABI / compiler authors to specify alternate defaults, so
-        // there is a distinction made here.
-        default: void,
-
-        undefined: void,
-        same_value: void,
-
-        // offset(N)
-        offset: i64,
-
-        // val_offset(N)
-        val_offset: i64,
-
-        // register(R)
-        register: u8,
-
-        // expression(E)
-        expression: []const u8,
-
-        // val_expression(E)
-        val_expression: []const u8,
-
-        // Augmenter-defined rule
-        architectural: void,
-    };
-
-    /// Each row contains unwinding rules for a set of registers.
-    pub const Row = struct {
-        /// Offset from `FrameDescriptionEntry.pc_begin`
-        offset: u64 = 0,
-
-        /// Special-case column that defines the CFA (Canonical Frame Address) rule.
-        /// The register field of this column defines the register that CFA is derived from.
-        cfa: Column = .{},
-
-        /// The register fields in these columns define the register the rule applies to.
-        columns: ColumnRange = .{},
-
-        /// Indicates that the next write to any column in this row needs to copy
-        /// the backing column storage first, as it may be referenced by previous rows.
-        copy_on_write: bool = false,
-    };
-
-    pub const Column = struct {
-        register: ?u8 = null,
-        rule: RegisterRule = .{ .default = {} },
-
-        /// Resolves the register rule and places the result into `out` (see dwarf.abi.regBytes)
-        pub fn resolveValue(
-            self: Column,
-            context: *std.debug.Dwarf.UnwindContext,
-            expression_context: std.debug.Dwarf.expression.Context,
-            ma: *debug.StackIterator.MemoryAccessor,
-            out: []u8,
-        ) !void {
-            switch (self.rule) {
-                .default => {
-                    const register = self.register orelse return error.InvalidRegister;
-                    try abi.getRegDefaultValue(register, context, out);
-                },
-                .undefined => {
-                    @memset(out, undefined);
-                },
-                .same_value => {
-                    // TODO: This copy could be eliminated if callers always copy the state then call this function to update it
-                    const register = self.register orelse return error.InvalidRegister;
-                    const src = try abi.regBytes(context.thread_context, register, context.reg_context);
-                    if (src.len != out.len) return error.RegisterSizeMismatch;
-                    @memcpy(out, src);
-                },
-                .offset => |offset| {
-                    if (context.cfa) |cfa| {
-                        const addr = try applyOffset(cfa, offset);
-                        if (ma.load(usize, addr) == null) return error.InvalidAddress;
-                        const ptr: *const usize = @ptrFromInt(addr);
-                        mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
-                    } else return error.InvalidCFA;
-                },
-                .val_offset => |offset| {
-                    if (context.cfa) |cfa| {
-                        mem.writeInt(usize, out[0..@sizeOf(usize)], try applyOffset(cfa, offset), native_endian);
-                    } else return error.InvalidCFA;
-                },
-                .register => |register| {
-                    const src = try abi.regBytes(context.thread_context, register, context.reg_context);
-                    if (src.len != out.len) return error.RegisterSizeMismatch;
-                    @memcpy(out, try abi.regBytes(context.thread_context, register, context.reg_context));
-                },
-                .expression => |expression| {
-                    context.stack_machine.reset();
-                    const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
-                    const addr = if (value) |v| blk: {
-                        if (v != .generic) return error.InvalidExpressionValue;
-                        break :blk v.generic;
-                    } else return error.NoExpressionValue;
-
-                    if (ma.load(usize, addr) == null) return error.InvalidExpressionAddress;
-                    const ptr: *usize = @ptrFromInt(addr);
-                    mem.writeInt(usize, out[0..@sizeOf(usize)], ptr.*, native_endian);
-                },
-                .val_expression => |expression| {
-                    context.stack_machine.reset();
-                    const value = try context.stack_machine.run(expression, context.allocator, expression_context, context.cfa.?);
-                    if (value) |v| {
-                        if (v != .generic) return error.InvalidExpressionValue;
-                        mem.writeInt(usize, out[0..@sizeOf(usize)], v.generic, native_endian);
-                    } else return error.NoExpressionValue;
-                },
-                .architectural => return error.UnimplementedRegisterRule,
-            }
-        }
-    };
-
-    const ColumnRange = struct {
-        /// Index into `columns` of the first column in this row.
-        start: usize = undefined,
-        len: u8 = 0,
-    };
-
-    columns: std.ArrayListUnmanaged(Column) = .{},
-    stack: std.ArrayListUnmanaged(ColumnRange) = .{},
-    current_row: Row = .{},
-
-    /// The result of executing the CIE's initial_instructions
-    cie_row: ?Row = null,
-
-    pub fn deinit(self: *VirtualMachine, allocator: std.mem.Allocator) void {
-        self.stack.deinit(allocator);
-        self.columns.deinit(allocator);
-        self.* = undefined;
-    }
-
-    pub fn reset(self: *VirtualMachine) void {
-        self.stack.clearRetainingCapacity();
-        self.columns.clearRetainingCapacity();
-        self.current_row = .{};
-        self.cie_row = null;
-    }
-
-    /// Return a slice backed by the row's non-CFA columns
-    pub fn rowColumns(self: VirtualMachine, row: Row) []Column {
-        if (row.columns.len == 0) return &.{};
-        return self.columns.items[row.columns.start..][0..row.columns.len];
-    }
-
-    /// Either retrieves or adds a column for `register` (non-CFA) in the current row.
-    fn getOrAddColumn(self: *VirtualMachine, allocator: std.mem.Allocator, register: u8) !*Column {
-        for (self.rowColumns(self.current_row)) |*c| {
-            if (c.register == register) return c;
-        }
-
-        if (self.current_row.columns.len == 0) {
-            self.current_row.columns.start = self.columns.items.len;
-        }
-        self.current_row.columns.len += 1;
-
-        const column = try self.columns.addOne(allocator);
-        column.* = .{
-            .register = register,
-        };
-
-        return column;
-    }
-
-    /// Runs the CIE instructions, then the FDE instructions. Execution halts
-    /// once the row that corresponds to `pc` is known, and the row is returned.
-    pub fn runTo(
-        self: *VirtualMachine,
-        allocator: std.mem.Allocator,
-        pc: u64,
-        cie: std.debug.Dwarf.CommonInformationEntry,
-        fde: std.debug.Dwarf.FrameDescriptionEntry,
-        addr_size_bytes: u8,
-        endian: std.builtin.Endian,
-    ) !Row {
-        assert(self.cie_row == null);
-        if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;
-
-        var prev_row: Row = self.current_row;
-
-        var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
-        var fde_stream = std.io.fixedBufferStream(fde.instructions);
-        var streams = [_]*std.io.FixedBufferStream([]const u8){
-            &cie_stream,
-            &fde_stream,
-        };
-
-        for (&streams, 0..) |stream, i| {
-            while (stream.pos < stream.buffer.len) {
-                const instruction = try std.debug.Dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
-                prev_row = try self.step(allocator, cie, i == 0, instruction);
-                if (pc < fde.pc_begin + self.current_row.offset) return prev_row;
-            }
-        }
-
-        return self.current_row;
-    }
-
-    pub fn runToNative(
-        self: *VirtualMachine,
-        allocator: std.mem.Allocator,
-        pc: u64,
-        cie: std.debug.Dwarf.CommonInformationEntry,
-        fde: std.debug.Dwarf.FrameDescriptionEntry,
-    ) !Row {
-        return self.runTo(allocator, pc, cie, fde, @sizeOf(usize), builtin.target.cpu.arch.endian());
-    }
-
-    fn resolveCopyOnWrite(self: *VirtualMachine, allocator: std.mem.Allocator) !void {
-        if (!self.current_row.copy_on_write) return;
-
-        const new_start = self.columns.items.len;
-        if (self.current_row.columns.len > 0) {
-            try self.columns.ensureUnusedCapacity(allocator, self.current_row.columns.len);
-            self.columns.appendSliceAssumeCapacity(self.rowColumns(self.current_row));
-            self.current_row.columns.start = new_start;
-        }
-    }
-
-    /// Executes a single instruction.
-    /// If this instruction is from the CIE, `is_initial` should be set.
-    /// Returns the value of `current_row` before executing this instruction.
-    pub fn step(
-        self: *VirtualMachine,
-        allocator: std.mem.Allocator,
-        cie: std.debug.Dwarf.CommonInformationEntry,
-        is_initial: bool,
-        instruction: Instruction,
-    ) !Row {
-        // CIE instructions must be run before FDE instructions
-        assert(!is_initial or self.cie_row == null);
-        if (!is_initial and self.cie_row == null) {
-            self.cie_row = self.current_row;
-            self.current_row.copy_on_write = true;
-        }
-
-        const prev_row = self.current_row;
-        switch (instruction) {
-            .set_loc => |i| {
-                if (i.address <= self.current_row.offset) return error.InvalidOperation;
-                // TODO: Check cie.segment_selector_size != 0 for DWARFV4
-                self.current_row.offset = i.address;
-            },
-            inline .advance_loc,
-            .advance_loc1,
-            .advance_loc2,
-            .advance_loc4,
-            => |i| {
-                self.current_row.offset += i.delta * cie.code_alignment_factor;
-                self.current_row.copy_on_write = true;
-            },
-            inline .offset,
-            .offset_extended,
-            .offset_extended_sf,
-            => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{ .offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor };
-            },
-            inline .restore,
-            .restore_extended,
-            => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                if (self.cie_row) |cie_row| {
-                    const column = try self.getOrAddColumn(allocator, i.register);
-                    column.rule = for (self.rowColumns(cie_row)) |cie_column| {
-                        if (cie_column.register == i.register) break cie_column.rule;
-                    } else .{ .default = {} };
-                } else return error.InvalidOperation;
-            },
-            .nop => {},
-            .undefined => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{ .undefined = {} };
-            },
-            .same_value => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{ .same_value = {} };
-            },
-            .register => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{ .register = i.target_register };
-            },
-            .remember_state => {
-                try self.stack.append(allocator, self.current_row.columns);
-                self.current_row.copy_on_write = true;
-            },
-            .restore_state => {
-                const restored_columns = self.stack.popOrNull() orelse return error.InvalidOperation;
-                self.columns.shrinkRetainingCapacity(self.columns.items.len - self.current_row.columns.len);
-                try self.columns.ensureUnusedCapacity(allocator, restored_columns.len);
-
-                self.current_row.columns.start = self.columns.items.len;
-                self.current_row.columns.len = restored_columns.len;
-                self.columns.appendSliceAssumeCapacity(self.columns.items[restored_columns.start..][0..restored_columns.len]);
-            },
-            .def_cfa => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                self.current_row.cfa = .{
-                    .register = i.register,
-                    .rule = .{ .val_offset = @intCast(i.offset) },
-                };
-            },
-            .def_cfa_sf => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                self.current_row.cfa = .{
-                    .register = i.register,
-                    .rule = .{ .val_offset = i.offset * cie.data_alignment_factor },
-                };
-            },
-            .def_cfa_register => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
-                self.current_row.cfa.register = i.register;
-            },
-            .def_cfa_offset => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
-                self.current_row.cfa.rule = .{
-                    .val_offset = @intCast(i.offset),
-                };
-            },
-            .def_cfa_offset_sf => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
-                self.current_row.cfa.rule = .{
-                    .val_offset = i.offset * cie.data_alignment_factor,
-                };
-            },
-            .def_cfa_expression => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                self.current_row.cfa.register = undefined;
-                self.current_row.cfa.rule = .{
-                    .expression = i.block,
-                };
-            },
-            .expression => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{
-                    .expression = i.block,
-                };
-            },
-            .val_offset => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{
-                    .val_offset = @as(i64, @intCast(i.offset)) * cie.data_alignment_factor,
-                };
-            },
-            .val_offset_sf => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{
-                    .val_offset = i.offset * cie.data_alignment_factor,
-                };
-            },
-            .val_expression => |i| {
-                try self.resolveCopyOnWrite(allocator);
-                const column = try self.getOrAddColumn(allocator, i.register);
-                column.rule = .{
-                    .val_expression = i.block,
-                };
-            },
-        }
-
-        return prev_row;
-    }
-};
@@ -1,11 +1,13 @@
-const std = @import("std");
 const builtin = @import("builtin");
+const native_arch = builtin.cpu.arch;
+const native_endian = native_arch.endian();
+
+const std = @import("std");
 const leb = std.leb;
 const OP = std.dwarf.OP;
 const abi = std.debug.Dwarf.abi;
 const mem = std.mem;
 const assert = std.debug.assert;
-const native_endian = builtin.cpu.arch.endian();
 
 /// Expressions can be evaluated in different contexts, each requiring its own set of inputs.
 /// Callers should specify all the fields relevant to their context. If a field is required
@@ -14,7 +16,7 @@ pub const Context = struct {
     /// The dwarf format of the section this expression is in
     format: std.dwarf.Format = .@"32",
     /// If specified, any addresses will pass through before being accessed
-    memory_accessor: ?*std.debug.StackIterator.MemoryAccessor = null,
+    memory_accessor: ?*std.debug.MemoryAccessor = null,
     /// The compilation unit this expression relates to, if any
     compile_unit: ?*const std.debug.Dwarf.CompileUnit = null,
     /// When evaluating a user-presented expression, this is the address of the object being evaluated
@@ -34,7 +36,7 @@ pub const Options = struct {
     /// The address size of the target architecture
     addr_size: u8 = @sizeOf(usize),
     /// Endianness of the target architecture
-    endian: std.builtin.Endian = builtin.target.cpu.arch.endian(),
+    endian: std.builtin.Endian = native_endian,
     /// Restrict the stack machine to a subset of opcodes used in call frame instructions
     call_frame_context: bool = false,
 };
@@ -60,7 +62,7 @@ pub const Error = error{
     InvalidTypeLength,
 
     TruncatedIntegralType,
-} || abi.AbiError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero };
+} || abi.RegBytesError || error{ EndOfStream, Overflow, OutOfMemory, DivisionByZero };
 
 /// A stack machine that can decode and run DWARF expressions.
 /// Expressions can be decoded for non-native address size and endianness,
@@ -304,7 +306,7 @@ pub fn StackMachine(comptime options: Options) type {
             allocator: std.mem.Allocator,
             context: Context,
         ) Error!bool {
-            if (@sizeOf(usize) != @sizeOf(addr_type) or options.endian != comptime builtin.target.cpu.arch.endian())
+            if (@sizeOf(usize) != @sizeOf(addr_type) or options.endian != native_endian)
                 @compileError("Execution of non-native address sizes / endianness is not supported");
 
             const opcode = try stream.reader().readByte();
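For orientation, `StackMachine` is instantiated with comptime `Options` (defaulting to the native address size and endianness, per the hunk above) and driven via `run`, as the test hunk below does. A hedged sketch, assuming the type is default-initializable and that `run` returns an optional tagged value with a `.generic` arm, as the deleted call-frame code elsewhere in this commit suggests:

const std = @import("std");
const expression = std.debug.Dwarf.expression; // import path used by this file

fn evalGeneric(gpa: std.mem.Allocator, program: []const u8, ctx: expression.Context) !?usize {
    var stack_machine: expression.StackMachine(.{}) = .{}; // assumed default-initializable
    const value = try stack_machine.run(program, gpa, ctx, null);
    const v = value orelse return null;
    // Mirror the check the unwinder performs on expression results.
    if (v != .generic) return error.InvalidExpressionValue;
    return v.generic;
}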
@@ -1186,13 +1188,13 @@ test "DWARF expressions" {
     // TODO: Test fbreg (once implemented): mock a DIE and point compile_unit.frame_base at it
 
     mem.writeInt(usize, reg_bytes[0..@sizeOf(usize)], 0xee, native_endian);
-    (try abi.regValueNative(usize, &thread_context, abi.fpRegNum(reg_context), reg_context)).* = 1;
-    (try abi.regValueNative(usize, &thread_context, abi.spRegNum(reg_context), reg_context)).* = 2;
-    (try abi.regValueNative(usize, &thread_context, abi.ipRegNum(), reg_context)).* = 3;
+    (try abi.regValueNative(&thread_context, abi.fpRegNum(native_arch, reg_context), reg_context)).* = 1;
+    (try abi.regValueNative(&thread_context, abi.spRegNum(native_arch, reg_context), reg_context)).* = 2;
+    (try abi.regValueNative(&thread_context, abi.ipRegNum(native_arch), reg_context)).* = 3;
 
-    try b.writeBreg(writer, abi.fpRegNum(reg_context), @as(usize, 100));
-    try b.writeBreg(writer, abi.spRegNum(reg_context), @as(usize, 200));
-    try b.writeBregx(writer, abi.ipRegNum(), @as(usize, 300));
+    try b.writeBreg(writer, abi.fpRegNum(native_arch, reg_context), @as(usize, 100));
+    try b.writeBreg(writer, abi.spRegNum(native_arch, reg_context), @as(usize, 200));
+    try b.writeBregx(writer, abi.ipRegNum(native_arch), @as(usize, 300));
     try b.writeRegvalType(writer, @as(u8, 0), @as(usize, 400));
 
     _ = try stack_machine.run(program.items, allocator, context, 0);
lib/std/debug/MemoryAccessor.zig (new file, 128 lines)
@@ -0,0 +1,128 @@
+//! Reads memory from any address of the current location using OS-specific
+//! syscalls, bypassing memory page protection. Useful for stack unwinding.
+
+const builtin = @import("builtin");
+const native_os = builtin.os.tag;
+
+const std = @import("../std.zig");
+const posix = std.posix;
+const File = std.fs.File;
+const page_size = std.mem.page_size;
+
+const MemoryAccessor = @This();
+
+var cached_pid: posix.pid_t = -1;
+
+mem: switch (native_os) {
+    .linux => File,
+    else => void,
+},
+
+pub const init: MemoryAccessor = .{
+    .mem = switch (native_os) {
+        .linux => .{ .handle = -1 },
+        else => {},
+    },
+};
+
+fn read(ma: *MemoryAccessor, address: usize, buf: []u8) bool {
+    switch (native_os) {
+        .linux => while (true) switch (ma.mem.handle) {
+            -2 => break,
+            -1 => {
+                const linux = std.os.linux;
+                const pid = switch (@atomicLoad(posix.pid_t, &cached_pid, .monotonic)) {
+                    -1 => pid: {
+                        const pid = linux.getpid();
+                        @atomicStore(posix.pid_t, &cached_pid, pid, .monotonic);
+                        break :pid pid;
+                    },
+                    else => |pid| pid,
+                };
+                const bytes_read = linux.process_vm_readv(
+                    pid,
+                    &.{.{ .base = buf.ptr, .len = buf.len }},
+                    &.{.{ .base = @ptrFromInt(address), .len = buf.len }},
+                    0,
+                );
+                switch (linux.E.init(bytes_read)) {
+                    .SUCCESS => return bytes_read == buf.len,
+                    .FAULT => return false,
+                    .INVAL, .PERM, .SRCH => unreachable, // own pid is always valid
+                    .NOMEM => {},
+                    .NOSYS => {}, // QEMU is known not to implement this syscall.
+                    else => unreachable, // unexpected
+                }
+                var path_buf: [
+                    std.fmt.count("/proc/{d}/mem", .{std.math.minInt(posix.pid_t)})
+                ]u8 = undefined;
+                const path = std.fmt.bufPrint(&path_buf, "/proc/{d}/mem", .{pid}) catch
+                    unreachable;
+                ma.mem = std.fs.openFileAbsolute(path, .{}) catch {
+                    ma.mem.handle = -2;
+                    break;
+                };
+            },
+            else => return (ma.mem.pread(buf, address) catch return false) == buf.len,
+        },
+        else => {},
+    }
+    if (!isValidMemory(address)) return false;
+    @memcpy(buf, @as([*]const u8, @ptrFromInt(address)));
+    return true;
+}
+
+pub fn load(ma: *MemoryAccessor, comptime Type: type, address: usize) ?Type {
+    var result: Type = undefined;
+    return if (ma.read(address, std.mem.asBytes(&result))) result else null;
+}
+
+pub fn isValidMemory(address: usize) bool {
+    // We are unable to determine validity of memory for freestanding targets
+    if (native_os == .freestanding or native_os == .uefi) return true;
+
+    const aligned_address = address & ~@as(usize, @intCast((page_size - 1)));
+    if (aligned_address == 0) return false;
+    const aligned_memory = @as([*]align(page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
+
+    if (native_os == .windows) {
+        const windows = std.os.windows;
+
+        var memory_info: windows.MEMORY_BASIC_INFORMATION = undefined;
+
+        // The only error this function can throw is ERROR_INVALID_PARAMETER.
+        // It is thrown when an invalid address is supplied.
+        const rc = windows.VirtualQuery(aligned_memory, &memory_info, aligned_memory.len) catch {
+            return false;
+        };
+
+        // Result code has to be bigger than zero (number of bytes written)
+        if (rc == 0) {
+            return false;
+        }
+
+        // Free pages cannot be read, they are unmapped
+        if (memory_info.State == windows.MEM_FREE) {
+            return false;
+        }
+
+        return true;
+    } else if (have_msync) {
+        posix.msync(aligned_memory, posix.MSF.ASYNC) catch |err| {
+            switch (err) {
+                error.UnmappedMemory => return false,
+                else => unreachable,
+            }
+        };
+
+        return true;
+    } else {
+        // We are unable to determine validity of memory on this target.
+        return true;
+    }
+}
+
+const have_msync = switch (native_os) {
+    .wasi, .emscripten, .windows => false,
+    else => true,
+};
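Since this is a new file, a brief usage sketch: an accessor is value-initialized from the `init` declaration above, and `load` returns null rather than faulting when an address is unreadable (on Linux via `process_vm_readv` or `/proc/<pid>/mem`, elsewhere via the `isValidMemory` probe):

const std = @import("std");

fn peekUsize(address: usize) ?usize {
    var ma = std.debug.MemoryAccessor.init;
    // Returns null if the kernel reports the page unmapped or unreadable.
    return ma.load(usize, address);
}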
(File diff suppressed because it is too large.)
@@ -256,7 +256,7 @@ const StackContext = union(enum) {
     current: struct {
         ret_addr: ?usize,
     },
-    exception: *const debug.ThreadContext,
+    exception: *debug.ThreadContext,
     not_supported: void,
 
     pub fn dumpStackTrace(ctx: @This()) void {