mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
- rework StackIterator to optionally use debug_info to unwind the stack
- add abi routines for getting register values - unwinding is working!
This commit is contained in:
parent 69399fbb82
commit b449d98a93
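
The change replaces the old (bp, ip) pair with a full register context throughout std.debug. A minimal sketch of the intended flow, not part of this commit (the handler name and its installation are illustrative; the @ptrCast form matches the older builtin syntax used elsewhere in this diff):

    const std = @import("std");
    const os = std.os;

    // Hypothetical POSIX signal handler: forward the kernel-provided register
    // context to the new unwinder entry point instead of extracting (bp, ip).
    fn handler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const anyopaque) callconv(.C) void {
        _ = sig;
        _ = info;
        const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
        std.debug.dumpStackTraceFromBase(ctx);
    }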
@@ -135,8 +135,9 @@ pub fn dumpCurrentStackTrace(start_addr: ?usize) void {
 /// Tries to print the stack trace starting from the supplied base pointer to stderr,
 /// unbuffered, and ignores any error returned.
+/// `context` is either *const os.ucontext_t on posix, or the result of CONTEXT.getRegs() on Windows.
 /// TODO multithreaded awareness
-pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
+pub fn dumpStackTraceFromBase(context: anytype) void {
     nosuspend {
         if (comptime builtin.target.isWasm()) {
             if (native_os == .wasi) {
@@ -156,12 +157,15 @@ pub fn dumpStackTraceFromBase(bp: usize, ip: usize) void {
         };
         const tty_config = io.tty.detectConfig(io.getStdErr());
         if (native_os == .windows) {
-            writeCurrentStackTraceWindows(stderr, debug_info, tty_config, ip) catch return;
+            writeCurrentStackTraceWindows(stderr, debug_info, tty_config, context.ip) catch return;
             return;
         }

-        printSourceAtAddress(debug_info, stderr, ip, tty_config) catch return;
-        var it = StackIterator.init(null, bp);
+        var it = StackIterator.initWithContext(null, debug_info, context) catch return;
+
+        // TODO: Should `it.dwarf_context.pc` be `it.getIp()`? (but then the non-dwarf case has to store ip)
+        printSourceAtAddress(debug_info, stderr, it.dwarf_context.pc, tty_config) catch return;

         while (it.next()) |return_address| {
             // On arm64 macOS, the address of the last frame is 0x0 rather than 0x1 as on x86_64 macOS,
             // therefore, we do a check for `return_address == 0` before subtracting 1 from it to avoid
@@ -206,6 +210,7 @@ pub fn captureStackTrace(first_address: ?usize, stack_trace: *std.builtin.StackT
         }
         stack_trace.index = slice.len;
     } else {
+        // TODO: This should use the dwarf unwinder if it's available
         var it = StackIterator.init(first_address, null);
         for (stack_trace.instruction_addresses, 0..) |*addr, i| {
             addr.* = it.next() orelse {
@@ -405,6 +410,11 @@ pub const StackIterator = struct {
     // Last known value of the frame pointer register.
     fp: usize,

+    // When DebugInfo and a register context are available, this iterator can unwind
+    // stacks with frames that don't use a frame pointer (ie. -fomit-frame-pointer).
+    debug_info: ?*DebugInfo,
+    dwarf_context: if (@hasDecl(os, "ucontext_t")) DW.UnwindContext else void = undefined,
+
     pub fn init(first_address: ?usize, fp: ?usize) StackIterator {
         if (native_arch == .sparc64) {
             // Flush all the register windows on stack.
@@ -416,9 +426,17 @@ pub const StackIterator = struct {
         return StackIterator{
             .first_address = first_address,
             .fp = fp orelse @frameAddress(),
+            .debug_info = null,
         };
     }

+    pub fn initWithContext(first_address: ?usize, debug_info: *DebugInfo, context: *const os.ucontext_t) !StackIterator {
+        var iterator = init(first_address, null);
+        iterator.debug_info = debug_info;
+        iterator.dwarf_context = try DW.UnwindContext.init(context);
+        return iterator;
+    }
+
     // Offset of the saved BP wrt the frame pointer.
     const fp_offset = if (native_arch.isRISCV())
         // On RISC-V the frame pointer points to the top of the saved register
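A sketch of driving the new constructor (assuming a debug_info: *DebugInfo and a captured ucontext: os.ucontext_t are in scope; both names are illustrative, not from this hunk):

    var it = try std.debug.StackIterator.initWithContext(null, debug_info, &ucontext);
    while (it.next()) |return_address| {
        std.debug.print("0x{x}\n", .{return_address});
    }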
@@ -500,7 +518,28 @@ pub const StackIterator = struct {
         }
     }

+    fn next_dwarf(self: *StackIterator) !void {
+        const module = try self.debug_info.?.getModuleForAddress(self.dwarf_context.pc);
+        if (module.getDwarfInfo()) |di| {
+            try di.unwindFrame(self.debug_info.?.allocator, &self.dwarf_context, module.base_address);
+        } else return error.MissingDebugInfo;
+    }
+
     fn next_internal(self: *StackIterator) ?usize {
+        if (self.debug_info != null) {
+            if (self.next_dwarf()) |_| {
+                return self.dwarf_context.pc;
+            } else |err| {
+                // Fall back to fp unwinding on the first failure,
+                // as the register context won't be updated
+                self.fp = self.dwarf_context.getFp() catch 0;
+                self.debug_info = null;
+
+                // TODO: Remove
+                print("\ndwarf unwind error {}, placing fp at 0x{x}\n\n", .{err, self.fp});
+            }
+        }
+
         const fp = if (comptime native_arch.isSPARC())
             // On SPARC the offset is positive. (!)
             math.add(usize, self.fp, fp_offset) catch return null
@@ -540,6 +579,8 @@ pub fn writeCurrentStackTrace(
     if (native_os == .windows) {
         return writeCurrentStackTraceWindows(out_stream, debug_info, tty_config, start_addr);
     }

+    // TODO: Capture a context and use initWithContext
     var it = StackIterator.init(start_addr, null);
     while (it.next()) |return_address| {
         // On arm64 macOS, the address of the last frame is 0x0 rather than 0x1 as on x86_64 macOS,
@@ -800,12 +841,14 @@ fn readCoffDebugInfo(allocator: mem.Allocator, coff_bytes: []const u8) !ModuleDe
     // This coff file has embedded DWARF debug info
     _ = sec;

-    const num_sections = std.enums.directEnumArrayLen(DW.DwarfSection, 0);
-    var sections: [num_sections]?[]const u8 = [_]?[]const u8{null} ** num_sections;
-    errdefer for (sections) |section| if (section) |s| allocator.free(s);
+    var sections: DW.DwarfInfo.SectionArray = DW.DwarfInfo.null_section_array;
+    errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);

     inline for (@typeInfo(DW.DwarfSection).Enum.fields, 0..) |section, i| {
-        sections[i] = try coff_obj.getSectionDataAlloc("." ++ section.name, allocator);
+        sections[i] = .{
+            .data = try coff_obj.getSectionDataAlloc("." ++ section.name, allocator),
+            .owned = true,
+        };
     }

     var dwarf = DW.DwarfInfo{
@@ -813,7 +856,7 @@ fn readCoffDebugInfo(allocator: mem.Allocator, coff_bytes: []const u8) !ModuleDe
         .sections = sections,
     };

-    try DW.openDwarfDebugInfo(&dwarf, allocator);
+    try DW.openDwarfDebugInfo(&dwarf, allocator, coff_bytes);
     di.debug_data = PdbOrDwarf{ .dwarf = dwarf };
     return di;
 }
@@ -854,6 +897,8 @@ pub fn readElfDebugInfo(
     elf_filename: ?[]const u8,
     build_id: ?[]const u8,
     expected_crc: ?u32,
+    parent_sections: *DW.DwarfInfo.SectionArray,
+    parent_mapped_mem: ?[]align(mem.page_size) const u8,
 ) !ModuleDebugInfo {
     nosuspend {
@@ -891,10 +936,20 @@ pub fn readElfDebugInfo(
             @ptrCast(@alignCast(&mapped_mem[shoff])),
         )[0..hdr.e_shnum];

-        const num_sections = std.enums.directEnumArrayLen(DW.DwarfSection, 0);
-        var sections: [num_sections]?[]const u8 = [_]?[]const u8{null} ** num_sections;
-        var owned_sections: [num_sections][]const u8 = [_][]const u8{&.{}} ** num_sections;
-        errdefer for (owned_sections) |section| allocator.free(section);
+        var sections: DW.DwarfInfo.SectionArray = DW.DwarfInfo.null_section_array;
+
+        // Take ownership over any owned sections from the parent scope
+        for (parent_sections, &sections) |*parent, *section| {
+            if (parent.*) |*p| {
+                section.* = p.*;
+                p.owned = false;
+            }
+        }
+
+        errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);
+
+        // TODO: This function should take a ptr to GNU_EH_FRAME (which is .eh_frame_hdr) from the ELF headers
+        // and prefill sections[.eh_frame_hdr]

         var separate_debug_filename: ?[]const u8 = null;
         var separate_debug_crc: ?u32 = null;
@@ -920,7 +975,7 @@ pub fn readElfDebugInfo(
             if (section_index == null) continue;

             const section_bytes = try chopSlice(mapped_mem, shdr.sh_offset, shdr.sh_size);
-            if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) {
+            sections[section_index.?] = if ((shdr.sh_flags & elf.SHF_COMPRESSED) > 0) blk: {
                 var section_stream = io.fixedBufferStream(section_bytes);
                 var section_reader = section_stream.reader();
                 const chdr = section_reader.readStruct(elf.Chdr) catch continue;
@@ -937,11 +992,14 @@ pub fn readElfDebugInfo(
                 const read = zlib_stream.reader().readAll(decompressed_section) catch continue;
                 assert(read == decompressed_section.len);

-                sections[section_index.?] = decompressed_section;
-                owned_sections[section_index.?] = decompressed_section;
-            } else {
-                sections[section_index.?] = section_bytes;
-            }
+                break :blk .{
+                    .data = decompressed_section,
+                    .owned = true,
+                };
+            } else .{
+                .data = section_bytes,
+                .owned = false,
+            };
         }

         const missing_debug_info =
@@ -953,6 +1011,12 @@ pub fn readElfDebugInfo(
         // Attempt to load debug info from an external file
         // See: https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
         if (missing_debug_info) {
+
+            // Only allow one level of debug info nesting
+            if (parent_mapped_mem) |_| {
+                return error.MissingDebugInfo;
+            }
+
             const global_debug_directories = [_][]const u8{
                 "/usr/lib/debug",
             };
@@ -977,8 +1041,9 @@ pub fn readElfDebugInfo(
                     // TODO: joinBuf would be ideal (with a fs.MAX_PATH_BYTES buffer)
                     const path = try fs.path.join(allocator, &.{ global_directory, ".build-id", &id_prefix_buf, filename });
                     defer allocator.free(path);
-                    return readElfDebugInfo(allocator, path, null, separate_debug_crc) catch continue;
+                    // TODO: Remove
+                    std.debug.print(" Loading external debug info from {s}\n", .{path});
+                    return readElfDebugInfo(allocator, path, null, separate_debug_crc, &sections, mapped_mem) catch continue;
                 }
             }

@@ -987,14 +1052,14 @@ pub fn readElfDebugInfo(
             if (elf_filename != null and mem.eql(u8, elf_filename.?, separate_filename)) return error.MissingDebugInfo;

             // <cwd>/<gnu_debuglink>
-            if (readElfDebugInfo(allocator, separate_filename, null, separate_debug_crc)) |debug_info| return debug_info else |_| {}
+            if (readElfDebugInfo(allocator, separate_filename, null, separate_debug_crc, &sections, mapped_mem)) |debug_info| return debug_info else |_| {}

             // <cwd>/.debug/<gnu_debuglink>
             {
                 const path = try fs.path.join(allocator, &.{ ".debug", separate_filename });
                 defer allocator.free(path);

-                if (readElfDebugInfo(allocator, path, null, separate_debug_crc)) |debug_info| return debug_info else |_| {}
+                if (readElfDebugInfo(allocator, path, null, separate_debug_crc, &sections, mapped_mem)) |debug_info| return debug_info else |_| {}
             }

             var cwd_buf: [fs.MAX_PATH_BYTES]u8 = undefined;
@@ -1004,7 +1069,7 @@ pub fn readElfDebugInfo(
             for (global_debug_directories) |global_directory| {
                 const path = try fs.path.join(allocator, &.{ global_directory, cwd_path, separate_filename });
                 defer allocator.free(path);
-                if (readElfDebugInfo(allocator, path, null, separate_debug_crc)) |debug_info| return debug_info else |_| {}
+                if (readElfDebugInfo(allocator, path, null, separate_debug_crc, &sections, mapped_mem)) |debug_info| return debug_info else |_| {}
             }
         }

@@ -1016,13 +1081,13 @@ pub fn readElfDebugInfo(
             .sections = sections,
         };

-        try DW.openDwarfDebugInfo(&di, allocator);
+        try DW.openDwarfDebugInfo(&di, allocator, parent_mapped_mem orelse mapped_mem);

        return ModuleDebugInfo{
            .base_address = undefined,
            .dwarf = di,
-           .mapped_memory = mapped_mem,
-           .owned_sections = owned_sections,
+           .mapped_memory = parent_mapped_mem orelse mapped_mem,
+           .external_mapped_memory = if (parent_mapped_mem != null) mapped_mem else null,
        };
    }
}
@@ -1426,7 +1491,8 @@ pub const DebugInfo = struct {
         for (phdrs) |*phdr| {
             if (phdr.p_type != elf.PT_LOAD) continue;

-            const seg_start = info.dlpi_addr + phdr.p_vaddr;
+            // Overflowing addition is used to handle the case of vDSOs having a p_vaddr = 0xffffffffff700000
+            const seg_start = info.dlpi_addr +% phdr.p_vaddr;
             const seg_end = seg_start + phdr.p_memsz;
             if (context.address >= seg_start and context.address < seg_end) {
                 // Android libc uses NULL instead of an empty string to mark the
@@ -1437,6 +1503,8 @@ pub const DebugInfo = struct {
             }
         } else return;

+        // TODO: Look for the GNU_EH_FRAME section and pass it to readElfDebugInfo
+
         for (info.dlpi_phdr[0..info.dlpi_phnum]) |phdr| {
             if (phdr.p_type != elf.PT_NOTE) continue;

@@ -1447,7 +1515,7 @@ pub const DebugInfo = struct {
             const note_type = mem.readIntSliceNative(u32, note_bytes[8..12]);
             if (note_type != elf.NT_GNU_BUILD_ID) continue;
             if (!mem.eql(u8, "GNU\x00", note_bytes[12..16])) continue;
-            context.build_id = note_bytes[16 .. 16 + desc_size];
+            context.build_id = note_bytes[16..][0..desc_size];
         }

         // Stop the iteration
@@ -1466,7 +1534,10 @@ pub const DebugInfo = struct {
         const obj_di = try self.allocator.create(ModuleDebugInfo);
         errdefer self.allocator.destroy(obj_di);

-        obj_di.* = try readElfDebugInfo(self.allocator, if (ctx.name.len > 0) ctx.name else null, ctx.build_id, null);
+        var sections: DW.DwarfInfo.SectionArray = DW.DwarfInfo.null_section_array;
+        // TODO: If GNU_EH_FRAME was found, set it in sections
+
+        obj_di.* = try readElfDebugInfo(self.allocator, if (ctx.name.len > 0) ctx.name else null, ctx.build_id, null, &sections, null);
         obj_di.base_address = ctx.base_address;

         try self.address_map.putNoClobber(ctx.base_address, obj_di);
@@ -1491,6 +1562,7 @@ pub const ModuleDebugInfo = switch (native_os) {
     .macos, .ios, .watchos, .tvos => struct {
         base_address: usize,
         mapped_memory: []align(mem.page_size) const u8,
+        external_mapped_memory: ?[]align(mem.page_size) const u8,
         symbols: []const MachoSymbol,
         strings: [:0]const u8,
         ofiles: OFileTable,
@@ -1511,6 +1583,7 @@ pub const ModuleDebugInfo = switch (native_os) {
             self.ofiles.deinit();
             allocator.free(self.symbols);
             os.munmap(self.mapped_memory);
+            if (self.external_mapped_memory) |m| os.munmap(m);
         }

         fn loadOFile(self: *@This(), allocator: mem.Allocator, o_file_path: []const u8) !OFileInfo {
@@ -1723,6 +1796,12 @@ pub const ModuleDebugInfo = switch (native_os) {
                 unreachable;
             }
         }
+
+        pub fn getDwarfInfo(self: *@This()) ?*const DW.DwarfInfo {
+            // TODO: Implement
+            _ = self;
+            return null;
+        }
     },
     .uefi, .windows => struct {
         base_address: usize,
@@ -1803,19 +1882,24 @@ pub const ModuleDebugInfo = switch (native_os) {
                 .line_info = opt_line_info,
             };
         }
+
+        pub fn getDwarfInfo(self: *@This()) ?*const DW.DwarfInfo {
+            return switch (self.debug_data) {
+                .dwarf => |*dwarf| dwarf,
+                else => null,
+            };
+        }
     },
     .linux, .netbsd, .freebsd, .dragonfly, .openbsd, .haiku, .solaris => struct {
         base_address: usize,
         dwarf: DW.DwarfInfo,
         mapped_memory: []align(mem.page_size) const u8,
-        owned_sections: [num_sections][]const u8 = [_][]const u8{&.{}} ** num_sections,
-
-        const num_sections = 14;
+        external_mapped_memory: ?[]align(mem.page_size) const u8,

         fn deinit(self: *@This(), allocator: mem.Allocator) void {
             self.dwarf.deinit(allocator);
-            for (self.owned_sections) |section| allocator.free(section);
             os.munmap(self.mapped_memory);
+            if (self.external_mapped_memory) |m| os.munmap(m);
         }

         pub fn getSymbolAtAddress(self: *@This(), allocator: mem.Allocator, address: usize) !SymbolInfo {
@@ -1823,6 +1907,10 @@ pub const ModuleDebugInfo = switch (native_os) {
             const relocated_address = address - self.base_address;
             return getSymbolFromDwarf(allocator, relocated_address, &self.dwarf);
         }
+
+        pub fn getDwarfInfo(self: *@This()) ?*const DW.DwarfInfo {
+            return &self.dwarf;
+        }
     },
     .wasi => struct {
         fn deinit(self: *@This(), allocator: mem.Allocator) void {
@@ -1836,6 +1924,11 @@ pub const ModuleDebugInfo = switch (native_os) {
             _ = address;
             return SymbolInfo{};
         }
+
+        pub fn getDwarfInfo(self: *@This()) ?*const DW.DwarfInfo {
+            _ = self;
+            return null;
+        }
     },
     else => DW.DwarfInfo,
 };
@@ -1992,55 +2085,69 @@ fn dumpSegfaultInfoPosix(sig: i32, addr: usize, ctx_ptr: ?*const anyopaque) void
     } catch os.abort();

     switch (native_arch) {
-        .x86 => {
-            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
-            const ip = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EIP]));
-            const bp = @as(usize, @intCast(ctx.mcontext.gregs[os.REG.EBP]));
-            dumpStackTraceFromBase(bp, ip);
-        },
-        .x86_64 => {
-            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
-            const ip = switch (native_os) {
-                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RIP])),
-                .freebsd => @as(usize, @intCast(ctx.mcontext.rip)),
-                .openbsd => @as(usize, @intCast(ctx.sc_rip)),
-                .macos => @as(usize, @intCast(ctx.mcontext.ss.rip)),
-                else => unreachable,
-            };
-            const bp = switch (native_os) {
-                .linux, .netbsd, .solaris => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.RBP])),
-                .openbsd => @as(usize, @intCast(ctx.sc_rbp)),
-                .freebsd => @as(usize, @intCast(ctx.mcontext.rbp)),
-                .macos => @as(usize, @intCast(ctx.mcontext.ss.rbp)),
-                else => unreachable,
-            };
-            dumpStackTraceFromBase(bp, ip);
-        },
-        .arm => {
-            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
-            const ip = @as(usize, @intCast(ctx.mcontext.arm_pc));
-            const bp = @as(usize, @intCast(ctx.mcontext.arm_fp));
-            dumpStackTraceFromBase(bp, ip);
-        },
-        .aarch64 => {
-            const ctx: *const os.ucontext_t = @ptrCast(@alignCast(ctx_ptr));
-            const ip = switch (native_os) {
-                .macos => @as(usize, @intCast(ctx.mcontext.ss.pc)),
-                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.PC])),
-                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.elr)),
-                else => @as(usize, @intCast(ctx.mcontext.pc)),
-            };
-            // x29 is the ABI-designated frame pointer
-            const bp = switch (native_os) {
-                .macos => @as(usize, @intCast(ctx.mcontext.ss.fp)),
-                .netbsd => @as(usize, @intCast(ctx.mcontext.gregs[os.REG.FP])),
-                .freebsd => @as(usize, @intCast(ctx.mcontext.gpregs.x[os.REG.FP])),
-                else => @as(usize, @intCast(ctx.mcontext.regs[29])),
-            };
-            dumpStackTraceFromBase(bp, ip);
-        },
+        .x86,
+        .x86_64,
+        .arm,
+        .aarch64,
+        => {
+            const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+            dumpStackTraceFromBase(ctx);
+        },
         else => {},
     }

+    // TODO: Move this logic to dwarf.abi.regBytes
+
+    // switch (native_arch) {
+    //     .x86 => {
+    //         const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+    //         const ip = @intCast(usize, ctx.mcontext.gregs[os.REG.EIP]);
+    //         const bp = @intCast(usize, ctx.mcontext.gregs[os.REG.EBP]);
+    //         dumpStackTraceFromBase(bp, ip);
+    //     },
+    //     .x86_64 => {
+    //         const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+    //         const ip = switch (native_os) {
+    //             .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RIP]),
+    //             .freebsd => @intCast(usize, ctx.mcontext.rip),
+    //             .openbsd => @intCast(usize, ctx.sc_rip),
+    //             .macos => @intCast(usize, ctx.mcontext.ss.rip),
+    //             else => unreachable,
+    //         };
+    //         const bp = switch (native_os) {
+    //             .linux, .netbsd, .solaris => @intCast(usize, ctx.mcontext.gregs[os.REG.RBP]),
+    //             .openbsd => @intCast(usize, ctx.sc_rbp),
+    //             .freebsd => @intCast(usize, ctx.mcontext.rbp),
+    //             .macos => @intCast(usize, ctx.mcontext.ss.rbp),
+    //             else => unreachable,
+    //         };
+    //         dumpStackTraceFromBase(bp, ip);
+    //     },
+    //     .arm => {
+    //         const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+    //         const ip = @intCast(usize, ctx.mcontext.arm_pc);
+    //         const bp = @intCast(usize, ctx.mcontext.arm_fp);
+    //         dumpStackTraceFromBase(bp, ip);
+    //     },
+    //     .aarch64 => {
+    //         const ctx = @ptrCast(*const os.ucontext_t, @alignCast(@alignOf(os.ucontext_t), ctx_ptr));
+    //         const ip = switch (native_os) {
+    //             .macos => @intCast(usize, ctx.mcontext.ss.pc),
+    //             .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
+    //             .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
+    //             else => @intCast(usize, ctx.mcontext.pc),
+    //         };
+    //         // x29 is the ABI-designated frame pointer
+    //         const bp = switch (native_os) {
+    //             .macos => @intCast(usize, ctx.mcontext.ss.fp),
+    //             .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
+    //             .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
+    //             else => @intCast(usize, ctx.mcontext.regs[29]),
+    //         };
+    //         dumpStackTraceFromBase(bp, ip);
+    //     },
+    //     else => {},
+    // }
 }

 fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(windows.WINAPI) c_long {
@@ -2105,7 +2212,7 @@ fn dumpSegfaultInfoWindows(info: *windows.EXCEPTION_POINTERS, msg: u8, label: ?[
         else => unreachable,
     } catch os.abort();

-    dumpStackTraceFromBase(regs.bp, regs.ip);
+    dumpStackTraceFromBase(regs);
 }

 pub fn dumpStackPointerAddr(prefix: []const u8) void {

@@ -3,6 +3,7 @@ const std = @import("std.zig");
 const debug = std.debug;
 const fs = std.fs;
 const io = std.io;
+const os = std.os;
 const mem = std.mem;
 const math = std.math;
 const leb = @import("leb128.zig");
@@ -664,10 +665,17 @@ pub const DwarfSection = enum {
 };

 pub const DwarfInfo = struct {
-    endian: std.builtin.Endian,
+    pub const Section = struct {
+        data: []const u8,
+        owned: bool,
+    };

-    // No section memory is owned by the DwarfInfo
-    sections: [std.enums.directEnumArrayLen(DwarfSection, 0)]?[]const u8,
+    const num_sections = std.enums.directEnumArrayLen(DwarfSection, 0);
+    pub const SectionArray = [num_sections]?Section;
+    pub const null_section_array = [_]?Section{null} ** num_sections;
+
+    endian: std.builtin.Endian,
+    sections: SectionArray,

     // Filled later by the initializer
     abbrev_table_list: std.ArrayListUnmanaged(AbbrevTableHeader) = .{},
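The owned flag is what lets deinit free decompressed section data while leaving memory-mapped bytes alone; a sketch of the two cases (mapped_bytes and decompressed_bytes are placeholder slices, not names from this commit):

    // Borrowed straight from the mapped binary: deinit must not free it.
    const borrowed = DwarfInfo.Section{ .data = mapped_bytes, .owned = false };
    // Allocated while inflating a compressed section: deinit/errdefer frees it.
    const inflated = DwarfInfo.Section{ .data = decompressed_bytes, .owned = true };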
@@ -679,10 +687,13 @@ pub const DwarfInfo = struct {
     fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .{},

     pub fn section(di: DwarfInfo, dwarf_section: DwarfSection) ?[]const u8 {
-        return di.sections[@enumToInt(dwarf_section)];
+        return if (di.sections[@enumToInt(dwarf_section)]) |s| s.data else null;
     }

     pub fn deinit(di: *DwarfInfo, allocator: mem.Allocator) void {
+        for (di.sections) |section| {
+            if (section) |s| if (s.owned) allocator.free(s.data);
+        }
         for (di.abbrev_table_list.items) |*abbrev| {
             abbrev.deinit();
         }
@@ -696,6 +707,8 @@ pub const DwarfInfo = struct {
             func.deinit(allocator);
         }
         di.func_list.deinit(allocator);
+        di.cie_map.deinit(allocator);
+        di.fde_list.deinit(allocator);
     }

     pub fn getSymbolName(di: *DwarfInfo, address: u64) ?[]const u8 {
@@ -1443,7 +1456,6 @@ pub const DwarfInfo = struct {
         return getStringGeneric(di.section(.debug_line_str), offset);
     }

-
     fn readDebugAddr(di: DwarfInfo, compile_unit: CompileUnit, index: u64) !u64 {
         const debug_addr = di.section(.debug_addr) orelse return badDwarf();

@@ -1470,12 +1482,13 @@ pub const DwarfInfo = struct {
         };
     }

-    pub fn scanAllUnwindInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
+    pub fn scanAllUnwindInfo(di: *DwarfInfo, allocator: mem.Allocator, binary_mem: []const u8) !void {
         var has_eh_frame_hdr = false;
-        if (di.section(.eh_frame)) |eh_frame_hdr| {
+        if (di.section(.eh_frame_hdr)) |eh_frame_hdr| {
             has_eh_frame_hdr = true;

-            // TODO: Parse this section
+            // TODO: Parse this section to get the lookup table, and skip loading the entire section
             _ = eh_frame_hdr;
         }

@@ -1494,16 +1507,14 @@ pub const DwarfInfo = struct {
             }

             const id_len = @as(u8, if (is_64) 8 else 4);
-            const id = if (is_64) try reader.readInt(u64, di.endian) else try reader.readInt(u32, di.endian);
+            const entry_bytes = eh_frame[stream.pos..][0 .. length - id_len];
+            const id = try reader.readInt(u32, di.endian);

+            // TODO: Get section_offset here (pass in from headers)
+
             if (id == 0) {
                 const cie = try CommonInformationEntry.parse(
                     entry_bytes,
-                    @ptrToInt(eh_frame.ptr),
-                    0,
+                    @ptrToInt(eh_frame.ptr) - @ptrToInt(binary_mem.ptr),
+                    true,
                     length_offset,
                     @sizeOf(usize),
@@ -1511,12 +1522,12 @@ pub const DwarfInfo = struct {
                 );
                 try di.cie_map.put(allocator, length_offset, cie);
             } else {
-                const cie_offset = stream.pos - 4 - id;
+                const cie_offset = stream.pos - id_len - id;
                 const cie = di.cie_map.get(cie_offset) orelse return badDwarf();
                 const fde = try FrameDescriptionEntry.parse(
                     entry_bytes,
-                    @ptrToInt(eh_frame.ptr),
-                    0,
+                    @ptrToInt(eh_frame.ptr) - @ptrToInt(binary_mem.ptr),
+                    true,
                     cie,
                     @sizeOf(usize),
@@ -1524,6 +1535,8 @@ pub const DwarfInfo = struct {
                 );
                 try di.fde_list.append(allocator, fde);
             }
+
+            stream.pos += entry_bytes.len;
         }

         // TODO: Avoid sorting if has_eh_frame_hdr exists
@@ -1536,16 +1549,116 @@ pub const DwarfInfo = struct {
         }
     }

+    pub fn unwindFrame(di: *const DwarfInfo, allocator: mem.Allocator, context: *UnwindContext, module_base_address: usize) !void {
+        if (context.pc == 0) return;
+
+        // TODO: Handle signal frame (ie. use_prev_instr in libunwind)
+        // TODO: Use eh_frame_hdr to accelerate the search if available
+        //const eh_frame_hdr = di.section(.eh_frame_hdr) orelse return error.MissingDebugInfo;
+
+        // Find the FDE
+        const unmapped_pc = context.pc - module_base_address;
+        const index = std.sort.binarySearch(FrameDescriptionEntry, unmapped_pc, di.fde_list.items, {}, struct {
+            pub fn compareFn(_: void, pc: usize, mid_item: FrameDescriptionEntry) std.math.Order {
+                if (pc < mid_item.pc_begin) {
+                    return .lt;
+                } else {
+                    const range_end = mid_item.pc_begin + mid_item.pc_range;
+                    if (pc < range_end) {
+                        return .eq;
+                    }
+
+                    return .gt;
+                }
+            }
+        }.compareFn);
+
+        const fde = if (index) |i| &di.fde_list.items[i] else return error.MissingFDE;
+        const cie = di.cie_map.getPtr(fde.cie_length_offset) orelse return error.MissingCIE;
+
+        // const prev_cfa = context.cfa;
+        // const prev_pc = context.pc;
+
+        // TODO: Cache this on self so we can re-use the allocations?
+        var vm = call_frame.VirtualMachine{};
+        defer vm.deinit(allocator);
+
+        const row = try vm.runToNative(allocator, unmapped_pc, cie.*, fde.*);
+        context.cfa = switch (row.cfa.rule) {
+            .val_offset => |offset| blk: {
+                const register = row.cfa.register orelse return error.InvalidCFARule;
+                const value = mem.readIntSliceNative(usize, try abi.regBytes(&context.ucontext, register));
+
+                // TODO: Check isValidMemory?
+                break :blk try call_frame.applyOffset(value, offset);
+            },
+            .expression => |expression| {
+                // TODO: Evaluate expression
+                _ = expression;
+                return error.UnimplementedTODO;
+            },
+            else => return error.InvalidCFARule,
+        };
+
+        // Update the context with the unwound values
+        // TODO: Need old cfa and pc?
+
+        var next_ucontext = context.ucontext;
+
+        var has_next_ip = false;
+        for (vm.rowColumns(row)) |column| {
+            if (column.register) |register| {
+                const dest = try abi.regBytes(&next_ucontext, register);
+                if (register == cie.return_address_register) {
+                    has_next_ip = column.rule != .undefined;
+                }
+
+                try column.resolveValue(context.*, dest);
+            }
+        }
+
+        context.ucontext = next_ucontext;
+
+        if (has_next_ip) {
+            context.pc = mem.readIntSliceNative(usize, try abi.regBytes(&context.ucontext, @enumToInt(abi.Register.ip)));
+        } else {
+            context.pc = 0;
+        }
+
+        mem.writeIntSliceNative(usize, try abi.regBytes(&context.ucontext, @enumToInt(abi.Register.sp)), context.cfa.?);
+    }
 };

+pub const UnwindContext = struct {
+    cfa: ?usize,
+    pc: usize,
+    ucontext: os.ucontext_t,
+
+    pub fn init(ucontext: *const os.ucontext_t) !UnwindContext {
+        const pc = mem.readIntSliceNative(usize, try abi.regBytes(ucontext, @enumToInt(abi.Register.ip)));
+        return .{
+            .cfa = null,
+            .pc = pc,
+            .ucontext = ucontext.*,
+        };
+    }
+
+    pub fn getFp(self: *const UnwindContext) !usize {
+        return mem.readIntSliceNative(usize, try abi.regBytes(&self.ucontext, @enumToInt(abi.Register.fp)));
+    }
+};

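Taken together, a complete DWARF unwind loop over these pieces might look like the following sketch (assuming di: DwarfInfo already has its CIE/FDE lists populated, and allocator, ucontext, and module_base_address come from the caller):

    var context = try UnwindContext.init(&ucontext);
    while (context.pc != 0) {
        // Each step computes the CFA, restores the saved registers into the
        // context, and loads the return address into context.pc.
        try di.unwindFrame(allocator, &context, module_base_address);
        std.debug.print("frame pc: 0x{x}\n", .{context.pc});
    }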
 /// Initialize DWARF info. The caller has the responsibility to initialize most
-/// the DwarfInfo fields before calling.
-pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator) !void {
+/// of the DwarfInfo fields before calling. `binary_mem` is the raw bytes of the
+/// main binary file (not the secondary debug info file).
+pub fn openDwarfDebugInfo(di: *DwarfInfo, allocator: mem.Allocator, binary_mem: []const u8) !void {
     try di.scanAllFunctions(allocator);
     try di.scanAllCompileUnits(allocator);

     // Unwind info is not required
-    di.scanAllUnwindInfo(allocator) catch {};
+    di.scanAllUnwindInfo(allocator, binary_mem) catch {};
 }

 /// This function is to make it handy to comment out the return and make it

@@ -1,4 +1,110 @@
 const builtin = @import("builtin");
 const std = @import("../std.zig");
 const os = std.os;
 const mem = std.mem;

+/// Maps register names to their DWARF register number.
+/// `fp`, `ip`, and `sp` are provided as aliases.
+pub const Register = switch (builtin.cpu.arch) {
+    .x86 => {
+        //pub const ip = Register.eip;
+        //pub const sp = Register.
+    },
+    .x86_64 => enum(u8) {
+        rax,
+        rdx,
+        rcx,
+        rbx,
+        rsi,
+        rdi,
+        rbp,
+        rsp,
+        r8,
+        r9,
+        r10,
+        r11,
+        r12,
+        r13,
+        r14,
+        r15,
+        rip,
+        xmm0,
+        xmm1,
+        xmm2,
+        xmm3,
+        xmm4,
+        xmm5,
+        xmm6,
+        xmm7,
+        xmm8,
+        xmm9,
+        xmm10,
+        xmm11,
+        xmm12,
+        xmm13,
+        xmm14,
+        xmm15,
+
+        pub const fp = Register.rbp;
+        pub const ip = Register.rip;
+        pub const sp = Register.rsp;
+    },
+    else => enum {},
+};
+
+fn RegBytesReturnType(comptime ContextPtrType: type) type {
+    const info = @typeInfo(ContextPtrType);
+    if (info != .Pointer or info.Pointer.child != os.ucontext_t) {
+        @compileError("Expected a pointer to ucontext_t, got " ++ @typeName(@TypeOf(ContextPtrType)));
+    }
+
+    return if (info.Pointer.is_const) []const u8 else []u8;
+}
+
+/// Returns a slice containing the backing storage for `reg_number`
+pub fn regBytes(ucontext_ptr: anytype, reg_number: u8) !RegBytesReturnType(@TypeOf(ucontext_ptr)) {
+    var m = &ucontext_ptr.mcontext;
+
+    return switch (builtin.cpu.arch) {
+        .x86_64 => switch (builtin.os.tag) {
+            .linux, .netbsd, .solaris => switch (reg_number) {
+                0 => mem.asBytes(&m.gregs[os.REG.RAX]),
+                1 => mem.asBytes(&m.gregs[os.REG.RDX]),
+                2 => mem.asBytes(&m.gregs[os.REG.RCX]),
+                3 => mem.asBytes(&m.gregs[os.REG.RBX]),
+                4 => mem.asBytes(&m.gregs[os.REG.RSI]),
+                5 => mem.asBytes(&m.gregs[os.REG.RDI]),
+                6 => mem.asBytes(&m.gregs[os.REG.RBP]),
+                7 => mem.asBytes(&m.gregs[os.REG.RSP]),
+                8 => mem.asBytes(&m.gregs[os.REG.R8]),
+                9 => mem.asBytes(&m.gregs[os.REG.R9]),
+                10 => mem.asBytes(&m.gregs[os.REG.R10]),
+                11 => mem.asBytes(&m.gregs[os.REG.R11]),
+                12 => mem.asBytes(&m.gregs[os.REG.R12]),
+                13 => mem.asBytes(&m.gregs[os.REG.R13]),
+                14 => mem.asBytes(&m.gregs[os.REG.R14]),
+                15 => mem.asBytes(&m.gregs[os.REG.R15]),
+                16 => mem.asBytes(&m.gregs[os.REG.RIP]),
+                17...32 => |i| mem.asBytes(&m.fpregs.xmm[i - 17]),
+                else => error.InvalidRegister,
+            },
+            //.freebsd => @intCast(usize, ctx.mcontext.rip),
+            //.openbsd => @intCast(usize, ctx.sc_rip),
+            //.macos => @intCast(usize, ctx.mcontext.ss.rip),
+            else => error.UnimplementedOs,
+        },
+        else => error.UnimplementedArch,
+    };
+}

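For example, on x86_64 Linux the saved instruction pointer can be read back out of a context through its DWARF register number (a sketch; ucontext is assumed to have been captured elsewhere):

    const ip_bytes = try regBytes(&ucontext, @enumToInt(Register.ip));
    const ip = mem.readIntSliceNative(usize, ip_bytes);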
+/// Returns the ABI-defined default value this register has in the unwinding table
+/// before running any of the CIE instructions.
+pub fn getRegDefaultValue(reg_number: u8, out: []u8) void {
+    // TODO: Implement any ABI-specific rules for the default value for registers
+    _ = reg_number;
+    @memset(out, undefined);
+}
+
+fn writeUnknownReg(writer: anytype, reg_number: u8) !void {
+    try writer.print("reg{}", .{reg_number});

@@ -1,5 +1,6 @@
+const builtin = @import("builtin");
 const std = @import("../std.zig");
 const mem = std.mem;
 const debug = std.debug;
 const leb = @import("../leb128.zig");
 const abi = @import("abi.zig");
@@ -216,10 +217,19 @@ pub const Instruction = union(Opcode) {
     }
 };

-/// This is a virtual machine that runs DWARF call frame instructions.
-/// See section 6.4.1 of the DWARF5 specification.
-pub const VirtualMachine = struct {
+/// Since register rules are applied (usually) during a panic,
+/// checked addition / subtraction is used so that we can return
+/// an error and fall back to FP-based unwinding.
+pub fn applyOffset(base: usize, offset: i64) !usize {
+    return if (offset >= 0)
+        try std.math.add(usize, base, @intCast(usize, offset))
+    else
+        try std.math.sub(usize, base, @intCast(usize, -offset));
+}
+
+/// This is a virtual machine that runs DWARF call frame instructions.
+pub const VirtualMachine = struct {
+    /// See section 6.4.1 of the DWARF5 specification for details on each
     const RegisterRule = union(enum) {
         // The spec says that the default rule for each column is the undefined rule.
         // However, it also allows ABI / compiler authors to specify alternate defaults, so
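The checked arithmetic means a corrupt offset surfaces as error.Overflow instead of silently wrapping; a small sketch:

    const cfa: usize = 0x7fff_0000;
    const below = try applyOffset(cfa, -16); // 0x7ffe_fff0: a save slot below the CFA
    const above = try applyOffset(cfa, 8); // 0x7fff_0008
    // applyOffset(0, -1) returns error.Overflow rather than wrapping around.
    _ = below;
    _ = above;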
@@ -254,20 +264,63 @@ pub const VirtualMachine = struct {
         offset: u64 = 0,

         /// Special-case column that defines the CFA (Canonical Frame Address) rule.
-        /// The register field of this column defines the register that CFA is derived
-        /// from, while other columns define register rules in terms of the CFA.
+        /// The register field of this column defines the register that CFA is derived from.
         cfa: Column = .{},

+        /// The register fields in these columns define the register the rule applies to.
         columns: ColumnRange = .{},

         /// Indicates that the next write to any column in this row needs to copy
-        /// the backing column storage first.
+        /// the backing column storage first, as it may be referenced by previous rows.
         copy_on_write: bool = false,
     };

+    pub const Column = struct {
+        /// Register can only be null in the case of the CFA column
+        register: ?u8 = null,
+        rule: RegisterRule = .{ .default = {} },
+
+        /// Resolves the register rule and places the result into `out` (see dwarf.abi.regBytes)
+        pub fn resolveValue(self: Column, context: dwarf.UnwindContext, out: []u8) !void {
+            switch (self.rule) {
+                .default => {
+                    const register = self.register orelse return error.InvalidRegister;
+                    abi.getRegDefaultValue(register, out);
+                },
+                .undefined => {
+                    @memset(out, undefined);
+                },
+                .same_value => {},
+                .offset => |offset| {
+                    if (context.cfa) |cfa| {
+                        const ptr = @intToPtr(*const usize, try applyOffset(cfa, offset));
+
+                        // TODO: context.isValidMemory(ptr)
+                        mem.writeIntSliceNative(usize, out, ptr.*);
+                    } else return error.InvalidCFA;
+                },
+                .val_offset => |offset| {
+                    if (context.cfa) |cfa| {
+                        mem.writeIntSliceNative(usize, out, try applyOffset(cfa, offset));
+                    } else return error.InvalidCFA;
+                },
+                .register => |register| {
+                    const src = try abi.regBytes(&context.ucontext, register);
+                    if (src.len != out.len) return error.RegisterTypeMismatch;
+                    @memcpy(out, try abi.regBytes(&context.ucontext, register));
+                },
+                .expression => |expression| {
+                    // TODO
+                    _ = expression;
+                    unreachable;
+                },
+                .val_expression => |expression| {
+                    // TODO
+                    _ = expression;
+                    unreachable;
+                },
+                .architectural => return error.UnimplementedRule,
+            }
+        }
+    };
+
     const ColumnRange = struct {
@@ -294,7 +347,7 @@ pub const VirtualMachine = struct {
         return self.columns.items[row.columns.start..][0..row.columns.len];
     }

-    /// Either retrieves or adds a column for `register` (non-CFA) in the current row
+    /// Either retrieves or adds a column for `register` (non-CFA) in the current row.
     fn getOrAddColumn(self: *VirtualMachine, allocator: std.mem.Allocator, register: u8) !*Column {
         for (self.rowColumns(self.current_row)) |*c| {
             if (c.register == register) return c;
@@ -315,7 +368,7 @@ pub const VirtualMachine = struct {

     /// Runs the CIE instructions, then the FDE instructions. Execution halts
     /// once the row that corresponds to `pc` is known, and it is returned.
-    pub fn unwindTo(
+    pub fn runTo(
         self: *VirtualMachine,
         allocator: std.mem.Allocator,
         pc: u64,
@@ -328,12 +381,15 @@ pub const VirtualMachine = struct {
         if (pc < fde.pc_begin or pc >= fde.pc_begin + fde.pc_range) return error.AddressOutOfRange;

         var prev_row: Row = self.current_row;
-        const streams = .{
-            std.io.fixedBufferStream(cie.initial_instructions),
-            std.io.fixedBufferStream(fde.instructions),
+
+        var cie_stream = std.io.fixedBufferStream(cie.initial_instructions);
+        var fde_stream = std.io.fixedBufferStream(fde.instructions);
+        var streams = [_]*std.io.FixedBufferStream([]const u8){
+            &cie_stream,
+            &fde_stream,
         };

-        outer: for (streams, 0..) |*stream, i| {
+        outer: for (&streams, 0..) |stream, i| {
             while (stream.pos < stream.buffer.len) {
                 const instruction = try dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
                 prev_row = try self.step(allocator, cie, i == 0, instruction);
@@ -346,14 +402,14 @@ pub const VirtualMachine = struct {
         return prev_row;
     }

-    pub fn unwindToNative(
+    pub fn runToNative(
         self: *VirtualMachine,
         allocator: std.mem.Allocator,
         pc: u64,
         cie: dwarf.CommonInformationEntry,
         fde: dwarf.FrameDescriptionEntry,
-    ) void {
-        self.stepTo(allocator, pc, cie, fde, @sizeOf(usize), builtin.target.cpu.arch.endian());
+    ) !Row {
+        return self.runTo(allocator, pc, cie, fde, @sizeOf(usize), builtin.target.cpu.arch.endian());
     }

     fn resolveCopyOnWrite(self: *VirtualMachine, allocator: std.mem.Allocator) !void {
@@ -451,30 +507,30 @@ pub const VirtualMachine = struct {
                 try self.resolveCopyOnWrite(allocator);
                 self.current_row.cfa = .{
                     .register = i.operands.register,
-                    .rule = .{ .offset = @intCast(i64, i.operands.offset) },
+                    .rule = .{ .val_offset = @intCast(i64, i.operands.offset) },
                 };
             },
             .def_cfa_sf => |i| {
                 try self.resolveCopyOnWrite(allocator);
                 self.current_row.cfa = .{
                     .register = i.operands.register,
-                    .rule = .{ .offset = i.operands.offset * cie.data_alignment_factor },
+                    .rule = .{ .val_offset = i.operands.offset * cie.data_alignment_factor },
                 };
             },
             .def_cfa_register => |i| {
                 try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .offset) return error.InvalidOperation;
+                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
                 self.current_row.cfa.register = i.operands.register;
             },
             .def_cfa_offset => |i| {
                 try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .offset) return error.InvalidOperation;
-                self.current_row.cfa.rule = .{ .offset = @intCast(i64, i.operands.offset) };
+                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
+                self.current_row.cfa.rule = .{ .val_offset = @intCast(i64, i.operands.offset) };
             },
             .def_cfa_offset_sf => |i| {
                 try self.resolveCopyOnWrite(allocator);
-                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .offset) return error.InvalidOperation;
-                self.current_row.cfa.rule = .{ .offset = i.operands.offset * cie.data_alignment_factor };
+                if (self.current_row.cfa.register == null or self.current_row.cfa.rule != .val_offset) return error.InvalidOperation;
+                self.current_row.cfa.rule = .{ .val_offset = i.operands.offset * cie.data_alignment_factor };
             },
             .def_cfa_expression => |i| {
                 try self.resolveCopyOnWrite(allocator);
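With the rule renamed, a DW_CFA_def_cfa 7, 16 instruction on x86_64 records .register = 7 (rsp) with .rule = .{ .val_offset = 16 }, and the unwinder then computes the CFA as the register's value plus the offset; a sketch of that arithmetic (ucontext is assumed):

    const rsp = mem.readIntSliceNative(usize, try abi.regBytes(&ucontext, 7)); // DWARF reg 7 = rsp
    const cfa = try applyOffset(rsp, 16);
    _ = cfa;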
@@ -490,9 +546,18 @@ pub const VirtualMachine = struct {
                     .expression = i.operands.block,
                 };
             },
-            .val_offset => {},
-            .val_offset_sf => {},
-            .val_expression => {},
+            .val_offset => {
+                // TODO: Implement
+                unreachable;
+            },
+            .val_offset_sf => {
+                // TODO: Implement
+                unreachable;
+            },
+            .val_expression => {
+                // TODO: Implement
+                unreachable;
+            },
         }

         return prev_row;