Mirror of https://github.com/ziglang/zig.git
std.debug: go back to storing return addresses instead of call addresses
...and just deal with signal handlers by adding 1 to create a fake "return address". The system I tried out, where the addresses returned by `StackIterator` were pre-subtracted, didn't play nicely with error traces, which, in hindsight, makes perfect sense. This definition also removes some ugly off-by-one issues in matching `first_address`, so I do think this is a better approach.
parent 9434bab313
commit 2ab650b481
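To make the convention concrete, here is a minimal standalone sketch (not part of this commit; the helper, the fault address, and all values are purely illustrative) of how a stored return address relates to the call site, and how adding 1 turns a signal handler's faulting PC into a fake "return address" that the same subtract-1 step maps back to the faulting instruction:

```zig
// Illustrative sketch only: shows the return-address convention this commit
// adopts, not the std.debug implementation itself.
const std = @import("std");

noinline fn capture(slot: *usize) void {
    // A stack-trace capture records return addresses; `@returnAddress()` is
    // the instruction *after* the call to `capture`.
    slot.* = @returnAddress();
}

pub fn main() void {
    var ret_addr: usize = 0;
    capture(&ret_addr);

    // When symbolizing, step back one byte so the lookup lands *inside* the
    // call instruction (important when the callee is noreturn and the call is
    // the caller's last instruction).
    const lookup_addr = ret_addr -| 1;

    // A signal frame hands us the faulting PC, not a return address. Adding 1
    // creates a fake "return address" so the same -1 adjustment above recovers
    // the faulting instruction. (The value here is made up for illustration.)
    const fault_pc: usize = 0x1000;
    const fake_ret_addr = fault_pc +| 1;

    std.debug.print("ret=0x{x} lookup=0x{x} fake_ret=0x{x}\n", .{ ret_addr, lookup_addr, fake_ret_addr });
}
```

This is the same adjustment the diff below applies when printing (`ret_addr -| 1`) and in `StackIterator.next` for the first frame (`first_pc +| 1`).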
@@ -577,14 +577,12 @@ pub fn captureCurrentStackTrace(options: StackUnwindOptions, addr_buf: []usize)
     while (true) switch (it.next()) {
         .switch_to_fp => if (!it.stratOk(options.allow_unsafe_unwind)) break,
         .end => break,
-        .frame => |pc_addr| {
+        .frame => |ret_addr| {
             if (wait_for) |target| {
-                // Possible off-by-one error: `pc_addr` might be one less than the return address (so
-                // that it falls *inside* the function call), while `target` *is* a return address.
-                if (pc_addr != target and pc_addr + 1 != target) continue;
+                if (ret_addr != target) continue;
                 wait_for = null;
             }
-            if (frame_idx < addr_buf.len) addr_buf[frame_idx] = pc_addr;
+            if (frame_idx < addr_buf.len) addr_buf[frame_idx] = ret_addr;
             frame_idx += 1;
         },
     };
@@ -659,14 +657,14 @@ pub fn writeCurrentStackTrace(options: StackUnwindOptions, writer: *Writer, tty_
             }
         },
         .end => break,
-        .frame => |pc_addr| {
+        .frame => |ret_addr| {
             if (wait_for) |target| {
-                // Possible off-by-one error: `pc_addr` might be one less than the return address (so
-                // that it falls *inside* the function call), while `target` *is* a return address.
-                if (pc_addr != target and pc_addr + 1 != target) continue;
+                if (ret_addr != target) continue;
                 wait_for = null;
             }
-            try printSourceAtAddress(di_gpa, di, writer, pc_addr, tty_config);
+            // `ret_addr` is the return address, which is *after* the function call.
+            // Subtract 1 to get an address *in* the function call for a better source location.
+            try printSourceAtAddress(di_gpa, di, writer, ret_addr -| 1, tty_config);
             printed_any_frame = true;
         },
     };
@@ -712,8 +710,10 @@ pub fn writeStackTrace(st: *const std.builtin.StackTrace, writer: *Writer, tty_c
         },
     };
     const captured_frames = @min(n_frames, st.instruction_addresses.len);
-    for (st.instruction_addresses[0..captured_frames]) |pc_addr| {
-        try printSourceAtAddress(di_gpa, di, writer, pc_addr, tty_config);
+    for (st.instruction_addresses[0..captured_frames]) |ret_addr| {
+        // `ret_addr` is the return address, which is *after* the function call.
+        // Subtract 1 to get an address *in* the function call for a better source location.
+        try printSourceAtAddress(di_gpa, di, writer, ret_addr -| 1, tty_config);
     }
     if (n_frames > captured_frames) {
         tty_config.setColor(writer, .bold) catch {};
@@ -787,7 +787,7 @@ const StackIterator = union(enum) {
     }

     const Result = union(enum) {
-        /// A stack frame has been found; this is the corresponding program counter address.
+        /// A stack frame has been found; this is the corresponding return address.
         frame: usize,
         /// The end of the stack has been reached.
         end,
@@ -797,18 +797,21 @@ const StackIterator = union(enum) {
             err: SelfInfo.Error,
         },
     };

     fn next(it: *StackIterator) Result {
         switch (it.*) {
             .di_first => |unwind_context| {
                 const first_pc = unwind_context.pc;
                 if (first_pc == 0) return .end;
                 it.* = .{ .di = unwind_context };
-                return .{ .frame = first_pc };
+                // The caller expects *return* addresses, where they will subtract 1 to find the address of the call.
+                // However, we have the actual current PC, which should not be adjusted. Compensate by adding 1.
+                return .{ .frame = first_pc +| 1 };
             },
             .di => |*unwind_context| {
                 const di = getSelfDebugInfo() catch unreachable;
                 const di_gpa = getDebugInfoAllocator();
-                di.unwindFrame(di_gpa, unwind_context) catch |err| {
+                const ret_addr = di.unwindFrame(di_gpa, unwind_context) catch |err| {
                     const pc = unwind_context.pc;
                     it.* = .{ .fp = unwind_context.getFp() };
                     return .{ .switch_to_fp = .{
@@ -816,8 +819,8 @@ const StackIterator = union(enum) {
                         .err = err,
                     } };
                 };
-                const pc = unwind_context.pc;
-                return if (pc == 0) .end else .{ .frame = pc };
+                if (ret_addr <= 1) return .end;
+                return .{ .frame = ret_addr };
             },
             .fp => |fp| {
                 if (fp == 0) return .end; // we reached the "sentinel" base pointer
@@ -845,7 +848,7 @@ const StackIterator = union(enum) {
                 it.fp = bp;
                 const ra = stripInstructionPtrAuthCode(ra_ptr.*);
                 if (ra <= 1) return .end;
-                return .{ .frame = ra - 1 };
+                return .{ .frame = ra };
             },
         }
     }
@@ -53,7 +53,7 @@ pub fn deinit(self: *SelfInfo, gpa: Allocator) void {
     if (Module.LookupCache != void) self.lookup_cache.deinit(gpa);
 }

-pub fn unwindFrame(self: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!void {
+pub fn unwindFrame(self: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
     comptime assert(supports_unwinding);
     const module: Module = try .lookup(&self.lookup_cache, gpa, context.pc);
     const gop = try self.modules.getOrPut(gpa, module.key());
@@ -124,15 +124,14 @@ pub fn getModuleNameForAddress(self: *SelfInfo, gpa: Allocator, address: usize)
 /// ///     pointer is unknown, 0 may be returned instead.
 /// pub fn getFp(uc: *UnwindContext) usize;
 /// };
-/// /// Only required if `supports_unwinding == true`. Unwinds a single stack frame.
-/// /// The caller will read the new instruction poiter from the `pc` field.
-/// /// `pc = 0` indicates end of stack / no more frames.
+/// /// Only required if `supports_unwinding == true`. Unwinds a single stack frame, and returns
+/// /// the frame's return address.
 /// pub fn unwindFrame(
 ///     mod: *const Module,
 ///     gpa: Allocator,
 ///     di: *DebugInfo,
 ///     ctx: *UnwindContext,
-/// ) SelfInfo.Error!void;
+/// ) SelfInfo.Error!usize;
 /// ```
 const Module: type = Module: {
     // Allow overriding the target-specific `SelfInfo` implementation by exposing `root.debug.Module`.
@@ -312,7 +311,7 @@ pub const DwarfUnwindContext = struct {
         unwind: *const Dwarf.Unwind,
         load_offset: usize,
         explicit_fde_offset: ?usize,
-    ) Error!void {
+    ) Error!usize {
         return unwindFrameInner(context, gpa, unwind, load_offset, explicit_fde_offset) catch |err| switch (err) {
             error.InvalidDebugInfo, error.MissingDebugInfo, error.OutOfMemory => |e| return e,

@@ -360,10 +359,10 @@ pub const DwarfUnwindContext = struct {
         unwind: *const Dwarf.Unwind,
         load_offset: usize,
         explicit_fde_offset: ?usize,
-    ) !void {
+    ) !usize {
         comptime assert(supports_unwinding);

-        if (context.pc == 0) return;
+        if (context.pc == 0) return 0;

         const pc_vaddr = context.pc - load_offset;

@@ -443,13 +442,19 @@ pub const DwarfUnwindContext = struct {
         // The new CPU context is complete; flush changes.
         context.cpu_context = new_cpu_context;

-        // Also update the stored pc. However, because `return_address` points to the instruction
-        // *after* the call, it could (in the case of noreturn functions) actually point outside of
-        // the caller's address range, meaning an FDE lookup would fail. We can handle this by
-        // subtracting 1 from `return_address` so that the next lookup is guaranteed to land inside
-        // the `call` instruction. The exception to this rule is signal frames, where the return
-        // address is the same instruction that triggered the handler.
-        context.pc = if (cie.is_signal_frame) return_address else return_address -| 1;
+        // The caller will subtract 1 from the return address to get an address corresponding to the
+        // function call. However, if this is a signal frame, that's actually incorrect, because the
+        // "return address" we have is the instruction which triggered the signal (if the signal
+        // handler returned, the instruction would be re-run). Compensate for this by incrementing
+        // the address in that case.
+        const adjusted_ret_addr = if (cie.is_signal_frame) return_address +| 1 else return_address;
+
+        // We also want to do that same subtraction here to get the PC for the next frame's FDE.
+        // This is because if the callee was noreturn, then the function call might be the caller's
+        // last instruction, so `return_address` might actually point outside of it!
+        context.pc = adjusted_ret_addr -| 1;
+
+        return adjusted_ret_addr;
     }
     /// Since register rules are applied (usually) during a panic,
     /// checked addition / subtraction is used so that we can return
@@ -324,7 +324,7 @@ pub const UnwindContext = std.debug.SelfInfo.DwarfUnwindContext;
 /// Unwind a frame using MachO compact unwind info (from __unwind_info).
 /// If the compact encoding can't encode a way to unwind a frame, it will
 /// defer unwinding to DWARF, in which case `.eh_frame` will be used if available.
-pub fn unwindFrame(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) Error!void {
+pub fn unwindFrame(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) Error!usize {
     return unwindFrameInner(module, gpa, di, context) catch |err| switch (err) {
         error.InvalidDebugInfo,
         error.MissingDebugInfo,
@@ -340,7 +340,7 @@ pub fn unwindFrame(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
         => return error.InvalidDebugInfo,
     };
 }
-fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) !void {
+fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) !usize {
     if (di.unwind == null) di.unwind = module.loadUnwindInfo();
     const unwind = &di.unwind.?;

@@ -640,7 +640,13 @@ fn unwindFrameInner(module: *const DarwinModule, gpa: Allocator, di: *DebugInfo,
         else => comptime unreachable, // unimplemented
     };

-    context.pc = std.debug.stripInstructionPtrAuthCode(new_ip) -| 1;
+    const ret_addr = std.debug.stripInstructionPtrAuthCode(new_ip);
+
+    // Like `DwarfUnwindContext.unwindFrame`, adjust our next lookup pc in case the `call` was this
+    // function's last instruction making `ret_addr` one byte past its end.
+    context.pc = ret_addr -| 1;
+
+    return ret_addr;
 }
 pub const DebugInfo = struct {
     unwind: ?Unwind,
@@ -230,7 +230,7 @@ fn loadUnwindInfo(module: *const ElfModule, gpa: Allocator, di: *DebugInfo) Erro
         else => unreachable,
     }
 }
-pub fn unwindFrame(module: *const ElfModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) Error!void {
+pub fn unwindFrame(module: *const ElfModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) Error!usize {
     if (di.unwind[0] == null) try module.loadUnwindInfo(gpa, di);
     std.debug.assert(di.unwind[0] != null);
     for (&di.unwind) |*opt_unwind| {
@@ -373,7 +373,7 @@ pub const UnwindContext = struct {
         return ctx.cur.getRegs().bp;
     }
 };
-pub fn unwindFrame(module: *const WindowsModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) !void {
+pub fn unwindFrame(module: *const WindowsModule, gpa: Allocator, di: *DebugInfo, context: *UnwindContext) !usize {
     _ = module;
     _ = gpa;
     _ = di;
@@ -403,9 +403,12 @@ pub fn unwindFrame(module: *const WindowsModule, gpa: Allocator, di: *DebugInfo,
     const tib = &windows.teb().NtTib;
     if (next_regs.sp < @intFromPtr(tib.StackLimit) or next_regs.sp > @intFromPtr(tib.StackBase)) {
         context.pc = 0;
-    } else {
-        context.pc = next_regs.ip -| 1;
+        return 0;
     }
+    // Like `DwarfUnwindContext.unwindFrame`, adjust our next lookup pc in case the `call` was this
+    // function's last instruction making `next_regs.ip` one byte past its end.
+    context.pc = next_regs.ip -| 1;
+    return next_regs.ip;
 }

 const WindowsModule = @This();
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const fatal = std.process.fatal;

 noinline fn frame3(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[0] = @returnAddress() - 1;
+    expected[0] = @returnAddress();
     return std.debug.captureCurrentStackTrace(.{
         .first_address = @returnAddress(),
         .allow_unsafe_unwind = true,
@@ -58,12 +58,12 @@ noinline fn frame2(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTr
         }
     }

-    expected[1] = @returnAddress() - 1;
+    expected[1] = @returnAddress();
     return frame3(expected, addr_buf);
 }

 noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[2] = @returnAddress() - 1;
+    expected[2] = @returnAddress();

     // Use a stack frame that is too big to encode in __unwind_info's stack-immediate encoding
     // to exercise the stack-indirect encoding path
@@ -74,7 +74,7 @@ noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTr
 }

 noinline fn frame0(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[3] = @returnAddress() - 1;
+    expected[3] = @returnAddress();
     return frame1(expected, addr_buf);
 }

@@ -3,7 +3,7 @@
 const std = @import("std");

 noinline fn frame3(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[0] = @returnAddress() - 1;
+    expected[0] = @returnAddress();
     return std.debug.captureCurrentStackTrace(.{
         .first_address = @returnAddress(),
         .allow_unsafe_unwind = true,
@@ -11,12 +11,12 @@ noinline fn frame3(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTr
 }

 noinline fn frame2(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[1] = @returnAddress() - 1;
+    expected[1] = @returnAddress();
     return frame3(expected, addr_buf);
 }

 noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[2] = @returnAddress() - 1;
+    expected[2] = @returnAddress();

     // Use a stack frame that is too big to encode in __unwind_info's stack-immediate encoding
     // to exercise the stack-indirect encoding path
@@ -27,7 +27,7 @@ noinline fn frame1(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTr
 }

 noinline fn frame0(expected: *[4]usize, addr_buf: *[4]usize) std.builtin.StackTrace {
-    expected[3] = @returnAddress() - 1;
+    expected[3] = @returnAddress();
     return frame1(expected, addr_buf);
 }
