mirror of https://github.com/ziglang/zig.git, synced 2025-12-06 06:13:07 +00:00
Merge pull request #21075 from ziglang/fuzz
fix several debug info bugs
commit b7a1ef3e19
@@ -634,10 +634,28 @@ fn prepareTables(
         const pcs = header.pcAddrs();
         const source_locations = try gpa.alloc(Coverage.SourceLocation, pcs.len);
         errdefer gpa.free(source_locations);
-        debug_info.resolveAddresses(gpa, pcs, source_locations) catch |err| {
+
+        // Unfortunately the PCs array that LLVM gives us from the 8-bit PC
+        // counters feature is not sorted.
+        var sorted_pcs: std.MultiArrayList(struct { pc: u64, index: u32, sl: Coverage.SourceLocation }) = .{};
+        defer sorted_pcs.deinit(gpa);
+        try sorted_pcs.resize(gpa, pcs.len);
+        @memcpy(sorted_pcs.items(.pc), pcs);
+        for (sorted_pcs.items(.index), 0..) |*v, i| v.* = @intCast(i);
+        sorted_pcs.sortUnstable(struct {
+            addrs: []const u64,
+
+            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
+                return ctx.addrs[a_index] < ctx.addrs[b_index];
+            }
+        }{ .addrs = sorted_pcs.items(.pc) });
+
+        debug_info.resolveAddresses(gpa, sorted_pcs.items(.pc), sorted_pcs.items(.sl)) catch |err| {
             log.err("failed to resolve addresses to source locations: {s}", .{@errorName(err)});
             return error.AlreadyReported;
         };
+
+        for (sorted_pcs.items(.index), sorted_pcs.items(.sl)) |i, sl| source_locations[i] = sl;
         gop.value_ptr.source_locations = source_locations;
 
         ws.coverage_condition.broadcast();
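
The hunk above works around unsorted PC arrays by sorting a copy that remembers each element's original position, resolving in sorted order, then scattering results back. A minimal self-contained sketch of that pattern (the name `pcRanks` and its toy "work" of computing ranks are illustrative, not from the commit):

    const std = @import("std");

    /// Sorts a copy of `pcs` while tracking original indices, does the
    /// "work" in ascending order (here: assigning ranks), then scatters
    /// the results back to each element's original slot.
    fn pcRanks(gpa: std.mem.Allocator, pcs: []const u64, out: []u64) !void {
        var sorted: std.MultiArrayList(struct { pc: u64, index: u32 }) = .{};
        defer sorted.deinit(gpa);
        try sorted.resize(gpa, pcs.len);
        @memcpy(sorted.items(.pc), pcs);
        for (sorted.items(.index), 0..) |*v, i| v.* = @intCast(i);
        sorted.sortUnstable(struct {
            addrs: []const u64,

            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                return ctx.addrs[a_index] < ctx.addrs[b_index];
            }
        }{ .addrs = sorted.items(.pc) });
        // The element now at sorted position `rank` originally lived at
        // slot `original_slot`; scatter the rank back to that slot.
        for (sorted.items(.index), 0..) |original_slot, rank| out[original_slot] = rank;
    }

    test pcRanks {
        const gpa = std.testing.allocator;
        var out: [3]u64 = undefined;
        try pcRanks(gpa, &.{ 30, 10, 20 }, &out);
        try std.testing.expectEqualSlices(u64, &.{ 2, 0, 1 }, &out);
    }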
@@ -664,8 +682,8 @@ fn addEntryPoint(ws: *WebServer, coverage_id: u64, addr: u64) error{ AlreadyRepo
     if (false) {
         const sl = coverage_map.source_locations[index];
         const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
-        log.debug("server found entry point {s}:{d}:{d}", .{
-            file_name, sl.line, sl.column,
+        log.debug("server found entry point for 0x{x} at {s}:{d}:{d}", .{
+            addr, file_name, sl.line, sl.column,
         });
     }
     const gpa = ws.gpa;
@@ -145,52 +145,48 @@ pub const ResolveAddressesDwarfError = Dwarf.ScanError;
 pub fn resolveAddressesDwarf(
     cov: *Coverage,
     gpa: Allocator,
     /// Asserts the addresses are in ascending order.
     sorted_pc_addrs: []const u64,
     /// Asserts its length equals length of `sorted_pc_addrs`.
     output: []SourceLocation,
     d: *Dwarf,
 ) ResolveAddressesDwarfError!void {
     assert(sorted_pc_addrs.len == output.len);
-    assert(d.compile_units_sorted);
+    assert(d.ranges.items.len != 0); // call `populateRanges` first.
 
-    var cu_i: usize = 0;
-    var line_table_i: usize = 0;
-    var cu: *Dwarf.CompileUnit = &d.compile_unit_list.items[0];
-    var range = cu.pc_range.?;
+    var range_i: usize = 0;
+    var range: *std.debug.Dwarf.Range = &d.ranges.items[0];
+    var line_table_i: usize = undefined;
+    var prev_pc: u64 = 0;
+    var prev_cu: ?*std.debug.Dwarf.CompileUnit = null;
     // Protects directories and files tables from other threads.
     cov.mutex.lock();
     defer cov.mutex.unlock();
     next_pc: for (sorted_pc_addrs, output) |pc, *out| {
+        assert(pc >= prev_pc);
+        prev_pc = pc;
+
         while (pc >= range.end) {
-            cu_i += 1;
-            if (cu_i >= d.compile_unit_list.items.len) {
+            range_i += 1;
+            if (range_i >= d.ranges.items.len) {
                 out.* = SourceLocation.invalid;
                 continue :next_pc;
             }
-            cu = &d.compile_unit_list.items[cu_i];
-            line_table_i = 0;
-            range = cu.pc_range orelse {
-                out.* = SourceLocation.invalid;
-                continue :next_pc;
-            };
+            range = &d.ranges.items[range_i];
         }
         if (pc < range.start) {
            out.* = SourceLocation.invalid;
            continue :next_pc;
         }
-        if (line_table_i == 0) {
-            line_table_i = 1;
+        const cu = &d.compile_unit_list.items[range.compile_unit_index];
+        if (cu != prev_cu) {
+            prev_cu = cu;
            if (cu.src_loc_cache == null) {
                cov.mutex.unlock();
                defer cov.mutex.lock();
                d.populateSrcLocCache(gpa, cu) catch |err| switch (err) {
                    error.MissingDebugInfo, error.InvalidDebugInfo => {
                        out.* = SourceLocation.invalid;
-                        cu_i += 1;
-                        if (cu_i < d.compile_unit_list.items.len) {
-                            cu = &d.compile_unit_list.items[cu_i];
-                            line_table_i = 0;
-                            if (cu.pc_range) |r| range = r;
-                        }
                        continue :next_pc;
                    },
                    else => |e| return e,
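
The rewritten resolver walks two sorted sequences in lockstep, the query PCs and the per-compile-unit ranges, so resolution is a single forward pass. A stripped-down sketch of that walk, with a simplified `Range` and `null` standing in for `SourceLocation.invalid` (both are stand-ins, not the commit's types):

    const std = @import("std");

    const Range = struct { start: u64, end: u64, compile_unit_index: usize };

    /// For each ascending pc, emit the index of the compile unit whose
    /// range covers it, or null when no range does.
    fn resolveUnits(ranges: []const Range, sorted_pcs: []const u64, out: []?usize) void {
        std.debug.assert(sorted_pcs.len == out.len);
        var range_i: usize = 0;
        next_pc: for (sorted_pcs, out) |pc, *o| {
            // Ranges entirely behind the current pc can never match again.
            while (range_i < ranges.len and pc >= ranges[range_i].end) range_i += 1;
            if (range_i == ranges.len or pc < ranges[range_i].start) {
                o.* = null;
                continue :next_pc;
            }
            o.* = ranges[range_i].compile_unit_index;
        }
    }

    test resolveUnits {
        const ranges = [_]Range{
            .{ .start = 0x100, .end = 0x200, .compile_unit_index = 0 },
            .{ .start = 0x300, .end = 0x400, .compile_unit_index = 1 },
        };
        var out: [3]?usize = undefined;
        resolveUnits(&ranges, &.{ 0x150, 0x250, 0x350 }, &out);
        try std.testing.expectEqual(@as(?usize, 0), out[0]);
        try std.testing.expectEqual(@as(?usize, null), out[1]);
        try std.testing.expectEqual(@as(?usize, 1), out[2]);
    }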
@@ -198,7 +194,15 @@ pub fn resolveAddressesDwarf(
             }
             const slc = &cu.src_loc_cache.?;
             const table_addrs = slc.line_table.keys();
-            while (line_table_i < table_addrs.len and table_addrs[line_table_i] < pc) line_table_i += 1;
+            line_table_i = std.sort.upperBound(u64, table_addrs, pc, struct {
+                fn order(context: u64, item: u64) std.math.Order {
+                    return std.math.order(item, context);
+                }
+            }.order);
+        }
+        const slc = &cu.src_loc_cache.?;
+        const table_addrs = slc.line_table.keys();
+        while (line_table_i < table_addrs.len and table_addrs[line_table_i] <= pc) line_table_i += 1;
 
         const entry = slc.line_table.values()[line_table_i - 1];
         const corrected_file_index = entry.file - @intFromBool(slc.version < 5);
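
The `upperBound` call above returns the index of the first line-table address strictly greater than `pc`, so `line_table_i - 1` is the entry covering `pc`. A hand-rolled equivalent, spelled out to show the invariant without depending on any particular `std.sort` signature (the function and test are illustrative only):

    const std = @import("std");

    /// First index whose address is strictly greater than `pc`;
    /// `addrs` must be sorted ascending.
    fn upperBound(addrs: []const u64, pc: u64) usize {
        var lo: usize = 0;
        var hi: usize = addrs.len;
        while (lo < hi) {
            const mid = lo + (hi - lo) / 2;
            if (addrs[mid] <= pc) lo = mid + 1 else hi = mid;
        }
        return lo;
    }

    test upperBound {
        const table_addrs = [_]u64{ 0x1000, 0x1010, 0x1030 };
        // 0x1012 falls in the entry starting at 0x1010.
        const i = upperBound(&table_addrs, 0x1012);
        try std.testing.expectEqual(@as(usize, 2), i);
        try std.testing.expectEqual(@as(u64, 0x1010), table_addrs[i - 1]);
    }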
@@ -26,7 +26,6 @@ const cast = std.math.cast;
 const maxInt = std.math.maxInt;
 const MemoryAccessor = std.debug.MemoryAccessor;
 const Path = std.Build.Cache.Path;
-
 const FixedBufferReader = std.debug.FixedBufferReader;
 
 const Dwarf = @This();
@ -35,22 +34,36 @@ pub const expression = @import("Dwarf/expression.zig");
|
||||
pub const abi = @import("Dwarf/abi.zig");
|
||||
pub const call_frame = @import("Dwarf/call_frame.zig");
|
||||
|
||||
/// Useful to temporarily enable while working on this file.
|
||||
const debug_debug_mode = false;
|
||||
|
||||
endian: std.builtin.Endian,
|
||||
sections: SectionArray = null_section_array,
|
||||
is_macho: bool,
|
||||
compile_units_sorted: bool,
|
||||
|
||||
// Filled later by the initializer
|
||||
/// Filled later by the initializer
|
||||
abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .{},
|
||||
/// Filled later by the initializer
|
||||
compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .{},
|
||||
/// Filled later by the initializer
|
||||
func_list: std.ArrayListUnmanaged(Func) = .{},
|
||||
|
||||
eh_frame_hdr: ?ExceptionFrameHeader = null,
|
||||
// These lookup tables are only used if `eh_frame_hdr` is null
|
||||
/// These lookup tables are only used if `eh_frame_hdr` is null
|
||||
cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .{},
|
||||
// Sorted by start_pc
|
||||
/// Sorted by start_pc
|
||||
fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .{},
|
||||
|
||||
/// Populated by `populateRanges`.
|
||||
ranges: std.ArrayListUnmanaged(Range) = .{},
|
||||
|
||||
pub const Range = struct {
|
||||
start: u64,
|
||||
end: u64,
|
||||
/// Index into `compile_unit_list`.
|
||||
compile_unit_index: usize,
|
||||
};
|
||||
|
||||
pub const Section = struct {
|
||||
data: []const u8,
|
||||
// Module-relative virtual address.
|
||||
@@ -154,6 +167,16 @@ pub const CompileUnit = struct {
             column: u32,
+            /// Offset by 1 depending on whether Dwarf version is >= 5.
             file: u32,
 
+            pub const invalid: LineEntry = .{
+                .line = undefined,
+                .column = undefined,
+                .file = std.math.maxInt(u32),
+            };
+
+            pub fn isInvalid(le: LineEntry) bool {
+                return le.file == invalid.file;
+            }
         };
 
         pub fn findSource(slc: *const SrcLocCache, address: u64) !LineEntry {
@@ -799,6 +822,7 @@ pub fn deinit(di: *Dwarf, gpa: Allocator) void {
     di.func_list.deinit(gpa);
     di.cie_map.deinit(gpa);
     di.fde_list.deinit(gpa);
+    di.ranges.deinit(gpa);
     di.* = undefined;
 }
 
@@ -985,8 +1009,8 @@ fn scanAllFunctions(di: *Dwarf, allocator: Allocator) ScanError!void {
                     try di.func_list.append(allocator, .{
                         .name = fn_name,
                         .pc_range = .{
-                            .start = range.start_addr,
-                            .end = range.end_addr,
+                            .start = range.start,
+                            .end = range.end,
                         },
                     });
                 }
@@ -1096,37 +1120,38 @@ fn scanAllCompileUnits(di: *Dwarf, allocator: Allocator) ScanError!void {
     }
 }
 
-/// Populate missing PC ranges in compilation units, and then sort them by start address.
-/// Does not guarantee pc_range to be non-null because there could be missing debug info.
-pub fn sortCompileUnits(d: *Dwarf) ScanError!void {
-    assert(!d.compile_units_sorted);
+pub fn populateRanges(d: *Dwarf, gpa: Allocator) ScanError!void {
+    assert(d.ranges.items.len == 0);
 
-    for (d.compile_unit_list.items) |*cu| {
-        if (cu.pc_range != null) continue;
+    for (d.compile_unit_list.items, 0..) |*cu, cu_index| {
+        if (cu.pc_range) |range| {
+            try d.ranges.append(gpa, .{
+                .start = range.start,
+                .end = range.end,
+                .compile_unit_index = cu_index,
+            });
+            continue;
+        }
         const ranges_value = cu.die.getAttr(AT.ranges) orelse continue;
         var iter = DebugRangeIterator.init(ranges_value, d, cu) catch continue;
-        var start: u64 = maxInt(u64);
-        var end: u64 = 0;
         while (try iter.next()) |range| {
-            start = @min(start, range.start_addr);
-            end = @max(end, range.end_addr);
+            // Not sure why LLVM thinks it's OK to emit these...
+            if (range.start == range.end) continue;
+
+            try d.ranges.append(gpa, .{
+                .start = range.start,
+                .end = range.end,
+                .compile_unit_index = cu_index,
+            });
         }
-        if (end != 0) cu.pc_range = .{
-            .start = start,
-            .end = end,
-        };
     }
 
-    std.mem.sortUnstable(CompileUnit, d.compile_unit_list.items, {}, struct {
-        pub fn lessThan(ctx: void, a: CompileUnit, b: CompileUnit) bool {
+    std.mem.sortUnstable(Range, d.ranges.items, {}, struct {
+        pub fn lessThan(ctx: void, a: Range, b: Range) bool {
             _ = ctx;
-            const a_range = a.pc_range orelse return false;
-            const b_range = b.pc_range orelse return true;
-            return a_range.start < b_range.start;
+            return a.start < b.start;
         }
     }.lessThan);
-
-    d.compile_units_sorted = true;
 }
 
 const DebugRangeIterator = struct {
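
Keeping one `ranges` entry per range, each with a back-reference to its compile unit, matters because compile units' address ranges can interleave; the old approach of collapsing each unit to a single min/max `pc_range` and sorting the units could attribute a PC to the wrong unit. A small illustration with made-up addresses:

    const std = @import("std");

    const Range = struct { start: u64, end: u64, compile_unit_index: usize };

    test "interleaved compile unit ranges" {
        // CU 0 occupies 0x100-0x200 and 0x500-0x600; CU 1 sits in between.
        var ranges = [_]Range{
            .{ .start = 0x500, .end = 0x600, .compile_unit_index = 0 },
            .{ .start = 0x100, .end = 0x200, .compile_unit_index = 0 },
            .{ .start = 0x300, .end = 0x400, .compile_unit_index = 1 },
        };
        std.mem.sortUnstable(Range, &ranges, {}, struct {
            pub fn lessThan(_: void, a: Range, b: Range) bool {
                return a.start < b.start;
            }
        }.lessThan);
        // After sorting, a PC like 0x350 resolves to CU 1, which a collapsed
        // 0x100-0x600 range for CU 0 would have hidden.
        try std.testing.expectEqual(@as(usize, 1), ranges[1].compile_unit_index);
        try std.testing.expectEqual(@as(u64, 0x300), ranges[1].start);
    }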
@@ -1184,7 +1209,7 @@ const DebugRangeIterator = struct {
         }
     }
 
     // Returns the next range in the list, or null if the end was reached.
-    pub fn next(self: *@This()) !?struct { start_addr: u64, end_addr: u64 } {
+    pub fn next(self: *@This()) !?PcRange {
         switch (self.section_type) {
             .debug_rnglists => {
                 const kind = try self.fbr.readByte();
@@ -1203,8 +1228,8 @@ const DebugRangeIterator = struct {
                     const end_addr = try self.di.readDebugAddr(self.compile_unit.*, end_index);
 
                     return .{
-                        .start_addr = start_addr,
-                        .end_addr = end_addr,
+                        .start = start_addr,
+                        .end = end_addr,
                     };
                 },
                 RLE.startx_length => {
@@ -1215,8 +1240,8 @@ const DebugRangeIterator = struct {
                     const end_addr = start_addr + len;
 
                     return .{
-                        .start_addr = start_addr,
-                        .end_addr = end_addr,
+                        .start = start_addr,
+                        .end = end_addr,
                     };
                 },
                 RLE.offset_pair => {
@@ -1225,8 +1250,8 @@ const DebugRangeIterator = struct {
 
                     // This is the only kind that uses the base address
                     return .{
-                        .start_addr = self.base_address + start_addr,
-                        .end_addr = self.base_address + end_addr,
+                        .start = self.base_address + start_addr,
+                        .end = self.base_address + end_addr,
                     };
                 },
                 RLE.base_address => {
@@ -1238,8 +1263,8 @@ const DebugRangeIterator = struct {
                     const end_addr = try self.fbr.readInt(usize);
 
                     return .{
-                        .start_addr = start_addr,
-                        .end_addr = end_addr,
+                        .start = start_addr,
+                        .end = end_addr,
                     };
                 },
                 RLE.start_length => {
@@ -1248,8 +1273,8 @@ const DebugRangeIterator = struct {
                     const end_addr = start_addr + len;
 
                     return .{
-                        .start_addr = start_addr,
-                        .end_addr = end_addr,
+                        .start = start_addr,
+                        .end = end_addr,
                     };
                 },
                 else => return bad(),
@@ -1267,8 +1292,8 @@ const DebugRangeIterator = struct {
                 }
 
                 return .{
-                    .start_addr = self.base_address + start_addr,
-                    .end_addr = self.base_address + end_addr,
+                    .start = self.base_address + start_addr,
+                    .end = self.base_address + end_addr,
                 };
             },
             else => unreachable,
@@ -1286,7 +1311,7 @@ pub fn findCompileUnit(di: *const Dwarf, target_address: u64) !*CompileUnit {
         const ranges_value = compile_unit.die.getAttr(AT.ranges) orelse continue;
         var iter = DebugRangeIterator.init(ranges_value, di, compile_unit) catch continue;
         while (try iter.next()) |range| {
-            if (target_address >= range.start_addr and target_address < range.end_addr) return compile_unit;
+            if (target_address >= range.start and target_address < range.end) return compile_unit;
         }
     }
 
@@ -1387,6 +1412,7 @@ fn parseDie(
     };
 }
 
+/// Ensures that addresses in the returned LineTable are monotonically increasing.
 fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) !CompileUnit.SrcLocCache {
     const compile_unit_cwd = try compile_unit.die.getAttrString(d, AT.comp_dir, d.section(.debug_line_str), compile_unit.*);
     const line_info_offset = try compile_unit.die.getAttrSecOffset(AT.stmt_list);
@@ -1562,8 +1588,19 @@ fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) !
                 const sub_op = try fbr.readByte();
                 switch (sub_op) {
                     DW.LNE.end_sequence => {
-                        prog.end_sequence = true;
-                        try prog.addRow(gpa, &line_table);
+                        // The row being added here is an "end" address, meaning
+                        // that it does not map to the source location here -
+                        // rather it marks the previous address as the last address
+                        // that maps to this source location.
+
+                        // In this implementation we don't mark end of addresses.
+                        // This is a performance optimization based on the fact
+                        // that we don't need to know if an address is missing
+                        // source location info; we are only interested in being
+                        // able to look up source location info for addresses that
+                        // are known to have debug info.
+                        //if (debug_debug_mode) assert(!line_table.contains(prog.address));
+                        //try line_table.put(gpa, prog.address, CompileUnit.SrcLocCache.LineEntry.invalid);
                         prog.reset();
                     },
                     DW.LNE.set_address => {
@@ -1638,6 +1675,17 @@ fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) !
         }
     }
 
+    // Dwarf standard v5, 6.2.5 says
+    // > Within a sequence, addresses and operation pointers may only increase.
+    // However, this is empirically not the case in reality, so we sort here.
+    line_table.sortUnstable(struct {
+        keys: []const u64,
+
+        pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
+            return ctx.keys[a_index] < ctx.keys[b_index];
+        }
+    }{ .keys = line_table.keys() });
+
     return .{
         .line_table = line_table,
         .directories = try directories.toOwnedSlice(gpa),
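
`LineTable` is an array hash map, so sorting it in place reorders keys and values together and leaves lookups valid. A minimal sketch of the same call shape on a toy map, with `u64` keys standing in for addresses:

    const std = @import("std");

    test "sort an array hash map by key" {
        const gpa = std.testing.allocator;
        var table: std.AutoArrayHashMapUnmanaged(u64, u32) = .{};
        defer table.deinit(gpa);
        try table.put(gpa, 0x30, 3);
        try table.put(gpa, 0x10, 1);
        try table.put(gpa, 0x20, 2);
        table.sortUnstable(struct {
            keys: []const u64,

            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                return ctx.keys[a_index] < ctx.keys[b_index];
            }
        }{ .keys = table.keys() });
        try std.testing.expectEqualSlices(u64, &.{ 0x10, 0x20, 0x30 }, table.keys());
        // Values moved with their keys.
        try std.testing.expectEqualSlices(u32, &.{ 1, 2, 3 }, table.values());
    }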
@@ -1882,7 +1930,6 @@ const LineNumberProgram = struct {
     version: u16,
     is_stmt: bool,
     basic_block: bool,
-    end_sequence: bool,
 
     default_is_stmt: bool,
 
@@ -1894,7 +1941,6 @@ const LineNumberProgram = struct {
         self.column = 0;
         self.is_stmt = self.default_is_stmt;
         self.basic_block = false;
-        self.end_sequence = false;
     }
 
     pub fn init(is_stmt: bool, version: u16) LineNumberProgram {
@@ -1906,13 +1952,16 @@ const LineNumberProgram = struct {
             .version = version,
             .is_stmt = is_stmt,
             .basic_block = false,
-            .end_sequence = false,
             .default_is_stmt = is_stmt,
         };
     }
 
     pub fn addRow(prog: *LineNumberProgram, gpa: Allocator, table: *CompileUnit.SrcLocCache.LineTable) !void {
-        if (prog.line == 0) return; // garbage data
+        if (prog.line == 0) {
+            //if (debug_debug_mode) @panic("garbage line data");
+            return;
+        }
+        if (debug_debug_mode) assert(!table.contains(prog.address));
         try table.put(gpa, prog.address, .{
             .line = cast(u32, prog.line) orelse maxInt(u32),
             .column = cast(u32, prog.column) orelse maxInt(u32),
@@ -1959,12 +2008,12 @@ pub fn compactUnwindToDwarfRegNumber(unwind_reg_number: u3) !u8 {
 /// This function is to make it handy to comment out the return and make it
 /// into a crash when working on this file.
 pub fn bad() error{InvalidDebugInfo} {
-    //if (true) @panic("bad dwarf"); // can be handy to uncomment when working on this file
+    if (debug_debug_mode) @panic("bad dwarf");
     return error.InvalidDebugInfo;
 }
 
 fn missing() error{MissingDebugInfo} {
-    //if (true) @panic("missing dwarf"); // can be handy to uncomment when working on this file
+    if (debug_debug_mode) @panic("missing dwarf");
     return error.MissingDebugInfo;
 }
 
@@ -2345,7 +2394,6 @@ pub const ElfModule = struct {
             .endian = endian,
             .sections = sections,
             .is_macho = false,
-            .compile_units_sorted = false,
         };
 
         try Dwarf.open(&di, gpa);
 
@@ -27,7 +27,7 @@ pub const LoadError = Dwarf.ElfModule.LoadError;
 pub fn load(gpa: Allocator, path: Path, coverage: *Coverage) LoadError!Info {
     var sections: Dwarf.SectionArray = Dwarf.null_section_array;
     var elf_module = try Dwarf.ElfModule.loadPath(gpa, path, null, null, &sections, null);
-    try elf_module.dwarf.sortCompileUnits();
+    try elf_module.dwarf.populateRanges(gpa);
     var info: Info = .{
         .address_map = .{},
         .coverage = coverage,
@@ -51,6 +51,7 @@ pub const ResolveAddressesError = Coverage.ResolveAddressesDwarfError;
 pub fn resolveAddresses(
     info: *Info,
     gpa: Allocator,
+    /// Asserts the addresses are in ascending order.
     sorted_pc_addrs: []const u64,
     /// Asserts its length equals length of `sorted_pc_addrs`.
     output: []SourceLocation,
 
@@ -606,7 +606,6 @@ pub const Module = switch (native_os) {
                     .endian = .little,
                     .sections = sections,
                     .is_macho = true,
-                    .compile_units_sorted = false,
                 };
 
                 try Dwarf.open(&di, allocator);
@@ -996,7 +995,6 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
         .endian = native_endian,
         .sections = sections,
        .is_macho = false,
-        .compile_units_sorted = false,
     };
 
     try Dwarf.open(&dwarf, allocator);
@@ -1810,7 +1808,6 @@ fn unwindFrameMachODwarf(
     var di: Dwarf = .{
         .endian = native_endian,
         .is_macho = true,
-        .compile_units_sorted = false,
     };
     defer di.deinit(context.allocator);
 
@@ -311,7 +311,10 @@ ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machi
         }
     });
 
-    pass_builder.registerOptimizerEarlyEPCallback([&](ModulePassManager &module_pm, OptimizationLevel OL) {
+    //pass_builder.registerOptimizerEarlyEPCallback([&](ModulePassManager &module_pm, OptimizationLevel OL) {
+    //});
+
+    pass_builder.registerOptimizerLastEPCallback([&](ModulePassManager &module_pm, OptimizationLevel level) {
         // Code coverage instrumentation.
         if (options.sancov) {
             module_pm.addPass(SanitizerCoveragePass(getSanCovOptions(options.coverage)));
@@ -322,9 +325,7 @@ ZIG_EXTERN_C bool ZigLLVMTargetMachineEmitToFile(LLVMTargetMachineRef targ_machi
             module_pm.addPass(ModuleThreadSanitizerPass());
             module_pm.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
         }
-    });
 
-    pass_builder.registerOptimizerLastEPCallback([&](ModulePassManager &module_pm, OptimizationLevel level) {
         // Verify the output
         if (assertions_on) {
             module_pm.addPass(VerifierPass());
@@ -54,21 +54,30 @@ pub fn main() !void {
     const header: *SeenPcsHeader = @ptrCast(cov_bytes);
     try stdout.print("{any}\n", .{header.*});
     const pcs = header.pcAddrs();
-    for (0.., pcs[0 .. pcs.len - 1], pcs[1..]) |i, a, b| {
-        if (a > b) std.log.err("{d}: 0x{x} > 0x{x}", .{ i, a, b });
-    }
-    assert(std.sort.isSorted(usize, pcs, {}, std.sort.asc(usize)));
 
+    var indexed_pcs: std.AutoArrayHashMapUnmanaged(usize, void) = .{};
+    try indexed_pcs.entries.resize(arena, pcs.len);
+    @memcpy(indexed_pcs.entries.items(.key), pcs);
+    try indexed_pcs.reIndex(arena);
+
+    const sorted_pcs = try arena.dupe(usize, pcs);
+    std.mem.sortUnstable(usize, sorted_pcs, {}, std.sort.asc(usize));
+
+    const source_locations = try arena.alloc(std.debug.Coverage.SourceLocation, sorted_pcs.len);
+    try debug_info.resolveAddresses(gpa, sorted_pcs, source_locations);
+
     const seen_pcs = header.seenBits();
 
-    const source_locations = try arena.alloc(std.debug.Coverage.SourceLocation, pcs.len);
-    try debug_info.resolveAddresses(gpa, pcs, source_locations);
-
-    for (pcs, source_locations, 0..) |pc, sl, i| {
+    for (sorted_pcs, source_locations) |pc, sl| {
         if (sl.file == .invalid) {
             try stdout.print(" {x}: invalid\n", .{pc});
             continue;
         }
         const file = debug_info.coverage.fileAt(sl.file);
         const dir_name = debug_info.coverage.directories.keys()[file.directory_index];
         const dir_name_slice = debug_info.coverage.stringAt(dir_name);
-        const hit: u1 = @truncate(seen_pcs[i / @bitSizeOf(usize)] >> @intCast(i % @bitSizeOf(usize)));
+        const seen_i = indexed_pcs.getIndex(pc).?;
+        const hit: u1 = @truncate(seen_pcs[seen_i / @bitSizeOf(usize)] >> @intCast(seen_i % @bitSizeOf(usize)));
         try stdout.print("{c}{x}: {s}/{s}:{d}:{d}\n", .{
             "-+"[hit], pc, dir_name_slice, debug_info.coverage.stringAt(file.basename), sl.line, sl.column,
         });
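
Two details of this hunk generalize: an `AutoArrayHashMapUnmanaged(usize, void)` built straight from the unsorted `pcs` array (resize the entries, copy the keys, `reIndex`) gives O(1) lookup of a PC's original position, and hits are stored one bit per PC packed into `usize` words. A self-contained sketch of both, using toy values rather than real coverage data:

    const std = @import("std");

    test "index map and per-bit hit lookup" {
        var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
        defer arena_state.deinit();
        const arena = arena_state.allocator();

        // Build a value-less array hash map whose entry order mirrors `pcs`,
        // so getIndex(pc) recovers a PC's position in the original array.
        const pcs = [_]usize{ 0x30, 0x10, 0x20 };
        var indexed_pcs: std.AutoArrayHashMapUnmanaged(usize, void) = .{};
        try indexed_pcs.entries.resize(arena, pcs.len);
        @memcpy(indexed_pcs.entries.items(.key), &pcs);
        try indexed_pcs.reIndex(arena);
        try std.testing.expectEqual(@as(?usize, 1), indexed_pcs.getIndex(0x10));

        // One bit per PC, packed into usize words: bit i set = PC i was hit.
        const seen_pcs = [_]usize{0b101}; // PCs 0 and 2 hit, PC 1 not.
        const i = indexed_pcs.getIndex(0x10).?;
        const hit: u1 = @truncate(seen_pcs[i / @bitSizeOf(usize)] >> @intCast(i % @bitSizeOf(usize)));
        try std.testing.expectEqual(@as(u1, 0), hit);
    }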