Mirror of https://github.com/ziglang/zig.git, synced 2025-12-06 14:23:09 +00:00
fuzzing: progress towards web UI

* libfuzzer: close the coverage file after mmap.
* fuzzer/main.js: connect with EventSource and debug-dump the messages.
  Currently this prints the number of fuzzer runs attempted to
  console.log.
* extract some `std.debug.Info` logic into `std.debug.Coverage`.
  This prepares for consolidation across multiple executables that
  share source files, and makes it possible to send all the
  PC/SourceLocation mapping data as 4 memcpy'd arrays.
* std.Build.Fuzz:
  - spawn a thread to watch the message queue and signal event
    subscribers (see the sketch below).
  - track coverage map data.
  - respond to the /events URL with EventSource messages on a timer.
This commit is contained in:
  parent 5f92a036f9
  commit 517cfb0dd1
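
Note: the std.Build.Fuzz bullets above describe a classic mutex/condition handoff: the test runner appends to `msg_queue` under `mutex` and signals `condition`; the coverage thread wakes, drains the queue, and waits again. A minimal standalone sketch of that pattern (hypothetical `Queue` type, not code from this commit; the real types are `WebServer.msg_queue`, `mutex`, and `condition` in the hunks below):

    const std = @import("std");

    const Queue = struct {
        mutex: std.Thread.Mutex = .{},
        condition: std.Thread.Condition = .{},
        items: std.ArrayListUnmanaged(u64) = .{},

        // Producer side: append under the mutex, then signal.
        fn send(q: *Queue, gpa: std.mem.Allocator, msg: u64) !void {
            q.mutex.lock();
            defer q.mutex.unlock();
            try q.items.append(gpa, msg);
            q.condition.signal();
        }

        // Consumer side: drains one batch; the real coverageRun loops forever.
        fn drainOne(q: *Queue) void {
            q.mutex.lock();
            defer q.mutex.unlock();
            while (q.items.items.len == 0) q.condition.wait(&q.mutex);
            for (q.items.items) |msg| std.debug.print("msg: {d}\n", .{msg});
            q.items.clearRetainingCapacity();
        }
    };

    pub fn main() !void {
        var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa_state.deinit();
        const gpa = gpa_state.allocator();
        var q: Queue = .{};
        defer q.items.deinit(gpa);
        const consumer = try std.Thread.spawn(.{}, Queue.drainOne, .{&q});
        try q.send(gpa, 42);
        consumer.join();
    }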
@@ -218,6 +218,7 @@ const Fuzzer = struct {
         .read = true,
         .truncate = false,
     });
+    defer coverage_file.close();
     const n_bitset_elems = (flagged_pcs.len + 7) / 8;
     const bytes_len = @sizeOf(SeenPcsHeader) + flagged_pcs.len * @sizeOf(usize) + n_bitset_elems;
     const existing_len = coverage_file.getEndPos() catch |err| {
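Note: the `bytes_len` computation above implies the coverage file's flat layout. A hedged sketch of how the regions line up, assuming `SeenPcsHeader`, `mapped` (the mmap'd file bytes), and `pcs_len` (the number of flagged PCs) are in scope; names are illustrative, not from the commit:

    // [ SeenPcsHeader ][ pcs: pcs_len * usize ][ seen bitset: one bit per PC ]
    const header_end = @sizeOf(SeenPcsHeader);
    const pcs_end = header_end + pcs_len * @sizeOf(usize);
    const header: *const SeenPcsHeader = @ptrCast(@alignCast(mapped[0..header_end]));
    const pcs_bytes = mapped[header_end..pcs_end]; // one usize per flagged PC
    const seen_bitset = mapped[pcs_end..]; // (pcs_len + 7) / 8 bytes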
@@ -12,6 +12,9 @@
 const text_decoder = new TextDecoder();
 const text_encoder = new TextEncoder();
 
+const eventSource = new EventSource("events");
+eventSource.addEventListener('message', onMessage, false);
+
 WebAssembly.instantiateStreaming(wasm_promise, {
     js: {
         log: function(ptr, len) {
@@ -38,11 +41,15 @@
     });
 });
 
+function onMessage(e) {
+    console.log("Message", e.data);
+}
+
 function render() {
     domSectSource.classList.add("hidden");
 
     // TODO this is temporary debugging data
     renderSource("/home/andy/dev/zig/lib/std/zig/tokenizer.zig");
 }
 
 function renderSource(path) {
@@ -6,6 +6,7 @@ const assert = std.debug.assert;
 const fatal = std.process.fatal;
 const Allocator = std.mem.Allocator;
 const log = std.log;
+const Coverage = std.debug.Coverage;
 
 const Fuzz = @This();
 const build_runner = @import("root");
@@ -53,17 +54,30 @@ pub fn start(
         .global_cache_directory = global_cache_directory,
         .zig_lib_directory = zig_lib_directory,
         .zig_exe_path = zig_exe_path,
-        .msg_queue = .{},
-        .mutex = .{},
         .listen_address = listen_address,
         .fuzz_run_steps = fuzz_run_steps,
+
+        .msg_queue = .{},
+        .mutex = .{},
+        .condition = .{},
+
+        .coverage_files = .{},
+        .coverage_mutex = .{},
+        .coverage_condition = .{},
     };
 
+    // For accepting HTTP connections.
     const web_server_thread = std.Thread.spawn(.{}, WebServer.run, .{&web_server}) catch |err| {
         fatal("unable to spawn web server thread: {s}", .{@errorName(err)});
     };
     defer web_server_thread.join();
 
+    // For polling messages and sending updates to subscribers.
+    const coverage_thread = std.Thread.spawn(.{}, WebServer.coverageRun, .{&web_server}) catch |err| {
+        fatal("unable to spawn coverage thread: {s}", .{@errorName(err)});
+    };
+    defer coverage_thread.join();
+
     {
         const fuzz_node = prog_node.start("Fuzzing", fuzz_run_steps.len);
         defer fuzz_node.end();
@@ -88,14 +102,38 @@ pub const WebServer = struct {
     global_cache_directory: Build.Cache.Directory,
     zig_lib_directory: Build.Cache.Directory,
     zig_exe_path: []const u8,
-    /// Messages from fuzz workers. Protected by mutex.
-    msg_queue: std.ArrayListUnmanaged(Msg),
-    mutex: std.Thread.Mutex,
     listen_address: std.net.Address,
     fuzz_run_steps: []const *Step.Run,
 
+    /// Messages from fuzz workers. Protected by mutex.
+    msg_queue: std.ArrayListUnmanaged(Msg),
+    /// Protects `msg_queue` only.
+    mutex: std.Thread.Mutex,
+    /// Signaled when there is a message in `msg_queue`.
+    condition: std.Thread.Condition,
+
+    coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
+    /// Protects `coverage_files` only.
+    coverage_mutex: std.Thread.Mutex,
+    /// Signaled when `coverage_files` changes.
+    coverage_condition: std.Thread.Condition,
+
+    const CoverageMap = struct {
+        mapped_memory: []align(std.mem.page_size) const u8,
+        coverage: Coverage,
+
+        fn deinit(cm: *CoverageMap, gpa: Allocator) void {
+            std.posix.munmap(cm.mapped_memory);
+            cm.coverage.deinit(gpa);
+            cm.* = undefined;
+        }
+    };
+
     const Msg = union(enum) {
         coverage_id: u64,
+        coverage: struct {
+            id: u64,
+            run: *Step.Run,
+        },
     };
 
     fn run(ws: *WebServer) void {
@@ -162,6 +200,10 @@ pub const WebServer = struct {
             std.mem.eql(u8, request.head.target, "/debug/sources.tar"))
         {
             try serveSourcesTar(ws, request);
+        } else if (std.mem.eql(u8, request.head.target, "/events") or
+            std.mem.eql(u8, request.head.target, "/debug/events"))
+        {
+            try serveEvents(ws, request);
         } else {
             try request.respond("not found", .{
                 .status = .not_found,
@@ -384,6 +426,58 @@ pub const WebServer = struct {
         try file.writeAll(std.mem.asBytes(&header));
     }
 
+    fn serveEvents(ws: *WebServer, request: *std.http.Server.Request) !void {
+        var send_buffer: [0x4000]u8 = undefined;
+        var response = request.respondStreaming(.{
+            .send_buffer = &send_buffer,
+            .respond_options = .{
+                .extra_headers = &.{
+                    .{ .name = "content-type", .value = "text/event-stream" },
+                },
+                .transfer_encoding = .none,
+            },
+        });
+
+        ws.coverage_mutex.lock();
+        defer ws.coverage_mutex.unlock();
+
+        if (getStats(ws)) |stats| {
+            try response.writer().print("data: {d}\n\n", .{stats.n_runs});
+        } else {
+            try response.writeAll("data: loading debug information\n\n");
+        }
+        try response.flush();
+
+        while (true) {
+            ws.coverage_condition.timedWait(&ws.coverage_mutex, std.time.ns_per_ms * 500) catch {};
+            if (getStats(ws)) |stats| {
+                try response.writer().print("data: {d}\n\n", .{stats.n_runs});
+                try response.flush();
+            }
+        }
+    }
+
+    const Stats = struct {
+        n_runs: u64,
+    };
+
+    fn getStats(ws: *WebServer) ?Stats {
+        const coverage_maps = ws.coverage_files.values();
+        if (coverage_maps.len == 0) return null;
+        // TODO: make each events URL correspond to one coverage map
+        const ptr = coverage_maps[0].mapped_memory;
+        const SeenPcsHeader = extern struct {
+            n_runs: usize,
+            deduplicated_runs: usize,
+            pcs_len: usize,
+            lowest_stack: usize,
+        };
+        const header: *const SeenPcsHeader = @ptrCast(ptr[0..@sizeOf(SeenPcsHeader)]);
+        return .{
+            .n_runs = @atomicLoad(usize, &header.n_runs, .monotonic),
+        };
+    }
+
     fn serveSourcesTar(ws: *WebServer, request: *std.http.Server.Request) !void {
         const gpa = ws.gpa;
 
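Note: serveEvents above uses the standard Server-Sent Events wire format: a `text/event-stream` response in which every update is a `data: <payload>` line followed by a blank line. A minimal sketch of the framing (hypothetical helper, not part of the commit):

    // The EventSource in main.js receives each such frame as one
    // 'message' event whose e.data equals <payload>.
    fn writeSseEvent(writer: anytype, payload: []const u8) !void {
        try writer.print("data: {s}\n\n", .{payload});
    }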
@@ -471,6 +565,95 @@ pub const WebServer = struct {
         .name = "cache-control",
         .value = "max-age=0, must-revalidate",
     };
 
+    fn coverageRun(ws: *WebServer) void {
+        ws.mutex.lock();
+        defer ws.mutex.unlock();
+
+        while (true) {
+            ws.condition.wait(&ws.mutex);
+            for (ws.msg_queue.items) |msg| switch (msg) {
+                .coverage => |coverage| prepareTables(ws, coverage.run, coverage.id) catch |err| switch (err) {
+                    error.AlreadyReported => continue,
+                    else => |e| log.err("failed to prepare code coverage tables: {s}", .{@errorName(e)}),
+                },
+            };
+            ws.msg_queue.clearRetainingCapacity();
+        }
+    }
+
+    fn prepareTables(
+        ws: *WebServer,
+        run_step: *Step.Run,
+        coverage_id: u64,
+    ) error{ OutOfMemory, AlreadyReported }!void {
+        const gpa = ws.gpa;
+
+        ws.coverage_mutex.lock();
+        defer ws.coverage_mutex.unlock();
+
+        const gop = try ws.coverage_files.getOrPut(gpa, coverage_id);
+        if (gop.found_existing) {
+            // We are fuzzing the same executable with multiple threads.
+            // Perhaps the same unit test; perhaps a different one. In any
+            // case, since the coverage file is the same, we only have to
+            // notice changes to that one file in order to learn coverage for
+            // this particular executable.
+            return;
+        }
+        errdefer _ = ws.coverage_files.pop();
+
+        gop.value_ptr.* = .{
+            .coverage = std.debug.Coverage.init,
+            .mapped_memory = undefined, // populated below
+        };
+        errdefer gop.value_ptr.coverage.deinit(gpa);
+
+        const rebuilt_exe_path: Build.Cache.Path = .{
+            .root_dir = Build.Cache.Directory.cwd(),
+            .sub_path = run_step.rebuilt_executable.?,
+        };
+        var debug_info = std.debug.Info.load(gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
+            log.err("step '{s}': failed to load debug information for '{}': {s}", .{
+                run_step.step.name, rebuilt_exe_path, @errorName(err),
+            });
+            return error.AlreadyReported;
+        };
+        defer debug_info.deinit(gpa);
+
+        const coverage_file_path: Build.Cache.Path = .{
+            .root_dir = run_step.step.owner.cache_root,
+            .sub_path = "v/" ++ std.fmt.hex(coverage_id),
+        };
+        var coverage_file = coverage_file_path.root_dir.handle.openFile(coverage_file_path.sub_path, .{}) catch |err| {
+            log.err("step '{s}': failed to load coverage file '{}': {s}", .{
+                run_step.step.name, coverage_file_path, @errorName(err),
+            });
+            return error.AlreadyReported;
+        };
+        defer coverage_file.close();
+
+        const file_size = coverage_file.getEndPos() catch |err| {
+            log.err("unable to check len of coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
+            return error.AlreadyReported;
+        };
+
+        const mapped_memory = std.posix.mmap(
+            null,
+            file_size,
+            std.posix.PROT.READ,
+            .{ .TYPE = .SHARED },
+            coverage_file.handle,
+            0,
+        ) catch |err| {
+            log.err("failed to map coverage file '{}': {s}", .{ coverage_file_path, @errorName(err) });
+            return error.AlreadyReported;
+        };
+
+        gop.value_ptr.mapped_memory = mapped_memory;
+
+        ws.coverage_condition.broadcast();
+    }
 };
 
 fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog_node: std.Progress.Node) void {
@@ -493,16 +676,16 @@ fn rebuildTestsWorkerRun(run: *Step.Run, ttyconf: std.io.tty.Config, parent_prog
         build_runner.printErrorMessages(gpa, &compile.step, ttyconf, stderr, false) catch {};
     }
 
-    if (result) |rebuilt_bin_path| {
-        run.rebuilt_executable = rebuilt_bin_path;
-    } else |err| switch (err) {
-        error.MakeFailed => {},
+    const rebuilt_bin_path = result catch |err| switch (err) {
+        error.MakeFailed => return,
         else => {
-            std.debug.print("step '{s}': failed to rebuild in fuzz mode: {s}\n", .{
+            log.err("step '{s}': failed to rebuild in fuzz mode: {s}", .{
                 compile.step.name, @errorName(err),
             });
             return;
         },
-    }
+    };
+    run.rebuilt_executable = rebuilt_bin_path;
 }
 
 fn fuzzWorkerRun(
@@ -524,11 +707,13 @@ fn fuzzWorkerRun(
             std.debug.lockStdErr();
             defer std.debug.unlockStdErr();
             build_runner.printErrorMessages(gpa, &run.step, ttyconf, stderr, false) catch {};
+            return;
         },
         else => {
-            std.debug.print("step '{s}': failed to rebuild '{s}' in fuzz mode: {s}\n", .{
+            log.err("step '{s}': failed to rerun '{s}' in fuzz mode: {s}", .{
                 run.step.name, test_name, @errorName(err),
            });
            return;
        },
    };
 }
@@ -1521,7 +1521,11 @@ fn evalZigTest(
                 {
                     web_server.mutex.lock();
                     defer web_server.mutex.unlock();
-                    try web_server.msg_queue.append(web_server.gpa, .{ .coverage_id = coverage_id });
+                    try web_server.msg_queue.append(web_server.gpa, .{ .coverage = .{
+                        .id = coverage_id,
+                        .run = run,
+                    } });
+                    web_server.condition.signal();
                 }
             },
             else => {}, // ignore other messages
@@ -19,6 +19,7 @@ pub const Dwarf = @import("debug/Dwarf.zig");
 pub const Pdb = @import("debug/Pdb.zig");
 pub const SelfInfo = @import("debug/SelfInfo.zig");
 pub const Info = @import("debug/Info.zig");
+pub const Coverage = @import("debug/Coverage.zig");
 
 /// Unresolved source locations can be represented with a single `usize` that
 /// corresponds to a virtual memory address of the program counter. Combined
lib/std/debug/Coverage.zig (new file, 244 lines)
@@ -0,0 +1,244 @@
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const Hash = std.hash.Wyhash;
const Dwarf = std.debug.Dwarf;
const assert = std.debug.assert;

const Coverage = @This();

/// Provides a globally-scoped integer index for directories.
///
/// As opposed to, for example, a directory index that is compilation-unit
/// scoped inside a single ELF module.
///
/// String memory references the memory-mapped debug information.
///
/// Protected by `mutex`.
directories: std.ArrayHashMapUnmanaged(String, void, String.MapContext, false),
/// Provides a globally-scoped integer index for files.
///
/// String memory references the memory-mapped debug information.
///
/// Protected by `mutex`.
files: std.ArrayHashMapUnmanaged(File, void, File.MapContext, false),
string_bytes: std.ArrayListUnmanaged(u8),
/// Protects the other fields.
mutex: std.Thread.Mutex,

pub const init: Coverage = .{
    .directories = .{},
    .files = .{},
    .mutex = .{},
    .string_bytes = .{},
};

pub const String = enum(u32) {
    _,

    pub const MapContext = struct {
        string_bytes: []const u8,

        pub fn eql(self: @This(), a: String, b: String, b_index: usize) bool {
            _ = b_index;
            const a_slice = span(self.string_bytes[@intFromEnum(a)..]);
            const b_slice = span(self.string_bytes[@intFromEnum(b)..]);
            return std.mem.eql(u8, a_slice, b_slice);
        }

        pub fn hash(self: @This(), a: String) u32 {
            return @truncate(Hash.hash(0, span(self.string_bytes[@intFromEnum(a)..])));
        }
    };

    pub const SliceAdapter = struct {
        string_bytes: []const u8,

        pub fn eql(self: @This(), a_slice: []const u8, b: String, b_index: usize) bool {
            _ = b_index;
            const b_slice = span(self.string_bytes[@intFromEnum(b)..]);
            return std.mem.eql(u8, a_slice, b_slice);
        }

        pub fn hash(self: @This(), a: []const u8) u32 {
            _ = self;
            return @truncate(Hash.hash(0, a));
        }
    };
};

pub const SourceLocation = struct {
    file: File.Index,
    line: u32,
    column: u32,

    pub const invalid: SourceLocation = .{
        .file = .invalid,
        .line = 0,
        .column = 0,
    };
};

pub const File = struct {
    directory_index: u32,
    basename: String,

    pub const Index = enum(u32) {
        invalid = std.math.maxInt(u32),
        _,
    };

    pub const MapContext = struct {
        string_bytes: []const u8,

        pub fn hash(self: MapContext, a: File) u32 {
            const a_basename = span(self.string_bytes[@intFromEnum(a.basename)..]);
            return @truncate(Hash.hash(a.directory_index, a_basename));
        }

        pub fn eql(self: MapContext, a: File, b: File, b_index: usize) bool {
            _ = b_index;
            if (a.directory_index != b.directory_index) return false;
            const a_basename = span(self.string_bytes[@intFromEnum(a.basename)..]);
            const b_basename = span(self.string_bytes[@intFromEnum(b.basename)..]);
            return std.mem.eql(u8, a_basename, b_basename);
        }
    };

    pub const SliceAdapter = struct {
        string_bytes: []const u8,

        pub const Entry = struct {
            directory_index: u32,
            basename: []const u8,
        };

        pub fn hash(self: @This(), a: Entry) u32 {
            _ = self;
            return @truncate(Hash.hash(a.directory_index, a.basename));
        }

        pub fn eql(self: @This(), a: Entry, b: File, b_index: usize) bool {
            _ = b_index;
            if (a.directory_index != b.directory_index) return false;
            const b_basename = span(self.string_bytes[@intFromEnum(b.basename)..]);
            return std.mem.eql(u8, a.basename, b_basename);
        }
    };
};

pub fn deinit(cov: *Coverage, gpa: Allocator) void {
    cov.directories.deinit(gpa);
    cov.files.deinit(gpa);
    cov.string_bytes.deinit(gpa);
    cov.* = undefined;
}

pub fn fileAt(cov: *Coverage, index: File.Index) *File {
    return &cov.files.keys()[@intFromEnum(index)];
}

pub fn stringAt(cov: *Coverage, index: String) [:0]const u8 {
    return span(cov.string_bytes.items[@intFromEnum(index)..]);
}

pub const ResolveAddressesDwarfError = Dwarf.ScanError;

pub fn resolveAddressesDwarf(
    cov: *Coverage,
    gpa: Allocator,
    sorted_pc_addrs: []const u64,
    /// Asserts its length equals length of `sorted_pc_addrs`.
    output: []SourceLocation,
    d: *Dwarf,
) ResolveAddressesDwarfError!void {
    assert(sorted_pc_addrs.len == output.len);
    assert(d.compile_units_sorted);

    var cu_i: usize = 0;
    var line_table_i: usize = 0;
    var cu: *Dwarf.CompileUnit = &d.compile_unit_list.items[0];
    var range = cu.pc_range.?;
    // Protects directories and files tables from other threads.
    cov.mutex.lock();
    defer cov.mutex.unlock();
    next_pc: for (sorted_pc_addrs, output) |pc, *out| {
        while (pc >= range.end) {
            cu_i += 1;
            if (cu_i >= d.compile_unit_list.items.len) {
                out.* = SourceLocation.invalid;
                continue :next_pc;
            }
            cu = &d.compile_unit_list.items[cu_i];
            line_table_i = 0;
            range = cu.pc_range orelse {
                out.* = SourceLocation.invalid;
                continue :next_pc;
            };
        }
        if (pc < range.start) {
            out.* = SourceLocation.invalid;
            continue :next_pc;
        }
        if (line_table_i == 0) {
            line_table_i = 1;
            cov.mutex.unlock();
            defer cov.mutex.lock();
            d.populateSrcLocCache(gpa, cu) catch |err| switch (err) {
                error.MissingDebugInfo, error.InvalidDebugInfo => {
                    out.* = SourceLocation.invalid;
                    cu_i += 1;
                    if (cu_i < d.compile_unit_list.items.len) {
                        cu = &d.compile_unit_list.items[cu_i];
                        line_table_i = 0;
                        if (cu.pc_range) |r| range = r;
                    }
                    continue :next_pc;
                },
                else => |e| return e,
            };
        }
        const slc = &cu.src_loc_cache.?;
        const table_addrs = slc.line_table.keys();
        while (line_table_i < table_addrs.len and table_addrs[line_table_i] < pc) line_table_i += 1;

        const entry = slc.line_table.values()[line_table_i - 1];
        const corrected_file_index = entry.file - @intFromBool(slc.version < 5);
        const file_entry = slc.files[corrected_file_index];
        const dir_path = slc.directories[file_entry.dir_index].path;
        try cov.string_bytes.ensureUnusedCapacity(gpa, dir_path.len + file_entry.path.len + 2);
        const dir_gop = try cov.directories.getOrPutContextAdapted(gpa, dir_path, String.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, String.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!dir_gop.found_existing)
            dir_gop.key_ptr.* = addStringAssumeCapacity(cov, dir_path);
        const file_gop = try cov.files.getOrPutContextAdapted(gpa, File.SliceAdapter.Entry{
            .directory_index = @intCast(dir_gop.index),
            .basename = file_entry.path,
        }, File.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, File.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!file_gop.found_existing) file_gop.key_ptr.* = .{
            .directory_index = @intCast(dir_gop.index),
            .basename = addStringAssumeCapacity(cov, file_entry.path),
        };
        out.* = .{
            .file = @enumFromInt(file_gop.index),
            .line = entry.line,
            .column = entry.column,
        };
    }
}

pub fn addStringAssumeCapacity(cov: *Coverage, s: []const u8) String {
    const result: String = @enumFromInt(cov.string_bytes.items.len);
    cov.string_bytes.appendSliceAssumeCapacity(s);
    cov.string_bytes.appendAssumeCapacity(0);
    return result;
}

fn span(s: []const u8) [:0]const u8 {
    return std.mem.sliceTo(@as([:0]const u8, @ptrCast(s)), 0);
}
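Note: `string_bytes` above interns NUL-terminated strings and hands out `String` offsets into that buffer. A hedged sketch of interning and reading back one path (assumes a `gpa: Allocator` and the declarations in this file; not code from the commit):

    var cov: Coverage = Coverage.init;
    defer cov.deinit(gpa);

    const dir_path = "lib/std/zig";
    // Reserve room for the bytes plus the NUL terminator first;
    // addStringAssumeCapacity does not allocate.
    try cov.string_bytes.ensureUnusedCapacity(gpa, dir_path.len + 1);
    const s = addStringAssumeCapacity(&cov, dir_path);
    assert(std.mem.eql(u8, cov.stringAt(s), dir_path));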
@@ -12,85 +12,31 @@ const Path = std.Build.Cache.Path;
 const Dwarf = std.debug.Dwarf;
 const page_size = std.mem.page_size;
 const assert = std.debug.assert;
-const Hash = std.hash.Wyhash;
+const Coverage = std.debug.Coverage;
+const SourceLocation = std.debug.Coverage.SourceLocation;
 
 const Info = @This();
 
 /// Sorted by key, ascending.
 address_map: std.AutoArrayHashMapUnmanaged(u64, Dwarf.ElfModule),
-
-/// Provides a globally-scoped integer index for directories.
-///
-/// As opposed to, for example, a directory index that is compilation-unit
-/// scoped inside a single ELF module.
-///
-/// String memory references the memory-mapped debug information.
-///
-/// Protected by `mutex`.
-directories: std.StringArrayHashMapUnmanaged(void),
-/// Provides a globally-scoped integer index for files.
-///
-/// String memory references the memory-mapped debug information.
-///
-/// Protected by `mutex`.
-files: std.ArrayHashMapUnmanaged(File, void, File.MapContext, false),
-/// Protects `directories` and `files`.
-mutex: std.Thread.Mutex,
-
-pub const SourceLocation = struct {
-    file: File.Index,
-    line: u32,
-    column: u32,
-
-    pub const invalid: SourceLocation = .{
-        .file = .invalid,
-        .line = 0,
-        .column = 0,
-    };
-};
-
-pub const File = struct {
-    directory_index: u32,
-    basename: []const u8,
-
-    pub const Index = enum(u32) {
-        invalid = std.math.maxInt(u32),
-        _,
-    };
-
-    pub const MapContext = struct {
-        pub fn hash(ctx: MapContext, a: File) u32 {
-            _ = ctx;
-            return @truncate(Hash.hash(a.directory_index, a.basename));
-        }
-
-        pub fn eql(ctx: MapContext, a: File, b: File, b_index: usize) bool {
-            _ = ctx;
-            _ = b_index;
-            return a.directory_index == b.directory_index and std.mem.eql(u8, a.basename, b.basename);
-        }
-    };
-};
+/// Externally managed, outlives this `Info` instance.
+coverage: *Coverage,
 
 pub const LoadError = Dwarf.ElfModule.LoadError;
 
-pub fn load(gpa: Allocator, path: Path) LoadError!Info {
+pub fn load(gpa: Allocator, path: Path, coverage: *Coverage) LoadError!Info {
     var sections: Dwarf.SectionArray = Dwarf.null_section_array;
     var elf_module = try Dwarf.ElfModule.loadPath(gpa, path, null, null, &sections, null);
     try elf_module.dwarf.sortCompileUnits();
     var info: Info = .{
         .address_map = .{},
-        .directories = .{},
-        .files = .{},
-        .mutex = .{},
+        .coverage = coverage,
     };
     try info.address_map.put(gpa, elf_module.base_address, elf_module);
     return info;
 }
 
 pub fn deinit(info: *Info, gpa: Allocator) void {
-    info.directories.deinit(gpa);
-    info.files.deinit(gpa);
     for (info.address_map.values()) |*elf_module| {
         elf_module.dwarf.deinit(gpa);
     }
@@ -98,98 +44,19 @@ pub fn deinit(info: *Info, gpa: Allocator) void {
     info.* = undefined;
 }
 
-pub fn fileAt(info: *Info, index: File.Index) *File {
-    return &info.files.keys()[@intFromEnum(index)];
-}
-
-pub const ResolveSourceLocationsError = Dwarf.ScanError;
+pub const ResolveAddressesError = Coverage.ResolveAddressesDwarfError;
 
 /// Given an array of virtual memory addresses, sorted ascending, outputs a
 /// corresponding array of source locations.
-pub fn resolveSourceLocations(
+pub fn resolveAddresses(
     info: *Info,
     gpa: Allocator,
     sorted_pc_addrs: []const u64,
     /// Asserts its length equals length of `sorted_pc_addrs`.
     output: []SourceLocation,
-) ResolveSourceLocationsError!void {
+) ResolveAddressesError!void {
     assert(sorted_pc_addrs.len == output.len);
     if (info.address_map.entries.len != 1) @panic("TODO");
     const elf_module = &info.address_map.values()[0];
-    return resolveSourceLocationsDwarf(info, gpa, sorted_pc_addrs, output, &elf_module.dwarf);
-}
-
-pub fn resolveSourceLocationsDwarf(
-    info: *Info,
-    gpa: Allocator,
-    sorted_pc_addrs: []const u64,
-    /// Asserts its length equals length of `sorted_pc_addrs`.
-    output: []SourceLocation,
-    d: *Dwarf,
-) ResolveSourceLocationsError!void {
-    assert(sorted_pc_addrs.len == output.len);
-    assert(d.compile_units_sorted);
-
-    var cu_i: usize = 0;
-    var line_table_i: usize = 0;
-    var cu: *Dwarf.CompileUnit = &d.compile_unit_list.items[0];
-    var range = cu.pc_range.?;
-    // Protects directories and files tables from other threads.
-    info.mutex.lock();
-    defer info.mutex.unlock();
-    next_pc: for (sorted_pc_addrs, output) |pc, *out| {
-        while (pc >= range.end) {
-            cu_i += 1;
-            if (cu_i >= d.compile_unit_list.items.len) {
-                out.* = SourceLocation.invalid;
-                continue :next_pc;
-            }
-            cu = &d.compile_unit_list.items[cu_i];
-            line_table_i = 0;
-            range = cu.pc_range orelse {
-                out.* = SourceLocation.invalid;
-                continue :next_pc;
-            };
-        }
-        if (pc < range.start) {
-            out.* = SourceLocation.invalid;
-            continue :next_pc;
-        }
-        if (line_table_i == 0) {
-            line_table_i = 1;
-            info.mutex.unlock();
-            defer info.mutex.lock();
-            d.populateSrcLocCache(gpa, cu) catch |err| switch (err) {
-                error.MissingDebugInfo, error.InvalidDebugInfo => {
-                    out.* = SourceLocation.invalid;
-                    cu_i += 1;
-                    if (cu_i < d.compile_unit_list.items.len) {
-                        cu = &d.compile_unit_list.items[cu_i];
-                        line_table_i = 0;
-                        if (cu.pc_range) |r| range = r;
-                    }
-                    continue :next_pc;
-                },
-                else => |e| return e,
-            };
-        }
-        const slc = &cu.src_loc_cache.?;
-        const table_addrs = slc.line_table.keys();
-        while (line_table_i < table_addrs.len and table_addrs[line_table_i] < pc) line_table_i += 1;
-
-        const entry = slc.line_table.values()[line_table_i - 1];
-        const corrected_file_index = entry.file - @intFromBool(slc.version < 5);
-        const file_entry = slc.files[corrected_file_index];
-        const dir_path = slc.directories[file_entry.dir_index].path;
-        const dir_gop = try info.directories.getOrPut(gpa, dir_path);
-        const file_gop = try info.files.getOrPut(gpa, .{
-            .directory_index = @intCast(dir_gop.index),
-            .basename = file_entry.path,
-        });
-        out.* = .{
-            .file = @enumFromInt(file_gop.index),
-            .line = entry.line,
-            .column = entry.column,
-        };
-    }
-}
+    return info.coverage.resolveAddressesDwarf(gpa, sorted_pc_addrs, output, &elf_module.dwarf);
+}
@@ -28,7 +28,10 @@ pub fn main() !void {
         .sub_path = cov_file_name,
     };
 
-    var debug_info = std.debug.Info.load(gpa, exe_path) catch |err| {
+    var coverage = std.debug.Coverage.init;
+    defer coverage.deinit(gpa);
+
+    var debug_info = std.debug.Info.load(gpa, exe_path, &coverage) catch |err| {
         fatal("failed to load debug info for {}: {s}", .{ exe_path, @errorName(err) });
     };
     defer debug_info.deinit(gpa);
@@ -50,14 +53,15 @@ pub fn main() !void {
     }
     assert(std.sort.isSorted(usize, pcs, {}, std.sort.asc(usize)));
 
-    const source_locations = try arena.alloc(std.debug.Info.SourceLocation, pcs.len);
-    try debug_info.resolveSourceLocations(gpa, pcs, source_locations);
+    const source_locations = try arena.alloc(std.debug.Coverage.SourceLocation, pcs.len);
+    try debug_info.resolveAddresses(gpa, pcs, source_locations);
 
     for (pcs, source_locations) |pc, sl| {
-        const file = debug_info.fileAt(sl.file);
-        const dir_name = debug_info.directories.keys()[file.directory_index];
+        const file = debug_info.coverage.fileAt(sl.file);
+        const dir_name = debug_info.coverage.directories.keys()[file.directory_index];
+        const dir_name_slice = debug_info.coverage.stringAt(dir_name);
         try stdout.print("{x}: {s}/{s}:{d}:{d}\n", .{
-            pc, dir_name, file.basename, sl.line, sl.column,
+            pc, dir_name_slice, debug_info.coverage.stringAt(file.basename), sl.line, sl.column,
         });
     }
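Note: for illustration, each line the updated tool prints has this shape (hypothetical address and source location):

    22d570: /home/andy/dev/zig/lib/std/zig/tokenizer.zig:431:21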