diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig
index 0d6f451947..054fe1eb27 100644
--- a/lib/compiler/test_runner.zig
+++ b/lib/compiler/test_runner.zig
@@ -184,7 +184,7 @@ fn mainServer() !void {
                 const test_fn = builtin.test_functions[index];
                 const entry_addr = @intFromPtr(test_fn.func);
-                try server.serveU64Message(.fuzz_start_addr, entry_addr);
+                try server.serveU64Message(.fuzz_start_addr, fuzz_abi.fuzzer_unslide_address(entry_addr));
 
                 defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
                 is_fuzz_test = false;
                 fuzz_test_index = index;
diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig
index 6b7a846e4c..5c452340f6 100644
--- a/lib/fuzzer.zig
+++ b/lib/fuzzer.zig
@@ -116,13 +116,18 @@ const Executable = struct {
                 "failed to init memory map for coverage file '{s}': {t}",
                 .{ &coverage_file_name, e },
             );
-            map.appendSliceAssumeCapacity(mem.asBytes(&abi.SeenPcsHeader{
+            map.appendSliceAssumeCapacity(@ptrCast(&abi.SeenPcsHeader{
                 .n_runs = 0,
                 .unique_runs = 0,
                 .pcs_len = pcs.len,
             }));
             map.appendNTimesAssumeCapacity(0, pc_bitset_usizes * @sizeOf(usize));
-            map.appendSliceAssumeCapacity(mem.sliceAsBytes(pcs));
+            // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+            // applied). We need to translate these to the virtual addresses as on disk.
+            for (pcs) |pc| {
+                const pc_vaddr = fuzzer_unslide_address(pc);
+                map.appendSliceAssumeCapacity(@ptrCast(&pc_vaddr));
+            }
             return map;
         } else {
             const size = coverage_file.getEndPos() catch |e| panic(
@@ -215,7 +220,16 @@ const Executable = struct {
             .{ self.pc_counters.len, pcs.len },
         );
-        self.pc_digest = std.hash.Wyhash.hash(0, mem.sliceAsBytes(pcs));
+        self.pc_digest = digest: {
+            // Relocations have been applied to `pcs` so it contains runtime addresses (with slide
+            // applied). We need to translate these to the virtual addresses as on disk.
+            var h: std.hash.Wyhash = .init(0);
+            for (pcs) |pc| {
+                const pc_vaddr = fuzzer_unslide_address(pc);
+                h.update(@ptrCast(&pc_vaddr));
+            }
+            break :digest h.final();
+        };
 
         self.shared_seen_pcs = getCoverageFile(cache_dir, pcs, self.pc_digest);
 
         return self;
@@ -622,6 +636,14 @@ export fn fuzzer_main(limit_kind: abi.LimitKind, amount: u64) void {
     }
 }
 
+export fn fuzzer_unslide_address(addr: usize) usize {
+    const si = std.debug.getSelfDebugInfo() catch @compileError("unsupported");
+    const slide = si.getModuleSlide(std.debug.getDebugInfoAllocator(), addr) catch |err| {
+        std.debug.panic("failed to find virtual address slide: {t}", .{err});
+    };
+    return addr - slide;
+}
+
 /// Helps determine run uniqueness in the face of recursion.
 /// Currently not used by the fuzzer.
 export threadlocal var __sancov_lowest_stack: usize = 0;
@@ -1185,13 +1207,13 @@ const Mutation = enum {
             const j = rng.uintAtMostBiased(usize, corpus[splice_i].len - len);
             out.appendSliceAssumeCapacity(corpus[splice_i][j..][0..len]);
         },
-        .@"const" => out.appendSliceAssumeCapacity(mem.asBytes(
+        .@"const" => out.appendSliceAssumeCapacity(@ptrCast(
             &data_ctx[rng.uintLessThanBiased(usize, data_ctx.len)],
         )),
-        .small => out.appendSliceAssumeCapacity(mem.asBytes(
+        .small => out.appendSliceAssumeCapacity(@ptrCast(
             &mem.nativeTo(data_ctx[0], rng.int(SmallValue), data_ctx[1]),
         )),
-        .few => out.appendSliceAssumeCapacity(mem.asBytes(
+        .few => out.appendSliceAssumeCapacity(@ptrCast(
             &fewValue(rng, data_ctx[0], data_ctx[1]),
         )),
     }
diff --git a/lib/std/Build/abi.zig b/lib/std/Build/abi.zig
index eb8f6cb1be..b7c1e7379d 100644
--- a/lib/std/Build/abi.zig
+++ b/lib/std/Build/abi.zig
@@ -145,6 +145,7 @@ pub const fuzz = struct {
     pub extern fn fuzzer_init_test(test_one: TestOne, unit_test_name: Slice) void;
     pub extern fn fuzzer_new_input(bytes: Slice) void;
     pub extern fn fuzzer_main(limit_kind: LimitKind, amount: u64) void;
+    pub extern fn fuzzer_unslide_address(addr: usize) usize;
 
     pub const Slice = extern struct {
         ptr: [*]const u8,
diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 182ea94766..29c0731f4e 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -1367,7 +1367,7 @@ test printLineFromFile {
 
 /// The returned allocator should be thread-safe if the compilation is multi-threaded, because
 /// multiple threads could capture and/or print stack traces simultaneously.
-fn getDebugInfoAllocator() Allocator {
+pub fn getDebugInfoAllocator() Allocator {
     // Allow overriding the debug info allocator by exposing `root.debug.getDebugInfoAllocator`.
     if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getDebugInfoAllocator")) {
         return root.debug.getDebugInfoAllocator();
diff --git a/lib/std/debug/SelfInfo/Elf.zig b/lib/std/debug/SelfInfo/Elf.zig
index 5036d40197..59c0b42451 100644
--- a/lib/std/debug/SelfInfo/Elf.zig
+++ b/lib/std/debug/SelfInfo/Elf.zig
@@ -80,6 +80,11 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]cons
     if (module.name.len == 0) return error.MissingDebugInfo;
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    const module = try si.findModule(gpa, address, .shared);
+    defer si.rwlock.unlockShared();
+    return module.load_offset;
+}
 
 pub const can_unwind: bool = s: {
     // The DWARF code can't deal with ILP32 ABIs yet: https://github.com/ziglang/zig/issues/25447
diff --git a/lib/std/debug/SelfInfo/MachO.zig b/lib/std/debug/SelfInfo/MachO.zig
index 83adb6dcd4..94d50bbf77 100644
--- a/lib/std/debug/SelfInfo/MachO.zig
+++ b/lib/std/debug/SelfInfo/MachO.zig
@@ -82,6 +82,20 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]cons
     defer si.mutex.unlock();
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    const module = try si.findModule(gpa, address);
+    defer si.mutex.unlock();
+    const header: *std.macho.mach_header_64 = @ptrFromInt(module.text_base);
+    const raw_macho: [*]u8 = @ptrCast(header);
+    var it = macho.LoadCommandIterator.init(header, raw_macho[@sizeOf(macho.mach_header_64)..][0..header.sizeofcmds]) catch unreachable;
+    const text_vmaddr = while (it.next() catch unreachable) |load_cmd| {
+        if (load_cmd.hdr.cmd != .SEGMENT_64) continue;
+        const segment_cmd = load_cmd.cast(macho.segment_command_64).?;
+        if (!mem.eql(u8, segment_cmd.segName(), "__TEXT")) continue;
+        break segment_cmd.vmaddr;
+    } else unreachable;
+    return module.text_base - text_vmaddr;
+}
 
 pub const can_unwind: bool = true;
 pub const UnwindContext = std.debug.Dwarf.SelfUnwinder;
diff --git a/lib/std/debug/SelfInfo/Windows.zig b/lib/std/debug/SelfInfo/Windows.zig
index 70009217db..306287a9e7 100644
--- a/lib/std/debug/SelfInfo/Windows.zig
+++ b/lib/std/debug/SelfInfo/Windows.zig
@@ -33,6 +33,12 @@ pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]cons
     const module = try si.findModule(gpa, address);
     return module.name;
 }
+pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
+    si.mutex.lock();
+    defer si.mutex.unlock();
+    const module = try si.findModule(gpa, address);
+    return module.base_address;
+}
 
 pub const can_unwind: bool = switch (builtin.cpu.arch) {
     else => true,