std.debug.Pdb: migrate more towards new Reader API

There was some bug in this branch, and rather than diagnosing it, I
fully finished porting over to the new Reader API. Did it fix the bug?
Andrew Kelley 2025-08-28 21:32:53 -07:00
parent 7da9e4b35e
commit 43fbc37a49
3 changed files with 188 additions and 169 deletions
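
For context, a minimal sketch of the shape of this port (the function name, buffer size, and local names below are illustrative only, not code from the commit): the deprecated GenericReader obtained from a File gives way to a buffered File.Reader whose `interface` field is the new std.Io.Reader, and readStruct/readInt/alloc-plus-readNoEof become takeStruct/takeInt/readAlloc.

const std = @import("std");
const pdb = std.pdb;
const File = std.fs.File;

// Sketch only; not code from the commit.
fn readerPortSketch(gpa: std.mem.Allocator, file: File) !void {
    // Before: deprecated GenericReader pulled directly off the File.
    //     const in = file.deprecatedReader();
    //     const superblock = try in.readStruct(pdb.SuperBlock);
    //     const n = try in.readInt(u32, .little);

    // After: a buffered File.Reader; `interface` is the new std.Io.Reader.
    var buffer: [4096]u8 = undefined;
    var file_reader = file.reader(&buffer);
    const r: *std.Io.Reader = &file_reader.interface;

    const superblock = try r.takeStruct(pdb.SuperBlock, .little);
    const n = try r.takeInt(u32, .little);
    const bytes = try r.readAlloc(gpa, n); // replaces alloc + readNoEof
    defer gpa.free(bytes);
    _ = superblock;
}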

View File

@@ -143,8 +143,8 @@ pub const failing: Reader = .{
 /// This is generally safe to `@constCast` because it has an empty buffer, so
 /// there is not really a way to accidentally attempt mutation of these fields.
-const ending_state: Reader = .fixed(&.{});
-pub const ending: *Reader = @constCast(&ending_state);
+pub const ending_instance: Reader = .fixed(&.{});
+pub const ending: *Reader = @constCast(&ending_instance);

 pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
     return .init(r, limit, buffer);

View File

@@ -6,7 +6,7 @@ const assert = std.debug.assert;
 const Pdb = @This();

-in_file: File,
+file_reader: *File.Reader,
 msf: Msf,
 allocator: Allocator,
 string_table: ?*MsfStream,
@@ -36,31 +36,28 @@ pub const Module = struct {
     }
 };

-pub fn init(allocator: Allocator, path: []const u8) !Pdb {
-    const file = try std.fs.cwd().openFile(path, .{});
-    errdefer file.close();
+pub fn init(gpa: Allocator, file_reader: *File.Reader) !Pdb {
     return .{
-        .in_file = file,
-        .allocator = allocator,
+        .file_reader = file_reader,
+        .allocator = gpa,
         .string_table = null,
         .dbi = null,
-        .msf = try Msf.init(allocator, file),
-        .modules = &[_]Module{},
-        .sect_contribs = &[_]pdb.SectionContribEntry{},
+        .msf = try Msf.init(gpa, file_reader),
+        .modules = &.{},
+        .sect_contribs = &.{},
         .guid = undefined,
         .age = undefined,
     };
 }

 pub fn deinit(self: *Pdb) void {
-    self.in_file.close();
-    self.msf.deinit(self.allocator);
+    const gpa = self.allocator;
+    self.msf.deinit(gpa);
     for (self.modules) |*module| {
-        module.deinit(self.allocator);
+        module.deinit(gpa);
     }
-    self.allocator.free(self.modules);
-    self.allocator.free(self.sect_contribs);
+    gpa.free(self.modules);
+    gpa.free(self.sect_contribs);
 }

 pub fn parseDbiStream(self: *Pdb) !void {
@@ -68,11 +65,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
         return error.InvalidDebugInfo;

     const gpa = self.allocator;
-    const deprecated_reader = stream.reader();
-    var adapted_buffer: [1024]u8 = undefined;
-    var adapted_reader = deprecated_reader.adaptToNewApi(&adapted_buffer);
-    const reader = &adapted_reader.new_interface;
+    const reader = &stream.interface;

     const header = try reader.takeStruct(std.pdb.DbiStreamHeader, .little);
     if (header.version_header != 19990903) // V70, only value observed by LLVM team
@@ -113,7 +106,7 @@ pub fn parseDbiStream(self: *Pdb) !void {
             this_record_len += march_forward_bytes;
         }

-        try modules.append(Module{
+        try modules.append(.{
             .mod_info = mod_info,
             .module_name = try module_name.toOwnedSlice(),
             .obj_file_name = try obj_file_name.toOwnedSlice(),
@@ -156,29 +149,28 @@ pub fn parseDbiStream(self: *Pdb) !void {
 }

 pub fn parseInfoStream(self: *Pdb) !void {
-    var stream = self.getStream(pdb.StreamType.pdb) orelse
-        return error.InvalidDebugInfo;
-    const reader = stream.reader();
+    var stream = self.getStream(pdb.StreamType.pdb) orelse return error.InvalidDebugInfo;
+    const reader = &stream.interface;

     // Parse the InfoStreamHeader.
-    const version = try reader.readInt(u32, .little);
-    const signature = try reader.readInt(u32, .little);
+    const version = try reader.takeInt(u32, .little);
+    const signature = try reader.takeInt(u32, .little);
     _ = signature;
-    const age = try reader.readInt(u32, .little);
-    const guid = try reader.readBytesNoEof(16);
+    const age = try reader.takeInt(u32, .little);
+    const guid = try reader.takeArray(16);

     if (version != 20000404) // VC70, only value observed by LLVM team
         return error.UnknownPDBVersion;

-    self.guid = guid;
+    self.guid = guid.*;
     self.age = age;

+    const gpa = self.allocator;
+
     // Find the string table.
     const string_table_index = str_tab_index: {
-        const name_bytes_len = try reader.readInt(u32, .little);
-        const name_bytes = try self.allocator.alloc(u8, name_bytes_len);
-        defer self.allocator.free(name_bytes);
-        try reader.readNoEof(name_bytes);
+        const name_bytes_len = try reader.takeInt(u32, .little);
+        const name_bytes = try reader.readAlloc(gpa, name_bytes_len);

         const HashTableHeader = extern struct {
             size: u32,
@@ -188,23 +180,23 @@ pub fn parseInfoStream(self: *Pdb) !void {
                 return cap * 2 / 3 + 1;
             }
         };
-        const hash_tbl_hdr = try reader.readStruct(HashTableHeader);
+        const hash_tbl_hdr = try reader.takeStruct(HashTableHeader, .little);
         if (hash_tbl_hdr.capacity == 0)
             return error.InvalidDebugInfo;

         if (hash_tbl_hdr.size > HashTableHeader.maxLoad(hash_tbl_hdr.capacity))
             return error.InvalidDebugInfo;

-        const present = try readSparseBitVector(&reader, self.allocator);
-        defer self.allocator.free(present);
+        const present = try readSparseBitVector(reader, gpa);
+        defer gpa.free(present);
         if (present.len != hash_tbl_hdr.size)
             return error.InvalidDebugInfo;
-        const deleted = try readSparseBitVector(&reader, self.allocator);
-        defer self.allocator.free(deleted);
+        const deleted = try readSparseBitVector(reader, gpa);
+        defer gpa.free(deleted);

         for (present) |_| {
-            const name_offset = try reader.readInt(u32, .little);
-            const name_index = try reader.readInt(u32, .little);
+            const name_offset = try reader.takeInt(u32, .little);
+            const name_index = try reader.takeInt(u32, .little);
             if (name_offset > name_bytes.len)
                 return error.InvalidDebugInfo;
             const name = std.mem.sliceTo(name_bytes[name_offset..], 0);
@@ -302,14 +294,12 @@ pub fn getLineNumberInfo(self: *Pdb, module: *Module, address: u64) !std.debug.S
                 const strtab_offset = @sizeOf(pdb.StringTableHeader) + chksum_hdr.file_name_offset;
                 try self.string_table.?.seekTo(strtab_offset);
                 const source_file_name = s: {
-                    const deprecated_reader = self.string_table.?.reader();
-                    var adapted_buffer: [1024]u8 = undefined;
-                    var adapted_reader = deprecated_reader.adaptToNewApi(&adapted_buffer);
+                    const string_reader = &self.string_table.?.interface;
                     var source_file_name: std.Io.Writer.Allocating = .init(gpa);
                     defer source_file_name.deinit();
-                    _ = try adapted_reader.new_interface.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
-                    assert(adapted_reader.new_interface.buffered()[0] == 0); // TODO change streamDelimiterLimit API
-                    adapted_reader.new_interface.toss(1);
+                    _ = try string_reader.streamDelimiterLimit(&source_file_name.writer, 0, .limited(1024));
+                    assert(string_reader.buffered()[0] == 0); // TODO change streamDelimiterLimit API
+                    string_reader.toss(1);
                     break :s try source_file_name.toOwnedSlice();
                 };
                 errdefer gpa.free(source_file_name);
@@ -366,19 +356,16 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
     const stream = self.getStreamById(mod.mod_info.module_sym_stream) orelse
         return error.MissingDebugInfo;
-    const reader = stream.reader();
+    const reader = &stream.interface;

-    const signature = try reader.readInt(u32, .little);
+    const signature = try reader.takeInt(u32, .little);
     if (signature != 4)
         return error.InvalidDebugInfo;

-    mod.symbols = try self.allocator.alloc(u8, mod.mod_info.sym_byte_size - 4);
-    errdefer self.allocator.free(mod.symbols);
-    try reader.readNoEof(mod.symbols);
+    const gpa = self.allocator;

-    mod.subsect_info = try self.allocator.alloc(u8, mod.mod_info.c13_byte_size);
-    errdefer self.allocator.free(mod.subsect_info);
-    try reader.readNoEof(mod.subsect_info);
+    mod.symbols = try reader.readAlloc(gpa, mod.mod_info.sym_byte_size - 4);
+    mod.subsect_info = try reader.readAlloc(gpa, mod.mod_info.c13_byte_size);

     var sect_offset: usize = 0;
     var skip_len: usize = undefined;
@@ -404,8 +391,7 @@ pub fn getModule(self: *Pdb, index: usize) !?*Module {
 }

 pub fn getStreamById(self: *Pdb, id: u32) ?*MsfStream {
-    if (id >= self.msf.streams.len)
-        return null;
+    if (id >= self.msf.streams.len) return null;
     return &self.msf.streams[id];
 }
@@ -419,17 +405,14 @@ const Msf = struct {
     directory: MsfStream,
     streams: []MsfStream,

-    fn init(allocator: Allocator, file: File) !Msf {
-        const in = file.deprecatedReader();
-        const superblock = try in.readStruct(pdb.SuperBlock);
-
-        // Sanity checks
+    fn init(gpa: Allocator, file_reader: *File.Reader) !Msf {
+        const superblock = try file_reader.interface.takeStruct(pdb.SuperBlock, .little);
         if (!std.mem.eql(u8, &superblock.file_magic, pdb.SuperBlock.expect_magic))
             return error.InvalidDebugInfo;
         if (superblock.free_block_map_block != 1 and superblock.free_block_map_block != 2)
             return error.InvalidDebugInfo;

-        const file_len = try file.getEndPos();
+        const file_len = try file_reader.getSize();
         if (superblock.num_blocks * superblock.block_size != file_len)
             return error.InvalidDebugInfo;

         switch (superblock.block_size) {
@@ -442,163 +425,182 @@ const Msf = struct {
         if (dir_block_count > superblock.block_size / @sizeOf(u32))
             return error.UnhandledBigDirectoryStream; // cf. BlockMapAddr comment.

-        try file.seekTo(superblock.block_size * superblock.block_map_addr);
-        const dir_blocks = try allocator.alloc(u32, dir_block_count);
+        try file_reader.seekTo(superblock.block_size * superblock.block_map_addr);
+        const dir_blocks = try gpa.alloc(u32, dir_block_count);
         for (dir_blocks) |*b| {
-            b.* = try in.readInt(u32, .little);
+            b.* = try file_reader.interface.takeInt(u32, .little);
         }
-        var directory = MsfStream.init(
-            superblock.block_size,
-            file,
-            dir_blocks,
-        );
+        var directory_buffer: [64]u8 = undefined;
+        var directory = MsfStream.init(superblock.block_size, file_reader, dir_blocks, &directory_buffer);

-        const begin = directory.pos;
-        const stream_count = try directory.reader().readInt(u32, .little);
-        const stream_sizes = try allocator.alloc(u32, stream_count);
-        defer allocator.free(stream_sizes);
+        const begin = directory.logicalPos();
+        const stream_count = try directory.interface.takeInt(u32, .little);
+        const stream_sizes = try gpa.alloc(u32, stream_count);
+        defer gpa.free(stream_sizes);

         // Microsoft's implementation uses @as(u32, -1) for inexistent streams.
         // These streams are not used, but still participate in the file
         // and must be taken into account when resolving stream indices.
-        const Nil = 0xFFFFFFFF;
+        const nil_size = 0xFFFFFFFF;
         for (stream_sizes) |*s| {
-            const size = try directory.reader().readInt(u32, .little);
-            s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.block_size);
+            const size = try directory.interface.takeInt(u32, .little);
+            s.* = if (size == nil_size) 0 else blockCountFromSize(size, superblock.block_size);
         }

-        const streams = try allocator.alloc(MsfStream, stream_count);
+        const streams = try gpa.alloc(MsfStream, stream_count);
+        errdefer gpa.free(streams);
         for (streams, 0..) |*stream, i| {
             const size = stream_sizes[i];
             if (size == 0) {
-                stream.* = MsfStream{
-                    .blocks = &[_]u32{},
-                };
+                stream.* = .empty;
             } else {
-                var blocks = try allocator.alloc(u32, size);
-                var j: u32 = 0;
-                while (j < size) : (j += 1) {
-                    const block_id = try directory.reader().readInt(u32, .little);
+                const blocks = try gpa.alloc(u32, size);
+                errdefer gpa.free(blocks);
+                for (blocks) |*block| {
+                    const block_id = try directory.interface.takeInt(u32, .little);
                     const n = (block_id % superblock.block_size);
                     // 0 is for pdb.SuperBlock, 1 and 2 for FPMs.
                     if (block_id == 0 or n == 1 or n == 2 or block_id * superblock.block_size > file_len)
                         return error.InvalidBlockIndex;
-                    blocks[j] = block_id;
+                    block.* = block_id;
                 }

-                stream.* = MsfStream.init(
-                    superblock.block_size,
-                    file,
-                    blocks,
-                );
+                const buffer = try gpa.alloc(u8, 64);
+                errdefer gpa.free(buffer);
+                stream.* = .init(superblock.block_size, file_reader, blocks, buffer);
             }
         }

-        const end = directory.pos;
+        const end = directory.logicalPos();
         if (end - begin != superblock.num_directory_bytes)
             return error.InvalidStreamDirectory;

-        return Msf{
+        return .{
             .directory = directory,
             .streams = streams,
         };
     }

-    fn deinit(self: *Msf, allocator: Allocator) void {
-        allocator.free(self.directory.blocks);
+    fn deinit(self: *Msf, gpa: Allocator) void {
+        gpa.free(self.directory.blocks);
         for (self.streams) |*stream| {
-            allocator.free(stream.blocks);
+            gpa.free(stream.interface.buffer);
+            gpa.free(stream.blocks);
         }
-        allocator.free(self.streams);
+        gpa.free(self.streams);
     }
 };

 const MsfStream = struct {
-    in_file: File = undefined,
-    pos: u64 = undefined,
-    blocks: []u32 = undefined,
-    block_size: u32 = undefined,
+    file_reader: *File.Reader,
+    next_read_pos: u64,
+    blocks: []u32,
+    block_size: u32,
+    interface: std.Io.Reader,
+    err: ?Error,

-    pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).@"fn".return_type.?).error_union.error_set;
+    const Error = File.Reader.SeekError;

-    fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
-        const stream = MsfStream{
-            .in_file = file,
-            .pos = 0,
-            .blocks = blocks,
-            .block_size = block_size,
-        };
-        return stream;
+    const empty: MsfStream = .{
+        .file_reader = undefined,
+        .next_read_pos = 0,
+        .blocks = &.{},
+        .block_size = undefined,
+        .interface = .ending_instance,
+        .err = null,
+    };
+
+    fn init(block_size: u32, file_reader: *File.Reader, blocks: []u32, buffer: []u8) MsfStream {
+        return .{
+            .file_reader = file_reader,
+            .next_read_pos = 0,
+            .blocks = blocks,
+            .block_size = block_size,
+            .interface = .{
+                .vtable = &.{ .stream = stream },
+                .buffer = buffer,
+                .seek = 0,
+                .end = 0,
+            },
+            .err = null,
+        };
     }

-    fn read(self: *MsfStream, buffer: []u8) !usize {
-        var block_id = @as(usize, @intCast(self.pos / self.block_size));
-        if (block_id >= self.blocks.len) return 0; // End of Stream
-        var block = self.blocks[block_id];
-        var offset = self.pos % self.block_size;
+    fn stream(r: *std.Io.Reader, w: *std.Io.Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
+        const ms: *MsfStream = @alignCast(@fieldParentPtr("interface", r));

-        try self.in_file.seekTo(block * self.block_size + offset);
-        const in = self.in_file.deprecatedReader();
+        var block_id: usize = @intCast(ms.next_read_pos / ms.block_size);
+        if (block_id >= ms.blocks.len) return error.EndOfStream;
+        var block = ms.blocks[block_id];
+        var offset = ms.next_read_pos % ms.block_size;

-        var size: usize = 0;
-        var rem_buffer = buffer;
-        while (size < buffer.len) {
-            const size_to_read = @min(self.block_size - offset, rem_buffer.len);
-            size += try in.read(rem_buffer[0..size_to_read]);
-            rem_buffer = buffer[size..];
-            offset += size_to_read;
+        ms.file_reader.seekTo(block * ms.block_size + offset) catch |err| {
+            ms.err = err;
+            return error.ReadFailed;
+        };
+
+        var remaining = @intFromEnum(limit);
+        while (remaining != 0) {
+            const stream_len: usize = @min(remaining, ms.block_size - offset);
+            const n = try ms.file_reader.interface.stream(w, .limited(stream_len));
+            remaining -= n;
+            offset += n;

             // If we're at the end of a block, go to the next one.
-            if (offset == self.block_size) {
+            if (offset == ms.block_size) {
                 offset = 0;
                 block_id += 1;
-                if (block_id >= self.blocks.len) break; // End of Stream
-                block = self.blocks[block_id];
-                try self.in_file.seekTo(block * self.block_size);
+                if (block_id >= ms.blocks.len) break; // End of Stream
+                block = ms.blocks[block_id];
+                ms.file_reader.seekTo(block * ms.block_size) catch |err| {
+                    ms.err = err;
+                    return error.ReadFailed;
+                };
             }
         }

-        self.pos += buffer.len;
-        return buffer.len;
+        const total = @intFromEnum(limit) - remaining;
+        ms.next_read_pos += total;
+        return total;
     }

-    pub fn seekBy(self: *MsfStream, len: i64) !void {
-        self.pos = @as(u64, @intCast(@as(i64, @intCast(self.pos)) + len));
-        if (self.pos >= self.blocks.len * self.block_size)
-            return error.EOF;
+    pub fn logicalPos(ms: *const MsfStream) u64 {
+        return ms.next_read_pos - ms.interface.bufferedLen();
     }

-    pub fn seekTo(self: *MsfStream, len: u64) !void {
-        self.pos = len;
-        if (self.pos >= self.blocks.len * self.block_size)
-            return error.EOF;
+    pub fn seekBy(ms: *MsfStream, len: i64) !void {
+        ms.next_read_pos = @as(u64, @intCast(@as(i64, @intCast(ms.logicalPos())) + len));
+        if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+        ms.interface.tossBuffered();
     }

-    fn getSize(self: *const MsfStream) u64 {
-        return self.blocks.len * self.block_size;
+    pub fn seekTo(ms: *MsfStream, len: u64) !void {
+        ms.next_read_pos = len;
+        if (ms.next_read_pos >= ms.blocks.len * ms.block_size) return error.EOF;
+        ms.interface.tossBuffered();
     }

-    fn getFilePos(self: MsfStream) u64 {
-        const block_id = self.pos / self.block_size;
-        const block = self.blocks[block_id];
-        const offset = self.pos % self.block_size;
-        return block * self.block_size + offset;
+    fn getSize(ms: *const MsfStream) u64 {
+        return ms.blocks.len * ms.block_size;
     }

-    pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
-        return .{ .context = self };
+    fn getFilePos(ms: *const MsfStream) u64 {
+        const pos = ms.logicalPos();
+        const block_id = pos / ms.block_size;
+        const block = ms.blocks[block_id];
+        const offset = pos % ms.block_size;
+        return block * ms.block_size + offset;
     }
 };

-fn readSparseBitVector(stream: anytype, allocator: Allocator) ![]u32 {
-    const num_words = try stream.readInt(u32, .little);
+fn readSparseBitVector(reader: *std.Io.Reader, allocator: Allocator) ![]u32 {
+    const num_words = try reader.takeInt(u32, .little);
     var list = std.array_list.Managed(u32).init(allocator);
     errdefer list.deinit();
     var word_i: u32 = 0;
     while (word_i != num_words) : (word_i += 1) {
-        const word = try stream.readInt(u32, .little);
+        const word = try reader.takeInt(u32, .little);
         var bit_i: u5 = 0;
         while (true) : (bit_i += 1) {
             if (word & (@as(u32, 1) << bit_i) != 0) {

View File

@@ -713,22 +713,26 @@ pub const Module = switch (native_os) {
     },
     .uefi, .windows => struct {
         base_address: usize,
-        pdb: ?Pdb = null,
-        dwarf: ?Dwarf = null,
+        pdb: ?Pdb,
+        dwarf: ?Dwarf,
         coff_image_base: u64,

         /// Only used if pdb is non-null
         coff_section_headers: []coff.SectionHeader,

-        pub fn deinit(self: *@This(), allocator: Allocator) void {
+        pub fn deinit(self: *@This(), gpa: Allocator) void {
             if (self.dwarf) |*dwarf| {
-                dwarf.deinit(allocator);
+                dwarf.deinit(gpa);
             }

             if (self.pdb) |*p| {
+                gpa.free(p.file_reader.interface.buffer);
+                gpa.destroy(p.file_reader);
                 p.deinit();
-                allocator.free(self.coff_section_headers);
+                gpa.free(self.coff_section_headers);
             }
+
+            self.* = undefined;
         }

         fn getSymbolFromPdb(self: *@This(), relocated_address: usize) !?std.debug.Symbol {
@@ -970,23 +974,25 @@ fn readMachODebugInfo(allocator: Allocator, macho_file: File) !Module {
     };
 }

-fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
+fn readCoffDebugInfo(gpa: Allocator, coff_obj: *coff.Coff) !Module {
     nosuspend {
         var di: Module = .{
             .base_address = undefined,
             .coff_image_base = coff_obj.getImageBase(),
             .coff_section_headers = undefined,
+            .pdb = null,
+            .dwarf = null,
         };

         if (coff_obj.getSectionByName(".debug_info")) |_| {
             // This coff file has embedded DWARF debug info
             var sections: Dwarf.SectionArray = Dwarf.null_section_array;
-            errdefer for (sections) |section| if (section) |s| if (s.owned) allocator.free(s.data);
+            errdefer for (sections) |section| if (section) |s| if (s.owned) gpa.free(s.data);
             inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
                 sections[i] = if (coff_obj.getSectionByName("." ++ section.name)) |section_header| blk: {
                     break :blk .{
-                        .data = try coff_obj.getSectionDataAlloc(section_header, allocator),
+                        .data = try coff_obj.getSectionDataAlloc(section_header, gpa),
                         .virtual_address = section_header.virtual_address,
                         .owned = true,
                     };
@@ -999,7 +1005,7 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
                 .is_macho = false,
             };

-            try Dwarf.open(&dwarf, allocator);
+            try Dwarf.open(&dwarf, gpa);
             di.dwarf = dwarf;
         }
@@ -1008,20 +1014,31 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
             if (fs.path.isAbsolute(raw_path)) {
                 break :blk raw_path;
             } else {
-                const self_dir = try fs.selfExeDirPathAlloc(allocator);
-                defer allocator.free(self_dir);
-                break :blk try fs.path.join(allocator, &.{ self_dir, raw_path });
+                const self_dir = try fs.selfExeDirPathAlloc(gpa);
+                defer gpa.free(self_dir);
+                break :blk try fs.path.join(gpa, &.{ self_dir, raw_path });
             }
         };
-        defer if (path.ptr != raw_path.ptr) allocator.free(path);
+        defer if (path.ptr != raw_path.ptr) gpa.free(path);

-        di.pdb = Pdb.init(allocator, path) catch |err| switch (err) {
+        const pdb_file = std.fs.cwd().openFile(path, .{}) catch |err| switch (err) {
             error.FileNotFound, error.IsDir => {
                 if (di.dwarf == null) return error.MissingDebugInfo;
                 return di;
             },
-            else => return err,
+            else => |e| return e,
         };
+        errdefer pdb_file.close();
+
+        const pdb_file_reader_buffer = try gpa.alloc(u8, 4096);
+        errdefer gpa.free(pdb_file_reader_buffer);
+        const pdb_file_reader = try gpa.create(File.Reader);
+        errdefer gpa.destroy(pdb_file_reader);
+        pdb_file_reader.* = pdb_file.reader(pdb_file_reader_buffer);
+
+        di.pdb = try Pdb.init(gpa, pdb_file_reader);
+
         try di.pdb.?.parseInfoStream();
         try di.pdb.?.parseDbiStream();
@@ -1029,8 +1046,8 @@ fn readCoffDebugInfo(allocator: Allocator, coff_obj: *coff.Coff) !Module {
             return error.InvalidDebugInfo;

         // Only used by the pdb path
-        di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(allocator);
-        errdefer allocator.free(di.coff_section_headers);
+        di.coff_section_headers = try coff_obj.getSectionHeadersAlloc(gpa);
+        errdefer gpa.free(di.coff_section_headers);

         return di;
     }