Merge pull request #13820 from ziglang/dwarf-multiple-files

dwarf: add support for multiple source files
Jakub Konka, 2022-12-09 14:11:28 +01:00, committed by GitHub
commit d88eb75a69
5 changed files with 555 additions and 443 deletions
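
Most of the churn below is one plumbing change: the Dwarf linker component now holds a pointer to its owning link.File, passed once at Dwarf.init (as &self.base, &wasm_bin.base, etc.), so flushModule, writeDbgAbbrev, writeDbgInfoHeader, writeDbgAranges, writeDbgLineHeader, commitDeclState and updateDeclLineNumber all drop their per-call file argument. A minimal sketch of the new shape, not part of the diff; the types, field and parameter names here are simplified stand-ins, not the real declarations:

const std = @import("std");

// Stand-in for link.File; the real type carries the output file handle,
// allocator, and linker options.
const File = struct {};

const Dwarf = struct {
    gpa: std.mem.Allocator,
    bin_file: *File, // stored once at init instead of threaded through every call

    fn init(gpa: std.mem.Allocator, bin_file: *File) Dwarf {
        return .{ .gpa = gpa, .bin_file = bin_file };
    }

    // Before this commit the method took the file as a parameter,
    // e.g. flushModule(self, file: *link.File, module: *Module).
    fn flushModule(self: Dwarf) void {
        _ = self.bin_file; // the stored reference replaces the old parameter
    }
};

Callers construct the Dwarf with the address of their own base field, which is why every `&self.base` / `&wasm.base` / `&macho_file.base` argument disappears from the hunks below.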

File diff suppressed because it is too large

View File

@@ -312,7 +312,7 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf {
};
var dwarf: ?Dwarf = if (!options.strip and options.module != null)
Dwarf.init(gpa, .elf, options.target)
Dwarf.init(gpa, &self.base, options.target)
else
null;
@@ -931,6 +931,104 @@ pub fn populateMissingMetadata(self: *Elf) !void {
}
}
fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u64) !void {
// TODO Also detect virtual address collisions.
const shdr = &self.sections.items[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
const sym = self.local_symbols.items[last.local_sym_index];
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
break :blk shdr.sh_size;
} else 0;
shdr.sh_size = 0;
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getString(shdr.sh_name),
new_offset,
new_offset + existing_size,
});
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size);
if (amt != existing_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
}
shdr.sh_size = needed_size;
phdr.p_memsz = needed_size;
phdr.p_filesz = needed_size;
self.markDirty(shdr_index, phdr_index);
}
pub fn growNonAllocSection(
self: *Elf,
shdr_index: u16,
needed_size: u64,
min_alignment: u32,
requires_file_copy: bool,
) !void {
const shdr = &self.sections.items[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
const existing_size = if (self.symtab_section_index.? == shdr_index) blk: {
const sym_size: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Sym),
.p64 => @sizeOf(elf.Elf64_Sym),
};
break :blk @as(u64, shdr.sh_info) * sym_size;
} else shdr.sh_size;
shdr.sh_size = 0;
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, min_alignment);
log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getString(shdr.sh_name), shdr.sh_offset, new_offset });
if (requires_file_copy) {
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
}
shdr.sh_offset = new_offset;
}
shdr.sh_size = needed_size; // anticipating adding the global symbols later
self.markDirty(shdr_index, null);
}
pub fn markDirty(self: *Elf, shdr_index: u16, phdr_index: ?u16) void {
self.shdr_table_dirty = true; // TODO look into only writing one section
if (phdr_index) |_| {
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
}
if (self.dwarf) |_| {
if (self.debug_info_section_index.? == shdr_index) {
self.debug_info_header_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
self.debug_line_header_dirty = true;
} else if (self.debug_abbrev_section_index.? == shdr_index) {
self.debug_abbrev_section_dirty = true;
} else if (self.debug_str_section_index.? == shdr_index) {
self.debug_strtab_dirty = true;
} else if (self.debug_aranges_section_index.? == shdr_index) {
self.debug_aranges_section_dirty = true;
}
}
}
pub fn flush(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node) link.File.FlushError!void {
if (self.base.options.emit == null) {
if (build_options.have_llvm) {
@@ -972,7 +1070,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const foreign_endian = target_endian != builtin.cpu.arch.endian();
if (self.dwarf) |*dw| {
try dw.flushModule(&self.base, module);
try dw.flushModule(module);
}
{
@@ -1020,7 +1118,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (self.dwarf) |*dw| {
if (self.debug_abbrev_section_dirty) {
try dw.writeDbgAbbrev(&self.base);
try dw.writeDbgAbbrev();
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_abbrev_section_index.?);
@@ -1034,7 +1132,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
const low_pc = text_phdr.p_vaddr;
const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
try dw.writeDbgInfoHeader(&self.base, module, low_pc, high_pc);
try dw.writeDbgInfoHeader(module, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
@@ -1042,7 +1140,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_phdr = &self.program_headers.items[self.phdr_load_re_index.?];
try dw.writeDbgAranges(&self.base, text_phdr.p_vaddr, text_phdr.p_memsz);
try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_aranges_section_index.?);
@@ -1051,7 +1149,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
if (self.debug_line_header_dirty) {
try dw.writeDbgLineHeader(&self.base, module);
try dw.writeDbgLineHeader(module);
self.debug_line_header_dirty = false;
}
}
@@ -1103,45 +1201,21 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
}
{
const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
if (self.shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
const needed_size = self.shstrtab.items.len;
if (needed_size > allocated_size) {
shstrtab_sect.sh_size = 0; // free the space
shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
shstrtab_sect.sh_size = needed_size;
log.debug("writing shstrtab start=0x{x} end=0x{x}", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
const shdr_index = self.shstrtab_index.?;
if (self.shstrtab_dirty or self.shstrtab.items.len != self.sections.items[shdr_index].sh_size) {
try self.growNonAllocSection(shdr_index, self.shstrtab.items.len, 1, false);
const shstrtab_sect = self.sections.items[shdr_index];
try self.base.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.shstrtab_index.?);
}
self.shstrtab_dirty = false;
}
}
if (self.dwarf) |dwarf| {
const debug_strtab_sect = &self.sections.items[self.debug_str_section_index.?];
if (self.debug_strtab_dirty or dwarf.strtab.items.len != debug_strtab_sect.sh_size) {
const allocated_size = self.allocatedSize(debug_strtab_sect.sh_offset);
const needed_size = dwarf.strtab.items.len;
if (needed_size > allocated_size) {
debug_strtab_sect.sh_size = 0; // free the space
debug_strtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
}
debug_strtab_sect.sh_size = needed_size;
log.debug("debug_strtab start=0x{x} end=0x{x}", .{ debug_strtab_sect.sh_offset, debug_strtab_sect.sh_offset + needed_size });
const shdr_index = self.debug_str_section_index.?;
if (self.debug_strtab_dirty or dwarf.strtab.items.len != self.sections.items[shdr_index].sh_size) {
try self.growNonAllocSection(shdr_index, dwarf.strtab.items.len, 1, false);
const debug_strtab_sect = self.sections.items[shdr_index];
try self.base.file.?.pwriteAll(dwarf.strtab.items, debug_strtab_sect.sh_offset);
if (!self.shdr_table_dirty) {
// Then it won't get written with the others and we need to do it.
try self.writeSectHeader(self.debug_str_section_index.?);
}
self.debug_strtab_dirty = false;
}
}
@@ -2134,27 +2208,10 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const text_capacity = self.allocatedSize(shdr.sh_offset);
const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
if (needed_size > text_capacity) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
const text_size = if (self.atoms.get(phdr_index)) |last| blk: {
const sym = self.local_symbols.items[last.local_sym_index];
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else 0;
log.debug("new PT_LOAD file offset 0x{x} to 0x{x}", .{ new_offset, new_offset + text_size });
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, text_size);
if (amt != text_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
}
try self.growAllocSection(shdr_index, phdr_index, needed_size);
_ = try self.atoms.put(self.base.allocator, phdr_index, text_block);
shdr.sh_size = needed_size;
phdr.p_memsz = needed_size;
phdr.p_filesz = needed_size;
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which are the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
@@ -2165,9 +2222,6 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
// model each package as a different compilation unit.
self.debug_aranges_section_dirty = true;
}
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
self.shdr_table_dirty = true; // TODO look into making only the one section dirty
}
shdr.sh_addralign = math.max(shdr.sh_addralign, alignment);
@@ -2422,7 +2476,6 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
&self.base,
module,
decl_index,
local_sym.st_value,
@@ -2499,7 +2552,6 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
&self.base,
module,
decl_index,
local_sym.st_value,
@@ -2692,7 +2744,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl: *const Module.Decl)
if (self.llvm_object) |_| return;
if (self.dwarf) |*dw| {
try dw.updateDeclLineNumber(&self.base, decl);
try dw.updateDeclLineNumber(decl);
}
}
@@ -2749,31 +2801,14 @@ fn writeSectHeader(self: *Elf, index: usize) !void {
}
fn writeOffsetTableEntry(self: *Elf, index: usize) !void {
const shdr = &self.sections.items[self.got_section_index.?];
const phdr = &self.program_headers.items[self.phdr_got_index.?];
const entry_size: u16 = self.archPtrWidthBytes();
if (self.offset_table_count_dirty) {
// TODO Also detect virtual address collisions.
const allocated_size = self.allocatedSize(shdr.sh_offset);
const needed_size = self.offset_table.items.len * entry_size;
if (needed_size > allocated_size) {
// Must move the entire got section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, shdr.sh_size);
if (amt != shdr.sh_size) return error.InputOutput;
shdr.sh_offset = new_offset;
phdr.p_offset = new_offset;
}
shdr.sh_size = needed_size;
phdr.p_memsz = needed_size;
phdr.p_filesz = needed_size;
self.shdr_table_dirty = true; // TODO look into making only the one section dirty
self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
try self.growAllocSection(self.got_section_index.?, self.phdr_got_index.?, needed_size);
self.offset_table_count_dirty = false;
}
const endian = self.base.options.target.cpu.arch.endian();
const shdr = &self.sections.items[self.got_section_index.?];
const off = shdr.sh_offset + @as(u64, entry_size) * index;
switch (entry_size) {
2 => {
@@ -2812,23 +2847,8 @@ fn writeSymbol(self: *Elf, index: usize) !void {
.p64 => @alignOf(elf.Elf64_Sym),
};
const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, sym_align);
log.debug("moving '.symtab' from 0x{x} to 0x{x}", .{ syms_sect.sh_offset, new_offset });
const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
const amt = try self.base.file.?.copyRangeAll(
syms_sect.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
syms_sect.sh_offset = new_offset;
}
try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
self.shdr_table_dirty = true; // TODO look into only writing one section
}
const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
const off = switch (self.ptr_width) {
@@ -2876,22 +2896,7 @@ fn writeAllGlobalSymbols(self: *Elf) !void {
.p64 => @alignOf(elf.Elf64_Sym),
};
const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, sym_align);
log.debug("moving '.symtab' from 0x{x} to 0x{x}", .{ syms_sect.sh_offset, new_offset });
const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
const amt = try self.base.file.?.copyRangeAll(
syms_sect.sh_offset,
self.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
syms_sect.sh_offset = new_offset;
}
syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
self.shdr_table_dirty = true; // TODO look into only writing one section
try self.growNonAllocSection(self.symtab_section_index.?, needed_size, sym_align, true);
const foreign_endian = self.base.options.target.cpu.arch.endian() != builtin.cpu.arch.endian();
const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
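
The Elf.zig hunks above mostly factor one recurring pattern into the new growAllocSection/growNonAllocSection helpers plus markDirty: if the needed size no longer fits in the space allocated at the section's current file offset, pick a fresh offset, optionally copy the already-written bytes there, update the offset (and the matching program header for alloc sections), then record the new size and mark the affected headers dirty. A minimal sketch of that pattern, not part of the diff, using simplified stand-ins for the real section type and helpers:

const std = @import("std");

// Simplified stand-in for an ELF section header; only the fields the pattern touches.
const Section = struct { offset: u64, size: u64 };

fn growSection(
    file: std.fs.File,
    sect: *Section,
    allocated: u64, // capacity at sect.offset, i.e. what allocatedSize() reports
    new_offset: u64, // a fresh location, i.e. what findFreeSpace() returns
    needed_size: u64,
    requires_file_copy: bool,
) !void {
    if (needed_size > allocated) {
        if (requires_file_copy) {
            // Relocate the bytes already written at the old offset.
            const amt = try file.copyRangeAll(sect.offset, file, new_offset, sect.size);
            if (amt != sect.size) return error.InputOutput;
        }
        sect.offset = new_offset;
    }
    sect.size = needed_size;
    // The real helpers additionally update the PT_LOAD program header for
    // alloc sections and set the *_dirty flags via markDirty().
}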

View File

@@ -347,7 +347,7 @@ pub fn openPath(allocator: Allocator, options: link.Options) !*MachO {
self.d_sym = .{
.allocator = allocator,
.dwarf = link.File.Dwarf.init(allocator, .macho, options.target),
.dwarf = link.File.Dwarf.init(allocator, &self.base, options.target),
.file = d_sym_file,
.page_size = self.page_size,
};
@@ -449,7 +449,7 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
if (self.d_sym) |*d_sym| {
try d_sym.dwarf.flushModule(&self.base, module);
try d_sym.dwarf.flushModule(module);
}
var libs = std.StringArrayHashMap(link.SystemLib).init(arena);
@@ -2213,7 +2213,6 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
&self.base,
module,
decl_index,
addr,
@@ -2364,7 +2363,6 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
&self.base,
module,
decl_index,
addr,
@@ -2603,7 +2601,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void {
_ = module;
if (self.d_sym) |*d_sym| {
try d_sym.dwarf.updateDeclLineNumber(&self.base, decl);
try d_sym.dwarf.updateDeclLineNumber(decl);
}
}
@@ -4302,6 +4300,11 @@ pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc {
return global;
}
pub fn getDebugSymbols(self: *MachO) ?*DebugSymbols {
if (self.d_sym == null) return null;
return &self.d_sym.?;
}
pub fn findFirst(comptime T: type, haystack: []align(1) const T, start: usize, predicate: anytype) usize {
if (!@hasDecl(@TypeOf(predicate), "predicate"))
@compileError("Predicate is required to define fn predicate(@This(), T) bool");

View File

@@ -137,7 +137,6 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
off + size,
});
sect.addr = segment.vmaddr + off - segment.fileoff;
sect.offset = @intCast(u32, off);
const index = @intCast(u8, self.sections.items.len);
@@ -148,6 +147,52 @@ fn allocateSection(self: *DebugSymbols, sectname: []const u8, size: u64, alignme
return index;
}
pub fn growSection(self: *DebugSymbols, sect_index: u8, needed_size: u32, requires_file_copy: bool) !void {
const sect = self.getSectionPtr(sect_index);
if (needed_size > self.allocatedSize(sect.offset)) {
const existing_size = sect.size;
sect.size = 0; // free the space
const new_offset = self.findFreeSpace(needed_size, 1);
log.debug("moving {s} section: {} bytes from 0x{x} to 0x{x}", .{
sect.sectName(),
existing_size,
sect.offset,
new_offset,
});
if (requires_file_copy) {
const amt = try self.file.copyRangeAll(
sect.offset,
self.file,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
}
sect.offset = @intCast(u32, new_offset);
}
sect.size = needed_size;
self.markDirty(sect_index);
}
pub fn markDirty(self: *DebugSymbols, sect_index: u8) void {
if (self.debug_info_section_index.? == sect_index) {
self.debug_info_header_dirty = true;
} else if (self.debug_line_section_index.? == sect_index) {
self.debug_line_header_dirty = true;
} else if (self.debug_abbrev_section_index.? == sect_index) {
self.debug_abbrev_section_dirty = true;
} else if (self.debug_str_section_index.? == sect_index) {
self.debug_string_table_dirty = true;
} else if (self.debug_aranges_section_index.? == sect_index) {
self.debug_aranges_section_dirty = true;
}
}
fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
const end = start + padToIdeal(size);
for (self.sections.items) |section| {
@@ -160,7 +205,7 @@ fn detectAllocCollision(self: *DebugSymbols, start: u64, size: u64) ?u64 {
return null;
}
pub fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
fn findFreeSpace(self: *DebugSymbols, object_size: u64, min_alignment: u64) u64 {
const segment = self.getDwarfSegmentPtr();
var offset: u64 = segment.fileoff;
while (self.detectAllocCollision(offset, object_size)) |item_end| {
@@ -213,7 +258,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
}
if (self.debug_abbrev_section_dirty) {
try self.dwarf.writeDbgAbbrev(&macho_file.base);
try self.dwarf.writeDbgAbbrev();
self.debug_abbrev_section_dirty = false;
}
@@ -223,7 +268,7 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
const text_section = macho_file.sections.items(.header)[macho_file.text_section_index.?];
const low_pc = text_section.addr;
const high_pc = text_section.addr + text_section.size;
try self.dwarf.writeDbgInfoHeader(&macho_file.base, module, low_pc, high_pc);
try self.dwarf.writeDbgInfoHeader(module, low_pc, high_pc);
self.debug_info_header_dirty = false;
}
@@ -231,36 +276,21 @@ pub fn flushModule(self: *DebugSymbols, macho_file: *MachO) !void {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_section = macho_file.sections.items(.header)[macho_file.text_section_index.?];
try self.dwarf.writeDbgAranges(&macho_file.base, text_section.addr, text_section.size);
try self.dwarf.writeDbgAranges(text_section.addr, text_section.size);
self.debug_aranges_section_dirty = false;
}
if (self.debug_line_header_dirty) {
try self.dwarf.writeDbgLineHeader(&macho_file.base, module);
try self.dwarf.writeDbgLineHeader(module);
self.debug_line_header_dirty = false;
}
{
const dwarf_segment = self.getDwarfSegmentPtr();
const debug_strtab_sect = &self.sections.items[self.debug_str_section_index.?];
if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != debug_strtab_sect.size) {
const allocated_size = self.allocatedSize(debug_strtab_sect.offset);
const needed_size = self.dwarf.strtab.items.len;
if (needed_size > allocated_size) {
debug_strtab_sect.size = 0; // free the space
const new_offset = self.findFreeSpace(needed_size, 1);
debug_strtab_sect.addr = dwarf_segment.vmaddr + new_offset - dwarf_segment.fileoff;
debug_strtab_sect.offset = @intCast(u32, new_offset);
}
debug_strtab_sect.size = @intCast(u32, needed_size);
log.debug("__debug_strtab start=0x{x} end=0x{x}", .{
debug_strtab_sect.offset,
debug_strtab_sect.offset + needed_size,
});
try self.file.pwriteAll(self.dwarf.strtab.items, debug_strtab_sect.offset);
const sect_index = self.debug_str_section_index.?;
if (self.debug_string_table_dirty or self.dwarf.strtab.items.len != self.getSection(sect_index).size) {
const needed_size = @intCast(u32, self.dwarf.strtab.items.len);
try self.growSection(sect_index, needed_size, false);
try self.file.pwriteAll(self.dwarf.strtab.items, self.getSection(sect_index).offset);
self.debug_string_table_dirty = false;
}
}
@@ -424,7 +454,7 @@ fn writeHeader(self: *DebugSymbols, macho_file: *MachO, ncmds: u32, sizeofcmds:
try self.file.pwriteAll(mem.asBytes(&header), 0);
}
pub fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
fn allocatedSize(self: *DebugSymbols, start: u64) u64 {
const seg = self.getDwarfSegmentPtr();
assert(start >= seg.fileoff);
var min_pos: u64 = std.math.maxInt(u64);
@@ -556,3 +586,13 @@ fn getLinkeditSegmentPtr(self: *DebugSymbols) *macho.segment_command_64 {
const index = self.linkedit_segment_cmd_index.?;
return &self.segments.items[index];
}
pub fn getSectionPtr(self: *DebugSymbols, sect: u8) *macho.section_64 {
assert(sect < self.sections.items.len);
return &self.sections.items[sect];
}
pub fn getSection(self: DebugSymbols, sect: u8) macho.section_64 {
assert(sect < self.sections.items.len);
return self.sections.items[sect];
}

View File

@@ -361,7 +361,7 @@ pub fn openPath(allocator: Allocator, sub_path: []const u8, options: link.Option
}
if (!options.strip and options.module != null) {
wasm_bin.dwarf = Dwarf.init(allocator, .wasm, options.target);
wasm_bin.dwarf = Dwarf.init(allocator, &wasm_bin.base, options.target);
try wasm_bin.initDebugSections();
}
@@ -910,7 +910,6 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
if (wasm.dwarf) |*dwarf| {
try dwarf.commitDeclState(
&wasm.base,
mod,
decl_index,
// Actual value will be written after relocation.
@@ -990,7 +989,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl: *const Module.Decl)
defer wasm.base.allocator.free(decl_name);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
try dw.updateDeclLineNumber(&wasm.base, decl);
try dw.updateDeclLineNumber(decl);
}
}
@@ -2299,7 +2298,7 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
}
if (wasm.dwarf) |*dwarf| {
try dwarf.flushModule(&wasm.base, wasm.base.options.module.?);
try dwarf.flushModule(wasm.base.options.module.?);
}
}
@@ -2668,12 +2667,12 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
if (!wasm.base.options.strip) {
if (wasm.dwarf) |*dwarf| {
const mod = wasm.base.options.module.?;
try dwarf.writeDbgAbbrev(&wasm.base);
try dwarf.writeDbgAbbrev();
// for debug info and ranges, the address is always 0,
// as locations are always offsets relative to 'code' section.
try dwarf.writeDbgInfoHeader(&wasm.base, mod, 0, code_section_size);
try dwarf.writeDbgAranges(&wasm.base, 0, code_section_size);
try dwarf.writeDbgLineHeader(&wasm.base, mod);
try dwarf.writeDbgInfoHeader(mod, 0, code_section_size);
try dwarf.writeDbgAranges(0, code_section_size);
try dwarf.writeDbgLineHeader(mod);
}
var debug_bytes = std.ArrayList(u8).init(wasm.base.allocator);