elf: store Index rather than ?Index in Atom; gen ABS STT_FILE for zig source

Jakub Konka 2023-09-08 21:09:45 +02:00
parent 6ad5db030c
commit 69738a07c2
4 changed files with 69 additions and 56 deletions
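
The core of this change is the switch from optional atom indices (`?Index`) to plain indices where 0 is reserved as a null sentinel; `Elf.atom` now returns `?*Atom`, restoring the optionality at the lookup site. Below is a minimal, self-contained sketch of that pattern with simplified stand-in types (illustrative only, not the linker's real definitions; it assumes slot 0 of the atoms list is reserved so real atoms start at index 1):

const std = @import("std");

// Simplified stand-ins for the linker types, only to illustrate the pattern.
const Atom = struct {
    pub const Index = u32;
    size: u64 = 0,
    // 0 means "no neighbor", replacing the previous `?Index = null` fields.
    prev_index: Index = 0,
    next_index: Index = 0,
};

const Elf = struct {
    atoms: std.ArrayListUnmanaged(Atom) = .{},

    // Mirrors the new accessor: index 0 maps back to null.
    pub fn atom(self: *Elf, atom_index: Atom.Index) ?*Atom {
        if (atom_index == 0) return null;
        return &self.atoms.items[atom_index];
    }
};

test "index 0 acts as null" {
    const gpa = std.testing.allocator;
    var elf_file = Elf{};
    defer elf_file.atoms.deinit(gpa);
    try elf_file.atoms.append(gpa, .{}); // reserve slot 0 as the sentinel
    try elf_file.atoms.append(gpa, .{ .size = 8 });
    try std.testing.expect(elf_file.atom(0) == null);
    try std.testing.expect(elf_file.atom(1).?.size == 8);
}

Most of the remaining hunks are mechanical rewrites of `if (maybe_index) |index|` unwraps into `if (elf_file.atom(index)) |ptr|` captures.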

View File

@@ -834,10 +834,23 @@ pub fn populateMissingMetadata(self: *Elf) !void {
try self.base.file.?.pwriteAll(&[_]u8{0}, max_file_offset);
}
if (self.zig_module_index == null) {
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
self.files.set(index, .{ .zig_module = .{ .index = index } });
self.zig_module_index = index;
if (self.base.options.module) |module| {
if (self.zig_module_index == null) {
const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
self.files.set(index, .{ .zig_module = .{
.index = index,
.path = module.main_pkg.root_src_path,
} });
self.zig_module_index = index;
const zig_module = self.file(index).?.zig_module;
const sym_index = try zig_module.addLocal(self);
const sym = self.symbol(sym_index);
const esym = sym.sourceSymbol(self);
const name_off = try self.strtab.insert(gpa, std.fs.path.stem(module.main_pkg.root_src_path));
sym.name_offset = name_off;
esym.st_name = name_off;
esym.st_info |= elf.STT_FILE;
}
}
}
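
The new block above also attaches the root source file name to a local STT_FILE symbol. As background (standard ELF layout, not code from this commit): `st_info` packs the binding in its high nibble and the type in its low nibble, so ORing `STT_FILE` into a value initialized with `STB_LOCAL << 4` yields a local file symbol; such symbols conventionally carry `SHN_ABS` as their section index, which appears to be what the relaxed `isAbs`/`setOutputSym` checks later in this diff allow for `zig_module` symbols. A small standalone check of the encoding, using `std.elf` constants:

const std = @import("std");
const elf = std.elf;

test "local STT_FILE symbol encoding" {
    var sym = std.mem.zeroes(elf.Elf64_Sym);
    sym.st_info = elf.STB_LOCAL << 4; // binding in the high nibble
    sym.st_info |= elf.STT_FILE; // type in the low nibble
    sym.st_shndx = elf.SHN_ABS; // file symbols are absolute by convention

    try std.testing.expectEqual(@as(u8, (elf.STB_LOCAL << 4) | elf.STT_FILE), sym.st_info);
    try std.testing.expectEqual(@as(u16, elf.SHN_ABS), sym.st_shndx);
}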
@@ -846,13 +859,12 @@ pub fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
const shdr = &self.sections.items(.shdr)[shdr_index];
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const phdr = &self.program_headers.items[phdr_index];
const maybe_last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
const last_atom_index = self.sections.items(.last_atom_index)[shdr_index];
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
const existing_size = if (maybe_last_atom_index) |last_atom_index| blk: {
const last = self.atom(last_atom_index);
const existing_size = if (self.atom(last_atom_index)) |last| blk: {
break :blk (last.value + last.size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
break :blk shdr.sh_size;
@@ -1016,7 +1028,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
while (it.next()) |entry| {
const atom_index = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
const atom_ptr = self.atom(atom_index);
const atom_ptr = self.atom(atom_index).?;
const source_shdr = self.sections.items(.shdr)[atom_ptr.output_section_index];
log.debug("relocating '{s}'", .{atom_ptr.name(self)});
@@ -2753,6 +2765,7 @@ fn writeSymtab(self: *Elf) !void {
const symtab = try gpa.alloc(elf.Elf64_Sym, nsyms);
defer gpa.free(symtab);
symtab[0] = null_sym;
var ctx: struct { ilocal: usize, iglobal: usize, symtab: []elf.Elf64_Sym } = .{
.ilocal = 1,
@@ -3083,7 +3096,8 @@ const CsuObjects = struct {
}
};
pub fn atom(self: *Elf, atom_index: Atom.Index) *Atom {
pub fn atom(self: *Elf, atom_index: Atom.Index) ?*Atom {
if (atom_index == 0) return null;
assert(atom_index < self.atoms.items.len);
return &self.atoms.items[atom_index];
}
@@ -3210,7 +3224,7 @@ fn fmtDumpState(
if (self.zig_module_index) |index| {
const zig_module = self.file(index).?.zig_module;
try writer.print("zig_module({d}) : (zig module)\n", .{index});
try writer.print("zig_module({d}) : {s}\n", .{ index, zig_module.path });
try writer.print("{}\n", .{zig_module.fmtSymtab(self)});
}
if (self.linker_defined_index) |index| {
@@ -3230,7 +3244,7 @@ const Section = struct {
phdr_index: u16,
/// Index of the last allocated atom in this section.
last_atom_index: ?Atom.Index = null,
last_atom_index: Atom.Index = 0,
/// A list of atoms that have surplus capacity. This list can have false
/// positives, as functions grow and shrink over time, only sometimes being added

View File

@@ -39,8 +39,8 @@ fde_end: u32 = 0,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev_index: ?Index = null,
next_index: ?Index = null,
prev_index: Index = 0,
next_index: Index = 0,
pub fn name(self: Atom, elf_file: *Elf) []const u8 {
return elf_file.strtab.getAssumeExists(self.name_offset);
@@ -50,14 +50,13 @@ pub fn name(self: Atom, elf_file: *Elf) []const u8 {
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
const next_value = if (self.next_index) |next_index| elf_file.atom(next_index).value else std.math.maxInt(u32);
const next_value = if (elf_file.atom(self.next_index)) |next| next.value else std.math.maxInt(u32);
return next_value - self.value;
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
// No need to keep a free list node for the last block.
const next_index = self.next_index orelse return false;
const next = elf_file.atom(next_index);
const next = elf_file.atom(self.next_index) orelse return false;
const cap = next.value - self.value;
const ideal_cap = Elf.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
@@ -84,7 +83,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
const phdr = &elf_file.program_headers.items[phdr_index];
const shdr = &elf_file.sections.items(.shdr)[self.output_section_index];
const free_list = &elf_file.sections.items(.free_list)[self.output_section_index];
const maybe_last_atom_index = &elf_file.sections.items(.last_atom_index)[self.output_section_index];
const last_atom_index = &elf_file.sections.items(.last_atom_index)[self.output_section_index];
const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
const alignment = try std.math.powi(u64, 2, self.alignment);
@@ -102,7 +101,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
const big_atom_index = free_list.items[i];
const big_atom = elf_file.atom(big_atom_index);
const big_atom = elf_file.atom(big_atom_index).?;
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
@@ -134,13 +133,12 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
free_list_removal = i;
}
break :blk new_start_vaddr;
} else if (maybe_last_atom_index.*) |last_index| {
const last = elf_file.atom(last_index);
} else if (elf_file.atom(last_atom_index.*)) |last| {
const ideal_capacity = Elf.padToIdeal(last.size);
const ideal_capacity_end_vaddr = last.value + ideal_capacity;
const new_start_vaddr = std.mem.alignForward(u64, ideal_capacity_end_vaddr, alignment);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last_index;
atom_placement = last.atom_index;
break :blk new_start_vaddr;
} else {
break :blk phdr.p_vaddr;
@@ -148,13 +146,13 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
};
const expand_section = if (atom_placement) |placement_index|
elf_file.atom(placement_index).next_index == null
elf_file.atom(placement_index).?.next_index == 0
else
true;
if (expand_section) {
const needed_size = (self.value + self.size) - phdr.p_vaddr;
try elf_file.growAllocSection(self.output_section_index, needed_size);
maybe_last_atom_index.* = self.atom_index;
last_atom_index.* = self.atom_index;
if (elf_file.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
@@ -172,23 +170,21 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (self.prev_index) |prev_index| {
const prev = elf_file.atom(prev_index);
if (elf_file.atom(self.prev_index)) |prev| {
prev.next_index = self.next_index;
}
if (self.next_index) |next_index| {
const next = elf_file.atom(next_index);
if (elf_file.atom(self.next_index)) |next| {
next.prev_index = self.prev_index;
}
if (atom_placement) |big_atom_index| {
const big_atom = elf_file.atom(big_atom_index);
const big_atom = elf_file.atom(big_atom_index).?;
self.prev_index = big_atom_index;
self.next_index = big_atom.next_index;
big_atom.next_index = self.atom_index;
} else {
self.prev_index = null;
self.next_index = null;
self.prev_index = 0;
self.next_index = 0;
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
@@ -231,35 +227,33 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
}
}
const maybe_last_atom_index = &elf_file.sections.items(.last_atom_index)[shndx];
if (maybe_last_atom_index.*) |last_atom_index| {
if (last_atom_index == self.atom_index) {
if (self.prev_index) |prev_index| {
const last_atom_index = &elf_file.sections.items(.last_atom_index)[shndx];
if (elf_file.atom(last_atom_index.*)) |last_atom| {
if (last_atom.atom_index == self.atom_index) {
if (elf_file.atom(self.prev_index)) |_| {
// TODO shrink the section size here
maybe_last_atom_index.* = prev_index;
last_atom_index.* = self.prev_index;
} else {
maybe_last_atom_index.* = null;
last_atom_index.* = 0;
}
}
}
if (self.prev_index) |prev_index| {
const prev = elf_file.atom(prev_index);
if (elf_file.atom(self.prev_index)) |prev| {
prev.next_index = self.next_index;
if (!already_have_free_list_node and prev.*.freeListEligible(elf_file)) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
free_list.append(gpa, prev_index) catch {};
free_list.append(gpa, prev.atom_index) catch {};
}
} else {
self.prev_index = null;
self.prev_index = 0;
}
if (self.next_index) |next_index| {
elf_file.atom(next_index).prev_index = self.prev_index;
if (elf_file.atom(self.next_index)) |next| {
next.prev_index = self.prev_index;
} else {
self.next_index = null;
self.next_index = 0;
}
self.* = .{};
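
With `prev_index` and `next_index` now plain indices defaulting to 0, neighbor traversal in `Atom` goes through the optional-returning `atom` accessor rather than unwrapping `?Index` fields. As a hypothetical illustration (not part of the commit), written against the simplified sketch types from the first example above, walking the chain of predecessors looks like this:

// Hypothetical helper using the simplified sketch types above: walk backwards
// from an atom, treating index 0 as the end of the chain.
fn countPredecessors(elf_file: *Elf, start: Atom) usize {
    var count: usize = 0;
    var index = start.prev_index;
    while (elf_file.atom(index)) |prev| {
        count += 1;
        index = prev.prev_index;
    }
    return count;
}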

View File

@@ -36,7 +36,7 @@ pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
const file_ptr = symbol.file(elf_file).?;
// if (file_ptr == .shared) return symbol.sourceSymbol(elf_file).st_shndx == elf.SHN_ABS;
return !symbol.flags.import and symbol.atom(elf_file) == null and symbol.output_section_index == 0 and
file_ptr != .linker_defined and file_ptr != .zig_module;
file_ptr != .linker_defined;
}
pub fn isLocal(symbol: Symbol) bool {
@@ -175,7 +175,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
const st_shndx = blk: {
// if (symbol.flags.copy_rel) break :blk elf_file.copy_rel_sect_index.?;
// if (file_ptr == .shared or s_sym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
if (symbol.atom(elf_file) == null and file_ptr != .linker_defined and file_ptr != .zig_module)
if (symbol.atom(elf_file) == null and file_ptr != .linker_defined)
break :blk elf.SHN_ABS;
break :blk symbol.output_section_index;
};

View File

@@ -1,3 +1,5 @@
/// Path is owned by Module and lives as long as *Module.
path: []const u8,
index: File.Index,
elf_local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{},
@@ -23,26 +25,29 @@ pub fn deinit(self: *ZigModule, allocator: Allocator) void {
pub fn createAtom(self: *ZigModule, output_section_index: u16, elf_file: *Elf) !Symbol.Index {
const gpa = elf_file.base.allocator;
const atom_index = try elf_file.addAtom();
const symbol_index = try elf_file.addSymbol();
const atom_ptr = elf_file.atom(atom_index);
const symbol_index = try self.addLocal(elf_file);
const atom_ptr = elf_file.atom(atom_index).?;
atom_ptr.file_index = self.index;
atom_ptr.output_section_index = output_section_index;
const symbol_ptr = elf_file.symbol(symbol_index);
symbol_ptr.file_index = self.index;
symbol_ptr.atom_index = atom_index;
symbol_ptr.output_section_index = output_section_index;
symbol_ptr.esym_index = @as(Symbol.Index, @intCast(self.elf_local_symbols.items.len));
const local_esym = symbol_ptr.sourceSymbol(elf_file);
local_esym.st_shndx = output_section_index;
try self.atoms.append(gpa, atom_index);
return symbol_index;
}
pub fn addLocal(self: *ZigModule, elf_file: *Elf) !Symbol.Index {
const gpa = elf_file.base.allocator;
const symbol_index = try elf_file.addSymbol();
const symbol_ptr = elf_file.symbol(symbol_index);
symbol_ptr.file_index = self.index;
symbol_ptr.esym_index = @as(Symbol.Index, @intCast(self.elf_local_symbols.items.len));
const local_esym = try self.elf_local_symbols.addOne(gpa);
local_esym.* = Elf.null_sym;
local_esym.st_info = elf.STB_LOCAL << 4;
local_esym.st_shndx = output_section_index;
try self.atoms.append(gpa, atom_index);
try self.local_symbols.putNoClobber(gpa, symbol_index, {});
return symbol_index;
}