Merge pull request #21305 from ziglang/elf-incr

elf: redo how we allocate atoms extracted from input relocatable object files
Jakub Konka 2024-09-04 21:56:45 +02:00 committed by GitHub
commit 7e31804870
10 changed files with 1147 additions and 990 deletions
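
At a high level, this PR replaces per-atom allocation in output sections with whole-AtomList allocation: each output section's atoms are first laid out relative to their AtomList (updateSize), and the list is then placed into the section as a single chunk (allocate, via the new Elf.allocateChunk). Below is a minimal, self-contained sketch of that two-phase scheme; Atom and AtomList here are simplified stand-ins, not the real linker structs:

    const std = @import("std");

    const Atom = struct {
        value: i64 = 0, // offset: first list-relative, then section-relative
        size: u64 = 0,
        alignment: u64 = 1,
    };

    const AtomList = struct {
        value: i64 = 0, // offset of the whole list within the output section
        size: u64 = 0,
        alignment: u64 = 1,
        atoms: []Atom,

        // Phase 1: lay atoms out back to back, relative to the list.
        fn updateSize(list: *AtomList) void {
            for (list.atoms) |*atom_ptr| {
                const off = std.mem.alignForward(u64, list.size, atom_ptr.alignment);
                atom_ptr.value = @intCast(off);
                list.size = off + atom_ptr.size;
                list.alignment = @max(list.alignment, atom_ptr.alignment);
            }
        }

        // Phase 2: place the whole list as one chunk (stand-in for
        // Elf.allocateChunk), then translate atom offsets to section-relative.
        fn allocate(list: *AtomList, section_cursor: *u64) void {
            const off = std.mem.alignForward(u64, section_cursor.*, list.alignment);
            list.value = @intCast(off);
            section_cursor.* = off + list.size;
            for (list.atoms) |*atom_ptr| atom_ptr.value += list.value;
        }
    };

    pub fn main() void {
        var atoms = [_]Atom{
            .{ .size = 7, .alignment = 4 },
            .{ .size = 3, .alignment = 8 },
        };
        var list: AtomList = .{ .atoms = &atoms };
        list.updateSize();
        var cursor: u64 = 0x20; // next free offset in the hypothetical section
        list.allocate(&cursor);
        std.debug.print("list at 0x{x}; atoms at 0x{x} and 0x{x}\n", .{
            list.value, atoms[0].value, atoms[1].value,
        });
    }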

CMakeLists.txt

@ -600,6 +600,7 @@ set(ZIG_STAGE2_SOURCES
src/link/Elf.zig
src/link/Elf/Archive.zig
src/link/Elf/Atom.zig
src/link/Elf/AtomList.zig
src/link/Elf/LdScript.zig
src/link/Elf/LinkerDefined.zig
src/link/Elf/Object.zig

src/link/Dwarf.zig

@ -261,7 +261,6 @@ pub const Section = struct {
index: u32,
first: Unit.Index.Optional,
last: Unit.Index.Optional,
off: u64,
len: u64,
units: std.ArrayListUnmanaged(Unit),
@ -284,9 +283,8 @@ pub const Section = struct {
.index = std.math.maxInt(u32),
.first = .none,
.last = .none,
.off = 0,
.len = 0,
.units = .{},
.len = 0,
};
fn deinit(sec: *Section, gpa: std.mem.Allocator) void {
@ -295,6 +293,20 @@ pub const Section = struct {
sec.* = undefined;
}
fn off(sec: Section, dwarf: *Dwarf) u64 {
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(sec.index).atom(elf_file).?;
return atom.offset(elf_file);
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |d_sym|
d_sym.sections.items[sec.index]
else
macho_file.sections.items(.header)[sec.index];
return header.offset;
} else unreachable;
}
fn addUnit(sec: *Section, header_len: u32, trailer_len: u32, dwarf: *Dwarf) UpdateError!Unit.Index {
const unit: Unit.Index = @enumFromInt(sec.units.items.len);
const unit_ptr = try sec.units.addOne(dwarf.gpa);
@ -306,9 +318,9 @@ pub const Section = struct {
.next = .none,
.first = .none,
.last = .none,
.off = 0,
.header_len = aligned_header_len,
.trailer_len = aligned_trailer_len,
.off = 0,
.len = aligned_header_len + aligned_trailer_len,
.entries = .{},
.cross_unit_relocs = .{},
@ -375,12 +387,16 @@ pub const Section = struct {
fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void {
if (len <= sec.len) return;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(sec.index).atom(elf_file).?;
const shndx = atom.output_section_index;
if (sec == &dwarf.debug_frame.section)
try elf_file.growAllocSection(sec.index, len)
try elf_file.growAllocSection(shndx, len, sec.alignment.toByteUnits().?)
else
try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
const shdr = &elf_file.sections.items(.shdr)[sec.index];
sec.off = shdr.sh_offset;
try elf_file.growNonAllocSection(shndx, len, sec.alignment.toByteUnits().?, true);
const shdr = elf_file.sections.items(.shdr)[shndx];
atom.size = shdr.sh_size;
atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
sec.len = shdr.sh_size;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym| header: {
@ -390,7 +406,6 @@ pub const Section = struct {
try macho_file.growSection(@intCast(sec.index), len);
break :header &macho_file.sections.items(.header)[sec.index];
};
sec.off = header.offset;
sec.len = header.size;
}
}
@ -399,18 +414,21 @@ pub const Section = struct {
const len = sec.getUnit(sec.first.unwrap() orelse return).off;
if (len == 0) return;
for (sec.units.items) |*unit| unit.off -= len;
sec.off += len;
sec.len -= len;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const shdr = &elf_file.sections.items(.shdr)[sec.index];
shdr.sh_offset = sec.off;
const zo = elf_file.zigObjectPtr().?;
const atom = zo.symbol(sec.index).atom(elf_file).?;
const shndx = atom.output_section_index;
const shdr = &elf_file.sections.items(.shdr)[shndx];
atom.size = sec.len;
shdr.sh_offset += len;
shdr.sh_size = sec.len;
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
const header = if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index]
else
&macho_file.sections.items(.header)[sec.index];
header.offset = @intCast(sec.off);
header.offset += @intCast(len);
header.size = sec.len;
}
}
@ -539,9 +557,9 @@ const Unit = struct {
fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void {
if (unit.off == new_off) return;
if (try dwarf.getFile().?.copyRangeAll(
sec.off + unit.off,
sec.off(dwarf) + unit.off,
dwarf.getFile().?,
sec.off + new_off,
sec.off(dwarf) + new_off,
unit.len,
) != unit.len) return error.InputOutput;
unit.off = new_off;
@ -573,7 +591,7 @@ const Unit = struct {
fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == unit.header_len);
try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off);
try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off);
}
fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
@ -605,7 +623,7 @@ const Unit = struct {
assert(fbs.pos == extended_op_bytes + op_len_bytes);
writer.writeByte(DW.LNE.padding) catch unreachable;
assert(fbs.pos >= unit.trailer_len and fbs.pos <= len);
return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + start);
return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + start);
}
var trailer = try std.ArrayList(u8).initCapacity(dwarf.gpa, len);
defer trailer.deinit();
@ -664,11 +682,11 @@ const Unit = struct {
assert(trailer.items.len == unit.trailer_len);
trailer.appendNTimesAssumeCapacity(fill_byte, len - unit.trailer_len);
assert(trailer.items.len == len);
try dwarf.getFile().?.pwriteAll(trailer.items, sec.off + start);
try dwarf.getFile().?.pwriteAll(trailer.items, sec.off(dwarf) + start);
}
fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
const unit_off = sec.off + unit.off;
const unit_off = sec.off(dwarf) + unit.off;
for (unit.cross_unit_relocs.items) |reloc| {
const target_unit = sec.getUnit(reloc.target_unit);
try dwarf.resolveReloc(
@ -755,12 +773,12 @@ const Entry = struct {
dwarf.writeInt(unit_len[0..dwarf.sectionOffsetBytes()], len - dwarf.unitLengthBytes());
try dwarf.getFile().?.pwriteAll(
unit_len[0..dwarf.sectionOffsetBytes()],
sec.off + unit.off + unit.header_len + entry.off,
sec.off(dwarf) + unit.off + unit.header_len + entry.off,
);
const buf = try dwarf.gpa.alloc(u8, len - entry.len);
defer dwarf.gpa.free(buf);
@memset(buf, DW.CFA.nop);
try dwarf.getFile().?.pwriteAll(buf, sec.off + unit.off + unit.header_len + start);
try dwarf.getFile().?.pwriteAll(buf, sec.off(dwarf) + unit.off + unit.header_len + start);
return;
}
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
@ -816,7 +834,7 @@ const Entry = struct {
},
} else assert(!sec.pad_to_ideal and len == 0);
assert(fbs.pos <= len);
try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + unit.off + unit.header_len + start);
try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + unit.off + unit.header_len + start);
}
fn resize(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, len: u32) UpdateError!void {
@ -851,15 +869,15 @@ const Entry = struct {
fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
assert(contents.len == entry_ptr.len);
try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off);
try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
_ = try dwarf.getFile().?.preadAll(buf, sec.off);
_ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf));
log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{
@intFromEnum(sec.first),
@intFromEnum(sec.last),
sec.off,
sec.off(dwarf),
sec.len,
});
for (sec.units.items) |*unit_ptr| {
@ -891,9 +909,11 @@ const Entry = struct {
if (std.debug.runtime_safety) {
log.err("missing {} from {s}", .{
@as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)),
std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file|
elf_file.shstrtab.items[elf_file.sections.items(.shdr)[sec.index].sh_name..]
else if (dwarf.bin_file.cast(.macho)) |macho_file|
std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| sh_name: {
const zo = elf_file.zigObjectPtr().?;
const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index;
break :sh_name elf_file.shstrtab.items[elf_file.sections.items(.shdr)[shndx].sh_name..];
} else if (dwarf.bin_file.cast(.macho)) |macho_file|
if (macho_file.d_sym) |*d_sym|
&d_sym.sections.items[sec.index].segname
else
@ -924,7 +944,7 @@ const Entry = struct {
}
fn resolveRelocs(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void {
const entry_off = sec.off + unit.off + unit.header_len + entry.off;
const entry_off = sec.off(dwarf) + unit.off + unit.header_len + entry.off;
for (entry.cross_entry_relocs.items) |reloc| {
try dwarf.resolveReloc(
entry_off + reloc.source_off,
@ -961,7 +981,8 @@ const Entry = struct {
.none, .debug_frame => {},
.eh_frame => return if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
const entry_addr: i64 = @intCast(entry_off - sec.off + elf_file.shdrs.items[sec.index].sh_addr);
const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index;
const entry_addr: i64 = @intCast(entry_off - sec.off(dwarf) + elf_file.shdrs.items[shndx].sh_addr);
for (entry.external_relocs.items) |reloc| {
const symbol = zo.symbol(reloc.target_sym);
try dwarf.resolveReloc(
@ -1877,34 +1898,7 @@ pub fn init(lf: *link.File, format: DW.Format) Dwarf {
}
pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
if (dwarf.bin_file.cast(.elf)) |elf_file| {
for ([_]*Section{
&dwarf.debug_abbrev.section,
&dwarf.debug_aranges.section,
&dwarf.debug_frame.section,
&dwarf.debug_info.section,
&dwarf.debug_line.section,
&dwarf.debug_line_str.section,
&dwarf.debug_loclists.section,
&dwarf.debug_rnglists.section,
&dwarf.debug_str.section,
}, [_]u32{
elf_file.debug_abbrev_section_index.?,
elf_file.debug_aranges_section_index.?,
elf_file.eh_frame_section_index.?,
elf_file.debug_info_section_index.?,
elf_file.debug_line_section_index.?,
elf_file.debug_line_str_section_index.?,
elf_file.debug_loclists_section_index.?,
elf_file.debug_rnglists_section_index.?,
elf_file.debug_str_section_index.?,
}) |sec, section_index| {
const shdr = &elf_file.sections.items(.shdr)[section_index];
sec.index = section_index;
sec.off = shdr.sh_offset;
sec.len = shdr.sh_size;
}
} else if (dwarf.bin_file.cast(.macho)) |macho_file| {
if (dwarf.bin_file.cast(.macho)) |macho_file| {
if (macho_file.d_sym) |*d_sym| {
for ([_]*Section{
&dwarf.debug_abbrev.section,
@ -1927,7 +1921,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}) |sec, sect_index| {
const header = &d_sym.sections.items[sect_index];
sec.index = sect_index;
sec.off = header.offset;
sec.len = header.size;
}
} else {
@ -1952,7 +1945,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}) |sec, sect_index| {
const header = &macho_file.sections.items(.header)[sect_index];
sec.index = sect_index;
sec.off = header.offset;
sec.len = header.size;
}
}
@ -1960,6 +1952,32 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}
pub fn initMetadata(dwarf: *Dwarf) UpdateError!void {
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
for ([_]*Section{
&dwarf.debug_abbrev.section,
&dwarf.debug_aranges.section,
&dwarf.debug_frame.section,
&dwarf.debug_info.section,
&dwarf.debug_line.section,
&dwarf.debug_line_str.section,
&dwarf.debug_loclists.section,
&dwarf.debug_rnglists.section,
&dwarf.debug_str.section,
}, [_]u32{
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
zo.eh_frame_index.?,
zo.debug_info_index.?,
zo.debug_line_index.?,
zo.debug_line_str_index.?,
zo.debug_loclists_index.?,
zo.debug_rnglists_index.?,
zo.debug_str_index.?,
}) |sec, sym_index| {
sec.index = sym_index;
}
}
dwarf.reloadSectionMetadata();
dwarf.debug_abbrev.section.pad_to_ideal = false;
@ -2523,7 +2541,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined;
if (try dwarf.getFile().?.preadAll(
&abbrev_code_buf,
dwarf.debug_info.section.off + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
dwarf.debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off,
) != abbrev_code_buf.len) return error.InputOutput;
var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf);
const abbrev_code: AbbrevCode = @enumFromInt(
@ -3934,7 +3952,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
if (dwarf.debug_str.section.dirty) {
const contents = dwarf.debug_str.contents.items;
try dwarf.debug_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf));
dwarf.debug_str.section.dirty = false;
}
if (dwarf.debug_line.section.dirty) {
@ -4040,7 +4058,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
if (dwarf.debug_line_str.section.dirty) {
const contents = dwarf.debug_line_str.contents.items;
try dwarf.debug_line_str.section.resize(dwarf, contents.len);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off);
try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf));
dwarf.debug_line_str.section.dirty = false;
}
if (dwarf.debug_loclists.section.dirty) {

src/link/Elf.zig

@ -54,16 +54,6 @@ shdr_table_offset: ?u64 = null,
/// Same order as in the file.
phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
/// Tracked loadable segments during incremental linking.
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_zig_load_re_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Read flag
phdr_zig_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_zig_load_rw_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with zerofill data.
phdr_zig_load_zerofill_index: ?u16 = null,
/// Special program headers
/// PT_PHDR
phdr_table_index: ?u16 = null,
@ -124,22 +114,6 @@ rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
/// Applies only to a relocatable.
comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{},
/// Tracked section headers with incremental updates to Zig object.
/// .rela.* sections are only used when emitting a relocatable object file.
zig_text_section_index: ?u32 = null,
zig_data_rel_ro_section_index: ?u32 = null,
zig_data_section_index: ?u32 = null,
zig_bss_section_index: ?u32 = null,
debug_info_section_index: ?u32 = null,
debug_abbrev_section_index: ?u32 = null,
debug_str_section_index: ?u32 = null,
debug_aranges_section_index: ?u32 = null,
debug_line_section_index: ?u32 = null,
debug_line_str_section_index: ?u32 = null,
debug_loclists_section_index: ?u32 = null,
debug_rnglists_section_index: ?u32 = null,
copy_rel_section_index: ?u32 = null,
dynamic_section_index: ?u32 = null,
dynstrtab_section_index: ?u32 = null,
@ -419,7 +393,8 @@ pub fn deinit(self: *Elf) void {
self.objects.deinit(gpa);
self.shared_objects.deinit(gpa);
for (self.sections.items(.atom_list), self.sections.items(.free_list)) |*atoms, *free_list| {
for (self.sections.items(.atom_list_2), self.sections.items(.atom_list), self.sections.items(.free_list)) |*atom_list, *atoms, *free_list| {
atom_list.deinit(gpa);
atoms.deinit(gpa);
free_list.deinit(gpa);
}
@ -554,7 +529,7 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 {
return start;
}
pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void {
const slice = self.sections.slice();
const shdr = &slice.items(.shdr)[shdr_index];
assert(shdr.sh_flags & elf.SHF_ALLOC != 0);
@ -573,8 +548,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign;
const new_offset = try self.findFreeSpace(needed_size, alignment);
const new_offset = try self.findFreeSpace(needed_size, min_alignment);
log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
@ -614,7 +588,7 @@ pub fn growNonAllocSection(
self: *Elf,
shdr_index: u32,
needed_size: u64,
min_alignment: u32,
min_alignment: u64,
requires_file_copy: bool,
) !void {
const shdr = &self.sections.items(.shdr)[shdr_index];
@ -648,33 +622,124 @@ pub fn growNonAllocSection(
try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
shdr.sh_size = needed_size;
self.markDirty(shdr_index);
}
pub fn markDirty(self: *Elf, shdr_index: u32) void {
const zig_object = self.zigObjectPtr().?;
if (zig_object.dwarf) |_| {
if (self.debug_info_section_index.? == shdr_index) {
zig_object.debug_info_section_dirty = true;
} else if (self.debug_abbrev_section_index.? == shdr_index) {
zig_object.debug_abbrev_section_dirty = true;
} else if (self.debug_str_section_index.? == shdr_index) {
zig_object.debug_str_section_dirty = true;
} else if (self.debug_aranges_section_index.? == shdr_index) {
zig_object.debug_aranges_section_dirty = true;
} else if (self.debug_line_section_index.? == shdr_index) {
zig_object.debug_line_section_dirty = true;
} else if (self.debug_line_str_section_index.? == shdr_index) {
zig_object.debug_line_str_section_dirty = true;
} else if (self.debug_loclists_section_index.? == shdr_index) {
zig_object.debug_loclists_section_dirty = true;
} else if (self.debug_rnglists_section_index.? == shdr_index) {
zig_object.debug_rnglists_section_dirty = true;
if (self.zigObjectPtr()) |zo| {
for ([_]?Symbol.Index{
zo.debug_info_index,
zo.debug_abbrev_index,
zo.debug_aranges_index,
zo.debug_str_index,
zo.debug_line_index,
zo.debug_line_str_index,
zo.debug_loclists_index,
zo.debug_rnglists_index,
}, [_]*bool{
&zo.debug_info_section_dirty,
&zo.debug_abbrev_section_dirty,
&zo.debug_aranges_section_dirty,
&zo.debug_str_section_dirty,
&zo.debug_line_section_dirty,
&zo.debug_line_str_section_dirty,
&zo.debug_loclists_section_dirty,
&zo.debug_rnglists_section_dirty,
}) |maybe_sym_index, dirty| {
const sym_index = maybe_sym_index orelse continue;
if (zo.symbol(sym_index).atom(self).?.output_section_index == shdr_index) {
dirty.* = true;
break;
}
}
}
}
const AllocateChunkResult = struct {
value: u64,
placement: Ref,
};
pub fn allocateChunk(self: *Elf, args: struct {
size: u64,
shndx: u32,
alignment: Atom.Alignment,
requires_padding: bool = true,
}) !AllocateChunkResult {
const slice = self.sections.slice();
const shdr = &slice.items(.shdr)[args.shndx];
const free_list = &slice.items(.free_list)[args.shndx];
const last_atom_ref = &slice.items(.last_atom)[args.shndx];
const new_atom_ideal_capacity = if (args.requires_padding) padToIdeal(args.size) else args.size;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
const res: AllocateChunkResult = blk: {
var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
const big_atom_ref = free_list.items[i];
const big_atom = self.atom(big_atom_ref).?;
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(self);
const ideal_capacity = if (args.requires_padding) padToIdeal(cap) else cap;
const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = args.alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
if (!big_atom.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
// At this point we know that we will place the new block here. But the
// remaining question is whether there is still enough capacity left
// over for a free list node.
const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
const keep_free_list_node = remaining_capacity >= min_text_capacity;
if (!keep_free_list_node) {
_ = free_list.swapRemove(i);
}
break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref };
} else if (self.atom(last_atom_ref.*)) |last_atom| {
const ideal_capacity = if (args.requires_padding) padToIdeal(last_atom.size) else last_atom.size;
const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
const new_start_vaddr = args.alignment.forward(ideal_capacity_end_vaddr);
break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() };
} else {
break :blk .{ .value = 0, .placement = .{} };
}
};
log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{
args.size,
args.alignment.toByteUnits().?,
shdr.sh_addr + res.value,
shdr.sh_offset + res.value,
});
const expand_section = if (self.atom(res.placement)) |placement_atom|
placement_atom.nextAtom(self) == null
else
true;
if (expand_section) {
const needed_size = res.value + args.size;
if (shdr.sh_flags & elf.SHF_ALLOC != 0)
try self.growAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?)
else
try self.growNonAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?, true);
}
return res;
}
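For reference, the primary caller of allocateChunk is the new AtomList.allocate (src/link/Elf/AtomList.zig, added below), which places a whole list of atoms as one chunk:

    const alloc_res = try elf_file.allocateChunk(.{
        .shndx = list.output_section_index,
        .size = list.size,
        .alignment = list.alignment,
        .requires_padding = false,
    });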
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
@ -972,14 +1037,13 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
try self.initSyntheticSections();
try self.initSpecialPhdrs();
try self.sortShdrs();
for (self.objects.items) |index| {
try self.file(index).?.object.addAtomsToOutputSections(self);
}
try self.sortInitFini();
try self.setDynamicSection(rpath_table.keys());
self.sortDynamicSymtab();
try self.setHashSections();
try self.setVersionSymtab();
try self.sortInitFini();
try self.updateMergeSectionSizes();
try self.updateSectionSizes();
@ -1010,7 +1074,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const code = try zo.codeAlloc(self, atom_index);
defer gpa.free(code);
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
const file_offset = atom_ptr.offset(self);
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
@ -1750,6 +1814,59 @@ fn scanRelocs(self: *Elf) !void {
}
}
pub fn initOutputSection(self: *Elf, args: struct {
name: [:0]const u8,
flags: u64,
type: u32,
}) error{OutOfMemory}!u32 {
const name = blk: {
if (self.base.isRelocatable()) break :blk args.name;
if (args.flags & elf.SHF_MERGE != 0) break :blk args.name;
const name_prefixes: []const [:0]const u8 = &.{
".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
".dtors", ".gnu.warning",
};
inline for (name_prefixes) |prefix| {
if (std.mem.eql(u8, args.name, prefix) or std.mem.startsWith(u8, args.name, prefix ++ ".")) {
break :blk prefix;
}
}
break :blk args.name;
};
const @"type" = tt: {
if (self.getTarget().cpu.arch == .x86_64 and args.type == elf.SHT_X86_64_UNWIND)
break :tt elf.SHT_PROGBITS;
switch (args.type) {
elf.SHT_NULL => unreachable,
elf.SHT_PROGBITS => {
if (std.mem.eql(u8, args.name, ".init_array") or std.mem.startsWith(u8, args.name, ".init_array."))
break :tt elf.SHT_INIT_ARRAY;
if (std.mem.eql(u8, args.name, ".fini_array") or std.mem.startsWith(u8, args.name, ".fini_array."))
break :tt elf.SHT_FINI_ARRAY;
break :tt args.type;
},
else => break :tt args.type,
}
};
const flags = blk: {
var flags = args.flags;
if (!self.base.isRelocatable()) {
flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN);
}
break :blk switch (@"type") {
elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE,
else => flags,
};
};
const out_shndx = self.sectionByName(name) orelse try self.addSection(.{
.type = @"type",
.flags = flags,
.name = try self.insertShString(name),
});
return out_shndx;
}
fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void {
dev.check(.lld_linker);
@ -2783,12 +2900,16 @@ fn initSyntheticSections(self: *Elf) !void {
const target = self.getTarget();
const ptr_size = self.ptrWidthBytes();
const needs_eh_frame = for (self.objects.items) |index| {
if (self.file(index).?.object.cies.items.len > 0) break true;
} else false;
const needs_eh_frame = blk: {
if (self.zigObjectPtr()) |zo|
if (zo.eh_frame_index != null) break :blk true;
break :blk for (self.objects.items) |index| {
if (self.file(index).?.object.cies.items.len > 0) break true;
} else false;
};
if (needs_eh_frame) {
if (self.eh_frame_section_index == null) {
self.eh_frame_section_index = try self.addSection(.{
self.eh_frame_section_index = self.sectionByName(".eh_frame") orelse try self.addSection(.{
.name = try self.insertShString(".eh_frame"),
.type = if (target.cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@ -3084,8 +3205,9 @@ fn sortInitFini(self: *Elf) !void {
}
};
for (slice.items(.shdr), slice.items(.atom_list)) |shdr, *atom_list| {
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (atom_list.atoms.items.len == 0) continue;
var is_init_fini = false;
var is_ctor_dtor = false;
@ -3099,15 +3221,13 @@ fn sortInitFini(self: *Elf) !void {
is_ctor_dtor = mem.indexOf(u8, name, ".ctors") != null or mem.indexOf(u8, name, ".dtors") != null;
},
}
if (!is_init_fini and !is_ctor_dtor) continue;
if (atom_list.items.len == 0) continue;
var entries = std.ArrayList(Entry).init(gpa);
try entries.ensureTotalCapacityPrecise(atom_list.items.len);
try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len);
defer entries.deinit();
for (atom_list.items) |ref| {
for (atom_list.atoms.items) |ref| {
const atom_ptr = self.atom(ref).?;
const object = atom_ptr.file(self).?.object;
const priority = blk: {
@ -3126,9 +3246,9 @@ fn sortInitFini(self: *Elf) !void {
mem.sort(Entry, entries.items, self, Entry.lessThan);
atom_list.clearRetainingCapacity();
atom_list.atoms.clearRetainingCapacity();
for (entries.items) |entry| {
atom_list.appendAssumeCapacity(entry.atom_ref);
atom_list.atoms.appendAssumeCapacity(entry.atom_ref);
}
}
}
@ -3233,9 +3353,6 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void {
}
for (&[_]*?u16{
&self.phdr_zig_load_re_index,
&self.phdr_zig_load_ro_index,
&self.phdr_zig_load_zerofill_index,
&self.phdr_table_index,
&self.phdr_table_load_index,
&self.phdr_interp_index,
@ -3272,33 +3389,36 @@ fn shdrRank(self: *Elf, shndx: u32) u8 {
elf.SHT_PREINIT_ARRAY,
elf.SHT_INIT_ARRAY,
elf.SHT_FINI_ARRAY,
=> return 0xf2,
=> return 0xf1,
elf.SHT_DYNAMIC => return 0xf3,
elf.SHT_DYNAMIC => return 0xf2,
elf.SHT_RELA, elf.SHT_GROUP => return 0xf,
elf.SHT_PROGBITS => if (flags & elf.SHF_ALLOC != 0) {
if (flags & elf.SHF_EXECINSTR != 0) {
return 0xf1;
return 0xf0;
} else if (flags & elf.SHF_WRITE != 0) {
return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6;
return if (flags & elf.SHF_TLS != 0) 0xf3 else 0xf5;
} else if (mem.eql(u8, name, ".interp")) {
return 1;
} else if (mem.startsWith(u8, name, ".eh_frame")) {
return 0xe1;
} else {
return 0xf0;
return 0xe0;
}
} else {
if (mem.startsWith(u8, name, ".debug")) {
return 0xf8;
return 0xf7;
} else {
return 0xf9;
return 0xf8;
}
},
elf.SHT_X86_64_UNWIND => return 0xe1,
elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf5 else 0xf7,
elf.SHT_SYMTAB => return 0xfa,
elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfb,
elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6,
elf.SHT_SYMTAB => return 0xf9,
elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfa,
else => return 0xff,
}
}
@ -3361,18 +3481,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
&self.copy_rel_section_index,
&self.versym_section_index,
&self.verneed_section_index,
&self.zig_text_section_index,
&self.zig_data_rel_ro_section_index,
&self.zig_data_section_index,
&self.zig_bss_section_index,
&self.debug_info_section_index,
&self.debug_abbrev_section_index,
&self.debug_str_section_index,
&self.debug_aranges_section_index,
&self.debug_line_section_index,
&self.debug_line_str_section_index,
&self.debug_loclists_section_index,
&self.debug_rnglists_section_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
@ -3383,13 +3491,19 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
msec.output_section_index = backlinks[msec.output_section_index];
}
for (self.sections.items(.shdr)) |*shdr| {
if (shdr.sh_type != elf.SHT_RELA) continue;
// FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
// to point at symtab
// shdr.sh_link = backlinks[shdr.sh_link];
shdr.sh_link = self.symtab_section_index.?;
shdr.sh_info = backlinks[shdr.sh_info];
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
atom_list.output_section_index = backlinks[atom_list.output_section_index];
for (atom_list.atoms.items) |ref| {
self.atom(ref).?.output_section_index = atom_list.output_section_index;
}
if (shdr.sh_type == elf.SHT_RELA) {
// FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections
// to point at symtab
// shdr.sh_link = backlinks[shdr.sh_link];
shdr.sh_link = self.symtab_section_index.?;
shdr.sh_info = backlinks[shdr.sh_info];
}
}
if (self.zigObjectPtr()) |zo| {
@ -3397,7 +3511,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
const atom_ptr = zo.atom(atom_index) orelse continue;
atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index];
}
if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
}
for (self.comdat_group_sections.items) |*cg| {
@ -3405,53 +3518,53 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
}
if (self.symtab_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.strtab_section_index.?;
}
if (self.dynamic_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.dynsymtab_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.hash_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.gnu_hash_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.versym_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
}
if (self.verneed_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynstrtab_section_index.?;
}
if (self.rela_dyn_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index orelse 0;
}
if (self.rela_plt_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.dynsymtab_section_index.?;
shdr.sh_info = self.plt_section_index.?;
}
if (self.eh_frame_rela_section_index) |index| {
const shdr = &self.sections.items(.shdr)[index];
const shdr = &slice.items(.shdr)[index];
shdr.sh_link = self.symtab_section_index.?;
shdr.sh_info = self.eh_frame_section_index.?;
}
@ -3459,37 +3572,32 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
fn updateSectionSizes(self: *Elf) !void {
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| {
if (atom_list.items.len == 0) continue;
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (atom_list.atoms.items.len == 0) continue;
if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
for (atom_list.items) |ref| {
const atom_ptr = self.atom(ref) orelse continue;
if (!atom_ptr.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
atom_list.updateSize(self);
try atom_list.allocate(self);
}
if (self.requiresThunks()) {
for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| {
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
if (atom_list.items.len == 0) continue;
if (atom_list.atoms.items.len == 0) continue;
// Create jump/branch range extenders if needed.
try self.createThunks(shdr, @intCast(shndx));
try self.createThunks(atom_list);
try atom_list.allocate(self);
}
// FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList.
for (self.thunks.items) |*th| {
th.value += slice.items(.atom_list_2)[th.output_section_index].value;
}
}
const shdrs = slice.items(.shdr);
if (self.eh_frame_section_index) |index| {
shdrs[index].sh_size = existing_size: {
const zo = self.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(self).?.size;
} + try eh_frame.calcEhFrameSize(self);
shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
}
if (self.eh_frame_hdr_section_index) |index| {
@ -3587,13 +3695,11 @@ fn shdrToPhdrFlags(sh_flags: u64) u32 {
/// (This is an upper bound so that we can reserve enough space for the header and program header
/// table without running out of space and being forced to move things around.)
fn getMaxNumberOfPhdrs() u64 {
// First, assume we compile Zig's source incrementally, this gives us:
var num: u64 = number_of_zig_segments;
// Next, the estimated maximum number of segments the linker can emit for input sections are:
num += max_number_of_object_segments;
// Next, any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP:
// The estimated maximum number of segments the linker can emit for input sections is:
var num: u64 = max_number_of_object_segments;
// Any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP:
num += max_number_of_special_phdrs;
// Finally, PHDR program header and corresponding read-only load segment:
// PHDR program header and corresponding read-only load segment:
num += 2;
return num;
}
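With the constants defined near the end of this file (max_number_of_object_segments = 9, max_number_of_special_phdrs = 5), this now evaluates to 9 + 5 + 2 = 16 reserved program headers; the four Zig-specific segments previously counted via number_of_zig_segments no longer contribute.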
@ -3603,10 +3709,9 @@ fn getMaxNumberOfPhdrs() u64 {
/// We permit a maximum of 3**2 = 9 segments.
fn calcNumberOfSegments(self: *Elf) usize {
var covers: [9]bool = [_]bool{false} ** 9;
for (self.sections.items(.shdr), 0..) |shdr, shndx| {
for (self.sections.items(.shdr)) |shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (self.isZigSection(@intCast(shndx))) continue;
const flags = shdrToPhdrFlags(shdr.sh_flags);
covers[flags - 1] = true;
}
@ -3704,7 +3809,6 @@ pub fn allocateAllocSections(self: *Elf) !void {
for (slice.items(.shdr), 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (self.isZigSection(@intCast(shndx))) continue;
const flags = shdrToPhdrFlags(shdr.sh_flags);
try covers[flags - 1].append(@intCast(shndx));
}
@ -3794,10 +3898,20 @@ pub fn allocateAllocSections(self: *Elf) !void {
}
new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset);
if (shndx == self.eh_frame_section_index) eh_frame: {
const zo = self.zigObjectPtr() orelse break :eh_frame;
const sym = zo.symbol(zo.eh_frame_index orelse break :eh_frame);
const existing_size = sym.atom(self).?.size;
if (self.zigObjectPtr()) |zo| blk: {
const existing_size = for ([_]?Symbol.Index{
zo.text_index,
zo.rodata_index,
zo.data_relro_index,
zo.data_index,
zo.tdata_index,
zo.eh_frame_index,
}) |maybe_sym_index| {
const sect_sym_index = maybe_sym_index orelse continue;
const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?;
if (sect_atom_ptr.output_section_index != shndx) continue;
break sect_atom_ptr.size;
} else break :blk;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
shdr.sh_offset,
@ -3830,27 +3944,27 @@ pub fn allocateNonAllocSections(self: *Elf) !void {
shdr.sh_size = 0;
const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign);
if (self.isDebugSection(@intCast(shndx))) {
if (self.zigObjectPtr()) |zo| blk: {
const existing_size = for ([_]?Symbol.Index{
zo.debug_info_index,
zo.debug_abbrev_index,
zo.debug_aranges_index,
zo.debug_str_index,
zo.debug_line_index,
zo.debug_line_str_index,
zo.debug_loclists_index,
zo.debug_rnglists_index,
}) |maybe_sym_index| {
const sym_index = maybe_sym_index orelse continue;
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(self).?;
if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
} else break :blk;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
self.getShString(shdr.sh_name),
shdr.sh_offset,
new_offset,
});
const zo = self.zigObjectPtr().?;
const existing_size = for ([_]Symbol.Index{
zo.debug_info_index.?,
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
zo.debug_str_index.?,
zo.debug_line_index.?,
zo.debug_line_str_index.?,
zo.debug_loclists_index.?,
zo.debug_rnglists_index.?,
}) |sym_index| {
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(self).?;
if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
} else 0;
const amt = try self.base.file.?.copyRangeAll(
shdr.sh_offset,
self.base.file.?,
@ -3934,91 +4048,28 @@ fn writeAtoms(self: *Elf) !void {
undefs.deinit();
}
var has_reloc_errors = false;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
var has_reloc_errors = false;
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (atom_list.items.len == 0) continue;
log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
// TODO really, really handle debug section separately
const base_offset = if (self.isDebugSection(@intCast(shndx))) base_offset: {
const zo = self.zigObjectPtr().?;
for ([_]Symbol.Index{
zo.debug_info_index.?,
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
zo.debug_str_index.?,
zo.debug_line_index.?,
zo.debug_line_str_index.?,
zo.debug_loclists_index.?,
zo.debug_rnglists_index.?,
}) |sym_index| {
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(self).?;
if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size;
}
break :base_offset 0;
} else if (@as(u32, @intCast(shndx)) == self.eh_frame_section_index) base_offset: {
const zo = self.zigObjectPtr() orelse break :base_offset 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :base_offset 0);
break :base_offset sym.atom(self).?.size;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
const buffer = try gpa.alloc(u8, sh_size);
defer gpa.free(buffer);
const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
shdr.sh_flags & elf.SHF_EXECINSTR != 0 and self.getTarget().cpu.arch == .x86_64)
0xcc // int3
else
0;
@memset(buffer, padding_byte);
for (atom_list.items) |ref| {
const atom_ptr = self.atom(ref).?;
assert(atom_ptr.alive);
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
log.debug("writing atom({}) at 0x{x}", .{ ref, sh_offset + offset });
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = switch (atom_ptr.file(self).?) {
.object => |x| try x.codeDecompressAlloc(self, ref.index),
.zig_object => |x| try x.codeAlloc(self, ref.index),
else => unreachable,
};
defer gpa.free(in_code);
@memcpy(out_code, in_code);
const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0)
atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs)
else
atom_ptr.resolveRelocsAlloc(self, out_code);
_ = res catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
};
}
try self.base.file.?.pwriteAll(buffer, sh_offset);
if (atom_list.atoms.items.len == 0) continue;
atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
};
}
if (self.requiresThunks()) {
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
if (self.requiresThunks()) {
for (self.thunks.items) |th| {
const thunk_size = th.size(self);
try buffer.ensureUnusedCapacity(thunk_size);
@ -4030,10 +4081,6 @@ fn writeAtoms(self: *Elf) !void {
buffer.clearRetainingCapacity();
}
}
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
}
pub fn updateSymtabSize(self: *Elf) !void {
@ -4667,34 +4714,6 @@ pub fn isEffectivelyDynLib(self: Elf) bool {
};
}
pub fn isZigSection(self: Elf, shndx: u32) bool {
inline for (&[_]?u32{
self.zig_text_section_index,
self.zig_data_rel_ro_section_index,
self.zig_data_section_index,
self.zig_bss_section_index,
}) |index| {
if (index == shndx) return true;
}
return false;
}
pub fn isDebugSection(self: Elf, shndx: u32) bool {
inline for (&[_]?u32{
self.debug_info_section_index,
self.debug_abbrev_section_index,
self.debug_str_section_index,
self.debug_aranges_section_index,
self.debug_line_section_index,
self.debug_line_str_section_index,
self.debug_loclists_section_index,
self.debug_rnglists_section_index,
}) |index| {
if (index == shndx) return true;
}
return false;
}
pub fn addPhdr(self: *Elf, opts: struct {
type: u32 = 0,
flags: u32 = 0,
@ -5070,7 +5089,7 @@ fn reportMissingLibraryError(
}
}
pub fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void {
var err = try self.base.addErrorWithNotes(0);
try err.addMsg("fatal linker error: unsupported CPU architecture {s}", .{
@tagName(self.getTarget().cpu.arch),
@ -5282,6 +5301,14 @@ fn fmtDumpState(
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
}
const slice = self.sections.slice();
{
try writer.writeAll("atom lists\n");
for (slice.items(.shdr), slice.items(.atom_list_2), 0..) |shdr, atom_list, shndx| {
try writer.print("shdr({d}) : {s} : {}", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) });
}
}
if (self.requiresThunks()) {
try writer.writeAll("thunks\n");
for (self.thunks.items, 0..) |th, index| {
@ -5303,7 +5330,7 @@ fn fmtDumpState(
}
try writer.writeAll("\nOutput shdrs\n");
for (self.sections.items(.shdr), self.sections.items(.phndx), 0..) |shdr, phndx, shndx| {
for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| {
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
shndx,
phndx,
@ -5373,7 +5400,6 @@ fn requiresThunks(self: Elf) bool {
/// so that we reserve enough space for the program header table up-front.
/// Bump these numbers when adding or deleting a Zig specific pre-allocated segment, or adding
/// more special-purpose program headers.
pub const number_of_zig_segments = 4;
const max_number_of_object_segments = 9;
const max_number_of_special_phdrs = 5;
@ -5558,8 +5584,14 @@ const Section = struct {
phndx: ?u32 = null,
/// List of atoms contributing to this section.
/// TODO currently this is only used for relocation tracking in relocatable mode
/// but will be merged with atom_list_2.
atom_list: std.ArrayListUnmanaged(Ref) = .{},
/// List of atoms contributing to this section.
/// This can be used by sections that require special handling such as init/fini array, etc.
atom_list_2: AtomList = .{},
/// Index of the last allocated atom in this section.
last_atom: Ref = .{ .index = 0, .file = 0 },
@ -5588,9 +5620,10 @@ fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 {
};
}
fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
const gpa = elf_file.base.comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
// A branch will need an extender if the distance to its target is larger than
// `2^(jump_bits - 1) - margin` where margin is some arbitrary number.
const max_distance = switch (cpu_arch) {
@ -5598,36 +5631,44 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
.x86_64, .riscv64 => unreachable,
else => @panic("unhandled arch"),
};
const atoms = elf_file.sections.items(.atom_list)[shndx].items;
assert(atoms.len > 0);
for (atoms) |ref| {
const advance = struct {
fn advance(list: *AtomList, size: u64, alignment: Atom.Alignment) !i64 {
const offset = alignment.forward(list.size);
const padding = offset - list.size;
list.size += padding + size;
list.alignment = list.alignment.max(alignment);
return @intCast(offset);
}
}.advance;
for (atom_list.atoms.items) |ref| {
elf_file.atom(ref).?.value = -1;
}
var i: usize = 0;
while (i < atoms.len) {
while (i < atom_list.atoms.items.len) {
const start = i;
const start_atom = elf_file.atom(atoms[start]).?;
const start_atom = elf_file.atom(atom_list.atoms.items[start]).?;
assert(start_atom.alive);
start_atom.value = try advanceSection(shdr, start_atom.size, start_atom.alignment);
start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment);
i += 1;
while (i < atoms.len) : (i += 1) {
const atom_ptr = elf_file.atom(atoms[i]).?;
while (i < atom_list.atoms.items.len) : (i += 1) {
const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?;
assert(atom_ptr.alive);
if (@as(i64, @intCast(atom_ptr.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance)
break;
atom_ptr.value = try advanceSection(shdr, atom_ptr.size, atom_ptr.alignment);
atom_ptr.value = try advance(atom_list, atom_ptr.size, atom_ptr.alignment);
}
// Insert a thunk at the group end
const thunk_index = try elf_file.addThunk();
const thunk_ptr = elf_file.thunk(thunk_index);
thunk_ptr.output_section_index = shndx;
thunk_ptr.output_section_index = atom_list.output_section_index;
// Scan relocs in the group and create trampolines for any unreachable callsite
for (atoms[start..i]) |ref| {
for (atom_list.atoms.items[start..i]) |ref| {
const atom_ptr = elf_file.atom(ref).?;
const file_ptr = atom_ptr.file(elf_file).?;
log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) });
@ -5657,18 +5698,11 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void {
atom_ptr.addExtra(.{ .thunk = thunk_index }, elf_file);
}
thunk_ptr.value = try advanceSection(shdr, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
thunk_ptr.value = try advance(atom_list, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
log.debug("thunk({d}) : {}", .{ thunk_index, thunk_ptr.fmt(elf_file) });
}
}
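For example, an AArch64 unconditional branch (B/BL) encodes a signed 26-bit immediate in units of 4 bytes, giving a reach of ±2^27 bytes (±128 MiB); max_distance above is that bound minus the safety margin mentioned in the comment.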
fn advanceSection(shdr: *elf.Elf64_Shdr, adv_size: u64, alignment: Atom.Alignment) !i64 {
const offset = alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
shdr.sh_size += padding + adv_size;
shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
return @intCast(offset);
}
const std = @import("std");
const build_options = @import("build_options");
@ -5699,6 +5733,7 @@ const Air = @import("../Air.zig");
const Allocator = std.mem.Allocator;
const Archive = @import("Elf/Archive.zig");
pub const Atom = @import("Elf/Atom.zig");
const AtomList = @import("Elf/AtomList.zig");
const Cache = std.Build.Cache;
const Path = Cache.Path;
const Compilation = @import("../Compilation.zig");

View File

@ -51,6 +51,11 @@ pub fn address(self: Atom, elf_file: *Elf) i64 {
return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}
pub fn offset(self: Atom, elf_file: *Elf) u64 {
const shdr = elf_file.sections.items(.shdr)[self.output_section_index];
return shdr.sh_offset + @as(u64, @intCast(self.value));
}
pub fn ref(self: Atom) Elf.Ref {
return .{ .index = self.atom_index, .file = self.file_index };
}
@ -123,140 +128,6 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
return surplus >= Elf.min_text_capacity;
}
pub fn allocate(self: *Atom, elf_file: *Elf) !void {
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[self.output_section_index];
const free_list = &slice.items(.free_list)[self.output_section_index];
const last_atom_ref = &slice.items(.last_atom)[self.output_section_index];
const new_atom_ideal_capacity = Elf.padToIdeal(self.size);
// We use these to indicate our intention to update metadata, placing the new atom,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var atom_placement: ?Elf.Ref = null;
var free_list_removal: ?usize = null;
// First we look for an appropriately sized free list node.
// The list is unordered. We'll just take the first thing that works.
self.value = blk: {
var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len;
while (i < free_list.items.len) {
const big_atom_ref = free_list.items[i];
const big_atom = elf_file.atom(big_atom_ref).?;
// We now have a pointer to a live atom that has too much capacity.
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
const ideal_capacity = Elf.padToIdeal(cap);
const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
if (!big_atom.freeListEligible(elf_file)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
// At this point we know that we will place the new block here. But the
// remaining question is whether there is still yet enough capacity left
// over for there to still be a free list node.
const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity;
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = big_atom_ref;
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk @intCast(new_start_vaddr);
} else if (elf_file.atom(last_atom_ref.*)) |last_atom| {
const ideal_capacity = Elf.padToIdeal(last_atom.size);
const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity;
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last_atom.ref();
break :blk @intCast(new_start_vaddr);
} else {
break :blk 0;
}
};
log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{
self.ref(),
self.name(elf_file),
self.address(elf_file),
self.address(elf_file) + @as(i64, @intCast(self.size)),
});
const expand_section = if (atom_placement) |placement_ref|
elf_file.atom(placement_ref).?.nextAtom(elf_file) == null
else
true;
if (expand_section) {
const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
try elf_file.growAllocSection(self.output_section_index, needed_size);
last_atom_ref.* = self.ref();
switch (self.file(elf_file).?) {
.zig_object => |zo| if (zo.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
// range of the compilation unit. When we expand the text section, this range changes,
// so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
zo.debug_info_section_dirty = true;
// This becomes dirty for the same reason. We could potentially make this more
// fine-grained with the addition of support for more compilation units. It is planned to
// model each package as a different compilation unit.
zo.debug_aranges_section_dirty = true;
zo.debug_rnglists_section_dirty = true;
},
else => {},
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?);
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (self.prevAtom(elf_file)) |prev| {
prev.next_atom_ref = self.next_atom_ref;
}
if (self.nextAtom(elf_file)) |next| {
next.prev_atom_ref = self.prev_atom_ref;
}
if (atom_placement) |big_atom_ref| {
const big_atom = elf_file.atom(big_atom_ref).?;
self.prev_atom_ref = big_atom_ref;
self.next_atom_ref = big_atom.next_atom_ref;
big_atom.next_atom_ref = self.ref();
} else {
self.prev_atom_ref = .{ .index = 0, .file = 0 };
self.next_atom_ref = .{ .index = 0, .file = 0 };
}
if (free_list_removal) |i| {
_ = free_list.swapRemove(i);
}
self.alive = true;
}
pub fn shrink(self: *Atom, elf_file: *Elf) void {
_ = self;
_ = elf_file;
}
pub fn grow(self: *Atom, elf_file: *Elf) !void {
if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
try self.allocate(elf_file);
}
pub fn free(self: *Atom, elf_file: *Elf) void {
log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) });
@ -1807,7 +1678,7 @@ const aarch64 = struct {
=> {
// TODO: NC means no overflow check
const taddr = @as(u64, @intCast(S + A));
const offset: u12 = switch (r_type) {
const off: u12 = switch (r_type) {
.LDST8_ABS_LO12_NC => @truncate(taddr),
.LDST16_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 2),
.LDST32_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 4),
@ -1815,7 +1686,7 @@ const aarch64 = struct {
.LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
else => unreachable,
};
aarch64_util.writeLoadStoreRegInst(offset, code);
aarch64_util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
@ -1839,8 +1710,8 @@ const aarch64 = struct {
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
@ -1853,8 +1724,8 @@ const aarch64 = struct {
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
@ -1873,8 +1744,8 @@ const aarch64 = struct {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(off, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
@ -1885,8 +1756,8 @@ const aarch64 = struct {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(off, code);
} else {
const old_inst = Instruction{
.add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(

208
src/link/Elf/AtomList.zig Normal file
View File

@ -0,0 +1,208 @@
value: i64 = 0,
size: u64 = 0,
alignment: Atom.Alignment = .@"1",
output_section_index: u32 = 0,
atoms: std.ArrayListUnmanaged(Elf.Ref) = .{},
pub fn deinit(list: *AtomList, allocator: Allocator) void {
list.atoms.deinit(allocator);
}
pub fn address(list: AtomList, elf_file: *Elf) i64 {
const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
return @as(i64, @intCast(shdr.sh_addr)) + list.value;
}
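/// File offset of the list: the output section's file offset plus the list's
/// section-relative value.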
pub fn offset(list: AtomList, elf_file: *Elf) u64 {
const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
return shdr.sh_offset + @as(u64, @intCast(list.value));
}
pub fn updateSize(list: *AtomList, elf_file: *Elf) void {
for (list.atoms.items) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
const off = atom_ptr.alignment.forward(list.size);
const padding = off - list.size;
atom_ptr.value = @intCast(off);
list.size += padding + atom_ptr.size;
list.alignment = list.alignment.max(atom_ptr.alignment);
}
}
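// A minimal sketch of the offset arithmetic above, using std.mem.alignForward
// in place of Atom.Alignment.forward (assumed equivalent for power-of-two
// alignments): appending a 4-byte, 8-aligned atom to a list of size 6 places
// it at offset 8 and grows the list to 12.
test "updateSize offset arithmetic" {
    var size: u64 = 6;
    const off = std.mem.alignForward(u64, size, 8);
    const padding = off - size;
    size += padding + 4;
    try std.testing.expectEqual(@as(u64, 8), off);
    try std.testing.expectEqual(@as(u64, 12), size);
}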
pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
const alloc_res = try elf_file.allocateChunk(.{
.shndx = list.output_section_index,
.size = list.size,
.alignment = list.alignment,
.requires_padding = false,
});
list.value = @intCast(alloc_res.value);
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[list.output_section_index];
const last_atom_ref = &slice.items(.last_atom)[list.output_section_index];
const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
placement_atom.nextAtom(elf_file) == null
else
true;
if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref();
shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?);
// FIXME:JK this currently ignores Thunks as valid chunks.
{
var idx: usize = 0;
while (idx < list.atoms.items.len) : (idx += 1) {
const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?;
if (idx > 0) {
curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1];
}
if (idx + 1 < list.atoms.items.len) {
curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1];
}
}
}
if (elf_file.atom(alloc_res.placement)) |placement_atom| {
list.firstAtom(elf_file).prev_atom_ref = placement_atom.ref();
list.lastAtom(elf_file).next_atom_ref = placement_atom.next_atom_ref;
placement_atom.next_atom_ref = list.firstAtom(elf_file).ref();
}
// FIXME:JK if we had a link from an Atom to its parent AtomList, we would not need to update the Atom's value or output section index here
for (list.atoms.items) |ref| {
const atom_ptr = elf_file.atom(ref).?;
atom_ptr.output_section_index = list.output_section_index;
atom_ptr.value += list.value;
}
}
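// In short: the list is spliced into the section's doubly-linked atom chain
// right after the placement atom (or becomes the chain when there is none),
// and every member atom is rebased from a list-relative to a section-relative
// value.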
pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});
const list_size = math.cast(usize, list.size) orelse return error.Overflow;
try buffer.ensureUnusedCapacity(list_size);
buffer.appendNTimesAssumeCapacity(0, list_size);
for (list.atoms.items) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
defer gpa.free(code);
const out_code = buffer.items[off..][0..size];
@memcpy(out_code, code);
if (osec.sh_flags & elf.SHF_ALLOC == 0)
try atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, undefs)
else
try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
}
try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
buffer.clearRetainingCapacity();
}
pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const osec = elf_file.sections.items(.shdr)[list.output_section_index];
assert(osec.sh_type != elf.SHT_NOBITS);
log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});
const list_size = math.cast(usize, list.size) orelse return error.Overflow;
try buffer.ensureUnusedCapacity(list_size);
buffer.appendNTimesAssumeCapacity(0, list_size);
for (list.atoms.items) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off });
const object = atom_ptr.file(elf_file).?.object;
const code = try object.codeDecompressAlloc(elf_file, ref.index);
defer gpa.free(code);
const out_code = buffer.items[off..][0..size];
@memcpy(out_code, code);
}
try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
buffer.clearRetainingCapacity();
}
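// Unlike `write`, no relocations are resolved here: relocatable output keeps
// the relocations and emits them into the corresponding .rela sections
// instead.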
pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom {
assert(list.atoms.items.len > 0);
return elf_file.atom(list.atoms.items[0]).?;
}
pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom {
assert(list.atoms.items.len > 0);
return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?;
}
pub fn format(
list: AtomList,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = list;
_ = unused_fmt_string;
_ = options;
_ = writer;
@compileError("do not format AtomList directly");
}
const FormatCtx = struct { AtomList, *Elf };
pub fn fmt(list: AtomList, elf_file: *Elf) std.fmt.Formatter(format2) {
return .{ .data = .{ list, elf_file } };
}
fn format2(
ctx: FormatCtx,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = unused_fmt_string;
_ = options;
const list, const elf_file = ctx;
try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
list.address(elf_file), list.output_section_index,
list.alignment.toByteUnits() orelse 0, list.size,
});
try writer.writeAll(" : atoms{ ");
for (list.atoms.items, 0..) |ref, i| {
try writer.print("{}", .{ref});
if (i < list.atoms.items.len - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
const assert = std.debug.assert;
const elf = std.elf;
const log = std.log.scoped(.link);
const math = std.math;
const std = @import("std");
const Allocator = std.mem.Allocator;
const Atom = @import("Atom.zig");
const AtomList = @This();
const Elf = @import("../Elf.zig");
const Object = @import("Object.zig");

View File

@ -311,58 +311,6 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
};
}
fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u32 {
const name = blk: {
const name = self.getString(shdr.sh_name);
if (elf_file.base.isRelocatable()) break :blk name;
if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
const sh_name_prefixes: []const [:0]const u8 = &.{
".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
".dtors", ".gnu.warning",
};
inline for (sh_name_prefixes) |prefix| {
if (std.mem.eql(u8, name, prefix) or std.mem.startsWith(u8, name, prefix ++ ".")) {
break :blk prefix;
}
}
break :blk name;
};
const @"type" = tt: {
if (elf_file.getTarget().cpu.arch == .x86_64 and
shdr.sh_type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS;
const @"type" = switch (shdr.sh_type) {
elf.SHT_NULL => unreachable,
elf.SHT_PROGBITS => blk: {
if (std.mem.eql(u8, name, ".init_array") or std.mem.startsWith(u8, name, ".init_array."))
break :blk elf.SHT_INIT_ARRAY;
if (std.mem.eql(u8, name, ".fini_array") or std.mem.startsWith(u8, name, ".fini_array."))
break :blk elf.SHT_FINI_ARRAY;
break :blk shdr.sh_type;
},
else => shdr.sh_type,
};
break :tt @"type";
};
const flags = blk: {
var flags = shdr.sh_flags;
if (!elf_file.base.isRelocatable()) {
flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN);
}
break :blk switch (@"type") {
elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE,
else => flags,
};
};
const out_shndx = elf_file.sectionByName(name) orelse try elf_file.addSection(.{
.type = @"type",
.flags = flags,
.name = try elf_file.insertShString(name),
});
return out_shndx;
}
fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
const comp = elf_file.base.comp;
const shdr = self.shdrs.items[index];
@ -438,15 +386,24 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
.input_section_index = shndx,
.file_index = self.index,
}),
.fde => try self.fdes.append(allocator, .{
.offset = data_start + rec.offset,
.size = rec.size,
.cie_index = undefined,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
.rel_num = @as(u32, @intCast(rel_range.len)),
.input_section_index = shndx,
.file_index = self.index,
}),
.fde => {
if (rel_range.len == 0) {
// An FDE with no relocs cannot be associated with an Atom, so we skip it.
// According to the mold source code
// (https://github.com/rui314/mold/blob/a3e69502b0eaf1126d6093e8ea5e6fdb95219811/src/input-files.cc#L525-L528)
// this can happen for object files produced by the linker with the -r flag.
continue;
}
try self.fdes.append(allocator, .{
.offset = data_start + rec.offset,
.size = rec.size,
.cie_index = undefined,
.rel_index = rel_start + @as(u32, @intCast(rel_range.start)),
.rel_num = @as(u32, @intCast(rel_range.len)),
.input_section_index = shndx,
.file_index = self.index,
});
},
}
}
@ -622,7 +579,7 @@ pub fn claimUnresolved(self: *Object, elf_file: *Elf) void {
}
}
pub fn claimUnresolvedObject(self: *Object, elf_file: *Elf) void {
pub fn claimUnresolvedRelocatable(self: *Object, elf_file: *Elf) void {
const first_global = self.first_global orelse return;
for (self.globals(), 0..) |*sym, i| {
const esym_index = @as(u32, @intCast(first_global + i));
@ -985,21 +942,14 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
const shdr = atom_ptr.inputShdr(elf_file);
_ = try self.initOutputSection(elf_file, shdr);
}
}
pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
const shdr = atom_ptr.inputShdr(elf_file);
atom_ptr.output_section_index = self.initOutputSection(elf_file, shdr) catch unreachable;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
const osec = try elf_file.initOutputSection(.{
.name = self.getString(shdr.sh_name),
.flags = shdr.sh_flags,
.type = shdr.sh_type,
});
const atom_list = &elf_file.sections.items(.atom_list_2)[osec];
atom_list.output_section_index = osec;
try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref());
}
}
@ -1007,9 +957,14 @@ pub fn initRelaSections(self: *Object, elf_file: *Elf) !void {
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
const out_shndx = try self.initOutputSection(elf_file, shdr);
const out_shndx = try elf_file.initOutputSection(.{
.name = self.getString(shdr.sh_name),
.flags = shdr.sh_flags,
.type = shdr.sh_type,
});
const out_shdr = &elf_file.sections.items(.shdr)[out_shndx];
out_shdr.sh_type = elf.SHT_RELA;
out_shdr.sh_addralign = @alignOf(elf.Elf64_Rela);
@ -1022,10 +977,15 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void {
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const shndx = blk: {
const shndx = atom_ptr.relocsShndx() orelse continue;
const shdr = self.shdrs.items[shndx];
break :blk self.initOutputSection(elf_file, shdr) catch unreachable;
break :blk elf_file.initOutputSection(.{
.name = self.getString(shdr.sh_name),
.flags = shdr.sh_flags,
.type = shdr.sh_type,
}) catch unreachable;
};
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[shndx];
@ -1538,12 +1498,12 @@ fn formatComdatGroups(
}
}
pub fn fmtPath(self: *Object) std.fmt.Formatter(formatPath) {
pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
return .{ .data = self };
}
fn formatPath(
object: *Object,
object: Object,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
@ -1578,6 +1538,7 @@ const mem = std.mem;
const Allocator = mem.Allocator;
const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const AtomList = @import("AtomList.zig");
const Cie = eh_frame.Cie;
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;

View File

@ -51,6 +51,14 @@ debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
eh_frame_section_dirty: bool = false,
text_index: ?Symbol.Index = null,
rodata_index: ?Symbol.Index = null,
data_relro_index: ?Symbol.Index = null,
data_index: ?Symbol.Index = null,
bss_index: ?Symbol.Index = null,
tdata_index: ?Symbol.Index = null,
tbss_index: ?Symbol.Index = null,
eh_frame_index: ?Symbol.Index = null,
debug_info_index: ?Symbol.Index = null,
debug_abbrev_index: ?Symbol.Index = null,
debug_aranges_index: ?Symbol.Index = null,
@ -59,7 +67,6 @@ debug_line_index: ?Symbol.Index = null,
debug_line_str_index: ?Symbol.Index = null,
debug_loclists_index: ?Symbol.Index = null,
debug_rnglists_index: ?Symbol.Index = null,
eh_frame_index: ?Symbol.Index = null,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@ -71,6 +78,7 @@ const InitOptions = struct {
};
pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
_ = options;
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
@ -88,190 +96,13 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
esym.st_shndx = elf.SHN_ABS;
}
const fillSection = struct {
fn fillSection(ef: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void {
if (ef.base.isRelocatable()) {
const off = try ef.findFreeSpace(size, shdr.sh_addralign);
shdr.sh_offset = off;
shdr.sh_size = size;
} else {
const phdr = ef.phdrs.items[phndx.?];
shdr.sh_addr = phdr.p_vaddr;
shdr.sh_offset = phdr.p_offset;
shdr.sh_size = phdr.p_memsz;
}
}
}.fillSection;
comptime assert(Elf.number_of_zig_segments == 4);
if (!elf_file.base.isRelocatable()) {
if (elf_file.phdr_zig_load_re_index == null) {
const filesz = options.program_code_size_hint;
const off = try elf_file.findFreeSpace(filesz, elf_file.page_size);
elf_file.phdr_zig_load_re_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_size >= 4) 0x4000000 else 0x4000,
.memsz = filesz,
.@"align" = elf_file.page_size,
.flags = elf.PF_X | elf.PF_R | elf.PF_W,
});
}
if (elf_file.phdr_zig_load_ro_index == null) {
const alignment = elf_file.page_size;
const filesz: u64 = 1024;
const off = try elf_file.findFreeSpace(filesz, alignment);
elf_file.phdr_zig_load_ro_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_size >= 4) 0xc000000 else 0xa000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
});
}
if (elf_file.phdr_zig_load_rw_index == null) {
const alignment = elf_file.page_size;
const filesz: u64 = 1024;
const off = try elf_file.findFreeSpace(filesz, alignment);
elf_file.phdr_zig_load_rw_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
.addr = if (ptr_size >= 4) 0x10000000 else 0xc000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
});
}
if (elf_file.phdr_zig_load_zerofill_index == null) {
const alignment = elf_file.page_size;
elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
.addr = if (ptr_size >= 4) 0x14000000 else 0xf000,
.memsz = 1024,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
});
}
}
if (elf_file.zig_text_section_index == null) {
elf_file.zig_text_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text.zig"),
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_text_section_index.?];
const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_text_section_index.?];
try fillSection(elf_file, shdr, options.program_code_size_hint, elf_file.phdr_zig_load_re_index);
if (elf_file.base.isRelocatable()) {
_ = try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.text.zig"),
elf_file.zig_text_section_index.?,
);
} else {
phndx.* = elf_file.phdr_zig_load_re_index.?;
}
}
if (elf_file.zig_data_rel_ro_section_index == null) {
elf_file.zig_data_rel_ro_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data.rel.ro.zig"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_rel_ro_section_index.?];
const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_rel_ro_section_index.?];
try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_ro_index);
if (elf_file.base.isRelocatable()) {
_ = try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.data.rel.ro.zig"),
elf_file.zig_data_rel_ro_section_index.?,
);
} else {
phndx.* = elf_file.phdr_zig_load_ro_index.?;
}
}
if (elf_file.zig_data_section_index == null) {
elf_file.zig_data_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data.zig"),
.type = elf.SHT_PROGBITS,
.addralign = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_section_index.?];
const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_section_index.?];
try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_rw_index);
if (elf_file.base.isRelocatable()) {
_ = try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.data.zig"),
elf_file.zig_data_section_index.?,
);
} else {
phndx.* = elf_file.phdr_zig_load_rw_index.?;
}
}
if (elf_file.zig_bss_section_index == null) {
elf_file.zig_bss_section_index = try elf_file.addSection(.{
.name = try elf_file.insertShString(".bss.zig"),
.type = elf.SHT_NOBITS,
.addralign = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = 0,
});
const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_bss_section_index.?];
const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_bss_section_index.?];
if (elf_file.base.isRelocatable()) {
shdr.sh_size = 1024;
} else {
phndx.* = elf_file.phdr_zig_load_zerofill_index.?;
const phdr = elf_file.phdrs.items[phndx.*.?];
shdr.sh_addr = phdr.p_vaddr;
shdr.sh_size = phdr.p_memsz;
}
}
switch (comp.config.debug_format) {
.strip => {},
.dwarf => |v| {
var dwarf = Dwarf.init(&elf_file.base, v);
const addSectionSymbol = struct {
fn addSectionSymbol(
zig_object: *ZigObject,
alloc: Allocator,
name: [:0]const u8,
alignment: Atom.Alignment,
shndx: u32,
) !Symbol.Index {
const name_off = try zig_object.addString(alloc, name);
const index = try zig_object.newSymbolWithAtom(alloc, name_off);
const sym = zig_object.symbol(index);
const esym = &zig_object.symtab.items(.elf_sym)[sym.esym_index];
esym.st_info |= elf.STT_SECTION;
const atom_ptr = zig_object.atom(sym.ref.index).?;
atom_ptr.alignment = alignment;
atom_ptr.output_section_index = shndx;
return index;
}
}.addSectionSymbol;
if (elf_file.debug_str_section_index == null) {
elf_file.debug_str_section_index = try elf_file.addSection(.{
if (self.debug_str_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
@ -279,51 +110,56 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = 1,
});
self.debug_str_section_dirty = true;
self.debug_str_index = try addSectionSymbol(self, gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?);
self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref;
}
if (elf_file.debug_info_section_index == null) {
elf_file.debug_info_section_index = try elf_file.addSection(.{
if (self.debug_info_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_info"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_info_section_dirty = true;
self.debug_info_index = try addSectionSymbol(self, gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?);
self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref;
}
if (elf_file.debug_abbrev_section_index == null) {
elf_file.debug_abbrev_section_index = try elf_file.addSection(.{
if (self.debug_abbrev_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_abbrev"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_abbrev_section_dirty = true;
self.debug_abbrev_index = try addSectionSymbol(self, gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?);
self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref;
}
if (elf_file.debug_aranges_section_index == null) {
elf_file.debug_aranges_section_index = try elf_file.addSection(.{
if (self.debug_aranges_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_aranges"),
.type = elf.SHT_PROGBITS,
.addralign = 16,
});
self.debug_aranges_section_dirty = true;
self.debug_aranges_index = try addSectionSymbol(self, gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?);
self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref;
}
if (elf_file.debug_line_section_index == null) {
elf_file.debug_line_section_index = try elf_file.addSection(.{
if (self.debug_line_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_line"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_line_section_dirty = true;
self.debug_line_index = try addSectionSymbol(self, gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?);
self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref;
}
if (elf_file.debug_line_str_section_index == null) {
elf_file.debug_line_str_section_index = try elf_file.addSection(.{
if (self.debug_line_str_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_line_str"),
.flags = elf.SHF_MERGE | elf.SHF_STRINGS,
.entsize = 1,
@ -331,31 +167,34 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = 1,
});
self.debug_line_str_section_dirty = true;
self.debug_line_str_index = try addSectionSymbol(self, gpa, ".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?);
self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref;
}
if (elf_file.debug_loclists_section_index == null) {
elf_file.debug_loclists_section_index = try elf_file.addSection(.{
if (self.debug_loclists_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_loclists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_loclists_section_dirty = true;
self.debug_loclists_index = try addSectionSymbol(self, gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?);
self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref;
}
if (elf_file.debug_rnglists_section_index == null) {
elf_file.debug_rnglists_section_index = try elf_file.addSection(.{
if (self.debug_rnglists_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".debug_rnglists"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
});
self.debug_rnglists_section_dirty = true;
self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?);
self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref;
}
if (elf_file.eh_frame_section_index == null) {
elf_file.eh_frame_section_index = try elf_file.addSection(.{
if (self.eh_frame_index == null) {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@ -365,7 +204,8 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.addralign = ptr_size,
});
self.eh_frame_section_dirty = true;
self.eh_frame_index = try addSectionSymbol(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?);
self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec);
elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref;
}
try dwarf.initMetadata();
@ -404,10 +244,6 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
meta.exports.deinit(allocator);
}
self.uavs.deinit(allocator);
for (self.tls_variables.values()) |*tlv| {
tlv.deinit(allocator);
}
self.tls_variables.deinit(allocator);
if (self.dwarf) |*dwarf| {
@ -499,12 +335,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const sym = self.symbol(sym_index);
const atom_ptr = self.atom(sym.ref.index).?;
if (!atom_ptr.alive) continue;
const shndx = sym.outputShndx(elf_file).?;
const shdr = elf_file.sections.items(.shdr)[shndx];
const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
esym.st_size = shdr.sh_size;
atom_ptr.size = shdr.sh_size;
atom_ptr.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
log.debug("parsing relocs in {s}", .{sym.name(elf_file)});
@ -665,14 +495,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
}
}
}
if (elf_file.base.isRelocatable() and relocs.items.len > 0) {
const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{elf_file.getShString(shdr.sh_name)});
defer gpa.free(rela_sect_name);
if (elf_file.sectionByName(rela_sect_name) == null) {
_ = try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), shndx);
}
}
}
self.debug_abbrev_section_dirty = false;
@ -835,7 +657,7 @@ pub fn claimUnresolved(self: *ZigObject, elf_file: *Elf) void {
}
}
pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void {
pub fn claimUnresolvedRelocatable(self: ZigObject, elf_file: *Elf) void {
for (self.global_symbols.items, 0..) |index, i| {
const global = &self.symbols.items[index];
const esym = self.symtab.items(.elf_sym)[index];
@ -990,21 +812,48 @@ pub fn writeAr(self: ZigObject, writer: anytype) !void {
try writer.writeAll(self.data.items);
}
pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
pub fn initRelaSections(self: *ZigObject, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const rela_shndx = atom_ptr.relocsShndx() orelse continue;
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
const out_shndx = atom_ptr.output_section_index;
const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
const out_rela_shndx = for (elf_file.sections.items(.shdr), 0..) |out_rela_shdr, out_rela_shndx| {
if (out_rela_shdr.sh_type == elf.SHT_RELA and out_rela_shdr.sh_info == out_shndx) break out_rela_shndx;
} else unreachable;
const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{
elf_file.getShString(out_shdr.sh_name),
});
defer gpa.free(rela_sect_name);
_ = elf_file.sectionByName(rela_sect_name) orelse
try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), out_shndx);
}
}
pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
for (self.atoms_indexes.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.alive) continue;
if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue;
const rela_shndx = atom_ptr.relocsShndx() orelse continue;
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
const out_shndx = atom_ptr.output_section_index;
const out_shdr = elf_file.sections.items(.shdr)[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{
elf_file.getShString(out_shdr.sh_name),
});
defer gpa.free(rela_sect_name);
const out_rela_shndx = elf_file.sectionByName(rela_sect_name).?;
const out_rela_shdr = &elf_file.sections.items(.shdr)[out_rela_shndx];
out_rela_shdr.sh_info = out_shndx;
out_rela_shdr.sh_link = elf_file.symtab_section_index.?;
const atom_list = &elf_file.sections.items(.atom_list)[out_rela_shndx];
const gpa = elf_file.base.comp.gpa;
try atom_list.append(gpa, .{ .index = atom_index, .file = self.index });
}
}
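// initRelaSections creates the output .rela sections up front; once the final
// section indexes are known, addAtomsToRelaSections wires up sh_info/sh_link
// and collects the atoms.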
@ -1075,15 +924,7 @@ pub fn writeSymtab(self: ZigObject, elf_file: *Elf) void {
pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 {
const gpa = elf_file.base.comp.gpa;
const atom_ptr = self.atom(atom_index).?;
const shdr = &elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
if (shdr.sh_flags & elf.SHF_TLS != 0) {
const tlv = self.tls_variables.get(atom_index).?;
const code = try gpa.dupe(u8, tlv.code);
return code;
}
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
const file_offset = atom_ptr.offset(elf_file);
const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
@ -1168,6 +1009,20 @@ pub fn lowerUav(
return .{ .mcv = .{ .load_symbol = metadata.symbol_index } };
}
const osec = if (self.data_relro_index) |sym_index|
self.symbol(sym_index).atom(elf_file).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data.rel.ro"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec);
break :osec osec;
};
var name_buf: [32]u8 = undefined;
const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{
@intFromEnum(uav),
@ -1178,7 +1033,7 @@ pub fn lowerUav(
name,
val,
uav_alignment,
elf_file.zig_data_rel_ro_section_index.?,
osec,
src_loc,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@ -1270,6 +1125,27 @@ pub fn getOrCreateMetadataForNav(
return gop.value_ptr.symbol_index;
}
// FIXME: we always create an atom basically to store size and alignment; however, this only
// works for sections that have a single atom, like the debug sections. A better solution
// might be to decouple this concept from the atom.
fn addSectionSymbol(
self: *ZigObject,
allocator: Allocator,
name: [:0]const u8,
alignment: Atom.Alignment,
shndx: u32,
) !Symbol.Index {
const name_off = try self.addString(allocator, name);
const index = try self.newSymbolWithAtom(allocator, name_off);
const sym = self.symbol(index);
const esym = &self.symtab.items(.elf_sym)[sym.esym_index];
esym.st_info |= elf.STT_SECTION;
const atom_ptr = self.atom(sym.ref.index).?;
atom_ptr.alignment = alignment;
atom_ptr.output_section_index = shndx;
return index;
}
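// The returned symbol's atom stands in for the whole section: allocateAtom
// below mirrors the section's size and alignment back into it.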
fn getNavShdrIndex(
self: *ZigObject,
elf_file: *Elf,
@ -1278,10 +1154,24 @@ fn getNavShdrIndex(
sym_index: Symbol.Index,
code: []const u8,
) error{OutOfMemory}!u32 {
const gpa = elf_file.base.comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
const ip = &zcu.intern_pool;
const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded;
const nav_val = zcu.navValue(nav_index);
if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return elf_file.zig_text_section_index.?;
if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) {
if (self.text_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
.name = try elf_file.insertShString(".text"),
.addralign = 1,
.offset = std.math.maxInt(u64),
});
self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
return osec;
}
const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) {
.variable => |variable| .{ false, variable.is_threadlocal, variable.init },
.@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none },
@ -1292,30 +1182,107 @@ fn getNavShdrIndex(
const is_bss = !has_relocs and for (code) |byte| {
if (byte != 0) break false;
} else true;
if (is_bss) return elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{
.type = elf.SHT_NOBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
.name = try elf_file.insertShString(".tbss"),
.offset = std.math.maxInt(u64),
});
return elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{
if (is_bss) {
if (self.tbss_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".tbss"),
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
.type = elf.SHT_NOBITS,
.addralign = 1,
});
self.tbss_index = try self.addSectionSymbol(gpa, ".tbss", .@"1", osec);
return osec;
}
if (self.tdata_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
.name = try elf_file.insertShString(".tdata"),
.addralign = 1,
.offset = std.math.maxInt(u64),
});
self.tdata_index = try self.addSectionSymbol(gpa, ".tdata", .@"1", osec);
return osec;
}
if (is_const) {
if (self.data_relro_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data.rel.ro"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec);
return osec;
}
if (is_const) return elf_file.zig_data_rel_ro_section_index.?;
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
return switch (zcu.navFileScope(nav_index).mod.optimize_mode) {
.Debug, .ReleaseSafe => elf_file.zig_data_section_index.?,
.ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?,
.Debug, .ReleaseSafe => {
if (self.data_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data"),
.type = elf.SHT_PROGBITS,
.addralign = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
self.data_index = try self.addSectionSymbol(
gpa,
".data",
Atom.Alignment.fromNonzeroByteUnits(ptr_size),
osec,
);
return osec;
},
.ReleaseFast, .ReleaseSmall => {
if (self.bss_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.type = elf.SHT_NOBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.name = try elf_file.insertShString(".bss"),
.addralign = 1,
});
self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
return osec;
},
};
const is_bss = !has_relocs and for (code) |byte| {
if (byte != 0) break false;
} else true;
if (is_bss) return elf_file.zig_bss_section_index.?;
return elf_file.zig_data_section_index.?;
if (is_bss) {
if (self.bss_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.type = elf.SHT_NOBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.name = try elf_file.insertShString(".bss"),
.addralign = 1,
});
self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec);
return osec;
}
if (self.data_index) |symbol_index|
return self.symbol(symbol_index).atom(elf_file).?.output_section_index;
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".data"),
.type = elf.SHT_PROGBITS,
.addralign = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.offset = std.math.maxInt(u64),
});
self.data_index = try self.addSectionSymbol(
gpa,
".data",
Atom.Alignment.fromNonzeroByteUnits(ptr_size),
osec,
);
return osec;
}
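// Section selection, in order: functions go to .text; threadlocal variables to
// .tbss (all-zero, no relocs) or .tdata; constants to .data.rel.ro;
// undef-initialized globals to .data in Debug/ReleaseSafe or .bss in
// ReleaseFast/ReleaseSmall; remaining all-zero data without relocs to .bss;
// everything else to .data.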
fn updateNavCode(
@ -1362,19 +1329,18 @@ fn updateNavCode(
const capacity = atom_ptr.capacity(elf_file);
const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
if (need_realloc) {
try atom_ptr.grow(elf_file);
try self.growAtom(atom_ptr, elf_file);
log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value });
if (old_vaddr != atom_ptr.value) {
sym.value = 0;
esym.st_value = 0;
}
} else if (code.len < old_size) {
atom_ptr.shrink(elf_file);
// TODO shrink section size
}
} else {
try atom_ptr.allocate(elf_file);
try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, sym_index);
sym.value = 0;
esym.st_value = 0;
}
@ -1404,7 +1370,7 @@ fn updateNavCode(
const shdr = elf_file.sections.items(.shdr)[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
const file_offset = atom_ptr.offset(elf_file);
try elf_file.base.file.?.pwriteAll(code, file_offset);
log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len });
}
@ -1433,15 +1399,11 @@ fn updateTlv(
const atom_ptr = sym.atom(elf_file).?;
const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip));
sym.value = 0;
sym.name_offset = name_offset;
atom_ptr.output_section_index = shndx;
atom_ptr.alive = true;
atom_ptr.name_offset = name_offset;
atom_ptr.output_section_index = shndx;
sym.name_offset = name_offset;
esym.st_value = 0;
esym.st_name = name_offset;
esym.st_info = elf.STT_TLS;
esym.st_size = code.len;
@ -1449,21 +1411,25 @@ fn updateTlv(
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
assert(!gop.found_existing); // TODO incremental updates
try self.allocateAtom(atom_ptr, elf_file);
sym.value = 0;
esym.st_value = 0;
self.navs.getPtr(nav_index).?.allocated = true;
{
const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
assert(!gop.found_existing); // TODO incremental updates
gop.value_ptr.* = .{ .symbol_index = sym_index };
// We only store the data for the TLV if it's non-zerofill.
if (elf_file.sections.items(.shdr)[shndx].sh_type != elf.SHT_NOBITS) {
gop.value_ptr.code = try gpa.dupe(u8, code);
}
const shdr = elf_file.sections.items(.shdr)[shndx];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = atom_ptr.offset(elf_file);
try elf_file.base.file.?.pwriteAll(code, file_offset);
log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{
atom_ptr.name(elf_file),
file_offset,
file_offset + code.len,
});
}
const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index];
try atom_list.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index });
}
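// TLVs now flow through the same allocate-and-write path as other atoms; the
// tls_variables table no longer keeps a copy of the code (TlsTable's value
// type is now void, see below).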
pub fn updateFunc(
@ -1558,6 +1524,19 @@ pub fn updateFunc(
self.symbol(sym_index).name(elf_file),
});
defer gpa.free(name);
const osec = if (self.text_index) |sect_sym_index|
self.symbol(sect_sym_index).atom(elf_file).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text"),
.flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
.type = elf.SHT_PROGBITS,
.addralign = 1,
.offset = std.math.maxInt(u64),
});
self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
break :osec osec;
};
const name_off = try self.addString(gpa, name);
const tr_size = trampolineSize(elf_file.getTarget().cpu.arch);
const tr_sym_index = try self.newSymbolWithAtom(gpa, name_off);
@ -1569,7 +1548,7 @@ pub fn updateFunc(
tr_atom_ptr.value = old_rva;
tr_atom_ptr.alive = true;
tr_atom_ptr.alignment = old_alignment;
tr_atom_ptr.output_section_index = elf_file.zig_text_section_index.?;
tr_atom_ptr.output_section_index = osec;
tr_atom_ptr.size = tr_size;
const target_sym = self.symbol(sym_index);
target_sym.addExtra(.{ .trampoline = tr_sym_index }, elf_file);
@ -1723,8 +1702,32 @@ fn updateLazySymbol(
};
const output_section_index = switch (sym.kind) {
.code => elf_file.zig_text_section_index.?,
.const_data => elf_file.zig_data_rel_ro_section_index.?,
.code => if (self.text_index) |sym_index|
self.symbol(sym_index).atom(elf_file).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".text"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
.offset = std.math.maxInt(u64),
});
self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec);
break :osec osec;
},
.const_data => if (self.rodata_index) |sym_index|
self.symbol(sym_index).atom(elf_file).?.output_section_index
else osec: {
const osec = try elf_file.addSection(.{
.name = try elf_file.insertShString(".rodata"),
.type = elf.SHT_PROGBITS,
.addralign = 1,
.flags = elf.SHF_ALLOC,
.offset = std.math.maxInt(u64),
});
self.rodata_index = try self.addSectionSymbol(gpa, ".rodata", .@"1", osec);
break :osec osec;
},
};
const local_sym = self.symbol(symbol_index);
local_sym.name_offset = name_str_index;
@ -1739,15 +1742,13 @@ fn updateLazySymbol(
atom_ptr.size = code.len;
atom_ptr.output_section_index = output_section_index;
try atom_ptr.allocate(elf_file);
try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, symbol_index);
local_sym.value = 0;
local_esym.st_value = 0;
const shdr = elf_file.sections.items(.shdr)[output_section_index];
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
}
const LowerConstResult = union(enum) {
@ -1797,13 +1798,10 @@ fn lowerConst(
atom_ptr.size = code.len;
atom_ptr.output_section_index = output_section_index;
try atom_ptr.allocate(elf_file);
// TODO rename and re-audit this method
try self.allocateAtom(atom_ptr, elf_file);
errdefer self.freeNavMetadata(elf_file, sym_index);
const shdr = elf_file.sections.items(.shdr)[output_section_index];
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file));
return .{ .ok = sym_index };
}
@ -1965,8 +1963,7 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 {
fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
const atom_ptr = tr_sym.atom(elf_file).?;
const shdr = elf_file.sections.items(.shdr)[atom_ptr.output_section_index];
const fileoff = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
const fileoff = atom_ptr.offset(elf_file);
const source_addr = tr_sym.address(.{}, elf_file);
const target_addr = target.address(.{ .trampoline = false }, elf_file);
var buf: [max_trampoline_len]u8 = undefined;
@ -1998,6 +1995,80 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void {
}
}
fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
const alloc_res = try elf_file.allocateChunk(.{
.shndx = atom_ptr.output_section_index,
.size = atom_ptr.size,
.alignment = atom_ptr.alignment,
});
atom_ptr.value = @intCast(alloc_res.value);
const slice = elf_file.sections.slice();
const shdr = &slice.items(.shdr)[atom_ptr.output_section_index];
const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index];
const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
placement_atom.nextAtom(elf_file) == null
else
true;
if (expand_section) {
last_atom_ref.* = atom_ptr.ref();
if (self.dwarf) |_| {
// The .debug_info section has `low_pc` and `high_pc` values, which describe the virtual
// address range of the compilation unit. When we expand the text section, this range
// changes, so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
self.debug_info_section_dirty = true;
// These become dirty for the same reason. We could potentially make this more
// fine-grained once we support multiple compilation units; the plan is to
// model each package as a separate compilation unit.
self.debug_aranges_section_dirty = true;
self.debug_rnglists_section_dirty = true;
}
}
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?);
const sect_atom_ptr = for ([_]?Symbol.Index{
self.text_index,
self.rodata_index,
self.data_relro_index,
self.data_index,
self.tdata_index,
}) |maybe_sym_index| {
const sect_sym_index = maybe_sym_index orelse continue;
const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?;
if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr;
} else null;
if (sect_atom_ptr) |sap| {
sap.size = shdr.sh_size;
sap.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign);
}
// This function can also reallocate an atom.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (atom_ptr.prevAtom(elf_file)) |prev| {
prev.next_atom_ref = atom_ptr.next_atom_ref;
}
if (atom_ptr.nextAtom(elf_file)) |next| {
next.prev_atom_ref = atom_ptr.prev_atom_ref;
}
if (elf_file.atom(alloc_res.placement)) |big_atom| {
atom_ptr.prev_atom_ref = alloc_res.placement;
atom_ptr.next_atom_ref = big_atom.next_atom_ref;
big_atom.next_atom_ref = atom_ptr.ref();
} else {
atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 };
atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 };
}
}
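// A minimal sketch of the splice above, assuming a new atom B placed after an
// existing atom A: B.prev = A, B.next = A's old next, A.next = B.

/// Reallocates the atom only if its placement is invalid: the value is
/// misaligned or the size exceeds the atom's capacity.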
fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void {
if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) {
try self.allocateAtom(atom_ptr, elf_file);
}
}
pub fn asFile(self: *ZigObject) File {
return .{ .zig_object = self };
}
@ -2271,7 +2342,7 @@ const AtomList = std.ArrayListUnmanaged(Atom.Index);
const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable);
const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, void);
const x86_64 = struct {
fn writeTrampolineCode(source_addr: i64, target_addr: i64, buf: *[max_trampoline_len]u8) ![]u8 {

View File

@ -233,7 +233,10 @@ pub fn calcEhFrameSize(elf_file: *Elf) !usize {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
var offset: usize = 0;
var offset: usize = if (elf_file.zigObjectPtr()) |zo| blk: {
const sym = zo.symbol(zo.eh_frame_index orelse break :blk 0);
break :blk math.cast(usize, sym.atom(elf_file).?.size) orelse return error.Overflow;
} else 0;
var cies = std.ArrayList(Cie).init(gpa);
defer cies.deinit();
@ -288,6 +291,13 @@ pub fn calcEhFrameHdrSize(elf_file: *Elf) usize {
pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
var count: usize = 0;
if (elf_file.zigObjectPtr()) |zo| zo: {
const sym_index = zo.eh_frame_index orelse break :zo;
const sym = zo.symbol(sym_index);
const atom_ptr = zo.atom(sym.ref.index).?;
if (!atom_ptr.alive) break :zo;
count += atom_ptr.relocs(elf_file).len;
}
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
for (object.cies.items) |cie| {
@ -386,7 +396,7 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
if (has_reloc_errors) return error.RelocFailure;
}
pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void {
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@ -416,9 +426,8 @@ pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
}
}
fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
const cpu_arch = elf_file.getTarget().cpu.arch;
const r_offset = rec.address(elf_file) + rel.r_offset - rec.offset;
const r_type = rel.r_type();
var r_addend = rel.r_addend;
var r_sym: u32 = 0;
@ -452,6 +461,19 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
elf_file.sections.items(.shdr)[elf_file.eh_frame_section_index.?].sh_addr,
});
if (elf_file.zigObjectPtr()) |zo| zo: {
const sym_index = zo.eh_frame_index orelse break :zo;
const sym = zo.symbol(sym_index);
const atom_ptr = zo.atom(sym.ref.index).?;
if (!atom_ptr.alive) break :zo;
for (atom_ptr.relocs(elf_file)) |rel| {
const ref = zo.resolveSymbol(rel.r_sym(), elf_file);
const target = elf_file.symbol(ref).?;
const out_rel = emitReloc(elf_file, rel.r_offset, target, rel);
try writer.writeStruct(out_rel);
}
}
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@ -460,7 +482,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (cie.relocs(elf_file)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
const out_rel = emitReloc(elf_file, cie, sym, rel);
const r_offset = cie.address(elf_file) + rel.r_offset - cie.offset;
const out_rel = emitReloc(elf_file, r_offset, sym, rel);
try writer.writeStruct(out_rel);
}
}
@ -470,7 +493,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void {
for (fde.relocs(elf_file)) |rel| {
const ref = object.resolveSymbol(rel.r_sym(), elf_file);
const sym = elf_file.symbol(ref).?;
const out_rel = emitReloc(elf_file, fde, sym, rel);
const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset;
const out_rel = emitReloc(elf_file, r_offset, sym, rel);
try writer.writeStruct(out_rel);
}
}

View File

@ -18,7 +18,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
}
for (positionals.items) |obj| {
parsePositional(elf_file, obj.path) catch |err| switch (err) {
parsePositionalStaticLib(elf_file, obj.path) catch |err| switch (err) {
error.MalformedObject,
error.MalformedArchive,
error.InvalidMachineType,
@ -38,17 +38,12 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
// First, we flush relocatable object file generated with our backends.
if (elf_file.zigObjectPtr()) |zig_object| {
try zig_object.resolveSymbols(elf_file);
elf_file.markEhFrameAtomsDead();
try elf_file.addCommentString();
try elf_file.finalizeMergeSections();
zig_object.claimUnresolvedObject(elf_file);
zig_object.claimUnresolvedRelocatable(elf_file);
for (elf_file.merge_sections.items) |*msec| {
if (msec.finalized_subsections.items.len == 0) continue;
try msec.initOutputSection(elf_file);
}
try elf_file.initSymtab();
try elf_file.initShStrtab();
try initSections(elf_file);
try elf_file.sortShdrs();
try zig_object.addAtomsToRelaSections(elf_file);
try elf_file.updateMergeSectionSizes();
@ -208,7 +203,6 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
}
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
try object.addAtomsToOutputSections(elf_file);
try object.addAtomsToRelaSections(elf_file);
}
try elf_file.updateMergeSectionSizes();
@ -230,17 +224,17 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
if (elf_file.base.hasErrors()) return error.FlushFailure;
}
fn parsePositional(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
fn parsePositionalStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
if (try Object.isObject(path)) {
try parseObject(elf_file, path);
try parseObjectStaticLib(elf_file, path);
} else if (try Archive.isArchive(path)) {
try parseArchive(elf_file, path);
try parseArchiveStaticLib(elf_file, path);
} else return error.UnknownFileType;
// TODO: should we check for LD script?
// Actually, should we even unpack an archive?
}
fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
fn parseObjectStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try std.fs.cwd().openFile(path, .{});
const fh = try elf_file.addFileHandle(handle);
@ -257,7 +251,7 @@ fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
try object.parseAr(elf_file);
}
fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
fn parseArchiveStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const handle = try std.fs.cwd().openFile(path, .{});
const fh = try elf_file.addFileHandle(handle);
@ -281,14 +275,17 @@ fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void {
fn claimUnresolved(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zig_object| {
zig_object.claimUnresolvedObject(elf_file);
zig_object.claimUnresolvedRelocatable(elf_file);
}
for (elf_file.objects.items) |index| {
elf_file.file(index).?.object.claimUnresolvedObject(elf_file);
elf_file.file(index).?.object.claimUnresolvedRelocatable(elf_file);
}
}
fn initSections(elf_file: *Elf) !void {
if (elf_file.zigObjectPtr()) |zo| {
try zo.initRelaSections(elf_file);
}
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
try object.initOutputSections(elf_file);
@ -300,12 +297,17 @@ fn initSections(elf_file: *Elf) !void {
try msec.initOutputSection(elf_file);
}
const needs_eh_frame = for (elf_file.objects.items) |index| {
if (elf_file.file(index).?.object.cies.items.len > 0) break true;
} else false;
const needs_eh_frame = blk: {
if (elf_file.zigObjectPtr()) |zo|
if (zo.eh_frame_index != null) break :blk true;
break :blk for (elf_file.objects.items) |index| {
if (elf_file.file(index).?.object.cies.items.len > 0) break true;
} else false;
};
if (needs_eh_frame) {
if (elf_file.eh_frame_section_index == null) {
elf_file.eh_frame_section_index = try elf_file.addSection(.{
elf_file.eh_frame_section_index = elf_file.sectionByName(".eh_frame") orelse
try elf_file.addSection(.{
.name = try elf_file.insertShString(".eh_frame"),
.type = if (elf_file.getTarget().cpu.arch == .x86_64)
elf.SHT_X86_64_UNWIND
@ -316,7 +318,8 @@ fn initSections(elf_file: *Elf) !void {
.offset = std.math.maxInt(u64),
});
}
elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr(
elf_file.eh_frame_rela_section_index = elf_file.sectionByName(".rela.eh_frame") orelse
try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.eh_frame"),
elf_file.eh_frame_section_index.?,
);
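// Looking the sections up by name before creating them lets repeated flushes
// of the same relocatable output reuse the .eh_frame/.rela.eh_frame sections
// made by an earlier flush instead of appending duplicates (assumed intent
// from the lookup-before-create pattern above).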
@ -351,36 +354,28 @@ fn initComdatGroups(elf_file: *Elf) !void {
fn updateSectionSizes(elf_file: *Elf) !void {
const slice = elf_file.sections.slice();
for (slice.items(.atom_list_2)) |*atom_list| {
if (atom_list.atoms.items.len == 0) continue;
atom_list.updateSize(elf_file);
try atom_list.allocate(elf_file);
}
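// A hedged sketch of what the AtomList pass above amounts to; the concrete
// code lives in the new src/link/Elf/AtomList.zig, so the names and fields
// here are assumptions. The layout rule mirrors the inline branch this change
// deletes just below: place atoms back to back, honoring alignment.
pub fn updateSize(list: *AtomList, elf_file: *Elf) void {
    for (list.atoms.items) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        if (!atom_ptr.alive) continue;
        const offset = atom_ptr.alignment.forward(list.size);
        atom_ptr.value = @intCast(offset); // atom offset relative to the list
        list.size = offset + atom_ptr.size;
        list.alignment = list.alignment.max(atom_ptr.alignment);
    }
}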
for (slice.items(.shdr), 0..) |*shdr, shndx| {
const atom_list = slice.items(.atom_list)[shndx];
if (shdr.sh_type != elf.SHT_RELA) {
for (atom_list.items) |ref| {
const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
} else {
for (atom_list.items) |ref| {
const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.alive) continue;
const relocs = atom_ptr.relocs(elf_file);
shdr.sh_size += shdr.sh_entsize * relocs.len;
}
if (shdr.sh_size == 0) shdr.sh_offset = 0;
if (shdr.sh_type != elf.SHT_RELA) continue;
if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue;
for (atom_list.items) |ref| {
const atom_ptr = elf_file.atom(ref) orelse continue;
if (!atom_ptr.alive) continue;
const relocs = atom_ptr.relocs(elf_file);
shdr.sh_size += shdr.sh_entsize * relocs.len;
}
if (shdr.sh_size == 0) shdr.sh_offset = 0;
}
if (elf_file.eh_frame_section_index) |index| {
slice.items(.shdr)[index].sh_size = existing_size: {
const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
break :existing_size sym.atom(elf_file).?.size;
} + try eh_frame.calcEhFrameSize(elf_file);
slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
}
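// The special-cased ZigObject prefix (`existing_size` above) is gone: with
// atoms from the ZigObject flowing through the same eh_frame machinery as
// input objects, calcEhFrameSize is assumed to account for that contribution
// itself.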
if (elf_file.eh_frame_rela_section_index) |index| {
const shdr = &slice.items(.shdr)[index];
@ -405,7 +400,7 @@ fn updateComdatGroupsSizes(elf_file: *Elf) void {
/// Allocates alloc sections when merging relocatable object files together.
fn allocateAllocSections(elf_file: *Elf) !void {
for (elf_file.sections.items(.shdr)) |*shdr| {
for (elf_file.sections.items(.shdr), 0..) |*shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) {
@ -416,6 +411,34 @@ fn allocateAllocSections(elf_file: *Elf) !void {
if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign);
if (elf_file.zigObjectPtr()) |zo| blk: {
const existing_size = for ([_]?Symbol.Index{
zo.text_index,
zo.rodata_index,
zo.data_relro_index,
zo.data_index,
zo.tdata_index,
zo.eh_frame_index,
}) |maybe_sym_index| {
const sect_sym_index = maybe_sym_index orelse continue;
const sect_atom_ptr = zo.symbol(sect_sym_index).atom(elf_file).?;
if (sect_atom_ptr.output_section_index == shndx) break sect_atom_ptr.size;
} else break :blk;
log.debug("moving {s} from 0x{x} to 0x{x}", .{
elf_file.getShString(shdr.sh_name),
shdr.sh_offset,
new_offset,
});
const amt = try elf_file.base.file.?.copyRangeAll(
shdr.sh_offset,
elf_file.base.file.?,
new_offset,
existing_size,
);
if (amt != existing_size) return error.InputOutput;
}
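// When a section outgrows the file space allocated to it, it is moved to a
// fresh offset. Bytes the ZigObject already wrote for that section in an
// earlier incremental flush must survive the move, hence the copyRangeAll of
// `existing_size` bytes before sh_offset is updated below.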
shdr.sh_offset = new_offset;
shdr.sh_size = needed_size;
}
@ -424,73 +447,15 @@ fn allocateAllocSections(elf_file: *Elf) !void {
fn writeAtoms(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();
const slice = elf_file.sections.slice();
// TODO iterate over `output_sections` directly
for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (shdr.sh_type == elf.SHT_RELA) continue;
if (atom_list.items.len == 0) continue;
log.debug("writing atoms in '{s}' section", .{elf_file.getShString(shdr.sh_name)});
// TODO really, really handle debug section separately
const base_offset = if (elf_file.isDebugSection(@intCast(shndx))) blk: {
const zo = elf_file.zigObjectPtr().?;
break :blk for ([_]Symbol.Index{
zo.debug_info_index.?,
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
zo.debug_str_index.?,
zo.debug_line_index.?,
zo.debug_line_str_index.?,
zo.debug_loclists_index.?,
zo.debug_rnglists_index.?,
}) |sym_index| {
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(elf_file).?;
if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
} else 0;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
const buffer = try gpa.alloc(u8, sh_size);
defer gpa.free(buffer);
const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
shdr.sh_flags & elf.SHF_EXECINSTR != 0)
0xcc // int3
else
0;
@memset(buffer, padding_byte);
for (atom_list.items) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
log.debug("writing atom({}) from 0x{x} to 0x{x}", .{
ref,
sh_offset + offset,
sh_offset + offset + size,
});
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = switch (atom_ptr.file(elf_file).?) {
.object => |x| try x.codeDecompressAlloc(elf_file, ref.index),
.zig_object => |x| try x.codeAlloc(elf_file, ref.index),
else => unreachable,
};
defer gpa.free(in_code);
@memcpy(out_code, in_code);
}
try elf_file.base.file.?.pwriteAll(buffer, sh_offset);
if (atom_list.atoms.items.len == 0) continue;
try atom_list.writeRelocatable(&buffer, elf_file);
}
}
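// A hedged sketch of AtomList.writeRelocatable; the loop deleted above shows
// the behavior it replaces, but the receiver's helper names (e.g.
// list.offset) are assumptions. Executable PROGBITS sections still get 0xcc
// (int3) padding so a stray jump into a gap traps instead of executing zeros.
pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
    const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
        shdr.sh_flags & elf.SHF_EXECINSTR != 0) 0xcc else 0;
    try buffer.resize(std.math.cast(usize, list.size) orelse return error.Overflow);
    @memset(buffer.items, padding_byte);
    for (list.atoms.items) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        const off = std.math.cast(usize, atom_ptr.value) orelse return error.Overflow;
        const in_code = switch (atom_ptr.file(elf_file).?) {
            .object => |x| try x.codeDecompressAlloc(elf_file, ref.index),
            .zig_object => |x| try x.codeAlloc(elf_file, ref.index),
            else => unreachable,
        };
        defer gpa.free(in_code);
        @memcpy(buffer.items[off..][0..in_code.len], in_code);
    }
    // list.offset is an assumed accessor for the list's file position.
    try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
    buffer.clearRetainingCapacity(); // caller reuses the buffer per section
}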
@ -498,9 +463,10 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;
const slice = elf_file.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list)) |shdr, atom_list| {
for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
if (shdr.sh_type != elf.SHT_RELA) continue;
if (atom_list.items.len == 0) continue;
if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue;
const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse
return error.Overflow;
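// .rela.eh_frame is excluded from the generic per-atom reloc emission here;
// its records are assumed to be produced by the dedicated eh_frame path (see
// writeEhFrameRelocatable below), matching the size guard added in
// updateSectionSizes.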
@ -542,7 +508,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrameObject(elf_file, buffer.writer());
try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer());
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
shdr.sh_offset + existing_size,
shdr.sh_offset + sh_size,

View File

@ -55,6 +55,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
// Exercise linker in ar mode
elf_step.dependOn(testEmitStaticLib(b, .{ .target = musl_target }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .target = musl_target }));
// Exercise linker with LLVM backend
// musl tests
@ -66,6 +67,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSectionsZig(b, .{ .target = musl_target }));
elf_step.dependOn(testImageBase(b, .{ .target = musl_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = musl_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = musl_target }));