mirror of https://github.com/ziglang/zig.git
synced 2026-01-30 03:03:46 +00:00
Merge pull request #13260 from ziglang/zld-sync
macho: faster and more memory efficient linker
commit e67c756b91
CMakeLists.txt
@@ -768,11 +768,15 @@ set(ZIG_STAGE2_SOURCES
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Atom.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/CodeSignature.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/DebugSymbols.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/DwarfInfo.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Dylib.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Object.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/ZldAtom.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/bind.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/dead_strip.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/thunks.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/zld.zig"
    "${CMAKE_SOURCE_DIR}/src/link/Plan9.zig"
    "${CMAKE_SOURCE_DIR}/src/link/Plan9/aout.zig"
    "${CMAKE_SOURCE_DIR}/src/link/Wasm.zig"

src/link/MachO.zig: 1103 lines changed (file diff suppressed because it is too large)

src/link/MachO/Archive.zig
@@ -165,6 +165,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !
    while (true) {
        const n_strx = symtab_reader.readIntLittle(u32) catch |err| switch (err) {
            error.EndOfStream => break,
            else => |e| return e,
        };
        const object_offset = try symtab_reader.readIntLittle(u32);

@@ -183,7 +184,7 @@ fn parseTableOfContents(self: *Archive, allocator: Allocator, reader: anytype) !

pub fn parseObject(
    self: Archive,
-    allocator: Allocator,
+    gpa: Allocator,
    cpu_arch: std.Target.Cpu.Arch,
    offset: u32,
) !Object {
@@ -198,15 +199,15 @@ pub fn parseObject(
    }

    const name_or_length = try object_header.nameOrLength();
-    const object_name = try parseName(allocator, name_or_length, reader);
-    defer allocator.free(object_name);
+    const object_name = try parseName(gpa, name_or_length, reader);
+    defer gpa.free(object_name);

    log.debug("extracting object '{s}' from archive '{s}'", .{ object_name, self.name });

    const name = name: {
        var buffer: [std.fs.MAX_PATH_BYTES]u8 = undefined;
        const path = try std.os.realpath(self.name, &buffer);
-        break :name try std.fmt.allocPrint(allocator, "{s}({s})", .{ path, object_name });
+        break :name try std.fmt.allocPrint(gpa, "{s}({s})", .{ path, object_name });
    };

    const object_name_len = switch (name_or_length) {
@@ -214,7 +215,7 @@ pub fn parseObject(
        .Length => |len| len,
    };
    const object_size = (try object_header.size()) - object_name_len;
-    const contents = try allocator.allocWithOptions(u8, object_size, @alignOf(u64), null);
+    const contents = try gpa.allocWithOptions(u8, object_size, @alignOf(u64), null);
    const amt = try reader.readAll(contents);
    if (amt != object_size) {
        return error.InputOutput;
@@ -222,11 +223,11 @@ pub fn parseObject(

    var object = Object{
        .name = name,
-        .mtime = try self.header.date(),
+        .mtime = object_header.date() catch 0,
        .contents = contents,
    };

-    try object.parse(allocator, cpu_arch);
+    try object.parse(gpa, cpu_arch);

    return object;
}

src/link/MachO/Atom.zig
@@ -15,8 +15,7 @@ const Allocator = mem.Allocator;
const Arch = std.Target.Cpu.Arch;
const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
-const RelocationIncr = @import("Relocation.zig"); // temporary name until we clean up object-file relocation scanning
+const Relocation = @import("Relocation.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;

/// Each decl always gets a local symbol with the fully qualified name.
@@ -30,12 +29,6 @@ sym_index: u32,
/// null means symbol defined by Zig source.
file: ?u32,

/// List of symbols contained within this atom
contained: std.ArrayListUnmanaged(SymbolAtOffset) = .{},

/// Code (may be non-relocated) this atom represents
code: std.ArrayListUnmanaged(u8) = .{},

/// Size and alignment of this atom
/// Unlike in Elf, we need to store the size of this symbol as part of
/// the atom since macho.nlist_64 lacks this information.
@@ -45,21 +38,6 @@ size: u64,
/// For instance, alignment of 0 should be read as 2^0 = 1 byte aligned.
alignment: u32,
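// Annotation (not part of the original diff): the alignment is stored as a
// power-of-two exponent, so alignment = 4 places the atom on a 2^4 = 16-byte
// boundary; this matches how Mach-O section headers encode their align field.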

/// List of relocations belonging to this atom.
relocs: std.ArrayListUnmanaged(Relocation) = .{},

/// List of offsets contained within this atom that need rebasing by the dynamic
/// loader for example in presence of ASLR.
rebases: std.ArrayListUnmanaged(u64) = .{},

/// List of offsets contained within this atom that will be dynamically bound
/// by the dynamic loader and contain pointers to resolved (at load time) extern
/// symbols (aka proxies aka imports).
bindings: std.ArrayListUnmanaged(Binding) = .{},

/// List of lazy bindings (cf bindings above).
lazy_bindings: std.ArrayListUnmanaged(Binding) = .{},

/// Points to the previous and next neighbours
next: ?*Atom,
prev: ?*Atom,
@@ -76,50 +54,6 @@ pub const SymbolAtOffset = struct {
    offset: u64,
};

pub const Relocation = struct {
    /// Offset within the atom's code buffer.
    /// Note relocation size can be inferred by relocation's kind.
    offset: u32,

    target: MachO.SymbolWithLoc,

    addend: i64,

    subtractor: ?MachO.SymbolWithLoc,

    pcrel: bool,

    length: u2,

    @"type": u4,

    pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
        const is_via_got = got: {
            switch (macho_file.base.options.target.cpu.arch) {
                .aarch64 => break :got switch (@intToEnum(macho.reloc_type_arm64, self.@"type")) {
                    .ARM64_RELOC_GOT_LOAD_PAGE21,
                    .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
                    .ARM64_RELOC_POINTER_TO_GOT,
                    => true,
                    else => false,
                },
                .x86_64 => break :got switch (@intToEnum(macho.reloc_type_x86_64, self.@"type")) {
                    .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => true,
                    else => false,
                },
                else => unreachable,
            }
        };

        if (is_via_got) {
            return macho_file.getGotAtomForSymbol(self.target).?; // panic means fatal error
        }
        if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
        if (macho_file.getTlvPtrAtomForSymbol(self.target)) |tlv_ptr_atom| return tlv_ptr_atom;
        return macho_file.getAtomForSymbol(self.target);
    }
};

pub const empty = Atom{
    .sym_index = 0,
    .file = null,
@@ -130,24 +64,6 @@ pub const empty = Atom{
    .dbg_info_atom = undefined,
};

pub fn deinit(self: *Atom, allocator: Allocator) void {
    self.lazy_bindings.deinit(allocator);
    self.bindings.deinit(allocator);
    self.rebases.deinit(allocator);
    self.relocs.deinit(allocator);
    self.contained.deinit(allocator);
    self.code.deinit(allocator);
}

pub fn clearRetainingCapacity(self: *Atom) void {
    self.lazy_bindings.clearRetainingCapacity();
    self.bindings.clearRetainingCapacity();
    self.rebases.clearRetainingCapacity();
    self.relocs.clearRetainingCapacity();
    self.contained.clearRetainingCapacity();
    self.code.clearRetainingCapacity();
}

/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, macho_file: *MachO) macho.nlist_64 {
    return self.getSymbolPtr(macho_file).*;
@@ -165,17 +81,6 @@ pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
    return .{ .sym_index = self.sym_index, .file = self.file };
}

/// Returns true if the symbol pointed at with `sym_loc` is contained within this atom.
/// WARNING this function assumes all atoms have been allocated in the virtual memory.
/// Calling it without allocating with `MachO.allocateSymbols` (or equivalent) will
/// give bogus results.
pub fn isSymbolContained(self: Atom, sym_loc: SymbolWithLoc, macho_file: *MachO) bool {
    const sym = macho_file.getSymbol(sym_loc);
    if (!sym.sect()) return false;
    const self_sym = self.getSymbol(macho_file);
    return sym.n_value >= self_sym.n_value and sym.n_value < self_sym.n_value + self.size;
}

/// Returns the name of this atom.
pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
    return macho_file.getSymbolName(.{
@@ -211,690 +116,7 @@ pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
    return surplus >= MachO.min_text_capacity;
}

const RelocContext = struct {
    macho_file: *MachO,
    base_addr: u64 = 0,
    base_offset: i32 = 0,
};

pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info, context: RelocContext) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = context.macho_file.base.allocator;

    const arch = context.macho_file.base.options.target.cpu.arch;
    var addend: i64 = 0;
    var subtractor: ?SymbolWithLoc = null;

    for (relocs) |rel, i| {
        blk: {
            switch (arch) {
                .aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
                    .ARM64_RELOC_ADDEND => {
                        assert(addend == 0);
                        addend = rel.r_symbolnum;
                        // Verify that it's followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12.
                        if (relocs.len <= i + 1) {
                            log.err("no relocation after ARM64_RELOC_ADDEND", .{});
                            return error.UnexpectedRelocationType;
                        }
                        const next = @intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type);
                        switch (next) {
                            .ARM64_RELOC_PAGE21, .ARM64_RELOC_PAGEOFF12 => {},
                            else => {
                                log.err("unexpected relocation type after ARM64_RELOC_ADDEND", .{});
                                log.err(" expected ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12", .{});
                                log.err(" found {s}", .{@tagName(next)});
                                return error.UnexpectedRelocationType;
                            },
                        }
                        continue;
                    },
                    .ARM64_RELOC_SUBTRACTOR => {},
                    else => break :blk,
                },
                .x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
                    .X86_64_RELOC_SUBTRACTOR => {},
                    else => break :blk,
                },
                else => unreachable,
            }

            assert(subtractor == null);
            const sym_loc = MachO.SymbolWithLoc{
                .sym_index = rel.r_symbolnum,
                .file = self.file,
            };
            const sym = context.macho_file.getSymbol(sym_loc);
            if (sym.sect() and !sym.ext()) {
                subtractor = sym_loc;
            } else {
                const sym_name = context.macho_file.getSymbolName(sym_loc);
                subtractor = context.macho_file.getGlobal(sym_name).?;
            }
            // Verify that *_SUBTRACTOR is followed by *_UNSIGNED.
            if (relocs.len <= i + 1) {
                log.err("no relocation after *_RELOC_SUBTRACTOR", .{});
                return error.UnexpectedRelocationType;
            }
            switch (arch) {
                .aarch64 => switch (@intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type)) {
                    .ARM64_RELOC_UNSIGNED => {},
                    else => {
                        log.err("unexpected relocation type after ARM64_RELOC_ADDEND", .{});
                        log.err(" expected ARM64_RELOC_UNSIGNED", .{});
                        log.err(" found {s}", .{
                            @tagName(@intToEnum(macho.reloc_type_arm64, relocs[i + 1].r_type)),
                        });
                        return error.UnexpectedRelocationType;
                    },
                },
                .x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, relocs[i + 1].r_type)) {
                    .X86_64_RELOC_UNSIGNED => {},
                    else => {
                        log.err("unexpected relocation type after X86_64_RELOC_ADDEND", .{});
                        log.err(" expected X86_64_RELOC_UNSIGNED", .{});
                        log.err(" found {s}", .{
                            @tagName(@intToEnum(macho.reloc_type_x86_64, relocs[i + 1].r_type)),
                        });
                        return error.UnexpectedRelocationType;
                    },
                },
                else => unreachable,
            }
            continue;
        }

        const object = &context.macho_file.objects.items[self.file.?];
        const target = target: {
            if (rel.r_extern == 0) {
                const sect_id = @intCast(u16, rel.r_symbolnum - 1);
                const sym_index = object.sections_as_symbols.get(sect_id) orelse blk: {
                    const sect = object.getSourceSection(sect_id);
                    const out_sect_id = (try context.macho_file.getOutputSection(sect)) orelse
                        unreachable;
                    const sym_index = @intCast(u32, object.symtab.items.len);
                    try object.symtab.append(gpa, .{
                        .n_strx = 0,
                        .n_type = macho.N_SECT,
                        .n_sect = out_sect_id + 1,
                        .n_desc = 0,
                        .n_value = sect.addr,
                    });
                    try object.sections_as_symbols.putNoClobber(gpa, sect_id, sym_index);
                    break :blk sym_index;
                };
                break :target MachO.SymbolWithLoc{ .sym_index = sym_index, .file = self.file };
            }

            const sym_loc = MachO.SymbolWithLoc{
                .sym_index = rel.r_symbolnum,
                .file = self.file,
            };
            const sym = context.macho_file.getSymbol(sym_loc);

            if (sym.sect() and !sym.ext()) {
                break :target sym_loc;
            } else {
                const sym_name = context.macho_file.getSymbolName(sym_loc);
                break :target context.macho_file.getGlobal(sym_name).?;
            }
        };
        const offset = @intCast(u32, rel.r_address - context.base_offset);

        switch (arch) {
            .aarch64 => {
                switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
                    .ARM64_RELOC_BRANCH26 => {
                        // TODO rewrite relocation
                        try addStub(target, context);
                    },
                    .ARM64_RELOC_GOT_LOAD_PAGE21,
                    .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
                    .ARM64_RELOC_POINTER_TO_GOT,
                    => {
                        // TODO rewrite relocation
                        try addGotEntry(target, context);
                    },
                    .ARM64_RELOC_UNSIGNED => {
                        addend = if (rel.r_length == 3)
                            mem.readIntLittle(i64, self.code.items[offset..][0..8])
                        else
                            mem.readIntLittle(i32, self.code.items[offset..][0..4]);
                        if (rel.r_extern == 0) {
                            const target_sect_base_addr = object.getSourceSection(@intCast(u16, rel.r_symbolnum - 1)).addr;
                            addend -= @intCast(i64, target_sect_base_addr);
                        }
                        try self.addPtrBindingOrRebase(rel, target, context);
                    },
                    .ARM64_RELOC_TLVP_LOAD_PAGE21,
                    .ARM64_RELOC_TLVP_LOAD_PAGEOFF12,
                    => {
                        try addTlvPtrEntry(target, context);
                    },
                    else => {},
                }
            },
            .x86_64 => {
                const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
                switch (rel_type) {
                    .X86_64_RELOC_BRANCH => {
                        // TODO rewrite relocation
                        try addStub(target, context);
                        addend = mem.readIntLittle(i32, self.code.items[offset..][0..4]);
                    },
                    .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => {
                        // TODO rewrite relocation
                        try addGotEntry(target, context);
                        addend = mem.readIntLittle(i32, self.code.items[offset..][0..4]);
                    },
                    .X86_64_RELOC_UNSIGNED => {
                        addend = if (rel.r_length == 3)
                            mem.readIntLittle(i64, self.code.items[offset..][0..8])
                        else
                            mem.readIntLittle(i32, self.code.items[offset..][0..4]);
                        if (rel.r_extern == 0) {
                            const target_sect_base_addr = object.getSourceSection(@intCast(u16, rel.r_symbolnum - 1)).addr;
                            addend -= @intCast(i64, target_sect_base_addr);
                        }
                        try self.addPtrBindingOrRebase(rel, target, context);
                    },
                    .X86_64_RELOC_SIGNED,
                    .X86_64_RELOC_SIGNED_1,
                    .X86_64_RELOC_SIGNED_2,
                    .X86_64_RELOC_SIGNED_4,
                    => {
                        const correction: u3 = switch (rel_type) {
                            .X86_64_RELOC_SIGNED => 0,
                            .X86_64_RELOC_SIGNED_1 => 1,
                            .X86_64_RELOC_SIGNED_2 => 2,
                            .X86_64_RELOC_SIGNED_4 => 4,
                            else => unreachable,
                        };
                        addend = mem.readIntLittle(i32, self.code.items[offset..][0..4]) + correction;
                        if (rel.r_extern == 0) {
                            // Note for the future self: when r_extern == 0, we should subtract correction from the
                            // addend.
                            const target_sect_base_addr = object.getSourceSection(@intCast(u16, rel.r_symbolnum - 1)).addr;
                            // We need to add base_offset, i.e., offset of this atom wrt to the source
                            // section. Otherwise, the addend will over-/under-shoot.
                            addend += @intCast(i64, context.base_addr + offset + 4) -
                                @intCast(i64, target_sect_base_addr) + context.base_offset;
                        }
                    },
                    .X86_64_RELOC_TLV => {
                        try addTlvPtrEntry(target, context);
                    },
                    else => {},
                }
            },
            else => unreachable,
        }

        try self.relocs.append(gpa, .{
            .offset = offset,
            .target = target,
            .addend = addend,
            .subtractor = subtractor,
            .pcrel = rel.r_pcrel == 1,
            .length = rel.r_length,
            .@"type" = rel.r_type,
        });

        addend = 0;
        subtractor = null;
    }
}

fn addPtrBindingOrRebase(
    self: *Atom,
    rel: macho.relocation_info,
    target: MachO.SymbolWithLoc,
    context: RelocContext,
) !void {
    const gpa = context.macho_file.base.allocator;
    const sym = context.macho_file.getSymbol(target);
    if (sym.undf()) {
        try self.bindings.append(gpa, .{
            .target = target,
            .offset = @intCast(u32, rel.r_address - context.base_offset),
        });
    } else {
        const source_sym = self.getSymbol(context.macho_file);
        const section = context.macho_file.sections.get(source_sym.n_sect - 1);
        const header = section.header;
        const segment_index = section.segment_index;
        const sect_type = header.@"type"();

        const should_rebase = rebase: {
            if (rel.r_length != 3) break :rebase false;

            // TODO actually, a check similar to what dyld is doing, that is, verifying
            // that the segment is writable should be enough here.
            const is_right_segment = blk: {
                if (context.macho_file.data_segment_cmd_index) |idx| {
                    if (segment_index == idx) {
                        break :blk true;
                    }
                }
                if (context.macho_file.data_const_segment_cmd_index) |idx| {
                    if (segment_index == idx) {
                        break :blk true;
                    }
                }
                break :blk false;
            };

            if (!is_right_segment) break :rebase false;
            if (sect_type != macho.S_LITERAL_POINTERS and
                sect_type != macho.S_REGULAR and
                sect_type != macho.S_MOD_INIT_FUNC_POINTERS and
                sect_type != macho.S_MOD_TERM_FUNC_POINTERS)
            {
                break :rebase false;
            }

            break :rebase true;
        };

        if (should_rebase) {
            try self.rebases.append(gpa, @intCast(u32, rel.r_address - context.base_offset));
        }
    }
}

fn addTlvPtrEntry(target: MachO.SymbolWithLoc, context: RelocContext) !void {
    const target_sym = context.macho_file.getSymbol(target);
    if (!target_sym.undf()) return;
    if (context.macho_file.tlv_ptr_entries_table.contains(target)) return;

    const index = try context.macho_file.allocateTlvPtrEntry(target);
    const atom = try context.macho_file.createTlvPtrAtom(target);
    context.macho_file.tlv_ptr_entries.items[index].sym_index = atom.sym_index;
}

fn addGotEntry(target: MachO.SymbolWithLoc, context: RelocContext) !void {
    if (context.macho_file.got_entries_table.contains(target)) return;

    const index = try context.macho_file.allocateGotEntry(target);
    const atom = try context.macho_file.createGotAtom(target);
    context.macho_file.got_entries.items[index].sym_index = atom.sym_index;
}

fn addStub(target: MachO.SymbolWithLoc, context: RelocContext) !void {
    const target_sym = context.macho_file.getSymbol(target);
    if (!target_sym.undf()) return;
    if (context.macho_file.stubs_table.contains(target)) return;

    const stub_index = try context.macho_file.allocateStubEntry(target);

    const stub_helper_atom = try context.macho_file.createStubHelperAtom();
    const laptr_atom = try context.macho_file.createLazyPointerAtom(stub_helper_atom.sym_index, target);
    const stub_atom = try context.macho_file.createStubAtom(laptr_atom.sym_index);

    context.macho_file.stubs.items[stub_index].sym_index = stub_atom.sym_index;
}

pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    log.debug("ATOM(%{d}, '{s}')", .{ self.sym_index, self.getName(macho_file) });

    for (self.relocs.items) |rel| {
        const arch = macho_file.base.options.target.cpu.arch;
        switch (arch) {
            .aarch64 => {
                log.debug(" RELA({s}) @ {x} => %{d} in object({?d})", .{
                    @tagName(@intToEnum(macho.reloc_type_arm64, rel.@"type")),
                    rel.offset,
                    rel.target.sym_index,
                    rel.target.file,
                });
            },
            .x86_64 => {
                log.debug(" RELA({s}) @ {x} => %{d} in object({?d})", .{
                    @tagName(@intToEnum(macho.reloc_type_x86_64, rel.@"type")),
                    rel.offset,
                    rel.target.sym_index,
                    rel.target.file,
                });
            },
            else => unreachable,
        }

        const source_addr = blk: {
            const source_sym = self.getSymbol(macho_file);
            break :blk source_sym.n_value + rel.offset;
        };
        const is_tlv = is_tlv: {
            const source_sym = self.getSymbol(macho_file);
            const header = macho_file.sections.items(.header)[source_sym.n_sect - 1];
            break :is_tlv header.@"type"() == macho.S_THREAD_LOCAL_VARIABLES;
        };
        const target_addr = blk: {
            const target_atom = rel.getTargetAtom(macho_file) orelse {
                // If there is no atom for target, we still need to check for special, atom-less
                // symbols such as `___dso_handle`.
                const target_name = macho_file.getSymbolName(rel.target);
                assert(macho_file.getGlobal(target_name) != null);
                const atomless_sym = macho_file.getSymbol(rel.target);
                log.debug(" | atomless target '{s}'", .{target_name});
                break :blk atomless_sym.n_value;
            };
            log.debug(" | target ATOM(%{d}, '{s}') in object({?d})", .{
                target_atom.sym_index,
                target_atom.getName(macho_file),
                target_atom.file,
            });
            // If `rel.target` is contained within the target atom, pull its address value.
            const target_sym = if (target_atom.isSymbolContained(rel.target, macho_file))
                macho_file.getSymbol(rel.target)
            else
                target_atom.getSymbol(macho_file);
            assert(target_sym.n_desc != MachO.N_DESC_GCED);
            const base_address: u64 = if (is_tlv) base_address: {
                // For TLV relocations, the value specified as a relocation is the displacement from the
                // TLV initializer (either value in __thread_data or zero-init in __thread_bss) to the first
                // defined TLV template init section in the following order:
                // * wrt to __thread_data if defined, then
                // * wrt to __thread_bss
                const sect_id: u16 = sect_id: {
                    if (macho_file.getSectionByName("__DATA", "__thread_data")) |i| {
                        break :sect_id i;
                    } else if (macho_file.getSectionByName("__DATA", "__thread_bss")) |i| {
                        break :sect_id i;
                    } else {
                        log.err("threadlocal variables present but no initializer sections found", .{});
                        log.err(" __thread_data not found", .{});
                        log.err(" __thread_bss not found", .{});
                        return error.FailedToResolveRelocationTarget;
                    }
                };
                break :base_address macho_file.sections.items(.header)[sect_id].addr;
            } else 0;
            break :blk target_sym.n_value - base_address;
        };

        log.debug(" | source_addr = 0x{x}", .{source_addr});

        switch (arch) {
            .aarch64 => {
                switch (@intToEnum(macho.reloc_type_arm64, rel.@"type")) {
                    .ARM64_RELOC_BRANCH26 => {
                        log.debug(" | target_addr = 0x{x}", .{target_addr});
                        const displacement = math.cast(
                            i28,
                            @intCast(i64, target_addr) - @intCast(i64, source_addr),
                        ) orelse {
                            log.err("jump too big to encode as i28 displacement value", .{});
                            log.err(" (target - source) = displacement => 0x{x} - 0x{x} = 0x{x}", .{
                                target_addr,
                                source_addr,
                                @intCast(i64, target_addr) - @intCast(i64, source_addr),
                            });
                            log.err(" TODO implement branch islands to extend jump distance for arm64", .{});
                            return error.TODOImplementBranchIslands;
                        };
                        const code = self.code.items[rel.offset..][0..4];
                        var inst = aarch64.Instruction{
                            .unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
                                aarch64.Instruction,
                                aarch64.Instruction.unconditional_branch_immediate,
                            ), code),
                        };
                        inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
                        mem.writeIntLittle(u32, code, inst.toU32());
                    },
                    .ARM64_RELOC_PAGE21,
                    .ARM64_RELOC_GOT_LOAD_PAGE21,
                    .ARM64_RELOC_TLVP_LOAD_PAGE21,
                    => {
                        const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
                        log.debug(" | target_addr = 0x{x}", .{actual_target_addr});
                        const source_page = @intCast(i32, source_addr >> 12);
                        const target_page = @intCast(i32, actual_target_addr >> 12);
                        const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
                        const code = self.code.items[rel.offset..][0..4];
                        var inst = aarch64.Instruction{
                            .pc_relative_address = mem.bytesToValue(meta.TagPayload(
                                aarch64.Instruction,
                                aarch64.Instruction.pc_relative_address,
                            ), code),
                        };
                        inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
                        inst.pc_relative_address.immlo = @truncate(u2, pages);
                        mem.writeIntLittle(u32, code, inst.toU32());
                    },
                    .ARM64_RELOC_PAGEOFF12 => {
                        const code = self.code.items[rel.offset..][0..4];
                        const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
                        log.debug(" | target_addr = 0x{x}", .{actual_target_addr});
                        const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
                        if (isArithmeticOp(self.code.items[rel.offset..][0..4])) {
                            var inst = aarch64.Instruction{
                                .add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
                                    aarch64.Instruction,
                                    aarch64.Instruction.add_subtract_immediate,
                                ), code),
                            };
                            inst.add_subtract_immediate.imm12 = narrowed;
                            mem.writeIntLittle(u32, code, inst.toU32());
                        } else {
                            var inst = aarch64.Instruction{
                                .load_store_register = mem.bytesToValue(meta.TagPayload(
                                    aarch64.Instruction,
                                    aarch64.Instruction.load_store_register,
                                ), code),
                            };
                            const offset: u12 = blk: {
                                if (inst.load_store_register.size == 0) {
                                    if (inst.load_store_register.v == 1) {
                                        // 128-bit SIMD is scaled by 16.
                                        break :blk try math.divExact(u12, narrowed, 16);
                                    }
                                    // Otherwise, 8-bit SIMD or ldrb.
                                    break :blk narrowed;
                                } else {
                                    const denom: u4 = try math.powi(u4, 2, inst.load_store_register.size);
                                    break :blk try math.divExact(u12, narrowed, denom);
                                }
                            };
                            inst.load_store_register.offset = offset;
                            mem.writeIntLittle(u32, code, inst.toU32());
                        }
                    },
                    .ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
                        const code = self.code.items[rel.offset..][0..4];
                        const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
                        log.debug(" | target_addr = 0x{x}", .{actual_target_addr});
                        const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
                        var inst: aarch64.Instruction = .{
                            .load_store_register = mem.bytesToValue(meta.TagPayload(
                                aarch64.Instruction,
                                aarch64.Instruction.load_store_register,
                            ), code),
                        };
                        const offset = try math.divExact(u12, narrowed, 8);
                        inst.load_store_register.offset = offset;
                        mem.writeIntLittle(u32, code, inst.toU32());
                    },
                    .ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
                        const code = self.code.items[rel.offset..][0..4];
                        const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
                        log.debug(" | target_addr = 0x{x}", .{actual_target_addr});

                        const RegInfo = struct {
                            rd: u5,
                            rn: u5,
                            size: u2,
                        };
                        const reg_info: RegInfo = blk: {
                            if (isArithmeticOp(code)) {
                                const inst = mem.bytesToValue(meta.TagPayload(
                                    aarch64.Instruction,
                                    aarch64.Instruction.add_subtract_immediate,
                                ), code);
                                break :blk .{
                                    .rd = inst.rd,
                                    .rn = inst.rn,
                                    .size = inst.sf,
                                };
                            } else {
                                const inst = mem.bytesToValue(meta.TagPayload(
                                    aarch64.Instruction,
                                    aarch64.Instruction.load_store_register,
                                ), code);
                                break :blk .{
                                    .rd = inst.rt,
                                    .rn = inst.rn,
                                    .size = inst.size,
                                };
                            }
                        };
                        const narrowed = @truncate(u12, @intCast(u64, actual_target_addr));
                        var inst = if (macho_file.tlv_ptr_entries_table.contains(rel.target)) blk: {
                            const offset = try math.divExact(u12, narrowed, 8);
                            break :blk aarch64.Instruction{
                                .load_store_register = .{
                                    .rt = reg_info.rd,
                                    .rn = reg_info.rn,
                                    .offset = offset,
                                    .opc = 0b01,
                                    .op1 = 0b01,
                                    .v = 0,
                                    .size = reg_info.size,
                                },
                            };
                        } else aarch64.Instruction{
                            .add_subtract_immediate = .{
                                .rd = reg_info.rd,
                                .rn = reg_info.rn,
                                .imm12 = narrowed,
                                .sh = 0,
                                .s = 0,
                                .op = 0,
                                .sf = @truncate(u1, reg_info.size),
                            },
                        };
                        mem.writeIntLittle(u32, code, inst.toU32());
                    },
                    .ARM64_RELOC_POINTER_TO_GOT => {
                        log.debug(" | target_addr = 0x{x}", .{target_addr});
                        const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse return error.Overflow;
                        mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, result));
                    },
                    .ARM64_RELOC_UNSIGNED => {
                        const result = blk: {
                            if (rel.subtractor) |subtractor| {
                                const sym = macho_file.getSymbol(subtractor);
                                break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + rel.addend;
                            } else {
                                break :blk @intCast(i64, target_addr) + rel.addend;
                            }
                        };
                        log.debug(" | target_addr = 0x{x}", .{result});

                        if (rel.length == 3) {
                            mem.writeIntLittle(u64, self.code.items[rel.offset..][0..8], @bitCast(u64, result));
                        } else {
                            mem.writeIntLittle(
                                u32,
                                self.code.items[rel.offset..][0..4],
                                @truncate(u32, @bitCast(u64, result)),
                            );
                        }
                    },
                    .ARM64_RELOC_SUBTRACTOR => unreachable,
                    .ARM64_RELOC_ADDEND => unreachable,
                }
            },
            .x86_64 => {
                switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
                    .X86_64_RELOC_BRANCH => {
                        log.debug(" | target_addr = 0x{x}", .{target_addr});
                        const displacement = math.cast(
                            i32,
                            @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
                        ) orelse return error.Overflow;
                        mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
                    },
                    .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => {
                        log.debug(" | target_addr = 0x{x}", .{target_addr});
                        const displacement = math.cast(
                            i32,
                            @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
                        ) orelse return error.Overflow;
                        mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
                    },
                    .X86_64_RELOC_TLV => {
                        log.debug(" | target_addr = 0x{x}", .{target_addr});
                        if (!macho_file.tlv_ptr_entries_table.contains(rel.target)) {
                            // We need to rewrite the opcode from movq to leaq.
                            self.code.items[rel.offset - 2] = 0x8d;
                        }
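                        // Annotation (not part of the original diff): the patched
                        // sequence is of the form `movq x@TLVP(%rip), %reg`; 0x8b is
                        // the MOV opcode and 0x8d is LEA, so when no TLV-pointer
                        // entry exists the descriptor load becomes a plain address
                        // computation, and only the opcode byte two bytes before
                        // the 4-byte displacement needs patching.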
                        const displacement = math.cast(
                            i32,
                            @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4 + rel.addend,
                        ) orelse return error.Overflow;
                        mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
                    },
                    .X86_64_RELOC_SIGNED,
                    .X86_64_RELOC_SIGNED_1,
                    .X86_64_RELOC_SIGNED_2,
                    .X86_64_RELOC_SIGNED_4,
                    => {
                        const correction: u3 = switch (@intToEnum(macho.reloc_type_x86_64, rel.@"type")) {
                            .X86_64_RELOC_SIGNED => 0,
                            .X86_64_RELOC_SIGNED_1 => 1,
                            .X86_64_RELOC_SIGNED_2 => 2,
                            .X86_64_RELOC_SIGNED_4 => 4,
                            else => unreachable,
                        };
                        const actual_target_addr = @intCast(i64, target_addr) + rel.addend;
                        log.debug(" | target_addr = 0x{x}", .{actual_target_addr});
                        const displacement = math.cast(
                            i32,
                            actual_target_addr - @intCast(i64, source_addr + correction + 4),
                        ) orelse return error.Overflow;
                        mem.writeIntLittle(u32, self.code.items[rel.offset..][0..4], @bitCast(u32, displacement));
                    },
                    .X86_64_RELOC_UNSIGNED => {
                        const result = blk: {
                            if (rel.subtractor) |subtractor| {
                                const sym = macho_file.getSymbol(subtractor);
                                break :blk @intCast(i64, target_addr) - @intCast(i64, sym.n_value) + rel.addend;
                            } else {
                                break :blk @intCast(i64, target_addr) + rel.addend;
                            }
                        };
                        log.debug(" | target_addr = 0x{x}", .{result});

                        if (rel.length == 3) {
                            mem.writeIntLittle(u64, self.code.items[rel.offset..][0..8], @bitCast(u64, result));
                        } else {
                            mem.writeIntLittle(
                                u32,
                                self.code.items[rel.offset..][0..4],
                                @truncate(u32, @bitCast(u64, result)),
                            );
                        }
                    },
                    .X86_64_RELOC_SUBTRACTOR => unreachable,
                }
            },
            else => unreachable,
        }
    }
}

inline fn isArithmeticOp(inst: *const [4]u8) bool {
    const group_decode = @truncate(u5, inst[3]);
    return ((group_decode >> 2) == 4);
}
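// Annotation (not part of the original diff): inst[3] is the most significant
// byte of the little-endian instruction word, so group_decode holds bits
// 24..28. The check (group_decode >> 2) == 4 tests that bits 26..28 equal
// 0b100, the AArch64 "data processing (immediate)" encoding group used by
// add/sub immediate, as opposed to a load/store instruction.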

-pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: RelocationIncr) !void {
+pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
    return self.addRelocations(macho_file, 1, .{reloc});
}

@@ -902,7 +124,7 @@ pub fn addRelocations(
    self: *Atom,
    macho_file: *MachO,
    comptime count: comptime_int,
-    relocs: [count]RelocationIncr,
+    relocs: [count]Relocation,
) !void {
    const gpa = macho_file.base.allocator;
    const target = macho_file.base.options.target;

src/link/MachO/DebugSymbols.zig
@@ -477,7 +477,6 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {

    for (self.base.locals.items) |sym, sym_id| {
        if (sym.n_strx == 0) continue; // no name, skip
-        if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip
        const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null };
        if (self.base.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip
        if (self.base.getGlobal(self.base.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip
@@ -492,7 +491,6 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void {
    for (self.base.globals.items) |global| {
        const sym = self.base.getSymbol(global);
        if (sym.undf()) continue; // import, skip
-        if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip
        var out_sym = sym;
        out_sym.n_strx = try self.strtab.insert(gpa, self.base.getSymbolName(global));
        try exports.append(out_sym);

src/link/MachO/DwarfInfo.zig: 467 lines (new file)
@@ -0,0 +1,467 @@
const DwarfInfo = @This();

const std = @import("std");
const assert = std.debug.assert;
const dwarf = std.dwarf;
const leb = std.leb;
const log = std.log.scoped(.macho);
const math = std.math;
const mem = std.mem;

const Allocator = mem.Allocator;
pub const AbbrevLookupTable = std.AutoHashMap(u64, struct { pos: usize, len: usize });
pub const SubprogramLookupByName = std.StringHashMap(struct { addr: u64, size: u64 });

debug_info: []const u8,
debug_abbrev: []const u8,
debug_str: []const u8,

pub fn getCompileUnitIterator(self: DwarfInfo) CompileUnitIterator {
    return .{ .ctx = self };
}

const CompileUnitIterator = struct {
    ctx: DwarfInfo,
    pos: usize = 0,

    pub fn next(self: *CompileUnitIterator) !?CompileUnit {
        if (self.pos >= self.ctx.debug_info.len) return null;

        var stream = std.io.fixedBufferStream(self.ctx.debug_info);
        var creader = std.io.countingReader(stream.reader());
        const reader = creader.reader();

        const cuh = try CompileUnit.Header.read(reader);
        const total_length = cuh.length + @as(u64, if (cuh.is_64bit) @sizeOf(u64) else @sizeOf(u32));
        const offset = math.cast(usize, creader.bytes_read) orelse return error.Overflow;

        const cu = CompileUnit{
            .cuh = cuh,
            .debug_info_off = offset,
        };

        self.pos += (math.cast(usize, total_length) orelse return error.Overflow);

        return cu;
    }
};

pub fn genSubprogramLookupByName(
    self: DwarfInfo,
    compile_unit: CompileUnit,
    abbrev_lookup: AbbrevLookupTable,
    lookup: *SubprogramLookupByName,
) !void {
    var abbrev_it = compile_unit.getAbbrevEntryIterator(self);
    while (try abbrev_it.next(abbrev_lookup)) |entry| switch (entry.tag) {
        dwarf.TAG.subprogram => {
            var attr_it = entry.getAttributeIterator(self, compile_unit.cuh);

            var name: ?[]const u8 = null;
            var low_pc: ?u64 = null;
            var high_pc: ?u64 = null;

            while (try attr_it.next()) |attr| switch (attr.name) {
                dwarf.AT.name => if (attr.getString(self, compile_unit.cuh)) |str| {
                    log.warn("subprogram: {s}", .{str});
                    name = str;
                },
                dwarf.AT.low_pc => {
                    if (attr.getAddr(self, compile_unit.cuh)) |addr| {
                        low_pc = addr;
                    }
                    if (try attr.getConstant(self)) |constant| {
                        low_pc = @intCast(u64, constant);
                    }
                },
                dwarf.AT.high_pc => {
                    if (attr.getAddr(self, compile_unit.cuh)) |addr| {
                        high_pc = addr;
                    }
                    if (try attr.getConstant(self)) |constant| {
                        high_pc = @intCast(u64, constant);
                    }
                },
                else => {},
            };

            if (name == null or low_pc == null or high_pc == null) continue;

            try lookup.putNoClobber(name.?, .{ .addr = low_pc.?, .size = high_pc.? });
        },
        else => {},
    };
}

pub fn genAbbrevLookupByKind(self: DwarfInfo, off: usize, lookup: *AbbrevLookupTable) !void {
    const data = self.debug_abbrev[off..];
    var stream = std.io.fixedBufferStream(data);
    var creader = std.io.countingReader(stream.reader());
    const reader = creader.reader();

    while (true) {
        const kind = try leb.readULEB128(u64, reader);

        if (kind == 0) break;

        const pos = math.cast(usize, creader.bytes_read) orelse return error.Overflow;
        _ = try leb.readULEB128(u64, reader); // TAG
        _ = try reader.readByte(); // CHILDREN

        while (true) {
            const name = try leb.readULEB128(u64, reader);
            const form = try leb.readULEB128(u64, reader);

            if (name == 0 and form == 0) break;
        }

        const next_pos = math.cast(usize, creader.bytes_read) orelse return error.Overflow;

        try lookup.putNoClobber(kind, .{
            .pos = pos,
            .len = next_pos - pos - 2,
        });
    }
}
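// Annotation (not part of the original diff): .debug_abbrev holds a flat
// sequence of declarations, each encoded as ULEB128 code, ULEB128 tag, a
// children byte, then (attribute, form) ULEB128 pairs terminated by (0, 0);
// a code of 0 ends the table. The lookup maps each abbreviation code to the
// byte span of its attribute specs so DIEs in .debug_info can be decoded
// on demand.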

pub const CompileUnit = struct {
    cuh: Header,
    debug_info_off: usize,

    pub const Header = struct {
        is_64bit: bool,
        length: u64,
        version: u16,
        debug_abbrev_offset: u64,
        address_size: u8,

        fn read(reader: anytype) !Header {
            var length: u64 = try reader.readIntLittle(u32);

            const is_64bit = length == 0xffffffff;
            if (is_64bit) {
                length = try reader.readIntLittle(u64);
            }

            const version = try reader.readIntLittle(u16);
            const debug_abbrev_offset = if (is_64bit)
                try reader.readIntLittle(u64)
            else
                try reader.readIntLittle(u32);
            const address_size = try reader.readIntLittle(u8);

            return Header{
                .is_64bit = is_64bit,
                .length = length,
                .version = version,
                .debug_abbrev_offset = debug_abbrev_offset,
                .address_size = address_size,
            };
        }
    };
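    // Annotation (not part of the original diff): an initial length of
    // 0xffffffff is the DWARF64 escape defined by the DWARF spec; the actual
    // 64-bit unit length follows it, and section offsets such as
    // debug_abbrev_offset widen from 4 to 8 bytes in that format.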

    inline fn getDebugInfo(self: CompileUnit, ctx: DwarfInfo) []const u8 {
        return ctx.debug_info[self.debug_info_off..][0..self.cuh.length];
    }

    pub fn getAbbrevEntryIterator(self: CompileUnit, ctx: DwarfInfo) AbbrevEntryIterator {
        return .{ .cu = self, .ctx = ctx };
    }
};

const AbbrevEntryIterator = struct {
    cu: CompileUnit,
    ctx: DwarfInfo,
    pos: usize = 0,

    pub fn next(self: *AbbrevEntryIterator, lookup: AbbrevLookupTable) !?AbbrevEntry {
        if (self.pos + self.cu.debug_info_off >= self.ctx.debug_info.len) return null;

        const debug_info = self.ctx.debug_info[self.pos + self.cu.debug_info_off ..];
        var stream = std.io.fixedBufferStream(debug_info);
        var creader = std.io.countingReader(stream.reader());
        const reader = creader.reader();

        const kind = try leb.readULEB128(u64, reader);
        self.pos += (math.cast(usize, creader.bytes_read) orelse return error.Overflow);

        if (kind == 0) {
            return AbbrevEntry.@"null"();
        }

        const abbrev_pos = lookup.get(kind) orelse return error.MalformedDwarf;
        const len = try findAbbrevEntrySize(
            self.ctx,
            abbrev_pos.pos,
            abbrev_pos.len,
            self.pos + self.cu.debug_info_off,
            self.cu.cuh,
        );
        const entry = try getAbbrevEntry(
            self.ctx,
            abbrev_pos.pos,
            abbrev_pos.len,
            self.pos + self.cu.debug_info_off,
            len,
        );

        self.pos += len;

        return entry;
    }
};

pub const AbbrevEntry = struct {
    tag: u64,
    children: u8,
    debug_abbrev_off: usize,
    debug_abbrev_len: usize,
    debug_info_off: usize,
    debug_info_len: usize,

    fn @"null"() AbbrevEntry {
        return .{
            .tag = 0,
            .children = dwarf.CHILDREN.no,
            .debug_abbrev_off = 0,
            .debug_abbrev_len = 0,
            .debug_info_off = 0,
            .debug_info_len = 0,
        };
    }

    pub fn hasChildren(self: AbbrevEntry) bool {
        return self.children == dwarf.CHILDREN.yes;
    }

    inline fn getDebugInfo(self: AbbrevEntry, ctx: DwarfInfo) []const u8 {
        return ctx.debug_info[self.debug_info_off..][0..self.debug_info_len];
    }

    inline fn getDebugAbbrev(self: AbbrevEntry, ctx: DwarfInfo) []const u8 {
        return ctx.debug_abbrev[self.debug_abbrev_off..][0..self.debug_abbrev_len];
    }

    pub fn getAttributeIterator(self: AbbrevEntry, ctx: DwarfInfo, cuh: CompileUnit.Header) AttributeIterator {
        return .{ .entry = self, .ctx = ctx, .cuh = cuh };
    }
};

pub const Attribute = struct {
    name: u64,
    form: u64,
    debug_info_off: usize,
    debug_info_len: usize,

    inline fn getDebugInfo(self: Attribute, ctx: DwarfInfo) []const u8 {
        return ctx.debug_info[self.debug_info_off..][0..self.debug_info_len];
    }

    pub fn getString(self: Attribute, ctx: DwarfInfo, cuh: CompileUnit.Header) ?[]const u8 {
        if (self.form != dwarf.FORM.strp) return null;
        const debug_info = self.getDebugInfo(ctx);
        const off = if (cuh.is_64bit)
            mem.readIntLittle(u64, debug_info[0..8])
        else
            mem.readIntLittle(u32, debug_info[0..4]);
        return ctx.getString(off);
    }

    pub fn getConstant(self: Attribute, ctx: DwarfInfo) !?i128 {
        const debug_info = self.getDebugInfo(ctx);
        var stream = std.io.fixedBufferStream(debug_info);
        const reader = stream.reader();

        return switch (self.form) {
            dwarf.FORM.data1 => debug_info[0],
            dwarf.FORM.data2 => mem.readIntLittle(u16, debug_info[0..2]),
            dwarf.FORM.data4 => mem.readIntLittle(u32, debug_info[0..4]),
            dwarf.FORM.data8 => mem.readIntLittle(u64, debug_info[0..8]),
            dwarf.FORM.udata => try leb.readULEB128(u64, reader),
            dwarf.FORM.sdata => try leb.readILEB128(i64, reader),
            else => null,
        };
    }

    pub fn getReference(self: Attribute, ctx: DwarfInfo) !?u64 {
        const debug_info = self.getDebugInfo(ctx);
        var stream = std.io.fixedBufferStream(debug_info);
        const reader = stream.reader();

        return switch (self.form) {
            dwarf.FORM.ref1 => debug_info[0],
            dwarf.FORM.ref2 => mem.readIntLittle(u16, debug_info[0..2]),
            dwarf.FORM.ref4 => mem.readIntLittle(u32, debug_info[0..4]),
            dwarf.FORM.ref8 => mem.readIntLittle(u64, debug_info[0..8]),
            dwarf.FORM.ref_udata => try leb.readULEB128(u64, reader),
            else => null,
        };
    }

    pub fn getAddr(self: Attribute, ctx: DwarfInfo, cuh: CompileUnit.Header) ?u64 {
        if (self.form != dwarf.FORM.addr) return null;
        const debug_info = self.getDebugInfo(ctx);
        return switch (cuh.address_size) {
            1 => debug_info[0],
            2 => mem.readIntLittle(u16, debug_info[0..2]),
            4 => mem.readIntLittle(u32, debug_info[0..4]),
            8 => mem.readIntLittle(u64, debug_info[0..8]),
            else => unreachable,
        };
    }
};

const AttributeIterator = struct {
    entry: AbbrevEntry,
    ctx: DwarfInfo,
    cuh: CompileUnit.Header,
    debug_abbrev_pos: usize = 0,
    debug_info_pos: usize = 0,

    pub fn next(self: *AttributeIterator) !?Attribute {
        const debug_abbrev = self.entry.getDebugAbbrev(self.ctx);
        if (self.debug_abbrev_pos >= debug_abbrev.len) return null;

        var stream = std.io.fixedBufferStream(debug_abbrev[self.debug_abbrev_pos..]);
        var creader = std.io.countingReader(stream.reader());
        const reader = creader.reader();

        const name = try leb.readULEB128(u64, reader);
        const form = try leb.readULEB128(u64, reader);

        self.debug_abbrev_pos += (math.cast(usize, creader.bytes_read) orelse return error.Overflow);

        const len = try findFormSize(
            self.ctx,
            form,
            self.debug_info_pos + self.entry.debug_info_off,
            self.cuh,
        );
        const attr = Attribute{
            .name = name,
            .form = form,
            .debug_info_off = self.debug_info_pos + self.entry.debug_info_off,
            .debug_info_len = len,
        };

        self.debug_info_pos += len;

        return attr;
    }
};

fn getAbbrevEntry(self: DwarfInfo, da_off: usize, da_len: usize, di_off: usize, di_len: usize) !AbbrevEntry {
    const debug_abbrev = self.debug_abbrev[da_off..][0..da_len];
    var stream = std.io.fixedBufferStream(debug_abbrev);
    var creader = std.io.countingReader(stream.reader());
    const reader = creader.reader();

    const tag = try leb.readULEB128(u64, reader);
    const children = switch (tag) {
        std.dwarf.TAG.const_type,
        std.dwarf.TAG.packed_type,
        std.dwarf.TAG.pointer_type,
        std.dwarf.TAG.reference_type,
        std.dwarf.TAG.restrict_type,
        std.dwarf.TAG.rvalue_reference_type,
        std.dwarf.TAG.shared_type,
        std.dwarf.TAG.volatile_type,
        => if (creader.bytes_read == da_len) std.dwarf.CHILDREN.no else try reader.readByte(),
        else => try reader.readByte(),
    };

    const pos = math.cast(usize, creader.bytes_read) orelse return error.Overflow;

    return AbbrevEntry{
        .tag = tag,
        .children = children,
        .debug_abbrev_off = pos + da_off,
        .debug_abbrev_len = da_len - pos,
        .debug_info_off = di_off,
        .debug_info_len = di_len,
    };
}

fn findFormSize(self: DwarfInfo, form: u64, di_off: usize, cuh: CompileUnit.Header) !usize {
    const debug_info = self.debug_info[di_off..];
    var stream = std.io.fixedBufferStream(debug_info);
    var creader = std.io.countingReader(stream.reader());
    const reader = creader.reader();

    switch (form) {
        dwarf.FORM.strp => return if (cuh.is_64bit) @sizeOf(u64) else @sizeOf(u32),
        dwarf.FORM.sec_offset => return if (cuh.is_64bit) @sizeOf(u64) else @sizeOf(u32),
        dwarf.FORM.addr => return cuh.address_size,
        dwarf.FORM.exprloc => {
            const expr_len = try leb.readULEB128(u64, reader);
            var i: u64 = 0;
            while (i < expr_len) : (i += 1) {
                _ = try reader.readByte();
            }
            return math.cast(usize, creader.bytes_read) orelse error.Overflow;
        },
        dwarf.FORM.flag_present => return 0,

        dwarf.FORM.data1 => return @sizeOf(u8),
        dwarf.FORM.data2 => return @sizeOf(u16),
        dwarf.FORM.data4 => return @sizeOf(u32),
        dwarf.FORM.data8 => return @sizeOf(u64),
        dwarf.FORM.udata => {
            _ = try leb.readULEB128(u64, reader);
            return math.cast(usize, creader.bytes_read) orelse error.Overflow;
        },
        dwarf.FORM.sdata => {
            _ = try leb.readILEB128(i64, reader);
            return math.cast(usize, creader.bytes_read) orelse error.Overflow;
        },

        dwarf.FORM.ref1 => return @sizeOf(u8),
        dwarf.FORM.ref2 => return @sizeOf(u16),
        dwarf.FORM.ref4 => return @sizeOf(u32),
        dwarf.FORM.ref8 => return @sizeOf(u64),
        dwarf.FORM.ref_udata => {
            _ = try leb.readULEB128(u64, reader);
            return math.cast(usize, creader.bytes_read) orelse error.Overflow;
        },

        else => return error.ToDo,
    }
}

fn findAbbrevEntrySize(self: DwarfInfo, da_off: usize, da_len: usize, di_off: usize, cuh: CompileUnit.Header) !usize {
    const debug_abbrev = self.debug_abbrev[da_off..][0..da_len];
    var stream = std.io.fixedBufferStream(debug_abbrev);
    var creader = std.io.countingReader(stream.reader());
    const reader = creader.reader();

    const tag = try leb.readULEB128(u64, reader);
    switch (tag) {
        std.dwarf.TAG.const_type,
        std.dwarf.TAG.packed_type,
        std.dwarf.TAG.pointer_type,
        std.dwarf.TAG.reference_type,
        std.dwarf.TAG.restrict_type,
        std.dwarf.TAG.rvalue_reference_type,
        std.dwarf.TAG.shared_type,
        std.dwarf.TAG.volatile_type,
        => if (creader.bytes_read != da_len) {
            _ = try reader.readByte();
        },
        else => _ = try reader.readByte(),
    }

    var len: usize = 0;
    while (creader.bytes_read < debug_abbrev.len) {
        _ = try leb.readULEB128(u64, reader);
        const form = try leb.readULEB128(u64, reader);
        const form_len = try self.findFormSize(form, di_off + len, cuh);
        len += form_len;
    }

    return len;
}

fn getString(self: DwarfInfo, off: u64) []const u8 {
    assert(off < self.debug_str.len);
    return mem.sliceTo(@ptrCast([*:0]const u8, self.debug_str.ptr + @intCast(usize, off)), 0);
}
@ -1,3 +1,7 @@
|
||||
//! Represents an input relocatable Object file.
|
||||
//! Each Object is fully loaded into memory for easier
|
||||
//! access into different data within.
|
||||
|
||||
const Object = @This();
|
||||
|
||||
const std = @import("std");
|
||||
@ -14,10 +18,12 @@ const sort = std.sort;
|
||||
const trace = @import("../../tracy.zig").trace;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
const Atom = @import("Atom.zig");
|
||||
const Atom = @import("ZldAtom.zig");
|
||||
const AtomIndex = @import("zld.zig").AtomIndex;
|
||||
const DwarfInfo = @import("DwarfInfo.zig");
|
||||
const LoadCommandIterator = macho.LoadCommandIterator;
|
||||
const MachO = @import("../MachO.zig");
|
||||
const SymbolWithLoc = MachO.SymbolWithLoc;
|
||||
const Zld = @import("zld.zig").Zld;
|
||||
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
|
||||
|
||||
name: []const u8,
|
||||
mtime: u64,
|
||||
@ -30,31 +36,33 @@ header: macho.mach_header_64 = undefined,
|
||||
in_symtab: ?[]align(1) const macho.nlist_64 = null,
|
||||
in_strtab: ?[]const u8 = null,
|
||||
|
||||
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
|
||||
sections: std.ArrayListUnmanaged(macho.section_64) = .{},
|
||||
/// Output symtab is sorted so that we can easily reference symbols following each
|
||||
/// other in address space.
|
||||
/// The length of the symtab is at least of the input symtab length however there
|
||||
/// can be trailing section symbols.
|
||||
symtab: []macho.nlist_64 = undefined,
/// Can be undefined as set together with in_symtab.
source_symtab_lookup: []u32 = undefined,
/// Can be undefined as set together with in_symtab.
strtab_lookup: []u32 = undefined,
/// Can be undefined as set together with in_symtab.
atom_by_index_table: []AtomIndex = undefined,
/// Can be undefined as set together with in_symtab.
globals_lookup: []i64 = undefined,

sections_as_symbols: std.AutoHashMapUnmanaged(u16, u32) = .{},

/// List of atoms that map to the symbols parsed from this object file.
managed_atoms: std.ArrayListUnmanaged(*Atom) = .{},

/// Table of atoms belonging to this object file indexed by the symbol index.
atom_by_index_table: std.AutoHashMapUnmanaged(u32, *Atom) = .{},
atoms: std.ArrayListUnmanaged(AtomIndex) = .{},

pub fn deinit(self: *Object, gpa: Allocator) void {
self.symtab.deinit(gpa);
self.sections.deinit(gpa);
self.sections_as_symbols.deinit(gpa);
self.atom_by_index_table.deinit(gpa);

for (self.managed_atoms.items) |atom| {
atom.deinit(gpa);
gpa.destroy(atom);
}
self.managed_atoms.deinit(gpa);

self.atoms.deinit(gpa);
gpa.free(self.name);
gpa.free(self.contents);
if (self.in_symtab) |_| {
gpa.free(self.source_symtab_lookup);
gpa.free(self.strtab_lookup);
gpa.free(self.symtab);
gpa.free(self.atom_by_index_table);
gpa.free(self.globals_lookup);
}
}

pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) !void {
@ -93,230 +101,245 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
};
while (it.next()) |cmd| {
switch (cmd.cmd()) {
.SEGMENT_64 => {
const segment = cmd.cast(macho.segment_command_64).?;
try self.sections.ensureUnusedCapacity(allocator, segment.nsects);
for (cmd.getSections()) |sect| {
self.sections.appendAssumeCapacity(sect);
}
},
.SYMTAB => {
const symtab = cmd.cast(macho.symtab_command).?;
// Sadly, SYMTAB may be at an unaligned offset within the object file.
self.in_symtab = @ptrCast(
[*]align(1) const macho.nlist_64,
self.contents.ptr + symtab.symoff,
[*]const macho.nlist_64,
@alignCast(@alignOf(macho.nlist_64), &self.contents[symtab.symoff]),
)[0..symtab.nsyms];
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
try self.symtab.appendUnalignedSlice(allocator, self.in_symtab.?);

const nsects = self.getSourceSections().len;

self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
self.source_symtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
self.strtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
self.globals_lookup = try allocator.alloc(i64, self.in_symtab.?.len);
self.atom_by_index_table = try allocator.alloc(AtomIndex, self.in_symtab.?.len + nsects);

for (self.symtab) |*sym| {
sym.* = .{
.n_value = 0,
.n_sect = 0,
.n_desc = 0,
.n_strx = 0,
.n_type = 0,
};
}

mem.set(i64, self.globals_lookup, -1);
mem.set(AtomIndex, self.atom_by_index_table, 0);

// You would expect that the symbol table is at least pre-sorted based on symbol's type:
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
// the Go compiler does not necessarily respect that, therefore we sort immediately by type
// and by address within each group.
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
defer sorted_all_syms.deinit();

for (self.in_symtab.?) |_, index| {
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
}

// We sort by type: defined < undefined, and
// afterwards by address in each group. Normally, dysymtab should
// be enough to guarantee the sort, but turns out not every compiler
// is kind enough to specify the symbols in the correct order.
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);

for (sorted_all_syms.items) |sym_id, i| {
const sym = sym_id.getSymbol(self);

self.symtab[i] = sym;
self.source_symtab_lookup[i] = sym_id.index;

const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
self.strtab_lookup[i] = @intCast(u32, sym_name_len);
}
},
else => {},
}
}
}

const Context = struct {
object: *const Object,
};

const SymbolAtIndex = struct {
index: u32,

const Context = *const Object;

fn getSymbol(self: SymbolAtIndex, ctx: Context) macho.nlist_64 {
return ctx.object.getSourceSymbol(self.index).?;
return ctx.in_symtab.?[self.index];
}

fn getSymbolName(self: SymbolAtIndex, ctx: Context) []const u8 {
const sym = self.getSymbol(ctx);
return ctx.object.getString(sym.n_strx);
const off = self.getSymbol(ctx).n_strx;
return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0);
}

/// Returns whether lhs is less than rhs by allocated address in object file.
/// Undefined symbols are pushed to the back (always evaluate to true).
/// Performs lexicographic-like check.
/// * lhs and rhs defined
/// * if lhs == rhs
/// * if lhs.n_sect == rhs.n_sect
/// * ext < weak < local < temp
/// * lhs.n_sect < rhs.n_sect
/// * lhs < rhs
/// * !rhs is undefined
fn lessThan(ctx: Context, lhs_index: SymbolAtIndex, rhs_index: SymbolAtIndex) bool {
const lhs = lhs_index.getSymbol(ctx);
const rhs = rhs_index.getSymbol(ctx);
if (lhs.sect()) {
if (rhs.sect()) {
// Same group, sort by address.
return lhs.n_value < rhs.n_value;
} else {
return true;
}
} else {
if (lhs.sect() and rhs.sect()) {
if (lhs.n_value == rhs.n_value) {
if (lhs.n_sect == rhs.n_sect) {
if (lhs.ext() and rhs.ext()) {
if ((lhs.pext() or lhs.weakDef()) and (rhs.pext() or rhs.weakDef())) {
return false;
} else return rhs.pext() or rhs.weakDef();
} else {
const lhs_name = lhs_index.getSymbolName(ctx);
const lhs_temp = mem.startsWith(u8, lhs_name, "l") or mem.startsWith(u8, lhs_name, "L");
const rhs_name = rhs_index.getSymbolName(ctx);
const rhs_temp = mem.startsWith(u8, rhs_name, "l") or mem.startsWith(u8, rhs_name, "L");
if (lhs_temp and rhs_temp) {
return false;
} else return rhs_temp;
}
} else return lhs.n_sect < rhs.n_sect;
} else return lhs.n_value < rhs.n_value;
} else if (lhs.undf() and rhs.undf()) {
return false;
}
} else return rhs.undf();
}

/// Returns whether lhs is less senior than rhs. The rules are:
/// 1. ext
/// 2. weak
/// 3. local
/// 4. temp (local starting with `l` prefix).
fn lessThanBySeniority(ctx: Context, lhs_index: SymbolAtIndex, rhs_index: SymbolAtIndex) bool {
const lhs = lhs_index.getSymbol(ctx);
const rhs = rhs_index.getSymbol(ctx);
if (!rhs.ext()) {
const lhs_name = lhs_index.getSymbolName(ctx);
return mem.startsWith(u8, lhs_name, "l") or mem.startsWith(u8, lhs_name, "L");
} else if (rhs.pext() or rhs.weakDef()) {
return !lhs.ext();
} else {
return false;
}
}

/// Like lessThanBySeniority but negated.
fn greaterThanBySeniority(ctx: Context, lhs_index: SymbolAtIndex, rhs_index: SymbolAtIndex) bool {
return !lessThanBySeniority(ctx, lhs_index, rhs_index);
fn lessThanByNStrx(ctx: Context, lhs: SymbolAtIndex, rhs: SymbolAtIndex) bool {
return lhs.getSymbol(ctx).n_strx < rhs.getSymbol(ctx).n_strx;
}
};

fn filterSymbolsByAddress(
indexes: []SymbolAtIndex,
start_addr: u64,
end_addr: u64,
ctx: Context,
) []SymbolAtIndex {
const Predicate = struct {
addr: u64,
ctx: Context,
fn filterSymbolsBySection(symbols: []macho.nlist_64, n_sect: u8) struct {
index: u32,
len: u32,
} {
const FirstMatch = struct {
n_sect: u8,

pub fn predicate(pred: @This(), index: SymbolAtIndex) bool {
return index.getSymbol(pred.ctx).n_value >= pred.addr;
pub fn predicate(pred: @This(), symbol: macho.nlist_64) bool {
return symbol.n_sect == pred.n_sect;
}
};
const FirstNonMatch = struct {
n_sect: u8,

pub fn predicate(pred: @This(), symbol: macho.nlist_64) bool {
return symbol.n_sect != pred.n_sect;
}
};

const start = MachO.findFirst(SymbolAtIndex, indexes, 0, Predicate{
const index = @import("zld.zig").lsearch(macho.nlist_64, symbols, FirstMatch{
.n_sect = n_sect,
});
const len = @import("zld.zig").lsearch(macho.nlist_64, symbols[index..], FirstNonMatch{
.n_sect = n_sect,
});

return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
}

fn filterSymbolsByAddress(symbols: []macho.nlist_64, n_sect: u8, start_addr: u64, end_addr: u64) struct {
index: u32,
len: u32,
} {
const Predicate = struct {
addr: u64,
n_sect: u8,

pub fn predicate(pred: @This(), symbol: macho.nlist_64) bool {
return symbol.n_value >= pred.addr;
}
};

const index = @import("zld.zig").lsearch(macho.nlist_64, symbols, Predicate{
.addr = start_addr,
.ctx = ctx,
.n_sect = n_sect,
});
const end = MachO.findFirst(SymbolAtIndex, indexes, start, Predicate{
const len = @import("zld.zig").lsearch(macho.nlist_64, symbols[index..], Predicate{
.addr = end_addr,
.ctx = ctx,
.n_sect = n_sect,
});

return indexes[start..end];
return .{ .index = @intCast(u32, index), .len = @intCast(u32, len) };
}
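// Editor's note: `lsearch` is defined in zld.zig and is not shown in this diff. As a
// minimal sketch (an assumption about its shape inferred from the call sites above,
// not the actual implementation), a predicate-based linear search could look like:
//
//     fn lsearch(comptime T: type, haystack: []align(1) const T, predicate: anytype) usize {
//         for (haystack) |item, i| {
//             if (predicate.predicate(item)) return i;
//         }
//         return haystack.len;
//     }
//
// filterSymbolsBySection then computes the run of symbols belonging to a section by
// calling it twice: FirstMatch finds where the run starts, and FirstNonMatch on the
// tail slice finds the run's length.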

fn filterRelocs(
relocs: []align(1) const macho.relocation_info,
start_addr: u64,
end_addr: u64,
) []align(1) const macho.relocation_info {
const Predicate = struct {
addr: u64,
const SortedSection = struct {
header: macho.section_64,
id: u8,
};

pub fn predicate(self: @This(), rel: macho.relocation_info) bool {
return rel.r_address < self.addr;
}
};

const start = MachO.findFirst(macho.relocation_info, relocs, 0, Predicate{ .addr = end_addr });
const end = MachO.findFirst(macho.relocation_info, relocs, start, Predicate{ .addr = start_addr });

return relocs[start..end];
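// Editor's note: searching for the window start with `end_addr` (and for the window
// end with `start_addr`) works because this code assumes relocation entries within a
// section are ordered by descending r_address; the first entry with
// r_address < end_addr opens the half-open window, and the first entry with
// r_address < start_addr closes it.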
fn sectionLessThanByAddress(ctx: void, lhs: SortedSection, rhs: SortedSection) bool {
_ = ctx;
if (lhs.header.addr == rhs.header.addr) {
return lhs.id < rhs.id;
}
return lhs.header.addr < rhs.header.addr;
}

pub fn scanInputSections(self: Object, macho_file: *MachO) !void {
for (self.sections.items) |sect| {
const sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
/// Splits input sections into Atoms.
/// If the Object was compiled with `MH_SUBSECTIONS_VIA_SYMBOLS`, splits section
/// into subsections where each subsection then represents an Atom.
pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
const gpa = zld.gpa;

log.debug("splitting object({d}, {s}) into atoms", .{ object_id, self.name });

const sections = self.getSourceSections();
for (sections) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try zld.getOutputSection(sect)) orelse {
log.debug(" unhandled section '{s},{s}'", .{ sect.segName(), sect.sectName() });
continue;
};
const output = macho_file.sections.items(.header)[sect_id];
log.debug("mapping '{s},{s}' into output sect({d}, '{s},{s}')", .{
sect.segName(),
sect.sectName(),
sect_id + 1,
output.segName(),
output.sectName(),
});
if (sect.size == 0) continue;

const sect_id = @intCast(u8, id);
const sym = self.getSectionAliasSymbolPtr(sect_id);
sym.* = .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
};
}
}

/// Splits object into atoms assuming one-shot linking mode.
pub fn splitIntoAtoms(self: *Object, macho_file: *MachO, object_id: u32) !void {
assert(macho_file.mode == .one_shot);

const tracy = trace(@src());
defer tracy.end();

const gpa = macho_file.base.allocator;

log.debug("splitting object({d}, {s}) into atoms: one-shot mode", .{ object_id, self.name });

const in_symtab = self.in_symtab orelse {
for (self.sections.items) |sect, id| {
if (self.in_symtab == null) {
for (sections) |sect, id| {
if (sect.isDebug()) continue;
const out_sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
continue;
};
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;
if (sect.size == 0) continue;

const sect_id = @intCast(u8, id);
const sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
const sym_index = @intCast(u32, self.symtab.items.len);
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
try self.sections_as_symbols.putNoClobber(gpa, sect_id, sym_index);
break :blk sym_index;
};
const code: ?[]const u8 = if (!sect.isZerofill()) try self.getSectionContents(sect) else null;
const relocs = @ptrCast(
[*]align(1) const macho.relocation_info,
self.contents.ptr + sect.reloff,
)[0..sect.nreloc];
const atom = try self.createAtomFromSubsection(
macho_file,
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_index = try self.createAtomFromSubsection(
zld,
object_id,
sym_index,
0,
0,
sect.size,
sect.@"align",
code,
relocs,
&.{},
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom);
zld.addAtomToSection(atom_index);
}
return;
};

// You would expect that the symbol table is at least pre-sorted based on symbol's type:
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
// the Go compiler does not necessarily respect that, therefore we sort immediately by type
// and by address within each group.
const context = Context{
.object = self,
};
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(gpa, in_symtab.len);
defer sorted_all_syms.deinit();

for (in_symtab) |_, index| {
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
}

// We sort by type: defined < undefined, and
// afterwards by address in each group. Normally, dysymtab should
// be enough to guarantee the sort, but turns out not every compiler
// is kind enough to specify the symbols in the correct order.
sort.sort(SymbolAtIndex, sorted_all_syms.items, context, SymbolAtIndex.lessThan);

// Well, shit, sometimes compilers skip the dysymtab load command altogether, meaning we
// have to infer the start of undef section in the symtab ourselves.
const iundefsym = blk: {
const dysymtab = self.parseDysymtab() orelse {
var iundefsym: usize = sorted_all_syms.items.len;
var iundefsym: usize = self.in_symtab.?.len;
while (iundefsym > 0) : (iundefsym -= 1) {
const sym = sorted_all_syms.items[iundefsym - 1].getSymbol(context);
const sym = self.symtab[iundefsym - 1];
if (sym.sect()) break;
}
break :blk iundefsym;
@ -325,271 +348,210 @@ pub fn splitIntoAtoms(self: *Object, macho_file: *MachO, object_id: u32) !void {
};

// We only care about defined symbols, so filter every other out.
const sorted_syms = sorted_all_syms.items[0..iundefsym];
const symtab = try gpa.dupe(macho.nlist_64, self.symtab[0..iundefsym]);
defer gpa.free(symtab);

const subsections_via_symbols = self.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0;

for (self.sections.items) |sect, id| {
// Sort section headers by address.
var sorted_sections = try gpa.alloc(SortedSection, sections.len);
defer gpa.free(sorted_sections);

for (sections) |sect, id| {
sorted_sections[id] = .{ .header = sect, .id = @intCast(u8, id) };
}

std.sort.sort(SortedSection, sorted_sections, {}, sectionLessThanByAddress);

var sect_sym_index: u32 = 0;
for (sorted_sections) |section| {
const sect = section.header;
if (sect.isDebug()) continue;

const sect_id = @intCast(u8, id);
const sect_id = section.id;
log.debug("splitting section '{s},{s}' into atoms", .{ sect.segName(), sect.sectName() });

// Get matching segment/section in the final artifact.
const out_sect_id = (try macho_file.getOutputSection(sect)) orelse {
log.debug(" unhandled section", .{});
continue;
};
// Get output segment/section in the final artifact.
const out_sect_id = (try zld.getOutputSection(sect)) orelse continue;

log.debug(" output sect({d}, '{s},{s}')", .{
out_sect_id + 1,
macho_file.sections.items(.header)[out_sect_id].segName(),
macho_file.sections.items(.header)[out_sect_id].sectName(),
zld.sections.items(.header)[out_sect_id].segName(),
zld.sections.items(.header)[out_sect_id].sectName(),
});

const cpu_arch = macho_file.base.options.target.cpu.arch;
const cpu_arch = zld.options.target.cpu.arch;
const sect_loc = filterSymbolsBySection(symtab[sect_sym_index..], sect_id + 1);
const sect_start_index = sect_sym_index + sect_loc.index;

// Read section's code
const code: ?[]const u8 = if (!sect.isZerofill()) try self.getSectionContents(sect) else null;
sect_sym_index += sect_loc.len;

// Read section's list of relocations
const relocs = @ptrCast(
[*]align(1) const macho.relocation_info,
self.contents.ptr + sect.reloff,
)[0..sect.nreloc];

// Symbols within this section only.
const filtered_syms = filterSymbolsByAddress(
sorted_syms,
sect.addr,
sect.addr + sect.size,
context,
);

if (subsections_via_symbols and filtered_syms.len > 0) {
if (sect.size == 0) continue;
if (subsections_via_symbols and sect_loc.len > 0) {
// If the first nlist does not match the start of the section,
// then we need to encapsulate the memory range [section start, first symbol)
// as a temporary symbol and insert the matching Atom.
const first_sym = filtered_syms[0].getSymbol(context);
const first_sym = symtab[sect_start_index];
if (first_sym.n_value > sect.addr) {
const sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
const sym_index = @intCast(u32, self.symtab.items.len);
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
try self.sections_as_symbols.putNoClobber(gpa, sect_id, sym_index);
break :blk sym_index;
};
const sym_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_size = first_sym.n_value - sect.addr;
const atom_code: ?[]const u8 = if (code) |cc| blk: {
const size = math.cast(usize, atom_size) orelse return error.Overflow;
break :blk cc[0..size];
} else null;
const atom = try self.createAtomFromSubsection(
macho_file,
const atom_index = try self.createAtomFromSubsection(
zld,
object_id,
sym_index,
0,
0,
atom_size,
sect.@"align",
atom_code,
relocs,
&.{},
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom);
zld.addAtomToSection(atom_index);
}

var next_sym_count: usize = 0;
while (next_sym_count < filtered_syms.len) {
const next_sym = filtered_syms[next_sym_count].getSymbol(context);
var next_sym_index = sect_start_index;
while (next_sym_index < sect_start_index + sect_loc.len) {
const next_sym = symtab[next_sym_index];
const addr = next_sym.n_value;
const atom_syms = filterSymbolsByAddress(
filtered_syms[next_sym_count..],
const atom_loc = filterSymbolsByAddress(
symtab[next_sym_index..],
sect_id + 1,
addr,
addr + 1,
context,
);
next_sym_count += atom_syms.len;
assert(atom_loc.len > 0);
const atom_sym_index = atom_loc.index + next_sym_index;
const nsyms_trailing = atom_loc.len - 1;
next_sym_index += atom_loc.len;

// We want to bubble up the first externally defined symbol here.
assert(atom_syms.len > 0);
var sorted_atom_syms = std.ArrayList(SymbolAtIndex).init(gpa);
defer sorted_atom_syms.deinit();
try sorted_atom_syms.appendSlice(atom_syms);
sort.sort(
SymbolAtIndex,
sorted_atom_syms.items,
context,
SymbolAtIndex.greaterThanBySeniority,
);
// TODO: We want to bubble up the first externally defined symbol here.
const atom_size = if (next_sym_index < sect_start_index + sect_loc.len)
symtab[next_sym_index].n_value - addr
else
sect.addr + sect.size - addr;

const atom_size = blk: {
const end_addr = if (next_sym_count < filtered_syms.len)
filtered_syms[next_sym_count].getSymbol(context).n_value
else
sect.addr + sect.size;
break :blk end_addr - addr;
};
const atom_code: ?[]const u8 = if (code) |cc| blk: {
const start = math.cast(usize, addr - sect.addr) orelse return error.Overflow;
const size = math.cast(usize, atom_size) orelse return error.Overflow;
break :blk cc[start..][0..size];
} else null;
const atom_align = if (addr > 0)
math.min(@ctz(addr), sect.@"align")
else
sect.@"align";
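// Editor's note: `@ctz(addr)` counts the trailing zero bits of the symbol's address,
// i.e. the largest power-of-two alignment that address provably satisfies; capping it
// with `sect.@"align"` keeps an atom's alignment no stricter than its section's. For
// example, an atom at addr 0x1030 has 4 trailing zero bits, so at most 2^4 = 16-byte
// alignment can be inferred.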
const atom = try self.createAtomFromSubsection(
macho_file,

const atom_index = try self.createAtomFromSubsection(
zld,
object_id,
sorted_atom_syms.items[0].index,
atom_sym_index,
atom_sym_index + 1,
nsyms_trailing,
atom_size,
atom_align,
atom_code,
relocs,
sorted_atom_syms.items[1..],
out_sect_id,
sect,
);

// TODO rework this at the relocation level
if (cpu_arch == .x86_64 and addr == sect.addr) {
// In x86_64 relocs, it can happen that the compiler refers to the same
// atom by both the actual assigned symbol and the start of the section. In this
// case, we need to link the two together, so add an alias.
const alias = self.sections_as_symbols.get(sect_id) orelse blk: {
const alias = @intCast(u32, self.symtab.items.len);
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = addr,
});
try self.sections_as_symbols.putNoClobber(gpa, sect_id, alias);
break :blk alias;
};
try atom.contained.append(gpa, .{
.sym_index = alias,
.offset = 0,
});
try self.atom_by_index_table.put(gpa, alias, atom);
const alias_index = self.getSectionAliasSymbolIndex(sect_id);
self.atom_by_index_table[alias_index] = atom_index;
}

try macho_file.addAtomToSection(atom);
zld.addAtomToSection(atom_index);
}
} else {
// If there is no symbol to refer to this atom, we create
// a temp one, unless we already did that when working out the relocations
// of other atoms.
const sym_index = self.sections_as_symbols.get(sect_id) orelse blk: {
const sym_index = @intCast(u32, self.symtab.items.len);
try self.symtab.append(gpa, .{
.n_strx = 0,
.n_type = macho.N_SECT,
.n_sect = out_sect_id + 1,
.n_desc = 0,
.n_value = sect.addr,
});
try self.sections_as_symbols.putNoClobber(gpa, sect_id, sym_index);
break :blk sym_index;
};
const atom = try self.createAtomFromSubsection(
macho_file,
const alias_index = self.getSectionAliasSymbolIndex(sect_id);
const atom_index = try self.createAtomFromSubsection(
zld,
object_id,
sym_index,
alias_index,
sect_start_index,
sect_loc.len,
sect.size,
sect.@"align",
code,
relocs,
filtered_syms,
out_sect_id,
sect,
);
try macho_file.addAtomToSection(atom);
zld.addAtomToSection(atom_index);
}
}
}

fn createAtomFromSubsection(
self: *Object,
macho_file: *MachO,
object_id: u32,
zld: *Zld,
object_id: u31,
sym_index: u32,
inner_sym_index: u32,
inner_nsyms_trailing: u32,
size: u64,
alignment: u32,
code: ?[]const u8,
relocs: []align(1) const macho.relocation_info,
indexes: []const SymbolAtIndex,
out_sect_id: u8,
sect: macho.section_64,
) !*Atom {
const gpa = macho_file.base.allocator;
const sym = self.symtab.items[sym_index];
const atom = try MachO.createEmptyAtom(gpa, sym_index, size, alignment);
) !AtomIndex {
const gpa = zld.gpa;
const atom_index = try zld.createEmptyAtom(sym_index, size, alignment);
const atom = zld.getAtomPtr(atom_index);
atom.inner_sym_index = inner_sym_index;
atom.inner_nsyms_trailing = inner_nsyms_trailing;
atom.file = object_id;
self.symtab.items[sym_index].n_sect = out_sect_id + 1;
self.symtab[sym_index].n_sect = out_sect_id + 1;

log.debug("creating ATOM(%{d}, '{s}') in sect({d}, '{s},{s}') in object({d})", .{
sym_index,
self.getString(sym.n_strx),
self.getSymbolName(sym_index),
out_sect_id + 1,
macho_file.sections.items(.header)[out_sect_id].segName(),
macho_file.sections.items(.header)[out_sect_id].sectName(),
zld.sections.items(.header)[out_sect_id].segName(),
zld.sections.items(.header)[out_sect_id].sectName(),
object_id,
});

try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
try self.managed_atoms.append(gpa, atom);
try self.atoms.append(gpa, atom_index);
self.atom_by_index_table[sym_index] = atom_index;

if (code) |cc| {
assert(size == cc.len);
mem.copy(u8, atom.code.items, cc);
var it = Atom.getInnerSymbolsIterator(zld, atom_index);
while (it.next()) |sym_loc| {
const inner = zld.getSymbolPtr(sym_loc);
inner.n_sect = out_sect_id + 1;
self.atom_by_index_table[sym_loc.sym_index] = atom_index;
}

const base_offset = sym.n_value - sect.addr;
const filtered_relocs = filterRelocs(relocs, base_offset, base_offset + size);
try atom.parseRelocs(filtered_relocs, .{
.macho_file = macho_file,
.base_addr = sect.addr,
.base_offset = @intCast(i32, base_offset),
});

// Since this atom gets a helper local temporary symbol that didn't exist
// in the object file and which encompasses the entire section, we need to traverse
// the filtered symbols and note which symbol is contained within, so that
// we can properly allocate addresses down the line.
// While we're at it, we need to update the segment,section mapping of each symbol too.
try atom.contained.ensureTotalCapacity(gpa, indexes.len);
for (indexes) |inner_sym_index| {
const inner_sym = &self.symtab.items[inner_sym_index.index];
inner_sym.n_sect = out_sect_id + 1;
atom.contained.appendAssumeCapacity(.{
.sym_index = inner_sym_index.index,
.offset = inner_sym.n_value - sym.n_value,
});

try self.atom_by_index_table.putNoClobber(gpa, inner_sym_index.index, atom);
}

return atom;
return atom_index;
}

pub fn getSourceSymbol(self: Object, index: u32) ?macho.nlist_64 {
const symtab = self.in_symtab.?;
if (index >= symtab.len) return null;
return symtab[index];
const mapped_index = self.source_symtab_lookup[index];
return symtab[mapped_index];
}

/// Expects an arena allocator.
/// Caller owns memory.
pub fn createReverseSymbolLookup(self: Object, arena: Allocator) ![]u32 {
const symtab = self.in_symtab orelse return &[0]u32{};
const lookup = try arena.alloc(u32, symtab.len);
for (self.source_symtab_lookup) |source_id, id| {
lookup[source_id] = @intCast(u32, id);
}
return lookup;
}

pub fn getSourceSection(self: Object, index: u16) macho.section_64 {
assert(index < self.sections.items.len);
return self.sections.items[index];
const sections = self.getSourceSections();
assert(index < sections.len);
return sections[index];
}

pub fn parseDataInCode(self: Object) ?[]align(1) const macho.data_in_code_entry {
pub fn getSourceSections(self: Object) []const macho.section_64 {
var it = LoadCommandIterator{
.ncmds = self.header.ncmds,
.buffer = self.contents[@sizeOf(macho.mach_header_64)..][0..self.header.sizeofcmds],
};
while (it.next()) |cmd| switch (cmd.cmd()) {
.SEGMENT_64 => {
return cmd.getSections();
},
else => {},
} else unreachable;
}

pub fn parseDataInCode(self: Object) ?[]const macho.data_in_code_entry {
var it = LoadCommandIterator{
.ncmds = self.header.ncmds,
.buffer = self.contents[@sizeOf(macho.mach_header_64)..][0..self.header.sizeofcmds],
@ -600,8 +562,8 @@ pub fn parseDataInCode(self: Object) ?[]align(1) const macho.data_in_code_entry
const dice = cmd.cast(macho.linkedit_data_command).?;
const ndice = @divExact(dice.datasize, @sizeOf(macho.data_in_code_entry));
return @ptrCast(
[*]align(1) const macho.data_in_code_entry,
self.contents.ptr + dice.dataoff,
[*]const macho.data_in_code_entry,
@alignCast(@alignOf(macho.data_in_code_entry), &self.contents[dice.dataoff]),
)[0..ndice];
},
else => {},
@ -624,73 +586,66 @@ fn parseDysymtab(self: Object) ?macho.dysymtab_command {
} else return null;
}

pub fn parseDwarfInfo(self: Object) error{Overflow}!dwarf.DwarfInfo {
var di = dwarf.DwarfInfo{
.endian = .Little,
pub fn parseDwarfInfo(self: Object) DwarfInfo {
var di = DwarfInfo{
.debug_info = &[0]u8{},
.debug_abbrev = &[0]u8{},
.debug_str = &[0]u8{},
.debug_str_offsets = &[0]u8{},
.debug_line = &[0]u8{},
.debug_line_str = &[0]u8{},
.debug_ranges = &[0]u8{},
.debug_loclists = &[0]u8{},
.debug_rnglists = &[0]u8{},
.debug_addr = &[0]u8{},
.debug_names = &[0]u8{},
.debug_frame = &[0]u8{},
};
for (self.sections.items) |sect| {
const segname = sect.segName();
for (self.getSourceSections()) |sect| {
if (!sect.isDebug()) continue;
const sectname = sect.sectName();
if (mem.eql(u8, segname, "__DWARF")) {
if (mem.eql(u8, sectname, "__debug_info")) {
di.debug_info = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_abbrev")) {
di.debug_abbrev = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_str")) {
di.debug_str = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_str_offsets")) {
di.debug_str_offsets = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_line")) {
di.debug_line = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_line_str")) {
di.debug_line_str = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_ranges")) {
di.debug_ranges = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_loclists")) {
di.debug_loclists = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_rnglists")) {
di.debug_rnglists = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_addr")) {
di.debug_addr = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_names")) {
di.debug_names = try self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_frame")) {
di.debug_frame = try self.getSectionContents(sect);
}
if (mem.eql(u8, sectname, "__debug_info")) {
di.debug_info = self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_abbrev")) {
di.debug_abbrev = self.getSectionContents(sect);
} else if (mem.eql(u8, sectname, "__debug_str")) {
di.debug_str = self.getSectionContents(sect);
}
}
return di;
}

pub fn getSectionContents(self: Object, sect: macho.section_64) error{Overflow}![]const u8 {
const size = math.cast(usize, sect.size) orelse return error.Overflow;
log.debug("getting {s},{s} data at 0x{x} - 0x{x}", .{
sect.segName(),
sect.sectName(),
sect.offset,
sect.offset + sect.size,
});
pub fn getSectionContents(self: Object, sect: macho.section_64) []const u8 {
const size = @intCast(usize, sect.size);
return self.contents[sect.offset..][0..size];
}

pub fn getString(self: Object, off: u32) []const u8 {
const strtab = self.in_strtab.?;
assert(off < strtab.len);
return mem.sliceTo(@ptrCast([*:0]const u8, strtab.ptr + off), 0);
pub fn getSectionAliasSymbolIndex(self: Object, sect_id: u8) u32 {
const start = @intCast(u32, self.in_symtab.?.len);
return start + sect_id;
}

pub fn getAtomForSymbol(self: Object, sym_index: u32) ?*Atom {
return self.atom_by_index_table.get(sym_index);
pub fn getSectionAliasSymbol(self: *Object, sect_id: u8) macho.nlist_64 {
return self.symtab[self.getSectionAliasSymbolIndex(sect_id)];
}

pub fn getSectionAliasSymbolPtr(self: *Object, sect_id: u8) *macho.nlist_64 {
return &self.symtab[self.getSectionAliasSymbolIndex(sect_id)];
}

pub fn getRelocs(self: Object, sect: macho.section_64) []align(1) const macho.relocation_info {
if (sect.nreloc == 0) return &[0]macho.relocation_info{};
return @ptrCast([*]align(1) const macho.relocation_info, self.contents.ptr + sect.reloff)[0..sect.nreloc];
}

pub fn getSymbolName(self: Object, index: u32) []const u8 {
const strtab = self.in_strtab.?;
const sym = self.symtab[index];

if (self.getSourceSymbol(index) == null) {
assert(sym.n_strx == 0);
return "";
}

const start = sym.n_strx;
const len = self.strtab_lookup[index];

return strtab[start..][0 .. len - 1 :0];
}

pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex {
const atom_index = self.atom_by_index_table[sym_index];
if (atom_index == 0) return null;
return atom_index;
}

@ -47,7 +47,6 @@ pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
else => unreachable,
}
if (macho_file.getStubsAtomForSymbol(self.target)) |stubs_atom| return stubs_atom;
if (macho_file.getTlvPtrAtomForSymbol(self.target)) |tlv_ptr_atom| return tlv_ptr_atom;
return macho_file.getAtomForSymbol(self.target);
}


@ -108,7 +108,7 @@ pub const Node = struct {
.label = to_label,
});

return if (match == label.len) to_node else mid.put(allocator, label[match..]);
return if (match == label.len) mid else mid.put(allocator, label[match..]);
}

// Add a new node.
@ -489,6 +489,21 @@ test "Trie basic" {
}
}

fn expectEqualHexStrings(expected: []const u8, given: []const u8) !void {
assert(expected.len > 0);
if (mem.eql(u8, expected, given)) return;
const expected_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(expected)});
defer testing.allocator.free(expected_fmt);
const given_fmt = try std.fmt.allocPrint(testing.allocator, "{x}", .{std.fmt.fmtSliceHexLower(given)});
defer testing.allocator.free(given_fmt);
const idx = mem.indexOfDiff(u8, expected_fmt, given_fmt).?;
var padding = try testing.allocator.alloc(u8, idx + 5);
defer testing.allocator.free(padding);
mem.set(u8, padding, ' ');
std.debug.print("\nEXP: {s}\nGIV: {s}\n{s}^ -- first differing byte\n", .{ expected_fmt, given_fmt, padding });
return error.TestFailed;
}

test "write Trie to a byte stream" {
var gpa = testing.allocator;
var trie: Trie = .{};
@ -523,16 +538,14 @@ test "write Trie to a byte stream" {
defer gpa.free(buffer);
var stream = std.io.fixedBufferStream(buffer);
{
const nwritten = try trie.write(stream.writer());
try testing.expect(nwritten == trie.size);
try testing.expect(mem.eql(u8, buffer, &exp_buffer));
_ = try trie.write(stream.writer());
try expectEqualHexStrings(&exp_buffer, buffer);
}
{
// Writing finalized trie again should yield the same result.
try stream.seekTo(0);
const nwritten = try trie.write(stream.writer());
try testing.expect(nwritten == trie.size);
try testing.expect(mem.eql(u8, buffer, &exp_buffer));
_ = try trie.write(stream.writer());
try expectEqualHexStrings(&exp_buffer, buffer);
}
}

@ -562,8 +575,37 @@ test "parse Trie from byte stream" {
var out_buffer = try gpa.alloc(u8, trie.size);
defer gpa.free(out_buffer);
var out_stream = std.io.fixedBufferStream(out_buffer);
const nwritten = try trie.write(out_stream.writer());

try testing.expect(nwritten == trie.size);
try testing.expect(mem.eql(u8, &in_buffer, out_buffer));
_ = try trie.write(out_stream.writer());
try expectEqualHexStrings(&in_buffer, out_buffer);
}

test "ordering bug" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);

try trie.put(gpa, .{
.name = "_asStr",
.vmaddr_offset = 0x558,
.export_flags = 0,
});
try trie.put(gpa, .{
.name = "_a",
.vmaddr_offset = 0x8008,
.export_flags = 0,
});
try trie.finalize(gpa);

const exp_buffer = [_]u8{
0x00, 0x01, 0x5F, 0x61, 0x00, 0x06, 0x04, 0x00,
0x88, 0x80, 0x02, 0x01, 0x73, 0x53, 0x74, 0x72,
0x00, 0x12, 0x03, 0x00, 0xD8, 0x0A, 0x00,
};

var buffer = try gpa.alloc(u8, trie.size);
defer gpa.free(buffer);
var stream = std.io.fixedBufferStream(buffer);
// Writing finalized trie again should yield the same result.
_ = try trie.write(stream.writer());
try expectEqualHexStrings(&exp_buffer, buffer);
}
1057 src/link/MachO/ZldAtom.zig Normal file
File diff suppressed because it is too large
@ -1,3 +1,5 @@
//! An algorithm for dead stripping of unreferenced Atoms.

const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.dead_strip);
@ -6,93 +8,105 @@ const math = std.math;
const mem = std.mem;

const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");
const MachO = @import("../MachO.zig");
const AtomIndex = @import("zld.zig").AtomIndex;
const Atom = @import("ZldAtom.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const Zld = @import("zld.zig").Zld;

pub fn gcAtoms(macho_file: *MachO) !void {
const gpa = macho_file.base.allocator;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const N_DEAD = @import("zld.zig").N_DEAD;

var roots = std.AutoHashMap(*Atom, void).init(arena);
try collectRoots(&roots, macho_file);
const AtomTable = std.AutoHashMap(AtomIndex, void);

var alive = std.AutoHashMap(*Atom, void).init(arena);
try mark(roots, &alive, macho_file);
pub fn gcAtoms(zld: *Zld, reverse_lookups: [][]u32) !void {
const gpa = zld.gpa;

try prune(arena, alive, macho_file);
var arena = std.heap.ArenaAllocator.init(gpa);
defer arena.deinit();

var roots = AtomTable.init(arena.allocator());
try roots.ensureUnusedCapacity(@intCast(u32, zld.globals.items.len));

var alive = AtomTable.init(arena.allocator());
try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len));

try collectRoots(zld, &roots);
try mark(zld, roots, &alive, reverse_lookups);
try prune(zld, alive);
}
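// Editor's note: the rewritten driver is a classic mark-and-sweep over atom indices:
// collectRoots seeds the table (the entrypoint or the exported globals, plus sections
// that may never be dead-stripped), mark transitively follows relocations from each
// root, and prune unlinks every atom that was never reached, tombstoning its symbols
// with N_DEAD.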
|
||||
fn removeAtomFromSection(atom: *Atom, match: u8, macho_file: *MachO) void {
|
||||
var section = macho_file.sections.get(match);
|
||||
fn collectRoots(zld: *Zld, roots: *AtomTable) !void {
|
||||
log.debug("collecting roots", .{});
|
||||
|
||||
// If we want to enable GC for incremental codepath, we need to take into
|
||||
// account any padding that might have been left here.
|
||||
section.header.size -= atom.size;
|
||||
|
||||
if (atom.prev) |prev| {
|
||||
prev.next = atom.next;
|
||||
}
|
||||
if (atom.next) |next| {
|
||||
next.prev = atom.prev;
|
||||
} else {
|
||||
if (atom.prev) |prev| {
|
||||
section.last_atom = prev;
|
||||
} else {
|
||||
// The section will be GCed in the next step.
|
||||
section.last_atom = null;
|
||||
section.header.size = 0;
|
||||
}
|
||||
}
|
||||
|
||||
macho_file.sections.set(match, section);
|
||||
}
|
||||
|
||||
fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void {
|
||||
const output_mode = macho_file.base.options.output_mode;
|
||||
|
||||
switch (output_mode) {
|
||||
switch (zld.options.output_mode) {
|
||||
.Exe => {
|
||||
// Add entrypoint as GC root
|
||||
const global = try macho_file.getEntryPoint();
|
||||
const atom = macho_file.getAtomForSymbol(global).?; // panic here means fatal error
|
||||
_ = try roots.getOrPut(atom);
|
||||
const global: SymbolWithLoc = zld.getEntryPoint();
|
||||
const object = zld.objects.items[global.getFile().?];
|
||||
const atom_index = object.getAtomIndexForSymbol(global.sym_index).?; // panic here means fatal error
|
||||
_ = try roots.getOrPut(atom_index);
|
||||
|
||||
log.debug("root(ATOM({d}, %{d}, {d}))", .{
|
||||
atom_index,
|
||||
zld.getAtom(atom_index).sym_index,
|
||||
zld.getAtom(atom_index).file,
|
||||
});
|
||||
},
|
||||
else => |other| {
|
||||
assert(other == .Lib);
|
||||
// Add exports as GC roots
|
||||
for (macho_file.globals.items) |global| {
|
||||
const sym = macho_file.getSymbol(global);
|
||||
if (!sym.sect()) continue;
|
||||
const atom = macho_file.getAtomForSymbol(global) orelse {
|
||||
log.debug("skipping {s}", .{macho_file.getSymbolName(global)});
|
||||
continue;
|
||||
};
|
||||
_ = try roots.getOrPut(atom);
|
||||
log.debug("adding root", .{});
|
||||
macho_file.logAtom(atom);
|
||||
for (zld.globals.items) |global| {
|
||||
const sym = zld.getSymbol(global);
|
||||
if (sym.undf()) continue;
|
||||
|
||||
const object = zld.objects.items[global.getFile().?];
|
||||
const atom_index = object.getAtomIndexForSymbol(global.sym_index).?; // panic here means fatal error
|
||||
_ = try roots.getOrPut(atom_index);
|
||||
|
||||
log.debug("root(ATOM({d}, %{d}, {d}))", .{
|
||||
atom_index,
|
||||
zld.getAtom(atom_index).sym_index,
|
||||
zld.getAtom(atom_index).file,
|
||||
});
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// TODO just a temp until we learn how to parse unwind records
|
||||
if (macho_file.getGlobal("___gxx_personality_v0")) |global| {
|
||||
if (macho_file.getAtomForSymbol(global)) |atom| {
|
||||
_ = try roots.getOrPut(atom);
|
||||
log.debug("adding root", .{});
|
||||
macho_file.logAtom(atom);
|
||||
for (zld.globals.items) |global| {
|
||||
if (mem.eql(u8, "___gxx_personality_v0", zld.getSymbolName(global))) {
|
||||
const object = zld.objects.items[global.getFile().?];
|
||||
if (object.getAtomIndexForSymbol(global.sym_index)) |atom_index| {
|
||||
_ = try roots.getOrPut(atom_index);
|
||||
|
||||
log.debug("root(ATOM({d}, %{d}, {d}))", .{
|
||||
atom_index,
|
||||
zld.getAtom(atom_index).sym_index,
|
||||
zld.getAtom(atom_index).file,
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
for (macho_file.objects.items) |object| {
|
||||
for (object.managed_atoms.items) |atom| {
|
||||
const source_sym = object.getSourceSymbol(atom.sym_index) orelse continue;
|
||||
if (source_sym.tentative()) continue;
|
||||
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
|
||||
for (zld.objects.items) |object| {
|
||||
const has_subsections = object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0;
|
||||
|
||||
for (object.atoms.items) |atom_index| {
|
||||
const is_gc_root = blk: {
|
||||
// Modelled after ld64 which treats each object file compiled without MH_SUBSECTIONS_VIA_SYMBOLS
|
||||
// as a root.
|
||||
if (!has_subsections) break :blk true;
|
||||
|
||||
const atom = zld.getAtom(atom_index);
|
||||
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
|
||||
source_sym.n_sect - 1
|
||||
else sect_id: {
|
||||
const nbase = @intCast(u32, object.in_symtab.?.len);
|
||||
const sect_id = @intCast(u16, atom.sym_index - nbase);
|
||||
break :sect_id sect_id;
|
||||
};
|
||||
const source_sect = object.getSourceSection(sect_id);
|
||||
if (source_sect.isDontDeadStrip()) break :blk true;
|
||||
if (mem.eql(u8, "__StaticInit", source_sect.sectName())) break :blk true;
|
||||
switch (source_sect.@"type"()) {
|
||||
macho.S_MOD_INIT_FUNC_POINTERS,
|
||||
macho.S_MOD_TERM_FUNC_POINTERS,
|
||||
@ -100,197 +114,229 @@ fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void
|
||||
else => break :blk false,
|
||||
}
|
||||
};
|
||||
|
||||
if (is_gc_root) {
|
||||
try roots.putNoClobber(atom, {});
|
||||
log.debug("adding root", .{});
|
||||
macho_file.logAtom(atom);
|
||||
try roots.putNoClobber(atom_index, {});
|
||||
|
||||
log.debug("root(ATOM({d}, %{d}, {d}))", .{
|
||||
atom_index,
|
||||
zld.getAtom(atom_index).sym_index,
|
||||
zld.getAtom(atom_index).file,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn markLive(atom: *Atom, alive: *std.AutoHashMap(*Atom, void), macho_file: *MachO) anyerror!void {
|
||||
const gop = try alive.getOrPut(atom);
|
||||
if (gop.found_existing) return;
|
||||
fn markLive(
|
||||
zld: *Zld,
|
||||
atom_index: AtomIndex,
|
||||
alive: *AtomTable,
|
||||
reverse_lookups: [][]u32,
|
||||
) anyerror!void {
|
||||
if (alive.contains(atom_index)) return;
|
||||
|
||||
log.debug("marking live", .{});
|
||||
macho_file.logAtom(atom);
|
||||
const atom = zld.getAtom(atom_index);
|
||||
const sym_loc = atom.getSymbolWithLoc();
|
||||
|
||||
for (atom.relocs.items) |rel| {
|
||||
const target_atom = rel.getTargetAtom(macho_file) orelse continue;
|
||||
try markLive(target_atom, alive, macho_file);
|
||||
log.debug("mark(ATOM({d}, %{d}, {d}))", .{ atom_index, sym_loc.sym_index, sym_loc.file });
|
||||
|
||||
alive.putAssumeCapacityNoClobber(atom_index, {});
|
||||
|
||||
const cpu_arch = zld.options.target.cpu.arch;
|
||||
|
||||
const sym = zld.getSymbol(atom.getSymbolWithLoc());
|
||||
const header = zld.sections.items(.header)[sym.n_sect - 1];
|
||||
if (header.isZerofill()) return;
|
||||
|
||||
const relocs = Atom.getAtomRelocs(zld, atom_index);
|
||||
const reverse_lookup = reverse_lookups[atom.getFile().?];
|
||||
for (relocs) |rel| {
|
||||
const target = switch (cpu_arch) {
|
||||
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
|
||||
.ARM64_RELOC_ADDEND => continue,
|
||||
else => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
|
||||
},
|
||||
.x86_64 => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
|
||||
else => unreachable,
|
||||
};
|
||||
const target_sym = zld.getSymbol(target);
|
||||
|
||||
if (rel.r_extern == 0) {
|
||||
// We are pessimistic and mark all atoms within the target section as live.
|
||||
// TODO: this can be improved by marking only the relevant atoms.
|
||||
const sect_id = target_sym.n_sect;
|
||||
const object = zld.objects.items[target.getFile().?];
|
||||
for (object.atoms.items) |other_atom_index| {
|
||||
const other_atom = zld.getAtom(other_atom_index);
|
||||
const other_sym = zld.getSymbol(other_atom.getSymbolWithLoc());
|
||||
if (other_sym.n_sect == sect_id) {
|
||||
try markLive(zld, other_atom_index, alive, reverse_lookups);
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (target_sym.undf()) continue;
|
||||
if (target.getFile() == null) {
|
||||
const target_sym_name = zld.getSymbolName(target);
|
||||
if (mem.eql(u8, "__mh_execute_header", target_sym_name)) continue;
|
||||
if (mem.eql(u8, "___dso_handle", target_sym_name)) continue;
|
||||
|
||||
unreachable; // referenced symbol not found
|
||||
}
|
||||
|
||||
const object = zld.objects.items[target.getFile().?];
|
||||
const target_atom_index = object.getAtomIndexForSymbol(target.sym_index).?;
|
||||
log.debug(" following ATOM({d}, %{d}, {d})", .{
|
||||
target_atom_index,
|
||||
zld.getAtom(target_atom_index).sym_index,
|
||||
zld.getAtom(target_atom_index).file,
|
||||
});
|
||||
|
||||
try markLive(zld, target_atom_index, alive, reverse_lookups);
|
||||
}
|
||||
}
|
||||
|
||||
fn refersLive(atom: *Atom, alive: std.AutoHashMap(*Atom, void), macho_file: *MachO) bool {
|
||||
for (atom.relocs.items) |rel| {
|
||||
const target_atom = rel.getTargetAtom(macho_file) orelse continue;
|
||||
if (alive.contains(target_atom)) return true;
|
||||
fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable, reverse_lookups: [][]u32) !bool {
|
||||
const atom = zld.getAtom(atom_index);
|
||||
const sym_loc = atom.getSymbolWithLoc();
|
||||
|
||||
log.debug("refersLive(ATOM({d}, %{d}, {d}))", .{ atom_index, sym_loc.sym_index, sym_loc.file });
|
||||
|
||||
const cpu_arch = zld.options.target.cpu.arch;
|
||||
|
||||
const sym = zld.getSymbol(sym_loc);
|
||||
const header = zld.sections.items(.header)[sym.n_sect - 1];
|
||||
assert(!header.isZerofill());
|
||||
|
||||
const relocs = Atom.getAtomRelocs(zld, atom_index);
|
||||
const reverse_lookup = reverse_lookups[atom.getFile().?];
|
||||
for (relocs) |rel| {
|
||||
const target = switch (cpu_arch) {
|
||||
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
|
||||
.ARM64_RELOC_ADDEND => continue,
|
||||
else => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
|
||||
},
|
||||
.x86_64 => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const object = zld.objects.items[target.getFile().?];
|
||||
const target_atom_index = object.getAtomIndexForSymbol(target.sym_index) orelse {
|
||||
log.debug("atom for symbol '{s}' not found; skipping...", .{zld.getSymbolName(target)});
|
||||
continue;
|
||||
};
|
||||
if (alive.contains(target_atom_index)) {
|
||||
log.debug(" refers live ATOM({d}, %{d}, {d})", .{
|
||||
target_atom_index,
|
||||
zld.getAtom(target_atom_index).sym_index,
|
||||
zld.getAtom(target_atom_index).file,
|
||||
});
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
fn refersDead(atom: *Atom, macho_file: *MachO) bool {
|
||||
for (atom.relocs.items) |rel| {
|
||||
const target_atom = rel.getTargetAtom(macho_file) orelse continue;
|
||||
const target_sym = target_atom.getSymbol(macho_file);
|
||||
if (target_sym.n_desc == MachO.N_DESC_GCED) return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
fn mark(
|
||||
roots: std.AutoHashMap(*Atom, void),
|
||||
alive: *std.AutoHashMap(*Atom, void),
|
||||
macho_file: *MachO,
|
||||
) !void {
|
||||
try alive.ensureUnusedCapacity(roots.count());
|
||||
|
||||
fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable, reverse_lookups: [][]u32) !void {
|
||||
var it = roots.keyIterator();
|
||||
while (it.next()) |root| {
|
||||
try markLive(root.*, alive, macho_file);
|
||||
try markLive(zld, root.*, alive, reverse_lookups);
|
||||
}
|
||||
|
||||
var loop: bool = true;
|
||||
while (loop) {
|
||||
loop = false;
|
||||
|
||||
for (macho_file.objects.items) |object| {
|
||||
for (object.managed_atoms.items) |atom| {
|
||||
if (alive.contains(atom)) continue;
|
||||
const source_sym = object.getSourceSymbol(atom.sym_index) orelse continue;
|
||||
if (source_sym.tentative()) continue;
|
||||
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
|
||||
if (source_sect.isDontDeadStripIfReferencesLive() and refersLive(atom, alive.*, macho_file)) {
|
||||
try markLive(atom, alive, macho_file);
|
||||
loop = true;
|
||||
for (zld.objects.items) |object| {
|
||||
for (object.atoms.items) |atom_index| {
|
||||
if (alive.contains(atom_index)) continue;
|
||||
|
||||
const atom = zld.getAtom(atom_index);
|
||||
const sect_id = if (object.getSourceSymbol(atom.sym_index)) |source_sym|
|
||||
source_sym.n_sect - 1
|
||||
else blk: {
|
||||
const nbase = @intCast(u32, object.in_symtab.?.len);
|
||||
const sect_id = @intCast(u16, atom.sym_index - nbase);
|
||||
break :blk sect_id;
|
||||
};
|
||||
const source_sect = object.getSourceSection(sect_id);
|
||||
|
||||
if (source_sect.isDontDeadStripIfReferencesLive()) {
|
||||
if (try refersLive(zld, atom_index, alive.*, reverse_lookups)) {
|
||||
try markLive(zld, atom_index, alive, reverse_lookups);
|
||||
loop = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *MachO) !void {
// Any section that ends up here will be updated, that is,
// its size and alignment recalculated.
var gc_sections = std.AutoHashMap(u8, void).init(arena);
var loop: bool = true;
while (loop) {
loop = false;
fn prune(zld: *Zld, alive: AtomTable) !void {
log.debug("pruning dead atoms", .{});
for (zld.objects.items) |*object| {
var i: usize = 0;
while (i < object.atoms.items.len) {
const atom_index = object.atoms.items[i];
if (alive.contains(atom_index)) {
i += 1;
continue;
}

for (macho_file.objects.items) |object| {
const in_symtab = object.in_symtab orelse continue;
const atom = zld.getAtom(atom_index);
const sym_loc = atom.getSymbolWithLoc();

for (in_symtab) |_, source_index| {
const atom = object.getAtomForSymbol(@intCast(u32, source_index)) orelse continue;
if (alive.contains(atom)) continue;
log.debug("prune(ATOM({d}, %{d}, {d}))", .{
atom_index,
sym_loc.sym_index,
sym_loc.file,
});
log.debug(" {s} in {s}", .{ zld.getSymbolName(sym_loc), object.name });

const global = atom.getSymbolWithLoc();
const sym = atom.getSymbolPtr(macho_file);
const match = sym.n_sect - 1;
const sym = zld.getSymbolPtr(sym_loc);
const sect_id = sym.n_sect - 1;
var section = zld.sections.get(sect_id);
section.header.size -= atom.size;

if (sym.n_desc == MachO.N_DESC_GCED) continue;
if (!sym.ext() and !refersDead(atom, macho_file)) continue;

macho_file.logAtom(atom);
sym.n_desc = MachO.N_DESC_GCED;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});

for (atom.contained.items) |sym_off| {
const inner = macho_file.getSymbolPtr(.{
.sym_index = sym_off.sym_index,
.file = atom.file,
});
inner.n_desc = MachO.N_DESC_GCED;
if (atom.prev_index) |prev_index| {
const prev = zld.getAtomPtr(prev_index);
prev.next_index = atom.next_index;
} else {
if (atom.next_index) |next_index| {
section.first_atom_index = next_index;
}

if (macho_file.got_entries_table.contains(global)) {
const got_atom = macho_file.getGotAtomForSymbol(global).?;
const got_sym = got_atom.getSymbolPtr(macho_file);
got_sym.n_desc = MachO.N_DESC_GCED;
}
if (atom.next_index) |next_index| {
const next = zld.getAtomPtr(next_index);
next.prev_index = atom.prev_index;
} else {
if (atom.prev_index) |prev_index| {
section.last_atom_index = prev_index;
} else {
assert(section.header.size == 0);
section.first_atom_index = undefined;
section.last_atom_index = undefined;
}
}

if (macho_file.stubs_table.contains(global)) {
const stubs_atom = macho_file.getStubsAtomForSymbol(global).?;
const stubs_sym = stubs_atom.getSymbolPtr(macho_file);
stubs_sym.n_desc = MachO.N_DESC_GCED;
}
zld.sections.set(sect_id, section);
_ = object.atoms.swapRemove(i);

if (macho_file.tlv_ptr_entries_table.contains(global)) {
const tlv_ptr_atom = macho_file.getTlvPtrAtomForSymbol(global).?;
const tlv_ptr_sym = tlv_ptr_atom.getSymbolPtr(macho_file);
tlv_ptr_sym.n_desc = MachO.N_DESC_GCED;
}
sym.n_desc = N_DEAD;

loop = true;
var inner_sym_it = Atom.getInnerSymbolsIterator(zld, atom_index);
while (inner_sym_it.next()) |inner| {
const inner_sym = zld.getSymbolPtr(inner);
inner_sym.n_desc = N_DEAD;
}

if (Atom.getSectionAlias(zld, atom_index)) |alias| {
const alias_sym = zld.getSymbolPtr(alias);
alias_sym.n_desc = N_DEAD;
}
}
}

for (macho_file.got_entries.items) |entry| {
const sym = entry.getSymbol(macho_file);
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
_ = macho_file.got_entries_table.remove(entry.target);
}

for (macho_file.stubs.items) |entry| {
const sym = entry.getSymbol(macho_file);
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
_ = macho_file.stubs_table.remove(entry.target);
}

for (macho_file.tlv_ptr_entries.items) |entry| {
const sym = entry.getSymbol(macho_file);
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
_ = macho_file.tlv_ptr_entries_table.remove(entry.target);
}

var gc_sections_it = gc_sections.iterator();
while (gc_sections_it.next()) |entry| {
const match = entry.key_ptr.*;
var section = macho_file.sections.get(match);
if (section.header.size == 0) continue; // Pruning happens automatically in next step.

section.header.@"align" = 0;
section.header.size = 0;

var atom = section.last_atom.?;

while (atom.prev) |prev| {
atom = prev;
}

while (true) {
const atom_alignment = try math.powi(u32, 2, atom.alignment);
const aligned_end_addr = mem.alignForwardGeneric(u64, section.header.size, atom_alignment);
const padding = aligned_end_addr - section.header.size;
section.header.size += padding + atom.size;
section.header.@"align" = @max(section.header.@"align", atom.alignment);

if (atom.next) |next| {
atom = next;
} else break;
}

macho_file.sections.set(match, section);
}
}

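In the new prune, a dead atom is spliced out of its section's doubly linked atom list and its symbols are tombstoned with N_DEAD, instead of the old N_DESC_GCED marking followed by a whole-section size recomputation. A minimal sketch of that unlink step, using hypothetical pointer-based types in place of the real AtomIndex/Zld accessors:

fn unlinkFromSection(section: *Section, atom: *Atom) void {
    // Splice the atom out of the doubly linked list...
    if (atom.prev) |prev| {
        prev.next = atom.next;
    } else {
        // ...updating the section head when the atom was first...
        section.first_atom = atom.next;
    }
    if (atom.next) |next| {
        next.prev = atom.prev;
    } else {
        // ...and the tail when it was last.
        section.last_atom = atom.prev;
    }
    // The freed bytes come straight off the section size; no fixpoint
    // recomputation pass is needed anymore.
    section.size -= atom.size;
}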
364
src/link/MachO/thunks.zig
Normal file
@ -0,0 +1,364 @@
//! An algorithm for allocating the output machine code section (aka `__TEXT,__text`)
//! and inserting range-extending thunks. It is only run for targets that require
//! range extenders, such as arm64.
//!
//! The algorithm works pessimistically: it assumes that any reference to an Atom in
//! another output section is out of range.

const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.thunks);
const macho = std.macho;
const math = std.math;
const mem = std.mem;

const aarch64 = @import("../../arch/aarch64/bits.zig");

const Allocator = mem.Allocator;
const Atom = @import("ZldAtom.zig");
const AtomIndex = @import("zld.zig").AtomIndex;
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const Zld = @import("zld.zig").Zld;

pub const ThunkIndex = u32;

/// A branch instruction has a 26-bit immediate operand, scaled by 4 (4-byte
/// aligned), giving 28 bits of signed reach.
const jump_bits = @bitSizeOf(i28);

const max_distance = (1 << (jump_bits - 1));

/// A branch will need an extender if its target is farther away than
/// `2^(jump_bits - 1) - margin`, where margin is some arbitrary slack.
/// mold uses a 5MiB margin, while ld64 uses 4MiB; we follow mold and
/// assume a 5MiB margin.
const max_allowed_distance = max_distance - 0x500_000;

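// Worked numbers (illustrative, not in the original file): jump_bits = 28, so
// max_distance = 1 << 27 = 0x800_0000 (128 MiB of signed reach), and
// max_allowed_distance = 0x800_0000 - 0x500_000 = 0x7B0_0000 (123 MiB).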
pub const Thunk = struct {
start_index: AtomIndex,
len: u32,

lookup: std.AutoArrayHashMapUnmanaged(SymbolWithLoc, AtomIndex) = .{},

pub fn deinit(self: *Thunk, gpa: Allocator) void {
self.lookup.deinit(gpa);
}

pub fn getStartAtomIndex(self: Thunk) AtomIndex {
assert(self.len != 0);
return self.start_index;
}

pub fn getEndAtomIndex(self: Thunk) AtomIndex {
assert(self.len != 0);
return self.start_index + self.len - 1;
}

pub fn getSize(self: Thunk) u64 {
return 12 * self.len;
}

pub fn getAlignment() u32 {
return @alignOf(u32);
}

pub fn getTrampolineForSymbol(self: Thunk, zld: *Zld, target: SymbolWithLoc) ?SymbolWithLoc {
const atom_index = self.lookup.get(target) orelse return null;
const atom = zld.getAtom(atom_index);
return atom.getSymbolWithLoc();
}
};

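// Reader's note (not in the original file): getSize returns 12 * len because
// each trampoline emitted by writeThunkCode below is exactly three 4-byte
// instructions (adrp + add + br), matching the `@sizeOf(u32) * 3` atom size
// in createThunkAtom.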
pub fn createThunks(zld: *Zld, sect_id: u8, reverse_lookups: [][]u32) !void {
const header = &zld.sections.items(.header)[sect_id];
if (header.size == 0) return;

const gpa = zld.gpa;
const first_atom_index = zld.sections.items(.first_atom_index)[sect_id];

header.size = 0;
header.@"align" = 0;

var atom_count: u32 = 0;

{
var atom_index = first_atom_index;
while (true) {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = 0;
atom_count += 1;

if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}

var allocated = std.AutoHashMap(AtomIndex, void).init(gpa);
defer allocated.deinit();
try allocated.ensureTotalCapacity(atom_count);

var group_start = first_atom_index;
var group_end = first_atom_index;
var offset: u64 = 0;

while (true) {
const group_start_atom = zld.getAtom(group_start);
log.debug("GROUP START at {d}", .{group_start});

while (true) {
const atom = zld.getAtom(group_end);
offset = mem.alignForwardGeneric(u64, offset, try math.powi(u32, 2, atom.alignment));

const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset;
offset += atom.size;

zld.logAtom(group_end, log);

header.@"align" = @max(header.@"align", atom.alignment);

allocated.putAssumeCapacityNoClobber(group_end, {});

const group_start_sym = zld.getSymbol(group_start_atom.getSymbolWithLoc());
if (offset - group_start_sym.n_value >= max_allowed_distance) break;

if (atom.next_index) |next_index| {
group_end = next_index;
} else break;
}
log.debug("GROUP END at {d}", .{group_end});

// Insert thunk at group_end
const thunk_index = @intCast(u32, zld.thunks.items.len);
try zld.thunks.append(gpa, .{ .start_index = undefined, .len = 0 });

// Scan relocs in the group and create trampolines for any unreachable callsite.
var atom_index = group_start;
while (true) {
const atom = zld.getAtom(atom_index);
try scanRelocs(
zld,
atom_index,
reverse_lookups[atom.getFile().?],
allocated,
thunk_index,
group_end,
);

if (atom_index == group_end) break;

if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}

offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment());
allocateThunk(zld, thunk_index, offset, header);
offset += zld.thunks.items[thunk_index].getSize();

const thunk = zld.thunks.items[thunk_index];
if (thunk.len == 0) {
const group_end_atom = zld.getAtom(group_end);
if (group_end_atom.next_index) |next_index| {
group_start = next_index;
group_end = next_index;
} else break;
} else {
const thunk_end_atom_index = thunk.getEndAtomIndex();
const thunk_end_atom = zld.getAtom(thunk_end_atom_index);
if (thunk_end_atom.next_index) |next_index| {
group_start = next_index;
group_end = next_index;
} else break;
}
}

header.size = @intCast(u32, offset);
}

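// Reader's note (not in the original file): createThunks walks the section's
// atom list, packing atoms into a group until the group spans
// max_allowed_distance bytes; every branch inside the group can then reach a
// thunk placed immediately after group_end. Each group's relocs are scanned,
// unreachable BRANCH26 callsites get a trampoline atom appended to the thunk,
// and the walk resumes after the thunk.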
fn allocateThunk(
zld: *Zld,
thunk_index: ThunkIndex,
base_offset: u64,
header: *macho.section_64,
) void {
const thunk = zld.thunks.items[thunk_index];
if (thunk.len == 0) return;

const first_atom_index = thunk.getStartAtomIndex();
const end_atom_index = thunk.getEndAtomIndex();

var atom_index = first_atom_index;
var offset = base_offset;
while (true) {
const atom = zld.getAtom(atom_index);
offset = mem.alignForwardGeneric(u64, offset, Thunk.getAlignment());

const sym = zld.getSymbolPtr(atom.getSymbolWithLoc());
sym.n_value = offset;
offset += atom.size;

zld.logAtom(atom_index, log);

header.@"align" = @max(header.@"align", atom.alignment);

if (end_atom_index == atom_index) break;

if (atom.next_index) |next_index| {
atom_index = next_index;
} else break;
}
}

fn scanRelocs(
zld: *Zld,
atom_index: AtomIndex,
reverse_lookup: []u32,
allocated: std.AutoHashMap(AtomIndex, void),
thunk_index: ThunkIndex,
group_end: AtomIndex,
) !void {
const atom = zld.getAtom(atom_index);
const object = zld.objects.items[atom.getFile().?];

const base_offset = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
const source_sect = object.getSourceSection(source_sym.n_sect - 1);
break :blk @intCast(i32, source_sym.n_value - source_sect.addr);
} else 0;

const relocs = Atom.getAtomRelocs(zld, atom_index);
for (relocs) |rel| {
if (!relocNeedsThunk(rel)) continue;

const target = Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup);
if (isReachable(zld, atom_index, rel, base_offset, target, allocated)) continue;

log.debug("{x}: source = {s}@{x}, target = {s}@{x} unreachable", .{
rel.r_address - base_offset,
zld.getSymbolName(atom.getSymbolWithLoc()),
zld.getSymbol(atom.getSymbolWithLoc()).n_value,
zld.getSymbolName(target),
zld.getSymbol(target).n_value,
});

const gpa = zld.gpa;
const target_sym = zld.getSymbol(target);

const actual_target: SymbolWithLoc = if (target_sym.undf()) blk: {
const stub_atom_index = zld.getStubsAtomIndexForSymbol(target).?;
break :blk .{ .sym_index = zld.getAtom(stub_atom_index).sym_index };
} else target;

const thunk = &zld.thunks.items[thunk_index];
const gop = try thunk.lookup.getOrPut(gpa, actual_target);
if (!gop.found_existing) {
const thunk_atom_index = try createThunkAtom(zld);
gop.value_ptr.* = thunk_atom_index;

const thunk_atom = zld.getAtomPtr(thunk_atom_index);
const end_atom_index = if (thunk.len == 0) group_end else thunk.getEndAtomIndex();
const end_atom = zld.getAtomPtr(end_atom_index);

if (end_atom.next_index) |first_after_index| {
const first_after_atom = zld.getAtomPtr(first_after_index);
first_after_atom.prev_index = thunk_atom_index;
thunk_atom.next_index = first_after_index;
}

end_atom.next_index = thunk_atom_index;
thunk_atom.prev_index = end_atom_index;

if (thunk.len == 0) {
thunk.start_index = thunk_atom_index;
}

thunk.len += 1;
}

try zld.thunk_table.put(gpa, atom_index, thunk_index);
}
}

inline fn relocNeedsThunk(rel: macho.relocation_info) bool {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
return rel_type == .ARM64_RELOC_BRANCH26;
}

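// Reader's note (not in the original file): only ARM64_RELOC_BRANCH26 relocs
// (the b / bl instructions) can run out of range; their signed 28-bit reach is
// exactly what jump_bits models above, so they are the only relocs that may
// need a trampoline.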
fn isReachable(
zld: *Zld,
atom_index: AtomIndex,
rel: macho.relocation_info,
base_offset: i32,
target: SymbolWithLoc,
allocated: std.AutoHashMap(AtomIndex, void),
) bool {
if (zld.getStubsAtomIndexForSymbol(target)) |_| return false;

const source_atom = zld.getAtom(atom_index);
const source_sym = zld.getSymbol(source_atom.getSymbolWithLoc());

const target_object = zld.objects.items[target.getFile().?];
const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
const target_atom = zld.getAtom(target_atom_index);
const target_sym = zld.getSymbol(target_atom.getSymbolWithLoc());

if (source_sym.n_sect != target_sym.n_sect) return false;

if (!allocated.contains(target_atom_index)) return false;

const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
const target_addr = Atom.getRelocTargetAddress(zld, rel, target, false) catch unreachable;
_ = Atom.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
return false;

return true;
}

fn createThunkAtom(zld: *Zld) !AtomIndex {
const sym_index = try zld.allocateSymbol();
const atom_index = try zld.createEmptyAtom(sym_index, @sizeOf(u32) * 3, 2);
const sym = zld.getSymbolPtr(.{ .sym_index = sym_index });
sym.n_type = macho.N_SECT;

const sect_id = zld.getSectionByName("__TEXT", "__text") orelse unreachable;
sym.n_sect = sect_id + 1;

return atom_index;
}

fn getThunkIndex(zld: *Zld, atom_index: AtomIndex) ?ThunkIndex {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
for (zld.thunks.items) |thunk, i| {
if (thunk.len == 0) continue;

const thunk_atom_index = thunk.getStartAtomIndex();
const thunk_atom = zld.getAtom(thunk_atom_index);
const thunk_sym = zld.getSymbol(thunk_atom.getSymbolWithLoc());
const start_addr = thunk_sym.n_value;
const end_addr = start_addr + thunk.getSize();

if (start_addr <= sym.n_value and sym.n_value < end_addr) {
return @intCast(u32, i);
}
}
return null;
}

pub fn writeThunkCode(zld: *Zld, atom_index: AtomIndex, writer: anytype) !void {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
const source_addr = sym.n_value;
const thunk = zld.thunks.items[getThunkIndex(zld, atom_index).?];
const target_addr = for (thunk.lookup.keys()) |target| {
const target_atom_index = thunk.lookup.get(target).?;
if (atom_index == target_atom_index) break zld.getSymbol(target).n_value;
} else unreachable;

const pages = Atom.calcNumberOfPages(source_addr, target_addr);
try writer.writeIntLittle(u32, aarch64.Instruction.adrp(.x16, pages).toU32());
const off = try Atom.calcPageOffset(target_addr, .arithmetic);
try writer.writeIntLittle(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32());
try writer.writeIntLittle(u32, aarch64.Instruction.br(.x16).toU32());
}
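// Reader's note (not in the original file): the emitted trampoline is three
// instructions, 12 bytes, using x16 (an intra-procedure-call scratch register
// that the arm64 ABI reserves for exactly this kind of linker veneer):
//   adrp x16, target@PAGE
//   add  x16, x16, target@PAGEOFF
//   br   x16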
File diff suppressed because it is too large
Load Diff