mirror of
https://github.com/ziglang/zig.git
macho: move to incremental writes and global relocs for incremental
commit 53bd7bd044
parent 34f9360ea2
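In broad terms, the change replaces whole-buffer relocation fixups with two pieces: relocations are recorded per atom in tables owned by the linker, and they are later resolved by patching only the affected bytes of the output file in place. A minimal sketch of that in-place patching, using a hypothetical patchU32 helper rather than the real MachO.zig API:

    const std = @import("std");

    /// Patch a 32-bit little-endian value at `file_offset` without rewriting
    /// anything else in the output -- the essence of an incremental write.
    fn patchU32(file: std.fs.File, file_offset: u64, value: u32) !void {
        var buf: [4]u8 = undefined;
        std.mem.writeIntLittle(u32, &buf, value);
        try file.pwriteAll(&buf, file_offset);
    }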
@@ -680,16 +680,15 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void {
break :blk offset;
};
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
try atom.addRelocation(macho_file, .{
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = target,
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
});
} else {
return emit.fail("Implement call_extern for linking backends != MachO", .{});
@@ -882,13 +881,13 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
}

if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = macho_file.atom_by_index_table.get(data.atom_index).?;
// Page reloc for adrp instruction.
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
const atom = macho_file.getAtomForSymbol(.{ .sym_index = data.atom_index, .file = null }).?;
// TODO this causes segfault in stage1
// try atom.addRelocations(macho_file, 2, .{
try atom.addRelocation(macho_file, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = switch (tag) {
@@ -901,12 +900,10 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
},
});
// Pageoff reloc for adrp instruction.
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset + 4,
try atom.addRelocation(macho_file, .{
.target = .{ .sym_index = data.sym_index, .file = null },
.offset = offset + 4,
.addend = 0,
.subtractor = null,
.pcrel = false,
.length = 2,
.@"type" = switch (tag) {
@@ -996,7 +996,6 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
);

const end_offset = emit.code.items.len;
const gpa = emit.bin_file.allocator;

if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops.flags) {
@@ -1004,19 +1003,17 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => unreachable,
};
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
log.debug("adding reloc of type {} to local @{d}", .{ reloc_type, relocation.sym_index });
try atom.relocs.append(gpa, .{
.offset = @intCast(u32, end_offset - 4),
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(macho_file, .{
.@"type" = reloc_type,
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = reloc_type,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
try atom.addRelocation(coff_file, .{
.@"type" = switch (ops.flags) {
0b00 => .got,
@@ -1145,20 +1142,19 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {

if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = macho_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = macho_file.getGlobalByIndex(relocation.sym_index);
try atom.relocs.append(emit.bin_file.allocator, .{
.offset = offset,
try atom.addRelocation(macho_file, .{
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = target,
.offset = offset,
.addend = 0,
.subtractor = null,
.pcrel = true,
.length = 2,
.@"type" = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
const atom = coff_file.getAtomForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
const target = coff_file.getGlobalByIndex(relocation.sym_index);
try atom.addRelocation(coff_file, .{
.@"type" = .direct,
@@ -1527,7 +1527,7 @@ pub fn getDeclVAddr(
assert(self.llvm_object == null);
assert(decl.link.coff.sym_index != 0);

const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null };
try atom.addRelocation(self, .{
.@"type" = .direct,
src/link/MachO.zig (1265): file diff suppressed because it is too large.
@@ -66,8 +66,6 @@ prev: ?*Atom,

dbg_info_atom: Dwarf.Atom,

dirty: bool = true,

pub const Binding = struct {
target: SymbolWithLoc,
offset: u64,
@@ -898,46 +896,81 @@ inline fn isArithmeticOp(inst: *const [4]u8) bool {
}

pub fn addRelocation(self: *Atom, macho_file: *MachO, reloc: RelocationIncr) !void {
return self.addRelocations(macho_file, 1, .{reloc});
}

pub fn addRelocations(
self: *Atom,
macho_file: *MachO,
comptime count: comptime_int,
relocs: [count]RelocationIncr,
) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding reloc of type {s} to target %{d})", .{ @tagName(reloc.@"type"), reloc.target.sym_index });
const target = macho_file.base.options.target;
const gop = try macho_file.relocs.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}

pub fn resolveRelocationsInCodeBuffer(self: *Atom, macho_file: *MachO, code: []u8) !void {
const relocs = macho_file.relocs.get(self) orelse return;

log.debug("relocating '{s}'", .{self.getName(macho_file)});

for (relocs.items) |*reloc| {
// We don't check for dirty relocation as we resolve in memory so it's effectively free.
try reloc.resolve(self, macho_file, code);
reloc.dirty = false;
try gop.value_ptr.ensureUnusedCapacity(gpa, count);
for (relocs) |reloc| {
log.debug(" (adding reloc of type {s} to target %{d})", .{
reloc.fmtType(target),
reloc.target.sym_index,
});
gop.value_ptr.appendAssumeCapacity(reloc);
}
}

pub fn resolveRelocationsInFile(self: *Atom, macho_file: *MachO) !void {
const relocs = macho_file.relocs.get(self) orelse return;
pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding rebase at offset 0x{x} in %{d})", .{ offset, self.sym_index });
const gop = try macho_file.rebases.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, offset);
}

// No code available in a buffer; we need to read it in from the binary.
pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
});
const gop = try macho_file.bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}

pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
});
const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, binding);
}

pub fn resolveRelocations(self: *Atom, macho_file: *MachO) !void {
const relocs = macho_file.relocs.get(self) orelse return;
const source_sym = self.getSymbol(macho_file);
const source_section = macho_file.sections.get(source_sym.n_sect - 1).header;
const file_offset = source_section.offset + source_sym.value - source_section.addr;
const code = try gpa.alloc(u8, self.size);
try self.base.file.?.preadAll(code, file_offset);
defer gpa.free(code);
const file_offset = source_section.offset + source_sym.n_value - source_section.addr;

log.debug("relocating '{s}'", .{self.getName(macho_file)});

for (relocs.items) |*reloc| {
if (!reloc.dirty) continue;
try reloc.resolve(self, macho_file, code);

try reloc.resolve(self, macho_file, file_offset);
reloc.dirty = false;
}

try self.base.file.?.pwriteAll(code, file_offset);
}
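For reference, the getOrPut pattern that addRelocations, addRebase, addBinding, and addLazyBinding all follow above can be reduced to the following standalone sketch; Atom and Reloc here are illustrative stand-ins, not the real linker types:

    const std = @import("std");

    const Atom = struct { sym_index: u32 };
    const Reloc = struct { offset: u32, addend: i64 };
    const RelocTable = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Reloc));

    fn addRelocs(gpa: std.mem.Allocator, table: *RelocTable, atom: *Atom, relocs: []const Reloc) !void {
        // One list per atom, created lazily on first use.
        const gop = try table.getOrPut(gpa, atom);
        if (!gop.found_existing) {
            gop.value_ptr.* = .{};
        }
        // Reserve once up front, then append without further failure paths.
        try gop.value_ptr.ensureUnusedCapacity(gpa, relocs.len);
        for (relocs) |reloc| {
            gop.value_ptr.appendAssumeCapacity(reloc);
        }
    }

Note how the list lives in a linker-owned table keyed by the atom, rather than on the atom itself as in the removed atom.relocs code.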
@@ -13,31 +13,35 @@ const Atom = @import("Atom.zig");
const MachO = @import("../MachO.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;

pub const Table = std.AutoHashMapUnmanaged(*Atom, std.ArrayListUnmanaged(Relocation));

/// Offset within the atom's code buffer.
/// Note relocation size can be inferred by relocation's kind.
offset: u32,
@"type": u4,
target: SymbolWithLoc,
offset: u32,
addend: i64,
pcrel: bool,
length: u2,
@"type": u4,
dirty: bool = true,

pub fn fmtType(self: Relocation, target: std.Target) []const u8 {
switch (target.cpu.arch) {
.aarch64 => return @tagName(@intToEnum(macho.reloc_type_arm64, self.@"type")),
.x86_64 => return @tagName(@intToEnum(macho.reloc_type_x86_64, self.@"type")),
else => unreachable,
}
}

pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
switch (macho_file.base.options.target.cpu.arch) {
.aarch64 => switch (@intToEnum(macho.reloc_type_arm64, self.@"type")) {
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGEOFF12,
.ARM64_RELOC_POINTER_TO_GOT,
=> return macho_file.getGotAtomForSymbol(self.target).?,
=> return macho_file.getGotAtomForSymbol(self.target),
else => {},
},
.x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, self.@"type")) {
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
=> return macho_file.getGotAtomForSymbol(self.target).?,
=> return macho_file.getGotAtomForSymbol(self.target),
else => {},
},
else => unreachable,
@@ -47,54 +51,72 @@ pub fn getTargetAtom(self: Relocation, macho_file: *MachO) ?*Atom {
return macho_file.getAtomForSymbol(self.target);
}

pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, code: []u8) !void {
pub fn resolve(self: Relocation, atom: *Atom, macho_file: *MachO, base_offset: u64) !void {
const arch = macho_file.base.options.target.cpu.arch;
const source_sym = atom.getSymbol(macho_file);
const source_addr = source_sym.n_value + self.offset;

const target_atom = self.getTargetAtom(macho_file) orelse return;
const target_addr = target_atom.getSymbol(macho_file).n_value + self.addend;
const target_addr = @intCast(i64, target_atom.getSymbol(macho_file).n_value) + self.addend;

log.debug(" ({x}: [() => 0x{x} ({s})) ({s})", .{
source_addr,
target_addr,
macho_file.getSymbolName(self.target),
switch (arch) {
.aarch64 => @tagName(@intToEnum(macho.reloc_type_arm64, self.@"type")),
.x86_64 => @tagName(@intToEnum(macho.reloc_type_x86_64, self.@"type")),
else => unreachable,
},
self.fmtType(macho_file.base.options.target),
});

switch (arch) {
.aarch64 => return self.resolveAarch64(source_addr, target_addr, macho_file, code),
.x86_64 => return self.resolveX8664(source_addr, target_addr, code),
.aarch64 => return self.resolveAarch64(macho_file, source_addr, target_addr, base_offset),
.x86_64 => return self.resolveX8664(macho_file, source_addr, target_addr, base_offset),
else => unreachable,
}
}

fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_file: *MachO, code: []u8) !void {
fn resolveAarch64(
self: Relocation,
macho_file: *MachO,
source_addr: u64,
target_addr: i64,
base_offset: u64,
) !void {
const rel_type = @intToEnum(macho.reloc_type_arm64, self.@"type");
if (rel_type == .ARM64_RELOC_UNSIGNED) {
var buffer: [@sizeOf(u64)]u8 = undefined;
const code = blk: {
switch (self.length) {
2 => {
mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr)));
break :blk buffer[0..4];
},
3 => {
mem.writeIntLittle(u64, &buffer, @bitCast(u64, target_addr));
break :blk &buffer;
},
else => unreachable,
}
};
return macho_file.base.file.?.pwriteAll(code, base_offset + self.offset);
}

var buffer: [@sizeOf(u32)]u8 = undefined;
const amt = try macho_file.base.file.?.preadAll(&buffer, base_offset + self.offset);
if (amt != buffer.len) return error.InputOutput;

switch (rel_type) {
.ARM64_RELOC_BRANCH26 => {
const displacement = math.cast(i28, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse {
log.err("jump too big to encode as i28 displacement value", .{});
log.err(" (target - source) = displacement => 0x{x} - 0x{x} = 0x{x}", .{
target_addr,
source_addr,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
});
log.err(" TODO implement branch islands to extend jump distance for arm64", .{});
return error.TODOImplementBranchIslands;
};
const displacement = math.cast(
i28,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
) orelse unreachable; // TODO codegen should never allow for jump larger than i28 displacement
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
), code),
), &buffer),
};
inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGE21,
@@ -107,45 +129,45 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
.pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), code),
), &buffer),
};
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_PAGEOFF12 => {
const narrowed = @truncate(u12, @intCast(u64, target_addr));
if (isArithmeticOp(code)) {
if (isArithmeticOp(&buffer)) {
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), code),
), &buffer),
};
inst.add_subtract_immediate.imm12 = narrowed;
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code),
), &buffer),
};
const offset: u12 = blk: {
if (inst.load_store_register.size == 0) {
if (inst.load_store_register.v == 1) {
// 128-bit SIMD is scaled by 16.
break :blk try math.divExact(u12, narrowed, 16);
break :blk @divExact(narrowed, 16);
}
// Otherwise, 8-bit SIMD or ldrb.
break :blk narrowed;
} else {
const denom: u4 = try math.powi(u4, 2, inst.load_store_register.size);
break :blk try math.divExact(u12, narrowed, denom);
const denom: u4 = math.powi(u4, 2, inst.load_store_register.size) catch unreachable;
break :blk @divExact(narrowed, denom);
}
};
inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
}
},
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
@@ -154,11 +176,12 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
.load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code),
), &buffer),
};
const offset = try math.divExact(u12, narrowed, 8);
const offset = @divExact(narrowed, 8);
inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
log.debug("HMM = {x}", .{std.fmt.fmtSliceHexLower(&buffer)});
},
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
const RegInfo = struct {
@@ -167,11 +190,11 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
size: u2,
};
const reg_info: RegInfo = blk: {
if (isArithmeticOp(code)) {
if (isArithmeticOp(&buffer)) {
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), code);
), &buffer);
break :blk .{
.rd = inst.rd,
.rn = inst.rn,
@@ -181,7 +204,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
const inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code);
), &buffer);
break :blk .{
.rd = inst.rt,
.rn = inst.rn,
@@ -190,20 +213,7 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
}
};
const narrowed = @truncate(u12, @intCast(u64, target_addr));
var inst = if (macho_file.tlv_ptr_entries_table.contains(self.target)) blk: {
const offset = try math.divExact(u12, narrowed, 8);
break :blk aarch64.Instruction{
.load_store_register = .{
.rt = reg_info.rd,
.rn = reg_info.rn,
.offset = offset,
.opc = 0b01,
.op1 = 0b01,
.v = 0,
.size = reg_info.size,
},
};
} else aarch64.Instruction{
var inst = aarch64.Instruction{
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
@@ -214,61 +224,81 @@ fn resolveAarch64(self: Relocation, source_addr: u64, target_addr: u64, macho_fi
.sf = @truncate(u1, reg_info.size),
},
};
mem.writeIntLittle(u32, code, inst.toU32());
mem.writeIntLittle(u32, &buffer, inst.toU32());
},
.ARM64_RELOC_POINTER_TO_GOT => {
const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
return error.Overflow;
mem.writeIntLittle(u32, code, @bitCast(u32, result));
},
.ARM64_RELOC_UNSIGNED => {
switch (self.length) {
2 => mem.writeIntLittle(u32, code, @truncate(u32, @bitCast(u64, target_addr))),
3 => mem.writeIntLittle(u64, code, target_addr),
else => unreachable,
}
const result = math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr),
) orelse return error.Overflow;
mem.writeIntLittle(u32, &buffer, @bitCast(u32, result));
},
.ARM64_RELOC_SUBTRACTOR => unreachable,
.ARM64_RELOC_ADDEND => unreachable,
.ARM64_RELOC_UNSIGNED => unreachable,
}
try macho_file.base.file.?.pwriteAll(&buffer, base_offset + self.offset);
}

fn resolveX8664(self: Relocation, source_addr: u64, target_addr: u64, code: []u8) !void {
fn resolveX8664(
self: Relocation,
macho_file: *MachO,
source_addr: u64,
target_addr: i64,
base_offset: u64,
) !void {
const rel_type = @intToEnum(macho.reloc_type_x86_64, self.@"type");
switch (rel_type) {
.X86_64_RELOC_BRANCH,
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
.X86_64_RELOC_TLV,
=> {
const displacement = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr) - 4) orelse
return error.Overflow;
mem.writeIntLittle(u32, code, @bitCast(u32, displacement));
},
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
const correction: u3 = switch (rel_type) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
const displacement = math.cast(i32, target_addr - @intCast(i64, source_addr + correction + 4)) orelse
return error.Overflow;
mem.writeIntLittle(u32, code, @bitCast(u32, displacement));
},
.X86_64_RELOC_UNSIGNED => {
switch (self.length) {
2 => mem.writeIntLittle(u32, code, @truncate(u32, @bitCast(u64, target_addr))),
3 => mem.writeIntLittle(u64, code, target_addr),
}
},
.X86_64_RELOC_SUBTRACTOR => unreachable,
}
var buffer: [@sizeOf(u64)]u8 = undefined;
const code = blk: {
switch (rel_type) {
.X86_64_RELOC_BRANCH,
.X86_64_RELOC_GOT,
.X86_64_RELOC_GOT_LOAD,
.X86_64_RELOC_TLV,
=> {
const displacement = math.cast(
i32,
@intCast(i64, target_addr) - @intCast(i64, source_addr) - 4,
) orelse return error.Overflow;
mem.writeIntLittle(u32, buffer[0..4], @bitCast(u32, displacement));
break :blk buffer[0..4];
},
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
const correction: u3 = switch (rel_type) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
.X86_64_RELOC_SIGNED_2 => 2,
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
const displacement = math.cast(
i32,
target_addr - @intCast(i64, source_addr + correction + 4),
) orelse return error.Overflow;
mem.writeIntLittle(u32, buffer[0..4], @bitCast(u32, displacement));
break :blk buffer[0..4];
},
.X86_64_RELOC_UNSIGNED => {
switch (self.length) {
2 => {
mem.writeIntLittle(u32, buffer[0..4], @truncate(u32, @bitCast(u64, target_addr)));
break :blk buffer[0..4];
},
3 => {
mem.writeIntLittle(u64, buffer[0..8], @bitCast(u64, target_addr));
break :blk &buffer;
},
else => unreachable,
}
},
.X86_64_RELOC_SUBTRACTOR => unreachable,
}
};
try macho_file.base.file.?.pwriteAll(code, base_offset + self.offset);
}

inline fn isArithmeticOp(inst: *const [4]u8) bool {
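Since each relocation now patches the output file directly, the per-fixup cycle is: pread the affected instruction word at base_offset + reloc.offset, rewrite the relevant field, pwrite it back. A stripped-down sketch of that cycle for a hypothetical 4-byte PC-relative fixup (not one of the real reloc_type handlers above):

    const std = @import("std");

    fn resolvePcRel32(file: std.fs.File, file_offset: u64, source_addr: u64, target_addr: i64) !void {
        // Read the 4 bytes being relocated; fixups that only touch a bit-field of an
        // instruction need the original bytes (this full-word overwrite strictly does
        // not, but the round trip mirrors the flow above).
        var buffer: [4]u8 = undefined;
        const amt = try file.preadAll(&buffer, file_offset);
        if (amt != buffer.len) return error.InputOutput;

        // Signed displacement measured from the end of the 4-byte field.
        const displacement = std.math.cast(i32, target_addr - @intCast(i64, source_addr) - 4) orelse
            return error.Overflow;
        std.mem.writeIntLittle(u32, &buffer, @bitCast(u32, displacement));

        // Write the patched bytes back in place.
        try file.pwriteAll(&buffer, file_offset);
    }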
@@ -233,7 +233,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
@@ -245,7 +245,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
@@ -257,7 +257,7 @@ fn prune(arena: Allocator, alive: std.AutoHashMap(*Atom, void), macho_file: *Mac
if (sym.n_desc != MachO.N_DESC_GCED) continue;

// TODO tombstone
const atom = entry.getAtom(macho_file);
const atom = entry.getAtom(macho_file).?;
const match = sym.n_sect - 1;
removeAtomFromSection(atom, match, macho_file);
_ = try gc_sections.put(match, {});
@@ -7,14 +7,19 @@ const macho = std.macho;
const math = std.math;
const mem = std.mem;

const aarch64 = @import("../../arch/aarch64/bits.zig");
const bind = @import("bind.zig");
const link = @import("../../link.zig");
const trace = @import("../../tracy.zig").trace;

const Atom = MachO.Atom;
const Cache = @import("../../Cache.zig");
const CodeSignature = @import("CodeSignature.zig");
const Compilation = @import("../../Compilation.zig");
const Dylib = @import("Dylib.zig");
const MachO = @import("../MachO.zig");
const SymbolWithLoc = MachO.SymbolWithLoc;
const Trie = @import("Trie.zig");

const dead_strip = @import("dead_strip.zig");

@@ -545,7 +550,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
const lc_writer = lc_buffer.writer();
var ncmds: u32 = 0;

try macho_file.writeLinkeditSegmentData(&ncmds, lc_writer);
try writeLinkeditSegmentData(macho_file, &ncmds, lc_writer);

// If the last section of __DATA segment is zerofill section, we need to ensure
// that the free space between the end of the last non-zerofill section of __DATA
@@ -952,3 +957,326 @@ fn allocateSymbols(macho_file: *MachO) !void {
}
}
}

fn writeLinkeditSegmentData(macho_file: *MachO, ncmds: *u32, lc_writer: anytype) !void {
const seg = &macho_file.segments.items[macho_file.linkedit_segment_cmd_index.?];
seg.filesize = 0;
seg.vmsize = 0;

try writeDyldInfoData(macho_file, ncmds, lc_writer);
try macho_file.writeFunctionStarts(ncmds, lc_writer);
try macho_file.writeDataInCode(ncmds, lc_writer);
try macho_file.writeSymtabs(ncmds, lc_writer);

seg.vmsize = mem.alignForwardGeneric(u64, seg.filesize, macho_file.page_size);
}

fn writeDyldInfoData(macho_file: *MachO, ncmds: *u32, lc_writer: anytype) !void {
const tracy = trace(@src());
defer tracy.end();

const gpa = macho_file.base.allocator;

var rebase_pointers = std.ArrayList(bind.Pointer).init(gpa);
defer rebase_pointers.deinit();
var bind_pointers = std.ArrayList(bind.Pointer).init(gpa);
defer bind_pointers.deinit();
var lazy_bind_pointers = std.ArrayList(bind.Pointer).init(gpa);
defer lazy_bind_pointers.deinit();

const slice = macho_file.sections.slice();
for (slice.items(.last_atom)) |last_atom, sect_id| {
var atom = last_atom orelse continue;
const segment_index = slice.items(.segment_index)[sect_id];
const header = slice.items(.header)[sect_id];

if (mem.eql(u8, header.segName(), "__TEXT")) continue; // __TEXT is non-writable

log.debug("dyld info for {s},{s}", .{ header.segName(), header.sectName() });

const seg = macho_file.segments.items[segment_index];

while (true) {
log.debug(" ATOM(%{d}, '{s}')", .{ atom.sym_index, atom.getName(macho_file) });
const sym = atom.getSymbol(macho_file);
const base_offset = sym.n_value - seg.vmaddr;

for (atom.rebases.items) |offset| {
log.debug(" | rebase at {x}", .{base_offset + offset});
try rebase_pointers.append(.{
.offset = base_offset + offset,
.segment_id = segment_index,
});
}

for (atom.bindings.items) |binding| {
const bind_sym = macho_file.getSymbol(binding.target);
const bind_sym_name = macho_file.getSymbolName(binding.target);
const dylib_ordinal = @divTrunc(
@bitCast(i16, bind_sym.n_desc),
macho.N_SYMBOL_RESOLVER,
);
var flags: u4 = 0;
log.debug(" | bind at {x}, import('{s}') in dylib({d})", .{
binding.offset + base_offset,
bind_sym_name,
dylib_ordinal,
});
if (bind_sym.weakRef()) {
log.debug(" | marking as weak ref ", .{});
flags |= @truncate(u4, macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT);
}
try bind_pointers.append(.{
.offset = binding.offset + base_offset,
.segment_id = segment_index,
.dylib_ordinal = dylib_ordinal,
.name = bind_sym_name,
.bind_flags = flags,
});
}

for (atom.lazy_bindings.items) |binding| {
const bind_sym = macho_file.getSymbol(binding.target);
const bind_sym_name = macho_file.getSymbolName(binding.target);
const dylib_ordinal = @divTrunc(
@bitCast(i16, bind_sym.n_desc),
macho.N_SYMBOL_RESOLVER,
);
var flags: u4 = 0;
log.debug(" | lazy bind at {x} import('{s}') ord({d})", .{
binding.offset + base_offset,
bind_sym_name,
dylib_ordinal,
});
if (bind_sym.weakRef()) {
log.debug(" | marking as weak ref ", .{});
flags |= @truncate(u4, macho.BIND_SYMBOL_FLAGS_WEAK_IMPORT);
}
try lazy_bind_pointers.append(.{
.offset = binding.offset + base_offset,
.segment_id = segment_index,
.dylib_ordinal = dylib_ordinal,
.name = bind_sym_name,
.bind_flags = flags,
});
}

if (atom.prev) |prev| {
atom = prev;
} else break;
}
}

var trie: Trie = .{};
defer trie.deinit(gpa);

{
// TODO handle macho.EXPORT_SYMBOL_FLAGS_REEXPORT and macho.EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER.
log.debug("generating export trie", .{});

const text_segment = macho_file.segments.items[macho_file.text_segment_cmd_index.?];
const base_address = text_segment.vmaddr;

if (macho_file.base.options.output_mode == .Exe) {
for (&[_]SymbolWithLoc{
try macho_file.getEntryPoint(),
macho_file.getGlobal("__mh_execute_header").?,
}) |global| {
const sym = macho_file.getSymbol(global);
const sym_name = macho_file.getSymbolName(global);
log.debug(" (putting '{s}' defined at 0x{x})", .{ sym_name, sym.n_value });
try trie.put(gpa, .{
.name = sym_name,
.vmaddr_offset = sym.n_value - base_address,
.export_flags = macho.EXPORT_SYMBOL_FLAGS_KIND_REGULAR,
});
}
} else {
assert(macho_file.base.options.output_mode == .Lib);
for (macho_file.globals.items) |global| {
const sym = macho_file.getSymbol(global);

if (sym.undf()) continue;
if (!sym.ext()) continue;
if (sym.n_desc == MachO.N_DESC_GCED) continue;

const sym_name = macho_file.getSymbolName(global);
log.debug(" (putting '{s}' defined at 0x{x})", .{ sym_name, sym.n_value });
try trie.put(gpa, .{
.name = sym_name,
.vmaddr_offset = sym.n_value - base_address,
.export_flags = macho.EXPORT_SYMBOL_FLAGS_KIND_REGULAR,
});
}
}

try trie.finalize(gpa);
}

const link_seg = &macho_file.segments.items[macho_file.linkedit_segment_cmd_index.?];
const rebase_off = mem.alignForwardGeneric(u64, link_seg.fileoff, @alignOf(u64));
assert(rebase_off == link_seg.fileoff);
const rebase_size = try bind.rebaseInfoSize(rebase_pointers.items);
log.debug("writing rebase info from 0x{x} to 0x{x}", .{ rebase_off, rebase_off + rebase_size });

const bind_off = mem.alignForwardGeneric(u64, rebase_off + rebase_size, @alignOf(u64));
const bind_size = try bind.bindInfoSize(bind_pointers.items);
log.debug("writing bind info from 0x{x} to 0x{x}", .{ bind_off, bind_off + bind_size });

const lazy_bind_off = mem.alignForwardGeneric(u64, bind_off + bind_size, @alignOf(u64));
const lazy_bind_size = try bind.lazyBindInfoSize(lazy_bind_pointers.items);
log.debug("writing lazy bind info from 0x{x} to 0x{x}", .{ lazy_bind_off, lazy_bind_off + lazy_bind_size });

const export_off = mem.alignForwardGeneric(u64, lazy_bind_off + lazy_bind_size, @alignOf(u64));
const export_size = trie.size;
log.debug("writing export trie from 0x{x} to 0x{x}", .{ export_off, export_off + export_size });

const needed_size = export_off + export_size - rebase_off;
link_seg.filesize = needed_size;

var buffer = try gpa.alloc(u8, math.cast(usize, needed_size) orelse return error.Overflow);
defer gpa.free(buffer);
mem.set(u8, buffer, 0);

var stream = std.io.fixedBufferStream(buffer);
const writer = stream.writer();

try bind.writeRebaseInfo(rebase_pointers.items, writer);
try stream.seekTo(bind_off - rebase_off);

try bind.writeBindInfo(bind_pointers.items, writer);
try stream.seekTo(lazy_bind_off - rebase_off);

try bind.writeLazyBindInfo(lazy_bind_pointers.items, writer);
try stream.seekTo(export_off - rebase_off);

_ = try trie.write(writer);

log.debug("writing dyld info from 0x{x} to 0x{x}", .{
rebase_off,
rebase_off + needed_size,
});

try macho_file.base.file.?.pwriteAll(buffer, rebase_off);
const start = math.cast(usize, lazy_bind_off - rebase_off) orelse return error.Overflow;
const end = start + (math.cast(usize, lazy_bind_size) orelse return error.Overflow);
try populateLazyBindOffsetsInStubHelper(macho_file, buffer[start..end]);

try lc_writer.writeStruct(macho.dyld_info_command{
.cmd = .DYLD_INFO_ONLY,
.cmdsize = @sizeOf(macho.dyld_info_command),
.rebase_off = @intCast(u32, rebase_off),
.rebase_size = @intCast(u32, rebase_size),
.bind_off = @intCast(u32, bind_off),
.bind_size = @intCast(u32, bind_size),
.weak_bind_off = 0,
.weak_bind_size = 0,
.lazy_bind_off = @intCast(u32, lazy_bind_off),
.lazy_bind_size = @intCast(u32, lazy_bind_size),
.export_off = @intCast(u32, export_off),
.export_size = @intCast(u32, export_size),
});
ncmds.* += 1;
}

fn populateLazyBindOffsetsInStubHelper(macho_file: *MachO, buffer: []const u8) !void {
const gpa = macho_file.base.allocator;

const stub_helper_section_index = macho_file.stub_helper_section_index orelse return;
if (macho_file.stub_helper_preamble_atom == null) return;

const section = macho_file.sections.get(stub_helper_section_index);
const last_atom = section.last_atom orelse return;
if (last_atom == macho_file.stub_helper_preamble_atom.?) return; // TODO is this a redundant check?

var table = std.AutoHashMap(i64, *Atom).init(gpa);
defer table.deinit();

{
var stub_atom = last_atom;
var laptr_atom = macho_file.sections.items(.last_atom)[macho_file.la_symbol_ptr_section_index.?].?;
const base_addr = blk: {
const seg = macho_file.segments.items[macho_file.data_segment_cmd_index.?];
break :blk seg.vmaddr;
};

while (true) {
const laptr_off = blk: {
const sym = laptr_atom.getSymbol(macho_file);
break :blk @intCast(i64, sym.n_value - base_addr);
};
try table.putNoClobber(laptr_off, stub_atom);
if (laptr_atom.prev) |prev| {
laptr_atom = prev;
stub_atom = stub_atom.prev.?;
} else break;
}
}

var stream = std.io.fixedBufferStream(buffer);
var reader = stream.reader();
var offsets = std.ArrayList(struct { sym_offset: i64, offset: u32 }).init(gpa);
try offsets.append(.{ .sym_offset = undefined, .offset = 0 });
defer offsets.deinit();
var valid_block = false;

while (true) {
const inst = reader.readByte() catch |err| switch (err) {
error.EndOfStream => break,
};
const opcode: u8 = inst & macho.BIND_OPCODE_MASK;

switch (opcode) {
macho.BIND_OPCODE_DO_BIND => {
valid_block = true;
},
macho.BIND_OPCODE_DONE => {
if (valid_block) {
const offset = try stream.getPos();
try offsets.append(.{ .sym_offset = undefined, .offset = @intCast(u32, offset) });
}
valid_block = false;
},
macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM => {
var next = try reader.readByte();
while (next != @as(u8, 0)) {
next = try reader.readByte();
}
},
macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB => {
var inserted = offsets.pop();
inserted.sym_offset = try std.leb.readILEB128(i64, reader);
try offsets.append(inserted);
},
macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB => {
_ = try std.leb.readULEB128(u64, reader);
},
macho.BIND_OPCODE_SET_ADDEND_SLEB => {
_ = try std.leb.readILEB128(i64, reader);
},
else => {},
}
}

const header = macho_file.sections.items(.header)[stub_helper_section_index];
const stub_offset: u4 = switch (macho_file.base.options.target.cpu.arch) {
.x86_64 => 1,
.aarch64 => 2 * @sizeOf(u32),
else => unreachable,
};
var buf: [@sizeOf(u32)]u8 = undefined;
_ = offsets.pop();

while (offsets.popOrNull()) |bind_offset| {
const atom = table.get(bind_offset.sym_offset).?;
const sym = atom.getSymbol(macho_file);
const file_offset = header.offset + sym.n_value - header.addr + stub_offset;
mem.writeIntLittle(u32, &buf, bind_offset.offset);
log.debug("writing lazy bind offset in stub helper of 0x{x} for symbol {s} at offset 0x{x}", .{
bind_offset.offset,
atom.getName(macho_file),
file_offset,
});
try macho_file.base.file.?.pwriteAll(&buf, file_offset);
}
}
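The __LINKEDIT bookkeeping in writeDyldInfoData above is just consecutive chunks (rebase, bind, lazy bind, export trie) packed back to back on u64 boundaries, sized first and written once. That layout step, in isolation and with placeholder chunk sizes, looks roughly like:

    const std = @import("std");

    /// Compute u64-aligned offsets for consecutive chunks starting at `base`,
    /// returning the total filesize they span (end of last chunk minus aligned base).
    fn layoutChunks(base: u64, sizes: []const u64, offsets: []u64) u64 {
        std.debug.assert(sizes.len == offsets.len and sizes.len > 0);
        var off = std.mem.alignForwardGeneric(u64, base, @alignOf(u64));
        for (sizes) |size, i| {
            offsets[i] = off;
            off = std.mem.alignForwardGeneric(u64, off + size, @alignOf(u64));
        }
        return offsets[sizes.len - 1] + sizes[sizes.len - 1] - offsets[0];
    }

With the offsets known, the real code renders everything into one zeroed buffer through a fixedBufferStream, seeking to each chunk's relative offset, and issues a single pwriteAll at rebase_off.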