macho: simplify writing atoms for stage2

Also, fix a premature exit in `link.File.makeWritable` for the case where we
are running on an M1 machine but executing binaries via Rosetta 2.
This commit is contained in:
Jakub Konka 2022-03-13 14:15:23 +01:00
parent 633c4a2a60
commit 68c224d6ec
2 changed files with 16 additions and 54 deletions

View File

@ -373,17 +373,18 @@ pub const File = struct {
return;
}
if (comptime builtin.target.isDarwin() and builtin.target.cpu.arch == .aarch64) {
if (base.options.target.cpu.arch != .aarch64) return; // If we're not targeting aarch64, nothing to do.
// XNU starting with Big Sur running on arm64 is caching inodes of running binaries.
// Any change to the binary will effectively invalidate the kernel's cache
// resulting in a SIGKILL on each subsequent run. Since when doing incremental
// linking we're modifying a binary in-place, this will end up with the kernel
// killing it on every subsequent run. To circumvent it, we will copy the file
// into a new inode, remove the original file, and rename the copy to match
// the original file. This is super messy, but there doesn't seem any other
// way to please the XNU.
const emit = base.options.emit orelse return;
try emit.directory.handle.copyFile(emit.sub_path, emit.directory.handle, emit.sub_path, .{});
if (base.options.target.cpu.arch == .aarch64) {
// XNU starting with Big Sur running on arm64 is caching inodes of running binaries.
// Any change to the binary will effectively invalidate the kernel's cache
// resulting in a SIGKILL on each subsequent run. Since when doing incremental
// linking we're modifying a binary in-place, this will end up with the kernel
// killing it on every subsequent run. To circumvent it, we will copy the file
// into a new inode, remove the original file, and rename the copy to match
// the original file. This is super messy, but there doesn't seem any other
// way to please the XNU.
const emit = base.options.emit orelse return;
try emit.directory.handle.copyFile(emit.sub_path, emit.directory.handle, emit.sub_path, .{});
}
}
f.close();
base.file = null;

View File

@ -2189,10 +2189,6 @@ fn writePadding(self: *MachO, match: MatchingSection, size: usize, writer: anyty
}
fn writeAtoms(self: *MachO) !void {
var buffer = std.ArrayList(u8).init(self.base.allocator);
defer buffer.deinit();
var file_offset: ?u64 = null;
var it = self.atoms.iterator();
while (it.next()) |entry| {
const match = entry.key_ptr.*;
@ -2205,50 +2201,15 @@ fn writeAtoms(self: *MachO) !void {
log.debug("writing atoms in {s},{s}", .{ sect.segName(), sect.sectName() });
while (atom.prev) |prev| {
atom = prev;
}
while (true) {
if (atom.dirty or self.invalidate_relocs) {
const atom_sym = self.locals.items[atom.local_sym_index];
const padding_size: usize = if (atom.next) |next| blk: {
const next_sym = self.locals.items[next.local_sym_index];
const size = next_sym.n_value - (atom_sym.n_value + atom.size);
break :blk try math.cast(usize, size);
} else 0;
log.debug(" (adding atom {s} to buffer: {})", .{ self.getString(atom_sym.n_strx), atom_sym });
try atom.resolveRelocs(self);
try buffer.appendSlice(atom.code.items);
try buffer.ensureUnusedCapacity(padding_size);
try self.writePadding(match, padding_size, buffer.writer());
if (file_offset == null) {
file_offset = sect.offset + atom_sym.n_value - sect.addr;
}
try self.writeAtom(atom, match);
atom.dirty = false;
} else {
if (file_offset) |off| {
log.debug(" (writing at file offset 0x{x})", .{off});
try self.base.file.?.pwriteAll(buffer.items, off);
}
file_offset = null;
buffer.clearRetainingCapacity();
}
if (atom.next) |next| {
atom = next;
} else {
if (file_offset) |off| {
log.debug(" (writing at file offset 0x{x})", .{off});
try self.base.file.?.pwriteAll(buffer.items, off);
}
file_offset = null;
buffer.clearRetainingCapacity();
break;
}
if (atom.prev) |prev| {
atom = prev;
} else break;
}
}
}