Merge pull request #18793 from ziglang/macho-zig-object

macho: emit relocatable with self-hosted x86_64 backend
Jakub Konka 2024-02-04 09:12:59 +01:00 committed by GitHub
commit 9bf97b8494
11 changed files with 468 additions and 185 deletions
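
The thread running through most hunks below is a change of addressing model: an Atom's `value` field stops holding an absolute virtual address and becomes an offset relative to its output section, with a new `Atom.getAddress` helper recovering the absolute address. A minimal, self-contained sketch of the before/after arithmetic (the concrete numbers are illustrative, not from the source):

```zig
const std = @import("std");

test "section-relative atom values" {
    const sect = .{ .offset = @as(u64, 0x4000), .addr = @as(u64, 0x100004000) };
    // Old model: atom.value held the absolute address, so a file offset
    // required subtracting the section's base address.
    const abs_value: u64 = 0x100004010;
    const old_file_offset = sect.offset + abs_value - sect.addr;
    // New model: atom.value is already an offset from the section start,
    // so the subtraction disappears (see the flushModule hunk below).
    const rel_value: u64 = 0x10;
    const new_file_offset = sect.offset + rel_value;
    try std.testing.expectEqual(old_file_offset, new_file_offset);
}
```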


@ -285,8 +285,7 @@ pub fn createEmpty(
};
try self.d_sym.?.initMetadata(self);
} else {
try self.reportUnexpectedError("TODO: implement generating and emitting __DWARF in .o file", .{});
return error.Unexpected;
@panic("TODO: implement generating and emitting __DWARF in .o file");
},
.code_view => unreachable,
}
@ -597,7 +596,6 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node
try self.allocateSections();
self.allocateSegments();
self.allocateAtoms();
self.allocateSyntheticSymbols();
try self.allocateLinkeditSegment();
@ -615,7 +613,8 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node
if (!atom.flags.alive) continue;
const sect = &self.sections.items(.header)[atom.out_n_sect];
if (sect.isZerofill()) continue;
if (mem.indexOf(u8, sect.segName(), "ZIG") == null) continue; // Non-Zig sections are handled separately
if (!self.isZigSection(atom.out_n_sect)) continue; // Non-Zig sections are handled separately
if (atom.getRelocs(self).len == 0) continue;
// TODO: we will resolve and write ZigObject's TLS data twice:
// once here, and once in writeAtoms
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
@ -636,7 +635,7 @@ pub fn flushModule(self: *MachO, arena: Allocator, prog_node: *std.Progress.Node
return error.FlushFailure;
},
};
const file_offset = sect.offset + atom.value - sect.addr;
const file_offset = sect.offset + atom.value;
atom.resolveRelocs(self, code) catch |err| switch (err) {
error.ResolveFailed => has_resolve_error = true,
else => |e| {
@ -2025,7 +2024,7 @@ pub fn sortSections(self: *MachO) !void {
for (zo.symtab.items(.nlist)) |*sym| {
if (sym.sect()) {
sym.n_sect = backlinks[sym.n_sect];
sym.n_sect = backlinks[sym.n_sect - 1] + 1;
}
}
@ -2232,11 +2231,11 @@ fn initSegments(self: *MachO) !void {
log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_size});
log.warn(" rounding down to 0x{x}", .{aligned_pagezero_size});
}
_ = try self.addSegment("__PAGEZERO", .{ .vmsize = aligned_pagezero_size });
self.pagezero_seg_index = try self.addSegment("__PAGEZERO", .{ .vmsize = aligned_pagezero_size });
}
// __TEXT segment is non-optional
_ = try self.addSegment("__TEXT", .{ .prot = getSegmentProt("__TEXT") });
self.text_seg_index = try self.addSegment("__TEXT", .{ .prot = getSegmentProt("__TEXT") });
// Next, create segments required by sections
for (slice.items(.header)) |header| {
@ -2248,15 +2247,57 @@ fn initSegments(self: *MachO) !void {
}
// Add __LINKEDIT
_ = try self.addSegment("__LINKEDIT", .{ .prot = getSegmentProt("__LINKEDIT") });
self.linkedit_seg_index = try self.addSegment("__LINKEDIT", .{ .prot = getSegmentProt("__LINKEDIT") });
// Sort segments
const sortFn = struct {
fn sortFn(ctx: void, lhs: macho.segment_command_64, rhs: macho.segment_command_64) bool {
return segmentLessThan(ctx, lhs.segName(), rhs.segName());
const Entry = struct {
index: u8,
pub fn lessThan(macho_file: *MachO, lhs: @This(), rhs: @This()) bool {
return segmentLessThan(
{},
macho_file.segments.items[lhs.index].segName(),
macho_file.segments.items[rhs.index].segName(),
);
}
}.sortFn;
mem.sort(macho.segment_command_64, self.segments.items, {}, sortFn);
};
var entries = try std.ArrayList(Entry).initCapacity(gpa, self.segments.items.len);
defer entries.deinit();
for (0..self.segments.items.len) |index| {
entries.appendAssumeCapacity(.{ .index = @intCast(index) });
}
mem.sort(Entry, entries.items, self, Entry.lessThan);
const backlinks = try gpa.alloc(u8, entries.items.len);
defer gpa.free(backlinks);
for (entries.items, 0..) |entry, i| {
backlinks[entry.index] = @intCast(i);
}
const segments = try self.segments.toOwnedSlice(gpa);
defer gpa.free(segments);
try self.segments.ensureTotalCapacityPrecise(gpa, segments.len);
for (entries.items) |sorted| {
self.segments.appendAssumeCapacity(segments[sorted.index]);
}
for (&[_]*?u8{
&self.pagezero_seg_index,
&self.text_seg_index,
&self.linkedit_seg_index,
&self.zig_text_seg_index,
&self.zig_got_seg_index,
&self.zig_const_seg_index,
&self.zig_data_seg_index,
&self.zig_bss_seg_index,
}) |maybe_index| {
if (maybe_index.*) |*index| {
index.* = backlinks[index.*];
}
}
// Attach sections to segments
for (slice.items(.header), slice.items(.segment_id)) |header, *seg_id| {
@ -2277,15 +2318,6 @@ fn initSegments(self: *MachO) !void {
segment.nsects += 1;
seg_id.* = segment_id;
}
self.pagezero_seg_index = self.getSegmentByName("__PAGEZERO");
self.text_seg_index = self.getSegmentByName("__TEXT").?;
self.linkedit_seg_index = self.getSegmentByName("__LINKEDIT").?;
self.zig_text_seg_index = self.getSegmentByName("__TEXT_ZIG");
self.zig_got_seg_index = self.getSegmentByName("__GOT_ZIG");
self.zig_const_seg_index = self.getSegmentByName("__CONST_ZIG");
self.zig_data_seg_index = self.getSegmentByName("__DATA_ZIG");
self.zig_bss_seg_index = self.getSegmentByName("__BSS_ZIG");
}
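
The segment sort above replaces an in-place `mem.sort` over `segment_command_64` values with a sort of index entries plus a `backlinks` table, so every stored segment index (`pagezero_seg_index`, `text_seg_index`, and so on) can be remapped after the shuffle instead of being re-discovered by name. A minimal sketch of that sort-then-remap pattern (the names and ordering here are illustrative, not the real `segmentLessThan` ordering):

```zig
const std = @import("std");

test "sort indices, then remap stored references via backlinks" {
    const names = [_][]const u8{ "__LINKEDIT", "__TEXT", "__PAGEZERO" };
    var order = [_]u8{ 0, 1, 2 };
    std.mem.sort(u8, &order, &names, struct {
        fn lessThan(ctx: *const [3][]const u8, lhs: u8, rhs: u8) bool {
            return std.mem.lessThan(u8, ctx[lhs], ctx[rhs]);
        }
    }.lessThan);
    // backlinks[old_index] == new_index after the sort.
    var backlinks: [3]u8 = undefined;
    for (order, 0..) |old, new| backlinks[old] = @intCast(new);
    // A reference that pointed at old index 1 ("__TEXT") still finds it.
    try std.testing.expectEqualStrings("__TEXT", names[order[backlinks[1]]]);
}
```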
fn allocateSections(self: *MachO) !void {
@ -2300,8 +2332,8 @@ fn allocateSections(self: *MachO) !void {
const page_size = self.getPageSize();
const slice = self.sections.slice();
const last_index = for (slice.items(.header), 0..) |header, i| {
if (mem.indexOf(u8, header.segName(), "ZIG")) |_| break i;
const last_index = for (0..slice.items(.header).len) |i| {
if (self.isZigSection(@intCast(i))) break i;
} else slice.items(.header).len;
for (slice.items(.header)[0..last_index], slice.items(.segment_id)[0..last_index]) |*header, curr_seg_id| {
@ -2354,8 +2386,8 @@ fn allocateSections(self: *MachO) !void {
/// We allocate segments in a separate step to also consider segments that have no sections.
fn allocateSegments(self: *MachO) void {
const first_index = if (self.pagezero_seg_index) |index| index + 1 else 0;
const last_index = for (self.segments.items, 0..) |seg, i| {
if (mem.indexOf(u8, seg.segName(), "ZIG")) |_| break i;
const last_index = for (0..self.segments.items.len) |i| {
if (self.isZigSegment(@intCast(i))) break i;
} else self.segments.items.len;
var vmaddr: u64 = if (self.pagezero_seg_index) |index|
@ -2392,23 +2424,6 @@ fn allocateSegments(self: *MachO) void {
}
}
pub fn allocateAtoms(self: *MachO) void {
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.atoms)) |header, atoms| {
if (atoms.items.len == 0) continue;
for (atoms.items) |atom_index| {
const atom = self.getAtom(atom_index).?;
assert(atom.flags.alive);
atom.value += header.addr;
}
}
for (self.thunks.items) |*thunk| {
const header = self.sections.items(.header)[thunk.out_n_sect];
thunk.value += header.addr;
}
}
fn allocateSyntheticSymbols(self: *MachO) void {
const text_seg = self.getTextSegment();
@ -2603,7 +2618,7 @@ fn writeAtoms(self: *MachO) !void {
for (atoms.items) |atom_index| {
const atom = self.getAtom(atom_index).?;
assert(atom.flags.alive);
const off = math.cast(usize, atom.value - header.addr) orelse return error.Overflow;
const off = math.cast(usize, atom.value) orelse return error.Overflow;
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
try atom.getData(self, buffer[off..][0..atom_size]);
atom.resolveRelocs(self, buffer[off..][0..atom_size]) catch |err| switch (err) {
@ -2617,7 +2632,7 @@ fn writeAtoms(self: *MachO) !void {
for (self.thunks.items) |thunk| {
const header = slice.items(.header)[thunk.out_n_sect];
const offset = thunk.value - header.addr + header.offset;
const offset = thunk.value + header.offset;
const buffer = try gpa.alloc(u8, thunk.size());
defer gpa.free(buffer);
var stream = std.io.fixedBufferStream(buffer);
@ -2825,7 +2840,7 @@ pub fn writeDataInCode(self: *MachO, base_address: u64, off: u32) !u32 {
if (atom.flags.alive) for (in_dices[start_dice..next_dice]) |dice| {
dices.appendAssumeCapacity(.{
.offset = @intCast(atom.value + dice.offset - start_off - base_address),
.offset = @intCast(atom.getAddress(self) + dice.offset - start_off - base_address),
.length = dice.length,
.kind = dice.kind,
});
@ -3276,6 +3291,34 @@ fn detectAllocCollision(self: *MachO, start: u64, size: u64) ?u64 {
return null;
}
fn detectAllocCollisionVirtual(self: *MachO, start: u64, size: u64) ?u64 {
// Conservatively commit one page size as reserved space for the headers, as we
// expect it to grow and everything else to be moved in flush anyhow.
const header_size = self.getPageSize();
if (start < header_size)
return header_size;
const end = start + padToIdeal(size);
for (self.sections.items(.header)) |header| {
const increased_size = padToIdeal(header.size);
const test_end = header.addr + increased_size;
if (end > header.addr and start < test_end) {
return test_end;
}
}
for (self.segments.items) |seg| {
const increased_size = padToIdeal(seg.vmsize);
const test_end = seg.vmaddr +| increased_size;
if (end > seg.vmaddr and start < test_end) {
return test_end;
}
}
return null;
}
fn allocatedSize(self: *MachO, start: u64) u64 {
if (start == 0) return 0;
var min_pos: u64 = std.math.maxInt(u64);
@ -3290,7 +3333,7 @@ fn allocatedSize(self: *MachO, start: u64) u64 {
return min_pos - start;
}
fn allocatedVirtualSize(self: *MachO, start: u64) u64 {
fn allocatedSizeVirtual(self: *MachO, start: u64) u64 {
if (start == 0) return 0;
var min_pos: u64 = std.math.maxInt(u64);
for (self.segments.items) |seg| {
@ -3300,7 +3343,7 @@ fn allocatedVirtualSize(self: *MachO, start: u64) u64 {
return min_pos - start;
}
fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0;
while (self.detectAllocCollision(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
@ -3308,18 +3351,30 @@ fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) u64 {
return start;
}
pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32) u64 {
var start: u64 = 0;
while (self.detectAllocCollisionVirtual(start, object_size)) |item_end| {
start = mem.alignForward(u64, item_end, min_alignment);
}
return start;
}
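
Both `findFreeSpace` and the new `findFreeSpaceVirtual` implement the same first-fit scan: start the candidate window at zero, and whenever it overlaps an existing region, jump to the aligned end of that region and retry. A self-contained sketch of the idea (the `Region` type is an illustration, not from the source):

```zig
const std = @import("std");

const Region = struct { start: u64, end: u64 };

fn findFreeSpaceGeneric(regions: []const Region, size: u64, min_alignment: u64) u64 {
    var start: u64 = 0;
    outer: while (true) {
        const end = start + size;
        for (regions) |r| {
            // Overlap test mirroring detectAllocCollision above.
            if (end > r.start and start < r.end) {
                start = std.mem.alignForward(u64, r.end, min_alignment);
                continue :outer;
            }
        }
        return start;
    }
}

test "first-fit skips occupied regions" {
    const regions = [_]Region{
        .{ .start = 0, .end = 0x1000 },
        .{ .start = 0x1000, .end = 0x2000 },
    };
    try std.testing.expectEqual(@as(u64, 0x2000), findFreeSpaceGeneric(&regions, 0x100, 0x1000));
}
```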
pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
const file = self.base.file.?;
const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
if (amt != size) return error.InputOutput;
}
/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed-out regions when the loader maps zerofill sections.
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
const gpa = self.base.comp.gpa;
const file = self.base.file.?;
const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
if (amt != size) return error.InputOutput;
try self.copyRangeAll(old_offset, new_offset, size);
const size_u = math.cast(usize, size) orelse return error.Overflow;
const zeroes = try gpa.alloc(u8, size_u);
defer gpa.free(zeroes);
@memset(zeroes, 0);
try file.pwriteAll(zeroes, old_offset);
try self.base.file.?.pwriteAll(zeroes, old_offset);
}
const InitMetadataOptions = struct {
@ -3391,8 +3446,6 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
.prot = macho.PROT.READ | macho.PROT.WRITE,
});
}
} else {
@panic("TODO initMetadata when relocatable");
}
const appendSect = struct {
@ -3406,6 +3459,19 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
}
}.appendSect;
const allocSect = struct {
fn allocSect(macho_file: *MachO, sect_id: u8, size: u64) !void {
const sect = &macho_file.sections.items(.header)[sect_id];
const alignment = try math.powi(u32, 2, sect.@"align");
if (!sect.isZerofill()) {
sect.offset = math.cast(u32, macho_file.findFreeSpace(size, alignment)) orelse
return error.Overflow;
}
sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
sect.size = size;
}
}.allocSect;
{
self.zig_text_sect_index = try self.addSection("__TEXT_ZIG", "__text_zig", .{
.alignment = switch (self.getTarget().cpu.arch) {
@ -3415,7 +3481,11 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
},
.flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
});
appendSect(self, self.zig_text_sect_index.?, self.zig_text_seg_index.?);
if (self.base.isRelocatable()) {
try allocSect(self, self.zig_text_sect_index.?, options.program_code_size_hint);
} else {
appendSect(self, self.zig_text_sect_index.?, self.zig_text_seg_index.?);
}
}
if (!self.base.isRelocatable()) {
@ -3427,33 +3497,52 @@ fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
{
self.zig_const_sect_index = try self.addSection("__CONST_ZIG", "__const_zig", .{});
appendSect(self, self.zig_const_sect_index.?, self.zig_const_seg_index.?);
if (self.base.isRelocatable()) {
try allocSect(self, self.zig_const_sect_index.?, 1024);
} else {
appendSect(self, self.zig_const_sect_index.?, self.zig_const_seg_index.?);
}
}
{
self.zig_data_sect_index = try self.addSection("__DATA_ZIG", "__data_zig", .{});
appendSect(self, self.zig_data_sect_index.?, self.zig_data_seg_index.?);
if (self.base.isRelocatable()) {
try allocSect(self, self.zig_data_sect_index.?, 1024);
} else {
appendSect(self, self.zig_data_sect_index.?, self.zig_data_seg_index.?);
}
}
{
self.zig_bss_sect_index = try self.addSection("__BSS_ZIG", "__bss_zig", .{
.flags = macho.S_ZEROFILL,
});
appendSect(self, self.zig_bss_sect_index.?, self.zig_bss_seg_index.?);
if (self.base.isRelocatable()) {
try allocSect(self, self.zig_bss_sect_index.?, 1024);
} else {
appendSect(self, self.zig_bss_sect_index.?, self.zig_bss_seg_index.?);
}
}
}
pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
if (self.base.isRelocatable()) {
try self.growSectionRelocatable(sect_index, needed_size);
} else {
try self.growSectionNonRelocatable(sect_index, needed_size);
}
}
fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.getPageSize());
const alignment = self.getPageSize();
const new_offset = self.findFreeSpace(needed_size, alignment);
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x}", .{
sect.segName(),
@ -3465,15 +3554,19 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
seg.fileoff = new_offset;
}
sect.size = needed_size;
const seg_id = self.sections.items(.segment_id)[sect_index];
const seg = &self.segments.items[seg_id];
seg.fileoff = sect.offset;
if (!sect.isZerofill()) {
seg.filesize = needed_size;
}
const mem_capacity = self.allocatedVirtualSize(seg.vmaddr);
const mem_capacity = self.allocatedSizeVirtual(seg.vmaddr);
if (needed_size > mem_capacity) {
var err = try self.addErrorWithNotes(2);
try err.addMsg(self, "fatal linker error: cannot expand segment seg({d})({s}) in virtual memory", .{
@ -3487,6 +3580,36 @@ pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
seg.vmsize = needed_size;
}
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
const sect = &self.sections.items(.header)[sect_index];
if (needed_size > self.allocatedSize(sect.offset) and !sect.isZerofill()) {
const existing_size = sect.size;
sect.size = 0;
// Must move the entire section.
const alignment = try math.powi(u32, 2, sect.@"align");
const new_offset = self.findFreeSpace(needed_size, alignment);
const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);
log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
sect.segName(),
sect.sectName(),
new_offset,
new_offset + existing_size,
new_addr,
new_addr + existing_size,
});
try self.copyRangeAll(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
}
sect.size = needed_size;
}
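
`allocatedSize` (and its virtual twin) returns the distance from `start` to the nearest region beginning after it, so the grow paths above only relocate a section once `needed_size` crosses that gap. A hedged sketch of the capacity check with made-up numbers, assuming `allocatedSize` computes the minimum over neighbor start offsets beyond `start`:

```zig
const std = @import("std");

test "grow in place until capacity runs out" {
    // Neighbors' start offsets, as detectAllocCollision would see them.
    const neighbor_starts = [_]u64{ 0x2000, 0x6000, 0x8000 };
    const sect_offset: u64 = 0x6000;
    // allocatedSize: distance to the closest start strictly beyond `start`.
    var capacity: u64 = std.math.maxInt(u64);
    for (neighbor_starts) |s| {
        if (s > sect_offset) capacity = @min(capacity, s - sect_offset);
    }
    try std.testing.expectEqual(@as(u64, 0x2000), capacity);
    // needed_size <= capacity: grow in place; otherwise findFreeSpace + copy.
}
```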
pub fn getTarget(self: MachO) std.Target {
return self.base.comp.root_mod.resolved_target.result;
}
@ -3532,6 +3655,36 @@ inline fn requiresThunks(self: MachO) bool {
return self.getTarget().cpu.arch == .aarch64;
}
pub fn isZigSegment(self: MachO, seg_id: u8) bool {
inline for (&[_]?u8{
self.zig_text_seg_index,
self.zig_got_seg_index,
self.zig_const_seg_index,
self.zig_data_seg_index,
self.zig_bss_seg_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == seg_id) return true;
}
}
return false;
}
pub fn isZigSection(self: MachO, sect_id: u8) bool {
inline for (&[_]?u8{
self.zig_text_sect_index,
self.zig_got_sect_index,
self.zig_const_sect_index,
self.zig_data_sect_index,
self.zig_bss_sect_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == sect_id) return true;
}
}
return false;
}
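
`isZigSegment` and `isZigSection` simply test membership of an id in a fixed set of optional indices; the `inline for` unrolls over the comptime-known tuple. A plain runtime equivalent, for illustration only:

```zig
const std = @import("std");

fn containsIndex(indices: []const ?u8, id: u8) bool {
    for (indices) |maybe_index| {
        if (maybe_index) |index| {
            if (index == id) return true;
        }
    }
    return false;
}

test "containsIndex" {
    try std.testing.expect(containsIndex(&.{ null, 3, 7 }, 7));
    try std.testing.expect(!containsIndex(&.{ null, 3, 7 }, 4));
}
```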
pub fn addSegment(self: *MachO, name: []const u8, opts: struct {
vmaddr: u64 = 0,
vmsize: u64 = 0,
@ -4033,10 +4186,13 @@ fn formatSections(
_ = unused_fmt_string;
const slice = self.sections.slice();
for (slice.items(.header), slice.items(.segment_id), 0..) |header, seg_id, i| {
try writer.print("sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x})\n", .{
i, seg_id, header.segName(), header.sectName(), header.offset, header.addr,
header.@"align", header.size,
});
try writer.print(
"sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x}) : relocs({x};{d})\n",
.{
i, seg_id, header.segName(), header.sectName(), header.addr, header.offset,
header.@"align", header.size, header.reloff, header.nreloc,
},
);
}
}


@ -1,4 +1,4 @@
/// Address allocated for this Atom.
/// Address offset allocated for this Atom with respect to its section start address.
value: u64 = 0,
/// Name of this Atom.
@ -84,6 +84,11 @@ pub fn getInputAddress(self: Atom, macho_file: *MachO) u64 {
return self.getInputSection(macho_file).addr + self.off;
}
pub fn getAddress(self: Atom, macho_file: *MachO) u64 {
const header = macho_file.sections.items(.header)[self.out_n_sect];
return header.addr + self.value;
}
pub fn getPriority(self: Atom, macho_file: *MachO) u64 {
const file = self.getFile(macho_file);
return (@as(u64, @intCast(file.getIndex())) << 32) | @as(u64, @intCast(self.n_sect));
@ -114,9 +119,12 @@ pub fn getThunk(self: Atom, macho_file: *MachO) *Thunk {
pub fn initOutputSection(sect: macho.section_64, macho_file: *MachO) !u8 {
const segname, const sectname, const flags = blk: {
const segname = sect.segName();
const sectname = sect.sectName();
if (sect.isCode()) break :blk .{
"__TEXT",
sect.sectName(),
sectname,
macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
};
@ -127,34 +135,29 @@ pub fn initOutputSection(sect: macho.section_64, macho_file: *MachO) !u8 {
=> break :blk .{ "__TEXT", "__const", macho.S_REGULAR },
macho.S_CSTRING_LITERALS => {
if (mem.startsWith(u8, sect.sectName(), "__objc")) break :blk .{
sect.segName(), sect.sectName(), macho.S_REGULAR,
if (mem.startsWith(u8, sectname, "__objc")) break :blk .{
segname, sectname, macho.S_REGULAR,
};
break :blk .{ "__TEXT", "__cstring", macho.S_CSTRING_LITERALS };
},
macho.S_MOD_INIT_FUNC_POINTERS,
macho.S_MOD_TERM_FUNC_POINTERS,
=> break :blk .{ "__DATA_CONST", sect.sectName(), sect.flags },
macho.S_LITERAL_POINTERS,
=> break :blk .{ "__DATA_CONST", sectname, sect.flags },
macho.S_ZEROFILL,
macho.S_GB_ZEROFILL,
macho.S_THREAD_LOCAL_VARIABLES,
macho.S_THREAD_LOCAL_VARIABLE_POINTERS,
macho.S_THREAD_LOCAL_REGULAR,
macho.S_THREAD_LOCAL_ZEROFILL,
=> break :blk .{ sect.segName(), sect.sectName(), sect.flags },
=> break :blk .{ "__DATA", sectname, sect.flags },
macho.S_COALESCED => break :blk .{
sect.segName(),
sect.sectName(),
macho.S_REGULAR,
},
// TODO: do we need this check here?
macho.S_COALESCED => break :blk .{ segname, sectname, macho.S_REGULAR },
macho.S_REGULAR => {
const segname = sect.segName();
const sectname = sect.sectName();
if (mem.eql(u8, segname, "__DATA")) {
if (mem.eql(u8, sectname, "__const") or
mem.eql(u8, sectname, "__cfstring") or
@ -168,7 +171,7 @@ pub fn initOutputSection(sect: macho.section_64, macho_file: *MachO) !u8 {
break :blk .{ segname, sectname, sect.flags };
},
else => break :blk .{ sect.segName(), sect.sectName(), sect.flags },
else => break :blk .{ segname, sectname, sect.flags },
}
};
const osec = macho_file.getSectionByName(segname, sectname) orelse try macho_file.addSection(
@ -189,14 +192,17 @@ pub fn initOutputSection(sect: macho.section_64, macho_file: *MachO) !u8 {
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
const next_value = if (macho_file.getAtom(self.next_index)) |next| next.value else std.math.maxInt(u32);
return next_value - self.value;
const next_addr = if (macho_file.getAtom(self.next_index)) |next|
next.getAddress(macho_file)
else
std.math.maxInt(u32);
return next_addr - self.getAddress(macho_file);
}
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
// No need to keep a free list node for the last block.
const next = macho_file.getAtom(self.next_index) orelse return false;
const cap = next.value - self.value;
const cap = next.getAddress(macho_file) - self.getAddress(macho_file);
const ideal_cap = MachO.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
@ -263,15 +269,15 @@ pub fn allocate(self: *Atom, macho_file: *MachO) !void {
atom_placement = last.atom_index;
break :blk new_start_vaddr;
} else {
break :blk sect.addr;
break :blk 0;
}
};
log.debug("allocated atom({d}) : '{s}' at 0x{x} to 0x{x}", .{
self.atom_index,
self.getName(macho_file),
self.value,
self.value + self.size,
self.getAddress(macho_file),
self.getAddress(macho_file) + self.size,
});
const expand_section = if (atom_placement) |placement_index|
@ -279,7 +285,7 @@ pub fn allocate(self: *Atom, macho_file: *MachO) !void {
else
true;
if (expand_section) {
const needed_size = (self.value + self.size) - sect.addr;
const needed_size = self.value + self.size;
try macho_file.growSection(self.out_n_sect, needed_size);
last_atom_index.* = self.atom_index;
@ -544,7 +550,7 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
const name = self.getName(macho_file);
const relocs = self.getRelocs(macho_file);
relocs_log.debug("{x}: {s}", .{ self.value, name });
relocs_log.debug("{x}: {s}", .{ self.getAddress(macho_file), name });
var has_error = false;
var stream = std.io.fixedBufferStream(buffer);
@ -569,7 +575,7 @@ pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
try macho_file.reportParseError2(
file.getIndex(),
"{s}: 0x{x}: 0x{x}: failed to relax relocation: type {s}, target {s}",
.{ name, self.value, rel.offset, @tagName(rel.type), target },
.{ name, self.getAddress(macho_file), rel.offset, @tagName(rel.type), target },
);
has_error = true;
},
@ -604,7 +610,7 @@ fn resolveRelocInner(
const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow;
const seg_id = macho_file.sections.items(.segment_id)[self.out_n_sect];
const seg = macho_file.segments.items[seg_id];
const P = @as(i64, @intCast(self.value)) + @as(i64, @intCast(rel_offset));
const P = @as(i64, @intCast(self.getAddress(macho_file))) + @as(i64, @intCast(rel_offset));
const A = rel.addend + rel.getRelocAddend(cpu_arch);
const S: i64 = @intCast(rel.getTargetAddress(macho_file));
const G: i64 = @intCast(rel.getGotTargetAddress(macho_file));
@ -690,7 +696,7 @@ fn resolveRelocInner(
.aarch64 => {
const disp: i28 = math.cast(i28, S + A - P) orelse blk: {
const thunk = self.getThunk(macho_file);
const S_: i64 = @intCast(thunk.getAddress(rel.target));
const S_: i64 = @intCast(thunk.getTargetAddress(rel.target, macho_file));
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
var inst = aarch64.Instruction{
@ -919,7 +925,7 @@ const x86_64 = struct {
var err = try macho_file.addErrorWithNotes(2);
try err.addMsg(macho_file, "{s}: 0x{x}: 0x{x}: failed to relax relocation of type {s}", .{
self.getName(macho_file),
self.value,
self.getAddress(macho_file),
rel.offset,
@tagName(rel.type),
});
@ -990,12 +996,11 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: *std.Arra
const cpu_arch = macho_file.getTarget().cpu.arch;
const relocs = self.getRelocs(macho_file);
const sect = macho_file.sections.items(.header)[self.out_n_sect];
var stream = std.io.fixedBufferStream(code);
for (relocs) |rel| {
const rel_offset = rel.offset - self.off;
const r_address: i32 = math.cast(i32, self.value + rel_offset - sect.addr) orelse return error.Overflow;
const r_address: i32 = math.cast(i32, self.value + rel_offset) orelse return error.Overflow;
const r_symbolnum = r_symbolnum: {
const r_symbolnum: u32 = switch (rel.tag) {
.local => rel.getTargetAtom(macho_file).out_n_sect + 1,
@ -1062,7 +1067,7 @@ pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: *std.Arra
.x86_64 => {
if (rel.meta.pcrel) {
if (rel.tag == .local) {
addend -= @as(i64, @intCast(self.value + rel_offset));
addend -= @as(i64, @intCast(self.getAddress(macho_file) + rel_offset));
} else {
addend += 4;
}
@ -1143,10 +1148,10 @@ fn format2(
_ = unused_fmt_string;
const atom = ctx.atom;
const macho_file = ctx.macho_file;
try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : thunk({d})", .{
atom.atom_index, atom.getName(macho_file), atom.value,
atom.out_n_sect, atom.alignment, atom.size,
atom.thunk_index,
try writer.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : nreloc({d}) : thunk({d})", .{
atom.atom_index, atom.getName(macho_file), atom.getAddress(macho_file),
atom.out_n_sect, atom.alignment, atom.size,
atom.getRelocs(macho_file).len, atom.thunk_index,
});
if (!atom.flags.alive) try writer.writeAll(" : [*]");
if (atom.unwind_records.len > 0) {


@ -22,7 +22,7 @@ pub fn getTargetAtom(rel: Relocation, macho_file: *MachO) *Atom {
pub fn getTargetAddress(rel: Relocation, macho_file: *MachO) u64 {
return switch (rel.tag) {
.local => rel.getTargetAtom(macho_file).value,
.local => rel.getTargetAtom(macho_file).getAddress(macho_file),
.@"extern" => rel.getTargetSymbol(macho_file).getAddress(.{}, macho_file),
};
}


@ -118,7 +118,7 @@ pub fn getAddress(symbol: Symbol, opts: struct {
return symbol.getObjcStubsAddress(macho_file);
}
}
if (symbol.getAtom(macho_file)) |atom| return atom.value + symbol.value;
if (symbol.getAtom(macho_file)) |atom| return atom.getAddress(macho_file) + symbol.value;
return symbol.value;
}
@ -145,7 +145,7 @@ pub fn getObjcSelrefsAddress(symbol: Symbol, macho_file: *MachO) u64 {
const extra = symbol.getExtra(macho_file).?;
const atom = macho_file.getAtom(extra.objc_selrefs).?;
assert(atom.flags.alive);
return atom.value;
return atom.getAddress(macho_file);
}
pub fn getTlvPtrAddress(symbol: Symbol, macho_file: *MachO) u64 {


@ -490,12 +490,12 @@ pub const Record = struct {
pub fn getAtomAddress(rec: Record, macho_file: *MachO) u64 {
const atom = rec.getAtom(macho_file);
return atom.value + rec.atom_offset;
return atom.getAddress(macho_file) + rec.atom_offset;
}
pub fn getLsdaAddress(rec: Record, macho_file: *MachO) u64 {
const lsda = rec.getLsdaAtom(macho_file) orelse return 0;
return lsda.value + rec.lsda_offset;
return lsda.getAddress(macho_file) + rec.lsda_offset;
}
pub fn format(


@ -154,7 +154,7 @@ pub fn getAtomData(self: ZigObject, macho_file: *MachO, atom: Atom, buffer: []u8
@memset(buffer, 0);
},
else => {
const file_offset = sect.offset + atom.value - sect.addr;
const file_offset = sect.offset + atom.value;
const amt = try macho_file.base.file.?.preadAll(buffer, file_offset);
if (amt != buffer.len) return error.InputOutput;
},
@ -196,8 +196,10 @@ pub fn resolveSymbols(self: *ZigObject, macho_file: *MachO) void {
const atom = macho_file.getAtom(atom_index).?;
break :blk nlist.n_value - atom.getInputAddress(macho_file);
} else nlist.n_value;
const out_n_sect = if (nlist.sect()) macho_file.getAtom(atom_index).?.out_n_sect else 0;
symbol.value = value;
symbol.atom = atom_index;
symbol.out_n_sect = out_n_sect;
symbol.nlist_idx = nlist_idx;
symbol.file = self.index;
symbol.flags.weak = nlist.weakDef();
@ -715,7 +717,7 @@ fn updateDeclCode(
} else if (code.len < old_size) {
atom.shrink(macho_file);
} else if (macho_file.getAtom(atom.next_index) == null) {
const needed_size = atom.value + code.len - sect.addr;
const needed_size = atom.value + code.len;
sect.size = needed_size;
}
} else {
@ -733,7 +735,7 @@ fn updateDeclCode(
}
if (!sect.isZerofill()) {
const file_offset = sect.offset + atom.value - sect.addr;
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
}
}
@ -1036,7 +1038,7 @@ fn lowerConst(
nlist.n_value = 0;
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value - sect.addr;
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
return .{ .ok = sym_index };
@ -1213,7 +1215,7 @@ fn updateLazySymbol(
}
const sect = macho_file.sections.items(.header)[output_section_index];
const file_offset = sect.offset + atom.value - sect.addr;
const file_offset = sect.offset + atom.value;
try macho_file.base.file.?.pwriteAll(code, file_offset);
}


@ -416,7 +416,7 @@ pub fn write(macho_file: *MachO, buffer: []u8) void {
{
const offset = fde.out_offset + 8;
const saddr = sect.addr + offset;
const taddr = fde.getAtom(macho_file).value;
const taddr = fde.getAtom(macho_file).getAddress(macho_file);
std.mem.writeInt(
i64,
buffer[offset..][0..8],
@ -428,7 +428,7 @@ pub fn write(macho_file: *MachO, buffer: []u8) void {
if (fde.getLsdaAtom(macho_file)) |atom| {
const offset = fde.out_offset + fde.lsda_ptr_offset;
const saddr = sect.addr + offset;
const taddr = atom.value + fde.lsda_offset;
const taddr = atom.getAddress(macho_file) + fde.lsda_offset;
switch (fde.getCie(macho_file).lsda_size.?) {
.p32 => std.mem.writeInt(
i32,
@ -501,7 +501,7 @@ pub fn writeRelocs(macho_file: *MachO, code: []u8, relocs: *std.ArrayList(macho.
{
const offset = fde.out_offset + 8;
const saddr = sect.addr + offset;
const taddr = fde.getAtom(macho_file).value;
const taddr = fde.getAtom(macho_file).getAddress(macho_file);
std.mem.writeInt(
i64,
code[offset..][0..8],
@ -513,7 +513,7 @@ pub fn writeRelocs(macho_file: *MachO, code: []u8, relocs: *std.ArrayList(macho.
if (fde.getLsdaAtom(macho_file)) |atom| {
const offset = fde.out_offset + fde.lsda_ptr_offset;
const saddr = sect.addr + offset;
const taddr = atom.value + fde.lsda_offset;
const taddr = atom.getAddress(macho_file) + fde.lsda_offset;
switch (fde.getCie(macho_file).lsda_size.?) {
.p32 => std.mem.writeInt(
i32,


@ -12,7 +12,7 @@ pub fn flush(macho_file: *MachO, comp: *Compilation, module_obj_path: ?[]const u
if (module_obj_path) |path| try positionals.append(.{ .path = path });
if (positionals.items.len == 1) {
if (macho_file.getZigObject() == null and positionals.items.len == 1) {
// Instead of invoking a full-blown `-r` mode on the input, which sadly will strip all
// debug info segments/sections (this is apparently by design on Apple's part), we copy
// the *only* input file over.
@ -46,50 +46,23 @@ pub fn flush(macho_file: *MachO, comp: *Compilation, module_obj_path: ?[]const u
try macho_file.addUndefinedGlobals();
try macho_file.resolveSymbols();
markExports(macho_file);
claimUnresolved(macho_file);
try markExports(macho_file);
try claimUnresolved(macho_file);
try initOutputSections(macho_file);
try macho_file.sortSections();
try macho_file.addAtomsToSections();
try calcSectionSizes(macho_file);
{
// For relocatable, we only ever need a single segment so create it now.
const prot: macho.vm_prot_t = macho.PROT.READ | macho.PROT.WRITE | macho.PROT.EXEC;
try macho_file.segments.append(gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = MachO.makeStaticString(""),
.maxprot = prot,
.initprot = prot,
});
const seg = &macho_file.segments.items[0];
seg.nsects = @intCast(macho_file.sections.items(.header).len);
seg.cmdsize += seg.nsects * @sizeOf(macho.section_64);
}
try createSegment(macho_file);
try allocateSections(macho_file);
allocateSegment(macho_file);
var off = try allocateSections(macho_file);
{
// Allocate the single segment.
assert(macho_file.segments.items.len == 1);
const seg = &macho_file.segments.items[0];
var vmaddr: u64 = 0;
var fileoff: u64 = load_commands.calcLoadCommandsSizeObject(macho_file) + @sizeOf(macho.mach_header_64);
seg.vmaddr = vmaddr;
seg.fileoff = fileoff;
for (macho_file.sections.items(.header)) |header| {
vmaddr = header.addr + header.size;
if (!header.isZerofill()) {
fileoff = header.offset + header.size;
}
}
seg.vmsize = vmaddr - seg.vmaddr;
seg.filesize = fileoff - seg.fileoff;
}
macho_file.allocateAtoms();
var off = off: {
const seg = macho_file.segments.items[0];
const off = math.cast(u32, seg.fileoff + seg.filesize) orelse return error.Overflow;
break :off mem.alignForward(u32, off, @alignOf(macho.relocation_info));
};
off = allocateSectionsRelocs(macho_file, off);
state_log.debug("{}", .{macho_file.dumpState()});
@ -109,8 +82,13 @@ pub fn flush(macho_file: *MachO, comp: *Compilation, module_obj_path: ?[]const u
try writeHeader(macho_file, ncmds, sizeofcmds);
}
fn markExports(macho_file: *MachO) void {
for (macho_file.objects.items) |index| {
fn markExports(macho_file: *MachO) error{OutOfMemory}!void {
var objects = try std.ArrayList(File.Index).initCapacity(macho_file.base.comp.gpa, macho_file.objects.items.len + 1);
defer objects.deinit();
if (macho_file.getZigObject()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(macho_file.objects.items);
for (objects.items) |index| {
for (macho_file.getFile(index).?.getSymbols()) |sym_index| {
const sym = macho_file.getSymbol(sym_index);
const file = sym.getFile(macho_file) orelse continue;
@ -122,13 +100,22 @@ fn markExports(macho_file: *MachO) void {
}
}
fn claimUnresolved(macho_file: *MachO) void {
for (macho_file.objects.items) |index| {
const object = macho_file.getFile(index).?.object;
fn claimUnresolved(macho_file: *MachO) error{OutOfMemory}!void {
var objects = try std.ArrayList(File.Index).initCapacity(macho_file.base.comp.gpa, macho_file.objects.items.len + 1);
defer objects.deinit();
if (macho_file.getZigObject()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(macho_file.objects.items);
for (object.symbols.items, 0..) |sym_index, i| {
for (objects.items) |index| {
const file = macho_file.getFile(index).?;
for (file.getSymbols(), 0..) |sym_index, i| {
const nlist_idx = @as(Symbol.Index, @intCast(i));
const nlist = object.symtab.items(.nlist)[nlist_idx];
const nlist = switch (file) {
.object => |x| x.symtab.items(.nlist)[nlist_idx],
.zig_object => |x| x.symtab.items(.nlist)[nlist_idx],
else => unreachable,
};
if (!nlist.ext()) continue;
if (!nlist.undf()) continue;
@ -203,6 +190,16 @@ fn calcSectionSizes(macho_file: *MachO) !void {
sect.@"align" = 3;
sect.nreloc = eh_frame.calcNumRelocs(macho_file);
}
if (macho_file.getZigObject()) |zo| {
for (zo.atoms.items) |atom_index| {
const atom = macho_file.getAtom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
const header = &macho_file.sections.items(.header)[atom.out_n_sect];
if (!macho_file.isZigSection(atom.out_n_sect)) continue;
header.nreloc += atom.calcNumRelocs(macho_file);
}
}
}
fn calcCompactUnwindSize(macho_file: *MachO, sect_index: u8) void {
@ -231,30 +228,66 @@ fn calcCompactUnwindSize(macho_file: *MachO, sect_index: u8) void {
sect.@"align" = 3;
}
fn allocateSections(macho_file: *MachO) !u32 {
var fileoff = load_commands.calcLoadCommandsSizeObject(macho_file) + @sizeOf(macho.mach_header_64);
var vmaddr: u64 = 0;
fn allocateSections(macho_file: *MachO) !void {
const slice = macho_file.sections.slice();
for (slice.items(.header)) |*header| {
const alignment = try math.powi(u32, 2, header.@"align");
vmaddr = mem.alignForward(u64, vmaddr, alignment);
header.addr = vmaddr;
vmaddr += header.size;
const last_index = for (0..slice.items(.header).len) |i| {
if (macho_file.isZigSection(@intCast(i))) break i;
} else slice.items(.header).len;
for (slice.items(.header)[0..last_index]) |*header| {
const alignment = try math.powi(u32, 2, header.@"align");
if (!header.isZerofill()) {
fileoff = mem.alignForward(u32, fileoff, alignment);
header.offset = fileoff;
fileoff += @intCast(header.size);
header.offset = math.cast(u32, macho_file.findFreeSpace(header.size, alignment)) orelse
return error.Overflow;
}
header.addr = macho_file.findFreeSpaceVirtual(header.size, alignment);
}
}
fn createSegment(macho_file: *MachO) !void {
const gpa = macho_file.base.comp.gpa;
// For relocatable, we only ever need a single segment so create it now.
const prot: macho.vm_prot_t = macho.PROT.READ | macho.PROT.WRITE | macho.PROT.EXEC;
try macho_file.segments.append(gpa, .{
.cmdsize = @sizeOf(macho.segment_command_64),
.segname = MachO.makeStaticString(""),
.maxprot = prot,
.initprot = prot,
});
const seg = &macho_file.segments.items[0];
seg.nsects = @intCast(macho_file.sections.items(.header).len);
seg.cmdsize += seg.nsects * @sizeOf(macho.section_64);
}
fn allocateSegment(macho_file: *MachO) void {
// Allocate the single segment.
const seg = &macho_file.segments.items[0];
var vmaddr: u64 = 0;
var fileoff: u64 = load_commands.calcLoadCommandsSizeObject(macho_file) + @sizeOf(macho.mach_header_64);
seg.vmaddr = vmaddr;
seg.fileoff = fileoff;
for (macho_file.sections.items(.header)) |header| {
vmaddr = @max(vmaddr, header.addr + header.size);
if (!header.isZerofill()) {
fileoff = @max(fileoff, header.offset + header.size);
}
}
seg.vmsize = vmaddr - seg.vmaddr;
seg.filesize = fileoff - seg.fileoff;
}
fn allocateSectionsRelocs(macho_file: *MachO, off: u32) u32 {
var fileoff = off;
const slice = macho_file.sections.slice();
for (slice.items(.header)) |*header| {
if (header.nreloc == 0) continue;
header.reloff = mem.alignForward(u32, fileoff, @alignOf(macho.relocation_info));
fileoff = header.reloff + header.nreloc * @sizeOf(macho.relocation_info);
}
return fileoff;
}
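
Two details worth noting in the relocatable layout code above: `allocateSegment` now folds section extents with `@max` instead of assuming sections are visited in ascending address order (sections are allocated first-fit now, so ordering is not guaranteed), and relocation tables are placed after all section contents by `allocateSectionsRelocs`. The resulting object layout, sketched as comments with illustrative placement:

```zig
// [mach_header_64]
// [load commands]          size = calcLoadCommandsSizeObject(macho_file)
// [section contents ...]   header.offset, first-fit allocated
// [relocation tables ...]  header.reloff, aligned to @alignOf(macho.relocation_info)
```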
@ -272,9 +305,10 @@ fn writeAtoms(macho_file: *MachO) !void {
const cpu_arch = macho_file.getTarget().cpu.arch;
const slice = macho_file.sections.slice();
for (slice.items(.header), slice.items(.atoms)) |header, atoms| {
for (slice.items(.header), slice.items(.atoms), 0..) |header, atoms, i| {
if (atoms.items.len == 0) continue;
if (header.isZerofill()) continue;
if (macho_file.isZigSection(@intCast(i))) continue;
const size = math.cast(usize, header.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
@ -288,9 +322,9 @@ fn writeAtoms(macho_file: *MachO) !void {
for (atoms.items) |atom_index| {
const atom = macho_file.getAtom(atom_index).?;
assert(atom.flags.alive);
const off = math.cast(usize, atom.value - header.addr) orelse return error.Overflow;
const off = math.cast(usize, atom.value) orelse return error.Overflow;
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
try atom.getFile(macho_file).object.getAtomData(atom.*, code[off..][0..atom_size]);
try atom.getData(macho_file, code[off..][0..atom_size]);
try atom.writeRelocs(macho_file, code[off..][0..atom_size], &relocs);
}
@ -302,6 +336,63 @@ fn writeAtoms(macho_file: *MachO) !void {
try macho_file.base.file.?.pwriteAll(code, header.offset);
try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(relocs.items), header.reloff);
}
if (macho_file.getZigObject()) |zo| {
// TODO: this is ugly; perhaps we should aggregate before?
var relocs = std.AutoArrayHashMap(u8, std.ArrayList(macho.relocation_info)).init(gpa);
defer {
for (relocs.values()) |*list| {
list.deinit();
}
relocs.deinit();
}
for (macho_file.sections.items(.header), 0..) |header, n_sect| {
if (header.isZerofill()) continue;
if (!macho_file.isZigSection(@intCast(n_sect))) continue;
const gop = try relocs.getOrPut(@intCast(n_sect));
if (gop.found_existing) continue;
gop.value_ptr.* = try std.ArrayList(macho.relocation_info).initCapacity(gpa, header.nreloc);
}
for (zo.atoms.items) |atom_index| {
const atom = macho_file.getAtom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
const header = macho_file.sections.items(.header)[atom.out_n_sect];
if (header.isZerofill()) continue;
if (!macho_file.isZigSection(atom.out_n_sect)) continue;
if (atom.getRelocs(macho_file).len == 0) continue;
const atom_size = math.cast(usize, atom.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, atom_size);
defer gpa.free(code);
atom.getData(macho_file, code) catch |err| switch (err) {
error.InputOutput => {
try macho_file.reportUnexpectedError("fetching code for '{s}' failed", .{
atom.getName(macho_file),
});
return error.FlushFailure;
},
else => |e| {
try macho_file.reportUnexpectedError("unexpected error while fetching code for '{s}': {s}", .{
atom.getName(macho_file),
@errorName(e),
});
return error.FlushFailure;
},
};
const file_offset = header.offset + atom.value;
const rels = relocs.getPtr(atom.out_n_sect).?;
try atom.writeRelocs(macho_file, code, rels);
try macho_file.base.file.?.pwriteAll(code, file_offset);
}
for (relocs.keys(), relocs.values()) |sect_id, rels| {
const header = macho_file.sections.items(.header)[sect_id];
assert(rels.items.len == header.nreloc);
mem.sort(macho.relocation_info, rels.items, {}, sortReloc);
try macho_file.base.file.?.pwriteAll(mem.sliceAsBytes(rels.items), header.reloff);
}
}
}
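
`sortReloc` is referenced above but defined outside the shown hunks. Mach-O tooling conventionally emits relocation entries ordered by descending `r_address`, so a plausible comparator, offered as an assumption rather than the actual source, is:

```zig
const macho = @import("std").macho;

fn sortReloc(ctx: void, lhs: macho.relocation_info, rhs: macho.relocation_info) bool {
    _ = ctx;
    // Assumed ordering: descending r_address, the common Mach-O emission order.
    return lhs.r_address > rhs.r_address;
}
```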
fn writeCompactUnwind(macho_file: *MachO) !void {
@ -492,6 +583,7 @@ const assert = std.debug.assert;
const eh_frame = @import("eh_frame.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
@ -501,5 +593,6 @@ const trace = @import("../../tracy.zig").trace;
const Atom = @import("Atom.zig");
const Compilation = @import("../../Compilation.zig");
const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Symbol = @import("Symbol.zig");


@ -66,7 +66,7 @@ fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
if (atom.out_n_sect != target.out_n_sect) return false;
const target_atom = target.getAtom(macho_file).?;
if (target_atom.value == @as(u64, @bitCast(@as(i64, -1)))) return false;
const saddr = @as(i64, @intCast(atom.value)) + @as(i64, @intCast(rel.offset - atom.off));
const saddr = @as(i64, @intCast(atom.getAddress(macho_file))) + @as(i64, @intCast(rel.offset - atom.off));
const taddr: i64 = @intCast(rel.getTargetAddress(macho_file));
_ = math.cast(i28, taddr + rel.addend - saddr) orelse return false;
return true;
@ -85,14 +85,19 @@ pub const Thunk = struct {
return thunk.symbols.keys().len * trampoline_size;
}
pub fn getAddress(thunk: Thunk, sym_index: Symbol.Index) u64 {
return thunk.value + thunk.symbols.getIndex(sym_index).? * trampoline_size;
pub fn getAddress(thunk: Thunk, macho_file: *MachO) u64 {
const header = macho_file.sections.items(.header)[thunk.out_n_sect];
return header.addr + thunk.value;
}
pub fn getTargetAddress(thunk: Thunk, sym_index: Symbol.Index, macho_file: *MachO) u64 {
return thunk.getAddress(macho_file) + thunk.symbols.getIndex(sym_index).? * trampoline_size;
}
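
With `value` now section-relative for thunks too, a trampoline's source address is `header.addr + thunk.value + i * trampoline_size`, and the `adrp` instruction emitted in `write` below only needs the 4 KiB page delta between source and target. A sketch of that page arithmetic (the real computation lives in `Relocation.calcNumberOfPages`, which is not part of this diff):

```zig
const std = @import("std");

fn pageDelta(saddr: u64, taddr: u64) !i21 {
    const spage: i64 = @intCast(saddr >> 12);
    const tpage: i64 = @intCast(taddr >> 12);
    // adrp encodes a signed 21-bit page offset; failure to fit means out of range.
    return std.math.cast(i21, tpage - spage) orelse error.Overflow;
}

test "adrp page delta" {
    try std.testing.expectEqual(@as(i21, 2), try pageDelta(0x1000, 0x3010));
}
```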
pub fn write(thunk: Thunk, macho_file: *MachO, writer: anytype) !void {
for (thunk.symbols.keys(), 0..) |sym_index, i| {
const sym = macho_file.getSymbol(sym_index);
const saddr = thunk.value + i * trampoline_size;
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try Relocation.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);


@ -27,14 +27,23 @@ pub const Options = struct {
optimize: std.builtin.OptimizeMode = .Debug,
use_llvm: bool = true,
use_lld: bool = false,
strip: ?bool = null,
};
pub fn addTestStep(b: *Build, prefix: []const u8, opts: Options) *Step {
const target = opts.target.result.zigTriple(b.allocator) catch @panic("OOM");
const optimize = @tagName(opts.optimize);
const use_llvm = if (opts.use_llvm) "llvm" else "no-llvm";
const name = std.fmt.allocPrint(b.allocator, "test-{s}-{s}-{s}-{s}", .{
prefix, target, optimize, use_llvm,
const use_lld = if (opts.use_lld) "lld" else "no-lld";
if (opts.strip) |strip| {
const s = if (strip) "strip" else "no-strip";
const name = std.fmt.allocPrint(b.allocator, "test-{s}-{s}-{s}-{s}-{s}-{s}", .{
prefix, target, optimize, use_llvm, use_lld, s,
}) catch @panic("OOM");
return b.step(name, "");
}
const name = std.fmt.allocPrint(b.allocator, "test-{s}-{s}-{s}-{s}-{s}", .{
prefix, target, optimize, use_llvm, use_lld,
}) catch @panic("OOM");
return b.step(name, "");
}
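
With the new `use_lld` and optional `strip` components, step names gain up to two extra suffixes. A small check of the format used above (the target triple string here is illustrative, not the exact output of `zigTriple`):

```zig
const std = @import("std");

test "test step naming with strip" {
    var buf: [128]u8 = undefined;
    const name = try std.fmt.bufPrint(&buf, "test-{s}-{s}-{s}-{s}-{s}-{s}", .{
        "macho-relocatable-zig", "x86_64-macos", "Debug", "no-llvm", "no-lld", "strip",
    });
    try std.testing.expectEqualStrings(
        "test-macho-relocatable-zig-x86_64-macos-Debug-no-llvm-no-lld-strip",
        name,
    );
}
```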
@ -87,7 +96,7 @@ fn addCompileStep(
break :rsf b.addWriteFiles().add("a.zig", bytes);
},
.pic = overlay.pic,
.strip = overlay.strip,
.strip = if (base.strip) |s| s else overlay.strip,
},
.use_llvm = base.use_llvm,
.use_lld = base.use_lld,


@ -15,6 +15,11 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
.os_tag = .macos,
});
// Exercise linker with self-hosted backend (no LLVM)
macho_step.dependOn(testHelloZig(b, .{ .use_llvm = false, .target = x86_64_target }));
macho_step.dependOn(testRelocatableZig(b, .{ .use_llvm = false, .strip = true, .target = x86_64_target }));
// Exercise linker with LLVM backend
macho_step.dependOn(testDeadStrip(b, .{ .target = default_target }));
macho_step.dependOn(testEmptyObject(b, .{ .target = default_target }));
macho_step.dependOn(testEmptyZig(b, .{ .target = default_target }));
@ -1234,7 +1239,14 @@ fn testRelocatableZig(b: *Build, opts: Options) *Step {
const run = addRunArtifact(exe);
run.addCheck(.{ .expect_stderr_match = b.dupe("incrFoo=1") });
run.addCheck(.{ .expect_stderr_match = b.dupe("decrFoo=0") });
run.addCheck(.{ .expect_stderr_match = b.dupe("panic: Oh no!") });
if (opts.use_llvm) {
// TODO: enable this once self-hosted can print panics and stack traces
run.addCheck(.{ .expect_stderr_match = b.dupe("panic: Oh no!") });
}
if (builtin.os.tag == .macos) {
const signal: u32 = if (opts.use_llvm) std.os.darwin.SIG.ABRT else std.os.darwin.SIG.TRAP;
run.addCheck(.{ .expect_term = .{ .Signal = signal } });
}
test_step.dependOn(&run.step);
return test_step;
@ -2307,6 +2319,7 @@ fn addTestStep(b: *Build, comptime prefix: []const u8, opts: Options) *Step {
return link.addTestStep(b, "macho-" ++ prefix, opts);
}
const builtin = @import("builtin");
const addAsmSourceBytes = link.addAsmSourceBytes;
const addCSourceBytes = link.addCSourceBytes;
const addRunArtifact = link.addRunArtifact;