zld: parse relocs per generated TextBlock

Jakub Konka 2021-07-06 16:31:47 +02:00
parent 54888c6f46
commit 15b85df3dd
5 changed files with 662 additions and 622 deletions
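In outline, the commit replaces the deferred "TODO parse relocs" pass with per-block parsing: each generated TextBlock filters the owning section's relocations down to its own address range and hands them to the per-architecture parser, with offsets rebased to be block-local. A condensed sketch of the new flow (names taken from the diff below; signatures simplified):

// Hedged sketch mirroring TextBlockParser.next():
const block_relocs = filterRelocs(section_relocs, start_addr, end_addr);
if (block_relocs.len > 0) {
    // start_addr doubles as base_addr so r_address becomes block-relative.
    try object.parseRelocs(zld, block_relocs, block, start_addr);
}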

Object.zig

@ -53,6 +53,7 @@ initializers: std.ArrayListUnmanaged(u32) = .{},
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
symbols: std.ArrayListUnmanaged(*Symbol) = .{},
sections_as_symbols: std.AutoHashMapUnmanaged(u8, *Symbol) = .{},
const DebugInfo = struct {
inner: dwarf.DwarfInfo,
@ -160,6 +161,7 @@ pub fn deinit(self: *Object) void {
self.symtab.deinit(self.allocator);
self.strtab.deinit(self.allocator);
self.symbols.deinit(self.allocator);
self.sections_as_symbols.deinit(self.allocator);
if (self.name) |n| {
self.allocator.free(n);
@ -312,7 +314,7 @@ fn filterRelocs(relocs: []macho.relocation_info, start: u64, end: u64) []macho.r
while (true) {
var change = false;
if (relocs[start_id].r_address > end) {
if (relocs[start_id].r_address >= end) {
start_id += 1;
change = true;
}
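The `>` to `>=` change makes the filtered range half-open: a relocation whose r_address equals `end` belongs to the next block, not this one. A sketch of the assumed invariant after the fix (using std.debug.assert; filterRelocs itself is only partially shown in this hunk):

// Every record in the returned slice satisfies start <= r_address < end.
const subset = filterRelocs(relocs, start_addr, end_addr);
for (subset) |r| {
    assert(@intCast(u64, r.r_address) >= start_addr);
    assert(@intCast(u64, r.r_address) < end_addr);
}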
@ -332,6 +334,7 @@ const TextBlockParser = struct {
allocator: *Allocator,
section: macho.section_64,
code: []u8,
relocs: []macho.relocation_info,
object: *Object,
zld: *Zld,
nlists: []NlistWithIndex,
@ -405,6 +408,7 @@ const TextBlockParser = struct {
const start_addr = senior_nlist.nlist.n_value - self.section.addr;
const end_addr = if (next_nlist) |n| n.nlist.n_value - self.section.addr else self.section.size;
log.warn("{} - {}", .{ start_addr, end_addr });
const code = self.code[start_addr..end_addr];
const size = code.len;
@ -424,11 +428,18 @@ const TextBlockParser = struct {
block.* = .{
.local_sym_index = senior_nlist.index,
.aliases = alias_only_indices,
.code = code,
.references = std.AutoArrayHashMap(u32, void).init(self.allocator),
.code = try self.allocator.dupe(u8, code),
.relocs = std.ArrayList(*Relocation).init(self.allocator),
.size = size,
.alignment = self.section.@"align",
};
const relocs = filterRelocs(self.relocs, start_addr, end_addr);
if (relocs.len > 0) {
try self.object.parseRelocs(self.zld, relocs, block, start_addr);
}
self.index += 1;
return block;
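For orientation, next() is presumably consumed in a loop from parseTextBlocks; an assumed driver pattern, simplified from the block-chaining code shown further down in this file:

while (try parser.next()) |block| {
    // Chain the block into the global text-block list.
    if (zld.last_text_block) |last| {
        last.next = block;
        block.prev = last;
    }
    zld.last_text_block = block;
}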
@ -457,7 +468,8 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
sort.sort(NlistWithIndex, sorted_nlists.items, {}, NlistWithIndex.lessThan);
for (seg.sections.items) |sect, sect_id| {
for (seg.sections.items) |sect, id| {
const sect_id = @intCast(u8, id);
log.warn("putting section '{s},{s}' as a TextBlock", .{
segmentName(sect),
sectionName(sect),
@ -474,6 +486,12 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
defer self.allocator.free(code);
_ = try self.file.?.preadAll(code, sect.offset);
// Read section's list of relocations
var raw_relocs = try self.allocator.alloc(u8, sect.nreloc * @sizeOf(macho.relocation_info));
defer self.allocator.free(raw_relocs);
_ = try self.file.?.preadAll(raw_relocs, sect.reloff);
const relocs = mem.bytesAsSlice(macho.relocation_info, raw_relocs);
// Is there any padding between symbols within the section?
const is_padded = self.header.?.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0;
@ -481,7 +499,7 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
if (is_padded) blocks: {
const filtered_nlists = NlistWithIndex.filterInSection(
sorted_nlists.items,
@intCast(u8, sect_id + 1),
sect_id + 1,
);
if (filtered_nlists.len == 0) break :blocks;
@ -490,6 +508,7 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
.allocator = self.allocator,
.section = sect,
.code = code,
.relocs = relocs,
.object = self,
.zld = zld,
.nlists = filtered_nlists,
@ -518,8 +537,6 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
}
}
// TODO parse relocs
if (zld.last_text_block) |last| {
last.next = block;
block.prev = last;
@ -531,14 +548,19 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
}
// Since there is no symbol to refer to this block, we create
// a temp one.
const name = try std.fmt.allocPrint(self.allocator, "l_{s}_{s}_{s}", .{
self.name.?,
segmentName(sect),
sectionName(sect),
});
defer self.allocator.free(name);
const symbol = try Symbol.new(self.allocator, name);
// a temp one, unless we already did that when working out the relocations
// of other text blocks.
const symbol = self.sections_as_symbols.get(sect_id) orelse symbol: {
const name = try std.fmt.allocPrint(self.allocator, "l_{s}_{s}_{s}", .{
self.name.?,
segmentName(sect),
sectionName(sect),
});
defer self.allocator.free(name);
const symbol = try Symbol.new(self.allocator, name);
try self.sections_as_symbols.putNoClobber(self.allocator, sect_id, symbol);
break :symbol symbol;
};
symbol.payload = .{
.regular = .{
.linkage = .translation_unit,
@ -555,12 +577,16 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
block.* = .{
.local_sym_index = local_sym_index,
.code = code,
.references = std.AutoArrayHashMap(u32, void).init(self.allocator),
.code = try self.allocator.dupe(u8, code),
.relocs = std.ArrayList(*Relocation).init(self.allocator),
.size = sect.size,
.alignment = sect.@"align",
};
// TODO parse relocs
if (relocs.len > 0) {
try self.parseRelocs(zld, relocs, block, 0);
}
if (zld.last_text_block) |last| {
last.next = block;
@ -571,6 +597,70 @@ pub fn parseTextBlocks(self: *Object, zld: *Zld) !void {
}
}
fn parseRelocs(
self: *Object,
zld: *Zld,
relocs: []const macho.relocation_info,
block: *TextBlock,
base_addr: u64,
) !void {
var it = reloc.RelocIterator{
.buffer = relocs,
};
switch (self.arch.?) {
.aarch64 => {
var parser = reloc.aarch64.Parser{
.object = self,
.zld = zld,
.it = &it,
.block = block,
.base_addr = base_addr,
};
try parser.parse();
},
.x86_64 => {
var parser = reloc.x86_64.Parser{
.object = self,
.zld = zld,
.it = &it,
.block = block,
.base_addr = base_addr,
};
try parser.parse();
},
else => unreachable,
}
}
pub fn symbolFromReloc(self: *Object, rel: macho.relocation_info) !*Symbol {
const symbol = blk: {
if (rel.r_extern == 1) {
break :blk self.symbols.items[rel.r_symbolnum];
} else {
const sect_id = @intCast(u8, rel.r_symbolnum - 1);
const symbol = self.sections_as_symbols.get(sect_id) orelse symbol: {
// We need a valid pointer to Symbol even if there is no symbol, so we create a
// dummy symbol upfront which will later be populated when a TextBlock is
// created from the target section.
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
const sect = seg.sections.items[sect_id];
const name = try std.fmt.allocPrint(self.allocator, "l_{s}_{s}_{s}", .{
self.name.?,
segmentName(sect),
sectionName(sect),
});
defer self.allocator.free(name);
const symbol = try Symbol.new(self.allocator, name);
try self.sections_as_symbols.putNoClobber(self.allocator, sect_id, symbol);
break :symbol symbol;
};
break :blk symbol;
}
};
return symbol;
}
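symbolFromReloc encodes the Mach-O convention for relocation targets: with r_extern == 1, r_symbolnum indexes the object's symbol table; with r_extern == 0, it is a 1-based section ordinal, hence the `- 1`. The section case funnels through sections_as_symbols so the placeholder agrees with the TextBlock later created for that section. A small usage sketch (std.debug.assert assumed):

// Both branches yield a valid *Symbol for the relocation target.
const target = try object.symbolFromReloc(rel);
if (rel.r_extern == 0) {
    const ordinal = @intCast(u8, rel.r_symbolnum - 1);
    // Every subsequent lookup returns the same placeholder.
    assert(object.sections_as_symbols.get(ordinal).? == target);
}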
pub fn parseInitializers(self: *Object) !void {
const index = self.mod_init_func_section_index orelse return;
const section = self.sections.items[index];

Zld.zig

@ -135,9 +135,9 @@ const TlvOffset = struct {
pub const TextBlock = struct {
local_sym_index: u32,
aliases: ?[]u32 = null,
references: ?[]u32 = null,
references: std.AutoArrayHashMap(u32, void),
code: []u8,
relocs: ?[]*Relocation = null,
relocs: std.ArrayList(*Relocation),
size: u64,
alignment: u32,
next: ?*TextBlock = null,
@ -147,15 +147,8 @@ pub const TextBlock = struct {
if (block.aliases) |aliases| {
allocator.free(aliases);
}
if (block.references) |references| {
allocator.free(references);
}
for (block.relocs.items) |reloc| {
allocator.destroy(reloc);
}
if (block.relocs) |relocs| {
allocator.free(relocs);
}
block.relocs.deinit();
block.references.deinit();
allocator.free(block.code);
}
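Since the parser now dupes the section bytes into each block (`try self.allocator.dupe(u8, code)` in Object.zig above), deinit owns block.code and must free it, in addition to destroying each heap-allocated Relocation. A lifecycle sketch, assuming deinit takes the allocator as implied by the body shown:

var block = try allocator.create(TextBlock);
block.* = .{
    .local_sym_index = sym_index,
    .references = std.AutoArrayHashMap(u32, void).init(allocator),
    .code = try allocator.dupe(u8, section_code),
    .relocs = std.ArrayList(*Relocation).init(allocator),
    .size = section_code.len,
    .alignment = alignment,
};
defer {
    block.deinit(allocator); // frees code, destroys relocs, deinits lists
    allocator.destroy(block);
}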
@ -168,12 +161,19 @@ pub const TextBlock = struct {
log.warn(" | {}: {}", .{ index, zld.locals.items[index] });
}
}
if (self.references) |references| {
if (self.references.count() > 0) {
log.warn(" | References:", .{});
for (references) |index| {
for (self.references.keys()) |index| {
log.warn(" | {}: {}", .{ index, zld.locals.items[index] });
}
}
log.warn(" | code.len = {}", .{self.code.len});
if (self.relocs.items.len > 0) {
log.warn("Relocations:", .{});
for (self.relocs.items) |rel| {
log.warn(" | {}", .{rel});
}
}
log.warn(" | size = {}", .{self.size});
log.warn(" | align = {}", .{self.alignment});
}
@ -280,7 +280,6 @@ pub fn link(self: *Zld, files: []const []const u8, output: Output, args: LinkArg
try self.resolveSymbols();
try self.parseTextBlocks();
return error.TODO;
// try self.resolveStubsAndGotEntries();
// try self.updateMetadata();
// try self.sortSections();
// try self.addRpaths(args.rpaths);
@ -1603,7 +1602,9 @@ fn resolveSymbols(self: *Zld) !void {
block.* = .{
.local_sym_index = local_sym_index,
.references = std.AutoArrayHashMap(u32, void).init(self.allocator),
.code = code,
.relocs = std.ArrayList(*Relocation).init(self.allocator),
.size = size,
.alignment = alignment,
};
@ -1624,6 +1625,9 @@ fn resolveSymbols(self: *Zld) !void {
{
// Put dyld_stub_binder as an undefined special symbol.
const symbol = try Symbol.new(self.allocator, "dyld_stub_binder");
const index = @intCast(u32, self.got_entries.items.len);
symbol.got_index = index;
try self.got_entries.append(self.allocator, symbol);
try self.globals.putNoClobber(self.allocator, symbol.name, symbol);
}
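dyld_stub_binder is now given a GOT slot eagerly during symbol resolution; the index is simply the current length of got_entries. The same pattern recurs inline in the per-arch parsers later in this diff. A hypothetical helper capturing it (the diff itself inlines this at each site):

fn addGotEntry(zld: *Zld, sym: *Symbol) !void {
    if (sym.got_index != null) return; // slot already assigned
    sym.got_index = @intCast(u32, zld.got_entries.items.len);
    try zld.got_entries.append(zld.allocator, sym);
}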
@ -1699,54 +1703,6 @@ fn parseTextBlocks(self: *Zld) !void {
}
}
fn resolveStubsAndGotEntries(self: *Zld) !void {
for (self.objects.items) |object| {
log.debug("resolving stubs and got entries from {s}", .{object.name});
for (object.sections.items) |sect| {
const relocs = sect.relocs orelse continue;
for (relocs) |rel| {
switch (rel.@"type") {
.unsigned => continue,
.got_page, .got_page_off, .got_load, .got, .pointer_to_got => {
const sym = object.symbols.items[rel.target.symbol];
if (sym.got_index != null) continue;
const index = @intCast(u32, self.got_entries.items.len);
sym.got_index = index;
try self.got_entries.append(self.allocator, sym);
log.debug(" | found GOT entry {s}: {*}", .{ sym.name, sym });
},
else => {
if (rel.target != .symbol) continue;
const sym = object.symbols.items[rel.target.symbol];
assert(sym.payload != .undef);
if (sym.stubs_index != null) continue;
if (sym.payload != .proxy) continue;
const index = @intCast(u32, self.stubs.items.len);
sym.stubs_index = index;
try self.stubs.append(self.allocator, sym);
log.debug(" | found stub {s}: {*}", .{ sym.name, sym });
},
}
}
}
}
// Finally, put dyld_stub_binder as the final GOT entry
const sym = self.globals.get("dyld_stub_binder") orelse unreachable;
const index = @intCast(u32, self.got_entries.items.len);
sym.got_index = index;
try self.got_entries.append(self.allocator, sym);
log.debug(" | found GOT entry {s}: {*}", .{ sym.name, sym });
}
fn resolveRelocsAndWriteSections(self: *Zld) !void {
for (self.objects.items) |object| {
log.debug("relocating object {s}", .{object.name});

reloc.zig

@ -6,16 +6,18 @@ const math = std.math;
const mem = std.mem;
const meta = std.meta;
const aarch64 = @import("reloc/aarch64.zig");
const x86_64 = @import("reloc/x86_64.zig");
pub const aarch64 = @import("reloc/aarch64.zig");
pub const x86_64 = @import("reloc/x86_64.zig");
const Allocator = mem.Allocator;
const Symbol = @import("Symbol.zig");
const TextBlock = @import("Zld.zig").TextBlock;
pub const Relocation = struct {
@"type": Type,
code: []u8,
offset: u32,
target: Target,
block: *TextBlock,
target: *Symbol,
pub fn cast(base: *Relocation, comptime T: type) ?*T {
if (base.@"type" != T.base_type)
@ -24,43 +26,24 @@ pub const Relocation = struct {
return @fieldParentPtr(T, "base", base);
}
pub const ResolveArgs = struct {
source_addr: u64,
target_addr: u64,
subtractor: ?u64 = null,
source_source_sect_addr: ?u64 = null,
source_target_sect_addr: ?u64 = null,
};
pub fn resolve(base: *Relocation, args: ResolveArgs) !void {
log.debug("{s}", .{base.@"type"});
log.debug(" | offset 0x{x}", .{base.offset});
log.debug(" | source address 0x{x}", .{args.source_addr});
log.debug(" | target address 0x{x}", .{args.target_addr});
if (args.subtractor) |sub|
log.debug(" | subtractor address 0x{x}", .{sub});
if (args.source_source_sect_addr) |addr|
log.debug(" | source source section address 0x{x}", .{addr});
if (args.source_target_sect_addr) |addr|
log.debug(" | source target section address 0x{x}", .{addr});
return switch (base.@"type") {
.unsigned => @fieldParentPtr(Unsigned, "base", base).resolve(args),
.branch_aarch64 => @fieldParentPtr(aarch64.Branch, "base", base).resolve(args),
.page => @fieldParentPtr(aarch64.Page, "base", base).resolve(args),
.page_off => @fieldParentPtr(aarch64.PageOff, "base", base).resolve(args),
.got_page => @fieldParentPtr(aarch64.GotPage, "base", base).resolve(args),
.got_page_off => @fieldParentPtr(aarch64.GotPageOff, "base", base).resolve(args),
.pointer_to_got => @fieldParentPtr(aarch64.PointerToGot, "base", base).resolve(args),
.tlvp_page => @fieldParentPtr(aarch64.TlvpPage, "base", base).resolve(args),
.tlvp_page_off => @fieldParentPtr(aarch64.TlvpPageOff, "base", base).resolve(args),
.branch_x86_64 => @fieldParentPtr(x86_64.Branch, "base", base).resolve(args),
.signed => @fieldParentPtr(x86_64.Signed, "base", base).resolve(args),
.got_load => @fieldParentPtr(x86_64.GotLoad, "base", base).resolve(args),
.got => @fieldParentPtr(x86_64.Got, "base", base).resolve(args),
.tlv => @fieldParentPtr(x86_64.Tlv, "base", base).resolve(args),
};
}
// pub fn resolve(base: *Relocation) !void {
// return switch (base.@"type") {
// .unsigned => @fieldParentPtr(Unsigned, "base", base).resolve(),
// .branch_aarch64 => @fieldParentPtr(aarch64.Branch, "base", base).resolve(),
// .page => @fieldParentPtr(aarch64.Page, "base", base).resolve(),
// .page_off => @fieldParentPtr(aarch64.PageOff, "base", base).resolve(),
// .got_page => @fieldParentPtr(aarch64.GotPage, "base", base).resolve(),
// .got_page_off => @fieldParentPtr(aarch64.GotPageOff, "base", base).resolve(),
// .pointer_to_got => @fieldParentPtr(aarch64.PointerToGot, "base", base).resolve(),
// .tlvp_page => @fieldParentPtr(aarch64.TlvpPage, "base", base).resolve(),
// .tlvp_page_off => @fieldParentPtr(aarch64.TlvpPageOff, "base", base).resolve(),
// .branch_x86_64 => @fieldParentPtr(x86_64.Branch, "base", base).resolve(),
// .signed => @fieldParentPtr(x86_64.Signed, "base", base).resolve(),
// .got_load => @fieldParentPtr(x86_64.GotLoad, "base", base).resolve(),
// .got => @fieldParentPtr(x86_64.Got, "base", base).resolve(),
// .tlv => @fieldParentPtr(x86_64.Tlv, "base", base).resolve(),
// };
// }
pub const Type = enum {
branch_aarch64,
@ -79,23 +62,37 @@ pub const Relocation = struct {
tlv,
};
pub const Target = union(enum) {
symbol: u32,
section: u16,
pub fn format(base: *const Relocation, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
try std.fmt.format(writer, "Relocation {{ ", .{});
try std.fmt.format(writer, ".type = {s}, ", .{base.@"type"});
try std.fmt.format(writer, ".offset = {}, ", .{base.offset});
try std.fmt.format(writer, ".block = {}", .{base.block.local_sym_index});
try std.fmt.format(writer, ".target = {}, ", .{base.target});
pub fn fromReloc(reloc: macho.relocation_info) Target {
return if (reloc.r_extern == 1) .{
.symbol = reloc.r_symbolnum,
} else .{
.section = @intCast(u16, reloc.r_symbolnum - 1),
};
}
};
try switch (base.@"type") {
.unsigned => @fieldParentPtr(Unsigned, "base", base).format(fmt, options, writer),
.branch_aarch64 => @fieldParentPtr(aarch64.Branch, "base", base).format(fmt, options, writer),
.page => @fieldParentPtr(aarch64.Page, "base", base).format(fmt, options, writer),
.page_off => @fieldParentPtr(aarch64.PageOff, "base", base).format(fmt, options, writer),
.got_page => @fieldParentPtr(aarch64.GotPage, "base", base).format(fmt, options, writer),
.got_page_off => @fieldParentPtr(aarch64.GotPageOff, "base", base).format(fmt, options, writer),
.pointer_to_got => @fieldParentPtr(aarch64.PointerToGot, "base", base).format(fmt, options, writer),
.tlvp_page => @fieldParentPtr(aarch64.TlvpPage, "base", base).format(fmt, options, writer),
.tlvp_page_off => @fieldParentPtr(aarch64.TlvpPageOff, "base", base).format(fmt, options, writer),
.branch_x86_64 => @fieldParentPtr(x86_64.Branch, "base", base).format(fmt, options, writer),
.signed => @fieldParentPtr(x86_64.Signed, "base", base).format(fmt, options, writer),
.got_load => @fieldParentPtr(x86_64.GotLoad, "base", base).format(fmt, options, writer),
.got => @fieldParentPtr(x86_64.Got, "base", base).format(fmt, options, writer),
.tlv => @fieldParentPtr(x86_64.Tlv, "base", base).format(fmt, options, writer),
};
try std.fmt.format(writer, "}}", .{});
}
};
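Relocation uses the embedded-base pattern throughout: each concrete type (Branch, Page, Unsigned, ...) stores a `base: Relocation`, and both `cast` and the `format` switch above recover the outer struct via @fieldParentPtr. A minimal self-contained sketch of the pattern, with hypothetical names:

const Tag = enum { a, b };

const Base = struct {
    tag: Tag,

    fn cast(base: *Base, comptime T: type) ?*T {
        if (base.tag != T.base_tag) return null;
        return @fieldParentPtr(T, "base", base);
    }
};

const A = struct {
    base: Base,
    payload: u32,

    pub const base_tag: Tag = .a;
};

// Given a *Base known to be embedded in some concrete type:
// if (base.cast(A)) |a| { ... use a.payload ... }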
pub const Unsigned = struct {
base: Relocation,
subtractor: ?Relocation.Target = null,
subtractor: ?*Symbol = null,
/// Addend embedded directly in the relocation slot
addend: i64,
/// Extracted from r_length:
@ -106,75 +103,47 @@ pub const Unsigned = struct {
pub const base_type: Relocation.Type = .unsigned;
pub fn resolve(unsigned: Unsigned, args: Relocation.ResolveArgs) !void {
const addend = if (unsigned.base.target == .section)
unsigned.addend - @intCast(i64, args.source_target_sect_addr.?)
else
unsigned.addend;
// pub fn resolve(unsigned: Unsigned) !void {
// const addend = if (unsigned.base.target == .section)
// unsigned.addend - @intCast(i64, args.source_target_sect_addr.?)
// else
// unsigned.addend;
const result = if (args.subtractor) |subtractor|
@intCast(i64, args.target_addr) - @intCast(i64, subtractor) + addend
else
@intCast(i64, args.target_addr) + addend;
// const result = if (args.subtractor) |subtractor|
// @intCast(i64, args.target_addr) - @intCast(i64, subtractor) + addend
// else
// @intCast(i64, args.target_addr) + addend;
log.debug(" | calculated addend 0x{x}", .{addend});
log.debug(" | calculated unsigned value 0x{x}", .{result});
// log.debug(" | calculated addend 0x{x}", .{addend});
// log.debug(" | calculated unsigned value 0x{x}", .{result});
if (unsigned.is_64bit) {
mem.writeIntLittle(
u64,
unsigned.base.code[0..8],
@bitCast(u64, result),
);
} else {
mem.writeIntLittle(
u32,
unsigned.base.code[0..4],
@truncate(u32, @bitCast(u64, result)),
);
// if (unsigned.is_64bit) {
// mem.writeIntLittle(
// u64,
// unsigned.base.code[0..8],
// @bitCast(u64, result),
// );
// } else {
// mem.writeIntLittle(
// u32,
// unsigned.base.code[0..4],
// @truncate(u32, @bitCast(u64, result)),
// );
// }
// }
pub fn format(self: Unsigned, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
if (self.subtractor) |sub| {
try std.fmt.format(writer, ".subtractor = {}, ", .{sub});
}
try std.fmt.format(writer, ".addend = {}, ", .{self.addend});
const length: usize = if (self.is_64bit) 8 else 4;
try std.fmt.format(writer, ".length = {}, ", .{length});
}
};
pub fn parse(
allocator: *Allocator,
arch: std.Target.Cpu.Arch,
code: []u8,
relocs: []const macho.relocation_info,
) ![]*Relocation {
var it = RelocIterator{
.buffer = relocs,
};
switch (arch) {
.aarch64 => {
var parser = aarch64.Parser{
.allocator = allocator,
.it = &it,
.code = code,
.parsed = std.ArrayList(*Relocation).init(allocator),
};
defer parser.deinit();
try parser.parse();
return parser.parsed.toOwnedSlice();
},
.x86_64 => {
var parser = x86_64.Parser{
.allocator = allocator,
.it = &it,
.code = code,
.parsed = std.ArrayList(*Relocation).init(allocator),
};
defer parser.deinit();
try parser.parse();
return parser.parsed.toOwnedSlice();
},
else => unreachable,
}
}
pub const RelocIterator = struct {
buffer: []const macho.relocation_info,
index: i32 = -1,
@ -182,15 +151,7 @@ pub const RelocIterator = struct {
pub fn next(self: *RelocIterator) ?macho.relocation_info {
self.index += 1;
if (self.index < self.buffer.len) {
const reloc = self.buffer[@intCast(u32, self.index)];
log.debug("relocation", .{});
log.debug(" | type = {}", .{reloc.r_type});
log.debug(" | offset = {}", .{reloc.r_address});
log.debug(" | PC = {}", .{reloc.r_pcrel == 1});
log.debug(" | length = {}", .{reloc.r_length});
log.debug(" | symbolnum = {}", .{reloc.r_symbolnum});
log.debug(" | extern = {}", .{reloc.r_extern == 1});
return reloc;
return self.buffer[@intCast(u32, self.index)];
}
return null;
}
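RelocIterator walks the raw relocation records back to front of the index; `peek`, used by the ADDEND and SUBTRACTOR pairing checks in the parsers, is assumed to return the next record without advancing, roughly:

// Assumed shape of peek(), matching its uses in parseAddend/parseSubtractor;
// callers only peek when a following record is guaranteed to exist.
pub fn peek(self: *RelocIterator) macho.relocation_info {
    return self.buffer[@intCast(u32, self.index + 1)];
}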

reloc/aarch64.zig

@ -9,24 +9,34 @@ const meta = std.meta;
const reloc = @import("../reloc.zig");
const Allocator = mem.Allocator;
const Object = @import("../Object.zig");
const Relocation = reloc.Relocation;
const Symbol = @import("../Symbol.zig");
const TextBlock = Zld.TextBlock;
const Zld = @import("../Zld.zig");
pub const Branch = struct {
base: Relocation,
/// Always .UnconditionalBranchImmediate
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .branch_aarch64;
pub fn resolve(branch: Branch, args: Relocation.ResolveArgs) !void {
const displacement = try math.cast(i28, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr));
// pub fn resolve(branch: Branch, args: Relocation.ResolveArgs) !void {
// const displacement = try math.cast(i28, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr));
log.debug(" | displacement 0x{x}", .{displacement});
// log.debug(" | displacement 0x{x}", .{displacement});
var inst = branch.inst;
inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
mem.writeIntLittle(u32, branch.base.code[0..4], inst.toU32());
// var inst = branch.inst;
// inst.unconditional_branch_immediate.imm26 = @truncate(u26, @bitCast(u28, displacement >> 2));
// mem.writeIntLittle(u32, branch.base.code[0..4], inst.toU32());
// }
pub fn format(self: Branch, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
@ -34,24 +44,32 @@ pub const Page = struct {
base: Relocation,
addend: ?u32 = null,
/// Always .PCRelativeAddress
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .page;
pub fn resolve(page: Page, args: Relocation.ResolveArgs) !void {
const target_addr = if (page.addend) |addend| args.target_addr + addend else args.target_addr;
const source_page = @intCast(i32, args.source_addr >> 12);
const target_page = @intCast(i32, target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
// pub fn resolve(page: Page, args: Relocation.ResolveArgs) !void {
// const target_addr = if (page.addend) |addend| args.target_addr + addend else args.target_addr;
// const source_page = @intCast(i32, args.source_addr >> 12);
// const target_page = @intCast(i32, target_addr >> 12);
// const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
log.debug(" | calculated addend 0x{x}", .{page.addend});
log.debug(" | moving by {} pages", .{pages});
// log.debug(" | calculated addend 0x{x}", .{page.addend});
// log.debug(" | moving by {} pages", .{pages});
var inst = page.inst;
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
// var inst = page.inst;
// inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
// inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// }
pub fn format(self: Page, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
if (self.addend) |addend| {
try std.fmt.format(writer, ".addend = {}, ", .{addend});
}
}
};
@ -59,7 +77,7 @@ pub const PageOff = struct {
base: Relocation,
addend: ?u32 = null,
op_kind: OpKind,
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .page_off;
@ -68,76 +86,99 @@ pub const PageOff = struct {
load_store,
};
pub fn resolve(page_off: PageOff, args: Relocation.ResolveArgs) !void {
const target_addr = if (page_off.addend) |addend| args.target_addr + addend else args.target_addr;
const narrowed = @truncate(u12, target_addr);
// pub fn resolve(page_off: PageOff, args: Relocation.ResolveArgs) !void {
// const target_addr = if (page_off.addend) |addend| args.target_addr + addend else args.target_addr;
// const narrowed = @truncate(u12, target_addr);
log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
log.debug(" | {s} opcode", .{page_off.op_kind});
// log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
// log.debug(" | {s} opcode", .{page_off.op_kind});
var inst = page_off.inst;
if (page_off.op_kind == .arithmetic) {
inst.add_subtract_immediate.imm12 = narrowed;
} else {
const offset: u12 = blk: {
if (inst.load_store_register.size == 0) {
if (inst.load_store_register.v == 1) {
// 128-bit SIMD is scaled by 16.
break :blk try math.divExact(u12, narrowed, 16);
}
// Otherwise, 8-bit SIMD or ldrb.
break :blk narrowed;
} else {
const denom: u4 = try math.powi(u4, 2, inst.load_store_register.size);
break :blk try math.divExact(u12, narrowed, denom);
}
};
inst.load_store_register.offset = offset;
// var inst = page_off.inst;
// if (page_off.op_kind == .arithmetic) {
// inst.add_subtract_immediate.imm12 = narrowed;
// } else {
// const offset: u12 = blk: {
// if (inst.load_store_register.size == 0) {
// if (inst.load_store_register.v == 1) {
// // 128-bit SIMD is scaled by 16.
// break :blk try math.divExact(u12, narrowed, 16);
// }
// // Otherwise, 8-bit SIMD or ldrb.
// break :blk narrowed;
// } else {
// const denom: u4 = try math.powi(u4, 2, inst.load_store_register.size);
// break :blk try math.divExact(u12, narrowed, denom);
// }
// };
// inst.load_store_register.offset = offset;
// }
// mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
// }
pub fn format(self: PageOff, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
if (self.addend) |addend| {
try std.fmt.format(writer, ".addend = {}, ", .{addend});
}
mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
try std.fmt.format(writer, ".op_kind = {s}, ", .{self.op_kind});
}
};
pub const GotPage = struct {
base: Relocation,
/// Always .PCRelativeAddress
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .got_page;
pub fn resolve(page: GotPage, args: Relocation.ResolveArgs) !void {
const source_page = @intCast(i32, args.source_addr >> 12);
const target_page = @intCast(i32, args.target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
// pub fn resolve(page: GotPage, args: Relocation.ResolveArgs) !void {
// const source_page = @intCast(i32, args.source_addr >> 12);
// const target_page = @intCast(i32, args.target_addr >> 12);
// const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
log.debug(" | moving by {} pages", .{pages});
// log.debug(" | moving by {} pages", .{pages});
var inst = page.inst;
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
// var inst = page.inst;
// inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
// inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// }
pub fn format(self: GotPage, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
pub const GotPageOff = struct {
base: Relocation,
/// Always .LoadStoreRegister with size = 3 for GOT indirection
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .got_page_off;
pub fn resolve(page_off: GotPageOff, args: Relocation.ResolveArgs) !void {
const narrowed = @truncate(u12, args.target_addr);
// pub fn resolve(page_off: GotPageOff, args: Relocation.ResolveArgs) !void {
// const narrowed = @truncate(u12, args.target_addr);
log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
// log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
var inst = page_off.inst;
const offset = try math.divExact(u12, narrowed, 8);
inst.load_store_register.offset = offset;
// var inst = page_off.inst;
// const offset = try math.divExact(u12, narrowed, 8);
// inst.load_store_register.offset = offset;
mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
// mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
// }
pub fn format(self: GotPageOff, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
@ -146,34 +187,48 @@ pub const PointerToGot = struct {
pub const base_type: Relocation.Type = .pointer_to_got;
pub fn resolve(ptr_to_got: PointerToGot, args: Relocation.ResolveArgs) !void {
const result = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr));
// pub fn resolve(ptr_to_got: PointerToGot, args: Relocation.ResolveArgs) !void {
// const result = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr));
log.debug(" | calculated value 0x{x}", .{result});
// log.debug(" | calculated value 0x{x}", .{result});
mem.writeIntLittle(u32, ptr_to_got.base.code[0..4], @bitCast(u32, result));
// mem.writeIntLittle(u32, ptr_to_got.base.code[0..4], @bitCast(u32, result));
// }
pub fn format(self: PointerToGot, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
pub const TlvpPage = struct {
base: Relocation,
/// Always .PCRelativeAddress
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .tlvp_page;
pub fn resolve(page: TlvpPage, args: Relocation.ResolveArgs) !void {
const source_page = @intCast(i32, args.source_addr >> 12);
const target_page = @intCast(i32, args.target_addr >> 12);
const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
// pub fn resolve(page: TlvpPage, args: Relocation.ResolveArgs) !void {
// const source_page = @intCast(i32, args.source_addr >> 12);
// const target_page = @intCast(i32, args.target_addr >> 12);
// const pages = @bitCast(u21, @intCast(i21, target_page - source_page));
log.debug(" | moving by {} pages", .{pages});
// log.debug(" | moving by {} pages", .{pages});
var inst = page.inst;
inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
inst.pc_relative_address.immlo = @truncate(u2, pages);
// var inst = page.inst;
// inst.pc_relative_address.immhi = @truncate(u19, pages >> 2);
// inst.pc_relative_address.immlo = @truncate(u2, pages);
mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// mem.writeIntLittle(u32, page.base.code[0..4], inst.toU32());
// }
pub fn format(self: TlvpPage, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
@ -182,82 +237,110 @@ pub const TlvpPageOff = struct {
/// Always .AddSubtractImmediate regardless of the source instruction.
/// This means we always rewrite the instruction to an add even if the
/// source instruction was an ldr.
inst: aarch64.Instruction,
// inst: aarch64.Instruction,
pub const base_type: Relocation.Type = .tlvp_page_off;
pub fn resolve(page_off: TlvpPageOff, args: Relocation.ResolveArgs) !void {
const narrowed = @truncate(u12, args.target_addr);
// pub fn resolve(page_off: TlvpPageOff, args: Relocation.ResolveArgs) !void {
// const narrowed = @truncate(u12, args.target_addr);
log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
// log.debug(" | narrowed address within the page 0x{x}", .{narrowed});
var inst = page_off.inst;
inst.add_subtract_immediate.imm12 = narrowed;
// var inst = page_off.inst;
// inst.add_subtract_immediate.imm12 = narrowed;
mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
// mem.writeIntLittle(u32, page_off.base.code[0..4], inst.toU32());
// }
pub fn format(self: TlvpPageOff, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
pub const Parser = struct {
allocator: *Allocator,
object: *Object,
zld: *Zld,
it: *reloc.RelocIterator,
code: []u8,
parsed: std.ArrayList(*Relocation),
block: *TextBlock,
base_addr: u64,
addend: ?u32 = null,
subtractor: ?Relocation.Target = null,
subtractor: ?*Symbol = null,
pub fn deinit(parser: *Parser) void {
parser.parsed.deinit();
}
pub fn parse(parser: *Parser) !void {
while (parser.it.next()) |rel| {
switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
.ARM64_RELOC_BRANCH26 => {
try parser.parseBranch(rel);
},
pub fn parse(self: *Parser) !void {
while (self.it.next()) |rel| {
const out_rel = switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
.ARM64_RELOC_BRANCH26 => try self.parseBranch(rel),
.ARM64_RELOC_SUBTRACTOR => {
try parser.parseSubtractor(rel);
},
.ARM64_RELOC_UNSIGNED => {
try parser.parseUnsigned(rel);
// A subtractor does not by itself produce a relocation on the TextBlock,
// so parse it and carry on.
try self.parseSubtractor(rel);
continue;
},
.ARM64_RELOC_UNSIGNED => try self.parseUnsigned(rel),
.ARM64_RELOC_ADDEND => {
try parser.parseAddend(rel);
// An addend does not by itself produce a relocation on the TextBlock,
// so parse it and carry on.
try self.parseAddend(rel);
continue;
},
.ARM64_RELOC_PAGE21,
.ARM64_RELOC_GOT_LOAD_PAGE21,
.ARM64_RELOC_TLVP_LOAD_PAGE21,
=> {
try parser.parsePage(rel);
=> try self.parsePage(rel),
.ARM64_RELOC_PAGEOFF12 => try self.parsePageOff(rel),
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => try self.parseGotLoadPageOff(rel),
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => try self.parseTlvpLoadPageOff(rel),
.ARM64_RELOC_POINTER_TO_GOT => try self.parsePointerToGot(rel),
};
try self.block.relocs.append(out_rel);
if (out_rel.target.payload == .regular) {
try self.block.references.put(out_rel.target.payload.regular.local_sym_index, {});
}
switch (out_rel.@"type") {
.got_page, .got_page_off, .pointer_to_got => {
const sym = out_rel.target;
if (sym.got_index != null) continue;
const index = @intCast(u32, self.zld.got_entries.items.len);
sym.got_index = index;
try self.zld.got_entries.append(self.zld.allocator, sym);
log.debug("adding GOT entry for symbol {s} at index {}", .{ sym.name, index });
},
.ARM64_RELOC_PAGEOFF12 => {
try parser.parsePageOff(rel);
},
.ARM64_RELOC_GOT_LOAD_PAGEOFF12 => {
try parser.parseGotLoadPageOff(rel);
},
.ARM64_RELOC_TLVP_LOAD_PAGEOFF12 => {
try parser.parseTlvpLoadPageOff(rel);
},
.ARM64_RELOC_POINTER_TO_GOT => {
try parser.parsePointerToGot(rel);
.branch_aarch64 => {
const sym = out_rel.target;
if (sym.stubs_index != null) continue;
if (sym.payload != .proxy) continue;
const index = @intCast(u32, self.zld.stubs.items.len);
sym.stubs_index = index;
try self.zld.stubs.append(self.zld.allocator, sym);
log.debug("adding stub entry for symbol {s} at index {}", .{ sym.name, index });
},
else => {},
}
}
}
fn parseAddend(parser: *Parser, rel: macho.relocation_info) !void {
fn parseAddend(self: *Parser, rel: macho.relocation_info) !void {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_ADDEND);
assert(rel.r_pcrel == 0);
assert(rel.r_extern == 0);
assert(parser.addend == null);
assert(self.addend == null);
parser.addend = rel.r_symbolnum;
self.addend = rel.r_symbolnum;
// Verify ADDEND is followed by a load.
const next = @intToEnum(macho.reloc_type_arm64, parser.it.peek().r_type);
const next = @intToEnum(macho.reloc_type_arm64, self.it.peek().r_type);
switch (next) {
.ARM64_RELOC_PAGE21, .ARM64_RELOC_PAGEOFF12 => {},
else => {
@ -267,127 +350,101 @@ pub const Parser = struct {
}
}
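On AArch64, ADDEND is a pseudo-relocation: it carries the addend in r_symbolnum and must immediately precede the PAGE21 or PAGEOFF12 record it modifies, which is why the parser stashes it and resets it after the consuming record. The record sequence for an adrp/add pair targeting foo + 16 looks like:

ARM64_RELOC_ADDEND     r_symbolnum = 16   (no target; sets self.addend)
ARM64_RELOC_PAGE21     target = _foo      (consumes addend, then resets it)
ARM64_RELOC_ADDEND     r_symbolnum = 16
ARM64_RELOC_PAGEOFF12  target = _foo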
fn parseBranch(parser: *Parser, rel: macho.relocation_info) !void {
fn parseBranch(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_BRANCH26);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const parsed_inst = aarch64.Instruction{ .unconditional_branch_immediate = mem.bytesToValue(
meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
),
inst,
) };
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = try self.object.symbolFromReloc(rel);
var branch = try parser.allocator.create(Branch);
errdefer parser.allocator.destroy(branch);
const target = Relocation.Target.fromReloc(rel);
var branch = try self.object.allocator.create(Branch);
errdefer self.object.allocator.destroy(branch);
branch.* = .{
.base = .{
.@"type" = .branch_aarch64,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.inst = parsed_inst,
};
log.debug(" | emitting {}", .{branch});
try parser.parsed.append(&branch.base);
return &branch.base;
}
fn parsePage(parser: *Parser, rel: macho.relocation_info) !void {
fn parsePage(self: *Parser, rel: macho.relocation_info) !*Relocation {
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
const target = Relocation.Target.fromReloc(rel);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const parsed_inst = aarch64.Instruction{ .pc_relative_address = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), inst) };
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const ptr: *Relocation = ptr: {
switch (rel_type) {
.ARM64_RELOC_PAGE21 => {
defer {
// Reset parser's addend state
parser.addend = null;
self.addend = null;
}
var page = try parser.allocator.create(Page);
errdefer parser.allocator.destroy(page);
var page = try self.object.allocator.create(Page);
errdefer self.object.allocator.destroy(page);
page.* = .{
.base = .{
.@"type" = .page,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.addend = parser.addend,
.inst = parsed_inst,
.addend = self.addend,
};
log.debug(" | emitting {}", .{page});
break :ptr &page.base;
},
.ARM64_RELOC_GOT_LOAD_PAGE21 => {
var page = try parser.allocator.create(GotPage);
errdefer parser.allocator.destroy(page);
var page = try self.object.allocator.create(GotPage);
errdefer self.object.allocator.destroy(page);
page.* = .{
.base = .{
.@"type" = .got_page,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.inst = parsed_inst,
};
log.debug(" | emitting {}", .{page});
break :ptr &page.base;
},
.ARM64_RELOC_TLVP_LOAD_PAGE21 => {
var page = try parser.allocator.create(TlvpPage);
errdefer parser.allocator.destroy(page);
var page = try self.object.allocator.create(TlvpPage);
errdefer self.object.allocator.destroy(page);
page.* = .{
.base = .{
.@"type" = .tlvp_page,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.inst = parsed_inst,
};
log.debug(" | emitting {}", .{page});
break :ptr &page.base;
},
else => unreachable,
}
};
try parser.parsed.append(ptr);
return ptr;
}
fn parsePageOff(parser: *Parser, rel: macho.relocation_info) !void {
fn parsePageOff(self: *Parser, rel: macho.relocation_info) !*Relocation {
defer {
// Reset parser's addend state
parser.addend = null;
self.addend = null;
}
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
@ -395,83 +452,56 @@ pub const Parser = struct {
assert(rel.r_pcrel == 0);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const op_kind: PageOff.OpKind = if (isArithmeticOp(self.block.code[offset..][0..4]))
.arithmetic
else
.load_store;
var op_kind: PageOff.OpKind = undefined;
var parsed_inst: aarch64.Instruction = undefined;
if (isArithmeticOp(inst)) {
op_kind = .arithmetic;
parsed_inst = .{ .add_subtract_immediate = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), inst) };
} else {
op_kind = .load_store;
parsed_inst = .{ .load_store_register = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst) };
}
const target = Relocation.Target.fromReloc(rel);
var page_off = try parser.allocator.create(PageOff);
errdefer parser.allocator.destroy(page_off);
var page_off = try self.object.allocator.create(PageOff);
errdefer self.object.allocator.destroy(page_off);
page_off.* = .{
.base = .{
.@"type" = .page_off,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.op_kind = op_kind,
.inst = parsed_inst,
.addend = parser.addend,
.addend = self.addend,
};
log.debug(" | emitting {}", .{page_off});
try parser.parsed.append(&page_off.base);
return &page_off.base;
}
fn parseGotLoadPageOff(parser: *Parser, rel: macho.relocation_info) !void {
fn parseGotLoadPageOff(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_GOT_LOAD_PAGEOFF12);
assert(rel.r_pcrel == 0);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
assert(!isArithmeticOp(inst));
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
assert(!isArithmeticOp(self.block.code[offset..][0..4]));
const parsed_inst = mem.bytesToValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst);
assert(parsed_inst.size == 3);
const target = Relocation.Target.fromReloc(rel);
var page_off = try parser.allocator.create(GotPageOff);
errdefer parser.allocator.destroy(page_off);
var page_off = try self.object.allocator.create(GotPageOff);
errdefer self.object.allocator.destroy(page_off);
page_off.* = .{
.base = .{
.@"type" = .got_page_off,
.code = inst,
.offset = offset,
.target = target,
},
.inst = .{
.load_store_register = parsed_inst,
.block = self.block,
},
};
log.debug(" | emitting {}", .{page_off});
try parser.parsed.append(&page_off.base);
return &page_off.base;
}
fn parseTlvpLoadPageOff(parser: *Parser, rel: macho.relocation_info) !void {
fn parseTlvpLoadPageOff(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_TLVP_LOAD_PAGEOFF12);
assert(rel.r_pcrel == 0);
@ -483,141 +513,102 @@ pub const Parser = struct {
size: u1,
};
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const parsed: RegInfo = parsed: {
if (isArithmeticOp(inst)) {
const parsed_inst = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), inst);
break :parsed .{
.rd = parsed_inst.rd,
.rn = parsed_inst.rn,
.size = parsed_inst.sf,
};
} else {
const parsed_inst = mem.bytesAsValue(meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst);
break :parsed .{
.rd = parsed_inst.rt,
.rn = parsed_inst.rn,
.size = @truncate(u1, parsed_inst.size),
};
}
};
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = Relocation.Target.fromReloc(rel);
var page_off = try parser.allocator.create(TlvpPageOff);
errdefer parser.allocator.destroy(page_off);
var page_off = try self.object.allocator.create(TlvpPageOff);
errdefer self.object.allocator.destroy(page_off);
page_off.* = .{
.base = .{
.@"type" = .tlvp_page_off,
.code = inst,
.offset = offset,
.target = target,
},
.inst = .{
.add_subtract_immediate = .{
.rd = parsed.rd,
.rn = parsed.rn,
.imm12 = 0, // This will be filled when target addresses are known.
.sh = 0,
.s = 0,
.op = 0,
.sf = parsed.size,
},
.block = self.block,
},
};
log.debug(" | emitting {}", .{page_off});
try parser.parsed.append(&page_off.base);
return &page_off.base;
}
fn parseSubtractor(parser: *Parser, rel: macho.relocation_info) !void {
fn parseSubtractor(self: *Parser, rel: macho.relocation_info) !void {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_SUBTRACTOR);
assert(rel.r_pcrel == 0);
assert(parser.subtractor == null);
assert(self.subtractor == null);
parser.subtractor = Relocation.Target.fromReloc(rel);
self.subtractor = try self.object.symbolFromReloc(rel);
// Verify SUBTRACTOR is followed by UNSIGNED.
const next = @intToEnum(macho.reloc_type_arm64, parser.it.peek().r_type);
const next = @intToEnum(macho.reloc_type_arm64, self.it.peek().r_type);
if (next != .ARM64_RELOC_UNSIGNED) {
log.err("unexpected relocation type: expected UNSIGNED, found {s}", .{next});
return error.UnexpectedRelocationType;
}
}
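SUBTRACTOR and UNSIGNED always arrive as a pair: the subtractor names symbol B, the following UNSIGNED names symbol A, and the slot ultimately encodes A - B + addend. For a pointer difference such as `_a - _b`, the sequence is:

ARM64_RELOC_SUBTRACTOR  target = _b   (stored in self.subtractor)
ARM64_RELOC_UNSIGNED    target = _a   (emits Unsigned{ .subtractor = _b, ... })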
fn parseUnsigned(parser: *Parser, rel: macho.relocation_info) !void {
fn parseUnsigned(self: *Parser, rel: macho.relocation_info) !*Relocation {
defer {
// Reset parser's subtractor state
parser.subtractor = null;
self.subtractor = null;
}
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_UNSIGNED);
assert(rel.r_pcrel == 0);
var unsigned = try parser.allocator.create(reloc.Unsigned);
errdefer parser.allocator.destroy(unsigned);
const target = Relocation.Target.fromReloc(rel);
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const is_64bit: bool = switch (rel.r_length) {
3 => true,
2 => false,
else => unreachable,
};
const offset = @intCast(u32, rel.r_address);
const addend: i64 = if (is_64bit)
mem.readIntLittle(i64, parser.code[offset..][0..8])
mem.readIntLittle(i64, self.block.code[offset..][0..8])
else
mem.readIntLittle(i32, parser.code[offset..][0..4]);
mem.readIntLittle(i32, self.block.code[offset..][0..4]);
var unsigned = try self.object.allocator.create(reloc.Unsigned);
errdefer self.object.allocator.destroy(unsigned);
unsigned.* = .{
.base = .{
.@"type" = .unsigned,
.code = if (is_64bit) parser.code[offset..][0..8] else parser.code[offset..][0..4],
.offset = offset,
.target = target,
.block = self.block,
},
.subtractor = parser.subtractor,
.subtractor = self.subtractor,
.is_64bit = is_64bit,
.addend = addend,
};
log.debug(" | emitting {}", .{unsigned});
try parser.parsed.append(&unsigned.base);
return &unsigned.base;
}
fn parsePointerToGot(parser: *Parser, rel: macho.relocation_info) !void {
fn parsePointerToGot(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
assert(rel_type == .ARM64_RELOC_POINTER_TO_GOT);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
var ptr_to_got = try parser.allocator.create(PointerToGot);
errdefer parser.allocator.destroy(ptr_to_got);
var ptr_to_got = try self.object.allocator.create(PointerToGot);
errdefer self.object.allocator.destroy(ptr_to_got);
const target = Relocation.Target.fromReloc(rel);
const offset = @intCast(u32, rel.r_address);
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
ptr_to_got.* = .{
.base = .{
.@"type" = .pointer_to_got,
.code = parser.code[offset..][0..4],
.offset = offset,
.target = target,
.block = self.block,
},
};
log.debug(" | emitting {}", .{ptr_to_got});
try parser.parsed.append(&ptr_to_got.base);
return &ptr_to_got.base;
}
};

reloc/x86_64.zig

@ -8,17 +8,28 @@ const meta = std.meta;
const reloc = @import("../reloc.zig");
const Allocator = mem.Allocator;
const Object = @import("../Object.zig");
const Relocation = reloc.Relocation;
const Symbol = @import("../Symbol.zig");
const TextBlock = Zld.TextBlock;
const Zld = @import("../Zld.zig");
pub const Branch = struct {
base: Relocation,
pub const base_type: Relocation.Type = .branch_x86_64;
pub fn resolve(branch: Branch, args: Relocation.ResolveArgs) !void {
const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
log.debug(" | displacement 0x{x}", .{displacement});
mem.writeIntLittle(u32, branch.base.code[0..4], @bitCast(u32, displacement));
// pub fn resolve(branch: Branch, args: Relocation.ResolveArgs) !void {
// const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
// log.debug(" | displacement 0x{x}", .{displacement});
// mem.writeIntLittle(u32, branch.base.code[0..4], @bitCast(u32, displacement));
// }
pub fn format(self: Branch, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
@ -29,25 +40,32 @@ pub const Signed = struct {
pub const base_type: Relocation.Type = .signed;
pub fn resolve(signed: Signed, args: Relocation.ResolveArgs) !void {
const target_addr = target_addr: {
if (signed.base.target == .section) {
const source_target = @intCast(i64, args.source_source_sect_addr.?) + @intCast(i64, signed.base.offset) + signed.addend + 4;
const source_disp = source_target - @intCast(i64, args.source_target_sect_addr.?);
break :target_addr @intCast(i64, args.target_addr) + source_disp;
}
break :target_addr @intCast(i64, args.target_addr) + signed.addend;
};
const displacement = try math.cast(
i32,
target_addr - @intCast(i64, args.source_addr) - signed.correction - 4,
);
// pub fn resolve(signed: Signed, args: Relocation.ResolveArgs) !void {
// const target_addr = target_addr: {
// if (signed.base.target == .section) {
// const source_target = @intCast(i64, args.source_source_sect_addr.?) + @intCast(i64, signed.base.offset) + signed.addend + 4;
// const source_disp = source_target - @intCast(i64, args.source_target_sect_addr.?);
// break :target_addr @intCast(i64, args.target_addr) + source_disp;
// }
// break :target_addr @intCast(i64, args.target_addr) + signed.addend;
// };
// const displacement = try math.cast(
// i32,
// target_addr - @intCast(i64, args.source_addr) - signed.correction - 4,
// );
log.debug(" | addend 0x{x}", .{signed.addend});
log.debug(" | correction 0x{x}", .{signed.correction});
log.debug(" | displacement 0x{x}", .{displacement});
// log.debug(" | addend 0x{x}", .{signed.addend});
// log.debug(" | correction 0x{x}", .{signed.correction});
// log.debug(" | displacement 0x{x}", .{displacement});
mem.writeIntLittle(u32, signed.base.code[0..4], @bitCast(u32, displacement));
// mem.writeIntLittle(u32, signed.base.code[0..4], @bitCast(u32, displacement));
// }
pub fn format(self: Signed, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
try std.fmt.format(writer, ".addend = {}, ", .{self.addend});
try std.fmt.format(writer, ".correction = {}, ", .{self.correction});
}
};
@ -56,10 +74,17 @@ pub const GotLoad = struct {
pub const base_type: Relocation.Type = .got_load;
pub fn resolve(got_load: GotLoad, args: Relocation.ResolveArgs) !void {
const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
log.debug(" | displacement 0x{x}", .{displacement});
mem.writeIntLittle(u32, got_load.base.code[0..4], @bitCast(u32, displacement));
// pub fn resolve(got_load: GotLoad, args: Relocation.ResolveArgs) !void {
// const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
// log.debug(" | displacement 0x{x}", .{displacement});
// mem.writeIntLittle(u32, got_load.base.code[0..4], @bitCast(u32, displacement));
// }
pub fn format(self: GotLoad, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
@ -69,113 +94,139 @@ pub const Got = struct {
pub const base_type: Relocation.Type = .got;
pub fn resolve(got: Got, args: Relocation.ResolveArgs) !void {
const displacement = try math.cast(
i32,
@intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4 + got.addend,
);
log.debug(" | displacement 0x{x}", .{displacement});
mem.writeIntLittle(u32, got.base.code[0..4], @bitCast(u32, displacement));
// pub fn resolve(got: Got, args: Relocation.ResolveArgs) !void {
// const displacement = try math.cast(
// i32,
// @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4 + got.addend,
// );
// log.debug(" | displacement 0x{x}", .{displacement});
// mem.writeIntLittle(u32, got.base.code[0..4], @bitCast(u32, displacement));
// }
pub fn format(self: Got, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = fmt;
_ = options;
try std.fmt.format(writer, ".addend = {}, ", .{self.addend});
}
};
pub const Tlv = struct {
base: Relocation,
op: *u8,
pub const base_type: Relocation.Type = .tlv;
pub fn resolve(tlv: Tlv, args: Relocation.ResolveArgs) !void {
// We need to rewrite the opcode from movq to leaq.
tlv.op.* = 0x8d;
log.debug(" | rewriting op to leaq", .{});
// pub fn resolve(tlv: Tlv, args: Relocation.ResolveArgs) !void {
// // We need to rewrite the opcode from movq to leaq.
// tlv.op.* = 0x8d;
// log.debug(" | rewriting op to leaq", .{});
const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
log.debug(" | displacement 0x{x}", .{displacement});
// const displacement = try math.cast(i32, @intCast(i64, args.target_addr) - @intCast(i64, args.source_addr) - 4);
// log.debug(" | displacement 0x{x}", .{displacement});
mem.writeIntLittle(u32, tlv.base.code[0..4], @bitCast(u32, displacement));
// mem.writeIntLittle(u32, tlv.base.code[0..4], @bitCast(u32, displacement));
// }
pub fn format(self: Tlv, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = self;
_ = fmt;
_ = options;
_ = writer;
}
};
pub const Parser = struct {
allocator: *Allocator,
object: *Object,
zld: *Zld,
it: *reloc.RelocIterator,
code: []u8,
parsed: std.ArrayList(*Relocation),
subtractor: ?Relocation.Target = null,
block: *TextBlock,
base_addr: u64,
subtractor: ?*Symbol = null,
pub fn deinit(parser: *Parser) void {
parser.parsed.deinit();
}
pub fn parse(parser: *Parser) !void {
while (parser.it.next()) |rel| {
switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
.X86_64_RELOC_BRANCH => {
try parser.parseBranch(rel);
},
pub fn parse(self: *Parser) !void {
while (self.it.next()) |rel| {
const out_rel = switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
.X86_64_RELOC_BRANCH => try self.parseBranch(rel),
.X86_64_RELOC_SUBTRACTOR => {
try parser.parseSubtractor(rel);
},
.X86_64_RELOC_UNSIGNED => {
try parser.parseUnsigned(rel);
// A subtractor does not by itself produce a relocation on the TextBlock,
// so parse it and carry on.
try self.parseSubtractor(rel);
continue;
},
.X86_64_RELOC_UNSIGNED => try self.parseUnsigned(rel),
.X86_64_RELOC_SIGNED,
.X86_64_RELOC_SIGNED_1,
.X86_64_RELOC_SIGNED_2,
.X86_64_RELOC_SIGNED_4,
=> {
try parser.parseSigned(rel);
=> try self.parseSigned(rel),
.X86_64_RELOC_GOT_LOAD => try self.parseGotLoad(rel),
.X86_64_RELOC_GOT => try self.parseGot(rel),
.X86_64_RELOC_TLV => try self.parseTlv(rel),
};
try self.block.relocs.append(out_rel);
if (out_rel.target.payload == .regular) {
try self.block.references.put(out_rel.target.payload.regular.local_sym_index, {});
}
switch (out_rel.@"type") {
.got_load, .got => {
const sym = out_rel.target;
if (sym.got_index != null) continue;
const index = @intCast(u32, self.zld.got_entries.items.len);
sym.got_index = index;
try self.zld.got_entries.append(self.zld.allocator, sym);
log.debug("adding GOT entry for symbol {s} at index {}", .{ sym.name, index });
},
.X86_64_RELOC_GOT_LOAD => {
try parser.parseGotLoad(rel);
},
.X86_64_RELOC_GOT => {
try parser.parseGot(rel);
},
.X86_64_RELOC_TLV => {
try parser.parseTlv(rel);
.branch_x86_64 => {
const sym = out_rel.target;
if (sym.stubs_index != null) continue;
if (sym.payload != .proxy) continue;
const index = @intCast(u32, self.zld.stubs.items.len);
sym.stubs_index = index;
try self.zld.stubs.append(self.zld.allocator, sym);
log.debug("adding stub entry for symbol {s} at index {}", .{ sym.name, index });
},
else => {},
}
}
}
fn parseBranch(parser: *Parser, rel: macho.relocation_info) !void {
fn parseBranch(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_BRANCH);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = try self.object.symbolFromReloc(rel);
var branch = try parser.allocator.create(Branch);
errdefer parser.allocator.destroy(branch);
const target = Relocation.Target.fromReloc(rel);
var branch = try self.object.allocator.create(Branch);
errdefer self.object.allocator.destroy(branch);
branch.* = .{
.base = .{
.@"type" = .branch_x86_64,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
};
log.debug(" | emitting {}", .{branch});
try parser.parsed.append(&branch.base);
return &branch.base;
}
fn parseSigned(parser: *Parser, rel: macho.relocation_info) !void {
fn parseSigned(self: *Parser, rel: macho.relocation_info) !*Relocation {
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
const target = Relocation.Target.fromReloc(rel);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = try self.object.symbolFromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const correction: i4 = switch (rel_type) {
.X86_64_RELOC_SIGNED => 0,
.X86_64_RELOC_SIGNED_1 => 1,
@ -183,161 +234,152 @@ pub const Parser = struct {
.X86_64_RELOC_SIGNED_4 => 4,
else => unreachable,
};
const addend = mem.readIntLittle(i32, inst) + correction;
const addend = mem.readIntLittle(i32, self.block.code[offset..][0..4]) + correction;
var signed = try parser.allocator.create(Signed);
errdefer parser.allocator.destroy(signed);
var signed = try self.object.allocator.create(Signed);
errdefer self.object.allocator.destroy(signed);
signed.* = .{
.base = .{
.@"type" = .signed,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.addend = addend,
.correction = correction,
};
log.debug(" | emitting {}", .{signed});
try parser.parsed.append(&signed.base);
return &signed.base;
}
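The correction accounts for immediate bytes that trail the 4-byte displacement field: for X86_64_RELOC_SIGNED_N the stored value is biased by -N because the CPU computes RIP-relative addresses from the end of the instruction. Adding the correction back recovers the true addend. For instance, with a `movl $imm32, foo(%rip)`:

// X86_64_RELOC_SIGNED_4: four immediate bytes follow the displacement,
// so the on-disk field holds (addend - 4); undo the bias:
const addend = mem.readIntLittle(i32, self.block.code[offset..][0..4]) + correction;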
fn parseGotLoad(parser: *Parser, rel: macho.relocation_info) !void {
fn parseGotLoad(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_GOT_LOAD);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.fromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = try self.object.symbolFromReloc(rel);
var got_load = try parser.allocator.create(GotLoad);
errdefer parser.allocator.destroy(got_load);
var got_load = try self.object.allocator.create(GotLoad);
errdefer self.object.allocator.destroy(got_load);
got_load.* = .{
.base = .{
.@"type" = .got_load,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
};
log.debug(" | emitting {}", .{got_load});
try parser.parsed.append(&got_load.base);
return &got_load.base;
}
fn parseGot(parser: *Parser, rel: macho.relocation_info) !void {
fn parseGot(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_GOT);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.fromReloc(rel);
const addend = mem.readIntLittle(i32, inst);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = try self.object.symbolFromReloc(rel);
const addend = mem.readIntLittle(i32, self.block.code[offset..][0..4]);
var got = try parser.allocator.create(Got);
errdefer parser.allocator.destroy(got);
var got = try self.object.allocator.create(Got);
errdefer self.object.allocator.destroy(got);
got.* = .{
.base = .{
.@"type" = .got,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.addend = addend,
};
log.debug(" | emitting {}", .{got});
try parser.parsed.append(&got.base);
return &got.base;
}
fn parseTlv(parser: *Parser, rel: macho.relocation_info) !void {
fn parseTlv(self: *Parser, rel: macho.relocation_info) !*Relocation {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_TLV);
assert(rel.r_pcrel == 1);
assert(rel.r_length == 2);
const offset = @intCast(u32, rel.r_address);
const inst = parser.code[offset..][0..4];
const target = Relocation.Target.fromReloc(rel);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const target = try self.object.symbolFromReloc(rel);
var tlv = try parser.allocator.create(Tlv);
errdefer parser.allocator.destroy(tlv);
var tlv = try self.object.allocator.create(Tlv);
errdefer self.object.allocator.destroy(tlv);
tlv.* = .{
.base = .{
.@"type" = .tlv,
.code = inst,
.offset = offset,
.target = target,
.block = self.block,
},
.op = &parser.code[offset - 2],
};
log.debug(" | emitting {}", .{tlv});
try parser.parsed.append(&tlv.base);
return &tlv.base;
}
fn parseSubtractor(parser: *Parser, rel: macho.relocation_info) !void {
fn parseSubtractor(self: *Parser, rel: macho.relocation_info) !void {
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_SUBTRACTOR);
assert(rel.r_pcrel == 0);
assert(parser.subtractor == null);
assert(self.subtractor == null);
parser.subtractor = Relocation.Target.fromReloc(rel);
self.subtractor = try self.object.symbolFromReloc(rel);
// Verify SUBTRACTOR is followed by UNSIGNED.
const next = @intToEnum(macho.reloc_type_x86_64, parser.it.peek().r_type);
const next = @intToEnum(macho.reloc_type_x86_64, self.it.peek().r_type);
if (next != .X86_64_RELOC_UNSIGNED) {
log.err("unexpected relocation type: expected UNSIGNED, found {s}", .{next});
return error.UnexpectedRelocationType;
}
}
fn parseUnsigned(parser: *Parser, rel: macho.relocation_info) !void {
fn parseUnsigned(self: *Parser, rel: macho.relocation_info) !*Relocation {
defer {
// Reset parser's subtractor state
parser.subtractor = null;
self.subtractor = null;
}
const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
assert(rel_type == .X86_64_RELOC_UNSIGNED);
assert(rel.r_pcrel == 0);
var unsigned = try parser.allocator.create(reloc.Unsigned);
errdefer parser.allocator.destroy(unsigned);
const target = Relocation.Target.fromReloc(rel);
const target = try self.object.symbolFromReloc(rel);
const is_64bit: bool = switch (rel.r_length) {
3 => true,
2 => false,
else => unreachable,
};
const offset = @intCast(u32, rel.r_address);
const offset = @intCast(u32, @intCast(u64, rel.r_address) - self.base_addr);
const addend: i64 = if (is_64bit)
mem.readIntLittle(i64, parser.code[offset..][0..8])
mem.readIntLittle(i64, self.block.code[offset..][0..8])
else
mem.readIntLittle(i32, parser.code[offset..][0..4]);
mem.readIntLittle(i32, self.block.code[offset..][0..4]);
var unsigned = try self.object.allocator.create(reloc.Unsigned);
errdefer self.object.allocator.destroy(unsigned);
unsigned.* = .{
.base = .{
.@"type" = .unsigned,
.code = if (is_64bit) parser.code[offset..][0..8] else parser.code[offset..][0..4],
.offset = offset,
.target = target,
.block = self.block,
},
.subtractor = parser.subtractor,
.subtractor = self.subtractor,
.is_64bit = is_64bit,
.addend = addend,
};
log.debug(" | emitting {}", .{unsigned});
try parser.parsed.append(&unsigned.base);
return &unsigned.base;
}
};