aarch64: introduce MCValue.got_load and MCValue.direct_load
This matches the current design in the x86_64 backend and significantly simplifies handling of PIE targets in the aarch64 backend.
This commit is contained in:
parent 9c82f3ae6f
commit 5bba041bae
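In short, both new tags lower to the same fixed adrp + ldr pair; they differ only in which MachO relocations the linker resolves against that pair. A minimal sketch of that mapping, assuming a hypothetical helper name `pieRelocTypes` that is not part of the commit and simply mirrors the doc comments on `got_load`/`direct_load` and the switches in `mirLoadMemoryPie` below:

```zig
const std = @import("std");

/// Hypothetical helper (not part of this commit): the MachO relocation pair
/// the linker resolves for each of the new MCValue tags.
fn pieRelocTypes(tag: enum { got_load, direct_load }) [2]std.macho.reloc_type_arm64 {
    return switch (tag) {
        // adrp + ldr through the symbol's GOT entry.
        .got_load => .{ .ARM64_RELOC_GOT_LOAD_PAGE21, .ARM64_RELOC_GOT_LOAD_PAGEOFF12 },
        // adrp + ldr against the symbol's address directly.
        .direct_load => .{ .ARM64_RELOC_PAGE21, .ARM64_RELOC_PAGEOFF12 },
    };
}
```

Non-PIE targets keep using `.memory` with the absolute-address `load_memory` pseudo-instruction, as before.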
@@ -115,6 +115,14 @@ const MCValue = union(enum) {
    /// The value is in memory at a hard-coded address.
    /// If the type is a pointer, it means the pointer address is at this memory location.
    memory: u64,
    /// The value is in memory referenced indirectly via a GOT entry index.
    /// If the type is a pointer, it means the pointer is referenced indirectly via GOT.
    /// When lowered, the linker will emit relocations of type ARM64_RELOC_GOT_LOAD_PAGE21 and ARM64_RELOC_GOT_LOAD_PAGEOFF12.
    got_load: u32,
    /// The value is in memory referenced directly via symbol index.
    /// If the type is a pointer, it means the pointer is referenced directly via symbol index.
    /// When lowered, the linker will emit relocations of type ARM64_RELOC_PAGE21 and ARM64_RELOC_PAGEOFF12.
    direct_load: u32,
    /// The value is one of the stack variables.
    /// If the type is a pointer, it means the pointer address is in the stack at this offset.
    stack_offset: u32,
@@ -1802,6 +1810,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
        },
        .memory,
        .stack_offset,
        .got_load,
        .direct_load,
        => {
            const reg = try self.register_manager.allocReg(null);
            self.register_manager.freezeRegs(&.{reg});
@@ -1946,6 +1956,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
            const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr);
            try self.store(.{ .register = addr_reg }, value, ptr_ty, value_ty);
        },
        .got_load,
        .direct_load,
        => {
            return self.fail("TODO implement storing to {}", .{ptr});
        },
    }
}

@@ -2114,6 +2129,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
            .memory => unreachable,
            .compare_flags_signed => unreachable,
            .compare_flags_unsigned => unreachable,
            .got_load => unreachable,
            .direct_load => unreachable,
            .register => |reg| {
                try self.register_manager.getReg(reg, null);
                try self.genSetReg(arg_ty, reg, arg_mcv);
@@ -2160,10 +2177,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void {
        } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
            if (func_value.castTag(.function)) |func_payload| {
                const func = func_payload.data;
                // TODO I'm hacking my way through here by repurposing .memory for storing
                // index to the GOT target symbol index.
                try self.genSetReg(Type.initTag(.u64), .x30, .{
                    .memory = func.owner_decl.link.macho.local_sym_index,
                    .got_load = func.owner_decl.link.macho.local_sym_index,
                });
                // blr x30
                _ = try self.addInst(.{
@@ -3015,6 +3030,12 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
                else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}),
            }
        },
        .got_load,
        .direct_load,
        => |sym_index| {
            _ = sym_index;
            return self.fail("TODO implement set stack variable from {}", .{mcv});
        },
        .memory => |vaddr| {
            _ = vaddr;
            return self.fail("TODO implement set stack variable from memory vaddr", .{});
@@ -3151,22 +3172,34 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
                .data = .{ .rr = .{ .rd = reg, .rn = src_reg } },
            });
        },
        .memory => |addr| {
            const owner_decl = self.mod_fn.owner_decl;
            // TODO when refactoring LinkBlock, make this into a generic function.
            const atom_index = switch (self.bin_file.tag) {
                .macho => owner_decl.link.macho.local_sym_index,
                .elf => owner_decl.link.elf.local_sym_index,
                .plan9 => @intCast(u32, owner_decl.link.plan9.sym_index orelse 0),
                else => return self.fail("TODO handle aarch64 load memory in {}", .{self.bin_file.tag}),
        .got_load,
        .direct_load,
        => |sym_index| {
            const tag: Mir.Inst.Tag = switch (mcv) {
                .got_load => .load_memory_got,
                .direct_load => .load_memory_direct,
                else => unreachable,
            };
            _ = try self.addInst(.{
                .tag = tag,
                .data = .{
                    .payload = try self.addExtra(Mir.LoadMemoryPie{
                        .register = @enumToInt(reg),
                        .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index,
                        .sym_index = sym_index,
                    }),
                },
            });
        },
        .memory => |addr| {
            _ = try self.addInst(.{
                .tag = .load_memory,
                .data = .{ .payload = try self.addExtra(Mir.LoadMemory{
                    .atom_index = atom_index,
                    .register = @enumToInt(reg),
                    .addr = @intCast(u32, addr),
                }) },
                .data = .{
                    .load_memory = .{
                        .register = @enumToInt(reg),
                        .addr = @intCast(u32, addr),
                    },
                },
            });
        },
        .stack_offset => |unadjusted_off| {
@@ -3385,9 +3418,9 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCVa
        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
        return MCValue{ .memory = got_addr };
    } else if (self.bin_file.cast(link.File.MachO)) |_| {
        // TODO I'm hacking my way through here by repurposing .memory for storing
        // index to the GOT target symbol index.
        return MCValue{ .memory = decl.link.macho.local_sym_index };
        // Because MachO is PIE-always-on, we defer memory address resolution until
        // the linker has enough info to perform relocations.
        return MCValue{ .got_load = decl.link.macho.local_sym_index };
    } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
        const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
        return MCValue{ .memory = got_addr };

@@ -109,6 +109,8 @@ pub fn emitMir(
            .eor_shifted_register => try emit.mirLogicalShiftedRegister(inst),

            .load_memory => try emit.mirLoadMemory(inst),
            .load_memory_got => try emit.mirLoadMemoryPie(inst),
            .load_memory_direct => try emit.mirLoadMemoryPie(inst),

            .ldp => try emit.mirLoadStoreRegisterPair(inst),
            .stp => try emit.mirLoadStoreRegisterPair(inst),
@@ -205,21 +207,18 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
    }

    switch (tag) {
        .load_memory_got,
        .load_memory_direct,
        => return 2 * 4,
        .load_memory => {
            if (emit.bin_file.options.pie) {
                // adrp, ldr
                return 2 * 4;
            } else {
                const payload = emit.mir.instructions.items(.data)[inst].payload;
                const load_memory = emit.mir.extraData(Mir.LoadMemory, payload).data;
                const addr = load_memory.addr;
            const load_memory = emit.mir.instructions.items(.data)[inst].load_memory;
            const addr = load_memory.addr;

                // movz, [movk, ...], ldr
                if (addr <= math.maxInt(u16)) return 2 * 4;
                if (addr <= math.maxInt(u32)) return 3 * 4;
                if (addr <= math.maxInt(u48)) return 4 * 4;
                return 5 * 4;
            }
            // movz, [movk, ...], ldr
            if (addr <= math.maxInt(u16)) return 2 * 4;
            if (addr <= math.maxInt(u32)) return 3 * 4;
            if (addr <= math.maxInt(u48)) return 4 * 4;
            return 5 * 4;
        },
        .pop_regs, .push_regs => {
            const reg_list = emit.mir.instructions.items(.data)[inst].reg_list;
@@ -658,58 +657,69 @@ fn mirLogicalShiftedRegister(emit: *Emit, inst: Mir.Inst.Index) !void {

fn mirLoadMemory(emit: *Emit, inst: Mir.Inst.Index) !void {
    assert(emit.mir.instructions.items(.tag)[inst] == .load_memory);
    const payload = emit.mir.instructions.items(.data)[inst].payload;
    const load_memory = emit.mir.extraData(Mir.LoadMemory, payload).data;
    const load_memory = emit.mir.instructions.items(.data)[inst].load_memory;
    const reg = @intToEnum(Register, load_memory.register);
    const addr = load_memory.addr;
    // The value is in memory at a hard-coded address.
    // If the type is a pointer, it means the pointer address is at this memory location.
    try emit.moveImmediate(reg, addr);
    try emit.writeInstruction(Instruction.ldr(
        reg,
        reg,
        Instruction.LoadStoreOffset.none,
    ));
}

    if (emit.bin_file.options.pie) {
        // PC-relative displacement to the entry in the GOT table.
        // adrp
        const offset = @intCast(u32, emit.code.items.len);
        try emit.writeInstruction(Instruction.adrp(reg, 0));
fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const payload = emit.mir.instructions.items(.data)[inst].payload;
    const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data;
    const reg = @intToEnum(Register, data.register);

        // ldr reg, reg, offset
        try emit.writeInstruction(Instruction.ldr(
            reg,
            reg,
            Instruction.LoadStoreOffset.imm(0),
        ));
    // PC-relative displacement to the entry in the GOT table.
    // adrp
    const offset = @intCast(u32, emit.code.items.len);
    try emit.writeInstruction(Instruction.adrp(reg, 0));

        if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
            const atom = macho_file.atom_by_index_table.get(load_memory.atom_index).?;
            // Page reloc for adrp instruction.
            try atom.relocs.append(emit.bin_file.allocator, .{
                .offset = offset,
                .target = .{ .local = addr },
                .addend = 0,
                .subtractor = null,
                .pcrel = true,
                .length = 2,
                .@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGE21),
            });
            // Pageoff reloc for adrp instruction.
            try atom.relocs.append(emit.bin_file.allocator, .{
                .offset = offset + 4,
                .target = .{ .local = addr },
                .addend = 0,
                .subtractor = null,
                .pcrel = false,
                .length = 2,
                .@"type" = @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGEOFF12),
            });
        } else {
            return emit.fail("TODO implement load_memory for PIE GOT indirection on this platform", .{});
        }
    // ldr reg, reg, offset
    try emit.writeInstruction(Instruction.ldr(
        reg,
        reg,
        Instruction.LoadStoreOffset.imm(0),
    ));

    if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
        const atom = macho_file.atom_by_index_table.get(data.atom_index).?;
        // Page reloc for adrp instruction.
        try atom.relocs.append(emit.bin_file.allocator, .{
            .offset = offset,
            .target = .{ .local = data.sym_index },
            .addend = 0,
            .subtractor = null,
            .pcrel = true,
            .length = 2,
            .@"type" = switch (tag) {
                .load_memory_got => @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGE21),
                .load_memory_direct => @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_PAGE21),
                else => unreachable,
            },
        });
        // Pageoff reloc for adrp instruction.
        try atom.relocs.append(emit.bin_file.allocator, .{
            .offset = offset + 4,
            .target = .{ .local = data.sym_index },
            .addend = 0,
            .subtractor = null,
            .pcrel = false,
            .length = 2,
            .@"type" = switch (tag) {
                .load_memory_got => @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_GOT_LOAD_PAGEOFF12),
                .load_memory_direct => @enumToInt(std.macho.reloc_type_arm64.ARM64_RELOC_PAGEOFF12),
                else => unreachable,
            },
        });
    } else {
        // The value is in memory at a hard-coded address.
        // If the type is a pointer, it means the pointer address is at this memory location.
        try emit.moveImmediate(reg, addr);
        try emit.writeInstruction(Instruction.ldr(
            reg,
            reg,
            Instruction.LoadStoreOffset.none,
        ));
        return emit.fail("TODO implement load_memory for PIE GOT indirection on this platform", .{});
    }
}

@@ -58,8 +58,12 @@ pub const Inst = struct {
        eor_shifted_register,
        /// Pseudo-instruction: Load memory
        ///
        /// Payload is `LoadMemory`
        /// Payload is `load_memory`
        load_memory,
        /// Payload is `LoadMemoryPie`
        load_memory_got,
        /// Payload is `LoadMemoryPie`
        load_memory_direct,
        /// Load Pair of Registers
        ldp,
        /// Pseudo-instruction: Load from stack
@@ -157,8 +161,6 @@ pub const Inst = struct {
        /// Used by e.g. svc
        imm16: u16,
        /// Index into `extra`. Meaning of what can be found there is context-dependent.
        ///
        /// Used by e.g. load_memory
        payload: u32,
        /// A register
        ///
@@ -298,6 +300,10 @@ pub const Inst = struct {
            line: u32,
            column: u32,
        },
        load_memory: struct {
            register: u32,
            addr: u32,
        },
    };

    // Make sure we don't accidentally make instructions bigger than expected.
@@ -335,8 +341,10 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
    };
}

pub const LoadMemory = struct {
    atom_index: u32,
pub const LoadMemoryPie = struct {
    register: u32,
    addr: u32,
    /// Index of the containing atom.
    atom_index: u32,
    /// Index into the linker's symbol table.
    sym_index: u32,
};
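The fixed two-instruction expansion is also why instructionSize above returns 2 * 4 for both new pseudo-instructions, while a non-PIE load_memory is sized by its absolute address. A hedged, self-contained sketch of that accounting (loadMemorySize is a made-up stand-in, not the actual Emit function):

```zig
const std = @import("std");

// Sketch of the size accounting in instructionSize: the PIE pseudo-instructions
// (load_memory_got / load_memory_direct) are always an adrp + ldr pair, while a
// plain load_memory is movz, [movk, ...], ldr sized by the hard-coded address.
fn loadMemorySize(is_pie_load: bool, addr: u64) usize {
    if (is_pie_load) return 2 * 4; // adrp, ldr
    // movz, [movk, ...], ldr
    if (addr <= std.math.maxInt(u16)) return 2 * 4;
    if (addr <= std.math.maxInt(u32)) return 3 * 4;
    if (addr <= std.math.maxInt(u48)) return 4 * 4;
    return 5 * 4;
}

test "load_memory sizes" {
    try std.testing.expectEqual(@as(usize, 8), loadMemorySize(true, 0));
    try std.testing.expectEqual(@as(usize, 12), loadMemorySize(false, 0x1_0000));
}
```

Keeping the PIE form at a constant size means instruction offsets stay computable before the linker has resolved anything, which is what lets the relocations in mirLoadMemoryPie simply point at the adrp and the following ldr.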