x86_64+elf: fix jump table indirection for functions

Author: Jakub Konka — 2024-08-12 22:51:33 +02:00
parent 16abf51cee
commit e3f6ebaea9
2 changed files with 29 additions and 43 deletions

View File

@ -12311,33 +12311,25 @@ fn genCall(self: *Self, info: union(enum) {
const zo = elf_file.zigObjectPtr().?;
const sym_index = try zo.getOrCreateMetadataForNav(elf_file, func.owner_nav);
if (self.mod.pic) {
// const callee_reg: Register = switch (resolved_cc) {
// .SysV => callee: {
// if (!fn_info.is_var_args) break :callee .rax;
// const param_regs = abi.getCAbiIntParamRegs(resolved_cc);
// break :callee if (call_info.gp_count < param_regs.len)
// param_regs[call_info.gp_count]
// else
// .r10;
// },
// .Win64 => .rax,
// else => unreachable,
// };
// TODO convert to near jump
try self.asmMemory(.{ ._, .call }, .{
.base = .{ .reloc = .{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym_index,
} },
.mod = .{ .rm = .{ .size = .qword } },
});
// try self.genSetReg(
// callee_reg,
// Type.usize,
// .{ .load_symbol = .{ .sym = sym_index } },
// .{},
// );
// try self.asmRegister(.{ ._, .call }, callee_reg);
const callee_reg: Register = switch (resolved_cc) {
.SysV => callee: {
if (!fn_info.is_var_args) break :callee .rax;
const param_regs = abi.getCAbiIntParamRegs(resolved_cc);
break :callee if (call_info.gp_count < param_regs.len)
param_regs[call_info.gp_count]
else
.r10;
},
.Win64 => .rax,
else => unreachable,
};
try self.genSetReg(
callee_reg,
Type.usize,
.{ .lea_symbol = .{ .sym = sym_index } },
.{},
);
try self.asmRegister(.{ ._, .call }, callee_reg);
} else try self.asmMemory(.{ ._, .call }, .{
.base = .{ .reloc = .{
.atom_index = try self.owner.getSymbolIndex(self),

View File

@ -107,23 +107,17 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
}
self.relocs.deinit(allocator);
{
var it = self.navs.iterator();
while (it.next()) |entry| {
entry.value_ptr.exports.deinit(allocator);
}
self.navs.deinit(allocator);
for (self.navs.values()) |*meta| {
meta.exports.deinit(allocator);
}
self.navs.deinit(allocator);
self.lazy_syms.deinit(allocator);
{
var it = self.uavs.iterator();
while (it.next()) |entry| {
entry.value_ptr.exports.deinit(allocator);
}
self.uavs.deinit(allocator);
for (self.uavs.values()) |*meta| {
meta.exports.deinit(allocator);
}
self.uavs.deinit(allocator);
for (self.tls_variables.values()) |*tlv| {
tlv.deinit(allocator);
@ -1721,8 +1715,8 @@ const TlsVariable = struct {
};
const AtomList = std.ArrayListUnmanaged(Atom.Index);
const NavTable = std.AutoHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
const UavTable = std.AutoHashMapUnmanaged(InternPool.Index, AvMetadata);
const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata);
const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata);
const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable);
@ -1874,9 +1868,9 @@ pub const OffsetTable = struct {
const x86_64 = struct {
fn writeEntry(source_addr: i64, target_addr: i64, buf: *[max_jump_seq_len]u8) ![]u8 {
const disp = @as(i64, @intCast(target_addr)) - source_addr - 4;
const disp = @as(i64, @intCast(target_addr)) - source_addr - 5;
var bytes = [_]u8{
0xe8, 0x00, 0x00, 0x00, 0x00, // jmp rel32
0xe9, 0x00, 0x00, 0x00, 0x00, // jmp rel32
};
assert(bytes.len == entrySize(.x86_64));
mem.writeInt(i32, bytes[1..][0..4], @intCast(disp), .little);