elf+aarch64: implement enough to link dynamically with gcc as the driver

Jakub Konka 2024-03-05 23:15:39 +01:00
parent f3227598eb
commit 1cf45fb209
6 changed files with 137 additions and 51 deletions

View File

@@ -1373,7 +1373,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
try self.writeSyntheticSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
if (self.entry_index == null and self.base.isExe()) {
log.debug("flushing. no_entry_point_found = true", .{});
@@ -4047,7 +4054,7 @@ fn updateSectionSizes(self: *Elf) !void {
}
if (self.plt_section_index) |index| {
self.shdrs.items[index].sh_size = self.plt.size();
self.shdrs.items[index].sh_size = self.plt.size(self);
}
if (self.got_plt_section_index) |index| {
@@ -4692,14 +4699,7 @@ fn writeSyntheticSections(self: *Elf) !void {
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
eh_frame.writeEhFrame(self, buffer.writer()) catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try eh_frame.writeEhFrame(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
}
@@ -4731,7 +4731,7 @@ fn writeSyntheticSections(self: *Elf) !void {
if (self.plt_section_index) |shndx| {
const shdr = self.shdrs.items[shndx];
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size());
var buffer = try std.ArrayList(u8).initCapacity(gpa, self.plt.size(self));
defer buffer.deinit();
try self.plt.write(self, buffer.writer());
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);

View File

@@ -139,6 +139,7 @@ const Parser = struct {
} else return error.UnexpectedToken;
};
if (std.mem.eql(u8, value, "elf64-x86-64")) return .x86_64;
if (std.mem.eql(u8, value, "elf64-littleaarch64")) return .aarch64;
return error.UnknownCpuArch;
}
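Context for the new elf64-littleaarch64 case (an aside, not part of the diff): when gcc drives the link on a glibc-based aarch64 system, the libc.so it passes to the linker is typically a GNU ld script rather than an ELF shared object, and this parser presumably reads its OUTPUT_FORMAT directive to learn the target architecture. A representative script is sketched below as a Zig string literal; the paths and exact contents are illustrative and vary by distribution.

// Illustrative only: roughly what /usr/lib/aarch64-linux-gnu/libc.so looks like
// on a Debian-style aarch64 system. The parser above matches the value inside
// OUTPUT_FORMAT(...) to pick the CPU architecture.
const example_libc_ld_script =
    \\/* GNU ld script */
    \\OUTPUT_FORMAT(elf64-littleaarch64)
    \\GROUP ( /lib/aarch64-linux-gnu/libc.so.6
    \\        /usr/lib/aarch64-linux-gnu/libc_nonshared.a
    \\        AS_NEEDED ( /lib/ld-linux-aarch64.so.1 ) )
;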

View File

@@ -371,17 +371,16 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
const relocs_shndx = for (self.shdrs.items, 0..) |shdr, i| switch (shdr.sh_type) {
elf.SHT_RELA => if (shdr.sh_info == shndx) break @as(u32, @intCast(i)),
else => {},
} else {
// TODO: convert into an error
log.debug("{s}: missing reloc section for unwind info section", .{self.fmtPath()});
return;
};
} else null;
const raw = try self.preadShdrContentsAlloc(allocator, handle, shndx);
defer allocator.free(raw);
const data_start = @as(u32, @intCast(self.eh_frame_data.items.len));
try self.eh_frame_data.appendSlice(allocator, raw);
const relocs = try self.preadRelocsAlloc(allocator, handle, relocs_shndx);
const relocs = if (relocs_shndx) |index|
try self.preadRelocsAlloc(allocator, handle, index)
else
&[0]elf.Elf64_Rela{};
defer allocator.free(relocs);
const rel_start = @as(u32, @intCast(self.relocs.items.len));
try self.relocs.appendUnalignedSlice(allocator, relocs);

View File

@@ -146,7 +146,8 @@ pub fn pltAddress(symbol: Symbol, elf_file: *Elf) u64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
return shdr.sh_addr + extras.plt * 16 + PltSection.preamble_size;
const cpu_arch = elf_file.getTarget().cpu.arch;
return shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch);
}
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) u64 {
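A quick sanity check of the new pltAddress formula above (an illustrative sketch, not code from this commit; the .plt address and symbol index are made up). With the aarch64 sizes introduced below in PltSection, an 8-instruction preamble and 4-instruction entries, the n-th PLT entry starts 32 + n * 16 bytes past the section's sh_addr:

test "pltAddress layout for aarch64 (illustrative)" {
    const std = @import("std");
    const preamble_size: u64 = 8 * @sizeOf(u32); // PltSection.preambleSize(.aarch64) == 32 bytes
    const entry_size: u64 = 4 * @sizeOf(u32); // PltSection.entrySize(.aarch64) == 16 bytes
    const plt_sh_addr: u64 = 0x210000; // hypothetical .plt section address
    const plt_index: u64 = 2; // hypothetical extras.plt for the third tracked symbol
    try std.testing.expectEqual(@as(u64, 0x210040), plt_sh_addr + plt_index * entry_size + preamble_size);
}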

View File

@@ -214,6 +214,7 @@ pub const Iterator = struct {
const reader = stream.reader();
const size = try reader.readInt(u32, .little);
if (size == 0) return null;
if (size == 0xFFFFFFFF) @panic("TODO");
const id = try reader.readInt(u32, .little);

View File

@@ -857,8 +857,6 @@ pub const PltSection = struct {
symbols: std.ArrayListUnmanaged(Symbol.Index) = .{},
output_symtab_ctx: Elf.SymtabCtx = .{},
pub const preamble_size = 32;
pub fn deinit(plt: *PltSection, allocator: Allocator) void {
plt.symbols.deinit(allocator);
}
@@ -877,39 +875,33 @@ pub const PltSection = struct {
try plt.symbols.append(gpa, sym_index);
}
pub fn size(plt: PltSection) usize {
return preamble_size + plt.symbols.items.len * 16;
pub fn size(plt: PltSection, elf_file: *Elf) usize {
const cpu_arch = elf_file.getTarget().cpu.arch;
return preambleSize(cpu_arch) + plt.symbols.items.len * entrySize(cpu_arch);
}
pub fn preambleSize(cpu_arch: std.Target.Cpu.Arch) usize {
return switch (cpu_arch) {
.x86_64 => 32,
.aarch64 => 8 * @sizeOf(u32),
else => @panic("TODO implement preambleSize for this cpu arch"),
};
}
pub fn entrySize(cpu_arch: std.Target.Cpu.Arch) usize {
return switch (cpu_arch) {
.x86_64 => 16,
.aarch64 => 4 * @sizeOf(u32),
else => @panic("TODO implement entrySize for this cpu arch"),
};
}
pub fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
var preamble = [_]u8{
0xf3, 0x0f, 0x1e, 0xfa, // endbr64
0x41, 0x53, // push r11
0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // push qword ptr [rip] -> .got.plt[1]
0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp qword ptr [rip] -> .got.plt[2]
};
var disp = @as(i64, @intCast(got_plt_addr + 8)) - @as(i64, @intCast(plt_addr + 8)) - 4;
mem.writeInt(i32, preamble[8..][0..4], @as(i32, @intCast(disp)), .little);
disp = @as(i64, @intCast(got_plt_addr + 16)) - @as(i64, @intCast(plt_addr + 14)) - 4;
mem.writeInt(i32, preamble[14..][0..4], @as(i32, @intCast(disp)), .little);
try writer.writeAll(&preamble);
try writer.writeByteNTimes(0xcc, preamble_size - preamble.len);
for (plt.symbols.items, 0..) |sym_index, i| {
const sym = elf_file.symbol(sym_index);
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 12)) - 4;
var entry = [_]u8{
0xf3, 0x0f, 0x1e, 0xfa, // endbr64
0x41, 0xbb, 0x00, 0x00, 0x00, 0x00, // mov r11d, N
0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp qword ptr [rip] -> .got.plt[N]
};
mem.writeInt(i32, entry[6..][0..4], @as(i32, @intCast(i)), .little);
mem.writeInt(i32, entry[12..][0..4], @as(i32, @intCast(disp)), .little);
try writer.writeAll(&entry);
const cpu_arch = elf_file.getTarget().cpu.arch;
switch (cpu_arch) {
.x86_64 => try x86_64.write(plt, elf_file, writer),
.aarch64 => try aarch64.write(plt, elf_file, writer),
else => return error.UnsupportedCpuArch,
}
}
@@ -946,6 +938,7 @@ pub const PltSection = struct {
}
pub fn writeSymtab(plt: PltSection, elf_file: *Elf) void {
const cpu_arch = elf_file.getTarget().cpu.arch;
for (plt.symbols.items, plt.output_symtab_ctx.ilocal..) |sym_index, ilocal| {
const sym = elf_file.symbol(sym_index);
const st_name = @as(u32, @intCast(elf_file.strtab.items.len));
@@ -958,7 +951,7 @@ pub const PltSection = struct {
.st_other = 0,
.st_shndx = @intCast(elf_file.plt_section_index.?),
.st_value = sym.pltAddress(elf_file),
.st_size = 16,
.st_size = entrySize(cpu_arch),
};
}
}
@@ -992,6 +985,97 @@ pub const PltSection = struct {
});
}
}
const x86_64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
var preamble = [_]u8{
0xf3, 0x0f, 0x1e, 0xfa, // endbr64
0x41, 0x53, // push r11
0xff, 0x35, 0x00, 0x00, 0x00, 0x00, // push qword ptr [rip] -> .got.plt[1]
0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp qword ptr [rip] -> .got.plt[2]
};
var disp = @as(i64, @intCast(got_plt_addr + 8)) - @as(i64, @intCast(plt_addr + 8)) - 4;
mem.writeInt(i32, preamble[8..][0..4], @as(i32, @intCast(disp)), .little);
disp = @as(i64, @intCast(got_plt_addr + 16)) - @as(i64, @intCast(plt_addr + 14)) - 4;
mem.writeInt(i32, preamble[14..][0..4], @as(i32, @intCast(disp)), .little);
try writer.writeAll(&preamble);
try writer.writeByteNTimes(0xcc, preambleSize(.x86_64) - preamble.len);
for (plt.symbols.items, 0..) |sym_index, i| {
const sym = elf_file.symbol(sym_index);
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
disp = @as(i64, @intCast(target_addr)) - @as(i64, @intCast(source_addr + 12)) - 4;
var entry = [_]u8{
0xf3, 0x0f, 0x1e, 0xfa, // endbr64
0x41, 0xbb, 0x00, 0x00, 0x00, 0x00, // mov r11d, N
0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp qword ptr [rip] -> .got.plt[N]
};
mem.writeInt(i32, entry[6..][0..4], @as(i32, @intCast(i)), .little);
mem.writeInt(i32, entry[12..][0..4], @as(i32, @intCast(disp)), .little);
try writer.writeAll(&entry);
}
}
};
const aarch64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
{
const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
// TODO: relax if possible
// .got.plt[2]
const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
const ldr_off = try aarch64_util.calcPageOffset(.load_store_64, got_plt_addr + 16);
const add_off = try aarch64_util.calcPageOffset(.arithmetic, got_plt_addr + 16);
const preamble = &[_]Instruction{
Instruction.stp(
.x16,
.x30,
Register.sp,
Instruction.LoadStorePairOffset.pre_index(-16),
),
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
Instruction.add(.x16, .x16, add_off, false),
Instruction.br(.x17),
Instruction.nop(),
Instruction.nop(),
Instruction.nop(),
};
comptime assert(preamble.len == 8);
for (preamble) |inst| {
try writer.writeInt(u32, inst.toU32(), .little);
}
}
for (plt.symbols.items) |sym_index| {
const sym = elf_file.symbol(sym_index);
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const ldr_off = try aarch64_util.calcPageOffset(.load_store_64, target_addr);
const add_off = try aarch64_util.calcPageOffset(.arithmetic, target_addr);
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
Instruction.add(.x16, .x16, add_off, false),
Instruction.br(.x17),
};
comptime assert(insts.len == 4);
for (insts) |inst| {
try writer.writeInt(u32, inst.toU32(), .little);
}
}
}
const aarch64_util = @import("../aarch64.zig");
const Instruction = aarch64_util.Instruction;
const Register = aarch64_util.Register;
};
};
pub const GotPltSection = struct {
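Closing note on the aarch64 helpers used in the PLT writer above (an aside, not part of the commit): calcNumberOfPages and calcPageOffset come from ../aarch64.zig, and are assumed to compute the ADRP page delta and the access-size-scaled low 12 bits of the target, respectively. A standalone sketch of that arithmetic follows; the function names and addresses here are hypothetical stand-ins, not the real aarch64.zig API.

const std = @import("std");

// Page delta for ADRP: difference of 4 KiB page indices, which must fit the
// instruction's signed 21-bit immediate. (What calcNumberOfPages is assumed
// to return in the code above.)
fn pageDelta(source: u64, target: u64) error{Overflow}!i21 {
    const delta = @as(i64, @intCast(target >> 12)) - @as(i64, @intCast(source >> 12));
    return std.math.cast(i21, delta) orelse return error.Overflow;
}

// Low 12 bits of the target, scaled by the access size of LDR Xt, [Xn, #imm]
// (an 8-byte load), as calcPageOffset(.load_store_64, ...) is assumed to do.
fn pageOffsetLoadStore64(target: u64) u12 {
    const low: u12 = @truncate(target);
    return low >> 3;
}

test "adrp + ldr addressing math (illustrative addresses)" {
    const plt_entry_addr: u64 = 0x210040; // hypothetical PLT entry
    const got_plt_slot: u64 = 0x230028; // hypothetical .got.plt slot
    try std.testing.expectEqual(@as(i21, 0x20), try pageDelta(plt_entry_addr, got_plt_slot));
    try std.testing.expectEqual(@as(u12, 5), pageOffsetLoadStore64(got_plt_slot));
}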