Merge pull request #19034 from ziglang/elf-riscv

elf: add basic aarch64 and riscv64 support
This commit is contained in:
Jakub Konka 2024-02-23 21:41:14 +01:00 committed by GitHub
commit 7230b68b35
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 1411 additions and 669 deletions

View File

@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.12.0-dev.203+d3bc1cfc4"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-6.1.0.1/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-8.2.1/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.

View File

@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.12.0-dev.203+d3bc1cfc4"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-6.1.0.1/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v10.0.2-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-8.2.1/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.

View File

@ -189,6 +189,7 @@ gnu_eh_frame_hdr_index: ?Symbol.Index = null,
dso_handle_index: ?Symbol.Index = null,
rela_iplt_start_index: ?Symbol.Index = null,
rela_iplt_end_index: ?Symbol.Index = null,
global_pointer_index: ?Symbol.Index = null,
start_stop_indexes: std.ArrayListUnmanaged(u32) = .{},
/// An array of symbols parsed across all input files.
@ -1343,6 +1344,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
// Beyond this point, everything has been allocated a virtual address and we can resolve
// the relocations, and commit objects to file.
if (self.zigObjectPtr()) |zig_object| {
var has_reloc_errors = false;
for (zig_object.atoms.items) |atom_index| {
const atom_ptr = self.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
@ -1353,10 +1355,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
defer gpa.free(code);
const file_offset = shdr.sh_offset + atom_ptr.value;
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
@ -1365,19 +1364,14 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
};
try self.base.file.?.pwriteAll(code, file_offset);
}
if (has_reloc_errors) return error.FlushFailure;
}
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
self.writeSyntheticSections() catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try self.writeSyntheticSections();
if (self.entry_index == null and self.base.isExe()) {
log.debug("flushing. no_entry_point_found = true", .{});
@ -2048,18 +2042,23 @@ fn scanRelocs(self: *Elf) !void {
if (self.zigObjectPtr()) |zo| objects.appendAssumeCapacity(zo.index);
objects.appendSliceAssumeCapacity(self.objects.items);
var has_reloc_errors = false;
for (objects.items) |index| {
self.file(index).?.scanRelocs(self, &undefs) catch |err| switch (err) {
error.RelaxFailure => unreachable,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
for (self.symbols.items, 0..) |*sym, i| {
const index = @as(u32, @intCast(i));
if (!sym.isLocal(self) and !sym.flags.has_dynamic) {
@ -3095,6 +3094,10 @@ fn addLinkerDefinedSymbols(self: *Elf) !void {
}
}
if (self.getTarget().cpu.arch == .riscv64 and self.base.isDynLib()) {
self.global_pointer_index = try linker_defined.addGlobal("__global_pointer$", self);
}
linker_defined.resolveSymbols(self);
}
@ -3222,6 +3225,19 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
stop.output_section_index = shndx;
}
}
// __global_pointer$
if (self.global_pointer_index) |index| {
const sym = self.symbol(index);
if (self.sectionByName(".sdata")) |shndx| {
const shdr = self.shdrs.items[shndx];
sym.value = shdr.sh_addr + 0x800;
sym.output_section_index = shndx;
} else {
sym.value = 0;
sym.output_section_index = 0;
}
}
}
fn checkDuplicates(self: *Elf) !void {
@ -4431,6 +4447,8 @@ fn writeAtoms(self: *Elf) !void {
undefs.deinit();
}
var has_reloc_errors = false;
// TODO iterate over `output_sections` directly
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_type == elf.SHT_NULL) continue;
@ -4493,14 +4511,11 @@ fn writeAtoms(self: *Elf) !void {
else
atom_ptr.resolveRelocsAlloc(self, out_code);
_ = res catch |err| switch (err) {
// TODO
error.RelaxFail, error.InvalidInstruction, error.CannotEncode => {
log.err("relaxing intructions failed; TODO this should be a fatal linker error", .{});
},
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
else => |e| return e,
};
}
@ -4509,6 +4524,8 @@ fn writeAtoms(self: *Elf) !void {
}
try self.reportUndefinedSymbols(&undefs);
if (has_reloc_errors) return error.FlushFailure;
}
pub fn updateSymtabSize(self: *Elf) !void {
@ -4665,7 +4682,14 @@ fn writeSyntheticSections(self: *Elf) !void {
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
eh_frame.writeEhFrame(self, buffer.writer()) catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
return error.FlushFailure;
},
else => |e| return e,
};
try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
}
@ -5526,6 +5550,15 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
return &self.comdat_groups_owners.items[index];
}
/// Returns the virtual address used as the GOT base for relocation math.
/// On x86_64 the .got.plt section is preferred when it exists; every other
/// case falls back to .got. Returns 0 when neither section was allocated.
pub fn gotAddress(self: *Elf) u64 {
    const maybe_shndx = if (self.getTarget().cpu.arch == .x86_64 and self.got_plt_section_index != null)
        self.got_plt_section_index
    else
        self.got_section_index;
    const shndx = maybe_shndx orelse return 0;
    return self.shdrs.items[shndx].sh_addr;
}
pub fn tpAddress(self: *Elf) u64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];

File diff suppressed because it is too large Load Diff

View File

@ -245,6 +245,9 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
atom.rel_index = @intCast(self.relocs.items.len);
atom.rel_num = @intCast(relocs.len);
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[atom.rel_index..][0..atom.rel_num]);
}
}
},
else => {},
@ -333,6 +336,7 @@ fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
if (mem.startsWith(u8, name, ".note")) break :blk true;
if (mem.startsWith(u8, name, ".comment")) break :blk true;
if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
mem.startsWith(u8, name, ".debug")) break :blk true;
break :blk false;
@ -381,12 +385,15 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
defer allocator.free(relocs);
const rel_start = @as(u32, @intCast(self.relocs.items.len));
try self.relocs.appendUnalignedSlice(allocator, relocs);
if (elf_file.getTarget().cpu.arch == .riscv64) {
sortRelocs(self.relocs.items[rel_start..][0..relocs.len]);
}
const fdes_start = self.fdes.items.len;
const cies_start = self.cies.items.len;
var it = eh_frame.Iterator{ .data = raw };
while (try it.next()) |rec| {
const rel_range = filterRelocs(relocs, rec.offset, rec.size + 4);
const rel_range = filterRelocs(self.relocs.items[rel_start..][0..relocs.len], rec.offset, rec.size + 4);
switch (rec.tag) {
.cie => try self.cies.append(allocator, .{
.offset = data_start + rec.offset,
@ -449,8 +456,18 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
}
}
/// Sorts relocation entries in-place by ascending r_offset.
/// Required on riscv64 where paired relocations must be walked in
/// file-offset order.
fn sortRelocs(relocs: []elf.Elf64_Rela) void {
    const byOffset = struct {
        fn lessThan(_: void, lhs: elf.Elf64_Rela, rhs: elf.Elf64_Rela) bool {
            return lhs.r_offset < rhs.r_offset;
        }
    };
    mem.sort(elf.Elf64_Rela, relocs, {}, byOffset.lessThan);
}
fn filterRelocs(
relocs: []align(1) const elf.Elf64_Rela,
relocs: []const elf.Elf64_Rela,
start: u64,
len: u64,
) struct { start: u64, len: u64 } {
@ -832,7 +849,8 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
const esym = local.elfSym(elf_file);
switch (esym.st_type()) {
elf.STT_SECTION, elf.STT_NOTYPE => continue,
elf.STT_SECTION => continue,
elf.STT_NOTYPE => if (esym.st_shndx == elf.SHN_UNDEF) continue,
else => {},
}
local.flags.output_symtab = true;

View File

@ -317,7 +317,9 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
});
switch (cpu_arch) {
.x86_64 => x86_64.resolveReloc(rel, P, S + A, contents[offset..]),
.x86_64 => try x86_64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
.aarch64 => try aarch64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
.riscv64 => try riscv.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
else => return error.UnsupportedCpuArch,
}
}
@ -325,6 +327,8 @@ fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file:
pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
relocs_log.debug("{x}: .eh_frame", .{elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr});
var has_reloc_errors = false;
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
@ -335,7 +339,10 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (cie.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(cie, sym, rel, elf_file, contents);
resolveReloc(cie, sym, rel, elf_file, contents) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
try writer.writeAll(contents);
@ -359,7 +366,10 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
for (fde.relocs(elf_file)) |rel| {
const sym = elf_file.symbol(object.symbols.items[rel.r_sym()]);
try resolveReloc(fde, sym, rel, elf_file, contents);
resolveReloc(fde, sym, rel, elf_file, contents) catch |err| switch (err) {
error.RelocFailure => has_reloc_errors = true,
else => |e| return e,
};
}
try writer.writeAll(contents);
@ -367,6 +377,8 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
}
try writer.writeInt(u32, 0, .little);
if (has_reloc_errors) return error.RelocFailure;
}
pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void {
@ -540,18 +552,53 @@ const EH_PE = struct {
};
const x86_64 = struct {
    /// Resolves one .eh_frame relocation for x86_64.
    /// `source` is the runtime address of the relocated location (P);
    /// `target` is the symbol address with addend already applied (S + A).
    /// NOTE: the SOURCE diff left both the pre- and post-change signature and
    /// `else` arm in place; this is the resolved (new) version, which reports
    /// unsupported relocation types as linker errors instead of `unreachable`.
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            .@"32" => std.mem.writeInt(i32, data[0..4], @as(i32, @truncate(target)), .little),
            .@"64" => std.mem.writeInt(i64, data[0..8], target, .little),
            .PC32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
            .PC64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
const aarch64 = struct {
    /// Resolves one .eh_frame relocation for aarch64.
    /// `source` is the runtime address of the relocated location (P);
    /// `target` is the symbol address with addend already applied (S + A).
    /// Unsupported relocation types are reported as linker errors.
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            .ABS64 => std.mem.writeInt(i64, data[0..8], target, .little),
            .PREL32 => {
                const disp: i32 = @intCast(target - source);
                std.mem.writeInt(i32, data[0..4], disp, .little);
            },
            .PREL64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
const riscv = struct {
    /// Resolves one .eh_frame relocation for riscv64.
    /// Only R_RISCV_32_PCREL (and NONE) are expected in .eh_frame here;
    /// anything else is reported as a linker error.
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            .@"32_PCREL" => {
                const disp: i32 = @intCast(target - source);
                std.mem.writeInt(i32, data[0..4], disp, .little);
            },
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
/// Emits a linker diagnostic for a relocation type that is not supported in
/// .eh_frame, then returns error.RelocFailure so the caller can accumulate
/// failures and keep scanning instead of aborting on the first bad entry.
fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
    var err = try elf_file.addErrorWithNotes(1);
    try err.addMsg(elf_file, "invalid relocation type {} at offset 0x{x}", .{
        relocation.fmtRelocType(rel.r_type(), elf_file.getTarget().cpu.arch),
        rel.r_offset,
    });
    // `rec` is a CIE or FDE record; both expose `file_index`, which names the
    // object file the offending .eh_frame data came from.
    try err.addNote(elf_file, "in {}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
    return error.RelocFailure;
}
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;

View File

@ -1,4 +1,6 @@
pub const Kind = enum {
none,
other,
abs,
copy,
rel,
@ -13,23 +15,24 @@ pub const Kind = enum {
/// Builds a per-architecture mapping between the target-independent relocation
/// `Kind` and the architecture's relocation-type enum.
/// NOTE: the SOURCE diff left both the old and new bodies interleaved
/// (`?Kind` vs `Kind` signatures, `return null;` vs `return .other;`,
/// `unreachable;` vs `@panic`); this is the resolved (new) version.
/// `decode` is total: any relocation type missing from `mapping` decodes to
/// `.other` rather than failing, so unknown relocations can be reported.
fn Table(comptime len: comptime_int, comptime RelType: type, comptime mapping: [len]struct { Kind, RelType }) type {
    return struct {
        fn decode(r_type: u32) Kind {
            inline for (mapping) |entry| {
                if (@intFromEnum(entry[1]) == r_type) return entry[0];
            }
            return .other;
        }

        fn encode(comptime kind: Kind) u32 {
            inline for (mapping) |entry| {
                if (entry[0] == kind) return @intFromEnum(entry[1]);
            }
            // `.other` stands for many relocation types at once, so there is
            // no single value to encode it to.
            @panic("encoding .other is ambiguous");
        }
    };
}
const x86_64_relocs = Table(10, elf.R_X86_64, .{
const x86_64_relocs = Table(11, elf.R_X86_64, .{
.{ .none, .NONE },
.{ .abs, .@"64" },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },
@ -42,7 +45,8 @@ const x86_64_relocs = Table(10, elf.R_X86_64, .{
.{ .tlsdesc, .TLSDESC },
});
const aarch64_relocs = Table(10, elf.R_AARCH64, .{
const aarch64_relocs = Table(11, elf.R_AARCH64, .{
.{ .none, .NONE },
.{ .abs, .ABS64 },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },
@ -55,7 +59,8 @@ const aarch64_relocs = Table(10, elf.R_AARCH64, .{
.{ .tlsdesc, .TLSDESC },
});
const riscv64_relocs = Table(10, elf.R_RISCV, .{
const riscv64_relocs = Table(11, elf.R_RISCV, .{
.{ .none, .NONE },
.{ .abs, .@"64" },
.{ .copy, .COPY },
.{ .rel, .RELATIVE },

View File

@ -699,14 +699,7 @@ fn resolveRelocInner(
const S_: i64 = @intCast(thunk.getTargetAddress(rel.target, macho_file));
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
var inst = aarch64.Instruction{
.unconditional_branch_immediate = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.unconditional_branch_immediate,
), code[rel_offset..][0..4]),
};
inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
try writer.writeInt(u32, inst.toU32(), .little);
try aarch64.writeBranchImm(disp, code[rel_offset..][0..4]);
},
else => unreachable,
}
@ -776,16 +769,8 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
const pages = @as(u21, @bitCast(try Relocation.calcNumberOfPages(source, target)));
var inst = aarch64.Instruction{
.pc_relative_address = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.pc_relative_address,
), code[rel_offset..][0..4]),
};
inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
try writer.writeInt(u32, inst.toU32(), .little);
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(source, target)));
try aarch64.writePages(pages, code[rel_offset..][0..4]);
},
.pageoff => {
@ -794,35 +779,8 @@ fn resolveRelocInner(
assert(!rel.meta.pcrel);
const target = math.cast(u64, S + A) orelse return error.Overflow;
const inst_code = code[rel_offset..][0..4];
if (Relocation.isArithmeticOp(inst_code)) {
const off = try Relocation.calcPageOffset(target, .arithmetic);
var inst = aarch64.Instruction{
.add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
), inst_code),
};
inst.add_subtract_immediate.imm12 = off;
try writer.writeInt(u32, inst.toU32(), .little);
} else {
var inst = aarch64.Instruction{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), inst_code),
};
const off = try Relocation.calcPageOffset(target, switch (inst.load_store_register.size) {
0 => if (inst.load_store_register.v == 1)
Relocation.PageOffsetInstKind.load_store_128
else
Relocation.PageOffsetInstKind.load_store_8,
1 => .load_store_16,
2 => .load_store_32,
3 => .load_store_64,
});
inst.load_store_register.offset = off;
try writer.writeInt(u32, inst.toU32(), .little);
}
const kind = aarch64.classifyInst(inst_code);
try aarch64.writePageOffset(kind, target, inst_code);
},
.got_load_pageoff => {
@ -830,15 +788,7 @@ fn resolveRelocInner(
assert(rel.meta.length == 2);
assert(!rel.meta.pcrel);
const target = math.cast(u64, G + A) orelse return error.Overflow;
const off = try Relocation.calcPageOffset(target, .load_store_64);
var inst: aarch64.Instruction = .{
.load_store_register = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.load_store_register,
), code[rel_offset..][0..4]),
};
inst.load_store_register.offset = off;
try writer.writeInt(u32, inst.toU32(), .little);
try aarch64.writePageOffset(.load_store_64, target, code[rel_offset..][0..4]);
},
.tlvp_pageoff => {
@ -863,7 +813,7 @@ fn resolveRelocInner(
const inst_code = code[rel_offset..][0..4];
const reg_info: RegInfo = blk: {
if (Relocation.isArithmeticOp(inst_code)) {
if (aarch64.isArithmeticOp(inst_code)) {
const inst = mem.bytesToValue(std.meta.TagPayload(
aarch64.Instruction,
aarch64.Instruction.add_subtract_immediate,
@ -890,7 +840,7 @@ fn resolveRelocInner(
.load_store_register = .{
.rt = reg_info.rd,
.rn = reg_info.rn,
.offset = try Relocation.calcPageOffset(target, .load_store_64),
.offset = try aarch64.calcPageOffset(.load_store_64, target),
.opc = 0b01,
.op1 = 0b01,
.v = 0,
@ -900,7 +850,7 @@ fn resolveRelocInner(
.add_subtract_immediate = .{
.rd = reg_info.rd,
.rn = reg_info.rn,
.imm12 = try Relocation.calcPageOffset(target, .arithmetic),
.imm12 = try aarch64.calcPageOffset(.arithmetic, target),
.sh = 0,
.s = 0,
.op = 0,
@ -1183,7 +1133,7 @@ pub const Loc = struct {
pub const Alignment = @import("../../InternPool.zig").Alignment;
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const bind = @import("dyld_info/bind.zig");
const macho = std.macho;

View File

@ -60,38 +60,6 @@ pub fn lessThan(ctx: void, lhs: Relocation, rhs: Relocation) bool {
return lhs.offset < rhs.offset;
}
pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
return pages;
}
pub const PageOffsetInstKind = enum {
arithmetic,
load_store_8,
load_store_16,
load_store_32,
load_store_64,
load_store_128,
};
pub fn calcPageOffset(taddr: u64, kind: PageOffsetInstKind) !u12 {
const narrowed = @as(u12, @truncate(taddr));
return switch (kind) {
.arithmetic, .load_store_8 => narrowed,
.load_store_16 => try math.divExact(u12, narrowed, 2),
.load_store_32 => try math.divExact(u12, narrowed, 4),
.load_store_64 => try math.divExact(u12, narrowed, 8),
.load_store_128 => try math.divExact(u12, narrowed, 16),
};
}
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
const group_decode = @as(u5, @truncate(inst[3]));
return ((group_decode >> 2) == 4);
}
pub const Type = enum {
// x86_64
/// RIP-relative displacement (X86_64_RELOC_SIGNED)

View File

@ -267,9 +267,9 @@ pub const StubsSection = struct {
},
.aarch64 => {
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@ -411,9 +411,9 @@ pub const StubsHelperSection = struct {
.aarch64 => {
{
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(sect.addr, dyld_private_addr);
const pages = try aarch64.calcNumberOfPages(sect.addr, dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(dyld_private_addr, .arithmetic);
const off = try aarch64.calcPageOffset(.arithmetic, dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
}
try writer.writeInt(u32, aarch64.Instruction.stp(
@ -424,9 +424,9 @@ pub const StubsHelperSection = struct {
).toU32(), .little);
{
// TODO relax if possible
const pages = try Relocation.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
const pages = try aarch64.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(dyld_stub_binder_addr, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, dyld_stub_binder_addr);
try writer.writeInt(u32, aarch64.Instruction.ldr(
.x16,
.x16,
@ -679,9 +679,9 @@ pub const ObjcStubsSection = struct {
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x1, .x1, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@ -692,9 +692,9 @@ pub const ObjcStubsSection = struct {
const target_sym = macho_file.getSymbol(macho_file.objc_msg_send_index.?);
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try Relocation.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(source, target);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(target, .load_store_64);
const off = try aarch64.calcPageOffset(.load_store_64, target);
try writer.writeInt(
u32,
aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
@ -778,7 +778,7 @@ pub const WeakBindSection = bind.WeakBind;
pub const LazyBindSection = bind.LazyBind;
pub const ExportTrieSection = Trie;
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const bind = @import("dyld_info/bind.zig");
const math = std.math;
@ -788,6 +788,5 @@ const trace = @import("../../tracy.zig").trace;
const Allocator = std.mem.Allocator;
const MachO = @import("../MachO.zig");
const Rebase = @import("dyld_info/Rebase.zig");
const Relocation = @import("Relocation.zig");
const Symbol = @import("Symbol.zig");
const Trie = @import("dyld_info/Trie.zig");

View File

@ -99,9 +99,9 @@ pub const Thunk = struct {
const sym = macho_file.getSymbol(sym_index);
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try Relocation.calcNumberOfPages(saddr, taddr);
const pages = try aarch64.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try Relocation.calcPageOffset(taddr, .arithmetic);
const off = try aarch64.calcPageOffset(.arithmetic, taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
}
@ -164,7 +164,7 @@ const max_distance = (1 << (jump_bits - 1));
/// and assume margin to be 5MiB.
const max_allowed_distance = max_distance - 0x500_000;
const aarch64 = @import("../../arch/aarch64/bits.zig");
const aarch64 = @import("../aarch64.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;

106
src/link/aarch64.zig Normal file
View File

@ -0,0 +1,106 @@
/// Returns true when the (little-endian) a64 instruction belongs to the
/// add/subtract-immediate group, i.e. bits [4:2] of the top byte decode
/// to 0b100.
pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
    return (inst[3] & 0b000_111_00) == 0b000_100_00;
}
/// Classifies the instruction consuming a page-offset (lo12) immediate.
/// The class determines how the 12-bit byte offset is scaled before being
/// written into the instruction (see calcPageOffset).
pub const PageOffsetInstKind = enum {
    arithmetic,
    load_store_8,
    load_store_16,
    load_store_32,
    load_store_64,
    load_store_128,
};
/// Inspects an encoded a64 instruction and reports which PageOffsetInstKind
/// its lo12 immediate must be scaled for: arithmetic ops take the raw offset,
/// loads/stores scale by their access size (v == 1 marks the 128-bit SIMD/FP
/// form of size 0).
pub fn classifyInst(code: *const [4]u8) PageOffsetInstKind {
    if (isArithmeticOp(code)) return .arithmetic;
    const ldst = mem.bytesToValue(std.meta.TagPayload(
        Instruction,
        Instruction.load_store_register,
    ), code);
    return switch (ldst.size) {
        0 => if (ldst.v == 1) .load_store_128 else .load_store_8,
        1 => .load_store_16,
        2 => .load_store_32,
        3 => .load_store_64,
    };
}
/// Computes the lo12 immediate for `taddr`, scaled for the instruction kind.
/// Loads/stores encode their offset divided by the access size; returns an
/// error (from divExact) when the low 12 bits are not suitably aligned.
pub fn calcPageOffset(kind: PageOffsetInstKind, taddr: u64) !u12 {
    const low12: u12 = @truncate(taddr);
    const scale: u12 = switch (kind) {
        .arithmetic, .load_store_8 => return low12,
        .load_store_16 => 2,
        .load_store_32 => 4,
        .load_store_64 => 8,
        .load_store_128 => 16,
    };
    return try math.divExact(u12, low12, scale);
}
/// Patches the lo12 immediate of the instruction at `code` in place with the
/// page offset of `taddr`. The immediate field differs by instruction class:
/// add/sub-immediate uses imm12, all load/store forms use the scaled offset
/// field. Errors propagate from calcPageOffset (misaligned offset).
pub fn writePageOffset(kind: PageOffsetInstKind, taddr: u64, code: *[4]u8) !void {
    const value = try calcPageOffset(kind, taddr);
    switch (kind) {
        .arithmetic => {
            // Reinterpret the raw bytes as the add/sub-immediate encoding,
            // splice in the new imm12, and write the instruction back.
            var inst = Instruction{
                .add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload(
                    Instruction,
                    Instruction.add_subtract_immediate,
                ), code),
            };
            inst.add_subtract_immediate.imm12 = value;
            mem.writeInt(u32, code, inst.toU32(), .little);
        },
        else => {
            // All remaining kinds are register load/stores; only the scaled
            // offset field changes.
            var inst: Instruction = .{
                .load_store_register = mem.bytesToValue(std.meta.TagPayload(
                    Instruction,
                    Instruction.load_store_register,
                ), code),
            };
            inst.load_store_register.offset = value;
            mem.writeInt(u32, code, inst.toU32(), .little);
        },
    }
}
/// Returns the signed distance, in 4 KiB pages, from the source address to
/// the target address — the value an ADRP immediate encodes. Fails with
/// error.Overflow when either address exceeds 32-bit page numbers or the
/// distance does not fit the 21-bit signed immediate.
pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
    const source_page = math.cast(i32, saddr >> 12) orelse return error.Overflow;
    const target_page = math.cast(i32, taddr >> 12) orelse return error.Overflow;
    return math.cast(i21, target_page - source_page) orelse return error.Overflow;
}
/// Patches the pc-relative-address instruction (ADRP family) at `code` with
/// the given page count. The 21-bit immediate is split across the encoding:
/// the low 2 bits go to immlo, the upper 19 to immhi.
pub fn writePages(pages: u21, code: *[4]u8) !void {
    var inst = Instruction{
        .pc_relative_address = mem.bytesToValue(std.meta.TagPayload(
            Instruction,
            Instruction.pc_relative_address,
        ), code),
    };
    inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
    inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
    mem.writeInt(u32, code, inst.toU32(), .little);
}
/// Patches an unconditional branch (B/BL) at `code` with displacement `disp`
/// (in bytes). The instruction stores imm26 in units of 4-byte words, hence
/// the arithmetic shift right by 2 before truncating to 26 bits.
pub fn writeBranchImm(disp: i28, code: *[4]u8) !void {
    var inst = Instruction{
        .unconditional_branch_immediate = mem.bytesToValue(std.meta.TagPayload(
            Instruction,
            Instruction.unconditional_branch_immediate,
        ), code),
    };
    inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
    mem.writeInt(u32, code, inst.toU32(), .little);
}
const assert = std.debug.assert;
const bits = @import("../arch/aarch64/bits.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const std = @import("std");
pub const Instruction = bits.Instruction;
pub const Register = bits.Register;

74
src/link/riscv.zig Normal file
View File

@ -0,0 +1,74 @@
/// Applies a 6-bit set/subtract edit to the byte at `code`, preserving the
/// top two bits (the `mask`). Presumably implements the R_RISCV_SET6 /
/// R_RISCV_SUB6 relocations — TODO confirm against the callers.
pub fn writeSetSub6(comptime op: enum { set, sub }, code: *[1]u8, addend: anytype) void {
    const mask: u8 = 0b11_000000;
    const actual: i8 = @truncate(addend);
    var value: u8 = mem.readInt(u8, code, .little);
    switch (op) {
        // .set: replace the low 6 bits with the addend's low 6 bits.
        .set => value = (value & mask) | @as(u8, @bitCast(actual & ~mask)),
        // .sub: saturating-subtract the addend from the whole byte reinterpreted
        // as signed, then keep only the low 6 bits of the result.
        // NOTE(review): the saturating `-|` looks intentional but is unusual
        // for relocation math — verify against the psABI if this misbehaves.
        .sub => value = (value & mask) | (@as(u8, @bitCast(@as(i8, @bitCast(value)) -| actual)) & ~mask),
    }
    mem.writeInt(u8, code, value, .little);
}
/// Reads an `Int` (little-endian) from `code`, adds or subtracts the low bits
/// of `value`, and writes it back. Arithmetic saturates rather than wrapping.
pub fn writeAddend(
    comptime Int: type,
    comptime op: enum { add, sub },
    code: *[@typeInfo(Int).Int.bits / 8]u8,
    value: anytype,
) void {
    const existing: Int = mem.readInt(Int, code, .little);
    const delta: Int = @truncate(value);
    // TODO: I think saturating arithmetic is correct here
    const updated: Int = switch (op) {
        .add => existing +| delta,
        .sub => existing -| delta,
    };
    mem.writeInt(Int, code, updated, .little);
}
/// Patches the upper-20-bit immediate of a U-type instruction (LUI/AUIPC)
/// at `code`. Adds 0x800 before taking bits 31..12 so that the paired
/// lo12 instruction, whose immediate is sign-extended by hardware,
/// reconstructs the intended 32-bit value (standard %hi/%lo compensation).
pub fn writeInstU(code: *[4]u8, value: u32) void {
    var inst = Instruction{
        .U = mem.bytesToValue(std.meta.TagPayload(
            Instruction,
            Instruction.U,
        ), code),
    };
    const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800);
    inst.U.imm12_31 = bitSlice(compensated, 31, 12);
    mem.writeInt(u32, code, inst.toU32(), .little);
}
/// Patches the 12-bit immediate of an I-type instruction (loads, ADDI, ...)
/// at `code` with the low 12 bits of `value`.
pub fn writeInstI(code: *[4]u8, value: u32) void {
    var inst = Instruction{
        .I = mem.bytesToValue(std.meta.TagPayload(
            Instruction,
            Instruction.I,
        ), code),
    };
    inst.I.imm0_11 = bitSlice(value, 11, 0);
    mem.writeInt(u32, code, inst.toU32(), .little);
}
/// Patches the 12-bit immediate of an S-type instruction (stores) at `code`.
/// S-type splits the immediate across two fields: bits 0..4 and bits 5..11.
pub fn writeInstS(code: *[4]u8, value: u32) void {
    var inst = Instruction{
        .S = mem.bytesToValue(std.meta.TagPayload(
            Instruction,
            Instruction.S,
        ), code),
    };
    inst.S.imm0_4 = bitSlice(value, 4, 0);
    inst.S.imm5_11 = bitSlice(value, 11, 5);
    mem.writeInt(u32, code, inst.toU32(), .little);
}
/// Extracts the inclusive bit range [low, high] from `value`, returned in the
/// smallest unsigned integer type that fits the range.
fn bitSlice(
    value: anytype,
    comptime high: comptime_int,
    comptime low: comptime_int,
) std.math.IntFittingRange(0, 1 << high - low) {
    const width = high - low + 1;
    const mask = (1 << width) - 1;
    return @truncate((value >> low) & mask);
}
const bits = @import("../arch/riscv64/bits.zig");
const mem = std.mem;
const std = @import("std");
pub const Instruction = bits.Instruction;

View File

@ -10,124 +10,141 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
.cpu_arch = .x86_64, // TODO relax this once ELF linker is able to handle other archs
.os_tag = .linux,
});
const musl_target = b.resolveTargetQuery(.{
const x86_64_musl = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
.abi = .musl,
});
const glibc_target = b.resolveTargetQuery(.{
const x86_64_gnu = b.resolveTargetQuery(.{
.cpu_arch = .x86_64,
.os_tag = .linux,
.abi = .gnu,
});
const aarch64_musl = b.resolveTargetQuery(.{
.cpu_arch = .aarch64,
.os_tag = .linux,
.abi = .musl,
});
const riscv64_musl = b.resolveTargetQuery(.{
.cpu_arch = .riscv64,
.os_tag = .linux,
.abi = .musl,
});
// x86_64 tests
// Exercise linker in -r mode
elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testEmitRelocatable(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableEhFrame(b, .{ .target = musl_target }));
elf_step.dependOn(testRelocatableNoEhFrame(b, .{ .target = musl_target }));
elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = x86_64_musl }));
elf_step.dependOn(testEmitRelocatable(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableArchive(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableEhFrame(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testRelocatableNoEhFrame(b, .{ .target = x86_64_musl }));
// Exercise linker in ar mode
elf_step.dependOn(testEmitStaticLib(b, .{ .target = musl_target }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testEmitStaticLib(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = x86_64_musl }));
// Exercise linker with self-hosted backend (no LLVM)
elf_step.dependOn(testGcSectionsZig(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingObj(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingStaticLib(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testLinkingZig(b, .{ .use_llvm = false, .target = default_target }));
elf_step.dependOn(testImportingDataDynamic(b, .{ .use_llvm = false, .target = glibc_target }));
elf_step.dependOn(testImportingDataStatic(b, .{ .use_llvm = false, .target = musl_target }));
elf_step.dependOn(testImportingDataDynamic(b, .{ .use_llvm = false, .target = x86_64_gnu }));
elf_step.dependOn(testImportingDataStatic(b, .{ .use_llvm = false, .target = x86_64_musl }));
// Exercise linker with LLVM backend
// musl tests
elf_step.dependOn(testAbsSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbols(b, .{ .target = musl_target }));
elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = musl_target }));
elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
elf_step.dependOn(testImageBase(b, .{ .target = musl_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = musl_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = musl_target }));
elf_step.dependOn(testAbsSymbols(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testCommonSymbols(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEmptyObject(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testEntryPoint(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testGcSections(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testImageBase(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = x86_64_musl }));
// https://github.com/ziglang/zig/issues/17449
// elf_step.dependOn(testLargeBss(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingC(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingCpp(b, .{ .target = musl_target }));
elf_step.dependOn(testLinkingZig(b, .{ .target = musl_target }));
// elf_step.dependOn(testLargeBss(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingC(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingCpp(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testLinkingZig(b, .{ .target = x86_64_musl }));
// https://github.com/ziglang/zig/issues/17451
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = musl_target }));
elf_step.dependOn(testTlsStatic(b, .{ .target = musl_target }));
elf_step.dependOn(testStrip(b, .{ .target = musl_target }));
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testTlsStatic(b, .{ .target = x86_64_musl }));
elf_step.dependOn(testStrip(b, .{ .target = x86_64_musl }));
// glibc tests
elf_step.dependOn(testAsNeeded(b, .{ .target = glibc_target }));
elf_step.dependOn(testAsNeeded(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCanonicalPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testCopyrel(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCanonicalPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testCopyrel(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCopyrelAlias(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCopyrelAlias(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testCopyrelAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testDsoPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testDsoUndef(b, .{ .target = glibc_target }));
elf_step.dependOn(testExportDynamic(b, .{ .target = glibc_target }));
elf_step.dependOn(testExportSymbolsFromExe(b, .{ .target = glibc_target }));
// elf_step.dependOn(testCopyrelAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testDsoPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testDsoUndef(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testExportDynamic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testExportSymbolsFromExe(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testFuncAddress(b, .{ .target = glibc_target }));
elf_step.dependOn(testHiddenWeakUndef(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncAlias(b, .{ .target = glibc_target }));
// elf_step.dependOn(testFuncAddress(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testHiddenWeakUndef(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncAlias(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testIFuncDlopen(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncDynamic(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncExport(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncFuncPtr(b, .{ .target = glibc_target }));
elf_step.dependOn(testIFuncNoPlt(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncDlopen(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncDynamic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncExport(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncFuncPtr(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testIFuncNoPlt(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430 ??
// elf_step.dependOn(testIFuncStatic(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncStaticPie(b, .{ .target = glibc_target }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeAlignmentDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = glibc_target }));
elf_step.dependOn(testLargeBss(b, .{ .target = glibc_target }));
elf_step.dependOn(testLinkOrder(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScript(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScriptPathError(b, .{ .target = glibc_target }));
elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = glibc_target, .use_lld = true }));
elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = glibc_target, .use_lld = true }));
elf_step.dependOn(testMismatchedCpuArchitectureError(b, .{ .target = glibc_target }));
// elf_step.dependOn(testIFuncStatic(b, .{ .target = x86_64_gnu }));
// elf_step.dependOn(testIFuncStaticPie(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testInitArrayOrder(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeAlignmentDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLargeBss(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLinkOrder(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScript(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScriptPathError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testLdScriptAllowUndefinedVersion(b, .{ .target = x86_64_gnu, .use_lld = true }));
elf_step.dependOn(testLdScriptDisallowUndefinedVersion(b, .{ .target = x86_64_gnu, .use_lld = true }));
elf_step.dependOn(testMismatchedCpuArchitectureError(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17451
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = glibc_target }));
elf_step.dependOn(testPie(b, .{ .target = glibc_target }));
elf_step.dependOn(testPltGot(b, .{ .target = glibc_target }));
elf_step.dependOn(testPreinitArray(b, .{ .target = glibc_target }));
elf_step.dependOn(testSharedAbsSymbol(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsDfStaticTls(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGd(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGdNoPlt(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsGdToIe(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsIe(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeTbss(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLargeStaticImage(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLd(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLdDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsLdNoPlt(b, .{ .target = glibc_target }));
// elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPie(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPltGot(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testPreinitArray(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testSharedAbsSymbol(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsDfStaticTls(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGd(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGdNoPlt(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsGdToIe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsIe(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeTbss(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLargeStaticImage(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLd(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLdDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsLdNoPlt(b, .{ .target = x86_64_gnu }));
// https://github.com/ziglang/zig/issues/17430
// elf_step.dependOn(testTlsNoPic(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsOffsetAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsPic(b, .{ .target = glibc_target }));
elf_step.dependOn(testTlsSmallAlignment(b, .{ .target = glibc_target }));
elf_step.dependOn(testUnknownFileTypeError(b, .{ .target = glibc_target }));
elf_step.dependOn(testUnresolvedError(b, .{ .target = glibc_target }));
elf_step.dependOn(testWeakExports(b, .{ .target = glibc_target }));
elf_step.dependOn(testWeakUndefsDso(b, .{ .target = glibc_target }));
elf_step.dependOn(testZNow(b, .{ .target = glibc_target }));
elf_step.dependOn(testZStackSize(b, .{ .target = glibc_target }));
elf_step.dependOn(testZText(b, .{ .target = glibc_target }));
// elf_step.dependOn(testTlsNoPic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsOffsetAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsPic(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testTlsSmallAlignment(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testUnknownFileTypeError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testUnresolvedError(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testWeakExports(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testWeakUndefsDso(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZNow(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZStackSize(b, .{ .target = x86_64_gnu }));
elf_step.dependOn(testZText(b, .{ .target = x86_64_gnu }));
// aarch64 tests
elf_step.dependOn(testLinkingC(b, .{ .target = aarch64_musl }));
// riscv64 tests
elf_step.dependOn(testLinkingC(b, .{ .target = riscv64_musl }));
return elf_step;
}