Mirror of https://github.com/ziglang/zig.git (synced 2026-01-29 02:35:28 +00:00)

Commit 6089ed9ee7: Merge branch 'master' into crc
@@ -593,10 +593,12 @@ set(ZIG_STAGE2_SOURCES
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Object.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Relocation.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/UnwindInfo.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/ZldAtom.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/dyld_info/bind.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/dyld_info/Rebase.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/dead_strip.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/eh_frame.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/fat.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/load_commands.zig"
    "${CMAKE_SOURCE_DIR}/src/link/MachO/thunks.zig"
@@ -155,6 +155,8 @@ pub const EAI = enum(c_int) {

pub const EAI_MAX = 15;

pub const IFNAMESIZE = 16;

pub const AI = struct {
    /// get address to use bind()
    pub const PASSIVE = 0x00000001;
@@ -1271,62 +1273,98 @@ pub const siginfo_t = extern struct {
    },
};

pub usingnamespace switch (builtin.cpu.arch) {
    .x86_64 => struct {
        pub const ucontext_t = extern struct {
            sigmask: sigset_t,
            mcontext: mcontext_t,
            link: ?*ucontext_t,
            stack: stack_t,
            flags: c_int,
            __spare__: [4]c_int,
        };

        /// XXX x86_64 specific
        pub const mcontext_t = extern struct {
            onstack: u64,
            rdi: u64,
            rsi: u64,
            rdx: u64,
            rcx: u64,
            r8: u64,
            r9: u64,
            rax: u64,
            rbx: u64,
            rbp: u64,
            r10: u64,
            r11: u64,
            r12: u64,
            r13: u64,
            r14: u64,
            r15: u64,
            trapno: u32,
            fs: u16,
            gs: u16,
            addr: u64,
pub const mcontext_t = switch (builtin.cpu.arch) {
    .x86_64 => extern struct {
        onstack: u64,
        rdi: u64,
        rsi: u64,
        rdx: u64,
        rcx: u64,
        r8: u64,
        r9: u64,
        rax: u64,
        rbx: u64,
        rbp: u64,
        r10: u64,
        r11: u64,
        r12: u64,
        r13: u64,
        r14: u64,
        r15: u64,
        trapno: u32,
        fs: u16,
        gs: u16,
        addr: u64,
        flags: u32,
        es: u16,
        ds: u16,
        err: u64,
        rip: u64,
        cs: u64,
        rflags: u64,
        rsp: u64,
        ss: u64,
        len: u64,
        fpformat: u64,
        ownedfp: u64,
        fpstate: [64]u64 align(16),
        fsbase: u64,
        gsbase: u64,
        xfpustate: u64,
        xfpustate_len: u64,
        spare: [4]u64,
    },
    .aarch64 => extern struct {
        gpregs: extern struct {
            x: [30]u64,
            lr: u64,
            sp: u64,
            elr: u64,
            spsr: u32,
            _pad: u32,
        },
        fpregs: extern struct {
            q: [32]u128,
            sr: u32,
            cr: u32,
            flags: u32,
            es: u16,
            ds: u16,
            err: u64,
            rip: u64,
            cs: u64,
            rflags: u64,
            rsp: u64,
            ss: u64,
            len: u64,
            fpformat: u64,
            ownedfp: u64,
            fpstate: [64]u64 align(16),
            fsbase: u64,
            gsbase: u64,
            xfpustate: u64,
            xfpustate_len: u64,
            spare: [4]u64,
        };
            _pad: u32,
        },
        flags: u32,
        _pad: u32,
        _spare: [8]u64,
    },
    else => struct {},
};

pub const REG = switch (builtin.cpu.arch) {
    .aarch64 => struct {
        pub const FP = 29;
        pub const SP = 31;
        pub const PC = 32;
    },
    .arm => struct {
        pub const FP = 11;
        pub const SP = 13;
        pub const PC = 15;
    },
    .x86_64 => struct {
        pub const RBP = 12;
        pub const RIP = 21;
        pub const RSP = 24;
    },
    else => struct {},
};

pub const ucontext_t = extern struct {
    sigmask: sigset_t,
    mcontext: mcontext_t,
    link: ?*ucontext_t,
    stack: stack_t,
    flags: c_int,
    __spare__: [4]c_int,
};

pub const E = enum(u16) {
    /// No error occurred.
    SUCCESS = 0,
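In short, this freebsd.zig hunk replaces the x86_64-only usingnamespace block with a top-level mcontext_t that switches on builtin.cpu.arch (adding an aarch64 layout), plus per-arch REG index constants; the handleSegfaultPosix hunks further down consume these via expressions like ctx.mcontext.gpregs.x[os.REG.FP].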
@@ -4,6 +4,36 @@ pub const deflate = @import("compress/deflate.zig");
pub const gzip = @import("compress/gzip.zig");
pub const zlib = @import("compress/zlib.zig");

pub fn HashedReader(
    comptime ReaderType: anytype,
    comptime HasherType: anytype,
) type {
    return struct {
        child_reader: ReaderType,
        hasher: HasherType,

        pub const Error = ReaderType.Error;
        pub const Reader = std.io.Reader(*@This(), Error, read);

        pub fn read(self: *@This(), buf: []u8) Error!usize {
            const amt = try self.child_reader.read(buf);
            // Only hash the bytes actually read; hashing all of `buf`
            // would include uninitialized bytes past `amt`.
            self.hasher.update(buf[0..amt]);
            return amt;
        }

        pub fn reader(self: *@This()) Reader {
            return .{ .context = self };
        }
    };
}

pub fn hashedReader(
    reader: anytype,
    hasher: anytype,
) HashedReader(@TypeOf(reader), @TypeOf(hasher)) {
    return .{ .child_reader = reader, .hasher = hasher };
}

test {
    _ = deflate;
    _ = gzip;
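The new HashedReader updates its hasher with every byte pulled through it, so a consumer gets a running checksum for free. A minimal usage sketch (a hypothetical test, not part of the commit, assuming only the std.compress.hashedReader and std.hash.Crc32 APIs shown above):

const std = @import("std");

test "hashedReader tracks a CRC32 of everything read" {
    var fbs = std.io.fixedBufferStream("hello");
    var hashed = std.compress.hashedReader(fbs.reader(), std.hash.Crc32.init());
    var buf: [5]u8 = undefined;
    try hashed.reader().readNoEof(&buf);
    try std.testing.expectEqual(std.hash.Crc32.hash("hello"), hashed.hasher.final());
}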
@@ -44,8 +44,11 @@ pub fn GzipStream(comptime ReaderType: type) type {
    },

    fn init(allocator: mem.Allocator, source: ReaderType) !Self {
        var hasher = std.compress.hashedReader(source, std.hash.Crc32.init());
        const hashed_reader = hasher.reader();

        // gzip header format is specified in RFC1952
        const header = try source.readBytesNoEof(10);
        const header = try hashed_reader.readBytesNoEof(10);

        // Check the ID1/ID2 fields
        if (header[0] != 0x1f or header[1] != 0x8b)
@@ -66,31 +69,31 @@ pub fn GzipStream(comptime ReaderType: type) type {
        _ = XFL;

        const extra = if (FLG & FEXTRA != 0) blk: {
            const len = try source.readIntLittle(u16);
            const len = try hashed_reader.readIntLittle(u16);
            const tmp_buf = try allocator.alloc(u8, len);
            errdefer allocator.free(tmp_buf);

            try source.readNoEof(tmp_buf);
            try hashed_reader.readNoEof(tmp_buf);
            break :blk tmp_buf;
        } else null;
        errdefer if (extra) |p| allocator.free(p);

        const filename = if (FLG & FNAME != 0)
            try source.readUntilDelimiterAlloc(allocator, 0, max_string_len)
            try hashed_reader.readUntilDelimiterAlloc(allocator, 0, max_string_len)
        else
            null;
        errdefer if (filename) |p| allocator.free(p);

        const comment = if (FLG & FCOMMENT != 0)
            try source.readUntilDelimiterAlloc(allocator, 0, max_string_len)
            try hashed_reader.readUntilDelimiterAlloc(allocator, 0, max_string_len)
        else
            null;
        errdefer if (comment) |p| allocator.free(p);

        if (FLG & FHCRC != 0) {
            // TODO: Evaluate and check the header checksum. The stdlib has
            // no CRC16 yet :(
            _ = try source.readIntLittle(u16);
            const hash = try source.readIntLittle(u16);
            if (hash != @truncate(u16, hasher.hasher.final()))
                return error.WrongChecksum;
        }

        return Self{
@@ -230,3 +233,16 @@ test "sanity checks" {
        }, ""),
    );
}

test "header checksum" {
    try testReader(&[_]u8{
        // GZIP header
        0x1f, 0x8b, 0x08, 0x12, 0x00, 0x09, 0x6e, 0x88, 0x00, 0xff, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x00,

        // header.FHCRC (should cover entire header)
        0x99, 0xd6,

        // GZIP data
        0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    }, "");
}
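Per RFC 1952, the FHCRC field is the two least significant bytes of the CRC32 of all header bytes that precede it, which is why the test stores 0x99, 0xd6 little-endian. A standalone sketch of that relationship (a hypothetical test, using the era's two-argument @truncate; the expected value follows from the bytes the commit's own test accepts):

const std = @import("std");

test "gzip FHCRC is the low 16 bits of the header CRC32" {
    const header = [_]u8{
        0x1f, 0x8b, 0x08, 0x12, 0x00, 0x09, 0x6e, 0x88,
        0x00, 0xff, 0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x00,
    };
    // Matches the 0x99, 0xd6 bytes fed to testReader above.
    const crc16 = @truncate(u16, std.hash.Crc32.hash(&header));
    try std.testing.expectEqual(@as(u16, 0xd699), crc16);
}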
@@ -1986,12 +1986,14 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
    const ip = switch (native_os) {
        .macos => @intCast(usize, ctx.mcontext.ss.pc),
        .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
        .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
        else => @intCast(usize, ctx.mcontext.pc),
    };
    // x29 is the ABI-designated frame pointer
    const bp = switch (native_os) {
        .macos => @intCast(usize, ctx.mcontext.ss.fp),
        .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
        .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
        else => @intCast(usize, ctx.mcontext.regs[29]),
    };
    dumpStackTraceFromBase(bp, ip);
@@ -2011,6 +2011,7 @@ pub const UNWIND_PERSONALITY_MASK: u32 = 0x30000000;
// x86_64
pub const UNWIND_X86_64_MODE_MASK: u32 = 0x0F000000;
pub const UNWIND_X86_64_MODE = enum(u4) {
    OLD = 0,
    RBP_FRAME = 1,
    STACK_IMMD = 2,
    STACK_IND = 3,
@@ -2039,6 +2040,7 @@ pub const UNWIND_X86_64_REG = enum(u3) {
// arm64
pub const UNWIND_ARM64_MODE_MASK: u32 = 0x0F000000;
pub const UNWIND_ARM64_MODE = enum(u4) {
    OLD = 0,
    FRAMELESS = 2,
    DWARF = 3,
    FRAME = 4,
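These masks and enums describe Apple's compact unwind encodings: the mode lives in bits 24..27, selected by the 0x0F000000 mask. A hedged decoding sketch (the encoding value is a made-up example, and the builtins use the pre-0.11 spellings @truncate and @intToEnum):

const std = @import("std");
const macho = std.macho;

test "decode the mode bits of a compact unwind encoding" {
    const encoding: u32 = 0x04000000; // assumed example: ARM64 FRAME mode, no personality/LSDA bits
    const mode = @intToEnum(
        macho.UNWIND_ARM64_MODE,
        @truncate(u4, (encoding & macho.UNWIND_ARM64_MODE_MASK) >> 24),
    );
    try std.testing.expectEqual(macho.UNWIND_ARM64_MODE.FRAME, mode);
}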
src/Sema.zig (57 changed lines)
@@ -3948,6 +3948,7 @@ fn validateArrayInitTy(
        return;
    },
    .Struct => if (ty.isTuple()) {
        _ = try sema.resolveTypeFields(ty);
        const array_len = ty.arrayLen();
        if (extra.init_count > array_len) {
            return sema.fail(block, src, "expected at most {d} tuple fields; found {d}", .{
@@ -4642,11 +4643,11 @@ fn failWithBadMemberAccess(
        .Enum => "enum",
        else => unreachable,
    };
    if (sema.mod.declIsRoot(agg_ty.getOwnerDecl())) {
    if (agg_ty.getOwnerDeclOrNull()) |some| if (sema.mod.declIsRoot(some)) {
        return sema.fail(block, field_src, "root struct of file '{}' has no member named '{s}'", .{
            agg_ty.fmt(sema.mod), field_name,
        });
    }
    };
    const msg = msg: {
        const msg = try sema.errMsg(block, field_src, "{s} '{}' has no member named '{s}'", .{
            kw_name, agg_ty.fmt(sema.mod), field_name,
@@ -7514,7 +7515,7 @@ fn resolveGenericInstantiationType(
}

fn resolveTupleLazyValues(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!void {
    if (!ty.isSimpleTuple()) return;
    if (!ty.isSimpleTupleOrAnonStruct()) return;
    const tuple = ty.tupleFields();
    for (tuple.values) |field_val, i| {
        try sema.resolveTupleLazyValues(block, src, tuple.types[i]);
@@ -11771,8 +11772,8 @@ fn zirShl(
    // TODO coerce rhs if air_tag is not shl_sat
    const rhs_is_comptime_int = try sema.checkIntType(block, rhs_src, scalar_rhs_ty);

    const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

    if (maybe_rhs_val) |rhs_val| {
        if (rhs_val.isUndef()) {
@@ -11842,7 +11843,7 @@ fn zirShl(
    if (scalar_ty.zigTypeTag() == .ComptimeInt) {
        break :val shifted.wrapped_result;
    }
    if (shifted.overflow_bit.compareAllWithZero(.eq)) {
    if (shifted.overflow_bit.compareAllWithZero(.eq, sema.mod)) {
        break :val shifted.wrapped_result;
    }
    return sema.fail(block, src, "operation caused overflow", .{});
@@ -11959,8 +11960,8 @@ fn zirShr(
    const target = sema.mod.getTarget();
    const scalar_ty = lhs_ty.scalarType();

    const maybe_lhs_val = try sema.resolveMaybeUndefVal(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefVal(rhs);
    const maybe_lhs_val = try sema.resolveMaybeUndefValIntable(lhs);
    const maybe_rhs_val = try sema.resolveMaybeUndefValIntable(rhs);

    const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
        if (rhs_val.isUndef()) {
@@ -12799,7 +12800,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -12831,7 +12832,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
    const lhs_val = maybe_lhs_val orelse unreachable;
    const rhs_val = maybe_rhs_val orelse unreachable;
    const rem = lhs_val.floatRem(rhs_val, resolved_type, sema.arena, mod) catch unreachable;
    if (!rem.compareAllWithZero(.eq)) {
    if (!rem.compareAllWithZero(.eq, mod)) {
        return sema.fail(block, src, "ambiguous coercion of division operands '{s}' and '{s}'; non-zero remainder '{}'", .{
            @tagName(lhs_ty.tag()), @tagName(rhs_ty.tag()), rem.fmtValue(resolved_type, sema.mod),
        });
@@ -12959,7 +12960,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_exact);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13024,7 +13025,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    if (maybe_rhs_val) |rhs_val| {
        if (is_int) {
            const modulus_val = try lhs_val.intMod(rhs_val, resolved_type, sema.arena, mod);
            if (!(modulus_val.compareAllWithZero(.eq))) {
            if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                return sema.fail(block, src, "exact division produced remainder", .{});
            }
            const res = try lhs_val.intDiv(rhs_val, resolved_type, sema.arena, mod);
@@ -13035,7 +13036,7 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
            return sema.addConstant(resolved_type, res);
        } else {
            const modulus_val = try lhs_val.floatMod(rhs_val, resolved_type, sema.arena, mod);
            if (!(modulus_val.compareAllWithZero(.eq))) {
            if (!(modulus_val.compareAllWithZero(.eq, mod))) {
                return sema.fail(block, src, "exact division produced remainder", .{});
            }
            return sema.addConstant(
@@ -13122,7 +13123,7 @@ fn zirDivFloor(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_floor);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13238,7 +13239,7 @@ fn zirDivTrunc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .div_trunc);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13481,7 +13482,7 @@ fn zirModRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod_rem);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13664,7 +13665,7 @@ fn zirMod(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .mod);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -13766,7 +13767,7 @@ fn zirRem(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
    const lhs_zig_ty_tag = try lhs_ty.zigTypeTagOrPoison();
    const rhs_zig_ty_tag = try rhs_ty.zigTypeTagOrPoison();
    try sema.checkVectorizableBinaryOperands(block, src, lhs_ty, rhs_ty, lhs_src, rhs_src);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty, .rem);
    try sema.checkInvalidPtrArithmetic(block, src, lhs_ty);

    const instructions = &[_]Air.Inst.Ref{ lhs, rhs };
    const resolved_type = try sema.resolvePeerTypes(block, src, instructions, .{
@@ -14106,12 +14107,7 @@ fn analyzeArithmetic(
    const air_tag: Air.Inst.Tag = switch (zir_tag) {
        .add => .ptr_add,
        .sub => .ptr_sub,
        else => return sema.fail(
            block,
            src,
            "invalid pointer arithmetic operand: '{s}''",
            .{@tagName(zir_tag)},
        ),
        else => return sema.fail(block, src, "invalid pointer arithmetic operator", .{}),
    };
    return sema.analyzePtrArithmetic(block, src, lhs, rhs, air_tag, lhs_src, rhs_src);
},
@@ -19697,7 +19693,7 @@ fn zirTruncate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
    }
}

    if (try sema.resolveMaybeUndefVal(operand)) |val| {
    if (try sema.resolveMaybeUndefValIntable(operand)) |val| {
        if (val.isUndef()) return sema.addConstUndef(dest_ty);
        if (!is_vector) {
            return sema.addConstant(
@@ -19901,7 +19897,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
    const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
    const operand = try sema.resolveInst(inst_data.operand);
    const operand_ty = sema.typeOf(operand);
    _ = try sema.checkIntOrVectorAllowComptime(block, operand_ty, operand_src);
    const scalar_ty = try sema.checkIntOrVector(block, operand, operand_src);

    if (try sema.typeHasOnePossibleValue(operand_ty)) |val| {
        return sema.addConstant(operand_ty, val);
@@ -19909,7 +19905,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!

    const target = sema.mod.getTarget();
    switch (operand_ty.zigTypeTag()) {
        .Int, .ComptimeInt => {
        .Int => {
            const runtime_src = if (try sema.resolveMaybeUndefVal(operand)) |val| {
                if (val.isUndef()) return sema.addConstUndef(operand_ty);
                const result_val = try val.bitReverse(operand_ty, target, sema.arena);
@@ -19929,7 +19925,7 @@ fn zirBitReverse(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
            const elems = try sema.arena.alloc(Value, vec_len);
            for (elems) |*elem, i| {
                const elem_val = val.elemValueBuffer(sema.mod, i, &elem_buf);
                elem.* = try elem_val.bitReverse(operand_ty, target, sema.arena);
                elem.* = try elem_val.bitReverse(scalar_ty, target, sema.arena);
            }
            return sema.addConstant(
                operand_ty,
@@ -20028,7 +20024,6 @@ fn checkInvalidPtrArithmetic(
    block: *Block,
    src: LazySrcLoc,
    ty: Type,
    zir_tag: Zir.Inst.Tag,
) CompileError!void {
    switch (try ty.zigTypeTagOrPoison()) {
        .Pointer => switch (ty.ptrSize()) {
@@ -20036,8 +20031,8 @@ fn checkInvalidPtrArithmetic(
            .Many, .C => return sema.fail(
                block,
                src,
                "invalid pointer arithmetic operand: '{s}''",
                .{@tagName(zir_tag)},
                "invalid pointer arithmetic operator",
                .{},
            ),
        },
        else => return,
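Two mechanical API changes run through the Sema hunks above: Value.compareAllWithZero now takes the Module as an extra argument, and checkInvalidPtrArithmetic drops its Zir.Inst.Tag parameter, so callers pass only the type and the diagnostic becomes a uniform "invalid pointer arithmetic operator" (also retiring the old format string with its stray doubled quote).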
@@ -77,11 +77,11 @@ end_di_column: u32,
/// which is a relative jump, based on the address following the reloc.
exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{},

/// For every argument, we postpone the creation of debug info for
/// later after all Mir instructions have been generated. Only then we
/// We postpone the creation of debug info for function args and locals
/// until after all Mir instructions have been generated. Only then we
/// will know saved_regs_stack_space which is necessary in order to
/// address parameters passed on the stack.
dbg_arg_relocs: std.ArrayListUnmanaged(DbgArgReloc) = .{},
/// calculate the right stack offsest with respect to the `.fp` register.
dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{},

/// Whenever there is a runtime branch, we push a Branch onto this stack,
/// and pop it off when the runtime branch joins. This provides an "overlay"
@@ -243,9 +243,107 @@ const BigTomb = struct {
    }
};

const DbgArgReloc = struct {
    inst: Air.Inst.Index,
    index: u32,
const DbgInfoReloc = struct {
    tag: Air.Inst.Tag,
    ty: Type,
    name: [:0]const u8,
    mcv: MCValue,

    fn genDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
        switch (reloc.tag) {
            .arg => try reloc.genArgDbgInfo(function),

            .dbg_var_ptr,
            .dbg_var_val,
            => try reloc.genVarDbgInfo(function),

            else => unreachable,
        }
    }

    fn genArgDbgInfo(reloc: DbgInfoReloc, function: Self) error{OutOfMemory}!void {
        switch (function.debug_output) {
            .dwarf => |dw| {
                const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) {
                    .register => |reg| .{ .register = reg.dwarfLocOp() },
                    .stack_offset,
                    .stack_argument_offset,
                    => blk: {
                        const adjusted_stack_offset = switch (reloc.mcv) {
                            .stack_offset => |offset| -@intCast(i32, offset),
                            .stack_argument_offset => |offset| @intCast(i32, function.saved_regs_stack_space + offset),
                            else => unreachable,
                        };
                        break :blk .{ .stack = .{
                            .fp_register = DW.OP.breg11,
                            .offset = adjusted_stack_offset,
                        } };
                    },
                    else => unreachable, // not a possible argument
                };

                try dw.genArgDbgInfo(
                    reloc.name,
                    reloc.ty,
                    function.bin_file.tag,
                    function.mod_fn.owner_decl,
                    loc,
                );
            },
            .plan9 => {},
            .none => {},
        }
    }

    fn genVarDbgInfo(reloc: DbgInfoReloc, function: Self) !void {
        const is_ptr = switch (reloc.tag) {
            .dbg_var_ptr => true,
            .dbg_var_val => false,
            else => unreachable,
        };

        switch (function.debug_output) {
            .dwarf => |dw| {
                const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (reloc.mcv) {
                    .register => |reg| .{ .register = reg.dwarfLocOp() },
                    .ptr_stack_offset,
                    .stack_offset,
                    .stack_argument_offset,
                    => |offset| blk: {
                        const adjusted_offset = switch (reloc.mcv) {
                            .ptr_stack_offset,
                            .stack_offset,
                            => -@intCast(i32, offset),
                            .stack_argument_offset => @intCast(i32, function.saved_regs_stack_space + offset),
                            else => unreachable,
                        };
                        break :blk .{ .stack = .{
                            .fp_register = DW.OP.breg11,
                            .offset = adjusted_offset,
                        } };
                    },
                    .memory => |address| .{ .memory = address },
                    .immediate => |x| .{ .immediate = x },
                    .undef => .undef,
                    .none => .none,
                    else => blk: {
                        log.debug("TODO generate debug info for {}", .{reloc.mcv});
                        break :blk .nop;
                    },
                };
                try dw.genVarDbgInfo(
                    reloc.name,
                    reloc.ty,
                    function.bin_file.tag,
                    function.mod_fn.owner_decl,
                    is_ptr,
                    loc,
                );
            },
            .plan9 => {},
            .none => {},
        }
    }
};

const Self = @This();
@@ -298,7 +396,7 @@ pub fn generate(
    defer function.stack.deinit(bin_file.allocator);
    defer function.blocks.deinit(bin_file.allocator);
    defer function.exitlude_jump_relocs.deinit(bin_file.allocator);
    defer function.dbg_arg_relocs.deinit(bin_file.allocator);
    defer function.dbg_info_relocs.deinit(bin_file.allocator);

    var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) {
        error.CodegenFail => return FnResult{ .fail = function.err_msg.? },
@@ -322,8 +420,8 @@ pub fn generate(
        else => |e| return e,
    };

    for (function.dbg_arg_relocs.items) |reloc| {
        try function.genArgDbgInfo(reloc.inst, reloc.index);
    for (function.dbg_info_relocs.items) |reloc| {
        try reloc.genDbgInfo(function);
    }

    var mir = Mir{
@@ -896,9 +994,6 @@ fn allocMem(
    assert(abi_size > 0);
    assert(abi_align > 0);

    if (abi_align > self.stack_align)
        self.stack_align = abi_align;

    // TODO find a free slot instead of always appending
    const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size;
    self.next_stack_offset = offset;
@@ -4035,46 +4130,20 @@ fn genInlineMemsetCode(
    // end:
}

fn genArgDbgInfo(self: Self, inst: Air.Inst.Index, arg_index: u32) error{OutOfMemory}!void {
    const mcv = self.args[arg_index];
    const arg = self.air.instructions.items(.data)[inst].arg;
    const ty = self.air.getRefType(arg.ty);
    const name = self.mod_fn.getParamName(self.bin_file.options.module.?, arg.src_index);

    switch (self.debug_output) {
        .dwarf => |dw| {
            const loc: link.File.Dwarf.DeclState.DbgInfoLoc = switch (mcv) {
                .register => |reg| .{ .register = reg.dwarfLocOp() },
                .stack_offset,
                .stack_argument_offset,
                => blk: {
                    const adjusted_stack_offset = switch (mcv) {
                        .stack_offset => |offset| -@intCast(i32, offset),
                        .stack_argument_offset => |offset| @intCast(i32, self.saved_regs_stack_space + offset),
                        else => unreachable,
                    };
                    break :blk .{ .stack = .{
                        .fp_register = DW.OP.breg11,
                        .offset = adjusted_stack_offset,
                    } };
                },
                else => unreachable, // not a possible argument

            };
            try dw.genArgDbgInfo(name, ty, self.bin_file.tag, self.mod_fn.owner_decl, loc);
        },
        .plan9 => {},
        .none => {},
    }
}

fn airArg(self: *Self, inst: Air.Inst.Index) !void {
    const arg_index = self.arg_index;
    self.arg_index += 1;

    try self.dbg_arg_relocs.append(self.gpa, .{
        .inst = inst,
        .index = arg_index,
    const ty = self.air.typeOfIndex(inst);
    const tag = self.air.instructions.items(.tag)[inst];
    const src_index = self.air.instructions.items(.data)[inst].arg.src_index;
    const name = self.mod_fn.getParamName(self.bin_file.options.module.?, src_index);

    try self.dbg_info_relocs.append(self.gpa, .{
        .tag = tag,
        .ty = ty,
        .name = name,
        .mcv = self.args[arg_index],
    });

    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else self.args[arg_index];
@@ -4485,10 +4554,21 @@ fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void {

fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
    const name = self.air.nullTerminatedString(pl_op.payload);
    const operand = pl_op.operand;
    // TODO emit debug info for this variable
    _ = name;
    const tag = self.air.instructions.items(.tag)[inst];
    const ty = self.air.typeOf(operand);
    const mcv = try self.resolveInst(operand);
    const name = self.air.nullTerminatedString(pl_op.payload);

    log.debug("airDbgVar: %{d}: {}, {}", .{ inst, ty.fmtDebug(), mcv });

    try self.dbg_info_relocs.append(self.gpa, .{
        .tag = tag,
        .ty = ty,
        .name = name,
        .mcv = mcv,
    });

    return self.finishAir(inst, .dead, .{ operand, .none, .none });
}
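Net effect of these CodeGen hunks: the arm backend no longer emits argument debug info eagerly. DbgArgReloc is generalized into DbgInfoReloc covering both arguments (.arg) and locals (.dbg_var_ptr / .dbg_var_val); airArg and airDbgVar merely append a reloc, and generate() resolves the whole dbg_info_relocs list only after all Mir instructions exist and saved_regs_stack_space is known.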
@@ -10396,12 +10396,7 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
    .mips, .mipsel => return false,
    .x86_64 => switch (target.os.tag) {
        .windows => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
        else => {
            const class = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
            if (class[0] == .memory) return true;
            if (class[0] == .x87 and class[2] != .none) return true;
            return false;
        },
        else => return firstParamSRetSystemV(fn_info.return_type, target),
    },
    .wasm32 => return wasm_c_abi.classifyType(fn_info.return_type, target)[0] == .indirect,
    .aarch64, .aarch64_be => return aarch64_c_abi.classifyType(fn_info.return_type, target) == .memory,
@@ -10413,11 +10408,20 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
        .riscv32, .riscv64 => return riscv_c_abi.classifyType(fn_info.return_type, target) == .memory,
        else => return false, // TODO investigate C ABI for other architectures
    },
    .SysV => return firstParamSRetSystemV(fn_info.return_type, target),
    .Win64 => return x86_64_abi.classifyWindows(fn_info.return_type, target) == .memory,
    .Stdcall => return !isScalar(fn_info.return_type),
    else => return false,
    }
}

fn firstParamSRetSystemV(ty: Type, target: std.Target) bool {
    const class = x86_64_abi.classifySystemV(ty, target, .ret);
    if (class[0] == .memory) return true;
    if (class[0] == .x87 and class[2] != .none) return true;
    return false;
}
/// In order to support the C calling convention, some return types need to be lowered
/// completely differently in the function prototype to honor the C ABI, and then
/// be effectively bitcasted to the actual return type.
@@ -10442,77 +10446,14 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
        }
    },
    .C => {
        const is_scalar = isScalar(fn_info.return_type);
        switch (target.cpu.arch) {
            .mips, .mipsel => return dg.lowerType(fn_info.return_type),
            .x86_64 => switch (target.os.tag) {
                .windows => switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
                    .integer => {
                        if (is_scalar) {
                            return dg.lowerType(fn_info.return_type);
                        } else {
                            const abi_size = fn_info.return_type.abiSize(target);
                            return dg.context.intType(@intCast(c_uint, abi_size * 8));
                        }
                    },
                    .win_i128 => return dg.context.intType(64).vectorType(2),
                    .memory => return dg.context.voidType(),
                    .sse => return dg.lowerType(fn_info.return_type),
                    else => unreachable,
                },
                else => {
                    if (is_scalar) {
                        return dg.lowerType(fn_info.return_type);
                    }
                    const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
                    if (classes[0] == .memory) {
                        return dg.context.voidType();
                    }
                    var llvm_types_buffer: [8]*llvm.Type = undefined;
                    var llvm_types_index: u32 = 0;
                    for (classes) |class| {
                        switch (class) {
                            .integer => {
                                llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
                                llvm_types_index += 1;
                            },
                            .sse, .sseup => {
                                llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
                                llvm_types_index += 1;
                            },
                            .float => {
                                llvm_types_buffer[llvm_types_index] = dg.context.floatType();
                                llvm_types_index += 1;
                            },
                            .float_combine => {
                                llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
                                llvm_types_index += 1;
                            },
                            .x87 => {
                                if (llvm_types_index != 0 or classes[2] != .none) {
                                    return dg.context.voidType();
                                }
                                llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
                                llvm_types_index += 1;
                            },
                            .x87up => continue,
                            .complex_x87 => {
                                @panic("TODO");
                            },
                            .memory => unreachable, // handled above
                            .win_i128 => unreachable, // windows only
                            .none => break,
                        }
                    }
                    if (classes[0] == .integer and classes[1] == .none) {
                        const abi_size = fn_info.return_type.abiSize(target);
                        return dg.context.intType(@intCast(c_uint, abi_size * 8));
                    }
                    return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
                },
                .windows => return lowerWin64FnRetTy(dg, fn_info),
                else => return lowerSystemVFnRetTy(dg, fn_info),
            },
            .wasm32 => {
                if (is_scalar) {
                if (isScalar(fn_info.return_type)) {
                    return dg.lowerType(fn_info.return_type);
                }
                const classes = wasm_c_abi.classifyType(fn_info.return_type, target);
@@ -10569,6 +10510,8 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
            else => return dg.lowerType(fn_info.return_type),
        }
    },
    .Win64 => return lowerWin64FnRetTy(dg, fn_info),
    .SysV => return lowerSystemVFnRetTy(dg, fn_info),
    .Stdcall => {
        if (isScalar(fn_info.return_type)) {
            return dg.lowerType(fn_info.return_type);
@@ -10580,6 +10523,76 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
    }
}

fn lowerWin64FnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
    const target = dg.module.getTarget();
    switch (x86_64_abi.classifyWindows(fn_info.return_type, target)) {
        .integer => {
            if (isScalar(fn_info.return_type)) {
                return dg.lowerType(fn_info.return_type);
            } else {
                const abi_size = fn_info.return_type.abiSize(target);
                return dg.context.intType(@intCast(c_uint, abi_size * 8));
            }
        },
        .win_i128 => return dg.context.intType(64).vectorType(2),
        .memory => return dg.context.voidType(),
        .sse => return dg.lowerType(fn_info.return_type),
        else => unreachable,
    }
}

fn lowerSystemVFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*llvm.Type {
    if (isScalar(fn_info.return_type)) {
        return dg.lowerType(fn_info.return_type);
    }
    const target = dg.module.getTarget();
    const classes = x86_64_abi.classifySystemV(fn_info.return_type, target, .ret);
    if (classes[0] == .memory) {
        return dg.context.voidType();
    }
    var llvm_types_buffer: [8]*llvm.Type = undefined;
    var llvm_types_index: u32 = 0;
    for (classes) |class| {
        switch (class) {
            .integer => {
                llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
                llvm_types_index += 1;
            },
            .sse, .sseup => {
                llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
                llvm_types_index += 1;
            },
            .float => {
                llvm_types_buffer[llvm_types_index] = dg.context.floatType();
                llvm_types_index += 1;
            },
            .float_combine => {
                llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
                llvm_types_index += 1;
            },
            .x87 => {
                if (llvm_types_index != 0 or classes[2] != .none) {
                    return dg.context.voidType();
                }
                llvm_types_buffer[llvm_types_index] = dg.context.x86FP80Type();
                llvm_types_index += 1;
            },
            .x87up => continue,
            .complex_x87 => {
                @panic("TODO");
            },
            .memory => unreachable, // handled above
            .win_i128 => unreachable, // windows only
            .none => break,
        }
    }
    if (classes[0] == .integer and classes[1] == .none) {
        const abi_size = fn_info.return_type.abiSize(target);
        return dg.context.intType(@intCast(c_uint, abi_size * 8));
    }
    return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
}
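The return-type lowering that was previously inlined under the .C convention is hoisted into lowerWin64FnRetTy and lowerSystemVFnRetTy, so the explicit .Win64 and .SysV calling conventions share one implementation with .C instead of duplicating the SystemV class walk.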
const ParamTypeIterator = struct {
    dg: *DeclGen,
    fn_info: Type.Payload.Function.Data,
@@ -10629,7 +10642,6 @@ const ParamTypeIterator = struct {
        it.zig_index += 1;
        return .no_bits;
    }
    const dg = it.dg;
    switch (it.fn_info.cc) {
        .Unspecified, .Inline => {
            it.zig_index += 1;
@@ -10648,7 +10660,6 @@ const ParamTypeIterator = struct {
            @panic("TODO implement async function lowering in the LLVM backend");
        },
        .C => {
            const is_scalar = isScalar(ty);
            switch (it.target.cpu.arch) {
                .mips, .mipsel => {
                    it.zig_index += 1;
@@ -10656,99 +10667,13 @@ const ParamTypeIterator = struct {
                    return .byval;
                },
                .x86_64 => switch (it.target.os.tag) {
                    .windows => switch (x86_64_abi.classifyWindows(ty, it.target)) {
                        .integer => {
                            if (is_scalar) {
                                it.zig_index += 1;
                                it.llvm_index += 1;
                                return .byval;
                            } else {
                                it.zig_index += 1;
                                it.llvm_index += 1;
                                return .abi_sized_int;
                            }
                        },
                        .win_i128 => {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            return .byref;
                        },
                        .memory => {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            return .byref_mut;
                        },
                        .sse => {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            return .byval;
                        },
                        else => unreachable,
                    },
                    else => {
                        const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
                        if (classes[0] == .memory) {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            it.byval_attr = true;
                            return .byref;
                        }
                        if (is_scalar) {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            return .byval;
                        }
                        var llvm_types_buffer: [8]*llvm.Type = undefined;
                        var llvm_types_index: u32 = 0;
                        for (classes) |class| {
                            switch (class) {
                                .integer => {
                                    llvm_types_buffer[llvm_types_index] = dg.context.intType(64);
                                    llvm_types_index += 1;
                                },
                                .sse, .sseup => {
                                    llvm_types_buffer[llvm_types_index] = dg.context.doubleType();
                                    llvm_types_index += 1;
                                },
                                .float => {
                                    llvm_types_buffer[llvm_types_index] = dg.context.floatType();
                                    llvm_types_index += 1;
                                },
                                .float_combine => {
                                    llvm_types_buffer[llvm_types_index] = dg.context.floatType().vectorType(2);
                                    llvm_types_index += 1;
                                },
                                .x87 => {
                                    it.zig_index += 1;
                                    it.llvm_index += 1;
                                    it.byval_attr = true;
                                    return .byref;
                                },
                                .x87up => unreachable,
                                .complex_x87 => {
                                    @panic("TODO");
                                },
                                .memory => unreachable, // handled above
                                .win_i128 => unreachable, // windows only
                                .none => break,
                            }
                        }
                        if (classes[0] == .integer and classes[1] == .none) {
                            it.zig_index += 1;
                            it.llvm_index += 1;
                            return .abi_sized_int;
                        }
                        it.llvm_types_buffer = llvm_types_buffer;
                        it.llvm_types_len = llvm_types_index;
                        it.llvm_index += llvm_types_index;
                        it.zig_index += 1;
                        return .multiple_llvm_types;
                    },
                    .windows => return it.nextWin64(ty),
                    else => return it.nextSystemV(ty),
                },
                .wasm32 => {
                    it.zig_index += 1;
                    it.llvm_index += 1;
                    if (is_scalar) {
                    if (isScalar(ty)) {
                        return .byval;
                    }
                    const classes = wasm_c_abi.classifyType(ty, it.target);
@@ -10766,7 +10691,7 @@ const ParamTypeIterator = struct {
                .byval => return .byval,
                .integer => {
                    it.llvm_types_len = 1;
                    it.llvm_types_buffer[0] = dg.context.intType(64);
                    it.llvm_types_buffer[0] = it.dg.context.intType(64);
                    return .multiple_llvm_types;
                },
                .double_integer => return Lowering{ .i64_array = 2 },
@@ -10806,6 +10731,8 @@ const ParamTypeIterator = struct {
                },
            }
        },
        .Win64 => return it.nextWin64(ty),
        .SysV => return it.nextSystemV(ty),
        .Stdcall => {
            it.zig_index += 1;
            it.llvm_index += 1;
@@ -10824,6 +10751,98 @@ const ParamTypeIterator = struct {
            },
        }
    }

    fn nextWin64(it: *ParamTypeIterator, ty: Type) ?Lowering {
        switch (x86_64_abi.classifyWindows(ty, it.target)) {
            .integer => {
                if (isScalar(ty)) {
                    it.zig_index += 1;
                    it.llvm_index += 1;
                    return .byval;
                } else {
                    it.zig_index += 1;
                    it.llvm_index += 1;
                    return .abi_sized_int;
                }
            },
            .win_i128 => {
                it.zig_index += 1;
                it.llvm_index += 1;
                return .byref;
            },
            .memory => {
                it.zig_index += 1;
                it.llvm_index += 1;
                return .byref_mut;
            },
            .sse => {
                it.zig_index += 1;
                it.llvm_index += 1;
                return .byval;
            },
            else => unreachable,
        }
    }

    fn nextSystemV(it: *ParamTypeIterator, ty: Type) ?Lowering {
        const classes = x86_64_abi.classifySystemV(ty, it.target, .arg);
        if (classes[0] == .memory) {
            it.zig_index += 1;
            it.llvm_index += 1;
            it.byval_attr = true;
            return .byref;
        }
        if (isScalar(ty)) {
            it.zig_index += 1;
            it.llvm_index += 1;
            return .byval;
        }
        var llvm_types_buffer: [8]*llvm.Type = undefined;
        var llvm_types_index: u32 = 0;
        for (classes) |class| {
            switch (class) {
                .integer => {
                    llvm_types_buffer[llvm_types_index] = it.dg.context.intType(64);
                    llvm_types_index += 1;
                },
                .sse, .sseup => {
                    llvm_types_buffer[llvm_types_index] = it.dg.context.doubleType();
                    llvm_types_index += 1;
                },
                .float => {
                    llvm_types_buffer[llvm_types_index] = it.dg.context.floatType();
                    llvm_types_index += 1;
                },
                .float_combine => {
                    llvm_types_buffer[llvm_types_index] = it.dg.context.floatType().vectorType(2);
                    llvm_types_index += 1;
                },
                .x87 => {
                    it.zig_index += 1;
                    it.llvm_index += 1;
                    it.byval_attr = true;
                    return .byref;
                },
                .x87up => unreachable,
                .complex_x87 => {
                    @panic("TODO");
                },
                .memory => unreachable, // handled above
                .win_i128 => unreachable, // windows only
                .none => break,
            }
        }
        if (classes[0] == .integer and classes[1] == .none) {
            it.zig_index += 1;
            it.llvm_index += 1;
            return .abi_sized_int;
        }
        it.llvm_types_buffer = llvm_types_buffer;
        it.llvm_types_len = llvm_types_index;
        it.llvm_index += llvm_types_index;
        it.zig_index += 1;
        return .multiple_llvm_types;
    }
};

fn iterateParamTypes(dg: *DeclGen, fn_info: Type.Payload.Function.Data) ParamTypeIterator {
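Parameter lowering gets the same treatment: ParamTypeIterator.next() now delegates its x86_64 .C branches, as well as the new explicit .Win64 and .SysV cases, to the shared nextWin64 and nextSystemV helpers, which also lets the cached dg local be dropped in favor of it.dg.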
@@ -238,12 +238,14 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
    const ip = switch (native_os) {
        .macos => @intCast(usize, ctx.mcontext.ss.pc),
        .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.PC]),
        .freebsd => @intCast(usize, ctx.mcontext.gpregs.elr),
        else => @intCast(usize, ctx.mcontext.pc),
    };
    // x29 is the ABI-designated frame pointer
    const bp = switch (native_os) {
        .macos => @intCast(usize, ctx.mcontext.ss.fp),
        .netbsd => @intCast(usize, ctx.mcontext.gregs[os.REG.FP]),
        .freebsd => @intCast(usize, ctx.mcontext.gpregs.x[os.REG.FP]),
        else => @intCast(usize, ctx.mcontext.regs[29]),
    };
    break :ctx StackContext{ .exception = .{ .bp = bp, .ip = ip } };
@@ -697,6 +697,7 @@ pub const File = struct {
    /// TODO audit this error set. most of these should be collapsed into one error,
    /// and ErrorFlags should be updated to convey the meaning to the user.
    pub const FlushError = error{
        BadDwarfCfi,
        CacheUnavailable,
        CurrentWorkingDirectoryUnlinked,
        DivisionByZero,
@@ -737,6 +738,8 @@ pub const File = struct {
        MissingEndForExpression,
        /// TODO: this should be removed from the error set in favor of using ErrorFlags
        MissingMainEntrypoint,
        /// TODO: this should be removed from the error set in favor of using ErrorFlags
        MissingSection,
        MissingSymbol,
        MissingTableSymbols,
        ModuleNameMismatch,
@@ -8,6 +8,7 @@ const std = @import("std");
const build_options = @import("build_options");
const assert = std.debug.assert;
const dwarf = std.dwarf;
const eh_frame = @import("eh_frame.zig");
const fs = std.fs;
const io = std.io;
const log = std.log.scoped(.link);
@@ -24,6 +25,7 @@ const DwarfInfo = @import("DwarfInfo.zig");
const LoadCommandIterator = macho.LoadCommandIterator;
const Zld = @import("zld.zig").Zld;
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const UnwindInfo = @import("UnwindInfo.zig");

name: []const u8,
mtime: u64,
@@ -44,6 +46,8 @@ symtab: []macho.nlist_64 = undefined,
/// Can be undefined as set together with in_symtab.
source_symtab_lookup: []u32 = undefined,
/// Can be undefined as set together with in_symtab.
reverse_symtab_lookup: []u32 = undefined,
/// Can be undefined as set together with in_symtab.
source_address_lookup: []i64 = undefined,
/// Can be undefined as set together with in_symtab.
source_section_index_lookup: []i64 = undefined,
@@ -53,22 +57,49 @@ strtab_lookup: []u32 = undefined,
atom_by_index_table: []AtomIndex = undefined,
/// Can be undefined as set together with in_symtab.
globals_lookup: []i64 = undefined,
/// Can be undefined as set together with in_symtab.
relocs_lookup: []RelocEntry = undefined,

atoms: std.ArrayListUnmanaged(AtomIndex) = .{},
exec_atoms: std.ArrayListUnmanaged(AtomIndex) = .{},

eh_frame_sect: ?macho.section_64 = null,
eh_frame_relocs_lookup: std.AutoArrayHashMapUnmanaged(u32, Record) = .{},
eh_frame_records_lookup: std.AutoArrayHashMapUnmanaged(AtomIndex, u32) = .{},

unwind_info_sect: ?macho.section_64 = null,
unwind_relocs_lookup: []Record = undefined,
unwind_records_lookup: std.AutoHashMapUnmanaged(AtomIndex, u32) = .{},

const RelocEntry = struct { start: u32, len: u32 };

const Record = struct {
    dead: bool,
    reloc: RelocEntry,
};

pub fn deinit(self: *Object, gpa: Allocator) void {
    self.atoms.deinit(gpa);
    self.exec_atoms.deinit(gpa);
    gpa.free(self.name);
    gpa.free(self.contents);
    if (self.in_symtab) |_| {
        gpa.free(self.source_symtab_lookup);
        gpa.free(self.reverse_symtab_lookup);
        gpa.free(self.source_address_lookup);
        gpa.free(self.source_section_index_lookup);
        gpa.free(self.strtab_lookup);
        gpa.free(self.symtab);
        gpa.free(self.atom_by_index_table);
        gpa.free(self.globals_lookup);
        gpa.free(self.relocs_lookup);
    }
    self.eh_frame_relocs_lookup.deinit(gpa);
    self.eh_frame_records_lookup.deinit(gpa);
    if (self.hasUnwindRecords()) {
        gpa.free(self.unwind_relocs_lookup);
    }
    self.unwind_records_lookup.deinit(gpa);
}

pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) !void {
pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch) !void {
|
||||
@ -105,76 +136,95 @@ pub fn parse(self: *Object, allocator: Allocator, cpu_arch: std.Target.Cpu.Arch)
|
||||
.ncmds = self.header.ncmds,
|
||||
.buffer = self.contents[@sizeOf(macho.mach_header_64)..][0..self.header.sizeofcmds],
|
||||
};
|
||||
while (it.next()) |cmd| {
|
||||
switch (cmd.cmd()) {
|
||||
.SYMTAB => {
|
||||
const symtab = cmd.cast(macho.symtab_command).?;
|
||||
self.in_symtab = @ptrCast(
|
||||
[*]const macho.nlist_64,
|
||||
@alignCast(@alignOf(macho.nlist_64), &self.contents[symtab.symoff]),
|
||||
)[0..symtab.nsyms];
|
||||
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
|
||||
const nsects = self.getSourceSections().len;
|
||||
const symtab = while (it.next()) |cmd| switch (cmd.cmd()) {
|
||||
.SYMTAB => break cmd.cast(macho.symtab_command).?,
|
||||
else => {},
|
||||
} else return;
|
||||
|
||||
const nsects = self.getSourceSections().len;
|
||||
self.in_symtab = @ptrCast(
|
||||
[*]const macho.nlist_64,
|
||||
@alignCast(@alignOf(macho.nlist_64), &self.contents[symtab.symoff]),
|
||||
)[0..symtab.nsyms];
|
||||
self.in_strtab = self.contents[symtab.stroff..][0..symtab.strsize];
|
||||
|
||||
self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
|
||||
self.source_symtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
|
||||
self.strtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
|
||||
self.globals_lookup = try allocator.alloc(i64, self.in_symtab.?.len);
|
||||
self.atom_by_index_table = try allocator.alloc(AtomIndex, self.in_symtab.?.len + nsects);
|
||||
// This is wasteful but we need to be able to lookup source symbol address after stripping and
|
||||
// allocating of sections.
|
||||
self.source_address_lookup = try allocator.alloc(i64, self.in_symtab.?.len);
|
||||
self.source_section_index_lookup = try allocator.alloc(i64, nsects);
|
||||
self.symtab = try allocator.alloc(macho.nlist_64, self.in_symtab.?.len + nsects);
|
||||
self.source_symtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
|
||||
self.reverse_symtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
|
||||
self.strtab_lookup = try allocator.alloc(u32, self.in_symtab.?.len);
|
||||
self.globals_lookup = try allocator.alloc(i64, self.in_symtab.?.len);
|
||||
self.atom_by_index_table = try allocator.alloc(AtomIndex, self.in_symtab.?.len + nsects);
|
||||
self.relocs_lookup = try allocator.alloc(RelocEntry, self.in_symtab.?.len + nsects);
|
||||
// This is wasteful but we need to be able to lookup source symbol address after stripping and
|
||||
// allocating of sections.
|
||||
self.source_address_lookup = try allocator.alloc(i64, self.in_symtab.?.len);
|
||||
self.source_section_index_lookup = try allocator.alloc(i64, nsects);
|
||||
|
||||
for (self.symtab) |*sym| {
|
||||
sym.* = .{
|
||||
.n_value = 0,
|
||||
.n_sect = 0,
|
||||
.n_desc = 0,
|
||||
.n_strx = 0,
|
||||
.n_type = 0,
|
||||
};
|
||||
}
|
||||
for (self.symtab) |*sym| {
|
||||
sym.* = .{
|
||||
.n_value = 0,
|
||||
.n_sect = 0,
|
||||
.n_desc = 0,
|
||||
.n_strx = 0,
|
||||
.n_type = 0,
|
||||
};
|
||||
}
|
||||
|
||||
mem.set(i64, self.globals_lookup, -1);
|
||||
mem.set(AtomIndex, self.atom_by_index_table, 0);
|
||||
mem.set(i64, self.source_section_index_lookup, -1);
|
||||
mem.set(i64, self.globals_lookup, -1);
|
||||
mem.set(AtomIndex, self.atom_by_index_table, 0);
|
||||
mem.set(i64, self.source_section_index_lookup, -1);
|
||||
mem.set(RelocEntry, self.relocs_lookup, .{
|
||||
.start = 0,
|
||||
.len = 0,
|
||||
});
|
||||
|
||||
// You would expect that the symbol table is at least pre-sorted based on symbol's type:
|
||||
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
|
||||
// the GO compiler does not necessarily respect that therefore we sort immediately by type
|
||||
// and address within.
|
||||
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
|
||||
defer sorted_all_syms.deinit();
|
||||
// You would expect that the symbol table is at least pre-sorted based on symbol's type:
|
||||
// local < extern defined < undefined. Unfortunately, this is not guaranteed! For instance,
|
||||
// the GO compiler does not necessarily respect that therefore we sort immediately by type
|
||||
// and address within.
|
||||
var sorted_all_syms = try std.ArrayList(SymbolAtIndex).initCapacity(allocator, self.in_symtab.?.len);
|
||||
defer sorted_all_syms.deinit();
|
||||
|
||||
for (self.in_symtab.?) |_, index| {
|
||||
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
|
||||
}
|
||||
for (self.in_symtab.?) |_, index| {
|
||||
sorted_all_syms.appendAssumeCapacity(.{ .index = @intCast(u32, index) });
|
||||
}
|
||||
|
||||
// We sort by type: defined < undefined, and
|
||||
// afterwards by address in each group. Normally, dysymtab should
|
||||
// be enough to guarantee the sort, but turns out not every compiler
|
||||
// is kind enough to specify the symbols in the correct order.
|
||||
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
|
||||
// We sort by type: defined < undefined, and
|
||||
// afterwards by address in each group. Normally, dysymtab should
|
||||
// be enough to guarantee the sort, but turns out not every compiler
|
||||
// is kind enough to specify the symbols in the correct order.
|
||||
sort.sort(SymbolAtIndex, sorted_all_syms.items, self, SymbolAtIndex.lessThan);
|
||||
|
||||
for (sorted_all_syms.items) |sym_id, i| {
|
||||
const sym = sym_id.getSymbol(self);
|
||||
for (sorted_all_syms.items) |sym_id, i| {
|
||||
const sym = sym_id.getSymbol(self);
|
||||
|
||||
if (sym.sect() and self.source_section_index_lookup[sym.n_sect - 1] == -1) {
|
||||
self.source_section_index_lookup[sym.n_sect - 1] = @intCast(i64, i);
|
||||
}
|
||||
|
||||
self.symtab[i] = sym;
|
||||
self.source_symtab_lookup[i] = sym_id.index;
|
||||
self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value);
|
||||
|
||||
const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
|
||||
self.strtab_lookup[i] = @intCast(u32, sym_name_len);
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
if (sym.sect() and self.source_section_index_lookup[sym.n_sect - 1] == -1) {
|
||||
self.source_section_index_lookup[sym.n_sect - 1] = @intCast(i64, i);
|
||||
}
|
||||
|
||||
self.symtab[i] = sym;
|
||||
self.source_symtab_lookup[i] = sym_id.index;
|
||||
self.reverse_symtab_lookup[sym_id.index] = @intCast(u32, i);
|
||||
self.source_address_lookup[i] = if (sym.undf()) -1 else @intCast(i64, sym.n_value);
|
||||
|
||||
const sym_name_len = mem.sliceTo(@ptrCast([*:0]const u8, self.in_strtab.?.ptr + sym.n_strx), 0).len + 1;
|
||||
self.strtab_lookup[i] = @intCast(u32, sym_name_len);
|
||||
}
|
||||
|
||||
// Parse __TEXT,__eh_frame header if one exists
|
||||
self.eh_frame_sect = self.getSourceSectionByName("__TEXT", "__eh_frame");
|
||||
|
||||
// Parse __LD,__compact_unwind header if one exists
|
||||
self.unwind_info_sect = self.getSourceSectionByName("__LD", "__compact_unwind");
|
||||
if (self.hasUnwindRecords()) {
|
||||
self.unwind_relocs_lookup = try allocator.alloc(Record, self.getUnwindRecords().len);
|
||||
mem.set(Record, self.unwind_relocs_lookup, .{
|
||||
.dead = true,
|
||||
.reloc = .{
|
||||
.start = 0,
|
||||
.len = 0,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
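parse() now locates the symtab load command first and does the bookkeeping once at function scope. The new reverse_symtab_lookup inverts source_symtab_lookup (sorted index from source index), relocs_lookup reserves a per-symbol relocation range, and the __TEXT,__eh_frame and __LD,__compact_unwind sections are discovered up front so unwind_relocs_lookup can be sized from the record count.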
@ -192,6 +242,17 @@ const SymbolAtIndex = struct {
|
||||
return mem.sliceTo(@ptrCast([*:0]const u8, ctx.in_strtab.?.ptr + off), 0);
|
||||
}
|
||||
|
||||
fn getSymbolSeniority(self: SymbolAtIndex, ctx: Context) u2 {
|
||||
const sym = self.getSymbol(ctx);
|
||||
if (!sym.ext()) {
|
||||
const sym_name = self.getSymbolName(ctx);
|
||||
if (mem.startsWith(u8, sym_name, "l") or mem.startsWith(u8, sym_name, "L")) return 0;
|
||||
return 1;
|
||||
}
|
||||
if (sym.weakDef() or sym.pext()) return 2;
|
||||
return 3;
|
||||
}
|
||||
|
||||
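For reference, a minimal self-contained sketch of the tie-breaking this seniority rank enables in the new lessThan below: address first, then section, then seniority, with the string-table offset as a stable fallback. The Sym type and the values are illustrative, not from this diff:

const std = @import("std");

const Sym = struct { addr: u64, sect: u8, seniority: u2, strx: u32 };

fn symLessThan(_: void, lhs: Sym, rhs: Sym) bool {
    // Mirrors the ordering used above: address, then section, then seniority,
    // falling back to the string-table offset for a deterministic result.
    if (lhs.addr != rhs.addr) return lhs.addr < rhs.addr;
    if (lhs.sect != rhs.sect) return lhs.sect < rhs.sect;
    if (lhs.seniority != rhs.seniority) return lhs.seniority < rhs.seniority;
    return lhs.strx < rhs.strx;
}

test "temp/local symbols sort before weak and extern ones at the same address" {
    var syms = [_]Sym{
        .{ .addr = 0x10, .sect = 1, .seniority = 3, .strx = 2 },
        .{ .addr = 0x10, .sect = 1, .seniority = 0, .strx = 1 },
    };
    std.sort.sort(Sym, &syms, {}, symLessThan);
    try std.testing.expectEqual(@as(u2, 0), syms[0].seniority);
}
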
/// Performs lexicographic-like check.
/// * lhs and rhs defined
/// * if lhs == rhs
@ -206,23 +267,15 @@ const SymbolAtIndex = struct {
if (lhs.sect() and rhs.sect()) {
if (lhs.n_value == rhs.n_value) {
if (lhs.n_sect == rhs.n_sect) {
if (lhs.ext() and rhs.ext()) {
if ((lhs.pext() or lhs.weakDef()) and (rhs.pext() or rhs.weakDef())) {
return false;
} else return rhs.pext() or rhs.weakDef();
} else {
const lhs_name = lhs_index.getSymbolName(ctx);
const lhs_temp = mem.startsWith(u8, lhs_name, "l") or mem.startsWith(u8, lhs_name, "L");
const rhs_name = rhs_index.getSymbolName(ctx);
const rhs_temp = mem.startsWith(u8, rhs_name, "l") or mem.startsWith(u8, rhs_name, "L");
if (lhs_temp and rhs_temp) {
return false;
} else return rhs_temp;
}
const lhs_senior = lhs_index.getSymbolSeniority(ctx);
const rhs_senior = rhs_index.getSymbolSeniority(ctx);
if (lhs_senior == rhs_senior) {
return lessThanByNStrx(ctx, lhs_index, rhs_index);
} else return lhs_senior < rhs_senior;
} else return lhs.n_sect < rhs.n_sect;
} else return lhs.n_value < rhs.n_value;
} else if (lhs.undf() and rhs.undf()) {
return false;
return lessThanByNStrx(ctx, lhs_index, rhs_index);
} else return rhs.undf();
}

@ -295,14 +348,20 @@ fn sectionLessThanByAddress(ctx: void, lhs: SortedSection, rhs: SortedSection) b
return lhs.header.addr < rhs.header.addr;
}

/// Splits input sections into Atoms.
pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u32) !void {
log.debug("splitting object({d}, {s}) into atoms", .{ object_id, self.name });

try self.splitRegularSections(zld, object_id);
try self.parseEhFrameSection(zld, object_id);
try self.parseUnwindInfo(zld, object_id);
}

/// Splits input regular sections into Atoms.
/// If the Object was compiled with `MH_SUBSECTIONS_VIA_SYMBOLS`, splits section
/// into subsections where each subsection then represents an Atom.
pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
pub fn splitRegularSections(self: *Object, zld: *Zld, object_id: u32) !void {
const gpa = zld.gpa;

log.debug("splitting object({d}, {s}) into atoms", .{ object_id, self.name });

const sections = self.getSourceSections();
for (sections) |sect, id| {
if (sect.isDebug()) continue;
@ -418,6 +477,9 @@ pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
sect.@"align",
out_sect_id,
);
if (!sect.isZerofill()) {
try self.cacheRelocs(zld, atom_index);
}
zld.addAtomToSection(atom_index);
}

@ -431,7 +493,6 @@ pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
const nsyms_trailing = atom_loc.len - 1;
next_sym_index += atom_loc.len;

// TODO: We want to bubble up the first externally defined symbol here.
const atom_size = if (next_sym_index < sect_start_index + sect_loc.len)
symtab[next_sym_index].n_value - addr
else
@ -461,7 +522,9 @@ pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
const alias_index = self.getSectionAliasSymbolIndex(sect_id);
self.atom_by_index_table[alias_index] = atom_index;
}

if (!sect.isZerofill()) {
try self.cacheRelocs(zld, atom_index);
}
zld.addAtomToSection(atom_index);
}
} else {
@ -476,6 +539,9 @@ pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
sect.@"align",
out_sect_id,
);
if (!sect.isZerofill()) {
try self.cacheRelocs(zld, atom_index);
}
zld.addAtomToSection(atom_index);
}
}
@ -484,7 +550,7 @@ pub fn splitIntoAtoms(self: *Object, zld: *Zld, object_id: u31) !void {
fn createAtomFromSubsection(
self: *Object,
zld: *Zld,
object_id: u31,
object_id: u32,
sym_index: u32,
inner_sym_index: u32,
inner_nsyms_trailing: u32,
@ -497,7 +563,7 @@ fn createAtomFromSubsection(
const atom = zld.getAtomPtr(atom_index);
atom.inner_sym_index = inner_sym_index;
atom.inner_nsyms_trailing = inner_nsyms_trailing;
atom.file = object_id;
atom.file = object_id + 1;
self.symtab[sym_index].n_sect = out_sect_id + 1;

log.debug("creating ATOM(%{d}, '{s}') in sect({d}, '{s},{s}') in object({d})", .{
@ -519,9 +585,220 @@ fn createAtomFromSubsection(
self.atom_by_index_table[sym_loc.sym_index] = atom_index;
}

const out_sect = zld.sections.items(.header)[out_sect_id];
if (out_sect.isCode() and
mem.eql(u8, "__TEXT", out_sect.segName()) and
mem.eql(u8, "__text", out_sect.sectName()))
{
// TODO currently assuming a single section for executable machine code
try self.exec_atoms.append(gpa, atom_index);
}

return atom_index;
}

fn filterRelocs(
relocs: []align(1) const macho.relocation_info,
start_addr: u64,
end_addr: u64,
) RelocEntry {
const Predicate = struct {
addr: u64,

pub fn predicate(self: @This(), rel: macho.relocation_info) bool {
return rel.r_address >= self.addr;
}
};
const LPredicate = struct {
addr: u64,

pub fn predicate(self: @This(), rel: macho.relocation_info) bool {
return rel.r_address < self.addr;
}
};

const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr });
const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr });

return .{ .start = @intCast(u32, start), .len = @intCast(u32, len) };
}

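filterRelocs above works because section relocations in Mach-O objects are, in practice, ordered by descending r_address: a binary search on the end bound followed by a linear count for the length yields one contiguous run. A minimal self-contained sketch of that bounding scheme, under that ordering assumption (firstBelow is a hypothetical stand-in for zld.zig's bsearch; the addresses are made up):

const std = @import("std");

// Hypothetical stand-in for @import("zld.zig").bsearch: index of the first
// element whose address drops below `bound` in a descending array.
fn firstBelow(addrs: []const u32, bound: u32) usize {
    var lo: usize = 0;
    var hi: usize = addrs.len;
    while (lo < hi) {
        const mid = lo + (hi - lo) / 2;
        if (addrs[mid] >= bound) lo = mid + 1 else hi = mid;
    }
    return lo;
}

test "relocs for [start_addr, end_addr) form one contiguous run" {
    const addrs = [_]u32{ 0x40, 0x30, 0x24, 0x20, 0x10 }; // descending r_address
    const start = firstBelow(&addrs, 0x30); // skip relocs at or above end_addr
    var len: usize = 0;
    while (start + len < addrs.len and addrs[start + len] >= 0x20) : (len += 1) {}
    try std.testing.expectEqual(@as(usize, 2), start);
    try std.testing.expectEqual(@as(usize, 2), len); // 0x24 and 0x20
}
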
fn cacheRelocs(self: *Object, zld: *Zld, atom_index: AtomIndex) !void {
const atom = zld.getAtom(atom_index);

const source_sect = if (self.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
const source_sect = self.getSourceSection(source_sym.n_sect - 1);
assert(!source_sect.isZerofill());
break :blk source_sect;
} else blk: {
// If there was no matching symbol present in the source symtab, this means
// we are dealing with either an entire section, or part of it, but also
// starting at the beginning.
const nbase = @intCast(u32, self.in_symtab.?.len);
const sect_id = @intCast(u16, atom.sym_index - nbase);
const source_sect = self.getSourceSection(sect_id);
assert(!source_sect.isZerofill());
break :blk source_sect;
};

const relocs = self.getRelocs(source_sect);

self.relocs_lookup[atom.sym_index] = if (self.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
const offset = source_sym.n_value - source_sect.addr;
break :blk filterRelocs(relocs, offset, offset + atom.size);
} else filterRelocs(relocs, 0, atom.size);
}

fn parseEhFrameSection(self: *Object, zld: *Zld, object_id: u32) !void {
const sect = self.eh_frame_sect orelse return;

log.debug("parsing __TEXT,__eh_frame section", .{});

if (zld.getSectionByName("__TEXT", "__eh_frame") == null) {
_ = try zld.initSection("__TEXT", "__eh_frame", .{});
}

const gpa = zld.gpa;
const cpu_arch = zld.options.target.cpu.arch;
const relocs = self.getRelocs(sect);

var it = self.getEhFrameRecordsIterator();
var record_count: u32 = 0;
while (try it.next()) |_| {
record_count += 1;
}

try self.eh_frame_relocs_lookup.ensureTotalCapacity(gpa, record_count);
try self.eh_frame_records_lookup.ensureTotalCapacity(gpa, record_count);

it.reset();

while (try it.next()) |record| {
const offset = it.pos - record.getSize();
const rel_pos = switch (cpu_arch) {
.aarch64 => filterRelocs(relocs, offset, offset + record.getSize()),
.x86_64 => RelocEntry{ .start = 0, .len = 0 },
else => unreachable,
};
self.eh_frame_relocs_lookup.putAssumeCapacityNoClobber(offset, .{
.dead = false,
.reloc = rel_pos,
});

if (record.tag == .fde) {
const target = blk: {
switch (cpu_arch) {
.aarch64 => {
assert(rel_pos.len > 0); // TODO convert to an error as the FDE eh frame is malformed
// Find function symbol that this record describes
const rel = relocs[rel_pos.start..][rel_pos.len - 1];
const target = UnwindInfo.parseRelocTarget(
zld,
object_id,
rel,
it.data[offset..],
@intCast(i32, offset),
);
break :blk target;
},
.x86_64 => {
const target_address = record.getTargetSymbolAddress(.{
.base_addr = sect.addr,
.base_offset = offset,
});
const target_sym_index = self.getSymbolByAddress(target_address, null);
const target = if (self.getGlobal(target_sym_index)) |global_index|
zld.globals.items[global_index]
else
SymbolWithLoc{ .sym_index = target_sym_index, .file = object_id + 1 };
break :blk target;
},
else => unreachable,
}
};
log.debug("FDE at offset {x} tracks {s}", .{ offset, zld.getSymbolName(target) });
if (target.getFile() != object_id) {
self.eh_frame_relocs_lookup.getPtr(offset).?.dead = true;
} else {
const atom_index = self.getAtomIndexForSymbol(target.sym_index).?;
self.eh_frame_records_lookup.putAssumeCapacityNoClobber(atom_index, offset);
}
}
}
}

fn parseUnwindInfo(self: *Object, zld: *Zld, object_id: u32) !void {
const sect = self.unwind_info_sect orelse {
// If it so happens that the object had an `__eh_frame` section defined but no `__compact_unwind`,
// we will try fully synthesising unwind info records to somewhat match Apple ld's
// approach. However, we will only synthesise DWARF records and nothing more. For this reason,
// we still create the output `__TEXT,__unwind_info` section.
if (self.eh_frame_sect != null) {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) {
_ = try zld.initSection("__TEXT", "__unwind_info", .{});
}
}
return;
};

log.debug("parsing unwind info in {s}", .{self.name});

const gpa = zld.gpa;
const cpu_arch = zld.options.target.cpu.arch;

if (zld.getSectionByName("__TEXT", "__unwind_info") == null) {
_ = try zld.initSection("__TEXT", "__unwind_info", .{});
}

try self.unwind_records_lookup.ensureTotalCapacity(gpa, @intCast(u32, self.exec_atoms.items.len));

const unwind_records = self.getUnwindRecords();

const needs_eh_frame = for (unwind_records) |record| {
if (UnwindInfo.UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) break true;
} else false;

if (needs_eh_frame) {
if (self.eh_frame_sect == null) {
log.err("missing __TEXT,__eh_frame section", .{});
log.err(" in object {s}", .{self.name});
return error.MissingSection;
}
}

const relocs = self.getRelocs(sect);
for (unwind_records) |record, record_id| {
const offset = record_id * @sizeOf(macho.compact_unwind_entry);
const rel_pos = filterRelocs(
relocs,
offset,
offset + @sizeOf(macho.compact_unwind_entry),
);
assert(rel_pos.len > 0); // TODO convert to an error as the unwind info is malformed
self.unwind_relocs_lookup[record_id] = .{
.dead = false,
.reloc = rel_pos,
};

// Find function symbol that this record describes
const rel = relocs[rel_pos.start..][rel_pos.len - 1];
const target = UnwindInfo.parseRelocTarget(
zld,
object_id,
rel,
mem.asBytes(&record),
@intCast(i32, offset),
);
log.debug("unwind record {d} tracks {s}", .{ record_id, zld.getSymbolName(target) });
if (target.getFile() != object_id) {
self.unwind_relocs_lookup[record_id].dead = true;
} else {
const atom_index = self.getAtomIndexForSymbol(target.sym_index).?;
self.unwind_records_lookup.putAssumeCapacityNoClobber(atom_index, @intCast(u32, record_id));
}
}
}

pub fn getSourceSymbol(self: Object, index: u32) ?macho.nlist_64 {
const symtab = self.in_symtab.?;
if (index >= symtab.len) return null;
@ -529,23 +806,28 @@ pub fn getSourceSymbol(self: Object, index: u32) ?macho.nlist_64 {
return symtab[mapped_index];
}

/// Expects an arena allocator.
/// Caller owns memory.
pub fn createReverseSymbolLookup(self: Object, arena: Allocator) ![]u32 {
const symtab = self.in_symtab orelse return &[0]u32{};
const lookup = try arena.alloc(u32, symtab.len);
for (self.source_symtab_lookup) |source_id, id| {
lookup[source_id] = @intCast(u32, id);
}
return lookup;
}

pub fn getSourceSection(self: Object, index: u16) macho.section_64 {
const sections = self.getSourceSections();
assert(index < sections.len);
return sections[index];
}

pub fn getSourceSectionByName(self: Object, segname: []const u8, sectname: []const u8) ?macho.section_64 {
const sections = self.getSourceSections();
for (sections) |sect| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
return sect;
} else return null;
}

pub fn getSourceSectionIndexByName(self: Object, segname: []const u8, sectname: []const u8) ?u8 {
const sections = self.getSourceSections();
for (sections) |sect, i| {
if (mem.eql(u8, segname, sect.segName()) and mem.eql(u8, sectname, sect.sectName()))
return @intCast(u8, i + 1);
} else return null;
}

pub fn getSourceSections(self: Object) []const macho.section_64 {
var it = LoadCommandIterator{
.ncmds = self.header.ncmds,
@ -652,8 +934,64 @@ pub fn getSymbolName(self: Object, index: u32) []const u8 {
return strtab[start..][0 .. len - 1 :0];
}

pub fn getSymbolByAddress(self: Object, addr: u64, sect_hint: ?u8) u32 {
// Find containing atom
const Predicate = struct {
addr: i64,

pub fn predicate(pred: @This(), other: i64) bool {
return if (other == -1) true else other > pred.addr;
}
};

if (sect_hint) |sect_id| {
if (self.source_section_index_lookup[sect_id] > -1) {
const first_sym_index = @intCast(usize, self.source_section_index_lookup[sect_id]);
const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup[first_sym_index..], Predicate{
.addr = @intCast(i64, addr),
});
if (target_sym_index > 0) {
return @intCast(u32, first_sym_index + target_sym_index - 1);
}
}
return self.getSectionAliasSymbolIndex(sect_id);
}

const target_sym_index = @import("zld.zig").lsearch(i64, self.source_address_lookup, Predicate{
.addr = @intCast(i64, addr),
});
assert(target_sym_index > 0);
return @intCast(u32, target_sym_index - 1);
}

pub fn getGlobal(self: Object, sym_index: u32) ?u32 {
if (self.globals_lookup[sym_index] == -1) return null;
return @intCast(u32, self.globals_lookup[sym_index]);
}

pub fn getAtomIndexForSymbol(self: Object, sym_index: u32) ?AtomIndex {
const atom_index = self.atom_by_index_table[sym_index];
if (atom_index == 0) return null;
return atom_index;
}

pub fn hasUnwindRecords(self: Object) bool {
return self.unwind_info_sect != null;
}

pub fn getUnwindRecords(self: Object) []align(1) const macho.compact_unwind_entry {
const sect = self.unwind_info_sect orelse return &[0]macho.compact_unwind_entry{};
const data = self.getSectionContents(sect);
const num_entries = @divExact(data.len, @sizeOf(macho.compact_unwind_entry));
return @ptrCast([*]align(1) const macho.compact_unwind_entry, data)[0..num_entries];
}

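getUnwindRecords reinterprets the raw __LD,__compact_unwind bytes in place as a slice of fixed-size records. The same pattern, demonstrated on a toy extern struct (ToyEntry is illustrative, not macho.compact_unwind_entry):

const std = @import("std");

// ToyEntry stands in for macho.compact_unwind_entry; only the pattern matters.
const ToyEntry = extern struct { start: u64, len: u32, enc: u32 };

test "reinterpret raw section bytes as a slice of entries" {
    var entries = [_]ToyEntry{.{ .start = 0x1000, .len = 0x20, .enc = 1 }};
    const data = std.mem.sliceAsBytes(&entries);
    const num_entries = @divExact(data.len, @sizeOf(ToyEntry));
    const back = @ptrCast([*]align(1) const ToyEntry, data.ptr)[0..num_entries];
    try std.testing.expectEqual(@as(u64, 0x1000), back[0].start);
}
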
pub fn hasEhFrameRecords(self: Object) bool {
return self.eh_frame_sect != null;
}

pub fn getEhFrameRecordsIterator(self: Object) eh_frame.Iterator {
const sect = self.eh_frame_sect orelse return .{ .data = &[0]u8{} };
const data = self.getSectionContents(sect);
return .{ .data = data };
}

845
src/link/MachO/UnwindInfo.zig
Normal file
@ -0,0 +1,845 @@
const UnwindInfo = @This();

const std = @import("std");
const assert = std.debug.assert;
const eh_frame = @import("eh_frame.zig");
const fs = std.fs;
const leb = std.leb;
const log = std.log.scoped(.unwind_info);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const trace = @import("../../tracy.zig").trace;

const Allocator = mem.Allocator;
const Atom = @import("ZldAtom.zig");
const AtomIndex = @import("zld.zig").AtomIndex;
const EhFrameRecord = eh_frame.EhFrameRecord;
const Object = @import("Object.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const Zld = @import("zld.zig").Zld;

const N_DEAD = @import("zld.zig").N_DEAD;

gpa: Allocator,

/// List of all unwind records gathered from all objects and sorted
/// by source function address.
records: std.ArrayListUnmanaged(macho.compact_unwind_entry) = .{},
records_lookup: std.AutoHashMapUnmanaged(AtomIndex, RecordIndex) = .{},

/// List of all personalities referenced by either unwind info entries
/// or __eh_frame entries.
personalities: [max_personalities]SymbolWithLoc = undefined,
personalities_count: u2 = 0,

/// List of common encodings sorted in descending order with the most common first.
common_encodings: [max_common_encodings]macho.compact_unwind_encoding_t = undefined,
common_encodings_count: u7 = 0,

/// List of record indexes containing an LSDA pointer.
lsdas: std.ArrayListUnmanaged(RecordIndex) = .{},
lsdas_lookup: std.AutoHashMapUnmanaged(RecordIndex, u32) = .{},

/// List of second level pages.
pages: std.ArrayListUnmanaged(Page) = .{},

const RecordIndex = u32;

const max_personalities = 3;
const max_common_encodings = 127;
const max_compact_encodings = 256;

const second_level_page_bytes = 0x1000;
const second_level_page_words = second_level_page_bytes / @sizeOf(u32);

const max_regular_second_level_entries =
(second_level_page_bytes - @sizeOf(macho.unwind_info_regular_second_level_page_header)) /
@sizeOf(macho.unwind_info_regular_second_level_entry);

const max_compressed_second_level_entries =
(second_level_page_bytes - @sizeOf(macho.unwind_info_compressed_second_level_page_header)) /
@sizeOf(u32);

const compressed_entry_func_offset_mask = ~@as(u24, 0);

const Page = struct {
kind: enum { regular, compressed },
start: RecordIndex,
count: u16,
page_encodings: [max_compact_encodings]RecordIndex = undefined,
page_encodings_count: u8 = 0,

fn appendPageEncoding(page: *Page, record_id: RecordIndex) void {
assert(page.page_encodings_count <= max_compact_encodings);
page.page_encodings[page.page_encodings_count] = record_id;
page.page_encodings_count += 1;
}

fn getPageEncoding(
page: *const Page,
info: *const UnwindInfo,
enc: macho.compact_unwind_encoding_t,
) ?u8 {
comptime var index: u8 = 0;
inline while (index < max_compact_encodings) : (index += 1) {
if (index >= page.page_encodings_count) return null;
const record_id = page.page_encodings[index];
const record = info.records.items[record_id];
if (record.compactUnwindEncoding == enc) {
return index;
}
}
return null;
}

fn format(
page: *const Page,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = page;
_ = unused_format_string;
_ = options;
_ = writer;
@compileError("do not format Page directly; use page.fmtDebug()");
}

const DumpCtx = struct {
page: *const Page,
info: *const UnwindInfo,
};

fn dump(
ctx: DumpCtx,
comptime unused_format_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
_ = options;
comptime assert(unused_format_string.len == 0);
try writer.writeAll("Page:\n");
try writer.print(" kind: {s}\n", .{@tagName(ctx.page.kind)});
try writer.print(" entries: {d} - {d}\n", .{
ctx.page.start,
ctx.page.start + ctx.page.count,
});
try writer.print(" encodings (count = {d})\n", .{ctx.page.page_encodings_count});
for (ctx.page.page_encodings[0..ctx.page.page_encodings_count]) |record_id, i| {
const record = ctx.info.records.items[record_id];
const enc = record.compactUnwindEncoding;
try writer.print(" {d}: 0x{x:0>8}\n", .{ ctx.info.common_encodings_count + i, enc });
}
}

fn fmtDebug(page: *const Page, info: *const UnwindInfo) std.fmt.Formatter(dump) {
return .{ .data = .{
.page = page,
.info = info,
} };
}

fn write(page: *const Page, info: *const UnwindInfo, writer: anytype) !void {
switch (page.kind) {
.regular => {
try writer.writeStruct(macho.unwind_info_regular_second_level_page_header{
.entryPageOffset = @sizeOf(macho.unwind_info_regular_second_level_page_header),
.entryCount = page.count,
});

for (info.records.items[page.start..][0..page.count]) |record| {
try writer.writeStruct(macho.unwind_info_regular_second_level_entry{
.functionOffset = @intCast(u32, record.rangeStart),
.encoding = record.compactUnwindEncoding,
});
}
},
.compressed => {
const entry_offset = @sizeOf(macho.unwind_info_compressed_second_level_page_header) +
@intCast(u16, page.page_encodings_count) * @sizeOf(u32);
try writer.writeStruct(macho.unwind_info_compressed_second_level_page_header{
.entryPageOffset = entry_offset,
.entryCount = page.count,
.encodingsPageOffset = @sizeOf(
macho.unwind_info_compressed_second_level_page_header,
),
.encodingsCount = page.page_encodings_count,
});

for (page.page_encodings[0..page.page_encodings_count]) |record_id| {
const enc = info.records.items[record_id].compactUnwindEncoding;
try writer.writeIntLittle(u32, enc);
}

assert(page.count > 0);
const first_entry = info.records.items[page.start];
for (info.records.items[page.start..][0..page.count]) |record| {
const enc_index = blk: {
if (info.getCommonEncoding(record.compactUnwindEncoding)) |id| {
break :blk id;
}
const ncommon = info.common_encodings_count;
break :blk ncommon + page.getPageEncoding(info, record.compactUnwindEncoding).?;
};
const compressed = macho.UnwindInfoCompressedEntry{
.funcOffset = @intCast(u24, record.rangeStart - first_entry.rangeStart),
.encodingIndex = @intCast(u8, enc_index),
};
try writer.writeStruct(compressed);
}
},
}
}
};

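Each compressed second-level entry packs a 24-bit function offset (relative to the page's first record) and an 8-bit encoding index (common encodings first, then page-local ones) into a single u32, which is what UnwindInfoCompressedEntry models above. A minimal sketch of that packing, assuming the low 24 bits hold the offset:

const std = @import("std");

// The low 24 bits hold the function offset from the page's first record;
// the high 8 bits hold the encoding index.
fn packCompressed(func_offset: u24, enc_index: u8) u32 {
    return @as(u32, func_offset) | (@as(u32, enc_index) << 24);
}

test "compressed second-level entry packing" {
    const word = packCompressed(0x10, 3);
    try std.testing.expectEqual(@as(u32, 0x0300_0010), word);
    try std.testing.expectEqual(@as(u24, 0x10), @truncate(u24, word));
}
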
pub fn deinit(info: *UnwindInfo) void {
info.records.deinit(info.gpa);
info.records_lookup.deinit(info.gpa);
info.pages.deinit(info.gpa);
info.lsdas.deinit(info.gpa);
info.lsdas_lookup.deinit(info.gpa);
}

pub fn scanRelocs(zld: *Zld) !void {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;

const cpu_arch = zld.options.target.cpu.arch;
for (zld.objects.items) |*object, object_id| {
const unwind_records = object.getUnwindRecords();
for (object.exec_atoms.items) |atom_index| {
const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
if (object.unwind_relocs_lookup[record_id].dead) continue;
const record = unwind_records[record_id];
if (!UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
if (getPersonalityFunctionReloc(
zld,
@intCast(u32, object_id),
record_id,
)) |rel| {
// Personality function; add GOT pointer.
const target = parseRelocTarget(
zld,
@intCast(u32, object_id),
rel,
mem.asBytes(&record),
@intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
);
try Atom.addGotEntry(zld, target);
}
}
}
}
}

pub fn collect(info: *UnwindInfo, zld: *Zld) !void {
if (zld.getSectionByName("__TEXT", "__unwind_info") == null) return;

const cpu_arch = zld.options.target.cpu.arch;

var records = std.ArrayList(macho.compact_unwind_entry).init(info.gpa);
defer records.deinit();

var atom_indexes = std.ArrayList(AtomIndex).init(info.gpa);
defer atom_indexes.deinit();

// TODO handle dead stripping
for (zld.objects.items) |*object, object_id| {
log.debug("collecting unwind records in {s} ({d})", .{ object.name, object_id });
const unwind_records = object.getUnwindRecords();

// The unwind records do not have to cover every symbol in the executable section,
// so we need to insert the missing ones ourselves.
try records.ensureUnusedCapacity(object.exec_atoms.items.len);
try atom_indexes.ensureUnusedCapacity(object.exec_atoms.items.len);

for (object.exec_atoms.items) |atom_index| {
var record = if (object.unwind_records_lookup.get(atom_index)) |record_id| blk: {
if (object.unwind_relocs_lookup[record_id].dead) continue;
var record = unwind_records[record_id];

if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
} else {
if (getPersonalityFunctionReloc(
zld,
@intCast(u32, object_id),
record_id,
)) |rel| {
const target = parseRelocTarget(
zld,
@intCast(u32, object_id),
rel,
mem.asBytes(&record),
@intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
);
const personality_index = info.getPersonalityFunction(target) orelse inner: {
const personality_index = info.personalities_count;
info.personalities[personality_index] = target;
info.personalities_count += 1;
break :inner personality_index;
};

record.personalityFunction = personality_index + 1;
UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1);
}

if (getLsdaReloc(zld, @intCast(u32, object_id), record_id)) |rel| {
const target = parseRelocTarget(
zld,
@intCast(u32, object_id),
rel,
mem.asBytes(&record),
@intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
);
record.lsda = @bitCast(u64, target);
}
}
break :blk record;
} else blk: {
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
if (sym.n_desc == N_DEAD) continue;

if (!object.hasUnwindRecords()) {
if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue;
var record = nullRecord();
try info.collectPersonalityFromDwarf(zld, @intCast(u32, object_id), atom_index, &record);
switch (cpu_arch) {
.aarch64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_ARM64_MODE.DWARF),
.x86_64 => UnwindEncoding.setMode(&record.compactUnwindEncoding, macho.UNWIND_X86_64_MODE.DWARF),
else => unreachable,
}
break :blk record;
}
}

break :blk nullRecord();
};

const atom = zld.getAtom(atom_index);
const sym_loc = atom.getSymbolWithLoc();
const sym = zld.getSymbol(sym_loc);
assert(sym.n_desc != N_DEAD);
record.rangeStart = sym.n_value;
record.rangeLength = @intCast(u32, atom.size);

records.appendAssumeCapacity(record);
atom_indexes.appendAssumeCapacity(atom_index);
}
}

// Fold records
try info.records.ensureTotalCapacity(info.gpa, records.items.len);
try info.records_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, atom_indexes.items.len));

var maybe_prev: ?macho.compact_unwind_entry = null;
for (records.items) |record, i| {
const record_id = blk: {
if (maybe_prev) |prev| {
const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
if (is_dwarf or
(prev.compactUnwindEncoding != record.compactUnwindEncoding) or
(prev.personalityFunction != record.personalityFunction) or
record.lsda > 0)
{
const record_id = @intCast(RecordIndex, info.records.items.len);
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
} else {
break :blk @intCast(RecordIndex, info.records.items.len - 1);
}
} else {
const record_id = @intCast(RecordIndex, info.records.items.len);
info.records.appendAssumeCapacity(record);
maybe_prev = record;
break :blk record_id;
}
};
info.records_lookup.putAssumeCapacityNoClobber(atom_indexes.items[i], record_id);
}

// Calculate common encodings
{
const CommonEncWithCount = struct {
enc: macho.compact_unwind_encoding_t,
count: u32,

fn greaterThan(ctx: void, lhs: @This(), rhs: @This()) bool {
_ = ctx;
return lhs.count > rhs.count;
}
};

const Context = struct {
pub fn hash(ctx: @This(), key: macho.compact_unwind_encoding_t) u32 {
_ = ctx;
return key;
}

pub fn eql(
ctx: @This(),
key1: macho.compact_unwind_encoding_t,
key2: macho.compact_unwind_encoding_t,
b_index: usize,
) bool {
_ = ctx;
_ = b_index;
return key1 == key2;
}
};

var common_encodings_counts = std.ArrayHashMap(
macho.compact_unwind_encoding_t,
CommonEncWithCount,
Context,
false,
).init(info.gpa);
defer common_encodings_counts.deinit();

for (info.records.items) |record| {
assert(!isNull(record));
if (UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) continue;
const enc = record.compactUnwindEncoding;
const gop = try common_encodings_counts.getOrPut(enc);
if (!gop.found_existing) {
gop.value_ptr.* = .{
.enc = enc,
.count = 0,
};
}
gop.value_ptr.count += 1;
}

var slice = common_encodings_counts.values();
std.sort.sort(CommonEncWithCount, slice, {}, CommonEncWithCount.greaterThan);

var i: u7 = 0;
while (i < slice.len) : (i += 1) {
if (i >= max_common_encodings) break;
if (slice[i].count < 2) continue;
info.appendCommonEncoding(slice[i].enc);
log.debug("adding common encoding: {d} => 0x{x:0>8}", .{ i, slice[i].enc });
}
}

// Compute page allocations
{
var i: u32 = 0;
while (i < info.records.items.len) {
const range_start_max: u64 =
info.records.items[i].rangeStart + compressed_entry_func_offset_mask;
var encoding_count: u9 = info.common_encodings_count;
var space_left: u32 = second_level_page_words -
@sizeOf(macho.unwind_info_compressed_second_level_page_header) / @sizeOf(u32);
var page = Page{
.kind = undefined,
.start = i,
.count = 0,
};

while (space_left >= 1 and i < info.records.items.len) {
const record = info.records.items[i];
const enc = record.compactUnwindEncoding;
const is_dwarf = UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);

if (record.rangeStart >= range_start_max) {
break;
} else if (info.getCommonEncoding(enc) != null or
page.getPageEncoding(info, enc) != null and !is_dwarf)
{
i += 1;
space_left -= 1;
} else if (space_left >= 2 and encoding_count < max_compact_encodings) {
page.appendPageEncoding(i);
i += 1;
space_left -= 2;
encoding_count += 1;
} else {
break;
}
}

page.count = @intCast(u16, i - page.start);

if (i < info.records.items.len and page.count < max_regular_second_level_entries) {
page.kind = .regular;
page.count = @intCast(u16, @min(
max_regular_second_level_entries,
info.records.items.len - page.start,
));
i = page.start + page.count;
} else {
page.kind = .compressed;
}

log.debug("{}", .{page.fmtDebug(info)});

try info.pages.append(info.gpa, page);
}
}

// Save indices of records requiring LSDA relocation
try info.lsdas_lookup.ensureTotalCapacity(info.gpa, @intCast(u32, info.records.items.len));
for (info.records.items) |rec, i| {
info.lsdas_lookup.putAssumeCapacityNoClobber(@intCast(RecordIndex, i), @intCast(u32, info.lsdas.items.len));
if (rec.lsda == 0) continue;
try info.lsdas.append(info.gpa, @intCast(RecordIndex, i));
}
}

fn collectPersonalityFromDwarf(
info: *UnwindInfo,
zld: *Zld,
object_id: u32,
atom_index: u32,
record: *macho.compact_unwind_entry,
) !void {
const object = &zld.objects.items[object_id];
var it = object.getEhFrameRecordsIterator();
const fde_offset = object.eh_frame_records_lookup.get(atom_index).?;
it.seekTo(fde_offset);
const fde = (try it.next()).?;
const cie_ptr = fde.getCiePointer();
const cie_offset = fde_offset + 4 - cie_ptr;
it.seekTo(cie_offset);
const cie = (try it.next()).?;

if (cie.getPersonalityPointerReloc(
zld,
@intCast(u32, object_id),
cie_offset,
)) |target| {
const personality_index = info.getPersonalityFunction(target) orelse inner: {
const personality_index = info.personalities_count;
info.personalities[personality_index] = target;
info.personalities_count += 1;
break :inner personality_index;
};

record.personalityFunction = personality_index + 1;
UnwindEncoding.setPersonalityIndex(&record.compactUnwindEncoding, personality_index + 1);
}
}

pub fn calcSectionSize(info: UnwindInfo, zld: *Zld) !void {
const sect_id = zld.getSectionByName("__TEXT", "__unwind_info") orelse return;
const sect = &zld.sections.items(.header)[sect_id];
sect.@"align" = 2;
sect.size = info.calcRequiredSize();
}

fn calcRequiredSize(info: UnwindInfo) usize {
var total_size: usize = 0;
total_size += @sizeOf(macho.unwind_info_section_header);
total_size +=
@intCast(usize, info.common_encodings_count) * @sizeOf(macho.compact_unwind_encoding_t);
total_size += @intCast(usize, info.personalities_count) * @sizeOf(u32);
total_size += (info.pages.items.len + 1) * @sizeOf(macho.unwind_info_section_header_index_entry);
total_size += info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry);
total_size += info.pages.items.len * second_level_page_bytes;
return total_size;
}

pub fn write(info: *UnwindInfo, zld: *Zld) !void {
const sect_id = zld.getSectionByName("__TEXT", "__unwind_info") orelse return;
const sect = &zld.sections.items(.header)[sect_id];
const seg_id = zld.sections.items(.segment_index)[sect_id];
const seg = zld.segments.items[seg_id];

const text_sect_id = zld.getSectionByName("__TEXT", "__text").?;
const text_sect = zld.sections.items(.header)[text_sect_id];

var personalities: [max_personalities]u32 = undefined;
const cpu_arch = zld.options.target.cpu.arch;

log.debug("Personalities:", .{});
for (info.personalities[0..info.personalities_count]) |target, i| {
const atom_index = zld.getGotAtomIndexForSymbol(target).?;
const atom = zld.getAtom(atom_index);
const sym = zld.getSymbol(atom.getSymbolWithLoc());
personalities[i] = @intCast(u32, sym.n_value - seg.vmaddr);
log.debug(" {d}: 0x{x} ({s})", .{ i, personalities[i], zld.getSymbolName(target) });
}

for (info.records.items) |*rec| {
// Finalize missing address values
rec.rangeStart += text_sect.addr - seg.vmaddr;
if (rec.personalityFunction > 0) {
const index = math.cast(usize, rec.personalityFunction - 1) orelse return error.Overflow;
rec.personalityFunction = personalities[index];
}

if (rec.compactUnwindEncoding > 0 and !UnwindEncoding.isDwarf(rec.compactUnwindEncoding, cpu_arch)) {
const lsda_target = @bitCast(SymbolWithLoc, rec.lsda);
if (lsda_target.getFile()) |_| {
const sym = zld.getSymbol(lsda_target);
rec.lsda = sym.n_value - seg.vmaddr;
}
}
}

for (info.records.items) |record, i| {
log.debug("Unwind record at offset 0x{x}", .{i * @sizeOf(macho.compact_unwind_entry)});
log.debug(" start: 0x{x}", .{record.rangeStart});
log.debug(" length: 0x{x}", .{record.rangeLength});
log.debug(" compact encoding: 0x{x:0>8}", .{record.compactUnwindEncoding});
log.debug(" personality: 0x{x}", .{record.personalityFunction});
log.debug(" LSDA: 0x{x}", .{record.lsda});
}

var buffer = std.ArrayList(u8).init(info.gpa);
defer buffer.deinit();

const size = info.calcRequiredSize();
try buffer.ensureTotalCapacityPrecise(size);

var cwriter = std.io.countingWriter(buffer.writer());
const writer = cwriter.writer();

const common_encodings_offset: u32 = @sizeOf(macho.unwind_info_section_header);
const common_encodings_count: u32 = info.common_encodings_count;
const personalities_offset: u32 = common_encodings_offset + common_encodings_count * @sizeOf(u32);
const personalities_count: u32 = info.personalities_count;
const indexes_offset: u32 = personalities_offset + personalities_count * @sizeOf(u32);
const indexes_count: u32 = @intCast(u32, info.pages.items.len + 1);

try writer.writeStruct(macho.unwind_info_section_header{
.commonEncodingsArraySectionOffset = common_encodings_offset,
.commonEncodingsArrayCount = common_encodings_count,
.personalityArraySectionOffset = personalities_offset,
.personalityArrayCount = personalities_count,
.indexSectionOffset = indexes_offset,
.indexCount = indexes_count,
});

try writer.writeAll(mem.sliceAsBytes(info.common_encodings[0..info.common_encodings_count]));
try writer.writeAll(mem.sliceAsBytes(personalities[0..info.personalities_count]));

const pages_base_offset = @intCast(u32, size - (info.pages.items.len * second_level_page_bytes));
const lsda_base_offset = @intCast(u32, pages_base_offset -
(info.lsdas.items.len * @sizeOf(macho.unwind_info_section_header_lsda_index_entry)));
for (info.pages.items) |page, i| {
assert(page.count > 0);
const first_entry = info.records.items[page.start];
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = @intCast(u32, first_entry.rangeStart),
.secondLevelPagesSectionOffset = @intCast(u32, pages_base_offset + i * second_level_page_bytes),
.lsdaIndexArraySectionOffset = lsda_base_offset +
info.lsdas_lookup.get(page.start).? * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});
}

const last_entry = info.records.items[info.records.items.len - 1];
const sentinel_address = @intCast(u32, last_entry.rangeStart + last_entry.rangeLength);
try writer.writeStruct(macho.unwind_info_section_header_index_entry{
.functionOffset = sentinel_address,
.secondLevelPagesSectionOffset = 0,
.lsdaIndexArraySectionOffset = lsda_base_offset +
@intCast(u32, info.lsdas.items.len) * @sizeOf(macho.unwind_info_section_header_lsda_index_entry),
});

for (info.lsdas.items) |record_id| {
const record = info.records.items[record_id];
try writer.writeStruct(macho.unwind_info_section_header_lsda_index_entry{
.functionOffset = @intCast(u32, record.rangeStart),
.lsdaOffset = @intCast(u32, record.lsda),
});
}

for (info.pages.items) |page| {
const start = cwriter.bytes_written;
try page.write(info, writer);
const nwritten = cwriter.bytes_written - start;
if (nwritten < second_level_page_bytes) {
const offset = math.cast(usize, second_level_page_bytes - nwritten) orelse return error.Overflow;
try writer.writeByteNTimes(0, offset);
}
}

const padding = buffer.items.len - cwriter.bytes_written;
if (padding > 0) {
const offset = math.cast(usize, cwriter.bytes_written) orelse return error.Overflow;
mem.set(u8, buffer.items[offset..], 0);
}

try zld.file.pwriteAll(buffer.items, sect.offset);
}

pub fn parseRelocTarget(
zld: *Zld,
object_id: u32,
rel: macho.relocation_info,
code: []const u8,
base_offset: i32,
) SymbolWithLoc {
const tracy = trace(@src());
defer tracy.end();

const object = &zld.objects.items[object_id];

const sym_index = if (rel.r_extern == 0) blk: {
const sect_id = @intCast(u8, rel.r_symbolnum - 1);
const rel_offset = @intCast(u32, rel.r_address - base_offset);
assert(rel.r_pcrel == 0 and rel.r_length == 3);
const address_in_section = mem.readIntLittle(u64, code[rel_offset..][0..8]);
const sym_index = object.getSymbolByAddress(address_in_section, sect_id);
break :blk sym_index;
} else object.reverse_symtab_lookup[rel.r_symbolnum];

const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id + 1 };
const sym = zld.getSymbol(sym_loc);

if (sym.sect() and !sym.ext()) {
// Make sure we are not dealing with a local alias.
const atom_index = object.getAtomIndexForSymbol(sym_index) orelse
return sym_loc;
const atom = zld.getAtom(atom_index);
return atom.getSymbolWithLoc();
} else if (object.getGlobal(sym_index)) |global_index| {
return zld.globals.items[global_index];
} else return sym_loc;
}

fn getRelocs(
zld: *Zld,
object_id: u32,
record_id: usize,
) []align(1) const macho.relocation_info {
const object = &zld.objects.items[object_id];
assert(object.hasUnwindRecords());
const rel_pos = object.unwind_relocs_lookup[record_id].reloc;
const relocs = object.getRelocs(object.unwind_info_sect.?);
return relocs[rel_pos.start..][0..rel_pos.len];
}

fn isPersonalityFunction(record_id: usize, rel: macho.relocation_info) bool {
const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 16;
}

pub fn getPersonalityFunctionReloc(
zld: *Zld,
object_id: u32,
record_id: usize,
) ?macho.relocation_info {
const relocs = getRelocs(zld, object_id, record_id);
for (relocs) |rel| {
if (isPersonalityFunction(record_id, rel)) return rel;
}
return null;
}

fn getPersonalityFunction(info: UnwindInfo, global_index: SymbolWithLoc) ?u2 {
comptime var index: u2 = 0;
inline while (index < max_personalities) : (index += 1) {
if (index >= info.personalities_count) return null;
if (info.personalities[index].eql(global_index)) {
return index;
}
}
return null;
}

fn isLsda(record_id: usize, rel: macho.relocation_info) bool {
const base_offset = @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry));
const rel_offset = rel.r_address - base_offset;
return rel_offset == 24;
}

pub fn getLsdaReloc(zld: *Zld, object_id: u32, record_id: usize) ?macho.relocation_info {
const relocs = getRelocs(zld, object_id, record_id);
for (relocs) |rel| {
if (isLsda(record_id, rel)) return rel;
}
return null;
}

pub fn isNull(rec: macho.compact_unwind_entry) bool {
return rec.rangeStart == 0 and
rec.rangeLength == 0 and
rec.compactUnwindEncoding == 0 and
rec.lsda == 0 and
rec.personalityFunction == 0;
}

inline fn nullRecord() macho.compact_unwind_entry {
return .{
.rangeStart = 0,
.rangeLength = 0,
.compactUnwindEncoding = 0,
.personalityFunction = 0,
.lsda = 0,
};
}

fn appendCommonEncoding(info: *UnwindInfo, enc: macho.compact_unwind_encoding_t) void {
assert(info.common_encodings_count <= max_common_encodings);
info.common_encodings[info.common_encodings_count] = enc;
info.common_encodings_count += 1;
}

fn getCommonEncoding(info: UnwindInfo, enc: macho.compact_unwind_encoding_t) ?u7 {
comptime var index: u7 = 0;
inline while (index < max_common_encodings) : (index += 1) {
if (index >= info.common_encodings_count) return null;
if (info.common_encodings[index] == enc) {
return index;
}
}
return null;
}

pub const UnwindEncoding = struct {
pub fn getMode(enc: macho.compact_unwind_encoding_t) u4 {
comptime assert(macho.UNWIND_ARM64_MODE_MASK == macho.UNWIND_X86_64_MODE_MASK);
return @truncate(u4, (enc & macho.UNWIND_ARM64_MODE_MASK) >> 24);
}

pub fn isDwarf(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) bool {
const mode = getMode(enc);
return switch (cpu_arch) {
.aarch64 => @intToEnum(macho.UNWIND_ARM64_MODE, mode) == .DWARF,
.x86_64 => @intToEnum(macho.UNWIND_X86_64_MODE, mode) == .DWARF,
else => unreachable,
};
}

pub fn setMode(enc: *macho.compact_unwind_encoding_t, mode: anytype) void {
enc.* |= @intCast(u32, @enumToInt(mode)) << 24;
}

pub fn hasLsda(enc: macho.compact_unwind_encoding_t) bool {
const has_lsda = @truncate(u1, (enc & macho.UNWIND_HAS_LSDA) >> 31);
return has_lsda == 1;
}

pub fn setHasLsda(enc: *macho.compact_unwind_encoding_t, has_lsda: bool) void {
const mask = @intCast(u32, @boolToInt(has_lsda)) << 31;
enc.* |= mask;
}

pub fn getPersonalityIndex(enc: macho.compact_unwind_encoding_t) u2 {
const index = @truncate(u2, (enc & macho.UNWIND_PERSONALITY_MASK) >> 28);
return index;
}

pub fn setPersonalityIndex(enc: *macho.compact_unwind_encoding_t, index: u2) void {
const mask = @intCast(u32, index) << 28;
enc.* |= mask;
}

pub fn getDwarfSectionOffset(enc: macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch) u24 {
assert(isDwarf(enc, cpu_arch));
const offset = @truncate(u24, enc);
return offset;
}

pub fn setDwarfSectionOffset(enc: *macho.compact_unwind_encoding_t, cpu_arch: std.Target.Cpu.Arch, offset: u24) void {
assert(isDwarf(enc.*, cpu_arch));
enc.* |= offset;
}
};
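
The UnwindEncoding helpers above all carve fields out of the same 32-bit compact encoding: bit 31 is the has-LSDA flag, bits 29:28 the personality index, bits 27:24 the mode, and, for DWARF encodings, bits 23:0 the eh_frame section offset. A small self-contained sketch of that layout using the same shifts (the constant is made up):

const std = @import("std");

test "compact unwind encoding field layout" {
    const enc: u32 = (1 << 31) | (2 << 28) | (4 << 24) | 0x000123;
    try std.testing.expectEqual(@as(u1, 1), @truncate(u1, enc >> 31)); // has-LSDA
    try std.testing.expectEqual(@as(u2, 2), @truncate(u2, enc >> 28)); // personality index
    try std.testing.expectEqual(@as(u4, 4), @truncate(u4, enc >> 24)); // mode
    try std.testing.expectEqual(@as(u24, 0x123), @truncate(u24, enc)); // DWARF offset
}
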
@ -29,11 +29,11 @@ const Zld = @import("zld.zig").Zld;
/// a stub trampoline, it can be found in the linker's `locals` arraylist.
sym_index: u32,

/// -1 means an Atom is a synthetic Atom such as a GOT cell defined by the linker.
/// Otherwise, it is the index into the appropriate object file.
/// 0 means an Atom is a synthetic Atom such as a GOT cell defined by the linker.
/// Otherwise, it is the index into the appropriate object file (indexing from 1).
/// Prefer using `getFile()` helper to get the file index out rather than using
/// the field directly.
file: i32,
file: u32,

/// If this Atom is not a synthetic Atom, i.e., references a subsection in an
/// Object file, `inner_sym_index` and `inner_nsyms_trailing` tell where and if
@ -51,13 +44,6 @@ size: u64,
/// For instance, alignment of 0 should be read as 2^0 = 1 byte aligned.
alignment: u32,

/// Cached index and length into the relocations records array that correspond to
/// this Atom and need to be resolved before the Atom can be committed into the
/// final linked image.
/// Do not use these fields directly. Instead, use `getAtomRelocs()` helper.
cached_relocs_start: i32,
cached_relocs_len: u32,

/// Points to the previous and next neighbours
next_index: ?AtomIndex,
prev_index: ?AtomIndex,
@ -66,20 +59,18 @@ pub const empty = Atom{
.sym_index = 0,
.inner_sym_index = 0,
.inner_nsyms_trailing = 0,
.file = -1,
.file = 0,
.size = 0,
.alignment = 0,
.cached_relocs_start = -1,
.cached_relocs_len = 0,
.prev_index = null,
.next_index = null,
};

/// Returns `null` if the Atom is a synthetic Atom.
/// Otherwise, returns an index into an array of Objects.
pub inline fn getFile(self: Atom) ?u31 {
if (self.file == -1) return null;
return @intCast(u31, self.file);
pub fn getFile(self: Atom) ?u32 {
if (self.file == 0) return null;
return self.file - 1;
}

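The switch of the file field from i32 with -1 to u32 with 0 as the synthetic-atom sentinel means stored values are now object indices plus one. A minimal standalone sketch mirroring the new getFile above:

const std = @import("std");

// Mirrors the new Atom.getFile: 0 is the synthetic-atom sentinel, otherwise
// the stored value is the object index plus one.
fn getFileIndex(file: u32) ?u32 {
    if (file == 0) return null;
    return file - 1;
}

test "file field maps zero to null and shifts object indices by one" {
    try std.testing.expect(getFileIndex(0) == null);
    try std.testing.expectEqual(@as(?u32, 0), getFileIndex(1));
    try std.testing.expectEqual(@as(?u32, 41), getFileIndex(42));
}
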
pub inline fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
|
||||
@ -92,7 +83,7 @@ pub inline fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
|
||||
const InnerSymIterator = struct {
|
||||
sym_index: u32,
|
||||
count: u32,
|
||||
file: i32,
|
||||
file: u32,
|
||||
|
||||
pub fn next(it: *@This()) ?SymbolWithLoc {
|
||||
if (it.count == 0) return null;
|
||||
@ -159,19 +150,14 @@ pub fn calcInnerSymbolOffset(zld: *Zld, atom_index: AtomIndex, sym_index: u32) u
|
||||
return source_sym.n_value - base_addr;
|
||||
}
|
||||
|
||||
pub fn scanAtomRelocs(
|
||||
zld: *Zld,
|
||||
atom_index: AtomIndex,
|
||||
relocs: []align(1) const macho.relocation_info,
|
||||
reverse_lookup: []u32,
|
||||
) !void {
|
||||
pub fn scanAtomRelocs(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
|
||||
const arch = zld.options.target.cpu.arch;
|
||||
const atom = zld.getAtom(atom_index);
|
||||
    assert(atom.getFile() != null); // synthetic atoms do not have relocs

    return switch (arch) {
        .aarch64 => scanAtomRelocsArm64(zld, atom_index, relocs, reverse_lookup),
        .x86_64 => scanAtomRelocsX86(zld, atom_index, relocs, reverse_lookup),
        .aarch64 => scanAtomRelocsArm64(zld, atom_index, relocs),
        .x86_64 => scanAtomRelocsX86(zld, atom_index, relocs),
        else => unreachable,
    };
}
@ -202,16 +188,11 @@ pub fn getRelocContext(zld: *Zld, atom_index: AtomIndex) RelocContext {
    };
}

pub fn parseRelocTarget(
    zld: *Zld,
    atom_index: AtomIndex,
    rel: macho.relocation_info,
    reverse_lookup: []u32,
) SymbolWithLoc {
pub fn parseRelocTarget(zld: *Zld, atom_index: AtomIndex, rel: macho.relocation_info) SymbolWithLoc {
    const atom = zld.getAtom(atom_index);
    const object = &zld.objects.items[atom.getFile().?];

    if (rel.r_extern == 0) {
    const sym_index = if (rel.r_extern == 0) sym_index: {
        const sect_id = @intCast(u8, rel.r_symbolnum - 1);
        const ctx = getRelocContext(zld, atom_index);
        const atom_code = getAtomCode(zld, atom_index);
@ -219,9 +200,9 @@ pub fn parseRelocTarget(

        const address_in_section = if (rel.r_pcrel == 0) blk: {
            break :blk if (rel.r_length == 3)
                mem.readIntLittle(i64, atom_code[rel_offset..][0..8])
                mem.readIntLittle(u64, atom_code[rel_offset..][0..8])
            else
                mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
                mem.readIntLittle(u32, atom_code[rel_offset..][0..4]);
        } else blk: {
            const correction: u3 = switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
                .X86_64_RELOC_SIGNED => 0,
@ -232,38 +213,14 @@ pub fn parseRelocTarget(
            };
            const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
            const target_address = @intCast(i64, ctx.base_addr) + rel.r_address + 4 + correction + addend;
            break :blk target_address;
            break :blk @intCast(u64, target_address);
        };

        // Find containing atom
        const Predicate = struct {
            addr: i64,
        const sym_index = object.getSymbolByAddress(address_in_section, sect_id);
        break :sym_index sym_index;
    } else object.reverse_symtab_lookup[rel.r_symbolnum];

            pub fn predicate(pred: @This(), other: i64) bool {
                return if (other == -1) true else other > pred.addr;
            }
        };

        if (object.source_section_index_lookup[sect_id] > -1) {
            const first_sym_index = @intCast(usize, object.source_section_index_lookup[sect_id]);
            const target_sym_index = @import("zld.zig").lsearch(i64, object.source_address_lookup[first_sym_index..], Predicate{
                .addr = address_in_section,
            });

            if (target_sym_index > 0) {
                return SymbolWithLoc{
                    .sym_index = @intCast(u32, first_sym_index + target_sym_index - 1),
                    .file = atom.file,
                };
            }
        }

        // Start of section is not contained anywhere, return synthetic atom.
        const sym_index = object.getSectionAliasSymbolIndex(sect_id);
        return SymbolWithLoc{ .sym_index = sym_index, .file = atom.file };
    }

    const sym_index = reverse_lookup[rel.r_symbolnum];
    const sym_loc = SymbolWithLoc{
        .sym_index = sym_index,
        .file = atom.file,
@ -272,30 +229,12 @@ pub fn parseRelocTarget(

    if (sym.sect() and !sym.ext()) {
        return sym_loc;
    } else if (object.globals_lookup[sym_index] > -1) {
        const global_index = @intCast(u32, object.globals_lookup[sym_index]);
    } else if (object.getGlobal(sym_index)) |global_index| {
        return zld.globals.items[global_index];
    } else return sym_loc;
}

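The r_extern == 0 path above recovers the target address straight from the instruction bytes. As a compact illustration (an editorial sketch under the same assumptions as the code above, using the file's `mem` and `macho` imports; it is not part of the commit), the x86_64 arithmetic boils down to:

fn sectionTargetAddress(atom_code: []const u8, rel: macho.relocation_info, base_addr: u64, rel_offset: u32) u64 {
    if (rel.r_pcrel == 0) {
        // Absolute: the instruction stream holds the address itself.
        return if (rel.r_length == 3)
            mem.readIntLittle(u64, atom_code[rel_offset..][0..8])
        else
            mem.readIntLittle(u32, atom_code[rel_offset..][0..4]);
    }
    // PC-relative: the stored i32 is an addend relative to the end of the
    // 4-byte fixup (X86_64_RELOC_SIGNED_{1,2,4} add a further correction).
    const addend = mem.readIntLittle(i32, atom_code[rel_offset..][0..4]);
    return @intCast(u64, @intCast(i64, base_addr) + rel.r_address + 4 + addend);
}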
pub fn getRelocTargetAtomIndex(zld: *Zld, rel: macho.relocation_info, target: SymbolWithLoc) ?AtomIndex {
    const is_via_got = got: {
        switch (zld.options.target.cpu.arch) {
            .aarch64 => break :got switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
                .ARM64_RELOC_GOT_LOAD_PAGE21,
                .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
                .ARM64_RELOC_POINTER_TO_GOT,
                => true,
                else => false,
            },
            .x86_64 => break :got switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
                .X86_64_RELOC_GOT, .X86_64_RELOC_GOT_LOAD => true,
                else => false,
            },
            else => unreachable,
        }
    };

pub fn getRelocTargetAtomIndex(zld: *Zld, target: SymbolWithLoc, is_via_got: bool) ?AtomIndex {
    if (is_via_got) {
        return zld.getGotAtomIndexForSymbol(target).?; // panic means fatal error
    }
@ -314,12 +253,7 @@ pub fn getRelocTargetAtomIndex(zld: *Zld, rel: macho.relocation_info, target: Sy
    return object.getAtomIndexForSymbol(target.sym_index);
}

fn scanAtomRelocsArm64(
    zld: *Zld,
    atom_index: AtomIndex,
    relocs: []align(1) const macho.relocation_info,
    reverse_lookup: []u32,
) !void {
fn scanAtomRelocsArm64(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
    for (relocs) |rel| {
        const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);

@ -332,7 +266,7 @@ fn scanAtomRelocsArm64(

        const atom = zld.getAtom(atom_index);
        const object = &zld.objects.items[atom.getFile().?];
        const sym_index = reverse_lookup[rel.r_symbolnum];
        const sym_index = object.reverse_symtab_lookup[rel.r_symbolnum];
        const sym_loc = SymbolWithLoc{
            .sym_index = sym_index,
            .file = atom.file,
@ -341,10 +275,10 @@ fn scanAtomRelocsArm64(

        if (sym.sect() and !sym.ext()) continue;

        const target = if (object.globals_lookup[sym_index] > -1) blk: {
            const global_index = @intCast(u32, object.globals_lookup[sym_index]);
            break :blk zld.globals.items[global_index];
        } else sym_loc;
        const target = if (object.getGlobal(sym_index)) |global_index|
            zld.globals.items[global_index]
        else
            sym_loc;

        switch (rel_type) {
            .ARM64_RELOC_BRANCH26 => {
@ -368,12 +302,7 @@ fn scanAtomRelocsArm64(
    }
}

fn scanAtomRelocsX86(
    zld: *Zld,
    atom_index: AtomIndex,
    relocs: []align(1) const macho.relocation_info,
    reverse_lookup: []u32,
) !void {
fn scanAtomRelocsX86(zld: *Zld, atom_index: AtomIndex, relocs: []align(1) const macho.relocation_info) !void {
    for (relocs) |rel| {
        const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);

@ -386,7 +315,7 @@ fn scanAtomRelocsX86(

        const atom = zld.getAtom(atom_index);
        const object = &zld.objects.items[atom.getFile().?];
        const sym_index = reverse_lookup[rel.r_symbolnum];
        const sym_index = object.reverse_symtab_lookup[rel.r_symbolnum];
        const sym_loc = SymbolWithLoc{
            .sym_index = sym_index,
            .file = atom.file,
@ -395,10 +324,10 @@ fn scanAtomRelocsX86(

        if (sym.sect() and !sym.ext()) continue;

        const target = if (object.globals_lookup[sym_index] > -1) blk: {
            const global_index = @intCast(u32, object.globals_lookup[sym_index]);
            break :blk zld.globals.items[global_index];
        } else sym_loc;
        const target = if (object.getGlobal(sym_index)) |global_index|
            zld.globals.items[global_index]
        else
            sym_loc;

        switch (rel_type) {
            .X86_64_RELOC_BRANCH => {
@ -432,7 +361,7 @@ fn addTlvPtrEntry(zld: *Zld, target: SymbolWithLoc) !void {
    try zld.tlv_ptr_table.putNoClobber(gpa, target, tlv_ptr_index);
}

fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void {
pub fn addGotEntry(zld: *Zld, target: SymbolWithLoc) !void {
    if (zld.got_table.contains(target)) return;
    const gpa = zld.gpa;
    const atom_index = try zld.createGotAtom();
@ -466,7 +395,6 @@ pub fn resolveRelocs(
    atom_index: AtomIndex,
    atom_code: []u8,
    atom_relocs: []align(1) const macho.relocation_info,
    reverse_lookup: []u32,
) !void {
    const arch = zld.options.target.cpu.arch;
    const atom = zld.getAtom(atom_index);
@ -480,14 +408,14 @@ pub fn resolveRelocs(
    const ctx = getRelocContext(zld, atom_index);

    return switch (arch) {
        .aarch64 => resolveRelocsArm64(zld, atom_index, atom_code, atom_relocs, reverse_lookup, ctx),
        .x86_64 => resolveRelocsX86(zld, atom_index, atom_code, atom_relocs, reverse_lookup, ctx),
        .aarch64 => resolveRelocsArm64(zld, atom_index, atom_code, atom_relocs, ctx),
        .x86_64 => resolveRelocsX86(zld, atom_index, atom_code, atom_relocs, ctx),
        else => unreachable,
    };
}

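In caller terms, the scan phase above reduces for GOT handling to the following condensed sketch (editorial, not part of the commit; relocRequiresGot is defined later in this file):

fn scanForGotEntries(zld: *Zld, atom_index: AtomIndex) !void {
    const relocs = getAtomRelocs(zld, atom_index);
    for (relocs) |rel| {
        if (!relocRequiresGot(zld, rel)) continue;
        // Every GOT-going relocation gets its entry created up front,
        // before any addresses are assigned.
        const target = parseRelocTarget(zld, atom_index, rel);
        try addGotEntry(zld, target);
    }
}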
pub fn getRelocTargetAddress(zld: *Zld, rel: macho.relocation_info, target: SymbolWithLoc, is_tlv: bool) !u64 {
    const target_atom_index = getRelocTargetAtomIndex(zld, rel, target) orelse {
pub fn getRelocTargetAddress(zld: *Zld, target: SymbolWithLoc, is_via_got: bool, is_tlv: bool) !u64 {
    const target_atom_index = getRelocTargetAtomIndex(zld, target, is_via_got) orelse {
        // If there is no atom for target, we still need to check for special, atom-less
        // symbols such as `___dso_handle`.
        const target_name = zld.getSymbolName(target);
@ -499,7 +427,7 @@ pub fn getRelocTargetAddress(zld: *Zld, rel: macho.relocation_info, target: Symb
    log.debug(" | target ATOM(%{d}, '{s}') in object({?})", .{
        target_atom.sym_index,
        zld.getSymbolName(target_atom.getSymbolWithLoc()),
        target_atom.file,
        target_atom.getFile(),
    });

    const target_sym = zld.getSymbol(target_atom.getSymbolWithLoc());
@ -541,7 +469,6 @@ fn resolveRelocsArm64(
    atom_index: AtomIndex,
    atom_code: []u8,
    atom_relocs: []align(1) const macho.relocation_info,
    reverse_lookup: []u32,
    context: RelocContext,
) !void {
    const atom = zld.getAtom(atom_index);
@ -565,20 +492,20 @@ fn resolveRelocsArm64(
            .ARM64_RELOC_SUBTRACTOR => {
                assert(subtractor == null);

                log.debug(" RELA({s}) @ {x} => %{d} in object({d})", .{
                log.debug(" RELA({s}) @ {x} => %{d} in object({?d})", .{
                    @tagName(rel_type),
                    rel.r_address,
                    rel.r_symbolnum,
                    atom.file,
                    atom.getFile(),
                });

                subtractor = parseRelocTarget(zld, atom_index, rel, reverse_lookup);
                subtractor = parseRelocTarget(zld, atom_index, rel);
                continue;
            },
            else => {},
        }

        const target = parseRelocTarget(zld, atom_index, rel, reverse_lookup);
        const target = parseRelocTarget(zld, atom_index, rel);
        const rel_offset = @intCast(u32, rel.r_address - context.base_offset);

        log.debug(" RELA({s}) @ {x} => %{d} ('{s}') in object({?})", .{
@ -586,19 +513,20 @@ fn resolveRelocsArm64(
            rel.r_address,
            target.sym_index,
            zld.getSymbolName(target),
            target.file,
            target.getFile(),
        });

        const source_addr = blk: {
            const source_sym = zld.getSymbol(atom.getSymbolWithLoc());
            break :blk source_sym.n_value + rel_offset;
        };
        const is_via_got = relocRequiresGot(zld, rel);
        const is_tlv = is_tlv: {
            const source_sym = zld.getSymbol(atom.getSymbolWithLoc());
            const header = zld.sections.items(.header)[source_sym.n_sect - 1];
            break :is_tlv header.type() == macho.S_THREAD_LOCAL_VARIABLES;
        };
        const target_addr = try getRelocTargetAddress(zld, rel, target, is_tlv);
        const target_addr = try getRelocTargetAddress(zld, target, is_via_got, is_tlv);

        log.debug(" | source_addr = 0x{x}", .{source_addr});

@ -610,9 +538,9 @@ fn resolveRelocsArm64(
        } else target;
        log.debug(" source {s} (object({?})), target {s} (object({?}))", .{
            zld.getSymbolName(atom.getSymbolWithLoc()),
            atom.file,
            atom.getFile(),
            zld.getSymbolName(target),
            zld.getAtom(getRelocTargetAtomIndex(zld, rel, target).?).file,
            zld.getAtom(getRelocTargetAtomIndex(zld, target, is_via_got).?).getFile(),
        });

        const displacement = if (calcPcRelativeDisplacementArm64(
@ -628,7 +556,7 @@ fn resolveRelocsArm64(
            zld,
            actual_target,
        ).?);
        log.debug(" | target_addr = 0x{x}", .{thunk_sym.n_value});
        log.debug(" | target_addr = 0x{x} (thunk)", .{thunk_sym.n_value});
        break :blk try calcPcRelativeDisplacementArm64(source_addr, thunk_sym.n_value);
    };

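calcPcRelativeDisplacementArm64, used above both for the direct branch and for the thunk fallback, presumably reduces to a range check on the signed byte displacement; an AArch64 B/BL reaches +/-128 MiB. An editorial sketch under that assumption:

fn sketchCalcPcRelativeDisplacementArm64(source_addr: u64, target_addr: u64) error{Overflow}!i28 {
    // B/BL encode a 26-bit word offset, i.e. a signed 28-bit byte offset.
    const disp = @intCast(i64, target_addr) - @intCast(i64, source_addr);
    return math.cast(i28, disp) orelse error.Overflow;
}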
@ -832,7 +760,6 @@ fn resolveRelocsX86(
    atom_index: AtomIndex,
    atom_code: []u8,
    atom_relocs: []align(1) const macho.relocation_info,
    reverse_lookup: []u32,
    context: RelocContext,
) !void {
    const atom = zld.getAtom(atom_index);
@ -847,33 +774,34 @@ fn resolveRelocsX86(
            .X86_64_RELOC_SUBTRACTOR => {
                assert(subtractor == null);

                log.debug(" RELA({s}) @ {x} => %{d} in object({d})", .{
                log.debug(" RELA({s}) @ {x} => %{d} in object({?d})", .{
                    @tagName(rel_type),
                    rel.r_address,
                    rel.r_symbolnum,
                    atom.file,
                    atom.getFile(),
                });

                subtractor = parseRelocTarget(zld, atom_index, rel, reverse_lookup);
                subtractor = parseRelocTarget(zld, atom_index, rel);
                continue;
            },
            else => {},
        }

        const target = parseRelocTarget(zld, atom_index, rel, reverse_lookup);
        const target = parseRelocTarget(zld, atom_index, rel);
        const rel_offset = @intCast(u32, rel.r_address - context.base_offset);

        log.debug(" RELA({s}) @ {x} => %{d} in object({?})", .{
            @tagName(rel_type),
            rel.r_address,
            target.sym_index,
            target.file,
            target.getFile(),
        });

        const source_addr = blk: {
            const source_sym = zld.getSymbol(atom.getSymbolWithLoc());
            break :blk source_sym.n_value + rel_offset;
        };
        const is_via_got = relocRequiresGot(zld, rel);
        const is_tlv = is_tlv: {
            const source_sym = zld.getSymbol(atom.getSymbolWithLoc());
            const header = zld.sections.items(.header)[source_sym.n_sect - 1];
@ -882,7 +810,7 @@ fn resolveRelocsX86(

        log.debug(" | source_addr = 0x{x}", .{source_addr});

        const target_addr = try getRelocTargetAddress(zld, rel, target, is_tlv);
        const target_addr = try getRelocTargetAddress(zld, target, is_via_got, is_tlv);

        switch (rel_type) {
            .X86_64_RELOC_BRANCH => {
@ -1016,9 +944,10 @@ pub fn getAtomCode(zld: *Zld, atom_index: AtomIndex) []const u8 {
}

pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []align(1) const macho.relocation_info {
    const atom = zld.getAtomPtr(atom_index);
    const atom = zld.getAtom(atom_index);
    assert(atom.getFile() != null); // synthetic atoms do not have relocs to look up
    const object = zld.objects.items[atom.getFile().?];
    const cache = object.relocs_lookup[atom.sym_index];

    const source_sect = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
        const source_sect = object.getSourceSection(source_sym.n_sect - 1);
@ -1036,43 +965,7 @@ pub fn getAtomRelocs(zld: *Zld, atom_index: AtomIndex) []align(1) const macho.re
    };

    const relocs = object.getRelocs(source_sect);

    if (atom.cached_relocs_start == -1) {
        const indexes = if (object.getSourceSymbol(atom.sym_index)) |source_sym| blk: {
            const offset = source_sym.n_value - source_sect.addr;
            break :blk filterRelocs(relocs, offset, offset + atom.size);
        } else filterRelocs(relocs, 0, atom.size);
        atom.cached_relocs_start = indexes.start;
        atom.cached_relocs_len = indexes.len;
    }

    return relocs[@intCast(u32, atom.cached_relocs_start)..][0..atom.cached_relocs_len];
}

fn filterRelocs(
    relocs: []align(1) const macho.relocation_info,
    start_addr: u64,
    end_addr: u64,
) struct { start: i32, len: u32 } {
    const Predicate = struct {
        addr: u64,

        pub fn predicate(self: @This(), rel: macho.relocation_info) bool {
            return rel.r_address >= self.addr;
        }
    };
    const LPredicate = struct {
        addr: u64,

        pub fn predicate(self: @This(), rel: macho.relocation_info) bool {
            return rel.r_address < self.addr;
        }
    };

    const start = @import("zld.zig").bsearch(macho.relocation_info, relocs, Predicate{ .addr = end_addr });
    const len = @import("zld.zig").lsearch(macho.relocation_info, relocs[start..], LPredicate{ .addr = start_addr });

    return .{ .start = @intCast(i32, start), .len = @intCast(u32, len) };
    return relocs[cache.start..][0..cache.len];
}

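The removed filterRelocs depended on relocations arriving sorted by r_address in descending order: bsearch skips records at or past end_addr, and lsearch then counts records still inside the window. A self-contained worked example of that contract (editorial):

test "filterRelocs window over descending r_address" {
    const std = @import("std");
    const addrs = [_]i32{ 24, 16, 8, 0 }; // sorted descending
    const start_addr: i32 = 8;
    const end_addr: i32 = 24; // half-open range [8, 24)
    var start: usize = 0;
    while (start < addrs.len and addrs[start] >= end_addr) : (start += 1) {}
    var len: usize = 0;
    while (start + len < addrs.len and addrs[start + len] >= start_addr) : (len += 1) {}
    try std.testing.expectEqual(@as(usize, 1), start); // 24 filtered out
    try std.testing.expectEqual(@as(usize, 2), len); // 16 and 8 survive
}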
pub fn calcPcRelativeDisplacementX86(source_addr: u64, target_addr: u64, correction: u3) error{Overflow}!i32 {
@ -1111,3 +1004,22 @@ pub fn calcPageOffset(target_addr: u64, kind: PageOffsetInstKind) !u12 {
        .load_store_128 => try math.divExact(u12, narrowed, 16),
    };
}

pub fn relocRequiresGot(zld: *Zld, rel: macho.relocation_info) bool {
    switch (zld.options.target.cpu.arch) {
        .aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
            .ARM64_RELOC_GOT_LOAD_PAGE21,
            .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
            .ARM64_RELOC_POINTER_TO_GOT,
            => return true,
            else => return false,
        },
        .x86_64 => switch (@intToEnum(macho.reloc_type_x86_64, rel.r_type)) {
            .X86_64_RELOC_GOT,
            .X86_64_RELOC_GOT_LOAD,
            => return true,
            else => return false,
        },
        else => unreachable,
    }
}

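The x86_64 displacement helper above is applied by writing the result over the 4-byte immediate, so the displacement is measured from the end of the fixup: disp = target - (source + 4 + correction). A tiny editorial check of that identity:

test "x86_64 RIP-relative displacement arithmetic" {
    const std = @import("std");
    const source_addr: i64 = 0x1000;
    const target_addr: i64 = 0x2000;
    const correction: i64 = 0;
    const disp = target_addr - (source_addr + 4 + correction);
    try std.testing.expectEqual(@as(i64, 0xFFC), disp);
}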
@ -2,6 +2,7 @@

const std = @import("std");
const assert = std.debug.assert;
const eh_frame = @import("eh_frame.zig");
const log = std.log.scoped(.dead_strip);
const macho = std.macho;
const math = std.math;
@ -11,13 +12,14 @@ const Allocator = mem.Allocator;
const AtomIndex = @import("zld.zig").AtomIndex;
const Atom = @import("ZldAtom.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const UnwindInfo = @import("UnwindInfo.zig");
const Zld = @import("zld.zig").Zld;

const N_DEAD = @import("zld.zig").N_DEAD;

const AtomTable = std.AutoHashMap(AtomIndex, void);

pub fn gcAtoms(zld: *Zld, reverse_lookups: [][]u32) Allocator.Error!void {
pub fn gcAtoms(zld: *Zld) !void {
    const gpa = zld.gpa;

    var arena = std.heap.ArenaAllocator.init(gpa);
@ -30,7 +32,7 @@ pub fn gcAtoms(zld: *Zld, reverse_lookups: [][]u32) Allocator.Error!void {
    try alive.ensureTotalCapacity(@intCast(u32, zld.atoms.items.len));

    try collectRoots(zld, &roots);
    mark(zld, roots, &alive, reverse_lookups);
    try mark(zld, roots, &alive);
    prune(zld, alive);
}

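AtomTable above is std.AutoHashMap(AtomIndex, void) used as a set: getOrPut gives idempotent insertion and contains is the membership test. A minimal editorial illustration of the idiom used throughout this pass:

test "AtomTable as a set" {
    const std = @import("std");
    var set = std.AutoHashMap(u32, void).init(std.testing.allocator);
    defer set.deinit();
    _ = try set.getOrPut(42); // insert
    _ = try set.getOrPut(42); // idempotent re-insert
    try std.testing.expect(set.contains(42));
    try std.testing.expectEqual(@as(u32, 1), set.count());
}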
@ -45,10 +47,10 @@ fn collectRoots(zld: *Zld, roots: *AtomTable) !void {
            const atom_index = object.getAtomIndexForSymbol(global.sym_index).?; // panic here means fatal error
            _ = try roots.getOrPut(atom_index);

            log.debug("root(ATOM({d}, %{d}, {d}))", .{
            log.debug("root(ATOM({d}, %{d}, {?d}))", .{
                atom_index,
                zld.getAtom(atom_index).sym_index,
                zld.getAtom(atom_index).file,
                zld.getAtom(atom_index).getFile(),
            });
        },
        else => |other| {
@ -63,32 +65,15 @@ fn collectRoots(zld: *Zld, roots: *AtomTable) !void {
            const atom_index = object.getAtomIndexForSymbol(global.sym_index).?; // panic here means fatal error
            _ = try roots.getOrPut(atom_index);

            log.debug("root(ATOM({d}, %{d}, {d}))", .{
            log.debug("root(ATOM({d}, %{d}, {?d}))", .{
                atom_index,
                zld.getAtom(atom_index).sym_index,
                zld.getAtom(atom_index).file,
                zld.getAtom(atom_index).getFile(),
            });
        }
    },
    }

    // TODO just a temp until we learn how to parse unwind records
    for (zld.globals.items) |global| {
        if (mem.eql(u8, "___gxx_personality_v0", zld.getSymbolName(global))) {
            const object = zld.objects.items[global.getFile().?];
            if (object.getAtomIndexForSymbol(global.sym_index)) |atom_index| {
                _ = try roots.getOrPut(atom_index);

                log.debug("root(ATOM({d}, %{d}, {d}))", .{
                    atom_index,
                    zld.getAtom(atom_index).sym_index,
                    zld.getAtom(atom_index).file,
                });
            }
            break;
        }
    }

    for (zld.objects.items) |object| {
        const has_subsections = object.header.flags & macho.MH_SUBSECTIONS_VIA_SYMBOLS != 0;

@ -119,28 +104,23 @@ fn collectRoots(zld: *Zld, roots: *AtomTable) !void {
            if (is_gc_root) {
                try roots.putNoClobber(atom_index, {});

                log.debug("root(ATOM({d}, %{d}, {d}))", .{
                log.debug("root(ATOM({d}, %{d}, {?d}))", .{
                    atom_index,
                    zld.getAtom(atom_index).sym_index,
                    zld.getAtom(atom_index).file,
                    zld.getAtom(atom_index).getFile(),
                });
            }
        }
    }
}

fn markLive(
    zld: *Zld,
    atom_index: AtomIndex,
    alive: *AtomTable,
    reverse_lookups: [][]u32,
) void {
fn markLive(zld: *Zld, atom_index: AtomIndex, alive: *AtomTable) void {
    if (alive.contains(atom_index)) return;

    const atom = zld.getAtom(atom_index);
    const sym_loc = atom.getSymbolWithLoc();

    log.debug("mark(ATOM({d}, %{d}, {d}))", .{ atom_index, sym_loc.sym_index, sym_loc.file });
    log.debug("mark(ATOM({d}, %{d}, {?d}))", .{ atom_index, sym_loc.sym_index, sym_loc.getFile() });

    alive.putAssumeCapacityNoClobber(atom_index, {});

@ -151,14 +131,13 @@ fn markLive(
    if (header.isZerofill()) return;

    const relocs = Atom.getAtomRelocs(zld, atom_index);
    const reverse_lookup = reverse_lookups[atom.getFile().?];
    for (relocs) |rel| {
        const target = switch (cpu_arch) {
            .aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
                .ARM64_RELOC_ADDEND => continue,
                else => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
                else => Atom.parseRelocTarget(zld, atom_index, rel),
            },
            .x86_64 => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
            .x86_64 => Atom.parseRelocTarget(zld, atom_index, rel),
            else => unreachable,
        };
        const target_sym = zld.getSymbol(target);
@ -174,21 +153,21 @@ fn markLive(

        const object = zld.objects.items[target.getFile().?];
        const target_atom_index = object.getAtomIndexForSymbol(target.sym_index).?;
        log.debug(" following ATOM({d}, %{d}, {d})", .{
        log.debug(" following ATOM({d}, %{d}, {?d})", .{
            target_atom_index,
            zld.getAtom(target_atom_index).sym_index,
            zld.getAtom(target_atom_index).file,
            zld.getAtom(target_atom_index).getFile(),
        });

        markLive(zld, target_atom_index, alive, reverse_lookups);
        markLive(zld, target_atom_index, alive);
    }
}

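markLive recurses once per reference edge. An equivalent iterative formulation with an explicit worklist (an editorial sketch, not what this commit does; the zerofill and ADDEND special cases are omitted, and capacity is assumed reserved as in gcAtoms):

fn markLiveIterative(zld: *Zld, root: AtomIndex, alive: *AtomTable, gpa: Allocator) !void {
    var stack = std.ArrayList(AtomIndex).init(gpa);
    defer stack.deinit();
    try stack.append(root);
    while (stack.popOrNull()) |atom_index| {
        if (alive.contains(atom_index)) continue;
        alive.putAssumeCapacityNoClobber(atom_index, {});
        const relocs = Atom.getAtomRelocs(zld, atom_index);
        for (relocs) |rel| {
            const target = Atom.parseRelocTarget(zld, atom_index, rel);
            const object = zld.objects.items[target.getFile() orelse continue];
            const target_atom_index = object.getAtomIndexForSymbol(target.sym_index) orelse continue;
            try stack.append(target_atom_index);
        }
    }
}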
fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable, reverse_lookups: [][]u32) bool {
fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable) bool {
    const atom = zld.getAtom(atom_index);
    const sym_loc = atom.getSymbolWithLoc();

    log.debug("refersLive(ATOM({d}, %{d}, {d}))", .{ atom_index, sym_loc.sym_index, sym_loc.file });
    log.debug("refersLive(ATOM({d}, %{d}, {?d}))", .{ atom_index, sym_loc.sym_index, sym_loc.getFile() });

    const cpu_arch = zld.options.target.cpu.arch;

@ -197,14 +176,13 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable, reverse_lookup
    assert(!header.isZerofill());

    const relocs = Atom.getAtomRelocs(zld, atom_index);
    const reverse_lookup = reverse_lookups[atom.getFile().?];
    for (relocs) |rel| {
        const target = switch (cpu_arch) {
            .aarch64 => switch (@intToEnum(macho.reloc_type_arm64, rel.r_type)) {
                .ARM64_RELOC_ADDEND => continue,
                else => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
                else => Atom.parseRelocTarget(zld, atom_index, rel),
            },
            .x86_64 => Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup),
            .x86_64 => Atom.parseRelocTarget(zld, atom_index, rel),
            else => unreachable,
        };

@ -214,10 +192,10 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable, reverse_lookup
            continue;
        };
        if (alive.contains(target_atom_index)) {
            log.debug(" refers live ATOM({d}, %{d}, {d})", .{
            log.debug(" refers live ATOM({d}, %{d}, {?d})", .{
                target_atom_index,
                zld.getAtom(target_atom_index).sym_index,
                zld.getAtom(target_atom_index).file,
                zld.getAtom(target_atom_index).getFile(),
            });
            return true;
        }
@ -226,10 +204,10 @@ fn refersLive(zld: *Zld, atom_index: AtomIndex, alive: AtomTable, reverse_lookup
    return false;
}

fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable, reverse_lookups: [][]u32) void {
fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable) !void {
    var it = roots.keyIterator();
    while (it.next()) |root| {
        markLive(zld, root.*, alive, reverse_lookups);
        markLive(zld, root.*, alive);
    }

    var loop: bool = true;
@ -251,14 +229,151 @@ fn mark(zld: *Zld, roots: AtomTable, alive: *AtomTable, reverse_lookups: [][]u32
                const source_sect = object.getSourceSection(sect_id);

                if (source_sect.isDontDeadStripIfReferencesLive()) {
                    if (refersLive(zld, atom_index, alive.*, reverse_lookups)) {
                        markLive(zld, atom_index, alive, reverse_lookups);
                    if (refersLive(zld, atom_index, alive.*)) {
                        markLive(zld, atom_index, alive);
                        loop = true;
                    }
                }
            }
        }
    }

    for (zld.objects.items) |_, object_id| {
        // Traverse unwind and eh_frame records noting if the source symbol has been marked, and if so,
        // marking all references as live.
        try markUnwindRecords(zld, @intCast(u32, object_id), alive);
    }
}

fn markUnwindRecords(zld: *Zld, object_id: u32, alive: *AtomTable) !void {
    const object = &zld.objects.items[object_id];
    const cpu_arch = zld.options.target.cpu.arch;

    const unwind_records = object.getUnwindRecords();

    for (object.exec_atoms.items) |atom_index| {
        if (!object.hasUnwindRecords()) {
            if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
                const ptr = object.eh_frame_relocs_lookup.getPtr(fde_offset).?;
                if (ptr.dead) continue; // already marked
                if (!alive.contains(atom_index)) {
                    // Mark dead and continue.
                    ptr.dead = true;
                } else {
                    // Mark references live and continue.
                    try markEhFrameRecord(zld, object_id, atom_index, alive);
                }
                continue;
            }
        }

        const record_id = object.unwind_records_lookup.get(atom_index) orelse continue;
        if (object.unwind_relocs_lookup[record_id].dead) continue; // already marked, nothing to do
        if (!alive.contains(atom_index)) {
            // Mark the record dead and continue.
            object.unwind_relocs_lookup[record_id].dead = true;
            if (object.eh_frame_records_lookup.get(atom_index)) |fde_offset| {
                object.eh_frame_relocs_lookup.getPtr(fde_offset).?.dead = true;
            }
            continue;
        }

        const record = unwind_records[record_id];
        if (UnwindInfo.UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch)) {
            try markEhFrameRecord(zld, object_id, atom_index, alive);
        } else {
            if (UnwindInfo.getPersonalityFunctionReloc(zld, object_id, record_id)) |rel| {
                const target = UnwindInfo.parseRelocTarget(
                    zld,
                    object_id,
                    rel,
                    mem.asBytes(&record),
                    @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
                );
                const target_sym = zld.getSymbol(target);
                if (!target_sym.undf()) {
                    const target_object = zld.objects.items[target.getFile().?];
                    const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
                    markLive(zld, target_atom_index, alive);
                }
            }

            if (UnwindInfo.getLsdaReloc(zld, object_id, record_id)) |rel| {
                const target = UnwindInfo.parseRelocTarget(
                    zld,
                    object_id,
                    rel,
                    mem.asBytes(&record),
                    @intCast(i32, record_id * @sizeOf(macho.compact_unwind_entry)),
                );
                const target_object = zld.objects.items[target.getFile().?];
                const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
                markLive(zld, target_atom_index, alive);
            }
        }
    }
}

fn markEhFrameRecord(zld: *Zld, object_id: u32, atom_index: AtomIndex, alive: *AtomTable) !void {
    const cpu_arch = zld.options.target.cpu.arch;
    const object = &zld.objects.items[object_id];
    var it = object.getEhFrameRecordsIterator();

    const fde_offset = object.eh_frame_records_lookup.get(atom_index).?;
    it.seekTo(fde_offset);
    const fde = (try it.next()).?;

    const cie_ptr = fde.getCiePointer();
    const cie_offset = fde_offset + 4 - cie_ptr;
    it.seekTo(cie_offset);
    const cie = (try it.next()).?;

    switch (cpu_arch) {
        .aarch64 => {
            // Mark FDE references which should include any referenced LSDA record
            const relocs = eh_frame.getRelocs(zld, object_id, fde_offset);
            for (relocs) |rel| {
                const target = UnwindInfo.parseRelocTarget(
                    zld,
                    object_id,
                    rel,
                    fde.data,
                    @intCast(i32, fde_offset) + 4,
                );
                const target_sym = zld.getSymbol(target);
                if (!target_sym.undf()) blk: {
                    const target_object = zld.objects.items[target.getFile().?];
                    const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index) orelse
                        break :blk;
                    markLive(zld, target_atom_index, alive);
                }
            }
        },
        .x86_64 => {
            const lsda_ptr = try fde.getLsdaPointer(cie, .{
                .base_addr = object.eh_frame_sect.?.addr,
                .base_offset = fde_offset,
            });
            if (lsda_ptr) |lsda_address| {
                // Mark LSDA record as live
                const sym_index = object.getSymbolByAddress(lsda_address, null);
                const target_atom_index = object.getAtomIndexForSymbol(sym_index).?;
                markLive(zld, target_atom_index, alive);
            }
        },
        else => unreachable,
    }

    // Mark CIE references which should include any referenced personalities
    // that are defined locally.
    if (cie.getPersonalityPointerReloc(zld, object_id, cie_offset)) |target| {
        const target_sym = zld.getSymbol(target);
        if (!target_sym.undf()) {
            const target_object = zld.objects.items[target.getFile().?];
            const target_atom_index = target_object.getAtomIndexForSymbol(target.sym_index).?;
            markLive(zld, target_atom_index, alive);
        }
    }
}

fn prune(zld: *Zld, alive: AtomTable) void {
@ -275,10 +390,10 @@ fn prune(zld: *Zld, alive: AtomTable) void {
        const atom = zld.getAtom(atom_index);
        const sym_loc = atom.getSymbolWithLoc();

        log.debug("prune(ATOM({d}, %{d}, {d}))", .{
        log.debug("prune(ATOM({d}, %{d}, {?d}))", .{
            atom_index,
            sym_loc.sym_index,
            sym_loc.file,
            sym_loc.getFile(),
        });
        log.debug("  {s} in {s}", .{ zld.getSymbolName(sym_loc), object.name });

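The CIE pointer stored in an FDE is the distance from the FDE's CIE-pointer field back to its CIE, and that field sits 4 bytes into the record; hence the `cie_offset = fde_offset + 4 - cie_ptr` computed above. A tiny editorial worked example of the round trip:

test "FDE CIE pointer arithmetic" {
    const std = @import("std");
    const fde_offset: u32 = 0x40; // the FDE record starts here
    const cie_offset: u32 = 0x0; // its CIE sits at the top of __eh_frame
    const cie_ptr = fde_offset + 4 - cie_offset; // value a producer stores
    try std.testing.expectEqual(cie_offset, fde_offset + 4 - cie_ptr);
}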
625 src/link/MachO/eh_frame.zig Normal file
@ -0,0 +1,625 @@
const std = @import("std");
const assert = std.debug.assert;
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const leb = std.leb;
const log = std.log.scoped(.eh_frame);

const Allocator = mem.Allocator;
const AtomIndex = @import("zld.zig").AtomIndex;
const Atom = @import("ZldAtom.zig");
const SymbolWithLoc = @import("zld.zig").SymbolWithLoc;
const UnwindInfo = @import("UnwindInfo.zig");
const Zld = @import("zld.zig").Zld;

pub fn scanRelocs(zld: *Zld) !void {
    const gpa = zld.gpa;

    for (zld.objects.items) |*object, object_id| {
        var cies = std.AutoHashMap(u32, void).init(gpa);
        defer cies.deinit();

        var it = object.getEhFrameRecordsIterator();

        for (object.exec_atoms.items) |atom_index| {
            const fde_offset = object.eh_frame_records_lookup.get(atom_index) orelse continue;
            if (object.eh_frame_relocs_lookup.get(fde_offset).?.dead) continue;
            it.seekTo(fde_offset);
            const fde = (try it.next()).?;

            const cie_ptr = fde.getCiePointer();
            const cie_offset = fde_offset + 4 - cie_ptr;

            if (!cies.contains(cie_offset)) {
                try cies.putNoClobber(cie_offset, {});
                it.seekTo(cie_offset);
                const cie = (try it.next()).?;
                try cie.scanRelocs(zld, @intCast(u32, object_id), cie_offset);
            }
        }
    }
}

pub fn calcSectionSize(zld: *Zld, unwind_info: *const UnwindInfo) !void {
    const sect_id = zld.getSectionByName("__TEXT", "__eh_frame") orelse return;
    const sect = &zld.sections.items(.header)[sect_id];
    sect.@"align" = 3;
    sect.size = 0;

    const cpu_arch = zld.options.target.cpu.arch;
    const gpa = zld.gpa;
    var size: u32 = 0;

    for (zld.objects.items) |*object| {
        var cies = std.AutoHashMap(u32, u32).init(gpa);
        defer cies.deinit();

        var eh_it = object.getEhFrameRecordsIterator();

        for (object.exec_atoms.items) |atom_index| {
            const fde_record_offset = object.eh_frame_records_lookup.get(atom_index) orelse continue;
            if (object.eh_frame_relocs_lookup.get(fde_record_offset).?.dead) continue;

            const record_id = unwind_info.records_lookup.get(atom_index) orelse continue;
            const record = unwind_info.records.items[record_id];

            // TODO skip this check if no __compact_unwind is present
            const is_dwarf = UnwindInfo.UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
            if (!is_dwarf) continue;

            eh_it.seekTo(fde_record_offset);
            const source_fde_record = (try eh_it.next()).?;

            const cie_ptr = source_fde_record.getCiePointer();
            const cie_offset = fde_record_offset + 4 - cie_ptr;

            const gop = try cies.getOrPut(cie_offset);
            if (!gop.found_existing) {
                eh_it.seekTo(cie_offset);
                const source_cie_record = (try eh_it.next()).?;
                gop.value_ptr.* = size;
                size += source_cie_record.getSize();
            }

            size += source_fde_record.getSize();
        }
    }

    sect.size = size;
}

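calcSectionSize (and write below) dedupe CIEs with the same getOrPut idiom: the map carries source CIE offset to output offset, and only the first sighting advances the running size. A condensed, self-contained editorial version:

test "CIE dedup via getOrPut" {
    const std = @import("std");
    var cies = std.AutoHashMap(u32, u32).init(std.testing.allocator);
    defer cies.deinit();
    var out_offset: u32 = 0;
    const cie_size: u32 = 24;
    for ([_]u32{ 0, 0, 64, 0 }) |cie_offset| { // three FDEs share the CIE at 0
        const gop = try cies.getOrPut(cie_offset);
        if (!gop.found_existing) {
            gop.value_ptr.* = out_offset; // first sighting gets an output slot
            out_offset += cie_size;
        }
    }
    try std.testing.expectEqual(@as(u32, 48), out_offset); // two distinct CIEs
}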
pub fn write(zld: *Zld, unwind_info: *UnwindInfo) !void {
    const sect_id = zld.getSectionByName("__TEXT", "__eh_frame") orelse return;
    const sect = zld.sections.items(.header)[sect_id];
    const seg_id = zld.sections.items(.segment_index)[sect_id];
    const seg = zld.segments.items[seg_id];

    const cpu_arch = zld.options.target.cpu.arch;
    const gpa = zld.gpa;

    var eh_records = std.AutoArrayHashMap(u32, EhFrameRecord(true)).init(gpa);
    defer {
        for (eh_records.values()) |*rec| {
            rec.deinit(gpa);
        }
        eh_records.deinit();
    }

    var eh_frame_offset: u32 = 0;

    for (zld.objects.items) |*object, object_id| {
        try eh_records.ensureUnusedCapacity(2 * @intCast(u32, object.exec_atoms.items.len));

        var cies = std.AutoHashMap(u32, u32).init(gpa);
        defer cies.deinit();

        var eh_it = object.getEhFrameRecordsIterator();

        for (object.exec_atoms.items) |atom_index| {
            const fde_record_offset = object.eh_frame_records_lookup.get(atom_index) orelse continue;
            if (object.eh_frame_relocs_lookup.get(fde_record_offset).?.dead) continue;

            const record_id = unwind_info.records_lookup.get(atom_index) orelse continue;
            const record = &unwind_info.records.items[record_id];

            // TODO skip this check if no __compact_unwind is present
            const is_dwarf = UnwindInfo.UnwindEncoding.isDwarf(record.compactUnwindEncoding, cpu_arch);
            if (!is_dwarf) continue;

            eh_it.seekTo(fde_record_offset);
            const source_fde_record = (try eh_it.next()).?;

            const cie_ptr = source_fde_record.getCiePointer();
            const cie_offset = fde_record_offset + 4 - cie_ptr;

            const gop = try cies.getOrPut(cie_offset);
            if (!gop.found_existing) {
                eh_it.seekTo(cie_offset);
                const source_cie_record = (try eh_it.next()).?;
                var cie_record = try source_cie_record.toOwned(gpa);
                try cie_record.relocate(zld, @intCast(u32, object_id), .{
                    .source_offset = cie_offset,
                    .out_offset = eh_frame_offset,
                    .sect_addr = sect.addr,
                });
                eh_records.putAssumeCapacityNoClobber(eh_frame_offset, cie_record);
                gop.value_ptr.* = eh_frame_offset;
                eh_frame_offset += cie_record.getSize();
            }

            var fde_record = try source_fde_record.toOwned(gpa);
            fde_record.setCiePointer(eh_frame_offset + 4 - gop.value_ptr.*);
            try fde_record.relocate(zld, @intCast(u32, object_id), .{
                .source_offset = fde_record_offset,
                .out_offset = eh_frame_offset,
                .sect_addr = sect.addr,
            });

            switch (cpu_arch) {
                .aarch64 => {}, // relocs take care of LSDA pointers
                .x86_64 => {
                    // We need to relocate target symbol address ourselves.
                    const atom = zld.getAtom(atom_index);
                    const atom_sym = zld.getSymbol(atom.getSymbolWithLoc());
                    try fde_record.setTargetSymbolAddress(atom_sym.n_value, .{
                        .base_addr = sect.addr,
                        .base_offset = eh_frame_offset,
                    });

                    // We need to parse LSDA pointer and relocate ourselves.
                    const cie_record = eh_records.get(
                        eh_frame_offset + 4 - fde_record.getCiePointer(),
                    ).?;
                    const source_lsda_ptr = try fde_record.getLsdaPointer(cie_record, .{
                        .base_addr = object.eh_frame_sect.?.addr,
                        .base_offset = fde_record_offset,
                    });
                    if (source_lsda_ptr) |ptr| {
                        const sym_index = object.getSymbolByAddress(ptr, null);
                        const sym = object.symtab[sym_index];
                        try fde_record.setLsdaPointer(cie_record, sym.n_value, .{
                            .base_addr = sect.addr,
                            .base_offset = eh_frame_offset,
                        });
                    }
                },
                else => unreachable,
            }

            eh_records.putAssumeCapacityNoClobber(eh_frame_offset, fde_record);

            UnwindInfo.UnwindEncoding.setDwarfSectionOffset(
                &record.compactUnwindEncoding,
                cpu_arch,
                @intCast(u24, eh_frame_offset),
            );

            const cie_record = eh_records.get(
                eh_frame_offset + 4 - fde_record.getCiePointer(),
            ).?;
            const lsda_ptr = try fde_record.getLsdaPointer(cie_record, .{
                .base_addr = sect.addr,
                .base_offset = eh_frame_offset,
            });
            if (lsda_ptr) |ptr| {
                record.lsda = ptr - seg.vmaddr;
            }

            eh_frame_offset += fde_record.getSize();
        }
    }

    var buffer = std.ArrayList(u8).init(gpa);
    defer buffer.deinit();
    const writer = buffer.writer();

    for (eh_records.values()) |record| {
        try writer.writeIntLittle(u32, record.size);
        try buffer.appendSlice(record.data);
    }

    try zld.file.pwriteAll(buffer.items, sect.offset);
}
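setDwarfSectionOffset above is assumed here to stash the record's final __eh_frame offset in the low 24 bits of the compact unwind encoding, the DWARF-mode layout described in Apple's compact_unwind_encoding.h. The sketch below is editorial, not the UnwindInfo implementation:

fn sketchSetDwarfSectionOffset(encoding: *u32, offset: u24) void {
    // Keep the mode/flag bits (top byte); replace the 24-bit section offset.
    encoding.* = (encoding.* & 0xFF00_0000) | offset;
}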
const EhFrameRecordTag = enum { cie, fde };

pub fn EhFrameRecord(comptime is_mutable: bool) type {
    return struct {
        tag: EhFrameRecordTag,
        size: u32,
        data: if (is_mutable) []u8 else []const u8,

        const Record = @This();

        pub fn deinit(rec: *Record, gpa: Allocator) void {
            comptime assert(is_mutable);
            gpa.free(rec.data);
        }

        pub fn toOwned(rec: Record, gpa: Allocator) Allocator.Error!EhFrameRecord(true) {
            const data = try gpa.dupe(u8, rec.data);
            return EhFrameRecord(true){
                .tag = rec.tag,
                .size = rec.size,
                .data = data,
            };
        }

        pub inline fn getSize(rec: Record) u32 {
            return 4 + rec.size;
        }

        pub fn scanRelocs(
            rec: Record,
            zld: *Zld,
            object_id: u32,
            source_offset: u32,
        ) !void {
            if (rec.getPersonalityPointerReloc(zld, object_id, source_offset)) |target| {
                try Atom.addGotEntry(zld, target);
            }
        }

        pub fn getTargetSymbolAddress(rec: Record, ctx: struct {
            base_addr: u64,
            base_offset: u64,
        }) u64 {
            assert(rec.tag == .fde);
            const addend = mem.readIntLittle(i64, rec.data[4..][0..8]);
            return @intCast(u64, @intCast(i64, ctx.base_addr + ctx.base_offset + 8) + addend);
        }

        pub fn setTargetSymbolAddress(rec: *Record, value: u64, ctx: struct {
            base_addr: u64,
            base_offset: u64,
        }) !void {
            assert(rec.tag == .fde);
            const addend = @intCast(i64, value) - @intCast(i64, ctx.base_addr + ctx.base_offset + 8);
            mem.writeIntLittle(i64, rec.data[4..][0..8], addend);
        }

        pub fn getPersonalityPointerReloc(
            rec: Record,
            zld: *Zld,
            object_id: u32,
            source_offset: u32,
        ) ?SymbolWithLoc {
            const cpu_arch = zld.options.target.cpu.arch;
            const relocs = getRelocs(zld, object_id, source_offset);
            for (relocs) |rel| {
                switch (cpu_arch) {
                    .aarch64 => {
                        const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
                        switch (rel_type) {
                            .ARM64_RELOC_SUBTRACTOR,
                            .ARM64_RELOC_UNSIGNED,
                            => continue,
                            .ARM64_RELOC_POINTER_TO_GOT => {},
                            else => unreachable,
                        }
                    },
                    .x86_64 => {
                        const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
                        switch (rel_type) {
                            .X86_64_RELOC_GOT => {},
                            else => unreachable,
                        }
                    },
                    else => unreachable,
                }
                const target = UnwindInfo.parseRelocTarget(
                    zld,
                    object_id,
                    rel,
                    rec.data,
                    @intCast(i32, source_offset) + 4,
                );
                return target;
            }
            return null;
        }

        pub fn relocate(rec: *Record, zld: *Zld, object_id: u32, ctx: struct {
            source_offset: u32,
            out_offset: u32,
            sect_addr: u64,
        }) !void {
            comptime assert(is_mutable);

            const cpu_arch = zld.options.target.cpu.arch;
            const relocs = getRelocs(zld, object_id, ctx.source_offset);

            for (relocs) |rel| {
                const target = UnwindInfo.parseRelocTarget(
                    zld,
                    object_id,
                    rel,
                    rec.data,
                    @intCast(i32, ctx.source_offset) + 4,
                );
                const rel_offset = @intCast(u32, rel.r_address - @intCast(i32, ctx.source_offset) - 4);
                const source_addr = ctx.sect_addr + rel_offset + ctx.out_offset + 4;

                switch (cpu_arch) {
                    .aarch64 => {
                        const rel_type = @intToEnum(macho.reloc_type_arm64, rel.r_type);
                        switch (rel_type) {
                            .ARM64_RELOC_SUBTRACTOR => {
                                // Address of the __eh_frame in the source object file
                            },
                            .ARM64_RELOC_POINTER_TO_GOT => {
                                const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
                                const result = math.cast(i32, @intCast(i64, target_addr) - @intCast(i64, source_addr)) orelse
                                    return error.Overflow;
                                mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], result);
                            },
                            .ARM64_RELOC_UNSIGNED => {
                                assert(rel.r_extern == 1);
                                const target_addr = try Atom.getRelocTargetAddress(zld, target, false, false);
                                const result = @intCast(i64, target_addr) - @intCast(i64, source_addr);
                                mem.writeIntLittle(i64, rec.data[rel_offset..][0..8], @intCast(i64, result));
                            },
                            else => unreachable,
                        }
                    },
                    .x86_64 => {
                        const rel_type = @intToEnum(macho.reloc_type_x86_64, rel.r_type);
                        switch (rel_type) {
                            .X86_64_RELOC_GOT => {
                                const target_addr = try Atom.getRelocTargetAddress(zld, target, true, false);
                                const addend = mem.readIntLittle(i32, rec.data[rel_offset..][0..4]);
                                const adjusted_target_addr = @intCast(u64, @intCast(i64, target_addr) + addend);
                                const disp = try Atom.calcPcRelativeDisplacementX86(source_addr, adjusted_target_addr, 0);
                                mem.writeIntLittle(i32, rec.data[rel_offset..][0..4], disp);
                            },
                            else => unreachable,
                        }
                    },
                    else => unreachable,
                }
            }
        }

        pub fn getCiePointer(rec: Record) u32 {
            assert(rec.tag == .fde);
            return mem.readIntLittle(u32, rec.data[0..4]);
        }

        pub fn setCiePointer(rec: *Record, ptr: u32) void {
            assert(rec.tag == .fde);
            mem.writeIntLittle(u32, rec.data[0..4], ptr);
        }

        pub fn getAugmentationString(rec: Record) []const u8 {
            assert(rec.tag == .cie);
            return mem.sliceTo(@ptrCast([*:0]const u8, rec.data.ptr + 5), 0);
        }

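getPersonalityPointer and getLsdaEncoding below both walk the CIE augmentation string ("zPLR" in common CIEs): 'z' introduces a ULEB128 augmentation-data length, 'P' a personality encoding byte plus an encoded pointer, 'L' the LSDA encoding byte, and 'R' the FDE pointer encoding byte. An editorial sketch that extracts only the LSDA encoding, using the same helpers as the methods below:

        fn sketchLsdaEncoding(aug_str: []const u8, reader: anytype) !?u8 {
            for (aug_str) |ch, i| switch (ch) {
                'z' => if (i > 0) {
                    return error.BadDwarfCfi; // 'z', if present, must come first
                } else {
                    _ = try leb.readULEB128(u64, reader); // augmentation data length
                },
                'P' => {
                    const enc = try reader.readByte();
                    _ = try getEncodedPointer(enc, 0, reader); // skip the personality
                },
                'L' => return try reader.readByte(),
                'R' => {
                    _ = try reader.readByte();
                },
                'S', 'B', 'G' => {},
                else => return error.BadDwarfCfi,
            };
            return null;
        }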
        pub fn getPersonalityPointer(rec: Record, ctx: struct {
            base_addr: u64,
            base_offset: u64,
        }) !?u64 {
            assert(rec.tag == .cie);
            const aug_str = rec.getAugmentationString();

            var stream = std.io.fixedBufferStream(rec.data[9 + aug_str.len ..]);
            var creader = std.io.countingReader(stream.reader());
            const reader = creader.reader();

            for (aug_str) |ch, i| switch (ch) {
                'z' => if (i > 0) {
                    return error.BadDwarfCfi;
                } else {
                    _ = try leb.readULEB128(u64, reader);
                },
                'R' => {
                    _ = try reader.readByte();
                },
                'P' => {
                    const enc = try reader.readByte();
                    const offset = ctx.base_offset + 13 + aug_str.len + creader.bytes_read;
                    const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
                    return ptr;
                },
                'L' => {
                    _ = try reader.readByte();
                },
                'S', 'B', 'G' => {},
                else => return error.BadDwarfCfi,
            };

            return null;
        }

        pub fn getLsdaPointer(rec: Record, cie: Record, ctx: struct {
            base_addr: u64,
            base_offset: u64,
        }) !?u64 {
            assert(rec.tag == .fde);
            const enc = (try cie.getLsdaEncoding()) orelse return null;
            var stream = std.io.fixedBufferStream(rec.data[20..]);
            const reader = stream.reader();
            _ = try reader.readByte();
            const offset = ctx.base_offset + 25;
            const ptr = try getEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), reader);
            return ptr;
        }

        pub fn setLsdaPointer(rec: *Record, cie: Record, value: u64, ctx: struct {
            base_addr: u64,
            base_offset: u64,
        }) !void {
            assert(rec.tag == .fde);
            const enc = (try cie.getLsdaEncoding()) orelse unreachable;
            var stream = std.io.fixedBufferStream(rec.data[21..]);
            const writer = stream.writer();
            const offset = ctx.base_offset + 25;
            try setEncodedPointer(enc, @intCast(i64, ctx.base_addr + offset), value, writer);
        }

        fn getLsdaEncoding(rec: Record) !?u8 {
            assert(rec.tag == .cie);
            const aug_str = rec.getAugmentationString();

            const base_offset = 9 + aug_str.len;
            var stream = std.io.fixedBufferStream(rec.data[base_offset..]);
            var creader = std.io.countingReader(stream.reader());
            const reader = creader.reader();

            for (aug_str) |ch, i| switch (ch) {
                'z' => if (i > 0) {
                    return error.BadDwarfCfi;
                } else {
                    _ = try leb.readULEB128(u64, reader);
                },
                'R' => {
                    _ = try reader.readByte();
                },
                'P' => {
                    const enc = try reader.readByte();
                    _ = try getEncodedPointer(enc, 0, reader);
                },
                'L' => {
                    const enc = try reader.readByte();
                    return enc;
                },
                'S', 'B', 'G' => {},
                else => return error.BadDwarfCfi,
            };

            return null;
        }

        fn getEncodedPointer(enc: u8, pcrel_offset: i64, reader: anytype) !?u64 {
            if (enc == EH_PE.omit) return null;

            var ptr: i64 = switch (enc & 0x0F) {
                EH_PE.absptr => @bitCast(i64, try reader.readIntLittle(u64)),
                EH_PE.udata2 => @bitCast(i16, try reader.readIntLittle(u16)),
                EH_PE.udata4 => @bitCast(i32, try reader.readIntLittle(u32)),
                EH_PE.udata8 => @bitCast(i64, try reader.readIntLittle(u64)),
                EH_PE.uleb128 => @bitCast(i64, try leb.readULEB128(u64, reader)),
                EH_PE.sdata2 => try reader.readIntLittle(i16),
                EH_PE.sdata4 => try reader.readIntLittle(i32),
                EH_PE.sdata8 => try reader.readIntLittle(i64),
                EH_PE.sleb128 => try leb.readILEB128(i64, reader),
                else => return null,
            };

            switch (enc & 0x70) {
                EH_PE.absptr => {},
                EH_PE.pcrel => ptr += pcrel_offset,
                EH_PE.datarel,
                EH_PE.textrel,
                EH_PE.funcrel,
                EH_PE.aligned,
                => return null,
                else => return null,
            }

            return @bitCast(u64, ptr);
        }

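A DW_EH_PE encoding byte combines one value format (low nibble) with one application rule (bits 4-6). The 0x1B byte seen in typical CIEs decomposes as below; this editorial test relies on the EH_PE constants declared at the bottom of this file:

test "DW_EH_PE encoding byte composition" {
    const enc: u8 = EH_PE.pcrel | EH_PE.sdata4; // 0x10 | 0x0B == 0x1B
    try std.testing.expectEqual(@as(u8, 0x1B), enc);
    try std.testing.expectEqual(@as(u8, EH_PE.sdata4), enc & 0x0F); // value format
    try std.testing.expectEqual(@as(u8, EH_PE.pcrel), enc & 0x70); // application rule
}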
        fn setEncodedPointer(enc: u8, pcrel_offset: i64, value: u64, writer: anytype) !void {
            if (enc == EH_PE.omit) return;

            var actual = @intCast(i64, value);

            switch (enc & 0x70) {
                EH_PE.absptr => {},
                EH_PE.pcrel => actual -= pcrel_offset,
                EH_PE.datarel,
                EH_PE.textrel,
                EH_PE.funcrel,
                EH_PE.aligned,
                => unreachable,
                else => unreachable,
            }

            switch (enc & 0x0F) {
                EH_PE.absptr => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
                EH_PE.udata2 => try writer.writeIntLittle(u16, @bitCast(u16, @intCast(i16, actual))),
                EH_PE.udata4 => try writer.writeIntLittle(u32, @bitCast(u32, @intCast(i32, actual))),
                EH_PE.udata8 => try writer.writeIntLittle(u64, @bitCast(u64, actual)),
                EH_PE.uleb128 => try leb.writeULEB128(writer, @bitCast(u64, actual)),
                EH_PE.sdata2 => try writer.writeIntLittle(i16, @intCast(i16, actual)),
                EH_PE.sdata4 => try writer.writeIntLittle(i32, @intCast(i32, actual)),
                EH_PE.sdata8 => try writer.writeIntLittle(i64, actual),
                EH_PE.sleb128 => try leb.writeILEB128(writer, actual),
                else => unreachable,
            }
        }
    };
}

pub fn getRelocs(
    zld: *Zld,
    object_id: u32,
    source_offset: u32,
) []align(1) const macho.relocation_info {
    const object = &zld.objects.items[object_id];
    assert(object.hasEhFrameRecords());
    const urel = object.eh_frame_relocs_lookup.get(source_offset) orelse
        return &[0]macho.relocation_info{};
    const all_relocs = object.getRelocs(object.eh_frame_sect.?);
    return all_relocs[urel.reloc.start..][0..urel.reloc.len];
}

pub const Iterator = struct {
    data: []const u8,
    pos: u32 = 0,

    pub fn next(it: *Iterator) !?EhFrameRecord(false) {
        if (it.pos >= it.data.len) return null;

        var stream = std.io.fixedBufferStream(it.data[it.pos..]);
        const reader = stream.reader();

        var size = try reader.readIntLittle(u32);
        if (size == 0xFFFFFFFF) {
            log.err("MachO doesn't support 64bit DWARF CFI __eh_frame records", .{});
            return error.BadDwarfCfi;
        }

        const id = try reader.readIntLittle(u32);
        const tag: EhFrameRecordTag = if (id == 0) .cie else .fde;
        const offset: u32 = 4;
        const record = EhFrameRecord(false){
            .tag = tag,
            .size = size,
            .data = it.data[it.pos + offset ..][0..size],
        };

        it.pos += size + offset;

        return record;
    }

    pub fn reset(it: *Iterator) void {
        it.pos = 0;
    }

    pub fn seekTo(it: *Iterator, pos: u32) void {
        assert(pos >= 0 and pos < it.data.len);
        it.pos = pos;
    }
};

pub const EH_PE = struct {
    pub const absptr = 0x00;
    pub const uleb128 = 0x01;
    pub const udata2 = 0x02;
    pub const udata4 = 0x03;
    pub const udata8 = 0x04;
    pub const sleb128 = 0x09;
    pub const sdata2 = 0x0A;
    pub const sdata4 = 0x0B;
    pub const sdata8 = 0x0C;
    pub const pcrel = 0x10;
    pub const textrel = 0x20;
    pub const datarel = 0x30;
    pub const funcrel = 0x40;
    pub const aligned = 0x50;
    pub const indirect = 0x80;
    pub const omit = 0xFF;
};
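The Iterator above treats each record as a u32 size field followed by a `size`-byte payload whose first word distinguishes CIE (0) from FDE. An editorial test over a toy two-record buffer:

test "eh_frame record iteration" {
    const bytes = [_]u8{
        8, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, // CIE at 0: size=8, id=0
        8, 0, 0, 0, 16, 0, 0, 0, 5, 6, 7, 8, // FDE at 12: size=8, CIE ptr = 12 + 4 - 0
    };
    var it = Iterator{ .data = &bytes };
    const cie = (try it.next()).?;
    try std.testing.expect(cie.tag == .cie);
    const fde = (try it.next()).?;
    try std.testing.expect(fde.tag == .fde);
    try std.testing.expectEqual(@as(u32, 16), fde.getCiePointer());
    try std.testing.expect((try it.next()) == null);
}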
@ -68,7 +68,7 @@ pub const Thunk = struct {
    }
};

pub fn createThunks(zld: *Zld, sect_id: u8, reverse_lookups: [][]u32) !void {
pub fn createThunks(zld: *Zld, sect_id: u8) !void {
    const header = &zld.sections.items(.header)[sect_id];
    if (header.size == 0) return;

@ -140,7 +138,6 @@ pub fn createThunks(zld: *Zld, sect_id: u8, reverse_lookups: [][]u32) !void {
        try scanRelocs(
            zld,
            atom_index,
            reverse_lookups[atom.getFile().?],
            allocated,
            thunk_index,
            group_end,
@ -214,7 +213,6 @@ fn allocateThunk(
fn scanRelocs(
    zld: *Zld,
    atom_index: AtomIndex,
    reverse_lookup: []u32,
    allocated: std.AutoHashMap(AtomIndex, void),
    thunk_index: ThunkIndex,
    group_end: AtomIndex,
@ -231,7 +229,7 @@ fn scanRelocs(
    for (relocs) |rel| {
        if (!relocNeedsThunk(rel)) continue;

        const target = Atom.parseRelocTarget(zld, atom_index, rel, reverse_lookup);
        const target = Atom.parseRelocTarget(zld, atom_index, rel);
        if (isReachable(zld, atom_index, rel, base_offset, target, allocated)) continue;

        log.debug("{x}: source = {s}@{x}, target = {s}@{x} unreachable", .{
@ -308,7 +306,8 @@ fn isReachable(
    if (!allocated.contains(target_atom_index)) return false;

    const source_addr = source_sym.n_value + @intCast(u32, rel.r_address - base_offset);
    const target_addr = Atom.getRelocTargetAddress(zld, rel, target, false) catch unreachable;
    const is_via_got = Atom.relocRequiresGot(zld, rel);
    const target_addr = Atom.getRelocTargetAddress(zld, target, is_via_got, false) catch unreachable;
    _ = Atom.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch
        return false;

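The isReachable change above keeps the overall contract: a branch whose displacement cannot be encoded gets routed through a range-extension thunk placed within reach. Boiled down (editorial sketch):

fn branchNeedsThunk(zld: *Zld, source_addr: u64, target_addr: u64) bool {
    _ = zld;
    // Encodable displacement => branch directly; otherwise emit a thunk.
    _ = Atom.calcPcRelativeDisplacementArm64(source_addr, target_addr) catch return true;
    return false;
}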
@ -10,6 +10,7 @@ const mem = std.mem;

const aarch64 = @import("../../arch/aarch64/bits.zig");
const dead_strip = @import("dead_strip.zig");
const eh_frame = @import("eh_frame.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
@ -30,6 +31,7 @@ const LibStub = @import("../tapi.zig").LibStub;
const Object = @import("Object.zig");
const StringTable = @import("../strtab.zig").StringTable;
const Trie = @import("Trie.zig");
const UnwindInfo = @import("UnwindInfo.zig");

const Bind = @import("dyld_info/bind.zig").Bind(*const Zld, SymbolWithLoc);
const LazyBind = @import("dyld_info/bind.zig").LazyBind(*const Zld, SymbolWithLoc);
@ -389,6 +391,14 @@ pub const Zld = struct {
                break :blk null;
            }

            // We handle unwind info separately.
            if (mem.eql(u8, "__TEXT", segname) and mem.eql(u8, "__eh_frame", sectname)) {
                break :blk null;
            }
            if (mem.eql(u8, "__LD", segname) and mem.eql(u8, "__compact_unwind", sectname)) {
                break :blk null;
            }

            if (sect.isCode()) {
                break :blk self.getSectionByName("__TEXT", "__text") orelse try self.initSection(
                    "__TEXT",
@ -402,12 +412,6 @@ pub const Zld = struct {
            }

            if (sect.isDebug()) {
                // TODO debug attributes
                if (mem.eql(u8, "__LD", segname) and mem.eql(u8, "__compact_unwind", sectname)) {
                    log.debug("TODO compact unwind section: type 0x{x}, name '{s},{s}'", .{
                        sect.flags, segname, sectname,
                    });
                }
                break :blk null;
            }

@ -459,13 +463,6 @@ pub const Zld = struct {
                    );
                },
                macho.S_COALESCED => {
                    // TODO unwind info
                    if (mem.eql(u8, "__TEXT", segname) and mem.eql(u8, "__eh_frame", sectname)) {
                        log.debug("TODO eh frame section: type 0x{x}, name '{s},{s}'", .{
                            sect.flags, segname, sectname,
                        });
                        break :blk null;
                    }
                    break :blk self.getSectionByName(segname, sectname) orelse try self.initSection(
                        segname,
                        sectname,
@ -937,7 +934,7 @@ pub const Zld = struct {
        }
    }

    fn resolveSymbolsInObject(self: *Zld, object_id: u16, resolver: *SymbolResolver) !void {
    fn resolveSymbolsInObject(self: *Zld, object_id: u32, resolver: *SymbolResolver) !void {
        const object = &self.objects.items[object_id];
        const in_symtab = object.in_symtab orelse return;

@ -977,7 +974,7 @@ pub const Zld = struct {
            continue;
        }

        const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id };
        const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id + 1 };

        const global_index = resolver.table.get(sym_name) orelse {
            const gpa = self.gpa;

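The `.file = object_id + 1` shift pairs with the atom.getFile()/sym_loc.getFile() accessors and the `{?d}` log specifiers used throughout this commit. The likely encoding (an assumption, not shown in this diff) reserves file 0 for synthetic, linker-generated symbols:

pub fn getFile(self: SymbolWithLoc) ?u32 {
    if (self.file == 0) return null; // synthetic: no originating object
    return self.file - 1;
}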
@ -1378,7 +1375,7 @@ pub const Zld = struct {
        }
    }

    fn writeAtoms(self: *Zld, reverse_lookups: [][]u32) !void {
    fn writeAtoms(self: *Zld) !void {
        const gpa = self.gpa;
        const slice = self.sections.slice();

@ -1386,6 +1383,7 @@ pub const Zld = struct {
            const header = slice.items(.header)[sect_id];
            var atom_index = first_atom_index;

            if (atom_index == 0) continue;
            if (header.isZerofill()) continue;

            var buffer = std.ArrayList(u8).init(gpa);
@ -1407,7 +1405,7 @@ pub const Zld = struct {
                log.debug(" (adding ATOM(%{d}, '{s}') from object({?}) to buffer)", .{
                    atom.sym_index,
                    self.getSymbolName(atom.getSymbolWithLoc()),
                    atom.file,
                    atom.getFile(),
                });
                if (padding_size > 0) {
                    log.debug(" (with padding {x})", .{padding_size});
@ -1460,7 +1458,6 @@ pub const Zld = struct {
                    atom_index,
                    buffer.items[offset..][0..size],
                    relocs,
                    reverse_lookups[atom.getFile().?],
                );
            }

@ -1501,9 +1498,10 @@ pub const Zld = struct {
        while (i < slice.len) : (i += 1) {
            const section = self.sections.get(i);
            if (section.header.size == 0) {
                log.debug("pruning section {s},{s}", .{
                log.debug("pruning section {s},{s} {d}", .{
                    section.header.segName(),
                    section.header.sectName(),
                    section.first_atom_index,
                });
                continue;
            }
@ -1519,7 +1517,7 @@ pub const Zld = struct {
        }
    }

    fn calcSectionSizes(self: *Zld, reverse_lookups: [][]u32) !void {
    fn calcSectionSizes(self: *Zld) !void {
        const slice = self.sections.slice();
        for (slice.items(.header)) |*header, sect_id| {
            if (header.size == 0) continue;
@ -1528,6 +1526,8 @@ pub const Zld = struct {
            }

            var atom_index = slice.items(.first_atom_index)[sect_id];
            if (atom_index == 0) continue;

            header.size = 0;
            header.@"align" = 0;

@ -1556,7 +1556,7 @@ pub const Zld = struct {
            if (mem.eql(u8, header.sectName(), "__stub_helper")) continue;

            // Create jump/branch range extenders if needed.
            try thunks.createThunks(self, @intCast(u8, sect_id), reverse_lookups);
            try thunks.createThunks(self, @intCast(u8, sect_id));
        }
    }
}
@ -1601,8 +1601,6 @@ pub const Zld = struct {

        const slice = self.sections.slice();
        for (slice.items(.header)[indexes.start..indexes.end]) |*header, sect_id| {
            var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id];

            const alignment = try math.powi(u32, 2, header.@"align");
            const start_aligned = mem.alignForwardGeneric(u64, start, alignment);
            const n_sect = @intCast(u8, indexes.start + sect_id + 1);
@ -1613,48 +1611,51 @@ pub const Zld = struct {
                @intCast(u32, segment.fileoff + start_aligned);
            header.addr = segment.vmaddr + start_aligned;

            log.debug("allocating local symbols in sect({d}, '{s},{s}')", .{
                n_sect,
                header.segName(),
                header.sectName(),
            });

            while (true) {
                const atom = self.getAtom(atom_index);
                const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
                sym.n_value += header.addr;
                sym.n_sect = n_sect;

                log.debug(" ATOM(%{d}, '{s}') @{x}", .{
                    atom.sym_index,
                    self.getSymbolName(atom.getSymbolWithLoc()),
                    sym.n_value,
            var atom_index = slice.items(.first_atom_index)[indexes.start + sect_id];
            if (atom_index > 0) {
                log.debug("allocating local symbols in sect({d}, '{s},{s}')", .{
                    n_sect,
                    header.segName(),
                    header.sectName(),
                });

                if (atom.getFile() != null) {
                    // Update each symbol contained within the atom
                    var it = Atom.getInnerSymbolsIterator(self, atom_index);
                    while (it.next()) |sym_loc| {
                        const inner_sym = self.getSymbolPtr(sym_loc);
                        inner_sym.n_value = sym.n_value + Atom.calcInnerSymbolOffset(
                            self,
                            atom_index,
                            sym_loc.sym_index,
                        );
                        inner_sym.n_sect = n_sect;
                while (true) {
                    const atom = self.getAtom(atom_index);
                    const sym = self.getSymbolPtr(atom.getSymbolWithLoc());
                    sym.n_value += header.addr;
                    sym.n_sect = n_sect;

                    log.debug(" ATOM(%{d}, '{s}') @{x}", .{
                        atom.sym_index,
                        self.getSymbolName(atom.getSymbolWithLoc()),
                        sym.n_value,
                    });

                    if (atom.getFile() != null) {
                        // Update each symbol contained within the atom
                        var it = Atom.getInnerSymbolsIterator(self, atom_index);
                        while (it.next()) |sym_loc| {
                            const inner_sym = self.getSymbolPtr(sym_loc);
                            inner_sym.n_value = sym.n_value + Atom.calcInnerSymbolOffset(
                                self,
                                atom_index,
                                sym_loc.sym_index,
                            );
                            inner_sym.n_sect = n_sect;
                        }

                        // If there is a section alias, update it now too
                        if (Atom.getSectionAlias(self, atom_index)) |sym_loc| {
|
||||
const alias = self.getSymbolPtr(sym_loc);
|
||||
alias.n_value = sym.n_value;
|
||||
alias.n_sect = n_sect;
|
||||
}
|
||||
}
|
||||
|
||||
// If there is a section alias, update it now too
|
||||
if (Atom.getSectionAlias(self, atom_index)) |sym_loc| {
|
||||
const alias = self.getSymbolPtr(sym_loc);
|
||||
alias.n_value = sym.n_value;
|
||||
alias.n_sect = n_sect;
|
||||
}
|
||||
if (atom.next_index) |next_index| {
|
||||
atom_index = next_index;
|
||||
} else break;
|
||||
}
|
||||
|
||||
if (atom.next_index) |next_index| {
|
||||
atom_index = next_index;
|
||||
} else break;
|
||||
}
|
||||
|
||||
start = start_aligned + header.size;
|
||||
@ -1675,7 +1676,7 @@ pub const Zld = struct {
|
||||
reserved2: u32 = 0,
|
||||
};
|
||||
|
||||
fn initSection(
|
||||
pub fn initSection(
|
||||
self: *Zld,
|
||||
segname: []const u8,
|
||||
sectname: []const u8,
|
||||
@ -1685,7 +1686,7 @@ pub const Zld = struct {
|
||||
log.debug("creating section '{s},{s}'", .{ segname, sectname });
|
||||
const index = @intCast(u8, self.sections.slice().len);
|
||||
try self.sections.append(gpa, .{
|
||||
.segment_index = undefined,
|
||||
.segment_index = undefined, // Segments will be created automatically later down the pipeline
|
||||
.header = .{
|
||||
.sectname = makeStaticString(sectname),
|
||||
.segname = makeStaticString(segname),
|
||||
@ -1693,13 +1694,13 @@ pub const Zld = struct {
|
||||
.reserved1 = opts.reserved1,
|
||||
.reserved2 = opts.reserved2,
|
||||
},
|
||||
.first_atom_index = undefined,
|
||||
.last_atom_index = undefined,
|
||||
.first_atom_index = 0,
|
||||
.last_atom_index = 0,
|
||||
});
|
||||
return index;
|
||||
}
|
||||
|
||||
inline fn getSegmentPrecedence(segname: []const u8) u4 {
|
||||
fn getSegmentPrecedence(segname: []const u8) u4 {
|
||||
if (mem.eql(u8, segname, "__PAGEZERO")) return 0x0;
|
||||
if (mem.eql(u8, segname, "__TEXT")) return 0x1;
|
||||
if (mem.eql(u8, segname, "__DATA_CONST")) return 0x2;
|
||||
@ -1708,14 +1709,14 @@ pub const Zld = struct {
|
||||
return 0x4;
|
||||
}
|
||||
|
||||
inline fn getSegmentMemoryProtection(segname: []const u8) macho.vm_prot_t {
|
||||
fn getSegmentMemoryProtection(segname: []const u8) macho.vm_prot_t {
|
||||
if (mem.eql(u8, segname, "__PAGEZERO")) return macho.PROT.NONE;
|
||||
if (mem.eql(u8, segname, "__TEXT")) return macho.PROT.READ | macho.PROT.EXEC;
|
||||
if (mem.eql(u8, segname, "__LINKEDIT")) return macho.PROT.READ;
|
||||
return macho.PROT.READ | macho.PROT.WRITE;
|
||||
}
|
||||
|
||||
inline fn getSectionPrecedence(header: macho.section_64) u8 {
|
||||
fn getSectionPrecedence(header: macho.section_64) u8 {
|
||||
const segment_precedence: u4 = getSegmentPrecedence(header.segName());
|
||||
const section_precedence: u4 = blk: {
|
||||
if (header.isCode()) {
|
||||
@ -1732,10 +1733,11 @@ pub const Zld = struct {
|
||||
macho.S_ZEROFILL => break :blk 0xf,
|
||||
macho.S_THREAD_LOCAL_REGULAR => break :blk 0xd,
|
||||
macho.S_THREAD_LOCAL_ZEROFILL => break :blk 0xe,
|
||||
else => if (mem.eql(u8, "__eh_frame", header.sectName()))
|
||||
break :blk 0xf
|
||||
else
|
||||
break :blk 0x3,
|
||||
else => {
|
||||
if (mem.eql(u8, "__unwind_info", header.sectName())) break :blk 0xe;
|
||||
if (mem.eql(u8, "__eh_frame", header.sectName())) break :blk 0xf;
|
||||
break :blk 0x3;
|
||||
},
|
||||
}
|
||||
};
|
||||
return (@intCast(u8, segment_precedence) << 4) + section_precedence;
|
||||
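The combined precedence packs the segment nibble into the high four bits and the section nibble into the low four, so a single u8 comparison sorts sections first by segment, then within it. A small standalone check of the new ordering, with values taken from the branches above:

const std = @import("std");

test "packed precedence orders __unwind_info before __eh_frame" {
    // __TEXT has segment precedence 0x1; the new branch gives
    // __unwind_info section precedence 0xe and __eh_frame 0xf,
    // so both sort to the end of __TEXT, with __eh_frame last.
    const unwind_info: u8 = (@as(u8, 0x1) << 4) + 0xe; // 0x1e
    const eh_frame: u8 = (@as(u8, 0x1) << 4) + 0xf; // 0x1f
    try std.testing.expect(unwind_info < eh_frame);
}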
@ -1768,8 +1770,8 @@ pub const Zld = struct {
    }
}

fn writeLinkeditSegmentData(self: *Zld, reverse_lookups: [][]u32) !void {
    try self.writeDyldInfoData(reverse_lookups);
fn writeLinkeditSegmentData(self: *Zld) !void {
    try self.writeDyldInfoData();
    try self.writeFunctionStarts();
    try self.writeDataInCode();
    try self.writeSymtabs();
@ -1806,7 +1808,7 @@ pub const Zld = struct {
    }
}

fn collectRebaseData(self: *Zld, rebase: *Rebase, reverse_lookups: [][]u32) !void {
fn collectRebaseData(self: *Zld, rebase: *Rebase) !void {
    log.debug("collecting rebase data", .{});

    // First, unpack GOT entries
@ -1862,6 +1864,7 @@ pub const Zld = struct {

    const cpu_arch = self.options.target.cpu.arch;
    var atom_index = slice.items(.first_atom_index)[sect_id];
    if (atom_index == 0) continue;

    while (true) {
        const atom = self.getAtom(atom_index);
@ -1899,7 +1902,7 @@ pub const Zld = struct {
            },
            else => unreachable,
        }
        const target = Atom.parseRelocTarget(self, atom_index, rel, reverse_lookups[atom.getFile().?]);
        const target = Atom.parseRelocTarget(self, atom_index, rel);
        const target_sym = self.getSymbol(target);
        if (target_sym.undf()) continue;

@ -1962,7 +1965,10 @@ pub const Zld = struct {
    }
}

fn collectBindData(self: *Zld, bind: *Bind, reverse_lookups: [][]u32) !void {
fn collectBindData(
    self: *Zld,
    bind: *Bind,
) !void {
    log.debug("collecting bind data", .{});

    // First, unpack GOT section
@ -1993,6 +1999,7 @@ pub const Zld = struct {

    const cpu_arch = self.options.target.cpu.arch;
    var atom_index = slice.items(.first_atom_index)[sect_id];
    if (atom_index == 0) continue;

    log.debug("{s},{s}", .{ header.segName(), header.sectName() });

@ -2033,7 +2040,7 @@ pub const Zld = struct {
        else => unreachable,
    }

    const global = Atom.parseRelocTarget(self, atom_index, rel, reverse_lookups[atom.getFile().?]);
    const global = Atom.parseRelocTarget(self, atom_index, rel);
    const bind_sym_name = self.getSymbolName(global);
    const bind_sym = self.getSymbol(global);
    if (!bind_sym.undf()) continue;
@ -2164,16 +2171,18 @@ pub const Zld = struct {
    try trie.finalize(gpa);
}

fn writeDyldInfoData(self: *Zld, reverse_lookups: [][]u32) !void {
fn writeDyldInfoData(
    self: *Zld,
) !void {
    const gpa = self.gpa;

    var rebase = Rebase{};
    defer rebase.deinit(gpa);
    try self.collectRebaseData(&rebase, reverse_lookups);
    try self.collectRebaseData(&rebase);

    var bind = Bind{};
    defer bind.deinit(gpa);
    try self.collectBindData(&bind, reverse_lookups);
    try self.collectBindData(&bind);

    var lazy_bind = LazyBind{};
    defer lazy_bind.deinit(gpa);
@ -2873,12 +2882,12 @@ pub const Zld = struct {
    return buf;
}

pub inline fn getAtomPtr(self: *Zld, atom_index: AtomIndex) *Atom {
pub fn getAtomPtr(self: *Zld, atom_index: AtomIndex) *Atom {
    assert(atom_index < self.atoms.items.len);
    return &self.atoms.items[atom_index];
}

pub inline fn getAtom(self: Zld, atom_index: AtomIndex) Atom {
pub fn getAtom(self: Zld, atom_index: AtomIndex) Atom {
    assert(atom_index < self.atoms.items.len);
    return self.atoms.items[atom_index];
}
@ -2889,17 +2898,17 @@ pub const Zld = struct {
    } else return null;
}

pub inline fn getSegment(self: Zld, sect_id: u8) macho.segment_command_64 {
pub fn getSegment(self: Zld, sect_id: u8) macho.segment_command_64 {
    const index = self.sections.items(.segment_index)[sect_id];
    return self.segments.items[index];
}

pub inline fn getSegmentPtr(self: *Zld, sect_id: u8) *macho.segment_command_64 {
pub fn getSegmentPtr(self: *Zld, sect_id: u8) *macho.segment_command_64 {
    const index = self.sections.items(.segment_index)[sect_id];
    return &self.segments.items[index];
}

pub inline fn getLinkeditSegmentPtr(self: *Zld) *macho.segment_command_64 {
pub fn getLinkeditSegmentPtr(self: *Zld) *macho.segment_command_64 {
    assert(self.segments.items.len > 0);
    const seg = &self.segments.items[self.segments.items.len - 1];
    assert(mem.eql(u8, seg.segName(), "__LINKEDIT"));
@ -3384,6 +3393,8 @@ pub const Zld = struct {
    const slice = self.sections.slice();
    for (slice.items(.first_atom_index)) |first_atom_index, sect_id| {
        var atom_index = first_atom_index;
        if (atom_index == 0) continue;

        const header = slice.items(.header)[sect_id];

        log.debug("{s},{s}", .{ header.segName(), header.sectName() });
@ -3412,7 +3423,7 @@ pub const Zld = struct {
    sym.n_value,
    atom.size,
    atom.alignment,
    atom.file,
    atom.getFile(),
    sym.n_sect,
});

@ -3475,19 +3486,19 @@ const IndirectPointer = struct {
    }
};

pub const SymbolWithLoc = struct {
pub const SymbolWithLoc = extern struct {
    // Index into the respective symbol table.
    sym_index: u32,

    // -1 means it's a synthetic global.
    file: i32 = -1,
    // 0 means it's a synthetic global.
    file: u32 = 0,

    pub inline fn getFile(self: SymbolWithLoc) ?u31 {
        if (self.file == -1) return null;
        return @intCast(u31, self.file);
    pub fn getFile(self: SymbolWithLoc) ?u32 {
        if (self.file == 0) return null;
        return self.file - 1;
    }

    pub inline fn eql(self: SymbolWithLoc, other: SymbolWithLoc) bool {
    pub fn eql(self: SymbolWithLoc, other: SymbolWithLoc) bool {
        return self.file == other.file and self.sym_index == other.sym_index;
    }
};
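The `file` field now biases object indices by one so that 0, rather than -1, marks a synthetic global. That is what allows the struct to become `extern` (no signed sentinel) and is why `resolveSymbolsInObject` above now stores `.file = object_id + 1`. A standalone sketch of the round-trip, restating only the struct shown in the diff:

const std = @import("std");

const SymbolWithLoc = extern struct {
    sym_index: u32,
    file: u32 = 0, // 0 => synthetic global; otherwise object index + 1

    fn getFile(self: SymbolWithLoc) ?u32 {
        if (self.file == 0) return null;
        return self.file - 1;
    }
};

test "biased file index round-trips" {
    const object_id: u32 = 3;
    const loc = SymbolWithLoc{ .sym_index = 7, .file = object_id + 1 };
    try std.testing.expectEqual(@as(?u32, object_id), loc.getFile());

    const synthetic = SymbolWithLoc{ .sym_index = 0 };
    try std.testing.expect(synthetic.getFile() == null);
}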
@ -3965,7 +3976,7 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
    };

    for (zld.objects.items) |_, object_id| {
        try zld.resolveSymbolsInObject(@intCast(u16, object_id), &resolver);
        try zld.resolveSymbolsInObject(@intCast(u32, object_id), &resolver);
    }

    try zld.resolveSymbolsInArchives(&resolver);
@ -3995,16 +4006,11 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
    }

    for (zld.objects.items) |*object, object_id| {
        try object.splitIntoAtoms(&zld, @intCast(u31, object_id));
    }

    var reverse_lookups: [][]u32 = try arena.alloc([]u32, zld.objects.items.len);
    for (zld.objects.items) |object, i| {
        reverse_lookups[i] = try object.createReverseSymbolLookup(arena);
        try object.splitIntoAtoms(&zld, @intCast(u32, object_id));
    }

    if (gc_sections) {
        try dead_strip.gcAtoms(&zld, reverse_lookups);
        try dead_strip.gcAtoms(&zld);
    }

    try zld.createDyldPrivateAtom();
@ -4019,13 +4025,24 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
        if (header.isZerofill()) continue;

        const relocs = Atom.getAtomRelocs(&zld, atom_index);
        try Atom.scanAtomRelocs(&zld, atom_index, relocs, reverse_lookups[atom.getFile().?]);
        try Atom.scanAtomRelocs(&zld, atom_index, relocs);
    }
}

try eh_frame.scanRelocs(&zld);
try UnwindInfo.scanRelocs(&zld);

try zld.createDyldStubBinderGotAtom();

try zld.calcSectionSizes(reverse_lookups);
try zld.calcSectionSizes();

var unwind_info = UnwindInfo{ .gpa = zld.gpa };
defer unwind_info.deinit();
try unwind_info.collect(&zld);

try eh_frame.calcSectionSize(&zld, &unwind_info);
try unwind_info.calcSectionSize(&zld);

try zld.pruneAndSortSections();
try zld.createSegments();
try zld.allocateSegments();
@ -4039,8 +4056,10 @@ pub fn linkWithZld(macho_file: *MachO, comp: *Compilation, prog_node: *std.Progr
    zld.logAtoms();
}

try zld.writeAtoms(reverse_lookups);
try zld.writeLinkeditSegmentData(reverse_lookups);
try zld.writeAtoms();
try eh_frame.write(&zld, &unwind_info);
try unwind_info.write(&zld);
try zld.writeLinkeditSegmentData();

// If the last section of __DATA segment is zerofill section, we need to ensure
// that the free space between the end of the last non-zerofill section of __DATA

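The hunks above splice a distinct unwind-info phase into the link pipeline: `__eh_frame` and compact-unwind relocations are scanned before sizing, the records are collected and sized before segments are allocated, and both synthetic sections are written just before `__LINKEDIT`. A toy, self-contained sketch of that ordering constraint (stage names here are illustrative labels, not linker API):

const std = @import("std");

const Stage = enum {
    scan_relocs,
    calc_section_sizes,
    collect_unwind,
    calc_unwind_size,
    allocate_segments,
    write_unwind,
};

test "unwind info is collected and sized before it is written" {
    const pipeline = [_]Stage{
        .scan_relocs,
        .calc_section_sizes,
        .collect_unwind,
        .calc_unwind_size,
        .allocate_segments,
        .write_unwind,
    };
    const collect = std.mem.indexOfScalar(Stage, &pipeline, .collect_unwind).?;
    const write = std.mem.indexOfScalar(Stage, &pipeline, .write_unwind).?;
    try std.testing.expect(collect < write);
}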
src/type.zig | 46
@ -3789,6 +3789,39 @@ pub const Type = extern union {
    }
}

/// Returns true if the type's layout is already resolved and it is safe
/// to use `abiSize`, `abiAlignment` and `bitSize` on it.
pub fn layoutIsResolved(ty: Type) bool {
    switch (ty.zigTypeTag()) {
        .Struct => {
            if (ty.castTag(.@"struct")) |struct_ty| {
                return struct_ty.data.haveLayout();
            }
            return true;
        },
        .Union => {
            if (ty.cast(Payload.Union)) |union_ty| {
                return union_ty.data.haveLayout();
            }
            return true;
        },
        .Array => {
            if (ty.arrayLenIncludingSentinel() == 0) return true;
            return ty.childType().layoutIsResolved();
        },
        .Optional => {
            var buf: Type.Payload.ElemType = undefined;
            const payload_ty = ty.optionalChild(&buf);
            return payload_ty.layoutIsResolved();
        },
        .ErrorUnion => {
            const payload_ty = ty.errorUnionPayload();
            return payload_ty.layoutIsResolved();
        },
        else => return true,
    }
}

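`layoutIsResolved` centralizes the "is it safe to ask for ABI size and alignment" check and recurses through wrapper types, which is what lets `ptr()` further down replace its paired struct/union probes with a single call. A toy, self-contained analogue of that delegation (not the compiler's Type machinery):

const std = @import("std");

const Kind = union(enum) {
    leaf: bool, // true => layout known
    optional: *const Kind,
    error_union: *const Kind,
};

fn resolved(k: Kind) bool {
    return switch (k) {
        .leaf => |ok| ok,
        // Wrapper kinds delegate to their payload, as the switch above does.
        .optional, .error_union => |child| resolved(child.*),
    };
}

test "wrappers delegate to their payload" {
    const unresolved_struct = Kind{ .leaf = false };
    const opt = Kind{ .optional = &unresolved_struct };
    try std.testing.expect(!resolved(opt));
}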
pub fn isSinglePointer(self: Type) bool {
    return switch (self.tag()) {
        .single_const_pointer,
@ -5500,7 +5533,7 @@ pub const Type = extern union {
    }
    const S = struct {
        fn fieldWithRange(int_ty: Type, int_val: Value, end: usize, m: *Module) ?usize {
            if (int_val.compareAllWithZero(.lt)) return null;
            if (int_val.compareAllWithZero(.lt, m)) return null;
            var end_payload: Value.Payload.U64 = .{
                .base = .{ .tag = .int_u64 },
                .data = end,
@ -6498,12 +6531,7 @@ pub const Type = extern union {
    // pointee type needs to be resolved more, that needs to be done before calling
    // this ptr() function.
    if (d.@"align" != 0) canonicalize: {
        if (d.pointee_type.castTag(.@"struct")) |struct_ty| {
            if (!struct_ty.data.haveLayout()) break :canonicalize;
        }
        if (d.pointee_type.cast(Payload.Union)) |union_ty| {
            if (!union_ty.data.haveLayout()) break :canonicalize;
        }
        if (!d.pointee_type.layoutIsResolved()) break :canonicalize;
        if (d.@"align" == d.pointee_type.abiAlignment(target)) {
            d.@"align" = 0;
        }
@ -6528,12 +6556,12 @@ pub const Type = extern union {
    if (!d.mutable and d.pointee_type.eql(Type.u8, mod)) {
        switch (d.size) {
            .Slice => {
                if (sent.compareAllWithZero(.eq)) {
                if (sent.compareAllWithZero(.eq, mod)) {
                    return Type.initTag(.const_slice_u8_sentinel_0);
                }
            },
            .Many => {
                if (sent.compareAllWithZero(.eq)) {
                if (sent.compareAllWithZero(.eq, mod)) {
                    return Type.initTag(.manyptr_const_u8_sentinel_0);
                }
            },

src/value.zig
@ -2076,13 +2076,22 @@ pub const Value = extern union {
/// For vectors, returns true if comparison is true for ALL elements.
///
/// Note that `!compareAllWithZero(.eq, ...) != compareAllWithZero(.neq, ...)`
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator) bool {
    return compareAllWithZeroAdvanced(lhs, op, null) catch unreachable;
pub fn compareAllWithZero(lhs: Value, op: std.math.CompareOperator, mod: *Module) bool {
    return compareAllWithZeroAdvancedExtra(lhs, op, mod, null) catch unreachable;
}

pub fn compareAllWithZeroAdvanced(
    lhs: Value,
    op: std.math.CompareOperator,
    sema: *Sema,
) Module.CompileError!bool {
    return compareAllWithZeroAdvancedExtra(lhs, op, sema.mod, sema);
}

pub fn compareAllWithZeroAdvancedExtra(
    lhs: Value,
    op: std.math.CompareOperator,
    mod: *Module,
    opt_sema: ?*Sema,
) Module.CompileError!bool {
    if (lhs.isInf()) {
@ -2095,10 +2104,25 @@ pub const Value = extern union {
    }

    switch (lhs.tag()) {
        .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvanced(op, opt_sema),
        .repeated => return lhs.castTag(.repeated).?.data.compareAllWithZeroAdvancedExtra(op, mod, opt_sema),
        .aggregate => {
            for (lhs.castTag(.aggregate).?.data) |elem_val| {
                if (!(try elem_val.compareAllWithZeroAdvanced(op, opt_sema))) return false;
                if (!(try elem_val.compareAllWithZeroAdvancedExtra(op, mod, opt_sema))) return false;
            }
            return true;
        },
        .str_lit => {
            const str_lit = lhs.castTag(.str_lit).?.data;
            const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
            for (bytes) |byte| {
                if (!std.math.compare(byte, op, 0)) return false;
            }
            return true;
        },
        .bytes => {
            const bytes = lhs.castTag(.bytes).?.data;
            for (bytes) |byte| {
                if (!std.math.compare(byte, op, 0)) return false;
            }
            return true;
        },
@ -3103,7 +3127,7 @@ pub const Value = extern union {
    .int_i64,
    .int_big_positive,
    .int_big_negative,
    => compareAllWithZero(self, .eq),
    => self.orderAgainstZero().compare(.eq),

    .undef => unreachable,
    .unreachable_value => unreachable,

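The new `.str_lit` and `.bytes` branches reduce a vector-of-bytes comparison to `std.math.compare` per element, returning false on the first miss. A standalone check that mirrors that loop:

const std = @import("std");

test "per-byte compare against zero, as in the .bytes branch" {
    const zeros = [_]u8{ 0, 0, 0 };
    for (zeros) |byte| {
        try std.testing.expect(std.math.compare(byte, .eq, 0));
    }

    // One nonzero element makes the all-elements comparison fail.
    const bytes = [_]u8{ 0, 1, 0 };
    var all_eq = true;
    for (bytes) |byte| {
        if (!std.math.compare(byte, .eq, 0)) all_eq = false;
    }
    try std.testing.expect(!all_eq);
}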
@ -106,7 +106,6 @@ test {
    _ = @import("behavior/bugs/12430.zig");
    _ = @import("behavior/bugs/12450.zig");
    _ = @import("behavior/bugs/12486.zig");
    _ = @import("behavior/bugs/12488.zig");
    _ = @import("behavior/bugs/12498.zig");
    _ = @import("behavior/bugs/12551.zig");
    _ = @import("behavior/bugs/12571.zig");

@ -1,13 +0,0 @@
const expect = @import("std").testing.expect;

const A = struct {
    a: u32,
};

fn foo(comptime a: anytype) !void {
    try expect(a[0][0] == @sizeOf(A));
}

test {
    try foo(.{[_]usize{@sizeOf(A)}});
}
@ -517,3 +517,26 @@ test "peer type resolution of inferred error set with non-void payload" {
    };
    try expect(try S.openDataFile(.read) == 1);
}

test "lazy values passed to anytype parameter" {
    const A = struct {
        a: u32,
        fn foo(comptime a: anytype) !void {
            try expect(a[0][0] == @sizeOf(@This()));
        }
    };
    try A.foo(.{[_]usize{@sizeOf(A)}});

    const B = struct {
        fn foo(comptime a: anytype) !void {
            try expect(a.x == 0);
        }
    };
    try B.foo(.{ .x = @sizeOf(B) });

    const C = struct {};
    try expect(@truncate(u32, @sizeOf(C)) == 0);

    const D = struct {};
    try expect(@sizeOf(D) << 1 == 0);
}

@ -509,6 +509,7 @@ test "ptrCast comptime known slice to C pointer" {
test "ptrToInt on a generic function" {
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

@ -532,3 +533,18 @@ test "pointer alignment and element type include call expression" {
    };
    try expect(@alignOf(S.P) > 0);
}

test "pointer to array has explicit alignment" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

    const S = struct {
        const Base = extern struct { a: u8 };
        const Base2 = extern struct { a: u8 };
        fn func(ptr: *[4]Base) *align(1) [4]Base2 {
            return @alignCast(1, @ptrCast(*[4]Base2, ptr));
        }
    };
    var bases = [_]S.Base{.{ .a = 2 }} ** 4;
    const casted = S.func(&bases);
    try expect(casted[0].a == 2);
}

@ -1573,3 +1573,8 @@ test "struct fields get automatically reordered" {
    };
    try expect(@sizeOf(S1) == @sizeOf(S2));
}

test "directly initiating tuple like struct" {
    const a = struct { u8 }{8};
    try expect(a[0] == 8);
}

@ -1286,3 +1286,14 @@ test "store to vector in slice" {
    s[i] = s[0];
    try expectEqual(v[1], v[0]);
}

test "addition of vectors represented as strings" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

    const V = @Vector(3, u8);
    const foo: V = "foo".*;
    const bar: V = @typeName(u32).*;
    try expectEqual(V{ 219, 162, 161 }, foo + bar);
}

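Why `{ 219, 162, 161 }`: "foo" is `{ 102, 111, 111 }`, `@typeName(u32)` is "u32" = `{ 117, 51, 50 }`, and the vector addition is element-wise in u8. The same sum spelled out without vectors:

const std = @import("std");

test "element-wise byte addition behind the vector test" {
    const foo = "foo"; // { 102, 111, 111 }
    const bar = "u32"; // { 117, 51, 50 }
    var sum: [3]u8 = undefined;
    for (sum) |*s, i| s.* = foo[i] + bar[i];
    // 102 + 117 = 219, 111 + 51 = 162, 111 + 50 = 161
    try std.testing.expectEqualSlices(u8, &[_]u8{ 219, 162, 161 }, &sum);
}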
@ -1015,3 +1015,15 @@ void __attribute__((stdcall)) stdcall_big_union(union BigUnion x) {
    assert_or_panic(x.a.c == 3);
    assert_or_panic(x.a.d == 4);
}

#ifdef __x86_64__
struct ByRef __attribute__((ms_abi)) c_explict_win64(struct ByRef in) {
    in.val = 42;
    return in;
}

struct ByRef __attribute__((sysv_abi)) c_explict_sys_v(struct ByRef in) {
    in.val = 42;
    return in;
}
#endif

@ -1190,3 +1190,19 @@ test "Stdcall ABI big union" {
    };
    stdcall_big_union(x);
}

extern fn c_explict_win64(ByRef) callconv(.Win64) ByRef;
test "explicit Win64 calling convention" {
    if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;

    const res = c_explict_win64(.{ .val = 1, .arr = undefined });
    try expect(res.val == 42);
}

extern fn c_explict_sys_v(ByRef) callconv(.SysV) ByRef;
test "explicit SysV calling convention" {
    if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;

    const res = c_explict_sys_v(.{ .val = 1, .arr = undefined });
    try expect(res.val == 42);
}

test/cases/compile_errors/bad_member_access_on_tuple.zig | 9 (new file)
@ -0,0 +1,9 @@
comptime {
    _ = @TypeOf(.{}).is_optional;
}

// error
// backend=stage2
// target=native
//
// :2:21: error: struct '@TypeOf(.{})' has no member named 'is_optional'

@ -190,6 +190,11 @@ fn addMachOCases(cases: *tests.StandaloneContext) void {
    .requires_symlinks = true,
});

cases.addBuildFile("test/link/macho/unwind_info/build.zig", .{
    .build_modes = true,
    .requires_symlinks = true,
});

cases.addBuildFile("test/link/macho/uuid/build.zig", .{
    .build_modes = false,
    .requires_symlinks = true,

test/link/macho/unwind_info/all.h | 41 (new file)
@ -0,0 +1,41 @@
#ifndef ALL
#define ALL

#include <cstddef>
#include <string>
#include <stdexcept>

struct SimpleString {
    SimpleString(size_t max_size);
    ~SimpleString();

    void print(const char* tag) const;
    bool append_line(const char* x);

private:
    size_t max_size;
    char* buffer;
    size_t length;
};

struct SimpleStringOwner {
    SimpleStringOwner(const char* x);
    ~SimpleStringOwner();

private:
    SimpleString string;
};

class Error: public std::exception {
public:
    explicit Error(const char* msg) : msg{ msg } {}
    virtual ~Error() noexcept {}
    virtual const char* what() const noexcept {
        return msg.c_str();
    }

protected:
    std::string msg;
};

#endif

test/link/macho/unwind_info/build.zig | 68 (new file)
@ -0,0 +1,68 @@
const std = @import("std");
const builtin = @import("builtin");
const Builder = std.build.Builder;
const LibExeObjectStep = std.build.LibExeObjStep;

pub fn build(b: *Builder) void {
    const mode = b.standardReleaseOptions();
    const target: std.zig.CrossTarget = .{ .os_tag = .macos };

    const test_step = b.step("test", "Test the program");

    testUnwindInfo(b, test_step, mode, target, false);
    testUnwindInfo(b, test_step, mode, target, true);
}

fn testUnwindInfo(
    b: *Builder,
    test_step: *std.build.Step,
    mode: std.builtin.Mode,
    target: std.zig.CrossTarget,
    dead_strip: bool,
) void {
    const exe = createScenario(b, mode, target);
    exe.link_gc_sections = dead_strip;

    const check = exe.checkObject(.macho);
    check.checkStart("segname __TEXT");
    check.checkNext("sectname __gcc_except_tab");
    check.checkNext("sectname __unwind_info");

    switch (builtin.cpu.arch) {
        .aarch64 => {
            check.checkNext("sectname __eh_frame");
        },
        .x86_64 => {}, // We do not expect `__eh_frame` section on x86_64 in this case
        else => unreachable,
    }

    check.checkInSymtab();
    check.checkNext("{*} (__TEXT,__text) external ___gxx_personality_v0");

    const run_cmd = check.runAndCompare();
    run_cmd.expectStdOutEqual(
        \\Constructed: a
        \\Constructed: b
        \\About to destroy: b
        \\About to destroy: a
        \\Error: Not enough memory!
        \\
    );

    test_step.dependOn(&run_cmd.step);
}

fn createScenario(b: *Builder, mode: std.builtin.Mode, target: std.zig.CrossTarget) *LibExeObjectStep {
    const exe = b.addExecutable("test", null);
    b.default_step.dependOn(&exe.step);
    exe.addIncludePath(".");
    exe.addCSourceFiles(&[_][]const u8{
        "main.cpp",
        "simple_string.cpp",
        "simple_string_owner.cpp",
    }, &[0][]const u8{});
    exe.setBuildMode(mode);
    exe.setTarget(target);
    exe.linkLibCpp();
    return exe;
}

test/link/macho/unwind_info/main.cpp | 24 (new file)
@ -0,0 +1,24 @@
#include "all.h"
#include <cstdio>

void fn_c() {
    SimpleStringOwner c{ "cccccccccc" };
}

void fn_b() {
    SimpleStringOwner b{ "b" };
    fn_c();
}

int main() {
    try {
        SimpleStringOwner a{ "a" };
        fn_b();
        SimpleStringOwner d{ "d" };
    } catch (const Error& e) {
        printf("Error: %s\n", e.what());
    } catch (const std::exception& e) {
        printf("Exception: %s\n", e.what());
    }
    return 0;
}

test/link/macho/unwind_info/simple_string.cpp | 30 (new file)
@ -0,0 +1,30 @@
#include "all.h"
#include <cstdio>
#include <cstring>

SimpleString::SimpleString(size_t max_size)
    : max_size{ max_size }, length{} {
    if (max_size == 0) {
        throw Error{ "Max size must be at least 1." };
    }
    buffer = new char[max_size];
    buffer[0] = 0;
}

SimpleString::~SimpleString() {
    delete[] buffer;
}

void SimpleString::print(const char* tag) const {
    printf("%s: %s", tag, buffer);
}

bool SimpleString::append_line(const char* x) {
    const auto x_len = strlen(x);
    if (x_len + length + 2 > max_size) return false;
    std::strncpy(buffer + length, x, max_size - length);
    length += x_len;
    buffer[length++] = '\n';
    buffer[length] = 0;
    return true;
}
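`append_line` needs room for the payload plus a newline and a NUL terminator, hence the `x_len + length + 2` bound. That is exactly why the ten-character owner in `fn_c` throws: with `string{ 10 }`, appending "cccccccccc" needs 12 bytes. The bound restated as a standalone check:

const std = @import("std");

test "capacity bound behind the Not enough memory! output" {
    const max_size: usize = 10; // SimpleStringOwner constructs string{ 10 }
    const length: usize = 0; // nothing appended yet
    const x_len: usize = "cccccccccc".len; // fn_c's argument
    // payload + '\n' + NUL must fit; 10 + 0 + 2 > 10, so append_line
    // fails and the constructor throws Error{ "Not enough memory!" },
    // which the unwind-info test expects on stdout.
    try std.testing.expect(x_len + length + 2 > max_size);
}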
test/link/macho/unwind_info/simple_string_owner.cpp | 12 (new file)
@ -0,0 +1,12 @@
#include "all.h"

SimpleStringOwner::SimpleStringOwner(const char* x) : string{ 10 } {
    if (!string.append_line(x)) {
        throw Error{ "Not enough memory!" };
    }
    string.print("Constructed");
}

SimpleStringOwner::~SimpleStringOwner() {
    string.print("About to destroy");
}

@ -12,18 +12,18 @@ pub fn build(b: *Builder) void {
    .os_tag = .macos,
};

testUuid(b, test_step, .ReleaseSafe, aarch64_macos, "af0f4c21a07c30daba59213d80262e45");
testUuid(b, test_step, .ReleaseFast, aarch64_macos, "af0f4c21a07c30daba59213d80262e45");
testUuid(b, test_step, .ReleaseSmall, aarch64_macos, "af0f4c21a07c30daba59213d80262e45");
testUuid(b, test_step, .ReleaseSafe, aarch64_macos, "675bb6ba8e5d3d3191f7936d7168f0e9");
testUuid(b, test_step, .ReleaseFast, aarch64_macos, "675bb6ba8e5d3d3191f7936d7168f0e9");
testUuid(b, test_step, .ReleaseSmall, aarch64_macos, "675bb6ba8e5d3d3191f7936d7168f0e9");

const x86_64_macos = std.zig.CrossTarget{
    .cpu_arch = .x86_64,
    .os_tag = .macos,
};

testUuid(b, test_step, .ReleaseSafe, x86_64_macos, "63f47191c7153f5fba48bd63cb2f5f57");
testUuid(b, test_step, .ReleaseFast, x86_64_macos, "63f47191c7153f5fba48bd63cb2f5f57");
testUuid(b, test_step, .ReleaseSmall, x86_64_macos, "e7bba66220e33eda9e73ab293ccf93d2");
testUuid(b, test_step, .ReleaseSafe, x86_64_macos, "5b7071b4587c3071b0d2352fadce0e48");
testUuid(b, test_step, .ReleaseFast, x86_64_macos, "5b7071b4587c3071b0d2352fadce0e48");
testUuid(b, test_step, .ReleaseSmall, x86_64_macos, "4b58f2583c383169bbe3a716bd240048");
}

fn testUuid(

@ -31,6 +31,8 @@ pub fn build(b: *Builder) void {

    check.checkInSymtab();
    check.checkNext("(undefined) weak external _a (from liba)");

    check.checkInSymtab();
    check.checkNext("(undefined) weak external _asStr (from liba)");

    const run_cmd = check.runAndCompare();