Merge pull request #17978 from ziglang/elf-x86-tls

x86_64+elf: TLS support
Jakub Konka 2023-11-14 22:09:15 +01:00 committed by GitHub
commit 6fd1c64f23
11 changed files with 349 additions and 132 deletions

View File

@ -1443,6 +1443,9 @@ test "big.int divFloor #11166" {
}
test "big.int gcd #10932" {
// TODO https://github.com/ziglang/zig/issues/17998
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
var a = try Managed.init(testing.allocator);
defer a.deinit();

View File

@ -1107,6 +1107,7 @@ fn formatWipMir(
.cc = .Unspecified,
.src_loc = data.self.src_loc,
};
var first = true;
for ((lower.lowerMir(data.inst) catch |err| switch (err) {
error.LowerFail => {
defer {
@ -1125,7 +1126,11 @@ fn formatWipMir(
return;
},
else => |e| return e,
}).insts) |lowered_inst| try writer.print(" | {}", .{lowered_inst});
}).insts) |lowered_inst| {
if (!first) try writer.writeAll("\ndebug(wip_mir): ");
try writer.print(" | {}", .{lowered_inst});
first = false;
}
}
fn fmtWipMir(self: *Self, inst: Mir.Inst.Index) std.fmt.Formatter(formatWipMir) {
return .{ .data = .{ .self = self, .inst = inst } };
@ -10802,7 +10807,6 @@ fn genCall(self: *Self, info: union(enum) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_zig_got = true;
if (self.bin_file.options.pic) {
const callee_reg: Register = switch (resolved_cc) {
.SysV => callee: {
@ -13382,35 +13386,25 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
},
.lea_direct, .lea_got => |sym_index| {
const atom_index = try self.owner.getSymbolIndex(self);
if (self.bin_file.cast(link.File.Elf)) |_| {
try self.asmRegisterMemory(.{ ._, .lea }, dst_reg.to64(), .{
.base = .{ .reloc = .{
.atom_index = atom_index,
.sym_index = sym_index,
} },
.mod = .{ .rm = .{ .size = .qword } },
});
} else {
_ = try self.addInst(.{
.tag = switch (src_mcv) {
.lea_direct => .lea,
.lea_got => .mov,
else => unreachable,
},
.ops = switch (src_mcv) {
.lea_direct => .direct_reloc,
.lea_got => .got_reloc,
else => unreachable,
},
.data = .{ .rx = .{
.r1 = dst_reg.to64(),
.payload = try self.addExtra(bits.Symbol{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
}
_ = try self.addInst(.{
.tag = switch (src_mcv) {
.lea_direct => .lea,
.lea_got => .mov,
else => unreachable,
},
.ops = switch (src_mcv) {
.lea_direct => .direct_reloc,
.lea_got => .got_reloc,
else => unreachable,
},
.data = .{ .rx = .{
.r1 = dst_reg.to64(),
.payload = try self.addExtra(bits.Symbol{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
},
.lea_tlv => |sym_index| {
const atom_index = try self.owner.getSymbolIndex(self);
@ -13690,7 +13684,6 @@ fn genLazySymbolRef(
const sym_index = elf_file.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(elf_file, lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = elf_file.symbol(sym_index);
sym.flags.needs_zig_got = true;
if (self.bin_file.options.pic) {
switch (tag) {
.lea, .call => try self.genSetReg(reg, Type.usize, .{
@ -15810,11 +15803,30 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!MCValue {
} else mcv: {
const ip_index = Air.refToInterned(ref).?;
const gop = try self.const_tracking.getOrPut(self.gpa, ip_index);
const mcv = try self.genTypedValue(.{
.ty = ty,
.val = ip_index.toValue(),
});
if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(mcv);
if (!gop.found_existing) gop.value_ptr.* = InstTracking.init(init: {
const const_mcv = try self.genTypedValue(.{ .ty = ty, .val = ip_index.toValue() });
switch (const_mcv) {
.lea_tlv => |tlv_sym| if (self.bin_file.cast(link.File.Elf)) |_| {
if (self.bin_file.options.pic) {
try self.spillRegisters(&.{ .rdi, .rax });
} else {
try self.spillRegisters(&.{.rax});
}
const frame_index = try self.allocFrameIndex(FrameAlloc.init(.{
.size = 8,
.alignment = .@"8",
}));
try self.genSetMem(
.{ .frame = frame_index },
0,
Type.usize,
.{ .lea_symbol = .{ .sym = tlv_sym } },
);
break :init .{ .load_frame = .{ .index = frame_index } };
} else break :init const_mcv,
else => break :init const_mcv,
}
});
break :mcv gop.value_ptr.short;
};

View File

@ -84,6 +84,24 @@ pub fn emitMir(emit: *Emit) Error!void {
} else return emit.fail("TODO implement extern reloc for {s}", .{
@tagName(emit.lower.bin_file.tag),
}),
.linker_tlsld => |data| {
const elf_file = emit.lower.bin_file.cast(link.File.Elf).?;
const atom = elf_file.symbol(data.atom_index).atom(elf_file).?;
try atom.addReloc(elf_file, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(data.sym_index)) << 32) | std.elf.R_X86_64_TLSLD,
.r_addend = -4,
});
},
.linker_dtpoff => |data| {
const elf_file = emit.lower.bin_file.cast(link.File.Elf).?;
const atom = elf_file.symbol(data.atom_index).atom(elf_file).?;
try atom.addReloc(elf_file, .{
.r_offset = end_offset - 4,
.r_info = (@as(u64, @intCast(data.sym_index)) << 32) | std.elf.R_X86_64_DTPOFF32,
.r_addend = 0,
});
},
.linker_reloc => |data| if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
const is_obj_or_static_lib = switch (emit.lower.bin_file.options.output_mode) {
.Exe => false,
@ -120,6 +138,8 @@ pub fn emitMir(emit: *Emit) Error!void {
link.File.Elf.R_X86_64_ZIG_GOT32
else if (sym.flags.needs_got)
std.elf.R_X86_64_GOT32
else if (sym.flags.is_tls)
std.elf.R_X86_64_TPOFF32
else
std.elf.R_X86_64_32;
try atom.addReloc(elf_file, .{
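Note: below is a small sketch, not part of this commit, of how these relocation records are encoded. ELF64 RELA packs the target symbol index into the high 32 bits of r_info and the relocation type into the low 32 bits; the -4 addend on the TLSLD record accounts for the 4-byte immediate field ending 4 bytes before the next instruction. The makeRela helper is hypothetical; the std.elf names are real.

const std = @import("std");

// Hypothetical helper mirroring the packing used above:
// high 32 bits of r_info = symbol index, low 32 bits = relocation type.
fn makeRela(sym_index: u32, reloc_type: u32, offset: u64, addend: i64) std.elf.Elf64_Rela {
    return .{
        .r_offset = offset,
        .r_info = (@as(u64, sym_index) << 32) | reloc_type,
        .r_addend = addend,
    };
}

test "r_info packing round-trips" {
    const rela = makeRela(7, std.elf.R_X86_64_TLSLD, 0x10, -4);
    try std.testing.expectEqual(@as(u32, 7), @as(u32, @truncate(rela.r_info >> 32)));
    try std.testing.expectEqual(@as(u32, std.elf.R_X86_64_TLSLD), @as(u32, @truncate(rela.r_info)));
}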

View File

@ -11,6 +11,7 @@ result_relocs_len: u8 = undefined,
result_insts: [
std.mem.max(usize, &.{
1, // non-pseudo instructions
3, // TLS local dynamic (LD) sequence in PIC mode
2, // cmovcc: cmovcc \ cmovcc
3, // setcc: setcc \ setcc \ logicop
2, // jcc: jcc \ jcc
@ -28,6 +29,7 @@ result_relocs: [
2, // jcc: jcc \ jcc
2, // test \ jcc \ probe \ sub \ jmp
1, // probe \ sub \ jcc
3, // TLS local dynamic (LD) sequence in PIC mode
})
]Reloc = undefined,
@ -51,6 +53,8 @@ pub const Reloc = struct {
const Target = union(enum) {
inst: Mir.Inst.Index,
linker_reloc: bits.Symbol,
linker_tlsld: bits.Symbol,
linker_dtpoff: bits.Symbol,
linker_extern_fn: bits.Symbol,
linker_got: bits.Symbol,
linker_direct: bits.Symbol,
@ -319,20 +323,25 @@ fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
return Immediate.s(0);
}
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
const needsZigGot = struct {
fn needsZigGot(sym: bits.Symbol, ctx: *link.File) bool {
const elf_file = ctx.cast(link.File.Elf).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(sym.sym_index);
return elf_file.symbol(sym_index).flags.needs_zig_got;
}
}.needsZigGot;
fn needsZigGot(sym: bits.Symbol, ctx: *link.File) bool {
const elf_file = ctx.cast(link.File.Elf).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(sym.sym_index);
return elf_file.symbol(sym_index).flags.needs_zig_got;
}
fn isTls(sym: bits.Symbol, ctx: *link.File) bool {
const elf_file = ctx.cast(link.File.Elf).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(sym.sym_index);
return elf_file.symbol(sym_index).flags.is_tls;
}
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
const is_obj_or_static_lib = switch (lower.bin_file.options.output_mode) {
.Exe => false,
.Obj => true,
.Lib => lower.bin_file.options.link_mode == .Static,
};
var emit_prefix = prefix;
var emit_mnemonic = mnemonic;
var emit_ops_storage: [4]Operand = undefined;
@ -346,6 +355,53 @@ fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand)
assert(prefix == .none);
assert(mem_op.sib.disp == 0);
assert(mem_op.sib.scale_index.scale == 0);
if (isTls(sym, lower.bin_file)) {
// TODO handle extern TLS vars, i.e., emit GD model
if (lower.bin_file.options.pic) {
// Here, we currently assume local dynamic TLS vars, and so
// we emit LD model.
_ = lower.reloc(.{ .linker_tlsld = sym });
lower.result_insts[lower.result_insts_len] =
try Instruction.new(.none, .lea, &[_]Operand{
.{ .reg = .rdi },
.{ .mem = Memory.rip(mem_op.sib.ptr_size, 0) },
});
lower.result_insts_len += 1;
if (lower.bin_file.cast(link.File.Elf)) |elf_file| {
_ = lower.reloc(.{ .linker_extern_fn = .{
.atom_index = sym.atom_index,
.sym_index = try elf_file.getGlobalSymbol("__tls_get_addr", null),
} });
}
lower.result_insts[lower.result_insts_len] =
try Instruction.new(.none, .call, &[_]Operand{
.{ .imm = Immediate.s(0) },
});
lower.result_insts_len += 1;
_ = lower.reloc(.{ .linker_dtpoff = sym });
emit_mnemonic = .lea;
break :op .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{
.base = .{ .reg = .rax },
.disp = std.math.minInt(i32),
}) };
} else {
// Since we are linking statically, we emit LE model directly.
lower.result_insts[lower.result_insts_len] =
try Instruction.new(.none, .mov, &[_]Operand{
.{ .reg = .rax },
.{ .mem = Memory.sib(.qword, .{ .base = .{ .reg = .fs } }) },
});
lower.result_insts_len += 1;
_ = lower.reloc(.{ .linker_reloc = sym });
emit_mnemonic = .lea;
break :op .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{
.base = .{ .reg = .rax },
.disp = std.math.minInt(i32),
}) };
}
}
_ = lower.reloc(.{ .linker_reloc = sym });
break :op if (lower.bin_file.options.pic) switch (mnemonic) {
.lea => {
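Note: a minimal sketch, not part of this commit, of Zig code that exercises the new lowering. The instruction sequences in the comments reflect the LD and LE models emitted above; <dst> stands in for whatever destination register codegen picked.

const std = @import("std");

threadlocal var counter: u32 = 0;

pub fn main() void {
    // PIC build: the address of `counter` is obtained via the local-dynamic
    // (LD) sequence lowered above:
    //   lea   rdi, [rip + counter@tlsld]
    //   call  __tls_get_addr
    //   lea   <dst>, [rax + counter@dtpoff]
    // Non-PIC (static) build: the local-exec (LE) sequence is emitted instead:
    //   mov   rax, qword ptr fs:[0]
    //   lea   <dst>, [rax + counter@tpoff]
    counter += 1;
    std.debug.print("counter = {d}\n", .{counter});
}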

View File

@ -912,7 +912,9 @@ fn genDeclRef(
}
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_zig_got = true;
if (is_threadlocal) {
return GenResult.mcv(.{ .load_tlv = sym.esym_index });
}
return GenResult.mcv(.{ .load_symbol = sym.esym_index });
} else if (bin_file.cast(link.File.MachO)) |macho_file| {
if (is_extern) {

View File

@ -388,7 +388,6 @@ fn populateMissingMetadata(self: *Coff) !void {
self.rdata_section_index = try self.allocateSection(".rdata", file_size, .{
.CNT_INITIALIZED_DATA = 1,
.MEM_READ = 1,
.MEM_WRITE = 1,
});
}
@ -920,7 +919,7 @@ fn markRelocsDirtyByTarget(self: *Coff, target: SymbolWithLoc) void {
fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
const got_moved = blk: {
const sect_id = self.got_section_index orelse break :blk false;
break :blk self.sections.items(.header)[sect_id].virtual_address > addr;
break :blk self.sections.items(.header)[sect_id].virtual_address >= addr;
};
// TODO: dirty relocations targeting import table if that got moved in memory
@ -931,7 +930,7 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
reloc.dirty = reloc.dirty or got_moved;
} else {
const target_vaddr = reloc.getTargetAddress(self) orelse continue;
if (target_vaddr > addr) reloc.dirty = true;
if (target_vaddr >= addr) reloc.dirty = true;
}
}
}
@ -939,7 +938,7 @@ fn markRelocsDirtyByAddress(self: *Coff, addr: u32) void {
// TODO: dirty only really affected GOT cells
for (self.got_table.entries.items) |entry| {
const target_addr = self.getSymbol(entry).value;
if (target_addr > addr) {
if (target_addr >= addr) {
self.got_table_contents_dirty = true;
break;
}
@ -1722,6 +1721,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
var code = std.ArrayList(u8).init(gpa);
defer code.deinit();
try code.resize(math.cast(usize, atom.size) orelse return error.Overflow);
assert(atom.size > 0);
const amt = try self.base.file.?.preadAll(code.items, file_offset);
if (amt != code.items.len) return error.InputOutput;

View File

@ -4512,7 +4512,10 @@ fn allocateAllocSections(self: *Elf) error{OutOfMemory}!void {
for (cover.items) |shndx| {
const shdr = &self.shdrs.items[shndx];
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (shdr.sh_type == elf.SHT_NOBITS) {
shdr.sh_offset = 0;
continue;
}
off = alignment.@"align"(shndx, shdr.sh_addralign, off);
shdr.sh_offset = off;
off += shdr.sh_size;
@ -4528,7 +4531,10 @@ fn allocateAllocSectionsObject(self: *Elf) !void {
for (self.shdrs.items) |*shdr| {
if (shdr.sh_type == elf.SHT_NULL) continue;
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
if (shdr.sh_type == elf.SHT_NOBITS) continue;
if (shdr.sh_type == elf.SHT_NOBITS) {
shdr.sh_offset = 0;
continue;
}
const needed_size = shdr.sh_size;
if (needed_size > self.allocatedSize(shdr.sh_offset)) {
shdr.sh_size = 0;
@ -4640,6 +4646,9 @@ fn allocateSpecialPhdrs(self: *Elf) void {
}
fn allocateAtoms(self: *Elf) void {
if (self.zigObjectPtr()) |zig_object| {
zig_object.allocateTlvAtoms(self);
}
for (self.objects.items) |index| {
self.file(index).?.object.allocateAtoms(self);
}
@ -4698,7 +4707,6 @@ fn writeAtoms(self: *Elf) !void {
const atom_ptr = self.atom(atom_index).?;
assert(atom_ptr.flags.alive);
const object = atom_ptr.file(self).?.object;
const offset = math.cast(usize, atom_ptr.value - shdr.sh_addr - base_offset) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
@ -4707,7 +4715,11 @@ fn writeAtoms(self: *Elf) !void {
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = try object.codeDecompressAlloc(self, atom_index);
const in_code = switch (atom_ptr.file(self).?) {
.object => |x| try x.codeDecompressAlloc(self, atom_index),
.zig_object => |x| try x.codeAlloc(self, atom_index),
else => unreachable,
};
defer gpa.free(in_code);
@memcpy(out_code, in_code);
@ -4774,7 +4786,6 @@ fn writeAtomsObject(self: *Elf) !void {
const atom_ptr = self.atom(atom_index).?;
assert(atom_ptr.flags.alive);
const object = atom_ptr.file(self).?.object;
const offset = math.cast(usize, atom_ptr.value - shdr.sh_addr - base_offset) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
@ -4787,7 +4798,11 @@ fn writeAtomsObject(self: *Elf) !void {
// TODO decompress directly into provided buffer
const out_code = buffer[offset..][0..size];
const in_code = try object.codeDecompressAlloc(self, atom_index);
const in_code = switch (atom_ptr.file(self).?) {
.object => |x| try x.codeDecompressAlloc(self, atom_index),
.zig_object => |x| try x.codeAlloc(self, atom_index),
else => unreachable,
};
defer gpa.free(in_code);
@memcpy(out_code, in_code);
}
@ -6250,8 +6265,9 @@ fn fmtDumpState(
try writer.print("linker_defined({d}) : (linker defined)\n", .{index});
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
}
try writer.print("{}\n", .{self.got.fmt(self)});
try writer.print("{}\n", .{self.zig_got.fmt(self)});
try writer.print("{}\n", .{self.got.fmt(self)});
try writer.print("{}\n", .{self.plt.fmt(self)});
try writer.writeAll("Output COMDAT groups\n");
for (self.comdat_group_sections.items) |cg| {

View File

@ -399,6 +399,11 @@ pub const Flags = packed struct {
/// Whether the symbol contains .zig.got indirection.
needs_zig_got: bool = false,
has_zig_got: bool = false,
/// Whether the symbol is a TLS variable.
/// TODO this is really not needed if only we operated on esyms between
/// codegen and ZigObject.
is_tls: bool = false,
};
pub const Extra = struct {

View File

@ -29,6 +29,9 @@ lazy_syms: LazySymbolTable = .{},
/// Table of tracked Decls.
decls: DeclTable = .{},
/// TLS variables indexed by Atom.Index.
tls_variables: TlsTable = .{},
/// Table of unnamed constants associated with a parent `Decl`.
/// We store them here so that we can free the constants whenever the `Decl`
/// needs updating or is freed.
@ -137,6 +140,11 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void {
self.anon_decls.deinit(allocator);
}
for (self.tls_variables.values()) |*tlv| {
tlv.deinit(allocator);
}
self.tls_variables.deinit(allocator);
if (self.dwarf) |*dw| {
dw.deinit();
}
@ -212,8 +220,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf) !void {
self.saveDebugSectionsSizes(elf_file);
}
try self.sortSymbols(elf_file);
// The point of flushModule() is to commit changes, so in theory, nothing should
// be dirty after this. However, it is possible for some things to remain
// dirty because they fail to be written in the event of compile errors,
@ -280,6 +286,7 @@ pub fn addAtom(self: *ZigObject, elf_file: *Elf) !Symbol.Index {
self.local_esyms.items(.elf_sym)[esym_index].st_shndx = SHN_ATOM;
symbol_ptr.esym_index = esym_index;
// TODO I'm thinking that maybe we shouldn't set this value unless it's actually needed?
const relocs_index = @as(u32, @intCast(self.relocs.items.len));
const relocs = try self.relocs.addOne(gpa);
relocs.* = .{};
@ -388,6 +395,19 @@ pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void {
}
}
pub fn allocateTlvAtoms(self: ZigObject, elf_file: *Elf) void {
for (self.tls_variables.keys(), self.tls_variables.values()) |atom_index, tlv| {
const atom = elf_file.atom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
const local = elf_file.symbol(tlv.symbol_index);
const shdr = elf_file.shdrs.items[atom.output_section_index];
atom.value += shdr.sh_addr;
local.value = atom.value;
// TODO exported TLS vars
}
}
pub fn scanRelocs(self: *ZigObject, elf_file: *Elf, undefs: anytype) !void {
for (self.atoms.items) |atom_index| {
const atom = elf_file.atom(atom_index) orelse continue;
@ -421,72 +441,6 @@ pub fn markLive(self: *ZigObject, elf_file: *Elf) void {
}
}
fn sortSymbols(self: *ZigObject, elf_file: *Elf) error{OutOfMemory}!void {
_ = self;
_ = elf_file;
// const Entry = struct {
// index: Symbol.Index,
// const Ctx = struct {
// zobj: ZigObject,
// efile: *Elf,
// };
// pub fn lessThan(ctx: Ctx, lhs: @This(), rhs: @This()) bool {
// const lhs_sym = ctx.efile.symbol(zobj.symbol(lhs.index));
// const rhs_sym = ctx.efile.symbol(zobj.symbol(rhs.index));
// if (lhs_sym.outputShndx() != null and rhs_sym.outputShndx() != null) {
// if (lhs_sym.output_section_index == rhs_sym.output_section_index) {
// if (lhs_sym.value == rhs_sym.value) {
// return lhs_sym.name_offset < rhs_sym.name_offset;
// }
// return lhs_sym.value < rhs_sym.value;
// }
// return lhs_sym.output_section_index < rhs_sym.output_section_index;
// }
// if (lhs_sym.outputShndx() != null) {
// if (rhs_sym.isAbs(ctx.efile)) return false;
// return true;
// }
// return false;
// }
// };
// const gpa = elf_file.base.allocator;
// {
// const sorted = try gpa.alloc(Entry, self.local_symbols.items.len);
// defer gpa.free(sorted);
// for (0..self.local_symbols.items.len) |index| {
// sorted[i] = .{ .index = @as(Symbol.Index, @intCast(index)) };
// }
// mem.sort(Entry, sorted, .{ .zobj = self, .efile = elf_file }, Entry.lessThan);
// const backlinks = try gpa.alloc(Symbol.Index, sorted.len);
// defer gpa.free(backlinks);
// for (sorted, 0..) |entry, i| {
// backlinks[entry.index] = @as(Symbol.Index, @intCast(i));
// }
// const local_symbols = try self.local_symbols.toOwnedSlice(gpa);
// defer gpa.free(local_symbols);
// try self.local_symbols.ensureTotalCapacityPrecise(gpa, local_symbols.len);
// for (sorted) |entry| {
// self.local_symbols.appendAssumeCapacity(local_symbols[entry.index]);
// }
// for (self.)
// }
// const sorted_globals = try gpa.alloc(Entry, self.global_symbols.items.len);
// defer gpa.free(sorted_globals);
// for (self.global_symbols.items, 0..) |index, i| {
// sorted_globals[i] = .{ .index = index };
// }
// mem.sort(Entry, sorted_globals, elf_file, Entry.lessThan);
}
pub fn updateArSymtab(self: ZigObject, ar_symtab: *Archive.ArSymtab, elf_file: *Elf) error{OutOfMemory}!void {
const gpa = elf_file.base.allocator;
@ -537,7 +491,9 @@ pub fn addAtomsToRelaSections(self: ZigObject, elf_file: *Elf) !void {
for (self.atoms.items) |atom_index| {
const atom = elf_file.atom(atom_index) orelse continue;
if (!atom.flags.alive) continue;
_ = atom.relocsShndx() orelse continue;
const rela_shndx = atom.relocsShndx() orelse continue;
// TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level
if (self.relocs.items[rela_shndx].items.len == 0) continue;
const out_shndx = atom.outputShndx().?;
const out_shdr = elf_file.shdrs.items[out_shndx];
if (out_shdr.sh_type == elf.SHT_NOBITS) continue;
@ -583,6 +539,13 @@ pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8
const atom = elf_file.atom(atom_index).?;
assert(atom.file_index == self.index);
const shdr = &elf_file.shdrs.items[atom.outputShndx().?];
if (shdr.sh_flags & elf.SHF_TLS != 0) {
const tlv = self.tls_variables.get(atom_index).?;
const code = try gpa.dupe(u8, tlv.code);
return code;
}
const file_offset = shdr.sh_offset + atom.value - shdr.sh_addr;
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
@ -705,7 +668,12 @@ pub fn getOrCreateMetadataForLazySymbol(
},
};
switch (metadata.state.*) {
.unused => metadata.symbol_index.* = try self.addAtom(elf_file),
.unused => {
const symbol_index = try self.addAtom(elf_file);
const sym = elf_file.symbol(symbol_index);
sym.flags.needs_zig_got = true;
metadata.symbol_index.* = symbol_index;
},
.pending_flush => return metadata.symbol_index.*,
.flushed => {},
}
@ -760,20 +728,55 @@ pub fn getOrCreateMetadataForDecl(
) !Symbol.Index {
const gop = try self.decls.getOrPut(elf_file.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{ .symbol_index = try self.addAtom(elf_file) };
const single_threaded = elf_file.base.options.single_threaded;
const symbol_index = try self.addAtom(elf_file);
const mod = elf_file.base.options.module.?;
const decl = mod.declPtr(decl_index);
const sym = elf_file.symbol(symbol_index);
if (decl.getOwnedVariable(mod)) |variable| {
if (variable.is_threadlocal and !single_threaded) {
sym.flags.is_tls = true;
}
}
if (!sym.flags.is_tls) {
sym.flags.needs_zig_got = true;
}
gop.value_ptr.* = .{ .symbol_index = symbol_index };
}
return gop.value_ptr.symbol_index;
}
fn getDeclShdrIndex(self: *ZigObject, elf_file: *Elf, decl_index: Module.Decl.Index, code: []const u8) u16 {
fn getDeclShdrIndex(
self: *ZigObject,
elf_file: *Elf,
decl: *const Module.Decl,
code: []const u8,
) error{OutOfMemory}!u16 {
_ = self;
const mod = elf_file.base.options.module.?;
const decl = mod.declPtr(decl_index);
const single_threaded = elf_file.base.options.single_threaded;
const shdr_index = switch (decl.ty.zigTypeTag(mod)) {
// TODO: what if this is a function pointer?
.Fn => elf_file.zig_text_section_index.?,
else => blk: {
if (decl.getOwnedVariable(mod)) |variable| {
if (variable.is_threadlocal and !single_threaded) {
const is_all_zeroes = for (code) |byte| {
if (byte != 0) break false;
} else true;
if (is_all_zeroes) break :blk elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{
.type = elf.SHT_NOBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
.name = ".tbss",
.offset = std.math.maxInt(u64),
});
break :blk elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{
.type = elf.SHT_PROGBITS,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS,
.name = ".tdata",
.offset = std.math.maxInt(u64),
});
}
if (variable.is_const) break :blk elf_file.zig_data_rel_ro_section_index.?;
if (variable.init.toValue().isUndefDeep(mod)) {
const mode = elf_file.base.options.optimize_mode;
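Note: hypothetical declarations, not from this commit, illustrating the section choice above: an all-zero initializer is placed in .tbss (SHT_NOBITS), any other initializer in .tdata (SHT_PROGBITS).

threadlocal var zeroed: [64]u8 = [_]u8{0} ** 64; // all-zero initializer => .tbss
threadlocal var seeded: u32 = 0xdead_beef; // non-zero initializer => .tdata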
@ -799,6 +802,7 @@ fn updateDeclCode(
elf_file: *Elf,
decl_index: Module.Decl.Index,
sym_index: Symbol.Index,
shdr_index: u16,
code: []const u8,
stt_bits: u8,
) !void {
@ -815,7 +819,6 @@ fn updateDeclCode(
const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index];
const atom_ptr = sym.atom(elf_file).?;
const shdr_index = self.getDeclShdrIndex(elf_file, decl_index, code);
sym.output_section_index = shdr_index;
atom_ptr.output_section_index = shdr_index;
@ -893,6 +896,58 @@ fn updateDeclCode(
}
}
fn updateTlv(
self: *ZigObject,
elf_file: *Elf,
decl_index: Module.Decl.Index,
sym_index: Symbol.Index,
shndx: u16,
code: []const u8,
) !void {
const gpa = elf_file.base.allocator;
const mod = elf_file.base.options.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateTlv {s} ({*})", .{ decl_name, decl });
const required_alignment = decl.getAlignment(mod);
const sym = elf_file.symbol(sym_index);
const esym = &self.local_esyms.items(.elf_sym)[sym.esym_index];
const atom_ptr = sym.atom(elf_file).?;
sym.output_section_index = shndx;
atom_ptr.output_section_index = shndx;
sym.name_offset = try self.strtab.insert(gpa, decl_name);
atom_ptr.flags.alive = true;
atom_ptr.name_offset = sym.name_offset;
esym.st_name = sym.name_offset;
esym.st_info = elf.STT_TLS;
esym.st_size = code.len;
atom_ptr.alignment = required_alignment;
atom_ptr.size = code.len;
{
const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index);
assert(!gop.found_existing); // TODO incremental updates
gop.value_ptr.* = .{ .symbol_index = sym_index };
// We only store the data for the TLV if it's non-zerofill.
if (elf_file.shdrs.items[shndx].sh_type != elf.SHT_NOBITS) {
gop.value_ptr.code = try gpa.dupe(u8, code);
}
}
{
const gop = try elf_file.output_sections.getOrPut(gpa, atom_ptr.output_section_index);
if (!gop.found_existing) gop.value_ptr.* = .{};
try gop.value_ptr.append(gpa, atom_ptr.atom_index);
}
}
pub fn updateFunc(
self: *ZigObject,
elf_file: *Elf,
@ -947,7 +1002,10 @@ pub fn updateFunc(
return;
},
};
try self.updateDeclCode(elf_file, decl_index, sym_index, code, elf.STT_FUNC);
const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_FUNC);
if (decl_state) |*ds| {
const sym = elf_file.symbol(sym_index);
try self.dwarf.?.commitDeclState(
@ -1026,7 +1084,12 @@ pub fn updateDecl(
},
};
try self.updateDeclCode(elf_file, decl_index, sym_index, code, elf.STT_OBJECT);
const shndx = try self.getDeclShdrIndex(elf_file, decl, code);
if (elf_file.shdrs.items[shndx].sh_flags & elf.SHF_TLS != 0)
try self.updateTlv(elf_file, decl_index, sym_index, shndx, code)
else
try self.updateDeclCode(elf_file, decl_index, sym_index, shndx, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
const sym = elf_file.symbol(sym_index);
try self.dwarf.?.commitDeclState(
@ -1454,11 +1517,21 @@ const DeclMetadata = struct {
}
};
const TlsVariable = struct {
symbol_index: Symbol.Index,
code: []const u8 = &[0]u8{},
fn deinit(tlv: *TlsVariable, allocator: Allocator) void {
allocator.free(tlv.code);
}
};
const AtomList = std.ArrayListUnmanaged(Atom.Index);
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Symbol.Index));
const DeclTable = std.AutoHashMapUnmanaged(Module.Decl.Index, DeclMetadata);
const AnonDeclTable = std.AutoHashMapUnmanaged(InternPool.Index, DeclMetadata);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata);
const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable);
const assert = std.debug.assert;
const builtin = @import("builtin");

View File

@ -935,6 +935,36 @@ pub const PltSection = struct {
ilocal += 1;
}
}
const FormatCtx = struct {
plt: PltSection,
elf_file: *Elf,
};
pub fn fmt(plt: PltSection, elf_file: *Elf) std.fmt.Formatter(format2) {
return .{ .data = .{ .plt = plt, .elf_file = elf_file } };
}
pub fn format2(
ctx: FormatCtx,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
_ = unused_fmt_string;
try writer.writeAll("PLT\n");
for (ctx.plt.symbols.items, 0..) |symbol_index, i| {
const symbol = ctx.elf_file.symbol(symbol_index);
try writer.print(" {d}@0x{x} => {d}@0x{x} ({s})\n", .{
i,
symbol.pltAddress(ctx.elf_file),
symbol_index,
symbol.address(.{}, ctx.elf_file),
symbol.name(ctx.elf_file),
});
}
}
};
pub const GotPltSection = struct {

View File

@ -654,7 +654,7 @@ pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend
pub fn supportsThreads(target: std.Target, backend: std.builtin.CompilerBackend) bool {
return switch (backend) {
.stage2_x86_64 => target.ofmt == .macho,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
};
}
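Note: a minimal sketch, not part of this commit, of the kind of threaded TLS usage this change unblocks for the self-hosted x86_64 backend on ELF targets.

const std = @import("std");

threadlocal var id: u32 = 0;

fn worker(value: u32) void {
    id = value; // each thread writes its own copy
    std.debug.assert(id == value);
}

test "each thread gets its own TLS copy" {
    const t1 = try std.Thread.spawn(.{}, worker, .{1});
    const t2 = try std.Thread.spawn(.{}, worker, .{2});
    t1.join();
    t2.join();
    try std.testing.expectEqual(@as(u32, 0), id); // main thread's copy is untouched
}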