elf: create new synthetic section ZigGotSection

This commit is contained in:
Jakub Konka 2023-10-13 14:54:52 +02:00
parent 9b6337ab06
commit 7be983ac92
10 changed files with 341 additions and 122 deletions

View File

@ -4318,8 +4318,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .x30, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom = try macho_file.getOrCreateAtomForDecl(func.owner_decl);

View File

@ -4304,8 +4304,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO

View File

@ -1754,8 +1754,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.func => |func| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const got_addr = @as(u32, @intCast(sym.zigGotAddress(elf_file)));
try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,

View File

@ -1349,8 +1349,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const sym_index = try elf_file.getOrCreateMetadataForDecl(func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
break :blk @as(u32, @intCast(sym.gotAddress(elf_file)));
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
break :blk @as(u32, @intCast(sym.zigGotAddress(elf_file)));
} else unreachable;
try self.genSetReg(Type.usize, .o7, .{ .memory = got_addr });

View File

@ -9189,8 +9189,7 @@ fn genCall(self: *Self, info: union(enum) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(owner_decl);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
_ = try self.addInst(.{
.tag = .call,
.ops = .direct_got_reloc,
@ -11637,8 +11636,7 @@ fn genLazySymbolRef(
const sym_index = elf_file.getOrCreateMetadataForLazySymbol(lazy_sym) catch |err|
return self.fail("{s} creating lazy symbol", .{@errorName(err)});
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
const reloc = Mir.Reloc{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,

View File

@ -889,9 +889,8 @@ fn genDeclRef(
if (bin_file.cast(link.File.Elf)) |elf_file| {
const sym_index = try elf_file.getOrCreateMetadataForDecl(decl_index);
const sym = elf_file.symbol(sym_index);
sym.flags.needs_got = true;
_ = try sym.getOrCreateGotEntry(sym_index, elf_file);
return GenResult.mcv(.{ .memory = sym.gotAddress(elf_file) });
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
return GenResult.mcv(.{ .memory = sym.zigGotAddress(elf_file) });
} else if (bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;

View File

@ -33,16 +33,16 @@ phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
/// Tracked loadable segments during incremental linking.
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_zig_index: ?u16 = null,
phdr_zig_load_re_index: ?u16 = null,
/// The index into the program headers of the global offset table.
/// It needs PT_LOAD and Read flags.
phdr_got_zig_index: ?u16 = null,
phdr_zig_got_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Read flag
phdr_load_ro_zig_index: ?u16 = null,
phdr_zig_load_ro_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Write flag
phdr_load_rw_zig_index: ?u16 = null,
phdr_zig_load_rw_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with zerofill data.
phdr_load_zerofill_zig_index: ?u16 = null,
phdr_zig_load_zerofill_index: ?u16 = null,
/// Special program headers
/// PT_PHDR
@ -99,13 +99,15 @@ plt_got: PltGotSection = .{},
copy_rel: CopyRelSection = .{},
/// .rela.plt section
rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
/// .zig.got section
zig_got: ZigGotSection = .{},
/// Tracked section headers with incremental updates to Zig module
text_zig_section_index: ?u16 = null,
rodata_zig_section_index: ?u16 = null,
data_zig_section_index: ?u16 = null,
bss_zig_section_index: ?u16 = null,
got_zig_section_index: ?u16 = null,
zig_text_section_index: ?u16 = null,
zig_rodata_section_index: ?u16 = null,
zig_data_section_index: ?u16 = null,
zig_bss_section_index: ?u16 = null,
zig_got_section_index: ?u16 = null,
debug_info_section_index: ?u16 = null,
debug_abbrev_section_index: ?u16 = null,
debug_str_section_index: ?u16 = null,
@ -479,7 +481,7 @@ pub fn lowerAnonDecl(self: *Elf, decl_val: InternPool.Index, src_loc: Module.Src
const tv = TypedValue{ .ty = ty, .val = val };
const name = try std.fmt.allocPrint(gpa, "__anon_{d}", .{@intFromEnum(decl_val)});
defer gpa.free(name);
const res = self.lowerConst(name, tv, self.rodata_zig_section_index.?, src_loc) catch |err| switch (err) {
const res = self.lowerConst(name, tv, self.zig_rodata_section_index.?, src_loc) catch |err| switch (err) {
else => {
// TODO improve error message
const em = try Module.ErrorMsg.create(gpa, src_loc, "lowerAnonDecl failed with error: {s}", .{
@ -687,8 +689,8 @@ pub fn initMetadata(self: *Elf) !void {
const ptr_bit_width = self.base.options.target.ptrBitWidth();
const is_linux = self.base.options.target.os.tag == .linux;
if (self.phdr_load_re_zig_index == null) {
self.phdr_load_re_zig_index = try self.allocateSegment(.{
if (self.phdr_zig_load_re_index == null) {
self.phdr_zig_load_re_index = try self.allocateSegment(.{
.addr = if (ptr_bit_width >= 32) 0x8000000 else 0x8000,
.memsz = self.base.options.program_code_size_hint,
.filesz = self.base.options.program_code_size_hint,
@ -697,11 +699,11 @@ pub fn initMetadata(self: *Elf) !void {
});
}
if (self.phdr_got_zig_index == null) {
if (self.phdr_zig_got_index == null) {
// We really only need ptr alignment but since we are using PROGBITS, linux requires
// page align.
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
self.phdr_got_zig_index = try self.allocateSegment(.{
self.phdr_zig_got_index = try self.allocateSegment(.{
.addr = if (ptr_bit_width >= 32) 0x4000000 else 0x4000,
.memsz = @as(u64, ptr_size) * self.base.options.symbol_count_hint,
.filesz = @as(u64, ptr_size) * self.base.options.symbol_count_hint,
@ -710,9 +712,9 @@ pub fn initMetadata(self: *Elf) !void {
});
}
if (self.phdr_load_ro_zig_index == null) {
if (self.phdr_zig_load_ro_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
self.phdr_load_ro_zig_index = try self.allocateSegment(.{
self.phdr_zig_load_ro_index = try self.allocateSegment(.{
.addr = if (ptr_bit_width >= 32) 0xc000000 else 0xa000,
.memsz = 1024,
.filesz = 1024,
@ -721,9 +723,9 @@ pub fn initMetadata(self: *Elf) !void {
});
}
if (self.phdr_load_rw_zig_index == null) {
if (self.phdr_zig_load_rw_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
self.phdr_load_rw_zig_index = try self.allocateSegment(.{
self.phdr_zig_load_rw_index = try self.allocateSegment(.{
.addr = if (ptr_bit_width >= 32) 0x10000000 else 0xc000,
.memsz = 1024,
.filesz = 1024,
@ -732,64 +734,64 @@ pub fn initMetadata(self: *Elf) !void {
});
}
if (self.phdr_load_zerofill_zig_index == null) {
if (self.phdr_zig_load_zerofill_index == null) {
const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
self.phdr_load_zerofill_zig_index = try self.allocateSegment(.{
self.phdr_zig_load_zerofill_index = try self.allocateSegment(.{
.addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
.memsz = 0,
.filesz = 0,
.alignment = alignment,
.flags = elf.PF_R | elf.PF_W,
});
const phdr = &self.phdrs.items[self.phdr_load_zerofill_zig_index.?];
phdr.p_offset = self.phdrs.items[self.phdr_load_rw_zig_index.?].p_offset; // .bss overlaps .data
const phdr = &self.phdrs.items[self.phdr_zig_load_zerofill_index.?];
phdr.p_offset = self.phdrs.items[self.phdr_zig_load_rw_index.?].p_offset; // .bss overlaps .data
phdr.p_memsz = 1024;
}
if (self.text_zig_section_index == null) {
self.text_zig_section_index = try self.allocateAllocSection(.{
.name = ".text.zig",
.phdr_index = self.phdr_load_re_zig_index.?,
if (self.zig_text_section_index == null) {
self.zig_text_section_index = try self.allocateAllocSection(.{
.name = ".zig.text",
.phdr_index = self.phdr_zig_load_re_index.?,
.flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.text_zig_section_index.?, .{});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_text_section_index.?, .{});
}
if (self.got_zig_section_index == null) {
self.got_zig_section_index = try self.allocateAllocSection(.{
.name = ".got.zig",
.phdr_index = self.phdr_got_zig_index.?,
if (self.zig_got_section_index == null) {
self.zig_got_section_index = try self.allocateAllocSection(.{
.name = ".zig.got",
.phdr_index = self.phdr_zig_got_index.?,
.alignment = ptr_size,
});
}
if (self.rodata_zig_section_index == null) {
self.rodata_zig_section_index = try self.allocateAllocSection(.{
.name = ".rodata.zig",
.phdr_index = self.phdr_load_ro_zig_index.?,
if (self.zig_rodata_section_index == null) {
self.zig_rodata_section_index = try self.allocateAllocSection(.{
.name = ".zig.rodata",
.phdr_index = self.phdr_zig_load_ro_index.?,
});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.rodata_zig_section_index.?, .{});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_rodata_section_index.?, .{});
}
if (self.data_zig_section_index == null) {
self.data_zig_section_index = try self.allocateAllocSection(.{
.name = ".data.zig",
.phdr_index = self.phdr_load_rw_zig_index.?,
if (self.zig_data_section_index == null) {
self.zig_data_section_index = try self.allocateAllocSection(.{
.name = ".zig.data",
.phdr_index = self.phdr_zig_load_rw_index.?,
.alignment = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.data_zig_section_index.?, .{});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_data_section_index.?, .{});
}
if (self.bss_zig_section_index == null) {
self.bss_zig_section_index = try self.allocateAllocSection(.{
if (self.zig_bss_section_index == null) {
self.zig_bss_section_index = try self.allocateAllocSection(.{
.name = ".bss.zig",
.phdr_index = self.phdr_load_zerofill_zig_index.?,
.phdr_index = self.phdr_zig_load_zerofill_index.?,
.alignment = ptr_size,
.flags = elf.SHF_ALLOC | elf.SHF_WRITE,
.type = elf.SHT_NOBITS,
});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.bss_zig_section_index.?, .{});
try self.last_atom_and_free_list_table.putNoClobber(gpa, self.zig_bss_section_index.?, .{});
}
if (self.dwarf) |*dw| {
@ -875,7 +877,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
}
const mem_capacity = self.allocatedVirtualSize(phdr.p_vaddr);
if (needed_size <= mem_capacity) {
if (needed_size > mem_capacity) {
var err = try self.addErrorWithNotes(2);
try err.addMsg(self, "fatal linker error: cannot expand load segment phdr({d}) in virtual memory", .{
phdr_index,
@ -1544,8 +1546,8 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (self.zig_module_index) |index| {
// .bss.zig always overlaps .data.zig in file offset, but is zero-sized in file so it doesn't
// get mapped by the loader
if (self.data_zig_section_index) |data_shndx| blk: {
const bss_shndx = self.bss_zig_section_index orelse break :blk;
if (self.zig_data_section_index) |data_shndx| blk: {
const bss_shndx = self.zig_bss_section_index orelse break :blk;
const data_phndx = self.phdr_to_shdr_table.get(data_shndx).?;
const bss_phndx = self.phdr_to_shdr_table.get(bss_shndx).?;
self.shdrs.items[bss_shndx].sh_offset = self.shdrs.items[data_shndx].sh_offset;
@ -1580,7 +1582,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (self.debug_info_header_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_phdr = &self.phdrs.items[self.phdr_load_re_zig_index.?];
const text_phdr = &self.phdrs.items[self.phdr_zig_load_re_index.?];
const low_pc = text_phdr.p_vaddr;
const high_pc = text_phdr.p_vaddr + text_phdr.p_memsz;
try dw.writeDbgInfoHeader(self.base.options.module.?, low_pc, high_pc);
@ -1590,7 +1592,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
if (self.debug_aranges_section_dirty) {
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
const text_phdr = &self.phdrs.items[self.phdr_load_re_zig_index.?];
const text_phdr = &self.phdrs.items[self.phdr_zig_load_re_index.?];
try dw.writeDbgAranges(text_phdr.p_vaddr, text_phdr.p_memsz);
self.debug_aranges_section_dirty = false;
}
@ -2999,24 +3001,24 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index, code: []const u8)
const decl = mod.declPtr(decl_index);
const shdr_index = switch (decl.ty.zigTypeTag(mod)) {
// TODO: what if this is a function pointer?
.Fn => self.text_zig_section_index.?,
.Fn => self.zig_text_section_index.?,
else => blk: {
if (decl.getOwnedVariable(mod)) |variable| {
if (variable.is_const) break :blk self.rodata_zig_section_index.?;
if (variable.is_const) break :blk self.zig_rodata_section_index.?;
if (variable.init.toValue().isUndefDeep(mod)) {
const mode = self.base.options.optimize_mode;
if (mode == .Debug or mode == .ReleaseSafe) break :blk self.data_zig_section_index.?;
break :blk self.bss_zig_section_index.?;
if (mode == .Debug or mode == .ReleaseSafe) break :blk self.zig_data_section_index.?;
break :blk self.zig_bss_section_index.?;
}
// TODO I blatantly copied the logic from the Wasm linker, but is there a less
// intrusive check for all zeroes than this?
const is_all_zeroes = for (code) |byte| {
if (byte != 0) break false;
} else true;
if (is_all_zeroes) break :blk self.bss_zig_section_index.?;
break :blk self.data_zig_section_index.?;
if (is_all_zeroes) break :blk self.zig_bss_section_index.?;
break :blk self.zig_data_section_index.?;
}
break :blk self.rodata_zig_section_index.?;
break :blk self.zig_rodata_section_index.?;
},
};
return shdr_index;
@ -3070,8 +3072,9 @@ fn updateDeclCode(
esym.st_value = atom_ptr.value;
log.debug(" (writing new offset table entry)", .{});
// const extra = sym.extra(self).?;
// try self.got.writeEntry(self, extra.got);
assert(sym.flags.has_zig_got);
const extra = sym.extra(self).?;
try self.zig_got.writeOne(self, extra.zig_got);
}
} else if (code.len < old_size) {
atom_ptr.shrink(self);
@ -3083,10 +3086,8 @@ fn updateDeclCode(
sym.value = atom_ptr.value;
esym.st_value = atom_ptr.value;
sym.flags.needs_got = true;
const gop = try sym.getOrCreateGotEntry(sym_index, self);
_ = gop;
// try self.got.writeEntry(self, gop.index);
const gop = try sym.getOrCreateZigGotEntry(sym_index, self);
try self.zig_got.writeOne(self, gop.index);
}
if (self.base.child_pid) |pid| {
@ -3296,8 +3297,8 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
};
const output_section_index = switch (sym.kind) {
.code => self.text_zig_section_index.?,
.const_data => self.rodata_zig_section_index.?,
.code => self.zig_text_section_index.?,
.const_data => self.zig_rodata_section_index.?,
};
const local_sym = self.symbol(symbol_index);
const phdr_index = self.phdr_to_shdr_table.get(output_section_index).?;
@ -3320,10 +3321,8 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
local_sym.value = atom_ptr.value;
local_esym.st_value = atom_ptr.value;
local_sym.flags.needs_got = true;
const gop = try local_sym.getOrCreateGotEntry(symbol_index, self);
_ = gop;
// try self.got.writeEntry(self, gop.index);
const gop = try local_sym.getOrCreateZigGotEntry(symbol_index, self);
try self.zig_got.writeOne(self, gop.index);
const section_offset = atom_ptr.value - self.phdrs.items[phdr_index].p_vaddr;
const file_offset = self.shdrs.items[output_section_index].sh_offset + section_offset;
@ -3343,7 +3342,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
defer gpa.free(name);
const sym_index = switch (try self.lowerConst(name, typed_value, self.rodata_zig_section_index.?, decl.srcLoc(mod))) {
const sym_index = switch (try self.lowerConst(name, typed_value, self.zig_rodata_section_index.?, decl.srcLoc(mod))) {
.ok => |sym_index| sym_index,
.fail => |em| {
decl.analysis = .codegen_failure;
@ -4183,11 +4182,11 @@ fn sortSections(self: *Elf) !void {
&self.copy_rel_section_index,
&self.versym_section_index,
&self.verneed_section_index,
&self.text_zig_section_index,
&self.got_zig_section_index,
&self.rodata_zig_section_index,
&self.data_zig_section_index,
&self.bss_zig_section_index,
&self.zig_text_section_index,
&self.zig_got_section_index,
&self.zig_rodata_section_index,
&self.zig_data_section_index,
&self.zig_bss_section_index,
&self.debug_str_section_index,
&self.debug_info_section_index,
&self.debug_abbrev_section_index,
@ -4340,6 +4339,9 @@ fn updateSectionSizes(self: *Elf) !void {
if (self.strtab_section_index) |index| {
// TODO I don't really need this here, but we need it to add symbol names from GOT and other synthetic
// sections into .strtab for easier debugging.
if (self.zig_got_section_index) |_| {
try self.zig_got.updateStrtab(self);
}
if (self.got_section_index) |_| {
try self.got.updateStrtab(self);
}
@ -4719,6 +4721,11 @@ fn updateSymtabSize(self: *Elf) !void {
sizes.nglobals += shared_object.output_symtab_size.nglobals;
}
if (self.zig_got_section_index) |_| {
self.zig_got.updateSymtabSize(self);
sizes.nlocals += self.zig_got.output_symtab_size.nlocals;
}
if (self.got_section_index) |_| {
self.got.updateSymtabSize(self);
sizes.nlocals += self.got.output_symtab_size.nlocals;
@ -4930,6 +4937,11 @@ fn writeSymtab(self: *Elf) !void {
ctx.iglobal += shared_object.output_symtab_size.nglobals;
}
if (self.zig_got_section_index) |_| {
try self.zig_got.writeSymtab(self, ctx);
ctx.ilocal += self.zig_got.output_symtab_size.nlocals;
}
if (self.got_section_index) |_| {
try self.got.writeSymtab(self, ctx);
ctx.ilocal += self.got.output_symtab_size.nlocals;
@ -5289,11 +5301,11 @@ pub fn isDynLib(self: Elf) bool {
pub fn isZigSection(self: Elf, shndx: u16) bool {
inline for (&[_]?u16{
self.text_zig_section_index,
self.rodata_zig_section_index,
self.data_zig_section_index,
self.bss_zig_section_index,
self.got_zig_section_index,
self.zig_text_section_index,
self.zig_rodata_section_index,
self.zig_data_section_index,
self.zig_bss_section_index,
self.zig_got_section_index,
}) |maybe_index| {
if (maybe_index) |index| {
if (index == shndx) return true;
@ -5851,6 +5863,7 @@ fn fmtDumpState(
try writer.print("{}\n", .{linker_defined.fmtSymtab(self)});
}
try writer.print("{}\n", .{self.got.fmt(self)});
try writer.print("{}\n", .{self.zig_got.fmt(self)});
try writer.writeAll("Output shdrs\n");
for (self.shdrs.items, 0..) |shdr, shndx| {
try writer.print("shdr({d}) : phdr({?d}) : {}\n", .{
@ -6030,4 +6043,5 @@ const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
const VerneedSection = synthetic_sections.VerneedSection;
const ZigGotSection = synthetic_sections.ZigGotSection;
const ZigModule = @import("Elf/ZigModule.zig");

View File

@ -367,8 +367,16 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
try self.scanReloc(symbol, rel, dynAbsRelocAction(symbol, elf_file), elf_file);
},
// TODO I have temporarily repurposed those for handling .zig.got indirection
// but we should probably claim unused custom values for incremental linking
// that get rewritten to standard relocs when lowering to a relocatable object
// file.
elf.R_X86_64_GOT32,
elf.R_X86_64_GOT64,
=> {
assert(symbol.flags.has_zig_got);
},
elf.R_X86_64_GOTPC32,
elf.R_X86_64_GOTPC64,
elf.R_X86_64_GOTPCREL,
@ -736,6 +744,8 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
null;
break :blk if (shndx) |index| @as(i64, @intCast(elf_file.shdrs.items[index].sh_addr)) else 0;
};
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
// Address of the thread pointer.
@ -796,8 +806,12 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) !void {
elf.R_X86_64_32 => try cwriter.writeIntLittle(u32, @as(u32, @truncate(@as(u64, @intCast(S + A))))),
elf.R_X86_64_32S => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A))),
elf.R_X86_64_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(G + GOT + A))),
elf.R_X86_64_GOT64 => try cwriter.writeIntLittle(u64, @as(u64, @intCast(G + GOT + A))),
// TODO I have temporarily repurposed those for handling .zig.got indirection
// but we should probably claim unused custom values for incremental linking
// that get rewritten to standard relocs when lowering to a relocatable object
// file.
elf.R_X86_64_GOT32 => try cwriter.writeIntLittle(u32, @as(u32, @intCast(ZIG_GOT + A))),
elf.R_X86_64_GOT64 => try cwriter.writeIntLittle(u64, @as(u64, @intCast(ZIG_GOT + A))),
elf.R_X86_64_TPOFF32 => try cwriter.writeIntLittle(i32, @as(i32, @truncate(S + A - TP))),
elf.R_X86_64_TPOFF64 => try cwriter.writeIntLittle(i64, S + A - TP),

View File

@ -137,19 +137,6 @@ pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) u64 {
return shdr.sh_addr + symbol.value;
}
/// Result of `getOrCreateGotEntry`: whether the symbol already had a GOT
/// entry, and the entry's index in the synthetic GOT section.
const GetOrCreateGotEntryResult = struct {
found_existing: bool,
index: GotSection.Index,
};
/// Returns the symbol's GOT entry, creating one in `elf_file.got` on first use.
/// Precondition: the symbol must have been marked as requiring a GOT slot
/// (`flags.needs_got`) by a prior relocation scan or codegen step.
pub fn getOrCreateGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *Elf) !GetOrCreateGotEntryResult {
assert(symbol.flags.needs_got);
// Fast path: entry index was stashed in the symbol's extra data.
if (symbol.flags.has_got) return .{ .found_existing = true, .index = symbol.extra(elf_file).?.got };
const index = try elf_file.got.addGotSymbol(symbol_index, elf_file);
// Mark so subsequent calls take the fast path above.
symbol.flags.has_got = true;
return .{ .found_existing = false, .index = index };
}
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
if (!symbol.flags.has_tlsgd) return 0;
const extras = symbol.extra(elf_file).?;
@ -171,6 +158,23 @@ pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
return entry.address(elf_file);
}
/// Result of `getOrCreateZigGotEntry`: whether the symbol already owned a
/// `.zig.got` slot, plus that slot's index.
const GetOrCreateZigGotEntryResult = struct {
found_existing: bool,
index: ZigGotSection.Index,
};
/// Returns the symbol's `.zig.got` entry, allocating a new slot in
/// `elf_file.zig_got` on first use. `addSymbol` is responsible for setting
/// `flags.has_zig_got` and recording the index in the symbol's extra data.
pub fn getOrCreateZigGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *Elf) !GetOrCreateZigGotEntryResult {
if (!symbol.flags.has_zig_got) {
const index = try elf_file.zig_got.addSymbol(symbol_index, elf_file);
return .{ .found_existing = false, .index = index };
}
// Slot already allocated; its index lives in the symbol's extra data.
return .{ .found_existing = true, .index = symbol.extra(elf_file).?.zig_got };
}
/// Virtual address of this symbol's `.zig.got` slot, or 0 if the symbol has
/// no `.zig.got` indirection.
pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
if (symbol.flags.has_zig_got) {
const index = symbol.extra(elf_file).?.zig_got;
return elf_file.zig_got.entryAddress(index, elf_file);
}
return 0;
}
pub fn dsoAlignment(symbol: Symbol, elf_file: *Elf) !u64 {
const file_ptr = symbol.file(elf_file) orelse return 0;
assert(file_ptr == .shared_object);
@ -367,6 +371,9 @@ pub const Flags = packed struct {
/// Whether the symbol contains TLSDESC indirection.
needs_tlsdesc: bool = false,
has_tlsdesc: bool = false,
/// Whether the symbol contains .zig.got indirection.
has_zig_got: bool = false,
};
pub const Extra = struct {
@ -378,6 +385,7 @@ pub const Extra = struct {
tlsgd: u32 = 0,
gottp: u32 = 0,
tlsdesc: u32 = 0,
zig_got: u32 = 0,
};
pub const Index = u32;
@ -397,4 +405,5 @@ const Object = @import("Object.zig");
const PltSection = synthetic_sections.PltSection;
const SharedObject = @import("SharedObject.zig");
const Symbol = @This();
const ZigGotSection = synthetic_sections.ZigGotSection;
const ZigModule = @import("ZigModule.zig");

View File

@ -220,6 +220,191 @@ pub const DynamicSection = struct {
}
};
/// Synthetic `.zig.got` section: a GOT-like table owned by the incremental
/// Zig linker, with one pointer-sized slot per tracked symbol. Slots are
/// written directly into the output file (and, when hot-swapping, into the
/// running child's memory) as symbols get (re)allocated.
pub const ZigGotSection = struct {
/// One symbol index per table slot; slot `i` lives at `entryAddress(i)`.
entries: std.ArrayListUnmanaged(Symbol.Index) = .{},
/// Symtab sizing for the `$ziggot` local symbols emitted by `writeSymtab`.
output_symtab_size: Elf.SymtabSize = .{},
flags: Flags = .{},
const Flags = packed struct {
needs_rela: bool = false, // TODO in prep for PIC/PIE and base relocations
// Set when entries were added and the backing section may be too small;
// cleared by `writeOne` after growing the section to `size()`.
dirty: bool = false,
};
pub const Index = u32;
/// Frees the entries list. Deallocation cannot fail.
pub fn deinit(zig_got: *ZigGotSection, allocator: Allocator) void {
zig_got.entries.deinit(allocator);
}
/// Reserves one new (uninitialized) slot and returns its index, marking the
/// section dirty so the next `writeOne` grows the backing section first.
fn allocateEntry(zig_got: *ZigGotSection, allocator: Allocator) !Index {
try zig_got.entries.ensureUnusedCapacity(allocator, 1);
// TODO add free list
const index = @as(Index, @intCast(zig_got.entries.items.len));
_ = zig_got.entries.addOneAssumeCapacity();
zig_got.flags.dirty = true;
return index;
}
/// Allocates a slot for `sym_index`, flags the symbol as having `.zig.got`
/// indirection, and records the slot index in the symbol's extra data.
pub fn addSymbol(zig_got: *ZigGotSection, sym_index: Symbol.Index, elf_file: *Elf) !Index {
const index = try zig_got.allocateEntry(elf_file.base.allocator);
const entry = &zig_got.entries.items[index];
entry.* = sym_index;
const symbol = elf_file.symbol(sym_index);
symbol.flags.has_zig_got = true;
// Preserve any other extra fields the symbol already carries.
if (symbol.extra(elf_file)) |extra| {
var new_extra = extra;
new_extra.zig_got = index;
symbol.setExtra(new_extra, elf_file);
} else try symbol.addExtra(.{ .zig_got = index }, elf_file);
return index;
}
/// File offset of slot `index` within the output file.
pub fn entryOffset(zig_got: ZigGotSection, index: Index, elf_file: *Elf) u64 {
_ = zig_got;
const entry_size = elf_file.archPtrWidthBytes();
const shdr = elf_file.shdrs.items[elf_file.zig_got_section_index.?];
return shdr.sh_offset + @as(u64, entry_size) * index;
}
/// Virtual address of slot `index` at run time.
pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) u64 {
_ = zig_got;
const entry_size = elf_file.archPtrWidthBytes();
const shdr = elf_file.shdrs.items[elf_file.zig_got_section_index.?];
return shdr.sh_addr + @as(u64, entry_size) * index;
}
/// Total byte size of the table: one pointer-width slot per entry.
pub fn size(zig_got: ZigGotSection, elf_file: *Elf) usize {
return elf_file.archPtrWidthBytes() * zig_got.entries.items.len;
}
/// Writes a single slot's value (the target symbol's current `value`)
/// directly into the output file, growing the backing section first if the
/// table is dirty. When hot-swapping a live child process, also patches the
/// child's memory — NOTE(review): the patch is only performed in the 8-byte
/// branch; 2- and 4-byte targets skip it (presumably no hot-swap support
/// there — confirm).
pub fn writeOne(zig_got: *ZigGotSection, elf_file: *Elf, index: Index) !void {
if (zig_got.flags.dirty) {
const needed_size = zig_got.size(elf_file);
try elf_file.growAllocSection(elf_file.zig_got_section_index.?, needed_size);
zig_got.flags.dirty = false;
}
const entry_size: u16 = elf_file.archPtrWidthBytes();
const endian = elf_file.base.options.target.cpu.arch.endian();
const off = zig_got.entryOffset(index, elf_file);
const vaddr = zig_got.entryAddress(index, elf_file);
const entry = zig_got.entries.items[index];
const value = elf_file.symbol(entry).value;
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
std.mem.writeInt(u16, &buf, @as(u16, @intCast(value)), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @as(u32, @intCast(value)), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, value, endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
if (elf_file.base.child_pid) |pid| {
switch (builtin.os.tag) {
.linux => {
// Patch the running child's .zig.got slot in place via
// process_vm_writev so hot code swap sees the new address.
var local_vec: [1]std.os.iovec_const = .{.{
.iov_base = &buf,
.iov_len = buf.len,
}};
var remote_vec: [1]std.os.iovec_const = .{.{
.iov_base = @as([*]u8, @ptrFromInt(@as(usize, @intCast(vaddr)))),
.iov_len = buf.len,
}};
const rc = std.os.linux.process_vm_writev(pid, &local_vec, &remote_vec, 0);
switch (std.os.errno(rc)) {
.SUCCESS => assert(rc == buf.len),
// Best-effort: a failed patch is logged, not fatal.
else => |errno| log.warn("process_vm_writev failure: {s}", .{@tagName(errno)}),
}
},
else => return error.HotSwapUnavailableOnHostOperatingSystem,
}
}
},
else => unreachable,
}
}
/// Streams every slot's value to `writer` in table order (bulk emission path,
/// in contrast to the single-slot incremental `writeOne`).
pub fn writeAll(zig_got: ZigGotSection, elf_file: *Elf, writer: anytype) !void {
for (zig_got.entries.items) |entry| {
const symbol = elf_file.symbol(entry);
const value = symbol.address(.{ .plt = false }, elf_file);
try writeInt(value, elf_file, writer);
}
}
/// Records one local symtab entry per slot.
pub fn updateSymtabSize(zig_got: *ZigGotSection, elf_file: *Elf) void {
_ = elf_file;
zig_got.output_symtab_size.nlocals = @as(u32, @intCast(zig_got.entries.items.len));
}
/// Interns a "<name>$ziggot" string per slot into .strtab so the synthetic
/// symbols emitted by `writeSymtab` resolve their names.
pub fn updateStrtab(zig_got: ZigGotSection, elf_file: *Elf) !void {
const gpa = elf_file.base.allocator;
for (zig_got.entries.items) |entry| {
const symbol_name = elf_file.symbol(entry).name(elf_file);
const name = try std.fmt.allocPrint(gpa, "{s}$ziggot", .{symbol_name});
defer gpa.free(name);
_ = try elf_file.strtab.insert(gpa, name);
}
}
/// Emits one STT_OBJECT local symbol per slot ("<name>$ziggot") pointing at
/// the slot's address, starting at `ctx.ilocal` in `ctx.symtab`.
pub fn writeSymtab(zig_got: ZigGotSection, elf_file: *Elf, ctx: anytype) !void {
const gpa = elf_file.base.allocator;
for (zig_got.entries.items, ctx.ilocal.., 0..) |entry, ilocal, index| {
const symbol = elf_file.symbol(entry);
const symbol_name = symbol.name(elf_file);
const name = try std.fmt.allocPrint(gpa, "{s}$ziggot", .{symbol_name});
defer gpa.free(name);
const st_name = try elf_file.strtab.insert(gpa, name);
const st_value = zig_got.entryAddress(@intCast(index), elf_file);
const st_size = elf_file.archPtrWidthBytes();
ctx.symtab[ilocal] = .{
.st_name = st_name,
.st_info = elf.STT_OBJECT,
.st_other = 0,
.st_shndx = elf_file.zig_got_section_index.?,
.st_value = st_value,
.st_size = st_size,
};
}
}
const FormatCtx = struct {
zig_got: ZigGotSection,
elf_file: *Elf,
};
/// Debug formatter used by `Elf.fmtDumpState`.
pub fn fmt(zig_got: ZigGotSection, elf_file: *Elf) std.fmt.Formatter(format2) {
return .{ .data = .{ .zig_got = zig_got, .elf_file = elf_file } };
}
/// Prints each slot as "index@slot_addr => sym@sym_addr (name)".
pub fn format2(
ctx: FormatCtx,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = options;
_ = unused_fmt_string;
try writer.writeAll(".zig.got\n");
for (ctx.zig_got.entries.items, 0..) |entry, index| {
const symbol = ctx.elf_file.symbol(entry);
try writer.print("  {d}@0x{x} => {d}@0x{x} ({s})\n", .{
index,
ctx.zig_got.entryAddress(@intCast(index), ctx.elf_file),
entry,
symbol.address(.{}, ctx.elf_file),
symbol.name(ctx.elf_file),
});
}
}
};
pub const GotSection = struct {
entries: std.ArrayListUnmanaged(Entry) = .{},
output_symtab_size: Elf.SymtabSize = .{},
@ -420,17 +605,6 @@ pub const GotSection = struct {
}
}
/// Writes `value` as a pointer-width little/big-endian integer, where the
/// width (2/4/8 bytes) and endianness come from the target described by
/// `elf_file`. Any other pointer width is a linker invariant violation.
fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
const entry_size = elf_file.archPtrWidthBytes();
const endian = elf_file.base.options.target.cpu.arch.endian();
switch (entry_size) {
2 => try writer.writeInt(u16, @intCast(value), endian),
4 => try writer.writeInt(u32, @intCast(value), endian),
8 => try writer.writeInt(u64, @intCast(value), endian),
else => unreachable,
}
}
pub fn addRela(got: GotSection, elf_file: *Elf) !void {
const is_dyn_lib = elf_file.isDynLib();
try elf_file.rela_dyn.ensureUnusedCapacity(elf_file.base.allocator, got.numRela(elf_file));
@ -1327,6 +1501,17 @@ pub const VerneedSection = struct {
}
};
/// Writes `value` as an integer of the target's pointer width, using the
/// target's byte order. Widths other than 2/4/8 bytes are impossible here.
fn writeInt(value: anytype, elf_file: *Elf, writer: anytype) !void {
const endian = elf_file.base.options.target.cpu.arch.endian();
switch (elf_file.archPtrWidthBytes()) {
2 => try writer.writeInt(u16, @intCast(value), endian),
4 => try writer.writeInt(u32, @intCast(value), endian),
8 => try writer.writeInt(u64, @intCast(value), endian),
else => unreachable,
}
}
const assert = std.debug.assert;
const builtin = @import("builtin");
const elf = std.elf;