link/elf: implement string merging
parent 09820a96b6
commit 9e0bca73e2

274 src/link/Elf.zig
@@ -210,6 +210,14 @@ atoms_extra: std.ArrayListUnmanaged(u32) = .{},
/// List of range extension thunks.
thunks: std.ArrayListUnmanaged(Thunk) = .{},

/// List of output merge sections with deduped contents.
merge_sections: std.ArrayListUnmanaged(MergeSection) = .{},
/// List of output merge subsections.
/// Each subsection is akin to Atom but belongs to a MergeSection.
merge_subsections: std.ArrayListUnmanaged(MergeSubsection) = .{},
/// List of input merge sections as parsed from input relocatables.
merge_input_sections: std.ArrayListUnmanaged(InputMergeSection) = .{},

/// Table of last atom index in a section and matching atom free list if any.
last_atom_and_free_list_table: LastAtomAndFreeListTable = .{},

@@ -380,6 +388,8 @@ pub fn createEmpty(
_ = try self.addSection(.{ .name = "" });
// Append null symbol in output symtab
try self.symtab.append(gpa, null_sym);
// Append null input merge section.
try self.merge_input_sections.append(gpa, .{});

if (!is_obj_or_ar) {
try self.dynstrtab.append(gpa, 0);
@@ -498,6 +508,15 @@ pub fn deinit(self: *Elf) void {
th.deinit(gpa);
}
self.thunks.deinit(gpa);
for (self.merge_sections.items) |*sect| {
sect.deinit(gpa);
}
self.merge_sections.deinit(gpa);
self.merge_subsections.deinit(gpa);
for (self.merge_input_sections.items) |*sect| {
sect.deinit(gpa);
}
self.merge_input_sections.deinit(gpa);
for (self.last_atom_and_free_list_table.values()) |*value| {
value.free_list.deinit(gpa);
}
@@ -1295,6 +1314,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
// symbol for potential resolution at load-time.
self.resolveSymbols();
self.markEhFrameAtomsDead();
try self.resolveMergeSections();

try self.convertCommonSymbols();
self.markImportsExports();
@@ -1319,6 +1339,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
else => |e| return e,
};

try self.addCommentString();
try self.sortMergeSections();
try self.initOutputSections();
try self.addLinkerDefinedSymbols();
self.claimUnresolved();
@@ -1338,6 +1360,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
self.sortDynamicSymtab();
try self.setHashSections();
try self.setVersionSymtab();
try self.updateMergeSectionSizes();
try self.updateSectionSizes();

try self.allocatePhdrTable();
@@ -1365,7 +1388,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
if (shdr.sh_type == elf.SHT_NOBITS) continue;
const code = try zig_object.codeAlloc(self, atom_index);
defer gpa.free(code);
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
error.UnsupportedCpuArch => {
@@ -1383,6 +1406,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
try self.writePhdrTable();
try self.writeShdrTable();
try self.writeAtoms();
try self.writeMergeSections();
self.writeSyntheticSections() catch |err| switch (err) {
error.RelocFailure => return error.FlushFailure,
error.UnsupportedCpuArch => {
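The new flushModule steps above (resolveMergeSections, addCommentString, sortMergeSections, updateMergeSectionSizes, writeMergeSections) all revolve around deduplicating identical entries from SHF_MERGE input sections into a single output copy. The MergeSection type itself lives in Elf/merge_section.zig and is not part of this diff; the following is only a minimal sketch of the dedup idea — the type name, fields, and insertZ signature here are illustrative, not the actual API.

    const std = @import("std");

    /// Illustrative only: intern strings into one buffer, returning a stable
    /// offset per distinct string. The real MergeSection tracks MergeSubsections.
    const StringMerger = struct {
        bytes: std.ArrayListUnmanaged(u8) = .{},
        table: std.StringHashMapUnmanaged(u32) = .{},

        fn insertZ(self: *StringMerger, gpa: std.mem.Allocator, string: []const u8) !u32 {
            if (self.table.get(string)) |existing| return existing; // already merged
            const offset: u32 = @intCast(self.bytes.items.len);
            try self.bytes.appendSlice(gpa, string);
            try self.bytes.append(gpa, 0); // keep entries null-terminated
            const key = try gpa.dupe(u8, string); // the sketch owns keys separately
            try self.table.put(gpa, key, offset);
            return offset;
        }
    };

Inserting the same bytes twice returns the same offset, which is what lets every input copy of a given ".rodata.str1.1" literal collapse to a single location in the output file.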
@@ -2952,7 +2976,10 @@ pub fn writeElfHeader(self: *Elf) !void {
mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
index += 4;

const e_entry = if (self.entry_index) |entry_index| self.symbol(entry_index).address(.{}, self) else 0;
const e_entry = if (self.entry_index) |entry_index|
@as(u64, @intCast(self.symbol(entry_index).address(.{}, self)))
else
0;
const phdr_table_offset = if (self.phdr_table_index) |phndx| self.phdrs.items[phndx].p_offset else 0;
switch (self.ptr_width) {
.p32 => {
@@ -3138,14 +3165,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.dynamic_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.dynamic_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}

// __ehdr_start
{
const symbol_ptr = self.symbol(self.ehdr_start_index.?);
symbol_ptr.value = self.image_base;
symbol_ptr.value = @intCast(self.image_base);
symbol_ptr.output_section_index = 1;
}

@@ -3155,9 +3182,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.init_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}

// __fini_array_start, __fini_array_end
@@ -3166,9 +3193,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.fini_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}

// __preinit_array_start, __preinit_array_end
@@ -3177,9 +3204,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_sym = self.symbol(self.preinit_array_end_index.?);
const shdr = &self.shdrs.items[shndx];
start_sym.output_section_index = shndx;
start_sym.value = shdr.sh_addr;
start_sym.value = @intCast(shdr.sh_addr);
end_sym.output_section_index = shndx;
end_sym.value = shdr.sh_addr + shdr.sh_size;
end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
}

// _GLOBAL_OFFSET_TABLE_
@@ -3187,14 +3214,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.got_plt_section_index) |shndx| {
const shdr = self.shdrs.items[shndx];
const sym = self.symbol(self.got_index.?);
sym.value = shdr.sh_addr;
sym.value = @intCast(shdr.sh_addr);
sym.output_section_index = shndx;
}
} else {
if (self.got_section_index) |shndx| {
const shdr = self.shdrs.items[shndx];
const sym = self.symbol(self.got_index.?);
sym.value = shdr.sh_addr;
sym.value = @intCast(shdr.sh_addr);
sym.output_section_index = shndx;
}
}
@@ -3203,7 +3230,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.plt_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.plt_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}

@@ -3211,7 +3238,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.dso_handle_index) |index| {
const shdr = &self.shdrs.items[1];
const symbol_ptr = self.symbol(index);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = 0;
}

@@ -3219,7 +3246,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
if (self.eh_frame_hdr_section_index) |shndx| {
const shdr = &self.shdrs.items[shndx];
const symbol_ptr = self.symbol(self.gnu_eh_frame_hdr_index.?);
symbol_ptr.value = shdr.sh_addr;
symbol_ptr.value = @intCast(shdr.sh_addr);
symbol_ptr.output_section_index = shndx;
}

@@ -3231,9 +3258,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
const start_sym = self.symbol(self.rela_iplt_start_index.?);
const end_sym = self.symbol(self.rela_iplt_end_index.?);
start_sym.value = start_addr;
start_sym.value = @intCast(start_addr);
start_sym.output_section_index = shndx;
end_sym.value = end_addr;
end_sym.value = @intCast(end_addr);
end_sym.output_section_index = shndx;
}

@@ -3242,7 +3269,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const end_symbol = self.symbol(self.end_index.?);
for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_ALLOC != 0) {
end_symbol.value = shdr.sh_addr + shdr.sh_size;
end_symbol.value = @intCast(shdr.sh_addr + shdr.sh_size);
end_symbol.output_section_index = @intCast(shndx);
}
}
@@ -3257,9 +3284,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const stop = self.symbol(self.start_stop_indexes.items[index + 1]);
const shndx = self.sectionByName(name["__start_".len..]).?;
const shdr = &self.shdrs.items[shndx];
start.value = shdr.sh_addr;
start.value = @intCast(shdr.sh_addr);
start.output_section_index = shndx;
stop.value = shdr.sh_addr + shdr.sh_size;
stop.value = @intCast(shdr.sh_addr + shdr.sh_size);
stop.output_section_index = shndx;
}
}
@@ -3269,7 +3296,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
const sym = self.symbol(index);
if (self.sectionByName(".sdata")) |shndx| {
const shdr = self.shdrs.items[shndx];
sym.value = shdr.sh_addr + 0x800;
sym.value = @intCast(shdr.sh_addr + 0x800);
sym.output_section_index = shndx;
} else {
sym.value = 0;
@@ -3299,10 +3326,109 @@ fn checkDuplicates(self: *Elf) !void {
try self.reportDuplicates(dupes);
}

pub fn addCommentString(self: *Elf) !void {
const msec_index = try self.getOrCreateMergeSection(".comment", elf.SHF_MERGE | elf.SHF_STRINGS, elf.SHT_PROGBITS);
const msec = self.mergeSection(msec_index);
const res = try msec.insertZ(self.base.comp.gpa, "zig version x.x.x"); // TODO get actual version
if (res.found_existing) return;
const msub_index = try self.addMergeSubsection();
const msub = self.mergeSubsection(msub_index);
msub.merge_section_index = msec_index;
msub.string_index = res.key.pos;
msub.alignment = .@"1";
msub.size = res.key.len;
msub.alive = true;
res.sub.* = msub_index;
}

fn resolveMergeSections(self: *Elf) !void {
const tracy = trace(@src());
defer tracy.end();

var has_errors = false;
for (self.objects.items) |index| {
const file_ptr = self.file(index).?;
if (!file_ptr.isAlive()) continue;
file_ptr.object.initMergeSections(self) catch |err| switch (err) {
error.MalformedObject => has_errors = true,
else => |e| return e,
};
}

if (has_errors) return error.FlushFailure;

for (self.objects.items) |index| {
const file_ptr = self.file(index).?;
if (!file_ptr.isAlive()) continue;
file_ptr.object.resolveMergeSubsections(self) catch |err| switch (err) {
error.MalformedObject => has_errors = true,
else => |e| return e,
};
}

if (has_errors) return error.FlushFailure;
}

pub fn sortMergeSections(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
try msec.sort(self);
}
}

pub fn updateMergeSectionSizes(self: *Elf) !void {
for (self.merge_sections.items) |*msec| {
const shdr = &self.shdrs.items[msec.output_section_index];
for (msec.subsections.items) |msub_index| {
const msub = self.mergeSubsection(msub_index);
assert(msub.alive);
const offset = msub.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
msub.value = @intCast(offset);
shdr.sh_size += padding + msub.size;
shdr.sh_addralign = @max(shdr.sh_addralign, msub.alignment.toByteUnits() orelse 1);
}
}
}

pub fn writeMergeSections(self: *Elf) !void {
const gpa = self.base.comp.gpa;
var buffer = std.ArrayList(u8).init(gpa);
defer buffer.deinit();

for (self.merge_sections.items) |msec| {
const shdr = self.shdrs.items[msec.output_section_index];

try buffer.ensureTotalCapacity(shdr.sh_size);
buffer.appendNTimesAssumeCapacity(0, shdr.sh_size);

for (msec.subsections.items) |msub_index| {
const msub = self.mergeSubsection(msub_index);
assert(msub.alive);
const string = msub.getString(self);
const off: u64 = @intCast(msub.value);
@memcpy(buffer.items[off..][0..string.len], string);
}

try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
buffer.clearRetainingCapacity();
}
}

fn initOutputSections(self: *Elf) !void {
for (self.objects.items) |index| {
try self.file(index).?.object.initOutputSections(self);
}

for (self.merge_sections.items) |*msec| {
if (msec.subsections.items.len == 0) continue;
const name = msec.name(self);
const shndx = self.sectionByName(name) orelse try self.addSection(.{
.name = name,
.type = msec.type,
.flags = msec.flags,
});
msec.output_section_index = shndx;
}
}

fn initSyntheticSections(self: *Elf) !void {
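updateMergeSectionSizes above assigns each live subsection an offset by rounding the running section size up to the subsection's alignment and then growing the section by the padding plus the subsection size. A small worked example of that arithmetic, with made-up numbers and std.mem.alignForward standing in for Alignment.forward:

    const std = @import("std");

    test "merge subsection layout arithmetic" {
        // Section currently ends at 13 bytes; the next subsection needs 8-byte
        // alignment and is 5 bytes long — mirrors the offset/padding computation.
        var sh_size: u64 = 13;
        const alignment: u64 = 8;
        const sub_size: u64 = 5;

        const offset = std.mem.alignForward(u64, sh_size, alignment); // 16
        const padding = offset - sh_size; // 3 bytes of padding
        sh_size += padding + sub_size; // section now ends at 21

        try std.testing.expectEqual(@as(u64, 16), offset);
        try std.testing.expectEqual(@as(u64, 21), sh_size);
    }

The @max on sh_addralign then guarantees the output section is at least as aligned as its most demanding subsection.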
@@ -3971,6 +4097,10 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
}
}

for (self.merge_sections.items) |*msec| {
msec.output_section_index = backlinks[msec.output_section_index];
}

{
var output_rela_sections = try self.output_rela_sections.clone(gpa);
defer output_rela_sections.deinit(gpa);
@@ -4058,7 +4188,7 @@ fn updateSectionSizes(self: *Elf) !void {
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
@@ -4541,7 +4671,7 @@ fn writeAtoms(self: *Elf) !void {
const atom_ptr = self.atom(atom_index).?;
assert(atom_ptr.flags.alive);

const offset = math.cast(usize, atom_ptr.value - base_offset) orelse
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

@@ -4582,7 +4712,7 @@ fn writeAtoms(self: *Elf) !void {
const thunk_size = th.size(self);
try buffer.ensureUnusedCapacity(thunk_size);
const shdr = self.shdrs.items[th.output_section_index];
const offset = th.value + shdr.sh_offset;
const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
try th.write(self, buffer.writer());
assert(buffer.items.len == thunk_size);
try self.base.file.?.pwriteAll(buffer.items, offset);
@@ -4617,6 +4747,7 @@ pub fn updateSymtabSize(self: *Elf) !void {
if (self.eh_frame_section_index) |_| {
nlocals += 1;
}
nlocals += @intCast(self.merge_sections.items.len);

if (self.requiresThunks()) for (self.thunks.items) |*th| {
th.output_symtab_ctx.ilocal = nlocals + 1;
@@ -4953,12 +5084,29 @@ fn writeSectionSymbols(self: *Elf) void {
};
ilocal += 1;
}

for (self.merge_sections.items) |msec| {
const shdr = self.shdrs.items[msec.output_section_index];
const out_sym = &self.symtab.items[ilocal];
out_sym.* = .{
.st_name = 0,
.st_value = shdr.sh_addr,
.st_info = elf.STT_SECTION,
.st_shndx = @intCast(msec.output_section_index),
.st_size = 0,
.st_other = 0,
};
ilocal += 1;
}
}

pub fn sectionSymbolOutputSymtabIndex(self: Elf, shndx: u32) u32 {
if (self.eh_frame_section_index) |index| {
if (index == shndx) return @intCast(self.output_sections.keys().len + 1);
}
for (self.merge_sections.items, 1..) |msec, index| {
if (msec.output_section_index == shndx) return @intCast(self.output_sections.keys().len + 1 + index);
}
return @intCast(self.output_sections.getIndex(shndx).? + 1);
}

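sectionSymbolOutputSymtabIndex extends the existing STT_SECTION symbol layout: regular output sections come first, then the optional .eh_frame entry, then one entry per output merge section, matching the order in which writeSectionSymbols emits them. As a worked example (counts are hypothetical), with three regular output sections, an .eh_frame section, and two merge sections, the local symtab indices work out to:

    index 0        null symbol
    indices 1..3   regular output sections   (getIndex(shndx) + 1)
    index 4        .eh_frame                 (keys().len + 1)
    indices 5..6   merge sections            (keys().len + 1 + n, n starting at 1)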
@@ -5687,35 +5835,88 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
return &self.comdat_groups_owners.items[index];
}

pub fn gotAddress(self: *Elf) u64 {
pub fn addInputMergeSection(self: *Elf) !InputMergeSection.Index {
const index: InputMergeSection.Index = @intCast(self.merge_input_sections.items.len);
const msec = try self.merge_input_sections.addOne(self.base.comp.gpa);
msec.* = .{};
return index;
}

pub fn inputMergeSection(self: *Elf, index: InputMergeSection.Index) ?*InputMergeSection {
if (index == 0) return null;
return &self.merge_input_sections.items[index];
}

pub fn addMergeSubsection(self: *Elf) !MergeSubsection.Index {
const index: MergeSubsection.Index = @intCast(self.merge_subsections.items.len);
const msec = try self.merge_subsections.addOne(self.base.comp.gpa);
msec.* = .{};
return index;
}

pub fn mergeSubsection(self: *Elf, index: MergeSubsection.Index) *MergeSubsection {
assert(index < self.merge_subsections.items.len);
return &self.merge_subsections.items[index];
}

pub fn getOrCreateMergeSection(self: *Elf, name: []const u8, flags: u64, @"type": u32) !MergeSection.Index {
const gpa = self.base.comp.gpa;
const out_name = name: {
if (self.base.isRelocatable()) break :name name;
if (mem.eql(u8, name, ".rodata") or mem.startsWith(u8, name, ".rodata"))
break :name if (flags & elf.SHF_STRINGS != 0) ".rodata.str" else ".rodata.cst";
break :name name;
};
const out_off = try self.strings.insert(gpa, out_name);
const out_flags = flags & ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP);
for (self.merge_sections.items, 0..) |msec, index| {
if (msec.name_offset == out_off) return @intCast(index);
}
const index = @as(MergeSection.Index, @intCast(self.merge_sections.items.len));
const msec = try self.merge_sections.addOne(gpa);
msec.* = .{
.name_offset = out_off,
.flags = out_flags,
.type = @"type",
};
return index;
}

pub fn mergeSection(self: *Elf, index: MergeSection.Index) *MergeSection {
assert(index < self.merge_sections.items.len);
return &self.merge_sections.items[index];
}

pub fn gotAddress(self: *Elf) i64 {
const shndx = blk: {
if (self.getTarget().cpu.arch == .x86_64 and self.got_plt_section_index != null)
break :blk self.got_plt_section_index.?;
break :blk if (self.got_section_index) |shndx| shndx else null;
};
return if (shndx) |index| self.shdrs.items[index].sh_addr else 0;
return if (shndx) |index| @intCast(self.shdrs.items[index].sh_addr) else 0;
}

pub fn tpAddress(self: *Elf) u64 {
pub fn tpAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return switch (self.getTarget().cpu.arch) {
const addr = switch (self.getTarget().cpu.arch) {
.x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align),
.aarch64 => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
else => @panic("TODO implement getTpAddress for this arch"),
};
return @intCast(addr);
}

pub fn dtpAddress(self: *Elf) u64 {
pub fn dtpAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return phdr.p_vaddr;
return @intCast(phdr.p_vaddr);
}

pub fn tlsAddress(self: *Elf) u64 {
pub fn tlsAddress(self: *Elf) i64 {
const index = self.phdr_tls_index orelse return 0;
const phdr = self.phdrs.items[index];
return phdr.p_vaddr;
return @intCast(phdr.p_vaddr);
}

const ErrorWithNotes = struct {
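For a final link, getOrCreateMergeSection folds the many input section names onto a small set of output names — any ".rodata*" merge section becomes ".rodata.str" when SHF_STRINGS is set and ".rodata.cst" otherwise — and strips SHF_COMPRESSED and SHF_GROUP from the output flags. A standalone sketch of just the naming rule (this helper is illustrative, not part of the linker):

    const std = @import("std");
    const elf = std.elf;

    fn outputMergeSectionName(name: []const u8, flags: u64) []const u8 {
        // ".rodata.str1.1", ".rodata.cst8", ... all fold onto two output names.
        if (std.mem.startsWith(u8, name, ".rodata"))
            return if (flags & elf.SHF_STRINGS != 0) ".rodata.str" else ".rodata.cst";
        return name;
    }

    test outputMergeSectionName {
        try std.testing.expectEqualStrings(".rodata.str", outputMergeSectionName(".rodata.str1.1", elf.SHF_MERGE | elf.SHF_STRINGS));
        try std.testing.expectEqualStrings(".rodata.cst", outputMergeSectionName(".rodata.cst8", elf.SHF_MERGE));
    }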
@@ -6093,6 +6294,11 @@ fn fmtDumpState(
try writer.print(" shdr({d}) : COMDAT({d})\n", .{ cg.shndx, cg.cg_index });
}

try writer.writeAll("\nOutput merge sections\n");
for (self.merge_sections.items) |msec| {
try writer.print(" shdr({d}) : {}\n", .{ msec.output_section_index, msec.fmt(self) });
}

try writer.writeAll("\nOutput shdrs\n");
for (self.shdrs.items, 0..) |shdr, shndx| {
try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{
@@ -6285,6 +6491,7 @@ const gc = @import("Elf/gc.zig");
const glibc = @import("../glibc.zig");
const link = @import("../link.zig");
const lldMain = @import("../main.zig").lldMain;
const merge_section = @import("Elf/merge_section.zig");
const musl = @import("../musl.zig");
const relocatable = @import("Elf/relocatable.zig");
const relocation = @import("Elf/relocation.zig");
@@ -6310,10 +6517,13 @@ const GnuHashSection = synthetic_sections.GnuHashSection;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const HashSection = synthetic_sections.HashSection;
const InputMergeSection = merge_section.InputMergeSection;
const LdScript = @import("Elf/LdScript.zig");
const LinkerDefined = @import("Elf/LinkerDefined.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const MergeSection = merge_section.MergeSection;
const MergeSubsection = merge_section.MergeSubsection;
const Module = @import("../Module.zig");
const Object = @import("Elf/Object.zig");
const InternPool = @import("../InternPool.zig");
src/link/Elf/Atom.zig

@@ -1,5 +1,5 @@
/// Address allocated for this Atom.
value: u64 = 0,
value: i64 = 0,

/// Name of this Atom.
name_offset: u32 = 0,
@@ -44,10 +44,22 @@ pub fn name(self: Atom, elf_file: *Elf) []const u8 {
};
}

pub fn address(self: Atom, elf_file: *Elf) u64 {
pub fn address(self: Atom, elf_file: *Elf) i64 {
const shndx = self.outputShndx() orelse return self.value;
const shdr = elf_file.shdrs.items[shndx];
return shdr.sh_addr + self.value;
return @as(i64, @intCast(shdr.sh_addr)) + self.value;
}

pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
if (target.mergeSubsection(elf_file)) |msub| {
if (msub.alive) return null;
}
if (target.atom(elf_file)) |atom_ptr| {
if (atom_ptr.flags.alive) return null;
}
const atom_name = self.name(elf_file);
if (!mem.startsWith(u8, atom_name, ".debug")) return null;
return if (mem.eql(u8, atom_name, ".debug_loc") or mem.eql(u8, atom_name, ".debug_ranges")) 1 else 0;
}

pub fn file(self: Atom, elf_file: *Elf) ?File {
@@ -91,13 +103,13 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
next.address(elf_file)
else
std.math.maxInt(u32);
return next_addr - self.address(elf_file);
return @intCast(next_addr - self.address(elf_file));
}

pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
// No need to keep a free list node for the last block.
const next = elf_file.atom(self.next_index) orelse return false;
const cap = next.address(elf_file) - self.address(elf_file);
const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
const ideal_cap = Elf.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
@@ -130,8 +142,8 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
// Is it enough that we could fit this new atom?
const cap = big_atom.capacity(elf_file);
const ideal_capacity = Elf.padToIdeal(cap);
const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = big_atom.value + cap;
const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
if (new_start_vaddr < ideal_capacity_end_vaddr) {
@@ -156,14 +168,14 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
if (!keep_free_list_node) {
free_list_removal = i;
}
break :blk new_start_vaddr;
break :blk @intCast(new_start_vaddr);
} else if (elf_file.atom(last_atom_index.*)) |last| {
const ideal_capacity = Elf.padToIdeal(last.size);
const ideal_capacity_end_vaddr = last.value + ideal_capacity;
const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
// Set up the metadata to be updated, after errors are no longer possible.
atom_placement = last.atom_index;
break :blk new_start_vaddr;
break :blk @intCast(new_start_vaddr);
} else {
break :blk 0;
}
@@ -173,7 +185,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
self.atom_index,
self.name(elf_file),
self.address(elf_file),
self.address(elf_file) + self.size,
self.address(elf_file) + @as(i64, @intCast(self.size)),
});

const expand_section = if (atom_placement) |placement_index|
@@ -181,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
else
true;
if (expand_section) {
const needed_size = self.value + self.size;
const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
try elf_file.growAllocSection(self.outputShndx().?, needed_size);
last_atom_index.* = self.atom_index;

@@ -231,7 +243,7 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
}

pub fn grow(self: *Atom, elf_file: *Elf) !void {
if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
try self.allocate(elf_file);
}

@@ -321,11 +333,14 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
};
const target = elf_file.symbol(target_index);
const r_type = rel.r_type();
const r_offset = self.value + rel.r_offset;
const r_offset: u64 = @intCast(self.value + @as(i64, @intCast(rel.r_offset)));
var r_addend = rel.r_addend;
var r_sym: u32 = 0;
switch (target.type(elf_file)) {
elf.STT_SECTION => {
elf.STT_SECTION => if (target.mergeSubsection(elf_file)) |msub| {
r_addend += @intCast(target.address(.{}, elf_file));
r_sym = elf_file.sectionSymbolOutputSymtabIndex(msub.mergeSection(elf_file).output_section_index);
} else {
r_addend += @intCast(target.address(.{}, elf_file));
r_sym = elf_file.sectionSymbolOutputSymtabIndex(target.outputShndx().?);
},
@@ -412,6 +427,12 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
};
const symbol = elf_file.symbol(symbol_index);

const is_synthetic_symbol = switch (file_ptr) {
.zig_object => false, // TODO: implement this once we support merge sections in ZigObject
.object => |x| rel.r_sym() >= x.symtab.items.len,
else => unreachable,
};

// Check for violation of One Definition Rule for COMDATs.
if (symbol.file(elf_file) == null) {
// TODO convert into an error
@@ -424,7 +445,8 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
}

// Report an undefined symbol.
if (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)) continue;
if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)))
continue;

if (symbol.isIFunc(elf_file)) {
symbol.flags.needs_got = true;
@@ -733,21 +755,21 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
// Address of the source atom.
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
const GOT = elf_file.gotAddress();
// Address of the .zig.got table entry if any.
const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
const ZIG_GOT = target.zigGotAddress(elf_file);
// Relative offset to the start of the global offset table.
const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
const G = target.gotAddress(elf_file) - GOT;
// // Address of the thread pointer.
const TP = @as(i64, @intCast(elf_file.tpAddress()));
const TP = elf_file.tpAddress();
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
const DTP = elf_file.dtpAddress();

relocs_log.debug(" {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
relocation.fmtRelocType(rel.r_type(), cpu_arch),
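With the switch to i64 addresses above, the P/A/S/GOT/TP/DTP inputs feed the relocation formulas directly; for example a 32-bit PC-relative field stores S + A - P. A worked example of that arithmetic with hypothetical addresses:

    const std = @import("std");

    test "pc-relative relocation arithmetic" {
        // Hypothetical numbers: target symbol at 0x201000, addend -4,
        // relocation site at 0x180010 — the classic S + A - P form used above.
        const S: i64 = 0x201000;
        const A: i64 = -4;
        const P: i64 = 0x180010;
        const value: i32 = @intCast(S + A - P);
        try std.testing.expectEqual(@as(i32, 0x80fec), value);
    }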
@@ -804,9 +826,9 @@ fn resolveDynAbsReloc(
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const cpu_arch = elf_file.getTarget().cpu.arch;
const P = self.address(elf_file) + rel.r_offset;
const P: u64 = @intCast(self.address(elf_file) + @as(i64, @intCast(rel.r_offset)));
const A = rel.r_addend;
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;

const num_dynrelocs = switch (self.file(elf_file).?) {
@@ -874,7 +896,7 @@ fn resolveDynAbsReloc(
},

.ifunc => {
const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
const S_ = target.address(.{ .plt = false }, elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = P,
.type = relocation.encode(.irel, cpu_arch),
@@ -914,6 +936,11 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
else => unreachable,
};
const target = elf_file.symbol(target_index);
const is_synthetic_symbol = switch (file_ptr) {
.zig_object => false, // TODO: implement this once we support merge sections in ZigObject
.object => |x| rel.r_sym() >= x.symtab.items.len,
else => unreachable,
};

// Check for violation of One Definition Rule for COMDATs.
if (target.file(elf_file) == null) {
@@ -927,20 +954,21 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
}

// Report an undefined symbol.
if (try self.reportUndefined(elf_file, target, target_index, rel, undefs)) continue;
if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, target, target_index, rel, undefs)))
continue;

// We will use equation format to resolve relocations:
// https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
//
const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
// Addend from the relocation.
const A = rel.r_addend;
// Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
const S = @as(i64, @intCast(target.address(.{}, elf_file)));
const S = target.address(.{}, elf_file);
// Address of the global offset table.
const GOT = @as(i64, @intCast(elf_file.gotAddress()));
const GOT = elf_file.gotAddress();
// Address of the dynamic thread pointer.
const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
const DTP = elf_file.dtpAddress();

const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP, 0 };

@@ -1261,10 +1289,10 @@ const x86_64 = struct {

.TLSGD => {
if (target.flags.has_tlsgd) {
const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
const S_ = target.tlsGdAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
const S_ = target.gotTpAddress(elf_file);
try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
} else {
try x86_64.relaxTlsGdToLe(
@@ -1280,13 +1308,13 @@ const x86_64 = struct {
.TLSLD => {
if (elf_file.got.tlsld_index) |entry_index| {
const tlsld_entry = elf_file.got.entries.items[entry_index];
const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
const S_ = tlsld_entry.address(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
try x86_64.relaxTlsLdToLe(
atom,
&.{ rel, it.next().? },
@as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
@as(i32, @intCast(TP - elf_file.tlsAddress())),
elf_file,
stream,
);
@@ -1295,7 +1323,7 @@ const x86_64 = struct {

.GOTPC32_TLSDESC => {
if (target.flags.has_tlsdesc) {
const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
const S_ = target.tlsDescAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch {
@@ -1319,7 +1347,7 @@ const x86_64 = struct {

.GOTTPOFF => {
if (target.flags.has_gottp) {
const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
const S_ = target.gotTpAddress(elf_file);
try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
} else {
x86_64.relaxGotTpOff(code[r_offset - 3 ..]);
@@ -1362,9 +1390,18 @@ const x86_64 = struct {
.@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
.@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
.@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => try cwriter.writeInt(i64, S + A, .little),
.DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
.DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
.DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A - DTP, .little),
.GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
.GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
.SIZE32 => {
@@ -1746,7 +1783,7 @@ const aarch64 = struct {
.object => |x| x.symbols.items[rel.r_sym()],
else => unreachable,
};
const S_: i64 = @intCast(th.targetAddress(target_index, elf_file));
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
aarch64_util.writeBranchImm(disp, code);
@@ -1764,16 +1801,12 @@ const aarch64 = struct {

.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
const saddr = @as(u64, @intCast(P));
const taddr = @as(u64, @intCast(S + A));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
aarch64_util.writeAdrpInst(pages, code);
},

.ADR_GOT_PAGE => if (target.flags.has_got) {
const saddr = @as(u64, @intCast(P));
const taddr = @as(u64, @intCast(G + GOT + A));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
aarch64_util.writeAdrpInst(pages, code);
} else {
// TODO: relax
@@ -1828,46 +1861,38 @@ const aarch64 = struct {
},

.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_: i64 = @intCast(target.gotTpAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
},

.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_: i64 = @intCast(target.gotTpAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
},

.TLSGD_ADR_PAGE21 => {
const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
},

.TLSGD_ADD_LO12_NC => {
const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = @truncate(taddr);
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
},

.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const saddr: u64 = @intCast(P);
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
aarch64_util.writeAdrpInst(pages, code);
} else {
relocs_log.debug(" relaxing adrp => nop", .{});
@@ -1877,10 +1902,9 @@ const aarch64 = struct {

.TLSDESC_LD64_LO12 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
aarch64_util.writeLoadStoreRegInst(offset, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
@@ -1890,10 +1914,9 @@ const aarch64 = struct {

.TLSDESC_ADD_LO12 => {
if (target.flags.has_tlsdesc) {
const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
const taddr: u64 = @intCast(S_ + A);
relocs_log.debug(" [{x} => {x}]", .{ P, taddr });
const offset: u12 = @truncate(taddr);
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
aarch64_util.writeAddImmInst(offset, code);
} else {
const old_inst = Instruction{
@@ -1938,7 +1961,6 @@ const aarch64 = struct {
) !void {
_ = it;
_ = code;
_ = target;

const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
const cwriter = stream.writer();
@@ -1948,7 +1970,10 @@ const aarch64 = struct {
switch (r_type) {
.NONE => unreachable,
.ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.ABS64 => try cwriter.writeInt(i64, S + A, .little),
.ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),
else => try atom.reportUnhandledRelocError(rel, elf_file),
}
}
@@ -2073,7 +2098,7 @@ const riscv = struct {
const atom_addr = atom.address(elf_file);
const pos = it.pos;
const pair = while (it.prev()) |pair| {
if (S == atom_addr + pair.r_offset) break pair;
if (S == atom_addr + @as(i64, @intCast(pair.r_offset))) break pair;
} else {
// TODO: implement searching forward
var err = try elf_file.addErrorWithNotes(1);
@@ -2091,10 +2116,10 @@ const riscv = struct {
.object => |x| elf_file.symbol(x.symbols.items[pair.r_sym()]),
else => unreachable,
};
const S_ = @as(i64, @intCast(target_.address(.{}, elf_file)));
const S_ = target_.address(.{}, elf_file);
const A_ = pair.r_addend;
const P_ = @as(i64, @intCast(atom_addr + pair.r_offset));
const G_ = @as(i64, @intCast(target_.gotAddress(elf_file))) - GOT;
const P_ = atom_addr + @as(i64, @intCast(pair.r_offset));
const G_ = target_.gotAddress(elf_file) - GOT;
const disp = switch (@as(elf.R_RISCV, @enumFromInt(pair.r_type()))) {
.PCREL_HI20 => math.cast(i32, S_ + A_ - P_) orelse return error.Overflow,
.GOT_HI20 => math.cast(i32, G_ + GOT + A_ - P_) orelse return error.Overflow,
@@ -2122,7 +2147,6 @@ const riscv = struct {
code: []u8,
stream: anytype,
) !void {
_ = target;
_ = it;

const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
@@ -2137,7 +2161,10 @@ const riscv = struct {
.NONE => unreachable,

.@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
.@"64" => try cwriter.writeInt(i64, S + A, .little),
.@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
try cwriter.writeInt(u64, value, .little)
else
try cwriter.writeInt(i64, S + A, .little),

.ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
.SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
src/link/Elf/Object.zig

@@ -15,6 +15,8 @@ comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup.Index) = .{},
comdat_group_data: std.ArrayListUnmanaged(u32) = .{},
relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},

merge_sections: std.ArrayListUnmanaged(InputMergeSection.Index) = .{},

fdes: std.ArrayListUnmanaged(Fde) = .{},
cies: std.ArrayListUnmanaged(Cie) = .{},
eh_frame_data: std.ArrayListUnmanaged(u8) = .{},
@@ -51,6 +53,7 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
self.fdes.deinit(allocator);
self.cies.deinit(allocator);
self.eh_frame_data.deinit(allocator);
self.merge_sections.deinit(allocator);
}

pub fn parse(self: *Object, elf_file: *Elf) !void {
@@ -280,8 +283,6 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
const name = blk: {
const name = self.getString(shdr.sh_name);
if (elf_file.base.isRelocatable()) break :blk name;
if (shdr.sh_flags & elf.SHF_MERGE != 0 and shdr.sh_flags & elf.SHF_STRINGS == 0)
break :blk name; // TODO: consider dropping SHF_STRINGS once ICF is implemented
const sh_name_prefixes: []const [:0]const u8 = &.{
".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss",
".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors",
@@ -335,7 +336,6 @@ fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
const name = self.getString(shdr.sh_name);
const ignore = blk: {
if (mem.startsWith(u8, name, ".note")) break :blk true;
if (mem.startsWith(u8, name, ".comment")) break :blk true;
if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
@@ -354,7 +354,7 @@ fn initSymtab(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
const index = try elf_file.addSymbol();
self.symbols.appendAssumeCapacity(index);
const sym_ptr = elf_file.symbol(index);
sym_ptr.value = sym.st_value;
sym_ptr.value = @intCast(sym.st_value);
sym_ptr.name_offset = sym.st_name;
sym_ptr.esym_index = @as(u32, @intCast(i));
sym_ptr.atom_index = if (sym.st_shndx == elf.SHN_ABS) 0 else self.atoms.items[sym.st_shndx];
@@ -547,7 +547,7 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
elf.SHN_ABS, elf.SHN_COMMON => 0,
else => self.atoms.items[esym.st_shndx],
};
global.value = esym.st_value;
global.value = @intCast(esym.st_value);
global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
@@ -659,6 +659,173 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO
}
}

pub fn initMergeSections(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;

try self.merge_sections.resize(gpa, self.shdrs.items.len);
@memset(self.merge_sections.items, 0);

for (self.shdrs.items, 0..) |shdr, shndx| {
if (shdr.sh_flags & elf.SHF_MERGE == 0) continue;

const atom_index = self.atoms.items[shndx];
const atom_ptr = elf_file.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
if (atom_ptr.relocs(elf_file).len > 0) continue;

const imsec_idx = try elf_file.addInputMergeSection();
const imsec = elf_file.inputMergeSection(imsec_idx).?;
self.merge_sections.items[shndx] = imsec_idx;

imsec.merge_section_index = try elf_file.getOrCreateMergeSection(atom_ptr.name(elf_file), shdr.sh_flags, shdr.sh_type);
imsec.atom_index = atom_index;

const data = try self.codeDecompressAlloc(elf_file, atom_index);
defer gpa.free(data);
const sh_entsize: u32 = @intCast(shdr.sh_entsize);

if (shdr.sh_flags & elf.SHF_STRINGS != 0) {
var pos: u32 = 0;
while (pos < data.len) switch (sh_entsize) {
0, 1 => {
// According to mold's source code, GHC emits MS sections with sh_entsize = 0.
// This actually can also happen for output created with `-r` mode.
const string = mem.sliceTo(@as([*:0]const u8, @ptrCast(data.ptr + pos)), 0);
if (pos + string.len == data.len) {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "string not null terminated", .{});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
}
try imsec.insertZ(gpa, string);
try imsec.offsets.append(gpa, pos);
pos += @as(u32, @intCast(string.len)) + 1; // account for null
},
else => |entsize| {
const string = data.ptr[pos..][0..entsize];
if (string[string.len - 1] != 0) {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "string not null terminated", .{});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
}
try imsec.insert(gpa, string);
try imsec.offsets.append(gpa, pos);
pos += @as(u32, @intCast(string.len));
},
};
} else {
if (sh_entsize == 0) continue; // Malformed, don't split but don't error out
if (shdr.sh_size % sh_entsize != 0) {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "size not a multiple of sh_entsize", .{});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
}

var pos: u32 = 0;
while (pos < data.len) : (pos += sh_entsize) {
const string = data.ptr[pos..][0..sh_entsize];
try imsec.insert(gpa, string);
try imsec.offsets.append(gpa, pos);
}
}

atom_ptr.flags.alive = false;
}
}

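For SHF_STRINGS sections with sh_entsize 0 or 1, initMergeSections above splits the raw section bytes at null terminators and records each entry's original offset so symbols and relocations can later be rebased onto the merged copies. A minimal illustration of that splitting over a hypothetical buffer, using the managed std.ArrayList API already used elsewhere in this commit (this is not the InputMergeSection API):

    const std = @import("std");

    test "split a SHF_STRINGS section into null-terminated entries" {
        const data = "foo\x00bar\x00foo\x00"; // contents of a hypothetical .rodata.str1.1
        var offsets = std.ArrayList(u32).init(std.testing.allocator);
        defer offsets.deinit();

        var pos: u32 = 0;
        while (pos < data.len) {
            const string = std.mem.sliceTo(data[pos..], 0);
            try offsets.append(pos);
            pos += @as(u32, @intCast(string.len)) + 1; // skip the null terminator too
        }
        // Three entries at offsets 0, 4 and 8; the two "foo"s will later dedupe
        // into a single output subsection.
        try std.testing.expectEqualSlices(u32, &.{ 0, 4, 8 }, offsets.items);
    }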
pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) !void {
const gpa = elf_file.base.comp.gpa;

for (self.merge_sections.items) |index| {
const imsec = elf_file.inputMergeSection(index) orelse continue;
const msec = elf_file.mergeSection(imsec.merge_section_index);
const atom_ptr = elf_file.atom(imsec.atom_index).?;
const isec = atom_ptr.inputShdr(elf_file);

try imsec.subsections.resize(gpa, imsec.strings.items.len);

for (imsec.strings.items, imsec.subsections.items) |str, *imsec_msub| {
const string = imsec.bytes.items[str.pos..][0..str.len];
const res = try msec.insert(gpa, string);
if (!res.found_existing) {
const msub_index = try elf_file.addMergeSubsection();
const msub = elf_file.mergeSubsection(msub_index);
msub.merge_section_index = imsec.merge_section_index;
msub.string_index = res.key.pos;
msub.alignment = atom_ptr.alignment;
msub.size = res.key.len;
msub.alive = !elf_file.base.gc_sections or isec.sh_flags & elf.SHF_ALLOC == 0;
res.sub.* = msub_index;
}
imsec_msub.* = res.sub.*;
}

imsec.clearAndFree(gpa);
}

for (self.symtab.items, 0..) |*esym, idx| {
const sym_index = self.symbols.items[idx];
const sym = elf_file.symbol(sym_index);

if (esym.st_shndx == elf.SHN_COMMON or esym.st_shndx == elf.SHN_UNDEF or esym.st_shndx == elf.SHN_ABS) continue;

const imsec_index = self.merge_sections.items[esym.st_shndx];
const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
const msub_index, const offset = imsec.findSubsection(@intCast(esym.st_value)) orelse {
var err = try elf_file.addErrorWithNotes(2);
try err.addMsg(elf_file, "invalid symbol value: {x}", .{esym.st_value});
try err.addNote(elf_file, "for symbol {s}", .{sym.name(elf_file)});
try err.addNote(elf_file, "in {}", .{self.fmtPath()});
return error.MalformedObject;
};

try sym.addExtra(.{ .subsection = msub_index }, elf_file);
sym.flags.merge_subsection = true;
sym.value = offset;
}

for (self.atoms.items) |atom_index| {
const atom_ptr = elf_file.atom(atom_index) orelse continue;
if (!atom_ptr.flags.alive) continue;
const extras = atom_ptr.extra(elf_file) orelse continue;
const relocs = self.relocs.items[extras.rel_index..][0..extras.rel_count];
for (relocs) |*rel| {
const esym = self.symtab.items[rel.r_sym()];
if (esym.st_type() != elf.STT_SECTION) continue;

const imsec_index = self.merge_sections.items[esym.st_shndx];
const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
const msub_index, const offset = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
var err = try elf_file.addErrorWithNotes(1);
try err.addMsg(elf_file, "invalid relocation at offset 0x{x}", .{rel.r_offset});
try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
return error.MalformedObject;
};
const msub = elf_file.mergeSubsection(msub_index);
const msec = msub.mergeSection(elf_file);

const out_sym_idx: u64 = @intCast(self.symbols.items.len);
try self.symbols.ensureUnusedCapacity(gpa, 1);
const name = try std.fmt.allocPrint(gpa, "{s}$subsection{d}", .{ msec.name(elf_file), msub_index });
defer gpa.free(name);
const sym_index = try elf_file.addSymbol();
const sym = elf_file.symbol(sym_index);
sym.* = .{
.value = @bitCast(@as(i64, @intCast(offset)) - rel.r_addend),
.name_offset = try self.addString(gpa, name),
.esym_index = rel.r_sym(),
.file_index = self.index,
};
try sym.addExtra(.{ .subsection = msub_index }, elf_file);
sym.flags.merge_subsection = true;
self.symbols.addOneAssumeCapacity().* = sym_index;
rel.r_info = (out_sym_idx << 32) | rel.r_type();
}
}
}
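resolveMergeSubsections above leans on InputMergeSection.findSubsection to map an input-section offset (a symbol's st_value, or st_value plus addend for a section relocation) back to the entry that now owns those bytes, plus the remaining offset inside it. That function is not in this diff; the following is a plausible stand-in under the assumption that entry offsets are recorded in ascending order, as initMergeSections appends them:

    const std = @import("std");

    /// Hypothetical stand-in for InputMergeSection.findSubsection: given the
    /// per-entry start offsets recorded while splitting the input section,
    /// return the entry index owning `addr` and the offset inside that entry.
    fn findSubsection(offsets: []const u32, addr: u32) ?struct { usize, u32 } {
        if (offsets.len == 0 or addr < offsets[0]) return null;
        var idx: usize = offsets.len - 1;
        // Linear scan for clarity; a binary search works the same way.
        while (offsets[idx] > addr) idx -= 1;
        return .{ idx, addr - offsets[idx] };
    }

    test findSubsection {
        const offsets = [_]u32{ 0, 4, 8 }; // entries from the splitting example above
        try std.testing.expectEqual(@as(u32, 2), findSubsection(&offsets, 6).?.@"1");
    }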
|
||||
/// We will create dummy shdrs per each resolved common symbols to make it
|
||||
/// play nicely with the rest of the system.
|
||||
pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
|
||||
@ -749,6 +916,11 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {

for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
if (local.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) continue;
local.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
const atom = local.atom(elf_file) orelse continue;
if (!atom.flags.alive) continue;
local.output_section_index = atom.output_section_index;
@ -756,11 +928,23 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {

for (self.globals()) |global_index| {
const global = elf_file.symbol(global_index);
if (global.file(elf_file).?.index() != self.index) continue;
if (global.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) continue;
global.output_section_index = msub.mergeSection(elf_file).output_section_index;
continue;
}
const atom = global.atom(elf_file) orelse continue;
if (!atom.flags.alive) continue;
if (global.file(elf_file).?.index() != self.index) continue;
global.output_section_index = atom.output_section_index;
}

for (self.symbols.items[self.symtab.items.len..]) |local_index| {
const local = elf_file.symbol(local_index);
const msub = local.mergeSubsection(elf_file).?;
if (!msub.alive) continue;
local.output_section_index = msub.mergeSection(elf_file).output_section_index;
}
}
pub fn initRelaSections(self: Object, elf_file: *Elf) !void {
@ -845,9 +1029,17 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
}

pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
const isAlive = struct {
fn isAlive(sym: *const Symbol, ctx: *Elf) bool {
if (sym.mergeSubsection(ctx)) |msub| return msub.alive;
if (sym.atom(ctx)) |atom_ptr| return atom_ptr.flags.alive;
return true;
}
}.isAlive;

for (self.locals()) |local_index| {
const local = elf_file.symbol(local_index);
if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
if (!isAlive(local, elf_file)) continue;
const esym = local.elfSym(elf_file);
switch (esym.st_type()) {
elf.STT_SECTION => continue,
@ -864,7 +1056,7 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
const global = elf_file.symbol(global_index);
const file_ptr = global.file(elf_file) orelse continue;
if (file_ptr.index() != self.index) continue;
if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
if (!isAlive(global, elf_file)) continue;
global.flags.output_symtab = true;
if (global.isLocal(elf_file)) {
try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
@ -904,14 +1096,16 @@ pub fn writeSymtab(self: Object, elf_file: *Elf) void {

pub fn locals(self: Object) []const Symbol.Index {
if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
const end = self.first_global orelse self.symbols.items.len;
assert(self.symbols.items.len >= self.symtab.items.len);
const end = self.first_global orelse self.symtab.items.len;
return self.symbols.items[0..end];
}

pub fn globals(self: Object) []const Symbol.Index {
if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
const start = self.first_global orelse self.symbols.items.len;
return self.symbols.items[start..];
assert(self.symbols.items.len >= self.symtab.items.len);
const start = self.first_global orelse self.symtab.items.len;
return self.symbols.items[start..self.symtab.items.len];
}

/// Returns atom's code and optionally uncompresses data if required (for compressed sections).
@ -956,6 +1150,14 @@ pub fn getString(self: Object, off: u32) [:0]const u8 {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
}

fn addString(self: *Object, allocator: Allocator, str: []const u8) !u32 {
const off: u32 = @intCast(self.strtab.items.len);
try self.strtab.ensureUnusedCapacity(allocator, str.len + 1);
self.strtab.appendSliceAssumeCapacity(str);
self.strtab.appendAssumeCapacity(0);
return off;
}

/// Caller owns the memory.
fn preadShdrContentsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, index: u32) ![]u8 {
assert(index < self.shdrs.items.len);
@ -1161,5 +1363,6 @@ const Cie = eh_frame.Cie;
const Elf = @import("../Elf.zig");
const Fde = eh_frame.Fde;
const File = @import("file.zig").File;
const InputMergeSection = @import("merge_section.zig").InputMergeSection;
const Symbol = @import("Symbol.zig");
const Alignment = Atom.Alignment;
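At its core, the subsection bookkeeping in the Object.zig hunks above is a deduplication map: each distinct string fragment gets one MergeSubsection in the output, and every input occurrence is redirected to it, which is what the `res.found_existing` check at the top of this section guards. The following is a minimal standalone Zig sketch of that idea only; it is not the linker's data structure (the real code also tracks alignment, liveness, and a per-input mapping from fragment to subsection), and the names here are invented for illustration.

const std = @import("std");

// Toy model of string merging: each unique string is stored once in an output
// pool; repeated inputs resolve to the offset recorded on first insertion.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var pool = std.ArrayList(u8).init(allocator);
    defer pool.deinit();
    var dedup = std.StringHashMap(u32).init(allocator);
    defer dedup.deinit();

    const inputs = [_][]const u8{ "hello", "world", "hello" };
    for (inputs) |s| {
        const gop = try dedup.getOrPut(s);
        if (!gop.found_existing) {
            gop.value_ptr.* = @intCast(pool.items.len);
            try pool.appendSlice(s);
            try pool.append(0); // keep strings NUL-terminated, as in string-merge sections
        }
        std.debug.print("\"{s}\" -> pool offset {d}\n", .{ s, gop.value_ptr.* });
    }
}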
@ -231,7 +231,7 @@ pub fn resolveSymbols(self: *SharedObject, elf_file: *Elf) void {

const global = elf_file.symbol(index);
if (self.asFile().symbolRank(this_sym, false) < global.symbolRank(elf_file)) {
global.value = this_sym.st_value;
global.value = @intCast(this_sym.st_value);
global.atom_index = 0;
global.esym_index = esym_index;
global.version_index = self.versyms.items[esym_index];

@ -1,7 +1,7 @@
//! Represents a defined symbol.

/// Allocated address value of this symbol.
value: u64 = 0,
value: i64 = 0,

/// Offset into the linker's string table.
name_offset: u32 = 0,
@ -14,7 +14,7 @@ file_index: File.Index = 0,
/// Use `atom` to get the pointer to the atom.
atom_index: Atom.Index = 0,

/// Assigned output section index for this atom.
/// Assigned output section index for this symbol.
output_section_index: u32 = 0,

/// Index of the source symbol this symbol references.
@ -33,7 +33,8 @@ extra_index: u32 = 0,
pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
const file_ptr = symbol.file(elf_file).?;
if (file_ptr == .shared_object) return symbol.elfSym(elf_file).st_shndx == elf.SHN_ABS;
return !symbol.flags.import and symbol.atom(elf_file) == null and symbol.outputShndx() == null and
return !symbol.flags.import and symbol.atom(elf_file) == null and
symbol.mergeSubsection(elf_file) == null and symbol.outputShndx() == null and
file_ptr != .linker_defined;
}

@ -70,6 +71,12 @@ pub fn atom(symbol: Symbol, elf_file: *Elf) ?*Atom {
return elf_file.atom(symbol.atom_index);
}

pub fn mergeSubsection(symbol: Symbol, elf_file: *Elf) ?*MergeSubsection {
if (!symbol.flags.merge_subsection) return null;
const extras = symbol.extra(elf_file).?;
return elf_file.mergeSubsection(extras.subsection);
}

pub fn file(symbol: Symbol, elf_file: *Elf) ?File {
return elf_file.file(symbol.file_index);
}
@ -92,7 +99,11 @@ pub fn symbolRank(symbol: Symbol, elf_file: *Elf) u32 {
return file_ptr.symbolRank(sym, in_archive);
}

pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) u64 {
pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) i64 {
if (symbol.mergeSubsection(elf_file)) |msub| {
if (!msub.alive) return 0;
return msub.address(elf_file) + symbol.value;
}
if (symbol.flags.has_copy_rel) {
return symbol.copyRelAddress(elf_file);
}
@ -108,19 +119,23 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf
if (!atom_ptr.flags.alive) {
if (mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame")) {
const sym_name = symbol.name(elf_file);
const sh_addr, const sh_size = blk: {
const shndx = elf_file.eh_frame_section_index orelse break :blk .{ 0, 0 };
const shdr = elf_file.shdrs.items[shndx];
break :blk .{ shdr.sh_addr, shdr.sh_size };
};
if (mem.startsWith(u8, sym_name, "__EH_FRAME_BEGIN__") or
mem.startsWith(u8, sym_name, "__EH_FRAME_LIST__") or
mem.startsWith(u8, sym_name, ".eh_frame_seg") or
symbol.elfSym(elf_file).st_type() == elf.STT_SECTION)
{
return elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr;
return @intCast(sh_addr);
}

if (mem.startsWith(u8, sym_name, "__FRAME_END__") or
mem.startsWith(u8, sym_name, "__EH_FRAME_LIST_END__"))
{
const shdr = elf_file.shdrs.items[elf_file.eh_frame_section_index.?];
return shdr.sh_addr + shdr.sh_size;
return @intCast(sh_addr + sh_size);
}

// TODO I think we potentially should error here
@ -143,57 +158,57 @@ pub fn outputSymtabIndex(symbol: Symbol, elf_file: *Elf) ?u32 {
return if (symbol.isLocal(elf_file)) idx + symtab_ctx.ilocal else idx + symtab_ctx.iglobal;
}

pub fn gotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_got) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.got];
return entry.address(elf_file);
}

pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.plt_got_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch);
return @intCast(shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch));
}

pub fn pltAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn pltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
const cpu_arch = elf_file.getTarget().cpu.arch;
return shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch);
return @intCast(shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch));
}

pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_plt) return 0;
const extras = symbol.extra(elf_file).?;
const shdr = elf_file.shdrs.items[elf_file.got_plt_section_index.?];
return shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size;
return @intCast(shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size);
}

pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_copy_rel) return 0;
const shdr = elf_file.shdrs.items[elf_file.copy_rel_section_index.?];
return shdr.sh_addr + symbol.value;
return @as(i64, @intCast(shdr.sh_addr)) + symbol.value;
}

pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_tlsgd) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.tlsgd];
return entry.address(elf_file);
}

pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_gottp) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.gottp];
return entry.address(elf_file);
}

pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_tlsdesc) return 0;
const extras = symbol.extra(elf_file).?;
const entry = elf_file.got.entries.items[extras.tlsdesc];
@ -213,7 +228,7 @@ pub fn getOrCreateZigGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *E
return .{ .found_existing = false, .index = index };
}

pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
if (!symbol.flags.has_zig_got) return 0;
const extras = symbol.extra(elf_file).?;
return elf_file.zig_got.entryAddress(extras.zig_got, elf_file);
@ -243,6 +258,7 @@ const AddExtraOpts = struct {
gottp: ?u32 = null,
tlsdesc: ?u32 = null,
zig_got: ?u32 = null,
subsection: ?u32 = null,
};

pub fn addExtra(symbol: *Symbol, opts: AddExtraOpts, elf_file: *Elf) !void {
@ -280,6 +296,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
if (symbol.flags.has_copy_rel) break :blk @intCast(elf_file.copy_rel_section_index.?);
if (file_ptr == .shared_object or esym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
if (elf_file.base.isRelocatable() and esym.st_shndx == elf.SHN_COMMON) break :blk elf.SHN_COMMON;
if (symbol.mergeSubsection(elf_file)) |msub| break :blk @intCast(msub.mergeSection(elf_file).output_section_index);
if (symbol.atom(elf_file) == null and file_ptr != .linker_defined) break :blk elf.SHN_ABS;
break :blk @intCast(symbol.outputShndx() orelse elf.SHN_UNDEF);
};
@ -298,7 +315,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
out.st_info = (st_bind << 4) | st_type;
out.st_other = esym.st_other;
out.st_shndx = st_shndx;
out.st_value = st_value;
out.st_value = @intCast(st_value);
out.st_size = esym.st_size;
}

@ -450,6 +467,9 @@ pub const Flags = packed struct {
/// TODO this is really not needed if only we operated on esyms between
/// codegen and ZigObject.
is_tls: bool = false,

/// Whether the symbol is a merge subsection.
merge_subsection: bool = false,
};

pub const Extra = struct {
@ -463,6 +483,7 @@ pub const Extra = struct {
gottp: u32 = 0,
tlsdesc: u32 = 0,
zig_got: u32 = 0,
subsection: u32 = 0,
};

pub const Index = u32;
@ -479,6 +500,7 @@ const File = @import("file.zig").File;
const GotSection = synthetic_sections.GotSection;
const GotPltSection = synthetic_sections.GotPltSection;
const LinkerDefined = @import("LinkerDefined.zig");
const MergeSubsection = @import("merge_section.zig").MergeSubsection;
const Object = @import("Object.zig");
const PltSection = synthetic_sections.PltSection;
const PltGotSection = synthetic_sections.PltGotSection;
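A theme running through the Symbol.zig changes above (and the thunk changes further down) is moving address values and accessors from u64 to i64. One practical effect visible in this diff is that sentinels such as -1 and signed displacement arithmetic no longer need @bitCast detours. A tiny illustrative test follows, using made-up numbers rather than anything from the linker.

const std = @import("std");

test "signed linker values avoid bitcast detours" {
    // With unsigned storage, an "unassigned" marker has to be smuggled in:
    const unassigned_u64: u64 = @bitCast(@as(i64, -1));
    try std.testing.expect(unassigned_u64 == 0xffff_ffff_ffff_ffff);

    // With i64 storage the sentinel comparison reads directly.
    const value: i64 = -1;
    try std.testing.expect(value == -1);

    // Mixed-sign displacement arithmetic falls out of ordinary subtraction.
    const taddr: i64 = 0x2000;
    const saddr: i64 = 0x3000;
    const addend: i64 = 8;
    try std.testing.expectEqual(@as(i64, -0xff8), taddr + addend - saddr);
}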
@ -343,7 +343,7 @@ pub fn resolveSymbols(self: *ZigObject, elf_file: *Elf) void {
atom.outputShndx().?
else
elf.SHN_UNDEF;
global.value = esym.st_value;
global.value = @intCast(esym.st_value);
global.atom_index = atom_index;
global.esym_index = esym_index;
global.file_index = self.index;
@ -631,7 +631,7 @@ pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8
return code;
}

const file_offset = shdr.sh_offset + atom.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom.value));
const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
const code = try gpa.alloc(u8, size);
errdefer gpa.free(code);
@ -659,7 +659,7 @@ pub fn getDeclVAddr(
.r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
return @intCast(vaddr);
}

pub fn getAnonDeclVAddr(
@ -678,7 +678,7 @@ pub fn getAnonDeclVAddr(
.r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | r_type,
.r_addend = reloc_info.addend,
});
return vaddr;
return @intCast(vaddr);
}

pub fn lowerAnonDecl(
@ -929,7 +929,7 @@ fn updateDeclCode(

if (old_size > 0 and elf_file.base.child_pid == null) {
const capacity = atom_ptr.capacity(elf_file);
const need_realloc = code.len > capacity or !required_alignment.check(atom_ptr.value);
const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
if (need_realloc) {
try atom_ptr.grow(elf_file);
log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom_ptr.value });
@ -984,7 +984,7 @@ fn updateDeclCode(

const shdr = elf_file.shdrs.items[shdr_index];
if (shdr.sh_type != elf.SHT_NOBITS) {
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
}
}
@ -1107,7 +1107,7 @@ pub fn updateFunc(
try self.dwarf.?.commitDeclState(
mod,
decl_index,
sym.address(.{}, elf_file),
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
);
@ -1186,7 +1186,7 @@ pub fn updateDecl(
try self.dwarf.?.commitDeclState(
mod,
decl_index,
sym.address(.{}, elf_file),
@intCast(sym.address(.{}, elf_file)),
sym.atom(elf_file).?.size,
ds,
);
@ -1275,7 +1275,7 @@ fn updateLazySymbol(
}

const shdr = elf_file.shdrs.items[output_section_index];
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);
}

@ -1373,7 +1373,7 @@ fn lowerConst(
local_esym.st_value = 0;

const shdr = elf_file.shdrs.items[output_section_index];
const file_offset = shdr.sh_offset + atom_ptr.value;
const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
try elf_file.base.file.?.pwriteAll(code, file_offset);

return .{ .ok = sym_index };
@ -1457,7 +1457,7 @@ pub fn updateExports(

const actual_esym_index = global_esym_index & symbol_mask;
const global_esym = &self.global_esyms.items(.elf_sym)[actual_esym_index];
global_esym.st_value = elf_file.symbol(sym_index).value;
global_esym.st_value = @intCast(elf_file.symbol(sym_index).value);
global_esym.st_shndx = esym.st_shndx;
global_esym.st_info = (stb_bits << 4) | stt_bits;
global_esym.st_name = name_off;
@ -68,6 +68,10 @@ fn collectRoots(roots: *std.ArrayList(*Atom), files: []const File.Index, elf_fil
}

fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
if (sym.mergeSubsection(elf_file)) |msub| {
msub.alive = true;
return;
}
const atom = sym.atom(elf_file) orelse return;
if (markAtom(atom)) try roots.append(atom);
}
@ -96,6 +100,10 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {

for (atom.relocs(elf_file)) |rel| {
const target_sym = elf_file.symbol(file.symbol(rel.r_sym()));
if (target_sym.mergeSubsection(elf_file)) |msub| {
msub.alive = true;
continue;
}
const target_atom = target_sym.atom(elf_file) orelse continue;
target_atom.flags.alive = true;
gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });

@ -181,6 +181,8 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
elf_file.markEhFrameAtomsDead();
claimUnresolved(elf_file);

try elf_file.addCommentString();
try elf_file.sortMergeSections();
try initSections(elf_file);
try elf_file.sortShdrs();
if (elf_file.zigObjectPtr()) |zig_object| {
@ -191,6 +193,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
try object.addAtomsToOutputSections(elf_file);
try object.addAtomsToRelaSections(elf_file);
}
try elf_file.updateMergeSectionSizes();
try updateSectionSizes(elf_file);

try allocateAllocSections(elf_file);
@ -201,6 +204,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
}

try writeAtoms(elf_file);
try elf_file.writeMergeSections();
try writeSyntheticSections(elf_file);
try elf_file.writeShdrTable();
try elf_file.writeElfHeader();
@ -275,6 +279,17 @@ fn initSections(elf_file: *Elf) !void {
try object.initRelaSections(elf_file);
}

for (elf_file.merge_sections.items) |*msec| {
if (msec.subsections.items.len == 0) continue;
const name = msec.name(elf_file);
const shndx = elf_file.sectionByName(name) orelse try elf_file.addSection(.{
.name = name,
.type = msec.type,
.flags = msec.flags,
});
msec.output_section_index = shndx;
}

const needs_eh_frame = for (elf_file.objects.items) |index| {
if (elf_file.file(index).?.object.cies.items.len > 0) break true;
} else false;
@ -328,7 +343,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
if (!atom_ptr.flags.alive) continue;
const offset = atom_ptr.alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
atom_ptr.value = offset;
atom_ptr.value = @intCast(offset);
shdr.sh_size += padding + atom_ptr.size;
shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
}
@ -434,7 +449,7 @@ fn writeAtoms(elf_file: *Elf) !void {
const atom_ptr = elf_file.atom(atom_index).?;
assert(atom_ptr.flags.alive);

const offset = math.cast(usize, atom_ptr.value - shdr.sh_addr - base_offset) orelse
const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
return error.Overflow;
const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
@ -270,11 +270,11 @@ pub const ZigGotSection = struct {
return shdr.sh_offset + @as(u64, entry_size) * index;
}

pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) u64 {
pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) i64 {
_ = zig_got;
const entry_size = elf_file.archPtrWidthBytes();
const shdr = elf_file.shdrs.items[elf_file.zig_got_section_index.?];
return shdr.sh_addr + @as(u64, entry_size) * index;
return @as(i64, @intCast(shdr.sh_addr)) + entry_size * index;
}

pub fn size(zig_got: ZigGotSection, elf_file: *Elf) usize {
@ -291,23 +291,23 @@ pub const ZigGotSection = struct {
const target = elf_file.getTarget();
const endian = target.cpu.arch.endian();
const off = zig_got.entryOffset(index, elf_file);
const vaddr = zig_got.entryAddress(index, elf_file);
const vaddr: u64 = @intCast(zig_got.entryAddress(index, elf_file));
const entry = zig_got.entries.items[index];
const value = elf_file.symbol(entry).address(.{}, elf_file);
switch (entry_size) {
2 => {
var buf: [2]u8 = undefined;
std.mem.writeInt(u16, &buf, @as(u16, @intCast(value)), endian);
std.mem.writeInt(u16, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @as(u32, @intCast(value)), endian);
std.mem.writeInt(u32, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, value, endian);
std.mem.writeInt(u64, &buf, @intCast(value), endian);
try elf_file.base.file.?.pwriteAll(&buf, off);

if (elf_file.base.child_pid) |pid| {
@ -356,9 +356,9 @@ pub const ZigGotSection = struct {
const symbol = elf_file.symbol(entry);
const offset = symbol.zigGotAddress(elf_file);
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.offset = @intCast(offset),
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.address(.{ .plt = false }, elf_file)),
.addend = symbol.address(.{ .plt = false }, elf_file),
});
}
}
@ -386,7 +386,7 @@ pub const ZigGotSection = struct {
.st_info = elf.STT_OBJECT,
.st_other = 0,
.st_shndx = @intCast(elf_file.zig_got_section_index.?),
.st_value = st_value,
.st_value = @intCast(st_value),
.st_size = st_size,
};
}
@ -457,10 +457,10 @@ pub const GotSection = struct {
};
}

pub fn address(entry: Entry, elf_file: *Elf) u64 {
const ptr_bytes = @as(u64, elf_file.archPtrWidthBytes());
pub fn address(entry: Entry, elf_file: *Elf) i64 {
const ptr_bytes = elf_file.archPtrWidthBytes();
const shdr = &elf_file.shdrs.items[elf_file.got_section_index.?];
return shdr.sh_addr + @as(u64, entry.cell_index) * ptr_bytes;
return @as(i64, @intCast(shdr.sh_addr)) + entry.cell_index * ptr_bytes;
}
};

@ -608,8 +608,7 @@ pub const GotSection = struct {
0;
try writeInt(offset, elf_file, writer);
} else {
const offset = @as(i64, @intCast(symbol.?.address(.{}, elf_file))) -
@as(i64, @intCast(elf_file.tpAddress()));
const offset = symbol.?.address(.{}, elf_file) - elf_file.tpAddress();
try writeInt(offset, elf_file, writer);
}
},
@ -620,7 +619,7 @@ pub const GotSection = struct {
} else {
try writeInt(0, elf_file, writer);
const offset = if (apply_relocs)
@as(i64, @intCast(symbol.?.address(.{}, elf_file))) - @as(i64, @intCast(elf_file.tlsAddress()))
symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()
else
0;
try writeInt(offset, elf_file, writer);
@ -646,7 +645,7 @@ pub const GotSection = struct {

switch (entry.tag) {
.got => {
const offset = symbol.?.gotAddress(elf_file);
const offset: u64 = @intCast(symbol.?.gotAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -659,7 +658,7 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.irel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
.addend = symbol.?.address(.{ .plt = false }, elf_file),
});
continue;
}
@ -669,14 +668,14 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.rel, cpu_arch),
.addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
.addend = symbol.?.address(.{ .plt = false }, elf_file),
});
}
},

.tlsld => {
if (is_dyn_lib) {
const offset = entry.address(elf_file);
const offset: u64 = @intCast(entry.address(elf_file));
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.dtpmod, cpu_arch),
@ -685,7 +684,7 @@ pub const GotSection = struct {
},

.tlsgd => {
const offset = symbol.?.tlsGdAddress(elf_file);
const offset: u64 = @intCast(symbol.?.tlsGdAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -707,7 +706,7 @@ pub const GotSection = struct {
},

.gottp => {
const offset = symbol.?.gotTpAddress(elf_file);
const offset: u64 = @intCast(symbol.?.gotTpAddress(elf_file));
if (symbol.?.flags.import) {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
@ -718,18 +717,18 @@ pub const GotSection = struct {
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.type = relocation.encode(.tpoff, cpu_arch),
.addend = @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
.addend = symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
});
}
},

.tlsdesc => {
const offset = symbol.?.tlsDescAddress(elf_file);
const offset: u64 = @intCast(symbol.?.tlsDescAddress(elf_file));
elf_file.addRelaDynAssumeCapacity(.{
.offset = offset,
.sym = if (symbol.?.flags.import) extra.?.dynamic else 0,
.type = relocation.encode(.tlsdesc, cpu_arch),
.addend = if (symbol.?.flags.import) 0 else @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
.addend = if (symbol.?.flags.import) 0 else symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
});
},
}
@ -806,7 +805,7 @@ pub const GotSection = struct {
.st_info = elf.STT_OBJECT,
.st_other = 0,
.st_shndx = @intCast(elf_file.got_section_index.?),
.st_value = st_value,
.st_value = @intCast(st_value),
.st_size = st_size,
};
}
@ -900,7 +899,7 @@ pub const PltSection = struct {
const sym = elf_file.symbol(sym_index);
assert(sym.flags.import);
const extra = sym.extra(elf_file).?;
const r_offset = sym.gotPltAddress(elf_file);
const r_offset: u64 = @intCast(sym.gotPltAddress(elf_file));
const r_sym: u64 = extra.dynamic;
const r_type = relocation.encode(.jump_slot, cpu_arch);
elf_file.rela_plt.appendAssumeCapacity(.{
@ -936,7 +935,7 @@ pub const PltSection = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(elf_file.plt_section_index.?),
.st_value = sym.pltAddress(elf_file),
.st_value = @intCast(sym.pltAddress(elf_file)),
.st_size = entrySize(cpu_arch),
};
}
@ -1009,13 +1008,13 @@ pub const PltSection = struct {
const aarch64 = struct {
fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
{
const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
const plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr);
const got_plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
const ldr_off = try math.divExact(u12, @truncate(got_plt_addr + 16), 8);
const add_off: u12 = @truncate(got_plt_addr + 16);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));

const preamble = &[_]Instruction{
Instruction.stp(
@ -1043,8 +1042,8 @@ pub const PltSection = struct {
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const ldr_off = try math.divExact(u12, @truncate(target_addr), 8);
const add_off: u12 = @truncate(target_addr);
const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
@ -1077,7 +1076,7 @@ pub const GotPltSection = struct {
{
// [0]: _DYNAMIC
const symbol = elf_file.symbol(elf_file.dynamic_index.?);
try writer.writeInt(u64, symbol.address(.{}, elf_file), .little);
try writer.writeInt(u64, @intCast(symbol.address(.{}, elf_file)), .little);
}
// [1]: 0x0
// [2]: 0x0
@ -1153,7 +1152,7 @@ pub const PltGotSection = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(elf_file.plt_got_section_index.?),
.st_value = sym.pltGotAddress(elf_file),
.st_value = @intCast(sym.pltGotAddress(elf_file)),
.st_size = 16,
};
}
@ -1184,7 +1183,7 @@ pub const PltGotSection = struct {
const target_addr = sym.gotAddress(elf_file);
const source_addr = sym.pltGotAddress(elf_file);
const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
const off = try math.divExact(u12, @truncate(target_addr), 8);
const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
const insts = &[_]Instruction{
Instruction.adrp(.x16, pages),
Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
@ -1247,9 +1246,9 @@ pub const CopyRelSection = struct {
const symbol = elf_file.symbol(sym_index);
const shared_object = symbol.file(elf_file).?.shared_object;
const alignment = try symbol.dsoAlignment(elf_file);
symbol.value = mem.alignForward(u64, shdr.sh_size, alignment);
symbol.value = @intCast(mem.alignForward(u64, shdr.sh_size, alignment));
shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
shdr.sh_size = symbol.value + symbol.elfSym(elf_file).st_size;
shdr.sh_size = @as(u64, @intCast(symbol.value)) + symbol.elfSym(elf_file).st_size;

const aliases = shared_object.symbolAliases(sym_index, elf_file);
for (aliases) |alias| {
@ -1270,7 +1269,7 @@ pub const CopyRelSection = struct {
assert(sym.flags.import and sym.flags.has_copy_rel);
const extra = sym.extra(elf_file).?;
elf_file.addRelaDynAssumeCapacity(.{
.offset = sym.address(.{}, elf_file),
.offset = @intCast(sym.address(.{}, elf_file)),
.sym = extra.dynamic,
.type = relocation.encode(.copy, cpu_arch),
});
@ -7,7 +7,7 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
assert(atoms.len > 0);

for (atoms) |atom_index| {
elf_file.atom(atom_index).?.value = @bitCast(@as(i64, -1));
elf_file.atom(atom_index).?.value = -1;
}

var i: usize = 0;
@ -22,7 +22,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
const atom_index = atoms[i];
const atom = elf_file.atom(atom_index).?;
assert(atom.flags.alive);
if (atom.alignment.forward(shdr.sh_size) - start_atom.value >= max_distance) break;
if (@as(i64, @intCast(atom.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
break;
atom.value = try advance(shdr, atom.size, atom.alignment);
}

@ -60,12 +61,12 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
}
}

fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !i64 {
const offset = alignment.forward(shdr.sh_size);
const padding = offset - shdr.sh_size;
shdr.sh_size += padding + size;
shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
return offset;
return @intCast(offset);
}

/// A branch will need an extender if its target is larger than
@ -79,7 +80,7 @@ fn maxAllowedDistance(cpu_arch: std.Target.Cpu.Arch) u32 {
}

pub const Thunk = struct {
value: u64 = 0,
value: i64 = 0,
output_section_index: u32 = 0,
symbols: std.AutoArrayHashMapUnmanaged(Symbol.Index, void) = .{},
output_symtab_ctx: Elf.SymtabCtx = .{},
@ -93,14 +94,14 @@ pub const Thunk = struct {
return thunk.symbols.keys().len * trampolineSize(cpu_arch);
}

pub fn address(thunk: Thunk, elf_file: *Elf) u64 {
pub fn address(thunk: Thunk, elf_file: *Elf) i64 {
const shdr = elf_file.shdrs.items[thunk.output_section_index];
return shdr.sh_addr + thunk.value;
return @as(i64, @intCast(shdr.sh_addr)) + thunk.value;
}

pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) u64 {
pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) i64 {
const cpu_arch = elf_file.getTarget().cpu.arch;
return thunk.address(elf_file) + thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch);
return thunk.address(elf_file) + @as(i64, @intCast(thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch)));
}

pub fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
@ -132,7 +133,7 @@ pub const Thunk = struct {
.st_info = elf.STT_FUNC,
.st_other = 0,
.st_shndx = @intCast(thunk.output_section_index),
.st_value = thunk.targetAddress(sym_index, elf_file),
.st_value = @intCast(thunk.targetAddress(sym_index, elf_file)),
.st_size = trampolineSize(cpu_arch),
};
}
@ -205,9 +206,9 @@ const aarch64 = struct {
if (target.flags.has_plt) return false;
if (atom.output_section_index != target.output_section_index) return false;
const target_atom = target.atom(elf_file).?;
if (target_atom.value == @as(u64, @bitCast(@as(i64, -1)))) return false;
const saddr = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
const taddr: i64 = @intCast(target.address(.{}, elf_file));
if (target_atom.value == -1) return false;
const saddr = atom.address(elf_file) + @as(i64, @intCast(rel.r_offset));
const taddr = target.address(.{}, elf_file);
_ = math.cast(i28, taddr + rel.r_addend - saddr) orelse return false;
return true;
}
@ -215,11 +216,11 @@ const aarch64 = struct {
fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
for (thunk.symbols.keys(), 0..) |sym_index, i| {
const sym = elf_file.symbol(sym_index);
const saddr = thunk.address(elf_file) + i * trampoline_size;
const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
const taddr = sym.address(.{}, elf_file);
const pages = try util.calcNumberOfPages(saddr, taddr);
try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
}

@ -770,7 +770,7 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(source, target)));
const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
},
@ -267,7 +267,7 @@ pub const StubsSection = struct {
},
.aarch64 => {
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
@ -411,7 +411,7 @@ pub const StubsHelperSection = struct {
.aarch64 => {
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(sect.addr, dyld_private_addr);
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
const off: u12 = @truncate(dyld_private_addr);
try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
@ -424,7 +424,7 @@ pub const StubsHelperSection = struct {
).toU32(), .little);
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
try writer.writeInt(u32, aarch64.Instruction.ldr(
@ -679,7 +679,7 @@ pub const ObjcStubsSection = struct {
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(
@ -692,7 +692,7 @@ pub const ObjcStubsSection = struct {
const target_sym = macho_file.getSymbol(macho_file.objc_msg_send_index.?);
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try aarch64.calcNumberOfPages(source, target);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off = try math.divExact(u12, @truncate(target), 8);
try writer.writeInt(

@ -99,7 +99,7 @@ pub const Thunk = struct {
const sym = macho_file.getSymbol(sym_index);
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(saddr, taddr);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
const off: u12 = @truncate(taddr);
try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
@ -25,7 +25,7 @@ pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
mem.writeInt(u32, code, inst.toU32(), .little);
}

pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
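The page count fed into ADRP above is simply the difference of the two addresses' 4 KiB page numbers, range-checked to the instruction's signed 21-bit immediate (roughly a ±4 GiB reach). Below is a standalone re-derivation with invented addresses; it is not a test from this commit, just a sketch of the arithmetic.

const std = @import("std");
const math = std.math;

// Both addresses are reduced to their 4 KiB page numbers and the signed
// difference must fit in an i21 (+/- 2^20 pages).
fn pagesBetween(saddr: i64, taddr: i64) error{Overflow}!i21 {
    const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
    const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
    return math.cast(i21, tpage - spage) orelse return error.Overflow;
}

test "adrp page delta" {
    // 0x10000123 sits on page 0x10000, 0x14000fff on page 0x14000.
    try std.testing.expectEqual(@as(i21, 0x4000), try pagesBetween(0x10000123, 0x14000fff));
    // Backward references produce a negative delta; ADRP's immediate is signed.
    try std.testing.expectEqual(@as(i21, -0x4000), try pagesBetween(0x14000fff, 0x10000123));
}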