elf: actually write allocated atoms in object files
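
Previously, Elf.writeAtoms staged and wrote every output section itself. It now delegates per-object work to a new Object.writeAtoms, which stages each section chunk in a buffer, copies in the allocated atoms, resolves their relocations, and writes the chunk with a single pwriteAll. Elf.writeAtoms keeps only the thunk-writing loop; reportUndefinedSymbols becomes pub so Object can report undefined symbols itself, and SectionChunk gains an offset helper alongside its existing address computation.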

Jakub Konka 2024-09-02 08:42:47 +02:00
parent 874ef6308e
commit f87a7251a3
2 changed files with 77 additions and 102 deletions


@@ -4037,107 +4037,19 @@ fn allocateSpecialPhdrs(self: *Elf) void {
}

fn writeAtoms(self: *Elf) !void {
-    const gpa = self.base.comp.gpa;
-    var undefs = std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)).init(gpa);
-    defer {
-        for (undefs.values()) |*refs| {
-            refs.deinit();
-        }
-        undefs.deinit();
-    }
-    var has_reloc_errors = false;
-    const slice = self.sections.slice();
-    for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| {
-        if (shdr.sh_type == elf.SHT_NULL) continue;
-        if (shdr.sh_type == elf.SHT_NOBITS) continue;
-        if (atom_list.items.len == 0) continue;
-        log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
-        // TODO really, really handle debug section separately
-        const base_offset = if (self.zigObjectPtr()) |zo| base_offset: {
-            for ([_]?Symbol.Index{
-                zo.text_index,
-                zo.rodata_index,
-                zo.data_relro_index,
-                zo.data_index,
-                zo.tdata_index,
-                zo.eh_frame_index,
-                zo.debug_info_index,
-                zo.debug_abbrev_index,
-                zo.debug_aranges_index,
-                zo.debug_str_index,
-                zo.debug_line_index,
-                zo.debug_line_str_index,
-                zo.debug_loclists_index,
-                zo.debug_rnglists_index,
-            }) |maybe_sym_index| {
-                const sym_index = maybe_sym_index orelse continue;
-                const sym = zo.symbol(sym_index);
-                const atom_ptr = sym.atom(self).?;
-                if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size;
-            }
-            break :base_offset 0;
-        } else 0;
-        const sh_offset = shdr.sh_offset + base_offset;
-        const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
-        const buffer = try gpa.alloc(u8, sh_size);
-        defer gpa.free(buffer);
-        const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and
-            shdr.sh_flags & elf.SHF_EXECINSTR != 0 and self.getTarget().cpu.arch == .x86_64)
-            0xcc // int3
-        else
-            0;
-        @memset(buffer, padding_byte);
-        for (atom_list.items) |ref| {
-            const atom_ptr = self.atom(ref).?;
-            assert(atom_ptr.alive);
-            const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
-                return error.Overflow;
-            const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
-            log.debug("writing atom({}) at 0x{x}", .{ ref, sh_offset + offset });
-            // TODO decompress directly into provided buffer
-            const out_code = buffer[offset..][0..size];
-            const in_code = switch (atom_ptr.file(self).?) {
-                .object => |x| try x.codeDecompressAlloc(self, ref.index),
-                .zig_object => |x| try x.codeAlloc(self, ref.index),
-                else => unreachable,
-            };
-            defer gpa.free(in_code);
-            @memcpy(out_code, in_code);
-            const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0)
-                atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs)
-            else
-                atom_ptr.resolveRelocsAlloc(self, out_code);
-            _ = res catch |err| switch (err) {
-                error.UnsupportedCpuArch => {
-                    try self.reportUnsupportedCpuArch();
-                    return error.FlushFailure;
-                },
-                error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
-                else => |e| return e,
-            };
-        }
-        try self.base.file.?.pwriteAll(buffer, sh_offset);
-    }
+    for (self.objects.items) |index| {
+        try self.file(index).?.object.writeAtoms(self);
+    }
    if (self.requiresThunks()) {
+        const gpa = self.base.comp.gpa;
        var buffer = std.ArrayList(u8).init(gpa);
        defer buffer.deinit();
        for (self.thunks.items) |th| {
            const thunk_size = th.size(self);
            try buffer.ensureUnusedCapacity(thunk_size);
-            const shdr = slice.items(.shdr)[th.output_section_index];
+            const shdr = self.sections.items(.shdr)[th.output_section_index];
            const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
            try th.write(self, buffer.writer());
            assert(buffer.items.len == thunk_size);
@@ -4145,10 +4057,6 @@ fn writeAtoms(self: *Elf) !void {
            buffer.clearRetainingCapacity();
        }
    }
-    try self.reportUndefinedSymbols(&undefs);
-    if (has_reloc_errors) return error.FlushFailure;
}

pub fn updateSymtabSize(self: *Elf) !void {
@@ -5089,7 +4997,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 {
    return off;
}

-fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
+pub fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void {
    const gpa = self.base.comp.gpa;
    const max_notes = 4;
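
Before moving on to the second file: the removed Elf.writeAtoms chose its padding byte per section, filling executable x86_64 PROGBITS sections with 0xcc (int3) so that control flow straying into inter-atom padding traps instead of executing zero bytes, while the new Object.writeAtoms below zero-fills its chunk buffers. A standalone restatement of the removed rule, for reference (paddingByte is a hypothetical name):

const std = @import("std");
const elf = std.elf;

fn paddingByte(shdr: elf.Elf64_Shdr, arch: std.Target.Cpu.Arch) u8 {
    // Executable PROGBITS on x86_64 get int3 so a stray jump into padding traps.
    if (shdr.sh_type == elf.SHT_PROGBITS and
        shdr.sh_flags & elf.SHF_EXECINSTR != 0 and arch == .x86_64)
        return 0xcc; // int3
    return 0;
}

test "paddingByte" {
    var shdr = std.mem.zeroes(elf.Elf64_Shdr);
    shdr.sh_type = elf.SHT_PROGBITS;
    shdr.sh_flags = elf.SHF_EXECINSTR;
    try std.testing.expectEqual(@as(u8, 0xcc), paddingByte(shdr, .x86_64));
    try std.testing.expectEqual(@as(u8, 0), paddingByte(shdr, .aarch64));
}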


@@ -978,6 +978,68 @@ pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void {
    }
}

+pub fn writeAtoms(self: *Object, elf_file: *Elf) !void {
+    const gpa = elf_file.base.comp.gpa;
+    var undefs = std.AutoArrayHashMap(Elf.SymbolResolver.Index, std.ArrayList(Elf.Ref)).init(gpa);
+    defer {
+        for (undefs.values()) |*refs| {
+            refs.deinit();
+        }
+        undefs.deinit();
+    }
+    var buffer = std.ArrayList(u8).init(gpa);
+    defer buffer.deinit();
+    log.debug("writing atoms in {}", .{self.fmtPath()});
+    var has_reloc_errors = false;
+    for (self.section_chunks.items) |chunk| {
+        const osec = elf_file.sections.items(.shdr)[chunk.output_section_index];
+        if (osec.sh_type == elf.SHT_NOBITS) continue;
+        log.debug(" in section '{s}'", .{elf_file.getShString(osec.sh_name)});
+        try buffer.ensureUnusedCapacity(chunk.size);
+        buffer.appendNTimesAssumeCapacity(0, chunk.size);
+        for (chunk.atoms.items) |atom_index| {
+            const atom_ptr = self.atom(atom_index).?;
+            assert(atom_ptr.alive);
+            const offset = math.cast(usize, atom_ptr.value) orelse return error.Overflow;
+            const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
+            log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + offset });
+            const code = try self.codeDecompressAlloc(elf_file, atom_index);
+            defer gpa.free(code);
+            const out_code = buffer.items[offset..][0..size];
+            @memcpy(out_code, code);
+            const res = if (osec.sh_flags & elf.SHF_ALLOC == 0)
+                atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, &undefs)
+            else
+                atom_ptr.resolveRelocsAlloc(elf_file, out_code);
+            _ = res catch |err| switch (err) {
+                error.UnsupportedCpuArch => {
+                    try elf_file.reportUnsupportedCpuArch();
+                    return error.FlushFailure;
+                },
+                error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
+                else => |e| return e,
+            };
+        }
+        try elf_file.base.file.?.pwriteAll(buffer.items, chunk.offset(elf_file));
+        buffer.clearRetainingCapacity();
+    }
+    try elf_file.reportUndefinedSymbols(&undefs);
+    if (has_reloc_errors) return error.FlushFailure;
+}

pub fn initRelaSections(self: *Object, elf_file: *Elf) !void {
    for (self.atoms_indexes.items) |atom_index| {
        const atom_ptr = self.atom(atom_index) orelse continue;
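
The file position each chunk is written to comes from the SectionChunk.offset helper added further down: the output section's file offset plus the chunk's own offset within the section; each atom then lands atom_ptr.value bytes into the chunk's staging buffer. A minimal sketch of that arithmetic, with hypothetical standalone inputs:

const std = @import("std");

fn atomFileOffset(sh_offset: u64, chunk_value: u64, atom_value: u64) u64 {
    // section file offset + chunk offset within section + atom offset within chunk
    return sh_offset + chunk_value + atom_value;
}

test "atomFileOffset" {
    // Section data at 0x1000; chunk 0x40 into the section; atom 0x10 into the chunk.
    try std.testing.expectEqual(@as(u64, 0x1050), atomFileOffset(0x1000, 0x40, 0x10));
}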
@@ -1544,12 +1606,12 @@ fn formatComdatGroups(
    }
}

-pub fn fmtPath(self: *Object) std.fmt.Formatter(formatPath) {
+pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
    return .{ .data = self };
}

fn formatPath(
-    object: *Object,
+    object: Object,
    comptime unused_fmt_string: []const u8,
    options: std.fmt.FormatOptions,
    writer: anytype,
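
fmtPath and formatPath now take Object by value rather than through *Object, so std.fmt.Formatter stores a copy of the object in its data field instead of a pointer; presumably this allows formatting a const or temporary Object without taking its address. A self-contained miniature of the same Formatter pattern (Thing and fmtName are hypothetical stand-ins):

const std = @import("std");

const Thing = struct {
    name: []const u8,

    pub fn fmtName(self: Thing) std.fmt.Formatter(formatName) {
        return .{ .data = self };
    }

    fn formatName(
        thing: Thing,
        comptime unused_fmt_string: []const u8,
        options: std.fmt.FormatOptions,
        writer: anytype,
    ) !void {
        _ = unused_fmt_string;
        _ = options;
        try writer.writeAll(thing.name);
    }
};

test "fmtName" {
    var buf: [16]u8 = undefined;
    const s = try std.fmt.bufPrint(&buf, "{}", .{(Thing{ .name = "hello" }).fmtName()});
    try std.testing.expectEqualStrings("hello", s);
}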
@@ -1586,13 +1648,18 @@ const SectionChunk = struct {
        return @as(i64, @intCast(shdr.sh_addr)) + chunk.value;
    }

+    fn offset(chunk: SectionChunk, elf_file: *Elf) u64 {
+        const shdr = elf_file.sections.items(.shdr)[chunk.output_section_index];
+        return shdr.sh_offset + @as(u64, @intCast(chunk.value));
+    }

    fn updateSize(chunk: *SectionChunk, object: *Object) void {
        for (chunk.atoms.items) |atom_index| {
            const atom_ptr = object.atom(atom_index).?;
            assert(atom_ptr.alive);
-            const offset = atom_ptr.alignment.forward(chunk.size);
-            const padding = offset - chunk.size;
-            atom_ptr.value = @intCast(offset);
+            const off = atom_ptr.alignment.forward(chunk.size);
+            const padding = off - chunk.size;
+            atom_ptr.value = @intCast(off);
            chunk.size += padding + atom_ptr.size;
            chunk.alignment = chunk.alignment.max(atom_ptr.alignment);
        }
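
For reference, the placement rule in updateSize (the local is renamed to off because the new offset method would otherwise be shadowed): each atom is placed at the chunk's current size rounded up to the atom's alignment (atom_ptr.alignment.forward is that round-up), and the chunk grows by the padding plus the atom's size. A standalone sketch using std.mem.alignForward, with hypothetical names:

const std = @import("std");

fn placeAtom(chunk_size: *u64, atom_size: u64, atom_align: u64) u64 {
    // Round the current end of the chunk up to the atom's alignment,
    // then grow the chunk by the padding plus the atom itself.
    const off = std.mem.alignForward(u64, chunk_size.*, atom_align);
    chunk_size.* = off + atom_size;
    return off; // becomes atom_ptr.value
}

test "placeAtom" {
    var size: u64 = 10;
    // A 4-byte-aligned, 8-byte atom after 10 bytes lands at offset 12.
    try std.testing.expectEqual(@as(u64, 12), placeAtom(&size, 8, 4));
    try std.testing.expectEqual(@as(u64, 20), size);
}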