elf: dynamically allocate remaining alloc sections (and segments)

Jakub Konka 2023-09-25 19:40:11 +02:00
parent 5e617e4b0c
commit eb497c50e3
2 changed files with 76 additions and 22 deletions
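For context, here is a minimal standalone sketch (not part of the diff) of the sequential virtual-address strategy this commit adopts in `allocateSegment`: when the caller passes no explicit address, the new segment is placed right after the previous program header, aligned up to the requested alignment. The struct and values below are illustrative stand-ins, not the linker's real types.

```zig
const std = @import("std");
const mem = std.mem;

// Illustrative stand-in for an already-allocated PT_LOAD program header.
const PrevPhdr = struct { p_vaddr: u64, p_memsz: u64 };

/// Sketch of the `opts.addr orelse ...` fallback: place the next segment
/// right after the previous one, rounded up to `alignment`.
fn nextSegmentAddr(prev: PrevPhdr, alignment: u64) u64 {
    return mem.alignForward(u64, prev.p_vaddr + prev.p_memsz, alignment);
}

test "segments are laid out in sequence" {
    const prev: PrevPhdr = .{ .p_vaddr = 0x200000, .p_memsz = 0x1234 };
    // End of the previous segment is 0x201234; aligned to a 0x1000 page => 0x202000.
    try std.testing.expectEqual(@as(u64, 0x202000), nextSegmentAddr(prev, 0x1000));
}
```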


@@ -409,16 +409,24 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) u64 {
}
const AllocateSegmentOpts = struct {
addr: u64, // TODO find free VM space
size: u64,
alignment: u64,
addr: ?u64 = null, // TODO find free VM space
flags: u32 = elf.PF_R,
};
fn allocateSegment(self: *Elf, opts: AllocateSegmentOpts) error{OutOfMemory}!u16 {
pub fn allocateSegment(self: *Elf, opts: AllocateSegmentOpts) error{OutOfMemory}!u16 {
const index = @as(u16, @intCast(self.phdrs.items.len));
try self.phdrs.ensureUnusedCapacity(self.base.allocator, 1);
const off = self.findFreeSpace(opts.size, opts.alignment);
// Memory is always allocated in sequence.
// TODO is this correct? Or should we implement something similar to `findFreeSpace`?
// How would that impact HCS?
const addr = opts.addr orelse blk: {
assert(self.phdr_table_load_index != null);
const phdr = &self.phdrs.items[index - 1];
break :blk mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, opts.alignment);
};
log.debug("allocating phdr({d})({c}{c}{c}) from 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
index,
if (opts.flags & elf.PF_R != 0) @as(u8, 'R') else '_',
@@ -426,15 +434,15 @@ fn allocateSegment(self: *Elf, opts: AllocateSegmentOpts) error{OutOfMemory}!u16
if (opts.flags & elf.PF_X != 0) @as(u8, 'X') else '_',
off,
off + opts.size,
opts.addr,
opts.addr + opts.size,
addr,
addr + opts.size,
});
self.phdrs.appendAssumeCapacity(.{
.p_type = elf.PT_LOAD,
.p_offset = off,
.p_filesz = opts.size,
.p_vaddr = opts.addr,
.p_paddr = opts.addr,
.p_vaddr = addr,
.p_paddr = addr,
.p_memsz = opts.size,
.p_align = opts.alignment,
.p_flags = opts.flags,
@@ -446,12 +454,12 @@ fn allocateSegment(self: *Elf, opts: AllocateSegmentOpts) error{OutOfMemory}!u16
const AllocateAllocSectionOpts = struct {
name: [:0]const u8,
phdr_index: u16,
alignment: u16 = 1,
flags: u16 = elf.SHF_ALLOC,
alignment: u64 = 1,
flags: u64 = elf.SHF_ALLOC,
type: u32 = elf.SHT_PROGBITS,
};
fn allocateAllocSection(self: *Elf, opts: AllocateAllocSectionOpts) error{OutOfMemory}!u16 {
pub fn allocateAllocSection(self: *Elf, opts: AllocateAllocSectionOpts) error{OutOfMemory}!u16 {
const gpa = self.base.allocator;
const phdr = &self.phdrs.items[opts.phdr_index];
const index = @as(u16, @intCast(self.shdrs.items.len));
@@ -622,6 +630,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
});
const phdr = &self.phdrs.items[self.phdr_load_zerofill_index.?];
phdr.p_offset = self.phdrs.items[self.phdr_load_rw_index.?].p_offset; // .bss overlaps .data
phdr.p_memsz = 1024;
}
if (self.shstrtab_section_index == null) {
@@ -994,6 +1003,12 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try positionals.append(.{ .path = key.status.success.object_path });
}
// csu prelude
var csu = try CsuObjects.init(arena, self.base.options, comp);
if (csu.crt0) |v| try positionals.append(.{ .path = v });
if (csu.crti) |v| try positionals.append(.{ .path = v });
if (csu.crtbegin) |v| try positionals.append(.{ .path = v });
for (positionals.items) |obj| {
const in_file = try std.fs.cwd().openFile(obj.path, .{});
defer in_file.close();
@@ -1039,18 +1054,29 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
try self.handleAndReportParseError(lib.path, err, &parse_ctx);
}
// Finally, as the last input object add compiler_rt if any.
// Finally, as the last input objects we add compiler_rt and CSU postlude (if any).
positionals.clearRetainingCapacity();
// compiler-rt. Since compiler_rt exports symbols like `memset`, it needs
// to be after the shared libraries, so they are picked up from the shared
// libraries, not libcompiler_rt.
const compiler_rt_path: ?[]const u8 = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
break :blk null;
};
if (compiler_rt_path) |path| {
const in_file = try std.fs.cwd().openFile(path, .{});
if (compiler_rt_path) |path| try positionals.append(.{ .path = path });
// csu postlude
if (csu.crtend) |v| try positionals.append(.{ .path = v });
if (csu.crtn) |v| try positionals.append(.{ .path = v });
for (positionals.items) |obj| {
const in_file = try std.fs.cwd().openFile(obj.path, .{});
defer in_file.close();
var parse_ctx: ParseErrorCtx = .{ .detected_cpu_arch = undefined };
self.parsePositional(in_file, path, false, &parse_ctx) catch |err|
try self.handleAndReportParseError(path, err, &parse_ctx);
self.parsePositional(in_file, obj.path, obj.must_link, &parse_ctx) catch |err|
try self.handleAndReportParseError(obj.path, err, &parse_ctx);
}
// Handle any lazy symbols that were emitted by incremental compilation.
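To make the two `flushModule` hunks above easier to read together, this is the overall input ordering the change establishes; the file names below are made up for illustration, only the ordering mirrors the diff.

```zig
// Illustrative only: resulting order of link inputs after this change.
const link_inputs = [_][]const u8{
    "crt0.o", // CSU prelude, added before the user objects
    "crti.o",
    "crtbegin.o",
    "main.o", // user objects / other positionals
    "libfoo.so", // shared libraries
    "libcompiler_rt.a", // after shared libs so e.g. memset resolves from them
    "crtend.o", // CSU postlude, appended last
    "crtn.o",
};
```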


@@ -136,7 +136,7 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
try self.comdat_groups.append(elf_file.base.allocator, comdat_group_index);
},
elf.SHT_SYMTAB_SHNDX => @panic("TODO"),
elf.SHT_SYMTAB_SHNDX => @panic("TODO SHT_SYMTAB_SHNDX"),
elf.SHT_NULL,
elf.SHT_REL,
@@ -166,14 +166,20 @@ fn initAtoms(self: *Object, elf_file: *Elf) !void {
};
}
fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8, elf_file: *Elf) !void {
fn addAtom(
self: *Object,
shdr: elf.Elf64_Shdr,
shndx: u16,
name: [:0]const u8,
elf_file: *Elf,
) error{ OutOfMemory, Overflow }!void {
const atom_index = try elf_file.addAtom();
const atom = elf_file.atom(atom_index).?;
atom.atom_index = atom_index;
atom.name_offset = try elf_file.strtab.insert(elf_file.base.allocator, name);
atom.file_index = self.index;
atom.input_section_index = shndx;
atom.output_section_index = self.getOutputSectionIndex(elf_file, shdr);
atom.output_section_index = try self.getOutputSectionIndex(elf_file, shdr);
atom.alive = true;
self.atoms.items[shndx] = atom_index;
@@ -188,7 +194,7 @@ fn addAtom(self: *Object, shdr: elf.Elf64_Shdr, shndx: u16, name: [:0]const u8,
}
}
fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) u16 {
fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u16 {
const name = blk: {
const name = self.strings.getAssumeExists(shdr.sh_name);
// if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
@@ -223,10 +229,32 @@ fn getOutputSectionIndex(self: *Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) u1
else => flags,
};
};
_ = flags;
const out_shndx = elf_file.sectionByName(name) orelse {
log.err("{}: output section {s} not found", .{ self.fmtPath(), name });
@panic("TODO: missing output section!");
const out_shndx = elf_file.sectionByName(name) orelse blk: {
const is_alloc = flags & elf.SHF_ALLOC != 0;
const is_write = flags & elf.SHF_WRITE != 0;
const is_exec = flags & elf.SHF_EXECINSTR != 0;
const is_tls = flags & elf.SHF_TLS != 0;
if (!is_alloc or is_tls) {
log.err("{}: output section {s} not found", .{ self.fmtPath(), name });
@panic("TODO: missing output section!");
}
var phdr_flags: u32 = elf.PF_R;
if (is_write) phdr_flags |= elf.PF_W;
if (is_exec) phdr_flags |= elf.PF_X;
const phdr_index = try elf_file.allocateSegment(.{
.size = Elf.padToIdeal(shdr.sh_size),
.alignment = if (is_tls) shdr.sh_addralign else elf_file.page_size,
.flags = phdr_flags,
});
const shndx = try elf_file.allocateAllocSection(.{
.name = name,
.phdr_index = phdr_index,
.alignment = shdr.sh_addralign,
.flags = flags,
.type = @"type",
});
try elf_file.last_atom_and_free_list_table.putNoClobber(elf_file.base.allocator, shndx, .{});
break :blk shndx;
};
return out_shndx;
}
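As a closing illustration, here is a small self-contained sketch of the SHF_* to PF_* flag translation the new fallback performs before calling `allocateSegment` when it creates a missing output section on the fly; `elf` is `std.elf`, and the test input is an arbitrary example.

```zig
const std = @import("std");
const elf = std.elf;

/// Sketch of the mapping used when a missing alloc section is created on
/// the fly: readable by default, plus write/exec bits from the section flags.
fn phdrFlagsFromSection(sh_flags: u64) u32 {
    var flags: u32 = elf.PF_R;
    if (sh_flags & elf.SHF_WRITE != 0) flags |= elf.PF_W;
    if (sh_flags & elf.SHF_EXECINSTR != 0) flags |= elf.PF_X;
    return flags;
}

test "a writable alloc section maps to a read/write segment" {
    const got = phdrFlagsFromSection(elf.SHF_ALLOC | elf.SHF_WRITE);
    try std.testing.expectEqual(@as(u32, elf.PF_R | elf.PF_W), got);
}
```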