diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 5f772d6abb..bf5c232d5c 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -1,4 +1,5 @@
 base: link.File,
+dwarf: ?Dwarf = null,
 ptr_width: PtrWidth,
@@ -103,6 +104,7 @@ phdr_table_dirty: bool = false,
 shdr_table_dirty: bool = false,
 shstrtab_dirty: bool = false,
 strtab_dirty: bool = false,
+got_dirty: bool = false,
 debug_strtab_dirty: bool = false,
 debug_abbrev_section_dirty: bool = false,
@@ -411,21 +413,31 @@ fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) u64 {
 const AllocateSegmentOpts = struct {
     size: u64,
     alignment: u64,
-    addr: ?u64 = null, // TODO find free VM space
+    addr: ?u64 = null,
     flags: u32 = elf.PF_R,
 };
 
 pub fn allocateSegment(self: *Elf, opts: AllocateSegmentOpts) error{OutOfMemory}!u16 {
+    const gpa = self.base.allocator;
     const index = @as(u16, @intCast(self.phdrs.items.len));
-    try self.phdrs.ensureUnusedCapacity(self.base.allocator, 1);
+    try self.phdrs.ensureUnusedCapacity(gpa, 1);
     const off = self.findFreeSpace(opts.size, opts.alignment);
-    // Memory is always allocated in sequence.
-    // TODO is this correct? Or should we implement something similar to `findFreeSpace`?
-    // How would that impact HCS?
+    // Currently, we allocate memory in sequence: every segment owns a fixed reserved
+    // address window, and the next segment is placed past the end of the highest window.
+    // TODO we want to keep the machine code segment in the furthest memory range among
+    // all segments as it is the most likely to grow.
     const addr = opts.addr orelse blk: {
-        assert(self.phdr_table_load_index != null);
-        const phdr = &self.phdrs.items[index - 1];
-        break :blk mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, opts.alignment);
+        const reserved_capacity = self.calcImageBase() * 4;
+        // Find the end of the highest reserved window.
+        const count = self.phdrs.items.len;
+        var addresses = std.ArrayList(u64).init(gpa);
+        defer addresses.deinit();
+        try addresses.ensureTotalCapacityPrecise(count);
+        for (self.phdrs.items) |phdr| {
+            addresses.appendAssumeCapacity(phdr.p_vaddr + reserved_capacity);
+        }
+        mem.sort(u64, addresses.items, {}, std.sort.asc(u64));
+        break :blk mem.alignForward(u64, addresses.items[count - 1], opts.alignment);
     };
     log.debug("allocating phdr({d})({c}{c}{c}) from 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
        index,
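
To make the allocation strategy above concrete, here is a minimal, self-contained sketch of the same computation. The `Phdr` stand-in and all numbers are illustrative assumptions, and a plain maximum replaces the sort-then-take-last step (the result is identical):

```zig
const std = @import("std");

// Hypothetical stand-in for a program header; only the base address
// matters for this computation.
const Phdr = struct { p_vaddr: u64 };

/// Mirrors the strategy above: every existing segment owns a fixed
/// reserved window, and the next segment starts past the end of the
/// highest window, aligned as requested.
fn nextSegmentAddr(phdrs: []const Phdr, reserved_capacity: u64, alignment: u64) u64 {
    var max_addr: u64 = 0;
    for (phdrs) |phdr| {
        max_addr = @max(max_addr, phdr.p_vaddr + reserved_capacity);
    }
    return std.mem.alignForward(u64, max_addr, alignment);
}

test "next segment lands past the highest reserved window" {
    // Illustrative windows of 0x4000000 bytes each; the real window size
    // comes from `calcImageBase() * 4`.
    const phdrs = [_]Phdr{
        .{ .p_vaddr = 0x4000000 },
        .{ .p_vaddr = 0xc000000 },
    };
    try std.testing.expectEqual(
        @as(u64, 0x10000000),
        nextSegmentAddr(&phdrs, 0x4000000, 0x1000),
    );
}
```
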
@@ -530,6 +542,8 @@ pub fn populateMissingMetadata(self: *Elf) !void {
         .p64 => false,
     };
     const ptr_size: u8 = self.ptrWidthBytes();
+    const is_linux = self.base.options.target.os.tag == .linux;
+    const large_addrspace = self.base.options.target.ptrBitWidth() >= 32;
     const image_base = self.calcImageBase();
 
     if (self.phdr_table_index == null) {
@@ -577,13 +591,10 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_got_index == null) {
-        // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
-        // we'll need to re-use that function anyway, in case the GOT grows and overlaps something
-        // else in virtual memory.
-        const addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x4000000 else 0x8000;
+        const addr: u64 = if (large_addrspace) 0x4000000 else 0x8000;
         // We really only need ptr alignment but since we are using PROGBITS, linux requires
         // page align.
-        const alignment = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
+        const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
         self.phdr_got_index = try self.allocateSegment(.{
             .addr = addr,
             .size = @as(u64, ptr_size) * self.base.options.symbol_count_hint,
@@ -593,10 +604,8 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_ro_index == null) {
-        // TODO Same as for GOT
-        const addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0xc000000 else 0xa000;
-        // Same reason as for GOT
-        const alignment = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
+        const addr: u64 = if (large_addrspace) 0xc000000 else 0xa000;
+        const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
         self.phdr_load_ro_index = try self.allocateSegment(.{
             .addr = addr,
             .size = 1024,
@@ -606,10 +615,8 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_rw_index == null) {
-        // TODO Same as for GOT
-        const addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x10000000 else 0xc000;
-        // Same reason as for GOT
-        const alignment = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
+        const addr: u64 = if (large_addrspace) 0x10000000 else 0xc000;
+        const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
         self.phdr_load_rw_index = try self.allocateSegment(.{
             .addr = addr,
             .size = 1024,
@@ -619,9 +626,8 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.phdr_load_zerofill_index == null) {
-        // TODO Same as for GOT
-        const addr: u64 = if (self.base.options.target.ptrBitWidth() >= 32) 0x14000000 else 0xf000;
-        const alignment = if (self.base.options.target.os.tag == .linux) self.page_size else @as(u16, ptr_size);
+        const addr: u64 = if (large_addrspace) 0x14000000 else 0xf000;
+        const alignment = if (is_linux) self.page_size else @as(u16, ptr_size);
         self.phdr_load_zerofill_index = try self.allocateSegment(.{
             .addr = addr,
             .size = 0,
@@ -803,7 +809,7 @@ pub fn populateMissingMetadata(self: *Elf) !void {
     }
 
     if (self.base.options.module) |module| {
-        if (self.zig_module_index == null) {
+        if (self.zig_module_index == null and !self.base.options.use_llvm) {
             const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
             self.files.set(index, .{ .zig_module = .{
                 .index = index,
@@ -858,7 +864,30 @@ pub fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
     }
 
     const mem_capacity = self.allocatedVirtualSize(phdr.p_vaddr);
-    assert(needed_size <= mem_capacity); // TODO grow section in virtual memory
+    if (needed_size > mem_capacity) {
+        // We are exceeding our allocated VM capacity so we need to shift everything in memory
+        // and grow.
+        {
+            const dirty_addr = phdr.p_vaddr + phdr.p_memsz;
+            self.got_dirty = for (self.got.entries.items) |entry| {
+                if (self.symbol(entry.symbol_index).value >= dirty_addr) break true;
+            } else false;
+
+            // TODO mark relocs dirty
+        }
+        try self.growSegment(shdr_index, needed_size);
+
+        if (self.zig_module_index != null) {
+            // TODO self-hosted backends cannot yet handle this condition correctly as the linker
+            // cannot update emitted virtual addresses of symbols already committed to the final file.
+            var err = try self.addErrorWithNotes(2);
+            try err.addMsg(self, "fatal linker error: cannot expand load segment phdr({d}) in virtual memory", .{
                phdr_index,
+            });
+            try err.addNote(self, "TODO: emit relocations to memory locations in self-hosted backends", .{});
+            try err.addNote(self, "as a workaround, try increasing pre-allocated virtual memory of each segment", .{});
+        }
+    }
 
     shdr.sh_size = needed_size;
     phdr.p_memsz = needed_size;
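
The `got_dirty` scan above leans on Zig's `for` loop being usable as an expression: `break true` exits with the loop's result on the first GOT entry at or past the dirty address, and the `else` arm supplies `false` when the loop runs to completion. A stand-alone illustration:

```zig
const std = @import("std");

test "for loop as a boolean search expression" {
    const values = [_]u32{ 1, 3, 8 };
    // Breaks with `true` on the first element >= 5; the `else` arm runs
    // only if the loop finishes without breaking.
    const found = for (values) |v| {
        if (v >= 5) break true;
    } else false;
    try std.testing.expect(found);
}
```
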
@@ -870,6 +899,62 @@ pub fn growAllocSection(self: *Elf, shdr_index: u16, needed_size: u64) !void {
     self.markDirty(shdr_index, phdr_index);
 }
 
+fn growSegment(self: *Elf, shndx: u16, needed_size: u64) !void {
+    const phdr_index = self.phdr_to_shdr_table.get(shndx).?;
+    const phdr = &self.phdrs.items[phdr_index];
+    const increased_size = padToIdeal(needed_size);
+    const end_addr = phdr.p_vaddr + phdr.p_memsz;
+    const old_aligned_end = phdr.p_vaddr + mem.alignForward(u64, phdr.p_memsz, phdr.p_align);
+    const new_aligned_end = phdr.p_vaddr + mem.alignForward(u64, increased_size, phdr.p_align);
+    const diff = new_aligned_end - old_aligned_end;
+    log.debug("growing phdr({d}) in memory by {x}", .{ phdr_index, diff });
+
+    // Update symbols and atoms.
+    var files = std.ArrayList(File.Index).init(self.base.allocator);
+    defer files.deinit();
+    try files.ensureTotalCapacityPrecise(self.objects.items.len + 1);
+
+    if (self.zig_module_index) |index| files.appendAssumeCapacity(index);
+    files.appendSliceAssumeCapacity(self.objects.items);
+
+    for (files.items) |index| {
+        const file_ptr = self.file(index).?;
+
+        for (file_ptr.locals()) |sym_index| {
+            const sym = self.symbol(sym_index);
+            const atom_ptr = sym.atom(self) orelse continue;
+            if (!atom_ptr.flags.alive or !atom_ptr.flags.allocated) continue;
+            if (sym.value >= end_addr) sym.value += diff;
+        }
+
+        for (file_ptr.globals()) |sym_index| {
+            const sym = self.symbol(sym_index);
+            if (sym.file_index != index) continue;
+            const atom_ptr = sym.atom(self) orelse continue;
+            if (!atom_ptr.flags.alive or !atom_ptr.flags.allocated) continue;
+            if (sym.value >= end_addr) sym.value += diff;
+        }
+
+        for (file_ptr.atoms()) |atom_index| {
+            const atom_ptr = self.atom(atom_index) orelse continue;
+            if (!atom_ptr.flags.alive or !atom_ptr.flags.allocated) continue;
+            if (atom_ptr.value >= end_addr) atom_ptr.value += diff;
+        }
+    }
+
+    // Finally, update section headers.
+    for (self.shdrs.items, 0..) |*other_shdr, other_shndx| {
+        if (other_shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
+        if (other_shndx == shndx) continue;
+        const other_phdr_index = self.phdr_to_shdr_table.get(@intCast(other_shndx)) orelse continue;
+        const other_phdr = &self.phdrs.items[other_phdr_index];
+        if (other_phdr.p_vaddr < end_addr) continue;
+        other_shdr.sh_addr += diff;
+        other_phdr.p_vaddr += diff;
+        other_phdr.p_paddr += diff;
+    }
+}
+
 pub fn growNonAllocSection(
     self: *Elf,
     shdr_index: u16,
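
The shift amount in `growSegment` is the distance between the segment's old and new page-aligned end addresses, so everything above the segment moves by a whole multiple of `p_align`. A worked example with assumed numbers, not taken from a real link:

```zig
const std = @import("std");

test "segment growth shift amount" {
    // A hypothetical segment at 0x8000 holding 0x900 bytes in memory,
    // page-aligned, grows to an (already padded) size of 0x1100 bytes.
    const p_vaddr: u64 = 0x8000;
    const p_memsz: u64 = 0x900;
    const p_align: u64 = 0x1000;
    const increased_size: u64 = 0x1100;

    const old_aligned_end = p_vaddr + std.mem.alignForward(u64, p_memsz, p_align);
    const new_aligned_end = p_vaddr + std.mem.alignForward(u64, increased_size, p_align);
    const diff = new_aligned_end - old_aligned_end;

    // Every symbol, atom, and segment at or above the old end address
    // moves up by exactly one page.
    try std.testing.expectEqual(@as(u64, 0x1000), diff);
}
```
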
@@ -1143,7 +1228,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     if (self.zig_module_index) |index| {
         for (self.file(index).?.zig_module.atoms.keys()) |atom_index| {
             const atom_ptr = self.atom(atom_index).?;
-            if (!atom_ptr.alive) continue;
+            if (!atom_ptr.flags.alive) continue;
             const shdr = &self.shdrs.items[atom_ptr.outputShndx().?];
             const file_offset = shdr.sh_offset + atom_ptr.value - shdr.sh_addr;
             const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
@@ -1157,6 +1242,15 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
     }
     try self.writeObjects();
 
+    if (self.got_dirty) {
+        const shdr = &self.shdrs.items[self.got_section_index.?];
+        var buffer = try std.ArrayList(u8).initCapacity(gpa, self.got.size(self));
+        defer buffer.deinit();
+        try self.got.writeAllEntries(self, buffer.writer());
+        try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+        self.got_dirty = false;
+    }
+
     // Look for entry address in objects if not set by the incremental compiler.
     if (self.entry_addr == null) {
         const entry: ?[]const u8 = entry: {
@@ -1537,7 +1631,7 @@ fn resolveSymbols(self: *Elf) error{Overflow}!void {
             for (try object.comdatGroupMembers(cg.shndx)) |shndx| {
                 const atom_index = object.atoms.items[shndx];
                 if (self.atom(atom_index)) |atom_ptr| {
-                    atom_ptr.alive = false;
+                    atom_ptr.flags.alive = false;
                     // atom_ptr.markFdesDead(self);
                 }
             }
@@ -1645,25 +1739,26 @@ fn scanRelocs(self: *Elf) !void {
 fn allocateObjects(self: *Elf) !void {
     for (self.objects.items) |index| {
         const object = self.file(index).?.object;
+
         for (object.atoms.items) |atom_index| {
             const atom_ptr = self.atom(atom_index) orelse continue;
-            if (!atom_ptr.alive) continue;
+            if (!atom_ptr.flags.alive or atom_ptr.flags.allocated) continue;
             try atom_ptr.allocate(self);
         }
 
         for (object.locals()) |local_index| {
             const local = self.symbol(local_index);
             const atom_ptr = local.atom(self) orelse continue;
-            if (!atom_ptr.alive) continue;
-            local.value += atom_ptr.value;
+            if (!atom_ptr.flags.alive) continue;
+            local.value = local.elfSym(self).st_value + atom_ptr.value;
         }
 
        for (object.globals()) |global_index| {
             const global = self.symbol(global_index);
             const atom_ptr = global.atom(self) orelse continue;
-            if (!atom_ptr.alive) continue;
+            if (!atom_ptr.flags.alive) continue;
             if (global.file_index == index) {
-                global.value += atom_ptr.value;
+                global.value = global.elfSym(self).st_value + atom_ptr.value;
             }
         }
     }
@@ -1676,7 +1771,7 @@ fn writeObjects(self: *Elf) !void {
         const object = self.file(index).?.object;
         for (object.atoms.items) |atom_index| {
             const atom_ptr = self.atom(atom_index) orelse continue;
-            if (!atom_ptr.alive) continue;
+            if (!atom_ptr.flags.alive) continue;
 
             const shdr = &self.shdrs.items[atom_ptr.outputShndx().?];
             if (shdr.sh_type == elf.SHT_NOBITS) continue;
@@ -2650,7 +2745,7 @@ fn updateDeclCode(
     atom_ptr.output_section_index = shdr_index;
 
     sym.name_offset = try self.strtab.insert(gpa, decl_name);
-    atom_ptr.alive = true;
+    atom_ptr.flags.alive = true;
     atom_ptr.name_offset = sym.name_offset;
     esym.st_name = sym.name_offset;
     esym.st_info |= stt_bits;
@@ -2681,11 +2776,6 @@ fn updateDeclCode(
     } else {
         try atom_ptr.allocate(self);
         errdefer self.freeDeclMetadata(sym_index);
-        log.debug("allocated atom for {s} at 0x{x} to 0x{x}", .{
-            decl_name,
-            atom_ptr.value,
-            atom_ptr.value + atom_ptr.size,
-        });
 
         sym.value = atom_ptr.value;
         esym.st_value = atom_ptr.value;
@@ -2873,7 +2963,6 @@ fn updateLazySymbol(self: *Elf, sym: link.File.LazySymbol, symbol_index: Symbol.
         defer gpa.free(name);
         break :blk try self.strtab.insert(gpa, name);
     };
-    const name = self.strtab.get(name_str_index).?;
 
     const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl|
         mod.declPtr(owner_decl).srcLoc(mod)
@@ -2913,7 +3002,7 @@
     local_esym.st_info |= elf.STT_OBJECT;
     local_esym.st_size = code.len;
     const atom_ptr = local_sym.atom(self).?;
-    atom_ptr.alive = true;
+    atom_ptr.flags.alive = true;
     atom_ptr.name_offset = name_str_index;
     atom_ptr.alignment = required_alignment;
     atom_ptr.size = code.len;
@@ -2922,12 +3011,6 @@
     try atom_ptr.allocate(self);
     errdefer self.freeDeclMetadata(symbol_index);
 
-    log.debug("allocated atom for {s} at 0x{x} to 0x{x}", .{
-        name,
-        atom_ptr.value,
-        atom_ptr.value + atom_ptr.size,
-    });
-
     local_sym.value = atom_ptr.value;
     local_esym.st_value = atom_ptr.value;
@@ -2961,7 +3044,6 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
         defer gpa.free(name);
         break :blk try self.strtab.insert(gpa, name);
     };
-    const name = self.strtab.get(name_str_index).?;
     const zig_module = self.file(self.zig_module_index.?).?.zig_module;
     const sym_index = try zig_module.addAtom(self);
@@ -2992,7 +3074,7 @@
     local_esym.st_info |= elf.STT_OBJECT;
     local_esym.st_size = code.len;
     const atom_ptr = local_sym.atom(self).?;
-    atom_ptr.alive = true;
+    atom_ptr.flags.alive = true;
     atom_ptr.name_offset = name_str_index;
     atom_ptr.alignment = required_alignment;
     atom_ptr.size = code.len;
@@ -3001,8 +3083,6 @@
     try atom_ptr.allocate(self);
     errdefer self.freeDeclMetadata(sym_index);
 
-    log.debug("allocated atom for {s} at 0x{x} to 0x{x}", .{ name, atom_ptr.value, atom_ptr.value + atom_ptr.size });
-
     local_sym.value = atom_ptr.value;
     local_esym.st_value = atom_ptr.value;
@@ -3931,6 +4011,50 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
     return &self.comdat_groups_owners.items[index];
 }
 
+const ErrorWithNotes = struct {
+    /// Allocated index in misc_errors array.
+    index: usize,
+
+    /// Next available note slot.
+    note_slot: usize = 0,
+
+    pub fn addMsg(
+        err: ErrorWithNotes,
+        elf_file: *Elf,
+        comptime format: []const u8,
+        args: anytype,
+    ) error{OutOfMemory}!void {
+        const gpa = elf_file.base.allocator;
+        const err_msg = &elf_file.misc_errors.items[err.index];
+        err_msg.msg = try std.fmt.allocPrint(gpa, format, args);
+    }
+
+    pub fn addNote(
+        err: *ErrorWithNotes,
+        elf_file: *Elf,
+        comptime format: []const u8,
+        args: anytype,
+    ) error{OutOfMemory}!void {
+        const gpa = elf_file.base.allocator;
+        const err_msg = &elf_file.misc_errors.items[err.index];
+        assert(err.note_slot < err_msg.notes.len);
+        err_msg.notes[err.note_slot] = .{ .msg = try std.fmt.allocPrint(gpa, format, args) };
+        err.note_slot += 1;
+    }
+};
+
+fn addErrorWithNotes(self: *Elf, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
+    try self.misc_errors.ensureUnusedCapacity(self.base.allocator, 1);
+    return self.addErrorWithNotesAssumeCapacity(note_count);
+}
+
+fn addErrorWithNotesAssumeCapacity(self: *Elf, note_count: usize) error{OutOfMemory}!ErrorWithNotes {
+    const index = self.misc_errors.items.len;
+    const err = self.misc_errors.addOneAssumeCapacity();
+    err.* = .{ .msg = undefined, .notes = try self.base.allocator.alloc(link.File.ErrorMsg, note_count) };
+    return .{ .index = index };
+}
+
 fn reportUndefined(self: *Elf, undefs: anytype) !void {
     const gpa = self.base.allocator;
     const max_notes = 4;
@@ -3941,33 +4065,22 @@ fn reportUndefined(self: *Elf, undefs: anytype) !void {
     while (it.next()) |entry| {
         const undef_index = entry.key_ptr.*;
         const atoms = entry.value_ptr.*.items;
-        const nnotes = @min(atoms.len, max_notes);
+        const natoms = @min(atoms.len, max_notes);
+        const nnotes = natoms + @intFromBool(atoms.len > max_notes);
 
-        var notes = try std.ArrayList(link.File.ErrorMsg).initCapacity(gpa, max_notes + 1);
-        defer notes.deinit();
+        var err = try self.addErrorWithNotesAssumeCapacity(nnotes);
+        try err.addMsg(self, "undefined symbol: {s}", .{self.symbol(undef_index).name(self)});
 
-        for (atoms[0..nnotes]) |atom_index| {
+        for (atoms[0..natoms]) |atom_index| {
             const atom_ptr = self.atom(atom_index).?;
             const file_ptr = self.file(atom_ptr.file_index).?;
-            const note = try std.fmt.allocPrint(gpa, "referenced by {s}:{s}", .{
-                file_ptr.fmtPath(),
-                atom_ptr.name(self),
-            });
-            notes.appendAssumeCapacity(.{ .msg = note });
+            try err.addNote(self, "referenced by {s}:{s}", .{ file_ptr.fmtPath(), atom_ptr.name(self) });
         }
 
         if (atoms.len > max_notes) {
             const remaining = atoms.len - max_notes;
-            const note = try std.fmt.allocPrint(gpa, "referenced {d} more times", .{remaining});
-            notes.appendAssumeCapacity(.{ .msg = note });
+            try err.addNote(self, "referenced {d} more times", .{remaining});
         }
-
-        var err_msg = link.File.ErrorMsg{
-            .msg = try std.fmt.allocPrint(gpa, "undefined symbol: {s}", .{self.symbol(undef_index).name(self)}),
-        };
-        err_msg.notes = try notes.toOwnedSlice();
-
-        self.misc_errors.appendAssumeCapacity(err_msg);
     }
 }
 
@@ -4003,15 +4116,9 @@ fn reportParseError(
     comptime format: []const u8,
     args: anytype,
 ) error{OutOfMemory}!void {
-    const gpa = self.base.allocator;
-    try self.misc_errors.ensureUnusedCapacity(gpa, 1);
-    var notes = try gpa.alloc(link.File.ErrorMsg, 1);
-    errdefer gpa.free(notes);
-    notes[0] = .{ .msg = try std.fmt.allocPrint(gpa, "while parsing {s}", .{path}) };
-    self.misc_errors.appendAssumeCapacity(.{
-        .msg = try std.fmt.allocPrint(gpa, format, args),
-        .notes = notes,
-    });
+    var err = try self.addErrorWithNotes(1);
+    try err.addMsg(self, format, args);
"while parsing {s}", .{path}); } fn fmtShdrs(self: *Elf) std.fmt.Formatter(formatShdrs) { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index c7f4fe6765..d044ef40a2 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -25,11 +25,8 @@ relocs_section_index: Index = 0, /// Index of this atom in the linker's atoms table. atom_index: Index = 0, -/// Specifies whether this atom is alive or has been garbage collected. -alive: bool = false, - -/// Specifies if the atom has been visited during garbage collection. -visited: bool = false, +/// Flags we use for state tracking. +flags: Flags = .{}, /// Start index of FDEs referencing this atom. fde_start: u32 = 0, @@ -48,8 +45,12 @@ pub fn name(self: Atom, elf_file: *Elf) []const u8 { return elf_file.strtab.getAssumeExists(self.name_offset); } +pub fn file(self: Atom, elf_file: *Elf) ?File { + return elf_file.file(self.file_index); +} + pub fn inputShdr(self: Atom, elf_file: *Elf) elf.Elf64_Shdr { - const object = elf_file.file(self.file_index).?.object; + const object = self.file(elf_file).?.object; return object.shdrs.items[self.input_section_index]; } @@ -59,7 +60,7 @@ pub fn outputShndx(self: Atom) ?u16 { } pub fn codeInObject(self: Atom, elf_file: *Elf) error{Overflow}![]const u8 { - const object = elf_file.file(self.file_index).?.object; + const object = self.file(elf_file).?.object; return object.shdrContents(self.input_section_index); } @@ -91,7 +92,7 @@ pub fn codeInObjectUncompressAlloc(self: Atom, elf_file: *Elf) ![]u8 { } pub fn priority(self: Atom, elf_file: *Elf) u64 { - const index = elf_file.file(self.file_index).?.index(); + const index = self.file(elf_file).?.index(); return (@as(u64, @intCast(index)) << 32) | @as(u64, @intCast(self.input_section_index)); } @@ -178,6 +179,13 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { } }; + log.debug("allocated atom({d}) : '{s}' at 0x{x} to 0x{x}", .{ + self.atom_index, + self.name(elf_file), + self.value, + self.value + self.size, + }); + const expand_section = if (atom_placement) |placement_index| elf_file.atom(placement_index).?.next_index == 0 else @@ -222,6 +230,8 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void { if (free_list_removal) |i| { _ = free_list.swapRemove(i); } + + self.flags.allocated = true; } pub fn shrink(self: *Atom, elf_file: *Elf) void { @@ -238,7 +248,7 @@ pub fn free(self: *Atom, elf_file: *Elf) void { log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.name(elf_file) }); const gpa = elf_file.base.allocator; - const zig_module = elf_file.file(self.file_index).?.zig_module; + const zig_module = self.file(elf_file).?.zig_module; const shndx = self.outputShndx().?; const meta = elf_file.last_atom_and_free_list_table.getPtr(shndx).?; const free_list = &meta.free_list; @@ -294,7 +304,7 @@ pub fn free(self: *Atom, elf_file: *Elf) void { } pub fn relocs(self: Atom, elf_file: *Elf) error{Overflow}![]align(1) const elf.Elf64_Rela { - return switch (elf_file.file(self.file_index).?) { + return switch (self.file(elf_file).?) 
{ .zig_module => |x| x.relocs.items[self.relocs_section_index].items, .object => |x| x.getRelocs(self.relocs_section_index), else => unreachable, @@ -303,7 +313,7 @@ pub fn relocs(self: Atom, elf_file: *Elf) error{Overflow}![]align(1) const elf.E pub fn addReloc(self: Atom, elf_file: *Elf, reloc: elf.Elf64_Rela) !void { const gpa = elf_file.base.allocator; - const file_ptr = elf_file.file(self.file_index).?; + const file_ptr = self.file(elf_file).?; assert(file_ptr == .zig_module); const zig_module = file_ptr.zig_module; const rels = &zig_module.relocs.items[self.relocs_section_index]; @@ -311,14 +321,14 @@ pub fn addReloc(self: Atom, elf_file: *Elf, reloc: elf.Elf64_Rela) !void { } pub fn freeRelocs(self: Atom, elf_file: *Elf) void { - const file_ptr = elf_file.file(self.file_index).?; + const file_ptr = self.file(elf_file).?; assert(file_ptr == .zig_module); const zig_module = file_ptr.zig_module; zig_module.relocs.items[self.relocs_section_index].clearRetainingCapacity(); } pub fn scanRelocs(self: Atom, elf_file: *Elf, undefs: anytype) !void { - const file_ptr = elf_file.file(self.file_index).?; + const file_ptr = self.file(elf_file).?; const rels = try self.relocs(elf_file); var i: usize = 0; while (i < rels.len) : (i += 1) { @@ -392,7 +402,7 @@ fn reportUndefined( rel: elf.Elf64_Rela, undefs: anytype, ) !void { - const rel_esym = switch (elf_file.file(self.file_index).?) { + const rel_esym = switch (self.file(elf_file).?) { .zig_module => |x| x.elfSym(rel.r_sym()).*, .object => |x| x.symtab[rel.r_sym()], else => unreachable, @@ -416,7 +426,7 @@ fn reportUndefined( pub fn resolveRelocs(self: Atom, elf_file: *Elf, code: []u8) !void { relocs_log.debug("0x{x}: {s}", .{ self.value, self.name(elf_file) }); - const file_ptr = elf_file.file(self.file_index).?; + const file_ptr = self.file(elf_file).?; var stream = std.io.fixedBufferStream(code); const cwriter = stream.writer(); @@ -619,7 +629,7 @@ fn format2( // try writer.writeAll(" }"); // } const gc_sections = if (elf_file.base.options.gc_sections) |gc_sections| gc_sections else false; - if (gc_sections and !atom.alive) { + if (gc_sections and !atom.flags.alive) { try writer.writeAll(" : [*]"); } } @@ -629,6 +639,17 @@ fn format2( // future. pub const Index = u16; +pub const Flags = packed struct { + /// Specifies whether this atom is alive or has been garbage collected. + alive: bool = false, + + /// Specifies if the atom has been visited during garbage collection. + visited: bool = false, + + /// Specifies whether this atom has been allocated in the output section. 
diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig
index 8ee37b7676..fe14831d1a 100644
--- a/src/link/Elf/Object.zig
+++ b/src/link/Elf/Object.zig
@@ -180,7 +180,7 @@ fn addAtom(
     atom.file_index = self.index;
     atom.input_section_index = shndx;
     atom.output_section_index = try self.getOutputSectionIndex(elf_file, shdr);
-    atom.alive = true;
+    atom.flags.alive = true;
     self.atoms.items[shndx] = atom_index;
 
     if (shdr.sh_flags & elf.SHF_COMPRESSED != 0) {
@@ -424,7 +424,7 @@ fn filterRelocs(
 pub fn scanRelocs(self: *Object, elf_file: *Elf, undefs: anytype) !void {
     for (self.atoms.items) |atom_index| {
         const atom = elf_file.atom(atom_index) orelse continue;
-        if (!atom.alive) continue;
+        if (!atom.flags.alive) continue;
         const shdr = atom.inputShdr(elf_file);
         if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
         if (shdr.sh_type == elf.SHT_NOBITS) continue;
@@ -458,7 +458,7 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
         if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
             const atom_index = self.atoms.items[esym.st_shndx];
             const atom = elf_file.atom(atom_index) orelse continue;
-            if (!atom.alive) continue;
+            if (!atom.flags.alive) continue;
         }
 
         const global = elf_file.symbol(index);
@@ -553,7 +553,7 @@ pub fn checkDuplicates(self: *Object, elf_file: *Elf) void {
         if (this_sym.st_shndx != elf.SHN_ABS) {
             const atom_index = self.atoms.items[this_sym.st_shndx];
             const atom = elf_file.atom(atom_index) orelse continue;
-            if (!atom.alive) continue;
+            if (!atom.flags.alive) continue;
         }
 
         elf_file.base.fatal("multiple definition: {}: {}: {s}", .{
@@ -628,7 +628,7 @@ pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
 pub fn updateSymtabSize(self: *Object, elf_file: *Elf) void {
     for (self.locals()) |local_index| {
         const local = elf_file.symbol(local_index);
-        if (local.atom(elf_file)) |atom| if (!atom.alive) continue;
+        if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
         const esym = local.elfSym(elf_file);
         switch (esym.st_type()) {
             elf.STT_SECTION, elf.STT_NOTYPE => continue,
@@ -641,7 +641,7 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) void {
     for (self.globals()) |global_index| {
         const global = elf_file.symbol(global_index);
         if (global.file(elf_file)) |file| if (file.index() != self.index) continue;
-        if (global.atom(elf_file)) |atom| if (!atom.alive) continue;
+        if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
         global.flags.output_symtab = true;
         if (global.isLocal()) {
             self.output_symtab_size.nlocals += 1;
diff --git a/src/link/Elf/ZigModule.zig b/src/link/Elf/ZigModule.zig
index 091c72fad9..93908e5f1f 100644
--- a/src/link/Elf/ZigModule.zig
+++ b/src/link/Elf/ZigModule.zig
@@ -53,17 +53,19 @@ pub fn addAtom(self: *ZigModule, elf_file: *Elf) !Symbol.Index {
     const gpa = elf_file.base.allocator;
 
     const atom_index = try elf_file.addAtom();
+    const symbol_index = try elf_file.addSymbol();
+    const esym_index = try self.addLocalEsym(gpa);
+
     try self.atoms.putNoClobber(gpa, atom_index, {});
+    try self.local_symbols.append(gpa, symbol_index);
+
     const atom_ptr = elf_file.atom(atom_index).?;
     atom_ptr.file_index = self.index;
 
-    const symbol_index = try elf_file.addSymbol();
-    try self.local_symbols.append(gpa, symbol_index);
     const symbol_ptr = elf_file.symbol(symbol_index);
     symbol_ptr.file_index = self.index;
     symbol_ptr.atom_index = atom_index;
 
-    const esym_index = try self.addLocalEsym(gpa);
     const esym = &self.local_esyms.items[esym_index];
     esym.st_shndx = atom_index;
     symbol_ptr.esym_index = esym_index;
@@ -86,7 +88,7 @@ pub fn resolveSymbols(self: *ZigModule, elf_file: *Elf) void {
         if (esym.st_shndx != elf.SHN_ABS and esym.st_shndx != elf.SHN_COMMON) {
             const atom_index = esym.st_shndx;
             const atom = elf_file.atom(atom_index) orelse continue;
-            if (!atom.alive) continue;
+            if (!atom.flags.alive) continue;
         }
 
         const global = elf_file.symbol(index);
@@ -141,7 +143,7 @@ pub fn claimUnresolved(self: *ZigModule, elf_file: *Elf) void {
 pub fn scanRelocs(self: *ZigModule, elf_file: *Elf, undefs: anytype) !void {
     for (self.atoms.keys()) |atom_index| {
         const atom = elf_file.atom(atom_index) orelse continue;
-        if (!atom.alive) continue;
+        if (!atom.flags.alive) continue;
         try atom.scanRelocs(elf_file, undefs);
     }
 }
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index e1235140d2..a664061fda 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -89,6 +89,21 @@ pub const File = union(enum) {
         }
     }
 
+    pub fn atoms(file: File) []const Atom.Index {
+        return switch (file) {
+            .linker_defined => unreachable,
+            .zig_module => |x| x.atoms.keys(),
+            .object => |x| x.atoms.items,
+        };
+    }
+
+    pub fn locals(file: File) []const Symbol.Index {
+        return switch (file) {
+            .linker_defined => unreachable,
+            inline else => |x| x.locals(),
+        };
+    }
+
     pub fn globals(file: File) []const Symbol.Index {
         return switch (file) {
             inline else => |x| x.globals(),
@@ -110,6 +125,7 @@ const std = @import("std");
 const elf = std.elf;
 const Allocator = std.mem.Allocator;
 
+const Atom = @import("Atom.zig");
 const Elf = @import("../Elf.zig");
 const LinkerDefined = @import("LinkerDefined.zig");
 const Object = @import("Object.zig");
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index f178f1370f..caffd401be 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -169,6 +169,21 @@ pub const GotSection = struct {
         }
     }
 
+    pub fn writeAllEntries(got: GotSection, elf_file: *Elf, writer: anytype) !void {
+        assert(!got.dirty);
+        const entry_size: u16 = elf_file.archPtrWidthBytes();
+        const endian = elf_file.base.options.target.cpu.arch.endian();
+        for (got.entries.items) |entry| {
+            const value = elf_file.symbol(entry.symbol_index).value;
+            switch (entry_size) {
+                2 => try writer.writeInt(u16, @intCast(value), endian),
+                4 => try writer.writeInt(u32, @intCast(value), endian),
+                8 => try writer.writeInt(u64, @intCast(value), endian),
+                else => unreachable,
+            }
+        }
+    }
+
     // pub fn write(got: GotSection, elf_file: *Elf, writer: anytype) !void {
     //     const is_shared = elf_file.options.output_mode == .lib;
    //     const apply_relocs = elf_file.options.apply_dynamic_relocs;
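
The serialization performed by `writeAllEntries` is simply one pointer-sized integer per GOT entry, written in the target's byte order. A self-contained sketch with made-up symbol values for a 64-bit target (the native endianness stands in for `target.cpu.arch.endian()`):

```zig
const std = @import("std");
const builtin = @import("builtin");

test "got entries serialize as pointer-sized integers" {
    // Hypothetical symbol values for three GOT slots.
    const values = [_]u64{ 0x4000010, 0x4000020, 0x4000038 };
    const endian = builtin.cpu.arch.endian();

    var buffer = std.ArrayList(u8).init(std.testing.allocator);
    defer buffer.deinit();

    const writer = buffer.writer();
    for (values) |value| {
        try writer.writeInt(u64, value, endian);
    }

    // Three 8-byte entries; the second one starts at offset 8.
    try std.testing.expectEqual(@as(usize, 24), buffer.items.len);
    try std.testing.expectEqual(@as(u64, 0x4000020), std.mem.readInt(u64, buffer.items[8..16], endian));
}
```
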