From 7fead5d6dd078beda041ef2f490fb50ffae4dc82 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Thu, 26 Sep 2024 12:24:37 +0200
Subject: [PATCH 01/18] elf: track atoms within AtomList with array hash map

---
 src/link/Elf.zig | 28 ++++++++++++++--------------
 src/link/Elf/AtomList.zig | 33 +++++++++++++++++----------------
 src/link/Elf/Object.zig | 2 +-
 src/link/Elf/relocatable.zig | 4 ++--
 4 files changed, 34 insertions(+), 33 deletions(-)

diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index 0ccb920a3e..1d9f7f0a47 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -3219,7 +3219,7 @@ fn sortInitFini(self: *Elf) !void {
     for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
         if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
-        if (atom_list.atoms.items.len == 0) continue;
+        if (atom_list.atoms.keys().len == 0) continue;
         var is_init_fini = false;
         var is_ctor_dtor = false;
@@ -3236,10 +3236,10 @@ fn sortInitFini(self: *Elf) !void {
         if (!is_init_fini and !is_ctor_dtor) continue;
         var entries = std.ArrayList(Entry).init(gpa);
-        try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len);
+        try entries.ensureTotalCapacityPrecise(atom_list.atoms.keys().len);
         defer entries.deinit();
-        for (atom_list.atoms.items) |ref| {
+        for (atom_list.atoms.keys()) |ref| {
             const atom_ptr = self.atom(ref).?;
             const object = atom_ptr.file(self).?.object;
             const priority = blk: {
@@ -3260,7 +3260,7 @@ fn sortInitFini(self: *Elf) !void {
         atom_list.atoms.clearRetainingCapacity();
         for (entries.items) |entry| {
-            atom_list.atoms.appendAssumeCapacity(entry.atom_ref);
+            _ = atom_list.atoms.getOrPutAssumeCapacity(entry.atom_ref);
         }
     }
 }
@@ -3506,7 +3506,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
     const slice = self.sections.slice();
     for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
         atom_list.output_section_index = backlinks[atom_list.output_section_index];
-        for (atom_list.atoms.items) |ref| {
+        for (atom_list.atoms.keys()) |ref| {
             self.atom(ref).?.output_section_index = atom_list.output_section_index;
         }
         if (shdr.sh_type == elf.SHT_RELA) {
@@ -3585,7 +3585,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
 fn updateSectionSizes(self: *Elf) !void {
     const slice = self.sections.slice();
     for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
-        if (atom_list.atoms.items.len == 0) continue;
+        if (atom_list.atoms.keys().len == 0) continue;
         if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
         atom_list.updateSize(self);
         try atom_list.allocate(self);
@@ -3594,7 +3594,7 @@ fn updateSectionSizes(self: *Elf) !void {
     if (self.requiresThunks()) {
         for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
             if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
-            if (atom_list.atoms.items.len == 0) continue;
+            if (atom_list.atoms.keys().len == 0) continue;
             // Create jump/branch range extenders if needed.
try self.createThunks(atom_list); @@ -4058,7 +4058,7 @@ fn writeAtoms(self: *Elf) !void { var has_reloc_errors = false; for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| { if (shdr.sh_type == elf.SHT_NOBITS) continue; - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; atom_list.write(&buffer, &undefs, self) catch |err| switch (err) { error.UnsupportedCpuArch => { try self.reportUnsupportedCpuArch(); @@ -5732,20 +5732,20 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void { } }.advance; - for (atom_list.atoms.items) |ref| { + for (atom_list.atoms.keys()) |ref| { elf_file.atom(ref).?.value = -1; } var i: usize = 0; - while (i < atom_list.atoms.items.len) { + while (i < atom_list.atoms.keys().len) { const start = i; - const start_atom = elf_file.atom(atom_list.atoms.items[start]).?; + const start_atom = elf_file.atom(atom_list.atoms.keys()[start]).?; assert(start_atom.alive); start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment); i += 1; - while (i < atom_list.atoms.items.len) : (i += 1) { - const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?; + while (i < atom_list.atoms.keys().len) : (i += 1) { + const atom_ptr = elf_file.atom(atom_list.atoms.keys()[i]).?; assert(atom_ptr.alive); if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance) break; @@ -5758,7 +5758,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void { thunk_ptr.output_section_index = atom_list.output_section_index; // Scan relocs in the group and create trampolines for any unreachable callsite - for (atom_list.atoms.items[start..i]) |ref| { + for (atom_list.atoms.keys()[start..i]) |ref| { const atom_ptr = elf_file.atom(ref).?; const file_ptr = atom_ptr.file(elf_file).?; log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) }); diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index dfabbe0ff7..e07b2bbef7 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -2,7 +2,8 @@ value: i64 = 0, size: u64 = 0, alignment: Atom.Alignment = .@"1", output_section_index: u32 = 0, -atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, +// atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, +atoms: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty, pub fn deinit(list: *AtomList, allocator: Allocator) void { list.atoms.deinit(allocator); @@ -22,7 +23,7 @@ pub fn updateSize(list: *AtomList, elf_file: *Elf) void { // TODO perhaps a 'stale' flag would be better here? list.size = 0; list.alignment = .@"1"; - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); const off = atom_ptr.alignment.forward(list.size); @@ -56,13 +57,13 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { // FIXME:JK this currently ignores Thunks as valid chunks. 
{ var idx: usize = 0; - while (idx < list.atoms.items.len) : (idx += 1) { - const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?; + while (idx < list.atoms.keys().len) : (idx += 1) { + const curr_atom_ptr = elf_file.atom(list.atoms.keys()[idx]).?; if (idx > 0) { - curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1]; + curr_atom_ptr.prev_atom_ref = list.atoms.keys()[idx - 1]; } - if (idx + 1 < list.atoms.items.len) { - curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1]; + if (idx + 1 < list.atoms.keys().len) { + curr_atom_ptr.next_atom_ref = list.atoms.keys()[idx + 1]; } } } @@ -74,7 +75,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { } // FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; atom_ptr.output_section_index = list.output_section_index; atom_ptr.value += list.value; @@ -92,7 +93,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi try buffer.ensureUnusedCapacity(list_size); buffer.appendNTimesAssumeCapacity(0, list_size); - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); @@ -128,7 +129,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E try buffer.ensureUnusedCapacity(list_size); buffer.appendNTimesAssumeCapacity(0, list_size); - for (list.atoms.items) |ref| { + for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); @@ -149,13 +150,13 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E } pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom { - assert(list.atoms.items.len > 0); - return elf_file.atom(list.atoms.items[0]).?; + assert(list.atoms.keys().len > 0); + return elf_file.atom(list.atoms.keys()[0]).?; } pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom { - assert(list.atoms.items.len > 0); - return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?; + assert(list.atoms.keys().len > 0); + return elf_file.atom(list.atoms.keys()[list.atoms.keys().len - 1]).?; } pub fn format( @@ -191,9 +192,9 @@ fn format2( list.alignment.toByteUnits() orelse 0, list.size, }); try writer.writeAll(" : atoms{ "); - for (list.atoms.items, 0..) |ref, i| { + for (list.atoms.keys(), 0..) 
|ref, i| { try writer.print("{}", .{ref}); - if (i < list.atoms.items.len - 1) try writer.writeAll(", "); + if (i < list.atoms.keys().len - 1) try writer.writeAll(", "); } try writer.writeAll(" }"); } diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 1805507aa9..461192049e 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -915,7 +915,7 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void { }); const atom_list = &elf_file.sections.items(.atom_list_2)[osec]; atom_list.output_section_index = osec; - try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref()); + _ = try atom_list.atoms.getOrPut(elf_file.base.comp.gpa, atom_ptr.ref()); } } diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 4bd42ffcd6..2a6aaf71da 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -335,7 +335,7 @@ fn initComdatGroups(elf_file: *Elf) !void { fn updateSectionSizes(elf_file: *Elf) !void { const slice = elf_file.sections.slice(); for (slice.items(.atom_list_2)) |*atom_list| { - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; atom_list.updateSize(elf_file); try atom_list.allocate(elf_file); } @@ -434,7 +434,7 @@ fn writeAtoms(elf_file: *Elf) !void { const slice = elf_file.sections.slice(); for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| { if (shdr.sh_type == elf.SHT_NOBITS) continue; - if (atom_list.atoms.items.len == 0) continue; + if (atom_list.atoms.keys().len == 0) continue; try atom_list.writeRelocatable(&buffer, elf_file); } } From bc7e0342b52a24a1052721396dfa88cd86e1304b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 26 Sep 2024 20:59:42 +0200 Subject: [PATCH 02/18] elf: do not re-populate synthetic sections when updating --- src/link/Elf/file.zig | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig index 88dc807274..a1db5110f0 100644 --- a/src/link/Elf/file.zig +++ b/src/link/Elf/file.zig @@ -95,16 +95,16 @@ pub const File = union(enum) { log.debug("'{s}' is non-local", .{sym.name(ef)}); try ef.dynsym.addSymbol(ref, ef); } - if (sym.flags.needs_got) { + if (sym.flags.needs_got and !sym.flags.has_got) { log.debug("'{s}' needs GOT", .{sym.name(ef)}); _ = try ef.got.addGotSymbol(ref, ef); } - if (sym.flags.needs_plt) { + if (sym.flags.needs_plt and !sym.flags.has_plt) { if (sym.flags.is_canonical) { log.debug("'{s}' needs CPLT", .{sym.name(ef)}); sym.flags.@"export" = true; try ef.plt.addSymbol(ref, ef); - } else if (sym.flags.needs_got) { + } else if (sym.flags.needs_got and !sym.flags.has_got) { log.debug("'{s}' needs PLTGOT", .{sym.name(ef)}); try ef.plt_got.addSymbol(ref, ef); } else { @@ -116,15 +116,15 @@ pub const File = union(enum) { log.debug("'{s}' needs COPYREL", .{sym.name(ef)}); try ef.copy_rel.addSymbol(ref, ef); } - if (sym.flags.needs_tlsgd) { + if (sym.flags.needs_tlsgd and !sym.flags.has_tlsgd) { log.debug("'{s}' needs TLSGD", .{sym.name(ef)}); try ef.got.addTlsGdSymbol(ref, ef); } - if (sym.flags.needs_gottp) { + if (sym.flags.needs_gottp and !sym.flags.has_gottp) { log.debug("'{s}' needs GOTTP", .{sym.name(ef)}); try ef.got.addGotTpSymbol(ref, ef); } - if (sym.flags.needs_tlsdesc) { + if (sym.flags.needs_tlsdesc and !sym.flags.has_tlsdesc) { log.debug("'{s}' needs TLSDESC", .{sym.name(ef)}); try ef.got.addTlsDescSymbol(ref, ef); } From bae3dbffdf7a0b00f7e6c9100a8fedf62ced4701 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 26 
Sep 2024 21:22:53 +0200 Subject: [PATCH 03/18] elf: clear dynamic relocs before repopulating --- src/link/Elf.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 1d9f7f0a47..a468f3b929 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -4269,6 +4269,8 @@ fn writeSyntheticSections(self: *Elf) !void { } if (self.rela_dyn_section_index) |shndx| { + // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? + self.rela_dyn.clearRetainingCapacity(); const shdr = slice.items(.shdr)[shndx]; try self.got.addRela(self); try self.copy_rel.addRela(self); @@ -4301,6 +4303,8 @@ fn writeSyntheticSections(self: *Elf) !void { } if (self.rela_plt_section_index) |shndx| { + // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? + self.rela_plt.clearRetainingCapacity(); const shdr = slice.items(.shdr)[shndx]; try self.plt.addRela(self); try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset); From bd5fc899dbf134de0bc76b772f99565a6529c75f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 27 Sep 2024 07:31:07 +0200 Subject: [PATCH 04/18] elf: do not re-allocate AtomLists unless dirtied --- src/link/Elf.zig | 4 ++++ src/link/Elf/AtomList.zig | 13 ++++++++++--- src/link/Elf/relocatable.zig | 2 ++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index a468f3b929..c908e9d201 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3586,19 +3586,23 @@ fn updateSectionSizes(self: *Elf) !void { const slice = self.sections.slice(); for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (atom_list.atoms.keys().len == 0) continue; + if (!atom_list.dirty) continue; if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; atom_list.updateSize(self); try atom_list.allocate(self); + atom_list.dirty = false; } if (self.requiresThunks()) { for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; if (atom_list.atoms.keys().len == 0) continue; + if (!atom_list.dirty) continue; // Create jump/branch range extenders if needed. try self.createThunks(atom_list); try atom_list.allocate(self); + atom_list.dirty = false; } // FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList. diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index e07b2bbef7..bab4726f24 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -5,6 +5,8 @@ output_section_index: u32 = 0, // atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, atoms: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty, +dirty: bool = true, + pub fn deinit(list: *AtomList, allocator: Allocator) void { list.atoms.deinit(allocator); } @@ -20,9 +22,7 @@ pub fn offset(list: AtomList, elf_file: *Elf) u64 { } pub fn updateSize(list: *AtomList, elf_file: *Elf) void { - // TODO perhaps a 'stale' flag would be better here? 
- list.size = 0; - list.alignment = .@"1"; + assert(list.dirty); for (list.atoms.keys()) |ref| { const atom_ptr = elf_file.atom(ref).?; assert(atom_ptr.alive); @@ -35,6 +35,8 @@ pub fn updateSize(list: *AtomList, elf_file: *Elf) void { } pub fn allocate(list: *AtomList, elf_file: *Elf) !void { + assert(list.dirty); + const alloc_res = try elf_file.allocateChunk(.{ .shndx = list.output_section_index, .size = list.size, @@ -43,6 +45,8 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { }); list.value = @intCast(alloc_res.value); + log.debug("allocated atom_list({d}) at 0x{x}", .{ list.output_section_index, list.address(elf_file) }); + const slice = elf_file.sections.slice(); const shdr = &slice.items(.shdr)[list.output_section_index]; const last_atom_ref = &slice.items(.last_atom)[list.output_section_index]; @@ -80,12 +84,15 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void { atom_ptr.output_section_index = list.output_section_index; atom_ptr.value += list.value; } + + list.dirty = false; } pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void { const gpa = elf_file.base.comp.gpa; const osec = elf_file.sections.items(.shdr)[list.output_section_index]; assert(osec.sh_type != elf.SHT_NOBITS); + assert(!list.dirty); log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 2a6aaf71da..772025f7db 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -336,8 +336,10 @@ fn updateSectionSizes(elf_file: *Elf) !void { const slice = elf_file.sections.slice(); for (slice.items(.atom_list_2)) |*atom_list| { if (atom_list.atoms.keys().len == 0) continue; + if (!atom_list.dirty) continue; atom_list.updateSize(elf_file); try atom_list.allocate(elf_file); + atom_list.dirty = false; } for (slice.items(.shdr), 0..) |*shdr, shndx| { From 4604577ae17ca850621eb40523b0a67bf6ac6971 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 28 Sep 2024 07:37:55 +0200 Subject: [PATCH 05/18] elf: use arena for incremental cache --- src/link/Elf.zig | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c908e9d201..72889ca05d 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -809,8 +809,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod const csu = try CsuObjects.init(arena, comp); - // Here we will parse object and library files (if referenced). - // csu prelude if (csu.crt0) |path| try parseObjectReportingFailure(self, path); if (csu.crti) |path| try parseObjectReportingFailure(self, path); From 9a15c3e1a1dc01d04c976ffbdec46b206455634e Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 28 Sep 2024 08:40:17 +0200 Subject: [PATCH 06/18] elf: mark objects as dirty/not-dirty This way we can track if we need to redo the object parsing or not. --- src/link/Elf.zig | 26 ++++++++++++++++---------- src/link/Elf/Object.zig | 1 + 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 72889ca05d..8a394cfeae 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1038,6 +1038,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod // Beyond this point, everything has been allocated a virtual address and we can resolve // the relocations, and commit objects to file. 
+ for (self.objects.items) |index| { + self.file(index).?.object.dirty = false; + } + if (self.zigObjectPtr()) |zo| { var has_reloc_errors = false; for (zo.atoms_indexes.items) |atom_index| { @@ -1399,7 +1403,6 @@ pub fn parseLibraryReportingFailure(self: *Elf, lib: SystemLib, must_link: bool) fn parseLibrary(self: *Elf, lib: SystemLib, must_link: bool) ParseError!void { const tracy = trace(@src()); defer tracy.end(); - if (try Archive.isArchive(lib.path)) { try self.parseArchive(lib.path, must_link); } else if (try SharedObject.isSharedObject(lib.path)) { @@ -2799,9 +2802,10 @@ pub fn resolveMergeSections(self: *Elf) !void { var has_errors = false; for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - file_ptr.object.initInputMergeSections(self) catch |err| switch (err) { + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + object.initInputMergeSections(self) catch |err| switch (err) { error.LinkFailure => has_errors = true, else => |e| return e, }; @@ -2810,15 +2814,17 @@ pub fn resolveMergeSections(self: *Elf) !void { if (has_errors) return error.FlushFailure; for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - try file_ptr.object.initOutputMergeSections(self); + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + try object.initOutputMergeSections(self); } for (self.objects.items) |index| { - const file_ptr = self.file(index).?; - if (!file_ptr.isAlive()) continue; - file_ptr.object.resolveMergeSubsections(self) catch |err| switch (err) { + const object = self.file(index).?.object; + if (!object.alive) continue; + if (!object.dirty) continue; + object.resolveMergeSubsections(self) catch |err| switch (err) { error.LinkFailure => has_errors = true, else => |e| return e, }; diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 461192049e..670bdc3bf5 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -29,6 +29,7 @@ cies: std.ArrayListUnmanaged(Cie) = .empty, eh_frame_data: std.ArrayListUnmanaged(u8) = .empty, alive: bool = true, +dirty: bool = true, num_dynrelocs: u32 = 0, output_symtab_ctx: Elf.SymtabCtx = .{}, From 0aa24ac2e3c122ad16d54d640c11f2de6eb2299a Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 30 Sep 2024 22:09:44 +0200 Subject: [PATCH 07/18] elf: move sections in segments that need moving only --- src/link/Elf.zig | 95 +++++++++++++++++++++----------------- src/link/Elf/ZigObject.zig | 38 --------------- 2 files changed, 52 insertions(+), 81 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 8a394cfeae..a61cc5ef5f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -570,8 +570,6 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_align const slice = self.sections.slice(); const shdr = &slice.items(.shdr)[shdr_index]; assert(shdr.sh_flags & elf.SHF_ALLOC != 0); - const phndx = slice.items(.phndx)[shdr_index]; - const maybe_phdr = if (phndx) |ndx| &self.phdrs.items[ndx] else null; log.debug("allocated size {x} of {s}, needed size {x}", .{ self.allocatedSize(shdr.sh_offset), @@ -598,11 +596,9 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_align if (amt != existing_size) return error.InputOutput; shdr.sh_offset = new_offset; - if (maybe_phdr) |phdr| phdr.p_offset = new_offset; } else if (shdr.sh_offset + allocated_size == 
std.math.maxInt(u64)) { try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); } - if (maybe_phdr) |phdr| phdr.p_filesz = needed_size; } shdr.sh_size = needed_size; self.markDirty(shdr_index); @@ -3890,57 +3886,70 @@ pub fn allocateAllocSections(self: *Elf) !void { } const first = slice.items(.shdr)[cover.items[0]]; - var new_offset = try self.findFreeSpace(filesz, @"align"); const phndx = self.getPhdr(.{ .type = elf.PT_LOAD, .flags = shdrToPhdrFlags(first.sh_flags) }).?; const phdr = &self.phdrs.items[phndx]; - phdr.p_offset = new_offset; - phdr.p_vaddr = first.sh_addr; - phdr.p_paddr = first.sh_addr; - phdr.p_memsz = memsz; - phdr.p_filesz = filesz; - phdr.p_align = @"align"; + const allocated_size = self.allocatedSize(phdr.p_offset); + if (filesz > allocated_size) { + const old_offset = phdr.p_offset; + phdr.p_offset = 0; + var new_offset = try self.findFreeSpace(filesz, @"align"); + phdr.p_offset = new_offset; - for (cover.items) |shndx| { - const shdr = &slice.items(.shdr)[shndx]; - slice.items(.phndx)[shndx] = phndx; - if (shdr.sh_type == elf.SHT_NOBITS) { - shdr.sh_offset = 0; - continue; - } - new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset); + log.debug("moving phdr({d}) from 0x{x} to 0x{x}", .{ phndx, old_offset, new_offset }); + + for (cover.items) |shndx| { + const shdr = &slice.items(.shdr)[shndx]; + slice.items(.phndx)[shndx] = phndx; + if (shdr.sh_type == elf.SHT_NOBITS) { + shdr.sh_offset = 0; + continue; + } + new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset); - if (self.zigObjectPtr()) |zo| blk: { - const existing_size = for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.tdata_index, - zo.eh_frame_index, - }) |maybe_sym_index| { - const sect_sym_index = maybe_sym_index orelse continue; - const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?; - if (sect_atom_ptr.output_section_index != shndx) continue; - break sect_atom_ptr.size; - } else break :blk; log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), shdr.sh_offset, new_offset, }); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; - } - shdr.sh_offset = new_offset; - new_offset += shdr.sh_size; + if (shdr.sh_offset > 0) { + // Get size actually commited to the output file. 
+ const existing_size = if (self.zigObjectPtr()) |zo| for ([_]?Symbol.Index{ + zo.text_index, + zo.rodata_index, + zo.data_relro_index, + zo.data_index, + zo.tdata_index, + zo.eh_frame_index, + }) |maybe_sym_index| { + const sect_sym_index = maybe_sym_index orelse continue; + const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?; + if (sect_atom_ptr.output_section_index != shndx) continue; + break sect_atom_ptr.size; + } else 0 else 0 + if (!slice.items(.atom_list_2)[shndx].dirty) + slice.items(.atom_list_2)[shndx].size + else + 0; + const amt = try self.base.file.?.copyRangeAll( + shdr.sh_offset, + self.base.file.?, + new_offset, + existing_size, + ); + if (amt != existing_size) return error.InputOutput; + } + + shdr.sh_offset = new_offset; + new_offset += shdr.sh_size; + } } + phdr.p_vaddr = first.sh_addr; + phdr.p_paddr = first.sh_addr; + phdr.p_memsz = memsz; + phdr.p_filesz = filesz; + phdr.p_align = @"align"; + addr = mem.alignForward(u64, addr, self.page_size); } } diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index de70462b07..b27474fcab 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -336,8 +336,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi const atom_ptr = self.atom(sym.ref.index).?; if (!atom_ptr.alive) continue; - log.debug("parsing relocs in {s}", .{sym.name(elf_file)}); - const relocs = &self.relocs.items[atom_ptr.relocsShndx().?]; for (sect.units.items) |*unit| { try relocs.ensureUnusedCapacity(gpa, unit.cross_unit_relocs.items.len + @@ -350,12 +348,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -384,12 +376,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(target_sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -410,12 +396,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -430,12 +410,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -464,12 +438,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi else 0)); const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch); - 
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - self.symbol(target_sym_index).name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, @@ -481,12 +449,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi const r_offset = entry_off + reloc.source_off; const r_addend: i64 = @intCast(reloc.target_off); const r_type = relocation.dwarf.externalRelocType(target_sym.*, sect_index, dwarf.address_size, cpu_arch); - log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{ - target_sym.name(elf_file), - r_offset, - r_addend, - relocation.fmtRelocType(r_type, cpu_arch), - }); atom_ptr.addRelocAssumeCapacity(.{ .r_offset = r_offset, .r_addend = r_addend, From 887f9a29f35ca32521ac6786ba6aab6fd240421f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 1 Oct 2024 07:31:13 +0200 Subject: [PATCH 08/18] elf: combine growAllocSection and growNonAllocSection into growSection --- src/link/Dwarf.zig | 15 ++++---- src/link/Elf.zig | 90 +++++++++++----------------------------------- 2 files changed, 28 insertions(+), 77 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 6efa708d05..840cef69fe 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -390,14 +390,15 @@ pub const Section = struct { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; const shndx = atom.output_section_index; - if (sec == &dwarf.debug_frame.section) - try elf_file.growAllocSection(shndx, len, sec.alignment.toByteUnits().?) - else - try elf_file.growNonAllocSection(shndx, len, sec.alignment.toByteUnits().?, true); - const shdr = elf_file.sections.items(.shdr)[shndx]; - atom.size = shdr.sh_size; + const needed_size = len; + const min_alignment = sec.alignment.toByteUnits().?; + try elf_file.growSection(shndx, needed_size, min_alignment); + const shdr = &elf_file.sections.items(.shdr)[shndx]; + shdr.sh_size = needed_size; + elf_file.markDirty(shndx); + atom.size = needed_size; atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); - sec.len = shdr.sh_size; + sec.len = len; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { try d_sym.growSection(@intCast(sec.index), len, true, macho_file); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index a61cc5ef5f..fd7c54dd20 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -548,16 +548,6 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { return min_pos - start; } -fn allocatedVirtualSize(self: *Elf, start: u64) u64 { - if (start == 0) return 0; - var min_pos: u64 = std.math.maxInt(u64); - for (self.phdrs.items) |phdr| { - if (phdr.p_vaddr <= start) continue; - if (phdr.p_vaddr < min_pos) min_pos = phdr.p_vaddr; - } - return min_pos - start; -} - pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { var start: u64 = 0; while (try self.detectAllocCollision(start, object_size)) |item_end| { @@ -566,59 +556,21 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { return start; } -pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { - const slice = self.sections.slice(); - const shdr = &slice.items(.shdr)[shdr_index]; - assert(shdr.sh_flags & elf.SHF_ALLOC != 0); +pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { + const shdr = 
&self.sections.items(.shdr)[shdr_index]; + assert(shdr.sh_type != elf.SHT_NOBITS); - log.debug("allocated size {x} of {s}, needed size {x}", .{ - self.allocatedSize(shdr.sh_offset), + const allocated_size = self.allocatedSize(shdr.sh_offset); + log.debug("allocated size {x} of '{s}', needed size {x}", .{ + allocated_size, self.getShString(shdr.sh_name), needed_size, }); - if (shdr.sh_type != elf.SHT_NOBITS) { - const allocated_size = self.allocatedSize(shdr.sh_offset); - if (needed_size > allocated_size) { - const existing_size = shdr.sh_size; - shdr.sh_size = 0; - // Must move the entire section. - const new_offset = try self.findFreeSpace(needed_size, min_alignment); - - log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ - self.getShString(shdr.sh_name), - new_offset, - new_offset + existing_size, - }); - - const amt = try self.base.file.?.copyRangeAll(shdr.sh_offset, self.base.file.?, new_offset, existing_size); - // TODO figure out what to about this error condition - how to communicate it up. - if (amt != existing_size) return error.InputOutput; - - shdr.sh_offset = new_offset; - } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); - } - } - shdr.sh_size = needed_size; - self.markDirty(shdr_index); -} - -pub fn growNonAllocSection( - self: *Elf, - shdr_index: u32, - needed_size: u64, - min_alignment: u64, - requires_file_copy: bool, -) !void { - const shdr = &self.sections.items(.shdr)[shdr_index]; - assert(shdr.sh_flags & elf.SHF_ALLOC == 0); - - const allocated_size = self.allocatedSize(shdr.sh_offset); if (needed_size > allocated_size) { const existing_size = shdr.sh_size; shdr.sh_size = 0; - // Move all the symbols to a new file location. + // Must move the entire section. const new_offset = try self.findFreeSpace(needed_size, min_alignment); log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ @@ -627,22 +579,19 @@ pub fn growNonAllocSection( new_offset + existing_size, }); - if (requires_file_copy) { - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - if (amt != existing_size) return error.InputOutput; - } + const amt = try self.base.file.?.copyRangeAll( + shdr.sh_offset, + self.base.file.?, + new_offset, + existing_size, + ); + // TODO figure out what to about this error condition - how to communicate it up. + if (amt != existing_size) return error.InputOutput; shdr.sh_offset = new_offset; } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); } - shdr.sh_size = needed_size; - self.markDirty(shdr_index); } pub fn markDirty(self: *Elf, shdr_index: u32) void { @@ -751,10 +700,11 @@ pub fn allocateChunk(self: *Elf, args: struct { true; if (expand_section) { const needed_size = res.value + args.size; - if (shdr.sh_flags & elf.SHF_ALLOC != 0) - try self.growAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?) 
- else - try self.growNonAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?, true); + if (shdr.sh_type != elf.SHT_NOBITS) { + try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?); + } + shdr.sh_size = needed_size; + self.markDirty(args.shndx); } return res; From 906cf48e14b6faff9cdc040263b38b982b2f59af Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 1 Oct 2024 12:00:07 +0200 Subject: [PATCH 09/18] elf: fix creation of synthetic sections --- src/link/Elf/Symbol.zig | 15 ++++++++++----- src/link/Elf/file.zig | 10 +++++----- src/link/Elf/synthetic_sections.zig | 3 +-- 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/src/link/Elf/Symbol.zig b/src/link/Elf/Symbol.zig index cdf1b6f40a..6eaaedf28c 100644 --- a/src/link/Elf/Symbol.zig +++ b/src/link/Elf/Symbol.zig @@ -112,13 +112,16 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true, trampoline: bool if (symbol.flags.has_trampoline and opts.trampoline) { return symbol.trampolineAddress(elf_file); } - if (symbol.flags.has_plt and opts.plt) { - if (!symbol.flags.is_canonical and symbol.flags.has_got) { + if (opts.plt) { + if (symbol.flags.has_pltgot) { + assert(!symbol.flags.is_canonical); // We have a non-lazy bound function pointer, use that! return symbol.pltGotAddress(elf_file); } - // Lazy-bound function it is! - return symbol.pltAddress(elf_file); + if (symbol.flags.has_plt) { + // Lazy-bound function it is! + return symbol.pltAddress(elf_file); + } } if (symbol.atom(elf_file)) |atom_ptr| { if (!atom_ptr.alive) { @@ -171,7 +174,7 @@ pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 { } pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 { - if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0; + if (!symbol.flags.has_pltgot) return 0; const extras = symbol.extra(elf_file); const shdr = elf_file.sections.items(.shdr)[elf_file.plt_got_section_index.?]; const cpu_arch = elf_file.getTarget().cpu.arch; @@ -430,6 +433,8 @@ pub const Flags = packed struct { has_plt: bool = false, /// Whether the PLT entry is canonical. is_canonical: bool = false, + /// Whether the PLT entry is indirected via GOT. + has_pltgot: bool = false, /// Whether the symbol contains COPYREL directive. 
     needs_copy_rel: bool = false,
diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index a1db5110f0..32de82f962 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -99,18 +99,18 @@ pub const File = union(enum) {
             log.debug("'{s}' needs GOT", .{sym.name(ef)});
             _ = try ef.got.addGotSymbol(ref, ef);
         }
-        if (sym.flags.needs_plt and !sym.flags.has_plt) {
-            if (sym.flags.is_canonical) {
+        if (sym.flags.needs_plt) {
+            if (sym.flags.is_canonical and !sym.flags.has_plt) {
                 log.debug("'{s}' needs CPLT", .{sym.name(ef)});
                 sym.flags.@"export" = true;
                 try ef.plt.addSymbol(ref, ef);
-            } else if (sym.flags.needs_got and !sym.flags.has_got) {
+            } else if (sym.flags.needs_got and !sym.flags.has_pltgot) {
                 log.debug("'{s}' needs PLTGOT", .{sym.name(ef)});
                 try ef.plt_got.addSymbol(ref, ef);
-            } else {
+            } else if (!sym.flags.has_plt) {
                 log.debug("'{s}' needs PLT", .{sym.name(ef)});
                 try ef.plt.addSymbol(ref, ef);
-            }
+            } else unreachable;
         }
         if (sym.flags.needs_copy_rel and !sym.flags.has_copy_rel) {
             log.debug("'{s}' needs COPYREL", .{sym.name(ef)});
diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig
index f914bb8d84..6a1ae59a62 100644
--- a/src/link/Elf/synthetic_sections.zig
+++ b/src/link/Elf/synthetic_sections.zig
@@ -895,8 +895,7 @@ pub const PltGotSection = struct {
         const gpa = comp.gpa;
         const index = @as(u32, @intCast(plt_got.symbols.items.len));
         const symbol = elf_file.symbol(ref).?;
-        symbol.flags.has_plt = true;
-        symbol.flags.has_got = true;
+        symbol.flags.has_pltgot = true;
         symbol.addExtra(.{ .plt_got = index }, elf_file);
         try plt_got.symbols.append(gpa, ref);
     }

From 133aa709b07040838007ade90f9cfb1a5643fcff Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 2 Oct 2024 12:17:00 +0200
Subject: [PATCH 10/18] elf: do not panic if we have already created a PLT entry for a symbol

---
 src/link/Elf/file.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/link/Elf/file.zig b/src/link/Elf/file.zig
index 32de82f962..740987feb2 100644
--- a/src/link/Elf/file.zig
+++ b/src/link/Elf/file.zig
@@ -110,7 +110,7 @@ pub const File = union(enum) {
             } else if (!sym.flags.has_plt) {
                 log.debug("'{s}' needs PLT", .{sym.name(ef)});
                 try ef.plt.addSymbol(ref, ef);
-            } else unreachable;
+            }
         }
         if (sym.flags.needs_copy_rel and !sym.flags.has_copy_rel) {
             log.debug("'{s}' needs COPYREL", .{sym.name(ef)});

From cf2e462d91cf1e07f5aedbae404cce311de6b664 Mon Sep 17 00:00:00 2001
From: Jakub Konka
Date: Wed, 2 Oct 2024 13:14:22 +0200
Subject: [PATCH 11/18] elf: add some extra logging for created dynamic relocs

---
 src/link/Elf.zig | 9 +++++++++
 src/link/Elf/Atom.zig | 5 +++++
 src/link/Elf/synthetic_sections.zig | 26 ++++++++++++++++++++++++++
 3 files changed, 40 insertions(+)

diff --git a/src/link/Elf.zig b/src/link/Elf.zig
index fd7c54dd20..51987eb9cc 100644
--- a/src/link/Elf.zig
+++ b/src/link/Elf.zig
@@ -4834,6 +4834,7 @@ const RelaDyn = struct {
     sym: u64 = 0,
     type: u32,
     addend: i64 = 0,
+    target: ?*const Symbol = null,
 };
 pub fn addRelaDyn(self: *Elf, opts: RelaDyn) !void {
@@ -4842,6 +4843,13 @@ pub fn addRelaDynAssumeCapacity(self: *Elf, opts: RelaDyn) void {
+    relocs_log.debug(" {s}: [{x} => {d}({s})] + {x}", .{
+        relocation.fmtRelocType(opts.type, self.getTarget().cpu.arch),
+        opts.offset,
+        opts.sym,
+        if (opts.target) |sym| sym.name(self) else "",
+        opts.addend,
+    });
     self.rela_dyn.appendAssumeCapacity(.{
         .r_offset = opts.offset,
         .r_info = (opts.sym << 32) | opts.type,
@@ -5772,6 +5780,7 @@
const assert = std.debug.assert; const elf = std.elf; const fs = std.fs; const log = std.log.scoped(.link); +const relocs_log = std.log.scoped(.link_relocs); const state_log = std.log.scoped(.link_state); const math = std.math; const mem = std.mem; diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index ab0e98440b..854fb72afb 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -723,6 +723,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); } else { @@ -737,6 +738,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); } else { @@ -750,6 +752,7 @@ fn resolveDynAbsReloc( .sym = target.extra(elf_file).dynamic, .type = relocation.encode(.abs, cpu_arch), .addend = A, + .target = target, }); try applyDynamicReloc(A, elf_file, writer); }, @@ -759,6 +762,7 @@ fn resolveDynAbsReloc( .offset = P, .type = relocation.encode(.rel, cpu_arch), .addend = S + A, + .target = target, }); try applyDynamicReloc(S + A, elf_file, writer); }, @@ -769,6 +773,7 @@ fn resolveDynAbsReloc( .offset = P, .type = relocation.encode(.irel, cpu_arch), .addend = S_ + A, + .target = target, }); try applyDynamicReloc(S_ + A, elf_file, writer); }, diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig index 6a1ae59a62..987ee4bf9a 100644 --- a/src/link/Elf/synthetic_sections.zig +++ b/src/link/Elf/synthetic_sections.zig @@ -435,6 +435,8 @@ pub const GotSection = struct { const cpu_arch = elf_file.getTarget().cpu.arch; try elf_file.rela_dyn.ensureUnusedCapacity(gpa, got.numRela(elf_file)); + relocs_log.debug(".got", .{}); + for (got.entries.items) |entry| { const symbol = elf_file.symbol(entry.ref); const extra = if (symbol) |s| s.extra(elf_file) else null; @@ -447,6 +449,7 @@ pub const GotSection = struct { .offset = offset, .sym = extra.?.dynamic, .type = relocation.encode(.glob_dat, cpu_arch), + .target = symbol, }); continue; } @@ -455,6 +458,7 @@ pub const GotSection = struct { .offset = offset, .type = relocation.encode(.irel, cpu_arch), .addend = symbol.?.address(.{ .plt = false }, elf_file), + .target = symbol, }); continue; } @@ -465,6 +469,7 @@ pub const GotSection = struct { .offset = offset, .type = relocation.encode(.rel, cpu_arch), .addend = symbol.?.address(.{ .plt = false }, elf_file), + .target = symbol, }); } }, @@ -486,17 +491,20 @@ pub const GotSection = struct { .offset = offset, .sym = extra.?.dynamic, .type = relocation.encode(.dtpmod, cpu_arch), + .target = symbol, }); elf_file.addRelaDynAssumeCapacity(.{ .offset = offset + 8, .sym = extra.?.dynamic, .type = relocation.encode(.dtpoff, cpu_arch), + .target = symbol, }); } else if (is_dyn_lib) { elf_file.addRelaDynAssumeCapacity(.{ .offset = offset, .sym = extra.?.dynamic, .type = relocation.encode(.dtpmod, cpu_arch), + .target = symbol, }); } }, @@ -508,12 +516,14 @@ pub const GotSection = struct { .offset = offset, .sym = extra.?.dynamic, .type = relocation.encode(.tpoff, cpu_arch), + .target = symbol, }); } else if (is_dyn_lib) { elf_file.addRelaDynAssumeCapacity(.{ .offset = offset, .type = relocation.encode(.tpoff, cpu_arch), .addend = symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(), + .target = symbol, }); } }, @@ -525,6 +535,7 @@ pub const GotSection = struct { .sym = if (symbol.?.flags.import) extra.?.dynamic else 0, 
.type = relocation.encode(.tlsdesc, cpu_arch), .addend = if (symbol.?.flags.import) 0 else symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(), + .target = symbol, }); }, } @@ -681,6 +692,9 @@ pub const PltSection = struct { const gpa = comp.gpa; const cpu_arch = elf_file.getTarget().cpu.arch; try elf_file.rela_plt.ensureUnusedCapacity(gpa, plt.numRela()); + + relocs_log.debug(".plt", .{}); + for (plt.symbols.items) |ref| { const sym = elf_file.symbol(ref).?; assert(sym.flags.import); @@ -688,6 +702,14 @@ pub const PltSection = struct { const r_offset: u64 = @intCast(sym.gotPltAddress(elf_file)); const r_sym: u64 = extra.dynamic; const r_type = relocation.encode(.jump_slot, cpu_arch); + + relocs_log.debug(" {s}: [{x} => {d}({s})] + 0", .{ + relocation.fmtRelocType(r_type, cpu_arch), + r_offset, + r_sym, + sym.name(elf_file), + }); + elf_file.rela_plt.appendAssumeCapacity(.{ .r_offset = r_offset, .r_info = (r_sym << 32) | r_type, @@ -1053,6 +1075,9 @@ pub const CopyRelSection = struct { const gpa = comp.gpa; const cpu_arch = elf_file.getTarget().cpu.arch; try elf_file.rela_dyn.ensureUnusedCapacity(gpa, copy_rel.numRela()); + + relocs_log.debug(".copy.rel", .{}); + for (copy_rel.symbols.items) |ref| { const sym = elf_file.symbol(ref).?; assert(sym.flags.import and sym.flags.has_copy_rel); @@ -1525,6 +1550,7 @@ const elf = std.elf; const math = std.math; const mem = std.mem; const log = std.log.scoped(.link); +const relocs_log = std.log.scoped(.link_relocs); const relocation = @import("relocation.zig"); const std = @import("std"); From 0e5cd112ef6e50cbebd7a65a048f541023dcb49c Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 2 Oct 2024 13:32:13 +0200 Subject: [PATCH 12/18] elf: clear dynamic relocs before resolving relocs in atoms When resolving and writing atoms to file, we may add dynamic relocs to the output buffer so clear the buffers before that happens. --- src/link/Elf.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 51987eb9cc..14c532640f 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -987,6 +987,9 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod for (self.objects.items) |index| { self.file(index).?.object.dirty = false; } + // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? + self.rela_dyn.clearRetainingCapacity(); + self.rela_plt.clearRetainingCapacity(); if (self.zigObjectPtr()) |zo| { var has_reloc_errors = false; @@ -1017,6 +1020,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.writeShdrTable(); try self.writeAtoms(); try self.writeMergeSections(); + self.writeSyntheticSections() catch |err| switch (err) { error.RelocFailure => return error.FlushFailure, error.UnsupportedCpuArch => { @@ -4236,8 +4240,6 @@ fn writeSyntheticSections(self: *Elf) !void { } if (self.rela_dyn_section_index) |shndx| { - // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? - self.rela_dyn.clearRetainingCapacity(); const shdr = slice.items(.shdr)[shndx]; try self.got.addRela(self); try self.copy_rel.addRela(self); @@ -4270,8 +4272,6 @@ fn writeSyntheticSections(self: *Elf) !void { } if (self.rela_plt_section_index) |shndx| { - // TODO: would state tracking be more appropriate here? perhaps even custom relocation type? 
- self.rela_plt.clearRetainingCapacity(); const shdr = slice.items(.shdr)[shndx]; try self.plt.addRela(self); try self.base.file.?.pwriteAll(mem.sliceAsBytes(self.rela_plt.items), shdr.sh_offset); From 3d315f45d8237dabb9c0a1782177c92265dfc20b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 3 Oct 2024 17:04:15 +0200 Subject: [PATCH 13/18] elf: drastically simplify extracting section size logic --- src/link/Elf.zig | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 14c532640f..5478441482 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3868,22 +3868,7 @@ pub fn allocateAllocSections(self: *Elf) !void { if (shdr.sh_offset > 0) { // Get size actually commited to the output file. - const existing_size = if (self.zigObjectPtr()) |zo| for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.tdata_index, - zo.eh_frame_index, - }) |maybe_sym_index| { - const sect_sym_index = maybe_sym_index orelse continue; - const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?; - if (sect_atom_ptr.output_section_index != shndx) continue; - break sect_atom_ptr.size; - } else 0 else 0 + if (!slice.items(.atom_list_2)[shndx].dirty) - slice.items(.atom_list_2)[shndx].size - else - 0; + const existing_size = self.sectionSize(shndx); const amt = try self.base.file.?.copyRangeAll( shdr.sh_offset, self.base.file.?, @@ -5682,6 +5667,12 @@ const Section = struct { free_list: std.ArrayListUnmanaged(Ref) = .empty, }; +pub fn sectionSize(self: *Elf, shndx: u32) u64 { + const last_atom_ref = self.sections.items(.last_atom)[shndx]; + const atom_ptr = self.atom(last_atom_ref) orelse return 0; + return @as(u64, @intCast(atom_ptr.value)) + atom_ptr.size; +} + fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 { return switch (cpu_arch) { .mips, .mipsel, .mips64, .mips64el => "__start", From ef7bac4aa58abcf860c222f15eaba8e9c8c702a4 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 3 Oct 2024 17:18:28 +0200 Subject: [PATCH 14/18] elf: move setting section size back to Elf.growSection --- src/link/Dwarf.zig | 6 ++-- src/link/Elf.zig | 68 +++++++++++++++++++++++----------------------- 2 files changed, 36 insertions(+), 38 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 840cef69fe..cde40294b4 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -393,12 +393,10 @@ pub const Section = struct { const needed_size = len; const min_alignment = sec.alignment.toByteUnits().?; try elf_file.growSection(shndx, needed_size, min_alignment); - const shdr = &elf_file.sections.items(.shdr)[shndx]; - shdr.sh_size = needed_size; - elf_file.markDirty(shndx); + const shdr = elf_file.sections.items(.shdr)[shndx]; atom.size = needed_size; atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); - sec.len = len; + sec.len = needed_size; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { try d_sym.growSection(@intCast(sec.index), len, true, macho_file); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 5478441482..c8faaa33b6 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -558,43 +558,47 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { const shdr = &self.sections.items(.shdr)[shdr_index]; - assert(shdr.sh_type != elf.SHT_NOBITS); - const 
allocated_size = self.allocatedSize(shdr.sh_offset); - log.debug("allocated size {x} of '{s}', needed size {x}", .{ - allocated_size, - self.getShString(shdr.sh_name), - needed_size, - }); - - if (needed_size > allocated_size) { - const existing_size = shdr.sh_size; - shdr.sh_size = 0; - // Must move the entire section. - const new_offset = try self.findFreeSpace(needed_size, min_alignment); - - log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ + if (shdr.sh_type != elf.SHT_NOBITS) { + const allocated_size = self.allocatedSize(shdr.sh_offset); + log.debug("allocated size {x} of '{s}', needed size {x}", .{ + allocated_size, self.getShString(shdr.sh_name), - new_offset, - new_offset + existing_size, + needed_size, }); - const amt = try self.base.file.?.copyRangeAll( - shdr.sh_offset, - self.base.file.?, - new_offset, - existing_size, - ); - // TODO figure out what to about this error condition - how to communicate it up. - if (amt != existing_size) return error.InputOutput; + if (needed_size > allocated_size) { + const existing_size = shdr.sh_size; + shdr.sh_size = 0; + // Must move the entire section. + const new_offset = try self.findFreeSpace(needed_size, min_alignment); - shdr.sh_offset = new_offset; - } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { - try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); + log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ + self.getShString(shdr.sh_name), + new_offset, + new_offset + existing_size, + }); + + const amt = try self.base.file.?.copyRangeAll( + shdr.sh_offset, + self.base.file.?, + new_offset, + existing_size, + ); + // TODO figure out what to about this error condition - how to communicate it up. + if (amt != existing_size) return error.InputOutput; + + shdr.sh_offset = new_offset; + } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) { + try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); + } } + + shdr.sh_size = needed_size; + self.markDirty(shdr_index); } -pub fn markDirty(self: *Elf, shdr_index: u32) void { +fn markDirty(self: *Elf, shdr_index: u32) void { if (self.zigObjectPtr()) |zo| { for ([_]?Symbol.Index{ zo.debug_info_index, @@ -700,11 +704,7 @@ pub fn allocateChunk(self: *Elf, args: struct { true; if (expand_section) { const needed_size = res.value + args.size; - if (shdr.sh_type != elf.SHT_NOBITS) { - try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?); - } - shdr.sh_size = needed_size; - self.markDirty(args.shndx); + try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?); } return res; From c92c72d08cfb102206594d723e29dd0616290d31 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 4 Oct 2024 10:38:43 +0200 Subject: [PATCH 15/18] elf: do not create atoms for section symbols that do not require it --- src/link/Elf.zig | 7 +- src/link/Elf/Atom.zig | 3 +- src/link/Elf/ZigObject.zig | 191 +++++++++++++++++++++++-------------- 3 files changed, 123 insertions(+), 78 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c8faaa33b6..c4cbb107bc 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3472,12 +3472,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } } - if (self.zigObjectPtr()) |zo| { - for (zo.atoms_indexes.items) |atom_index| { - const atom_ptr = zo.atom(atom_index) orelse continue; - atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index]; - } - } + if (self.zigObjectPtr()) |zo| zo.resetShdrIndexes(backlinks); for (self.comdat_group_sections.items) |*cg| { 
cg.shndx = backlinks[cg.shndx]; diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 854fb72afb..ab87d4f38e 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -935,9 +935,10 @@ fn format2( _ = unused_fmt_string; const atom = ctx.atom; const elf_file = ctx.elf_file; - try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x})", .{ + try writer.print("atom({d}) : {s} : @{x} : shdr({d}) : align({x}) : size({x}) : prev({}) : next({})", .{ atom.atom_index, atom.name(elf_file), atom.address(elf_file), atom.output_section_index, atom.alignment.toByteUnits() orelse 0, atom.size, + atom.prev_atom_ref, atom.next_atom_ref, }); if (atom.fdes(elf_file).len > 0) { try writer.writeAll(" : fdes{ "); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index b27474fcab..957418f65e 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -101,6 +101,28 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .dwarf => |v| { var dwarf = Dwarf.init(&elf_file.base, v); + const addSectionSymbolWithAtom = struct { + fn addSectionSymbolWithAtom( + zo: *ZigObject, + allocator: Allocator, + name: [:0]const u8, + alignment: Atom.Alignment, + shndx: u32, + ) !Symbol.Index { + const name_off = try zo.addString(allocator, name); + const sym_index = try zo.addSectionSymbol(allocator, name_off, shndx); + const sym = zo.symbol(sym_index); + const atom_index = try zo.newAtom(allocator, name_off); + const atom_ptr = zo.atom(atom_index).?; + atom_ptr.alignment = alignment; + atom_ptr.output_section_index = shndx; + sym.ref = .{ .index = atom_index, .file = zo.index }; + zo.symtab.items(.shndx)[sym.esym_index] = atom_index; + zo.symtab.items(.elf_sym)[sym.esym_index].st_shndx = SHN_ATOM; + return sym_index; + } + }.addSectionSymbolWithAtom; + if (self.debug_str_index == null) { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_str"), @@ -110,7 +132,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_str_section_dirty = true; - self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec); + self.debug_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_str", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref; } @@ -121,7 +143,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_info_section_dirty = true; - self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec); + self.debug_info_index = try addSectionSymbolWithAtom(self, gpa, ".debug_info", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref; } @@ -132,7 +154,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_abbrev_section_dirty = true; - self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec); + self.debug_abbrev_index = try addSectionSymbolWithAtom(self, gpa, ".debug_abbrev", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref; } @@ -143,7 +165,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 16, }); self.debug_aranges_section_dirty = true; - self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec); + self.debug_aranges_index = try 
addSectionSymbolWithAtom(self, gpa, ".debug_aranges", .@"16", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref; } @@ -154,7 +176,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_section_dirty = true; - self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec); + self.debug_line_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref; } @@ -167,7 +189,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_str_section_dirty = true; - self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec); + self.debug_line_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line_str", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref; } @@ -178,7 +200,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_loclists_section_dirty = true; - self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec); + self.debug_loclists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_loclists", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref; } @@ -189,7 +211,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_rnglists_section_dirty = true; - self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec); + self.debug_rnglists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_rnglists", .@"1", osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref; } @@ -204,7 +226,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = ptr_size, }); self.eh_frame_section_dirty = true; - self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); + self.eh_frame_index = try addSectionSymbolWithAtom(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref; } @@ -997,7 +1019,7 @@ pub fn lowerUav( } const osec = if (self.data_relro_index) |sym_index| - self.symbol(sym_index).atom(elf_file).?.output_section_index + self.symbol(sym_index).outputShndx(elf_file).? else osec: { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data.rel.ro"), @@ -1006,7 +1028,7 @@ pub fn lowerUav( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec); + self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); break :osec osec; }; @@ -1112,24 +1134,14 @@ pub fn getOrCreateMetadataForNav( return gop.value_ptr.symbol_index; } -// FIXME: we always create an atom to basically store size and alignment, however, this is only true for -// sections that have a single atom like the debug sections. It would be a better solution to decouple this -// concept from the atom, maybe. 
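
The `addSectionSymbolWithAtom` helper above relies on the usual Zig workaround for the lack of local functions: declare the helper as a decl of an anonymous struct and reference it by name. A tiny self-contained sketch of that pattern, with a made-up `dup` helper standing in for the real one:

    const std = @import("std");

    test "local helper declared via an anonymous struct" {
        const dup = struct {
            fn dup(list: *std.ArrayList(u32), value: u32) !void {
                try list.append(value);
                try list.append(value);
            }
        }.dup;

        var list = std.ArrayList(u32).init(std.testing.allocator);
        defer list.deinit();
        try dup(&list, 7);
        try std.testing.expectEqualSlices(u32, &[_]u32{ 7, 7 }, list.items);
    }
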
-fn addSectionSymbol( - self: *ZigObject, - allocator: Allocator, - name: [:0]const u8, - alignment: Atom.Alignment, - shndx: u32, -) !Symbol.Index { - const name_off = try self.addString(allocator, name); - const index = try self.newSymbolWithAtom(allocator, name_off); +fn addSectionSymbol(self: *ZigObject, allocator: Allocator, name_off: u32, shndx: u32) !Symbol.Index { + const index = try self.newLocalSymbol(allocator, name_off); const sym = self.symbol(index); const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; esym.st_info |= elf.STT_SECTION; - const atom_ptr = self.atom(sym.ref.index).?; - atom_ptr.alignment = alignment; - atom_ptr.output_section_index = shndx; + // TODO create fake shdrs? + // esym.st_shndx = shndx; + sym.output_section_index = shndx; return index; } @@ -1148,7 +1160,7 @@ fn getNavShdrIndex( const nav_val = zcu.navValue(nav_index); if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) { if (self.text_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, @@ -1156,7 +1168,7 @@ fn getNavShdrIndex( .addralign = 1, .offset = std.math.maxInt(u64), }); - self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); return osec; } const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { @@ -1171,18 +1183,18 @@ fn getNavShdrIndex( } else true; if (is_bss) { if (self.tbss_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".tbss"), .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, .type = elf.SHT_NOBITS, .addralign = 1, }); - self.tbss_index = try self.addSectionSymbol(gpa, ".tbss", .@"1", osec); + self.tbss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tbss"), osec); return osec; } if (self.tdata_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, @@ -1190,12 +1202,12 @@ fn getNavShdrIndex( .addralign = 1, .offset = std.math.maxInt(u64), }); - self.tdata_index = try self.addSectionSymbol(gpa, ".tdata", .@"1", osec); + self.tdata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tdata"), osec); return osec; } if (is_const) { if (self.data_relro_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data.rel.ro"), .type = elf.SHT_PROGBITS, @@ -1203,14 +1215,14 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec); + self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); return osec; } if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { .Debug, 
.ReleaseSafe => { if (self.data_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data"), .type = elf.SHT_PROGBITS, @@ -1218,24 +1230,19 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_index = try self.addSectionSymbol( - gpa, - ".data", - Atom.Alignment.fromNonzeroByteUnits(ptr_size), - osec, - ); + self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec); return osec; }, .ReleaseFast, .ReleaseSmall => { if (self.bss_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_NOBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .name = try elf_file.insertShString(".bss"), .addralign = 1, }); - self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec); + self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec); return osec; }, }; @@ -1244,18 +1251,18 @@ fn getNavShdrIndex( } else true; if (is_bss) { if (self.bss_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .type = elf.SHT_NOBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .name = try elf_file.insertShString(".bss"), .addralign = 1, }); - self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec); + self.bss_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".bss"), osec); return osec; } if (self.data_index) |symbol_index| - return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + return self.symbol(symbol_index).outputShndx(elf_file).?; const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".data"), .type = elf.SHT_PROGBITS, @@ -1263,12 +1270,7 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_index = try self.addSectionSymbol( - gpa, - ".data", - Atom.Alignment.fromNonzeroByteUnits(ptr_size), - osec, - ); + self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec); return osec; } @@ -1521,7 +1523,7 @@ pub fn updateFunc( .addralign = 1, .offset = std.math.maxInt(u64), }); - self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); break :osec osec; }; const name_off = try self.addString(gpa, name); @@ -1688,7 +1690,7 @@ fn updateLazySymbol( const output_section_index = switch (sym.kind) { .code => if (self.text_index) |sym_index| - self.symbol(sym_index).atom(elf_file).?.output_section_index + self.symbol(sym_index).outputShndx(elf_file).? 
else osec: { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".text"), @@ -1697,11 +1699,11 @@ fn updateLazySymbol( .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .offset = std.math.maxInt(u64), }); - self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); break :osec osec; }, .const_data => if (self.rodata_index) |sym_index| - self.symbol(sym_index).atom(elf_file).?.output_section_index + self.symbol(sym_index).outputShndx(elf_file).? else osec: { const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".rodata"), @@ -1710,7 +1712,7 @@ fn updateLazySymbol( .flags = elf.SHF_ALLOC, .offset = std.math.maxInt(u64), }); - self.rodata_index = try self.addSectionSymbol(gpa, ".rodata", .@"1", osec); + self.rodata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".rodata"), osec); break :osec osec; }, }; @@ -2011,20 +2013,10 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { } shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); - const sect_atom_ptr = for ([_]?Symbol.Index{ - self.text_index, - self.rodata_index, - self.data_relro_index, - self.data_index, - self.tdata_index, - }) |maybe_sym_index| { - const sect_sym_index = maybe_sym_index orelse continue; - const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?; - if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr; - } else null; - if (sect_atom_ptr) |sap| { - sap.size = shdr.sh_size; - sap.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); + if (self.sectionSymbol(atom_ptr.output_section_index, elf_file)) |sym| { + assert(sym.atom(elf_file) == null and sym.mergeSubsection(elf_file) == null); + const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; + esym.st_size += atom_ptr.size + Elf.padToIdeal(atom_ptr.size); } // This function can also reallocate an atom. 
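
As the comment above notes, allocateAtom may also reallocate an atom, in which case the atom has to be unhooked from its prev/next chain before being re-linked at the new position. A minimal sketch of that pointer surgery on a made-up doubly linked node type (not the linker's Atom):

    const std = @import("std");

    const Node = struct {
        prev: ?*Node = null,
        next: ?*Node = null,
    };

    /// Detaches `node` from its neighbours so it can be re-inserted elsewhere,
    /// mirroring the prev/next link updates done when an atom moves.
    fn unplug(node: *Node) void {
        if (node.prev) |p| p.next = node.next;
        if (node.next) |n| n.prev = node.prev;
        node.prev = null;
        node.next = null;
    }

    test unplug {
        var a = Node{};
        var b = Node{};
        var c = Node{};
        a.next = &b;
        b.prev = &a;
        b.next = &c;
        c.prev = &b;
        unplug(&b);
        try std.testing.expect(a.next.? == &c);
        try std.testing.expect(c.prev.? == &a);
        try std.testing.expect(b.prev == null and b.next == null);
    }
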
@@ -2053,10 +2045,67 @@ fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { } } +pub fn resetShdrIndexes(self: *ZigObject, backlinks: anytype) void { + for (self.atoms_indexes.items) |atom_index| { + const atom_ptr = self.atom(atom_index) orelse continue; + atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index]; + } + inline for ([_]?Symbol.Index{ + self.text_index, + self.rodata_index, + self.data_relro_index, + self.data_index, + self.bss_index, + self.tdata_index, + self.tbss_index, + self.eh_frame_index, + self.debug_info_index, + self.debug_abbrev_index, + self.debug_aranges_index, + self.debug_str_index, + self.debug_line_index, + self.debug_line_str_index, + self.debug_loclists_index, + self.debug_rnglists_index, + }) |maybe_sym_index| { + if (maybe_sym_index) |sym_index| { + const sym = self.symbol(sym_index); + sym.output_section_index = backlinks[sym.output_section_index]; + } + } +} + pub fn asFile(self: *ZigObject) File { return .{ .zig_object = self }; } +pub fn sectionSymbol(self: *ZigObject, shndx: u32, elf_file: *Elf) ?*Symbol { + inline for ([_]?Symbol.Index{ + self.text_index, + self.rodata_index, + self.data_relro_index, + self.data_index, + self.bss_index, + self.tdata_index, + self.tbss_index, + self.eh_frame_index, + self.debug_info_index, + self.debug_abbrev_index, + self.debug_aranges_index, + self.debug_str_index, + self.debug_line_index, + self.debug_line_str_index, + self.debug_loclists_index, + self.debug_rnglists_index, + }) |maybe_sym_index| { + if (maybe_sym_index) |sym_index| { + const sym = self.symbol(sym_index); + if (sym.outputShndx(elf_file) == shndx) return sym; + } + } + return null; +} + pub fn addString(self: *ZigObject, allocator: Allocator, string: []const u8) !u32 { return self.strtab.insert(allocator, string); } From e448fb960137e5aa83a1e7c2e6cbbf688f4fce05 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 4 Oct 2024 12:46:57 +0200 Subject: [PATCH 16/18] elf: change how we manage debug atoms in Dwarf linker --- src/link/Dwarf.zig | 44 +++++++++++++++----------- src/link/Elf.zig | 53 ++++++++++++++----------------- src/link/Elf/Atom.zig | 11 ++++++- src/link/Elf/ZigObject.zig | 60 ++++++++++++++++++------------------ src/link/Elf/relocatable.zig | 26 +++++----------- 5 files changed, 97 insertions(+), 97 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index cde40294b4..59e0cae934 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -389,14 +389,20 @@ pub const Section = struct { if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; - const shndx = atom.output_section_index; - const needed_size = len; - const min_alignment = sec.alignment.toByteUnits().?; - try elf_file.growSection(shndx, needed_size, min_alignment); - const shdr = elf_file.sections.items(.shdr)[shndx]; - atom.size = needed_size; - atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); - sec.len = needed_size; + const old_size = atom.size; + atom.size = len; + atom.alignment = sec.alignment; + sec.len = len; + if (old_size > 0) { + if (!atom.alignment.check(@intCast(atom.value)) or atom.size > atom.fileCapacity(elf_file)) { + try zo.allocateAtom(atom, false, elf_file); + } else { + const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index]; + shdr.sh_size = (shdr.sh_size - old_size) + atom.size; + } + } else { + try zo.allocateAtom(atom, false, elf_file); + } } else if (dwarf.bin_file.cast(.macho)) 
|macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { try d_sym.growSection(@intCast(sec.index), len, true, macho_file); @@ -417,11 +423,15 @@ pub const Section = struct { if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; - const shndx = atom.output_section_index; - const shdr = &elf_file.sections.items(.shdr)[shndx]; - atom.size = sec.len; - shdr.sh_offset += len; - shdr.sh_size = sec.len; + if (atom.prevAtom(elf_file)) |_| { + // FIXME:JK trimming/shrinking has to be reworked on ZigObject/Elf level + atom.value += len; + } else { + const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index]; + shdr.sh_offset += len; + atom.value = 0; + } + atom.size -= len; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index] @@ -910,11 +920,9 @@ const Entry = struct { if (std.debug.runtime_safety) { log.err("missing {} from {s}", .{ @as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)), - std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| sh_name: { - const zo = elf_file.zigObjectPtr().?; - const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index; - break :sh_name elf_file.shstrtab.items[elf_file.sections.items(.shdr)[shndx].sh_name..]; - } else if (dwarf.bin_file.cast(.macho)) |macho_file| + std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| + elf_file.zigObjectPtr().?.symbol(sec.index).name(elf_file) + else if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index].segname else diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c4cbb107bc..511ce57b3e 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -573,10 +573,10 @@ pub fn growSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: // Must move the entire section. 
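
When a section can no longer grow in place, growSection relocates its bytes wholesale to a freshly found offset. A minimal sketch of that move, assuming an already-open std.fs.File and a caller-provided destination offset (the error name mirrors the one used in the hunk above):

    const std = @import("std");

    /// Copies `existing_size` bytes of a section from `old_offset` to
    /// `new_offset` within the same file, failing if the copy came up short.
    fn moveSection(
        file: std.fs.File,
        old_offset: u64,
        new_offset: u64,
        existing_size: u64,
    ) !u64 {
        const amt = try file.copyRangeAll(old_offset, file, new_offset, existing_size);
        if (amt != existing_size) return error.InputOutput;
        return new_offset;
    }
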
const new_offset = try self.findFreeSpace(needed_size, min_alignment); - log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ + log.debug("moving '{s}' from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), + shdr.sh_offset, new_offset, - new_offset + existing_size, }); const amt = try self.base.file.?.copyRangeAll( @@ -691,13 +691,6 @@ pub fn allocateChunk(self: *Elf, args: struct { } }; - log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{ - args.size, - args.alignment.toByteUnits().?, - shdr.sh_addr + res.value, - shdr.sh_offset + res.value, - }); - const expand_section = if (self.atom(res.placement)) |placement_atom| placement_atom.nextAtom(self) == null else @@ -707,6 +700,18 @@ pub fn allocateChunk(self: *Elf, args: struct { try self.growSection(args.shndx, needed_size, args.alignment.toByteUnits().?); } + log.debug("allocated chunk (size({x}),align({x})) in {s} at 0x{x} (file(0x{x}))", .{ + args.size, + args.alignment.toByteUnits().?, + self.getShString(shdr.sh_name), + shdr.sh_addr + res.value, + shdr.sh_offset + res.value, + }); + log.debug(" placement {}, {s}", .{ + res.placement, + if (self.atom(res.placement)) |atom_ptr| atom_ptr.name(self) else "", + }); + return res; } @@ -1796,6 +1801,7 @@ pub fn initOutputSection(self: *Elf, args: struct { .type = @"type", .flags = flags, .name = try self.insertShString(name), + .offset = std.math.maxInt(u64), }); return out_shndx; } @@ -3898,27 +3904,14 @@ pub fn allocateNonAllocSections(self: *Elf) !void { shdr.sh_size = 0; const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign); - if (self.zigObjectPtr()) |zo| blk: { - const existing_size = for ([_]?Symbol.Index{ - zo.debug_info_index, - zo.debug_abbrev_index, - zo.debug_aranges_index, - zo.debug_str_index, - zo.debug_line_index, - zo.debug_line_str_index, - zo.debug_loclists_index, - zo.debug_rnglists_index, - }) |maybe_sym_index| { - const sym_index = maybe_sym_index orelse continue; - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(self).?; - if (atom_ptr.output_section_index == shndx) break atom_ptr.size; - } else break :blk; - log.debug("moving {s} from 0x{x} to 0x{x}", .{ - self.getShString(shdr.sh_name), - shdr.sh_offset, - new_offset, - }); + log.debug("moving {s} from 0x{x} to 0x{x}", .{ + self.getShString(shdr.sh_name), + shdr.sh_offset, + new_offset, + }); + + if (shdr.sh_offset > 0) { + const existing_size = self.sectionSize(@intCast(shndx)); const amt = try self.base.file.?.copyRangeAll( shdr.sh_offset, self.base.file.?, diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index ab87d4f38e..b229114d52 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -118,10 +118,19 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 { return @intCast(next_addr - self.address(elf_file)); } +pub fn fileCapacity(self: Atom, elf_file: *Elf) u64 { + const self_off = self.offset(elf_file); + const next_off = if (self.nextAtom(elf_file)) |next_atom| + next_atom.offset(elf_file) + else + self_off + elf_file.allocatedSize(self_off); + return @intCast(next_off - self_off); +} + pub fn freeListEligible(self: Atom, elf_file: *Elf) bool { // No need to keep a free list node for the last block. 
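
The new fileCapacity above measures how far an atom may grow on disk before colliding with its neighbour. The same idea with bare offsets, where `next_off` is null for the last atom in a section and `allocated_end` stands in for the allocated-size query (both names are made up here):

    const std = @import("std");

    /// File-space capacity of a chunk: the distance to the next chunk's
    /// offset, or to the end of the allocated region if it is the last one.
    fn chunkCapacity(self_off: u64, next_off: ?u64, allocated_end: u64) u64 {
        const end = next_off orelse allocated_end;
        return end - self_off;
    }

    test chunkCapacity {
        try std.testing.expectEqual(@as(u64, 0x8), chunkCapacity(0x10, 0x18, 0x40));
        try std.testing.expectEqual(@as(u64, 0x30), chunkCapacity(0x10, null, 0x40));
    }
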
const next = self.nextAtom(elf_file) orelse return false; - const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file)); + const cap: u64 = @intCast(next.value - self.value); const ideal_cap = Elf.padToIdeal(self.size); if (cap <= ideal_cap) return false; const surplus = cap - ideal_cap; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 957418f65e..0901920ee5 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -130,10 +130,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .entsize = 1, .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_str_section_dirty = true; self.debug_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_str", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref; } if (self.debug_info_index == null) { @@ -141,10 +141,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_info"), .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_info_section_dirty = true; self.debug_info_index = try addSectionSymbolWithAtom(self, gpa, ".debug_info", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref; } if (self.debug_abbrev_index == null) { @@ -152,10 +152,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_abbrev"), .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_abbrev_section_dirty = true; self.debug_abbrev_index = try addSectionSymbolWithAtom(self, gpa, ".debug_abbrev", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref; } if (self.debug_aranges_index == null) { @@ -163,10 +163,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_aranges"), .type = elf.SHT_PROGBITS, .addralign = 16, + .offset = std.math.maxInt(u64), }); self.debug_aranges_section_dirty = true; self.debug_aranges_index = try addSectionSymbolWithAtom(self, gpa, ".debug_aranges", .@"16", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref; } if (self.debug_line_index == null) { @@ -174,10 +174,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_line"), .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_line_section_dirty = true; self.debug_line_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref; } if (self.debug_line_str_index == null) { @@ -187,10 +187,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .entsize = 1, .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_line_str_section_dirty = true; self.debug_line_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line_str", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref; } if (self.debug_loclists_index == null) { @@ -198,10 +198,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_loclists"), .type = 
elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_loclists_section_dirty = true; self.debug_loclists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_loclists", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref; } if (self.debug_rnglists_index == null) { @@ -209,10 +209,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_rnglists"), .type = elf.SHT_PROGBITS, .addralign = 1, + .offset = std.math.maxInt(u64), }); self.debug_rnglists_section_dirty = true; self.debug_rnglists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_rnglists", .@"1", osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref; } if (self.eh_frame_index == null) { @@ -224,10 +224,10 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = ptr_size, + .offset = std.math.maxInt(u64), }); self.eh_frame_section_dirty = true; self.eh_frame_index = try addSectionSymbolWithAtom(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); - elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref; } try dwarf.initMetadata(); @@ -1318,7 +1318,7 @@ fn updateNavCode( const capacity = atom_ptr.capacity(elf_file); const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value)); if (need_realloc) { - try self.growAtom(atom_ptr, elf_file); + try self.allocateAtom(atom_ptr, true, elf_file); log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value }); if (old_vaddr != atom_ptr.value) { sym.value = 0; @@ -1328,7 +1328,7 @@ fn updateNavCode( // TODO shrink section size } } else { - try self.allocateAtom(atom_ptr, elf_file); + try self.allocateAtom(atom_ptr, true, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); sym.value = 0; esym.st_value = 0; @@ -1403,7 +1403,7 @@ fn updateTlv( const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index); assert(!gop.found_existing); // TODO incremental updates - try self.allocateAtom(atom_ptr, elf_file); + try self.allocateAtom(atom_ptr, true, elf_file); sym.value = 0; esym.st_value = 0; @@ -1729,7 +1729,7 @@ fn updateLazySymbol( atom_ptr.size = code.len; atom_ptr.output_section_index = output_section_index; - try self.allocateAtom(atom_ptr, elf_file); + try self.allocateAtom(atom_ptr, true, elf_file); errdefer self.freeNavMetadata(elf_file, symbol_index); local_sym.value = 0; @@ -1784,7 +1784,7 @@ fn lowerConst( atom_ptr.size = code.len; atom_ptr.output_section_index = output_section_index; - try self.allocateAtom(atom_ptr, elf_file); + try self.allocateAtom(atom_ptr, true, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file)); @@ -1981,17 +1981,27 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { } } -fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { +pub fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, requires_padding: bool, elf_file: *Elf) !void { + const slice = elf_file.sections.slice(); + const shdr = &slice.items(.shdr)[atom_ptr.output_section_index]; + const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index]; + + // FIXME:JK this only works if this atom is the only atom in the output section + // In every other case, we need to 
redo the prev/next links + if (last_atom_ref.eql(atom_ptr.ref())) last_atom_ref.* = .{}; + const alloc_res = try elf_file.allocateChunk(.{ .shndx = atom_ptr.output_section_index, .size = atom_ptr.size, .alignment = atom_ptr.alignment, + .requires_padding = requires_padding, }); atom_ptr.value = @intCast(alloc_res.value); - - const slice = elf_file.sections.slice(); - const shdr = &slice.items(.shdr)[atom_ptr.output_section_index]; - const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index]; + log.debug("allocated {s} at {x}\n placement {?}", .{ + atom_ptr.name(elf_file), + atom_ptr.offset(elf_file), + alloc_res.placement, + }); const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom| placement_atom.nextAtom(elf_file) == null @@ -2013,12 +2023,6 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { } shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); - if (self.sectionSymbol(atom_ptr.output_section_index, elf_file)) |sym| { - assert(sym.atom(elf_file) == null and sym.mergeSubsection(elf_file) == null); - const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; - esym.st_size += atom_ptr.size + Elf.padToIdeal(atom_ptr.size); - } - // This function can also reallocate an atom. // In this case we need to "unplug" it from its previous location before // plugging it in to its new location. @@ -2037,12 +2041,8 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 }; atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 }; } -} -fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { - if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) { - try self.allocateAtom(atom_ptr, elf_file); - } + log.debug(" prev {?}, next {?}", .{ atom_ptr.prev_atom_ref, atom_ptr.next_atom_ref }); } pub fn resetShdrIndexes(self: *ZigObject, backlinks: anytype) void { diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 772025f7db..b9f065a2ab 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -394,24 +394,14 @@ fn allocateAllocSections(elf_file: *Elf) !void { shdr.sh_size = 0; const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign); - if (elf_file.zigObjectPtr()) |zo| blk: { - const existing_size = for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.tdata_index, - zo.eh_frame_index, - }) |maybe_sym_index| { - const sect_sym_index = maybe_sym_index orelse continue; - const sect_atom_ptr = zo.symbol(sect_sym_index).atom(elf_file).?; - if (sect_atom_ptr.output_section_index == shndx) break sect_atom_ptr.size; - } else break :blk; - log.debug("moving {s} from 0x{x} to 0x{x}", .{ - elf_file.getShString(shdr.sh_name), - shdr.sh_offset, - new_offset, - }); + log.debug("moving {s} from 0x{x} to 0x{x}", .{ + elf_file.getShString(shdr.sh_name), + shdr.sh_offset, + new_offset, + }); + + if (shdr.sh_offset > 0) { + const existing_size = elf_file.sectionSize(@intCast(shndx)); const amt = try elf_file.base.file.?.copyRangeAll( shdr.sh_offset, elf_file.base.file.?, From e9d819a29e928d1f9069e12609806dacb558bb3f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 8 Oct 2024 15:10:38 +0200 Subject: [PATCH 17/18] elf: clean up how we create un-allocated sections --- src/link/Elf.zig | 25 +------------------------ src/link/Elf/ZigObject.zig | 18 ------------------ 
src/link/Elf/relocatable.zig | 2 -- 3 files changed, 1 insertion(+), 44 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 511ce57b3e..62bf6be63a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1801,7 +1801,6 @@ pub fn initOutputSection(self: *Elf, args: struct { .type = @"type", .flags = flags, .name = try self.insertShString(name), - .offset = std.math.maxInt(u64), }); return out_shndx; } @@ -2867,7 +2866,6 @@ fn initSyntheticSections(self: *Elf) !void { elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = ptr_size, - .offset = std.math.maxInt(u64), }); } if (comp.link_eh_frame_hdr and self.eh_frame_hdr_section_index == null) { @@ -2876,7 +2874,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = 4, - .offset = std.math.maxInt(u64), }); } } @@ -2887,7 +2884,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .addralign = ptr_size, - .offset = std.math.maxInt(u64), }); } @@ -2897,7 +2893,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .addralign = @alignOf(u64), - .offset = std.math.maxInt(u64), }); } @@ -2919,7 +2914,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .addralign = @alignOf(elf.Elf64_Rela), .entsize = @sizeOf(elf.Elf64_Rela), - .offset = std.math.maxInt(u64), }); } @@ -2930,7 +2924,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .addralign = 16, - .offset = std.math.maxInt(u64), }); } if (self.rela_plt_section_index == null) { @@ -2940,7 +2933,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .addralign = @alignOf(elf.Elf64_Rela), .entsize = @sizeOf(elf.Elf64_Rela), - .offset = std.math.maxInt(u64), }); } } @@ -2951,7 +2943,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .addralign = 16, - .offset = std.math.maxInt(u64), }); } @@ -2960,7 +2951,6 @@ fn initSyntheticSections(self: *Elf) !void { .name = try self.insertShString(".copyrel"), .type = elf.SHT_NOBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); } @@ -2979,7 +2969,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = 1, - .offset = std.math.maxInt(u64), }); } @@ -2991,7 +2980,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } if (self.dynamic_section_index == null) { @@ -3001,7 +2989,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_DYNAMIC, .entsize = @sizeOf(elf.Elf64_Dyn), .addralign = @alignOf(elf.Elf64_Dyn), - .offset = std.math.maxInt(u64), }); } if (self.dynsymtab_section_index == null) { @@ -3012,7 +2999,6 @@ fn initSyntheticSections(self: *Elf) !void { .addralign = @alignOf(elf.Elf64_Sym), .entsize = @sizeOf(elf.Elf64_Sym), .info = 1, - .offset = std.math.maxInt(u64), }); } if (self.hash_section_index == null) { @@ -3022,7 +3008,6 @@ fn initSyntheticSections(self: *Elf) !void { .type = elf.SHT_HASH, .addralign = 4, .entsize = 4, - .offset = std.math.maxInt(u64), }); } if (self.gnu_hash_section_index == null) { @@ -3031,7 +3016,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .type = elf.SHT_GNU_HASH, .addralign = 8, - .offset = std.math.maxInt(u64), }); } @@ -3047,7 +3031,6 @@ fn 
initSyntheticSections(self: *Elf) !void { .type = elf.SHT_GNU_VERSYM, .addralign = @alignOf(elf.Elf64_Versym), .entsize = @sizeOf(elf.Elf64_Versym), - .offset = std.math.maxInt(u64), }); } if (self.verneed_section_index == null) { @@ -3056,7 +3039,6 @@ fn initSyntheticSections(self: *Elf) !void { .flags = elf.SHF_ALLOC, .type = elf.SHT_GNU_VERNEED, .addralign = @alignOf(elf.Elf64_Verneed), - .offset = std.math.maxInt(u64), }); } } @@ -3077,7 +3059,6 @@ pub fn initSymtab(self: *Elf) !void { .type = elf.SHT_SYMTAB, .addralign = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym), .entsize = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym), - .offset = std.math.maxInt(u64), }); } if (self.strtab_section_index == null) { @@ -3086,7 +3067,6 @@ pub fn initSymtab(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } } @@ -3098,7 +3078,6 @@ pub fn initShStrtab(self: *Elf) !void { .type = elf.SHT_STRTAB, .entsize = 1, .addralign = 1, - .offset = std.math.maxInt(u64), }); } } @@ -4760,7 +4739,6 @@ pub fn addRelaShdr(self: *Elf, name: u32, shndx: u32) !u32 { .entsize = entsize, .info = shndx, .addralign = addralign, - .offset = std.math.maxInt(u64), }); } @@ -4772,7 +4750,6 @@ pub const AddSectionOpts = struct { info: u32 = 0, addralign: u64 = 0, entsize: u64 = 0, - offset: u64 = 0, }; pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 { @@ -4784,7 +4761,7 @@ pub fn addSection(self: *Elf, opts: AddSectionOpts) !u32 { .sh_type = opts.type, .sh_flags = opts.flags, .sh_addr = 0, - .sh_offset = opts.offset, + .sh_offset = 0, .sh_size = 0, .sh_link = opts.link, .sh_info = opts.info, diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 0901920ee5..846bcac15c 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -130,7 +130,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .entsize = 1, .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_str_section_dirty = true; self.debug_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_str", .@"1", osec); @@ -141,7 +140,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_info"), .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_info_section_dirty = true; self.debug_info_index = try addSectionSymbolWithAtom(self, gpa, ".debug_info", .@"1", osec); @@ -152,7 +150,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_abbrev"), .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_abbrev_section_dirty = true; self.debug_abbrev_index = try addSectionSymbolWithAtom(self, gpa, ".debug_abbrev", .@"1", osec); @@ -163,7 +160,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_aranges"), .type = elf.SHT_PROGBITS, .addralign = 16, - .offset = std.math.maxInt(u64), }); self.debug_aranges_section_dirty = true; self.debug_aranges_index = try addSectionSymbolWithAtom(self, gpa, ".debug_aranges", .@"16", osec); @@ -174,7 +170,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_line"), .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_line_section_dirty = true; 
self.debug_line_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line", .@"1", osec); @@ -187,7 +182,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .entsize = 1, .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_line_str_section_dirty = true; self.debug_line_str_index = try addSectionSymbolWithAtom(self, gpa, ".debug_line_str", .@"1", osec); @@ -198,7 +192,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_loclists"), .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_loclists_section_dirty = true; self.debug_loclists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_loclists", .@"1", osec); @@ -209,7 +202,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .name = try elf_file.insertShString(".debug_rnglists"), .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.debug_rnglists_section_dirty = true; self.debug_rnglists_index = try addSectionSymbolWithAtom(self, gpa, ".debug_rnglists", .@"1", osec); @@ -224,7 +216,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = ptr_size, - .offset = std.math.maxInt(u64), }); self.eh_frame_section_dirty = true; self.eh_frame_index = try addSectionSymbolWithAtom(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); @@ -1026,7 +1017,6 @@ pub fn lowerUav( .type = elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); break :osec osec; @@ -1166,7 +1156,6 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .name = try elf_file.insertShString(".text"), .addralign = 1, - .offset = std.math.maxInt(u64), }); self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); return osec; @@ -1200,7 +1189,6 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, .name = try elf_file.insertShString(".tdata"), .addralign = 1, - .offset = std.math.maxInt(u64), }); self.tdata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".tdata"), osec); return osec; @@ -1213,7 +1201,6 @@ fn getNavShdrIndex( .type = elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); self.data_relro_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data.rel.ro"), osec); return osec; @@ -1228,7 +1215,6 @@ fn getNavShdrIndex( .type = elf.SHT_PROGBITS, .addralign = ptr_size, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec); return osec; @@ -1268,7 +1254,6 @@ fn getNavShdrIndex( .type = elf.SHT_PROGBITS, .addralign = ptr_size, .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), }); self.data_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".data"), osec); return osec; @@ -1521,7 +1506,6 @@ pub fn updateFunc( .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, .type = elf.SHT_PROGBITS, .addralign = 1, - .offset = std.math.maxInt(u64), }); self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); break :osec osec; @@ -1697,7 +1681,6 @@ fn updateLazySymbol( .type 
= elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, - .offset = std.math.maxInt(u64), }); self.text_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".text"), osec); break :osec osec; @@ -1710,7 +1693,6 @@ fn updateLazySymbol( .type = elf.SHT_PROGBITS, .addralign = 1, .flags = elf.SHF_ALLOC, - .offset = std.math.maxInt(u64), }); self.rodata_index = try self.addSectionSymbol(gpa, try self.addString(gpa, ".rodata"), osec); break :osec osec; diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index b9f065a2ab..8768c1d754 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -295,7 +295,6 @@ fn initSections(elf_file: *Elf) !void { elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC, .addralign = elf_file.ptrWidthBytes(), - .offset = std.math.maxInt(u64), }); } elf_file.eh_frame_rela_section_index = elf_file.sectionByName(".rela.eh_frame") orelse @@ -324,7 +323,6 @@ fn initComdatGroups(elf_file: *Elf) !void { .type = elf.SHT_GROUP, .entsize = @sizeOf(u32), .addralign = @alignOf(u32), - .offset = std.math.maxInt(u64), }), .cg_ref = .{ .index = @intCast(cg_index), .file = index }, }; From 73c3b9b8ab056c3bcbde3a7a9b893b8814553c45 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 8 Oct 2024 16:22:23 +0200 Subject: [PATCH 18/18] elf: revert growing atoms in Dwarf.resize for standard alloc --- src/link/Dwarf.zig | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 59e0cae934..f18e636d2c 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -389,20 +389,10 @@ pub const Section = struct { if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const atom = zo.symbol(sec.index).atom(elf_file).?; - const old_size = atom.size; atom.size = len; atom.alignment = sec.alignment; sec.len = len; - if (old_size > 0) { - if (!atom.alignment.check(@intCast(atom.value)) or atom.size > atom.fileCapacity(elf_file)) { - try zo.allocateAtom(atom, false, elf_file); - } else { - const shdr = &elf_file.sections.items(.shdr)[atom.output_section_index]; - shdr.sh_size = (shdr.sh_size - old_size) + atom.size; - } - } else { - try zo.allocateAtom(atom, false, elf_file); - } + try zo.allocateAtom(atom, false, elf_file); } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { try d_sym.growSection(@intCast(sec.index), len, true, macho_file);
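
The grow-in-place shortcut being backed out here hinged on a single predicate: reallocate only when the atom's value is no longer aligned or the data outgrew its file capacity. A standalone sketch of that condition with plain integers, assuming a power-of-two alignment (names are illustrative, not linker API):

    const std = @import("std");

    /// True when a chunk must be moved: either its current position no longer
    /// satisfies `alignment` (a power of two), or it outgrew `capacity`.
    fn needsRealloc(value: u64, size: u64, capacity: u64, alignment: u64) bool {
        return value % alignment != 0 or size > capacity;
    }

    test needsRealloc {
        try std.testing.expect(!needsRealloc(0x20, 8, 16, 8));
        try std.testing.expect(needsRealloc(0x21, 8, 16, 8)); // misaligned
        try std.testing.expect(needsRealloc(0x20, 32, 16, 8)); // over capacity
    }
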