elf: migrate to new non-allocateDeclIndexes API
commit e1b9800ffa
parent fb8d754a4b
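The heart of the change: instead of the frontend eagerly calling `allocateDeclIndexes` before codegen, each ELF atom now lazily allocates its symbol and GOT slot the first time any code path needs them, via an idempotent `ensureInitialized`. Below is a minimal, self-contained sketch of that lazy-initialization pattern; the struct, the counters, and `main` are simplified stand-ins for the real `Atom` in src/link/Elf/Atom.zig, not the linker's actual types.

const std = @import("std");

const Atom = struct {
    /// 0 is the sentinel meaning "no symbol allocated yet", matching the
    /// real Atom's local_sym_index convention.
    local_sym_index: u32 = 0,
    offset_table_index: u32 = 0,

    fn getSymbolIndex(self: Atom) ?u32 {
        if (self.local_sym_index == 0) return null;
        return self.local_sym_index;
    }

    /// Idempotent: every code path that needs the symbol may call this,
    /// so no caller has to know whether indexes were allocated up front.
    fn ensureInitialized(self: *Atom, next_sym: *u32, next_got: *u32) void {
        if (self.getSymbolIndex() != null) return; // already initialized
        next_sym.* += 1;
        self.local_sym_index = next_sym.*;
        next_got.* += 1;
        self.offset_table_index = next_got.*;
    }
};

pub fn main() void {
    var next_sym: u32 = 0;
    var next_got: u32 = 0;
    var atom = Atom{};
    atom.ensureInitialized(&next_sym, &next_got);
    atom.ensureInitialized(&next_sym, &next_got); // no-op the second time
    std.debug.print("sym {any}, got slot {d}\n", .{ atom.getSymbolIndex(), atom.offset_table_index });
}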
@@ -5324,7 +5324,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
         // Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we
         // must call `freeDecl` in the linker backend now.
         switch (mod.comp.bin_file.tag) {
-            .macho, .c => {}, // this linker backend has already migrated to the new API
+            .elf, .macho, .c => {}, // this linker backend has already migrated to the new API
             else => if (decl.has_tv) {
                 if (decl.ty.isFnOrHasRuntimeBits()) {
                     mod.comp.bin_file.freeDecl(decl_index);
@@ -4307,12 +4307,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
            const fn_owner_decl = mod.declPtr(func.owner_decl);

            if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-                const got_addr = blk: {
-                    const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-                    break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
-                };
+                try fn_owner_decl.link.elf.ensureInitialized(elf_file);
+                const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
                try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
            } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
                try fn_owner_decl.link.macho.ensureInitialized(macho_file);
@@ -6125,20 +6121,15 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
    mod.markDeclAlive(decl);

    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
-        return MCValue{ .memory = got_addr };
+        try decl.link.elf.ensureInitialized(elf_file);
+        return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
        // Because MachO is PIE-always-on, we defer memory address resolution until
        // the linker has enough info to perform relocations.
        try decl.link.macho.ensureInitialized(macho_file);
        return MCValue{ .linker_load = .{
            .type = .got,
            .sym_index = decl.link.macho.getSymbolIndex().?,
        } };
    } else if (self.bin_file.cast(link.File.Coff)) |_| {
        // Because COFF is PIE-always-on, we defer memory address resolution until
        // the linker has enough info to perform relocations.
        assert(decl.link.coff.sym_index != 0);
        return MCValue{ .linker_load = .{
            .type = .got,
@@ -4253,59 +4253,57 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier

-        // Due to incremental compilation, how function calls are generated depends
-        // on linking.
-        switch (self.bin_file.tag) {
-            .elf => {
-                if (self.air.value(callee)) |func_value| {
-                    if (func_value.castTag(.function)) |func_payload| {
-                        const func = func_payload.data;
-                        const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-                        const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-                        const mod = self.bin_file.options.module.?;
-                        const fn_owner_decl = mod.declPtr(func.owner_decl);
-                        const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
-                            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-                            break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
-                        } else unreachable;
-                        try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
-                    } else if (func_value.castTag(.extern_fn)) |_| {
-                        return self.fail("TODO implement calling extern functions", .{});
-                    } else {
-                        return self.fail("TODO implement calling bitcasted functions", .{});
-                    }
-                } else {
-                    assert(ty.zigTypeTag() == .Pointer);
-                    const mcv = try self.resolveInst(callee);
-
-                    try self.genSetReg(Type.initTag(.usize), .lr, mcv);
-                }
-
-                // TODO: add Instruction.supportedOn
-                // function for ARM
-                if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
-                    _ = try self.addInst(.{
-                        .tag = .blx,
-                        .data = .{ .reg = .lr },
-                    });
-                } else {
-                    return self.fail("TODO fix blx emulation for ARM <v5", .{});
-                    // _ = try self.addInst(.{
-                    //     .tag = .mov,
-                    //     .data = .{ .rr_op = .{
-                    //         .rd = .lr,
-                    //         .rn = .r0,
-                    //         .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
-                    //     } },
-                    // });
-                    // _ = try self.addInst(.{
-                    //     .tag = .bx,
-                    //     .data = .{ .reg = .lr },
-                    // });
-                }
-            },
-            .macho => unreachable, // unsupported architecture for MachO
-            .coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}),
-            .plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
-            else => unreachable,
-        }
+        if (self.air.value(callee)) |func_value| {
+            if (func_value.castTag(.function)) |func_payload| {
+                const func = func_payload.data;
+                const mod = self.bin_file.options.module.?;
+                const fn_owner_decl = mod.declPtr(func.owner_decl);
+
+                if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+                    try fn_owner_decl.link.elf.ensureInitialized(elf_file);
+                    const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
+                    try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
+                } else if (self.bin_file.cast(link.File.MachO)) |_| {
+                    unreachable; // unsupported architecture for MachO
+                } else {
+                    return self.fail("TODO implement call on {s} for {s}", .{
+                        @tagName(self.bin_file.tag),
+                        @tagName(self.target.cpu.arch),
+                    });
+                }
+            } else if (func_value.castTag(.extern_fn)) |_| {
+                return self.fail("TODO implement calling extern functions", .{});
+            } else {
+                return self.fail("TODO implement calling bitcasted functions", .{});
+            }
+        } else {
+            assert(ty.zigTypeTag() == .Pointer);
+            const mcv = try self.resolveInst(callee);
+
+            try self.genSetReg(Type.initTag(.usize), .lr, mcv);
+        }
+
+        // TODO: add Instruction.supportedOn
+        // function for ARM
+        if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
+            _ = try self.addInst(.{
+                .tag = .blx,
+                .data = .{ .reg = .lr },
+            });
+        } else {
+            return self.fail("TODO fix blx emulation for ARM <v5", .{});
+            // _ = try self.addInst(.{
+            //     .tag = .mov,
+            //     .data = .{ .rr_op = .{
+            //         .rd = .lr,
+            //         .rn = .r0,
+            //         .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
+            //     } },
+            // });
+            // _ = try self.addInst(.{
+            //     .tag = .bx,
+            //     .data = .{ .reg = .lr },
+            // });
+        }

        const result: MCValue = result: {
@@ -6086,9 +6084,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
    mod.markDeclAlive(decl);

    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
-        return MCValue{ .memory = got_addr };
+        try decl.link.elf.ensureInitialized(elf_file);
+        return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
    } else if (self.bin_file.cast(link.File.MachO)) |_| {
        unreachable; // unsupported architecture for MachO
    } else if (self.bin_file.cast(link.File.Coff)) |_| {
@@ -1722,14 +1722,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
            if (func_value.castTag(.function)) |func_payload| {
                const func = func_payload.data;

-                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
                const mod = self.bin_file.options.module.?;
                const fn_owner_decl = mod.declPtr(func.owner_decl);
-                const got_addr = blk: {
-                    const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-                    break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
-                };
+                try fn_owner_decl.link.elf.ensureInitialized(elf_file);
+                const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));

                try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
                _ = try self.addInst(.{
@@ -2557,9 +2553,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
    const decl = mod.declPtr(decl_index);
    mod.markDeclAlive(decl);
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
-        return MCValue{ .memory = got_addr };
+        try decl.link.elf.ensureInitialized(elf_file);
+        return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
    } else if (self.bin_file.cast(link.File.MachO)) |_| {
        // TODO I'm hacking my way through here by repurposing .memory for storing
        // index to the GOT target symbol index.
@@ -1216,12 +1216,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
        if (self.bin_file.tag == link.File.Elf.base_tag) {
            if (func_value.castTag(.function)) |func_payload| {
                const func = func_payload.data;
-                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+                const mod = self.bin_file.options.module.?;
+                const fn_owner_decl = mod.declPtr(func.owner_decl);
                const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
-                    const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-                    const mod = self.bin_file.options.module.?;
-                    break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes);
+                    try fn_owner_decl.link.elf.ensureInitialized(elf_file);
+                    break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
                } else unreachable;

                try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -4193,9 +4192,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
 }

 fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
-    const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-    const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-
    // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
    if (tv.ty.zigTypeTag() == .Pointer) blk: {
        if (tv.ty.castPtrToFn()) |_| break :blk;
@@ -4209,9 +4205,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne

    mod.markDeclAlive(decl);
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
-        return MCValue{ .memory = got_addr };
+        try decl.link.elf.ensureInitialized(elf_file);
+        return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
    } else {
        return self.fail("TODO codegen non-ELF const Decl pointer", .{});
    }
@@ -3998,16 +3998,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
            const fn_owner_decl = mod.declPtr(func.owner_decl);

            if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-                const ptr_bits = self.target.cpu.arch.ptrBitWidth();
-                const ptr_bytes: u64 = @divExact(ptr_bits, 8);
-                const got_addr = blk: {
-                    const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-                    break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
-                };
+                try fn_owner_decl.link.elf.ensureInitialized(elf_file);
+                const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
                _ = try self.addInst(.{
                    .tag = .call,
                    .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
-                    .data = .{ .imm = @truncate(u32, got_addr) },
+                    .data = .{ .imm = got_addr },
                });
            } else if (self.bin_file.cast(link.File.Coff)) |_| {
                try self.genSetReg(Type.initTag(.usize), .rax, .{
@@ -6721,9 +6717,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
    module.markDeclAlive(decl);

    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
-        const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
-        const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
-        return MCValue{ .memory = got_addr };
+        try decl.link.elf.ensureInitialized(elf_file);
+        return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
        try decl.link.macho.ensureInitialized(macho_file);
        return MCValue{ .linker_load = .{
@@ -616,7 +616,7 @@ pub const File = struct {
        }
        switch (base.tag) {
            .coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index),
-            .elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index),
+            .elf => {}, // no-op
            .macho => {}, // no-op
            .wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index),
            .plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index),
src/link/Elf.zig
@@ -344,9 +344,10 @@ pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.
    const decl = mod.declPtr(decl_index);

    assert(self.llvm_object == null);
-    assert(decl.link.elf.local_sym_index != 0);
-
-    const target = decl.link.elf.local_sym_index;
+    try decl.link.elf.ensureInitialized(self);
+    const target = decl.link.elf.getSymbolIndex().?;

    const vaddr = self.local_symbols.items[target].st_value;
    const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
    const gop = try self.relocs.getOrPut(self.base.allocator, atom);
@@ -447,7 +448,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 {
    return @intCast(u32, result);
 }

-fn getString(self: Elf, str_off: u32) []const u8 {
+pub fn getString(self: Elf, str_off: u32) []const u8 {
    assert(str_off < self.shstrtab.items.len);
    return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
 }
@@ -2069,7 +2070,7 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
    if (text_block.prev) |prev| {
        prev.next = text_block.next;

-        if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
+        if (!already_have_free_list_node and prev.freeListEligible(self)) {
            // The free list is heuristics, it doesn't have to be perfect, so we can
            // ignore the OOM here.
            free_list.append(self.base.allocator, prev) catch {};
@@ -2084,6 +2085,15 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
        text_block.next = null;
    }

+    // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
+    const local_sym_index = text_block.getSymbolIndex().?;
+    self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {};
+    self.local_symbols.items[local_sym_index].st_info = 0;
+    _ = self.atom_by_index_table.remove(local_sym_index);
+    text_block.local_sym_index = 0;
+
+    self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {};
+
    if (self.dwarf) |*dw| {
        dw.freeAtom(&text_block.dbg_info_atom);
    }
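freeTextBlock now recycles the atom's local symbol and GOT slot itself, and the appends are deliberately `catch {}`: a failed push only loses a reuse opportunity, never correctness. A tiny self-contained sketch of that best-effort pattern (the ArrayList is a stand-in for local_symbol_free_list):

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var free_list = std.ArrayList(u32).init(gpa.allocator());
    defer free_list.deinit();

    // Recycling is best-effort: on OOM we simply skip remembering the slot.
    const recycled_sym_index: u32 = 5;
    free_list.append(recycled_sym_index) catch {};
    std.debug.print("free symbol indexes: {any}\n", .{free_list.items});
}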
@@ -2099,7 +2109,7 @@ fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr
 fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
    const sym = self.local_symbols.items[text_block.local_sym_index];
    const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
-    const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
+    const need_realloc = !align_ok or new_block_size > text_block.capacity(self);
    if (!need_realloc) return sym.st_value;
    return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
 }
@@ -2128,7 +2138,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
            // We now have a pointer to a live text block that has too much capacity.
            // Is it enough that we could fit this new text block?
            const sym = self.local_symbols.items[big_block.local_sym_index];
-            const capacity = big_block.capacity(self.*);
+            const capacity = big_block.capacity(self);
            const ideal_capacity = padToIdeal(capacity);
            const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
            const capacity_end_vaddr = sym.st_value + capacity;
@@ -2138,7 +2148,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
                // Additional bookkeeping here to notice if this free list node
                // should be deleted because the block that it points to has grown to take up
                // more of the extra capacity.
-                if (!big_block.freeListEligible(self.*)) {
+                if (!big_block.freeListEligible(self)) {
                    _ = free_list.swapRemove(i);
                } else {
                    i += 1;
@@ -2213,7 +2223,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
    return vaddr;
 }

-fn allocateLocalSymbol(self: *Elf) !u32 {
+pub fn allocateLocalSymbol(self: *Elf) !u32 {
    try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);

    const index = blk: {
@@ -2240,7 +2250,7 @@ fn allocateLocalSymbol(self: *Elf) !u32 {
    return index;
 }

-fn allocateGotOffset(self: *Elf) !u32 {
+pub fn allocateGotOffset(self: *Elf) !u32 {
    try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);

    const index = blk: {
@@ -2260,32 +2270,10 @@ fn allocateGotOffset(self: *Elf) !u32 {
    return index;
 }

-pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void {
-    if (self.llvm_object) |_| return;
-
-    const mod = self.base.options.module.?;
-    const decl = mod.declPtr(decl_index);
-    const block = &decl.link.elf;
-    if (block.local_sym_index != 0) return;
-
-    const decl_name = try decl.getFullyQualifiedName(mod);
-    defer self.base.allocator.free(decl_name);
-
-    log.debug("allocating symbol indexes for {s}", .{decl_name});
-
-    block.local_sym_index = try self.allocateLocalSymbol();
-    block.offset_table_index = try self.allocateGotOffset();
-    try self.atom_by_index_table.putNoClobber(self.base.allocator, block.local_sym_index, block);
-    try self.decls.putNoClobber(self.base.allocator, decl_index, null);
-}
-
 fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
    const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
    for (unnamed_consts.items) |atom| {
        self.freeTextBlock(atom, self.phdr_load_ro_index.?);
-        self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
-        self.local_symbols.items[atom.local_sym_index].st_info = 0;
-        _ = self.atom_by_index_table.remove(atom.local_sym_index);
    }
    unnamed_consts.clearAndFree(self.base.allocator);
 }
@@ -2298,20 +2286,13 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
    const mod = self.base.options.module.?;
    const decl = mod.declPtr(decl_index);

-    const kv = self.decls.fetchRemove(decl_index);
-    if (kv.?.value) |index| {
-        self.freeTextBlock(&decl.link.elf, index);
-        self.freeUnnamedConsts(decl_index);
-    }
    log.debug("freeDecl {*}", .{decl});

-    // Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
-    if (decl.link.elf.local_sym_index != 0) {
-        self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
-        self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
-        _ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index);
-        decl.link.elf.local_sym_index = 0;
-
-        self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
+    if (self.decls.fetchRemove(decl_index)) |kv| {
+        if (kv.value) |index| {
+            self.freeTextBlock(&decl.link.elf, index);
+            self.freeUnnamedConsts(decl_index);
+        }
    }

    if (self.dwarf) |*dw| {
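With lazy registration, freeDecl can now be called for a decl the ELF linker never saw, so the unconditional `fetchRemove(...).?` becomes an `if` over the optional. A simplified model of that lookup (std.AutoHashMap standing in for Elf.decls; the index types are illustrative):

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // ?u16 models "program header index, possibly not assigned yet".
    var decls = std.AutoHashMap(u32, ?u16).init(gpa.allocator());
    defer decls.deinit();

    const decl_index: u32 = 7;
    // Freeing an unregistered decl is now a harmless no-op:
    if (decls.fetchRemove(decl_index)) |kv| {
        if (kv.value) |phdr_index| {
            _ = phdr_index; // here the real code frees the decl's text block
        }
    }
}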
@@ -2363,7 +2344,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
    assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
    const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index];
    if (local_sym.st_size != 0) {
-        const capacity = decl.link.elf.capacity(self.*);
+        const capacity = decl.link.elf.capacity(self);
        const need_realloc = code.len > capacity or
            !mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
        if (need_realloc) {
@@ -2424,12 +2405,19 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
    const tracy = trace(@src());
    defer tracy.end();

-    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
-    defer code_buffer.deinit();
-
    const decl_index = func.owner_decl;
    const decl = module.declPtr(decl_index);
-    self.freeUnnamedConsts(decl_index);
+    const atom = &decl.link.elf;
+    try atom.ensureInitialized(self);
+    const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+    if (gop.found_existing) {
+        self.freeUnnamedConsts(decl_index);
+    } else {
+        gop.value_ptr.* = null;
+    }
+
+    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
+    defer code_buffer.deinit();

    var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
    defer if (decl_state) |*ds| ds.deinit();
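updateFunc (and updateDecl below) now registers the decl on first use via getOrPut instead of assuming an earlier allocateDeclIndexes call; found_existing doubles as the "this is a re-update" signal that triggers freeing stale unnamed consts. A simplified model of that bookkeeping (the map and indexes are stand-ins, not the linker's types):

const std = @import("std");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var decls = std.AutoHashMap(u32, ?u16).init(gpa.allocator());
    defer decls.deinit();

    const decl_index: u32 = 42;
    const gop = try decls.getOrPut(decl_index);
    if (gop.found_existing) {
        // Re-update of a known decl: stale unnamed consts would be freed here.
    } else {
        gop.value_ptr.* = null; // phdr index is assigned later, on first export
    }
}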
@@ -2490,6 +2478,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v

    assert(!self.unnamed_const_atoms.contains(decl_index));

+    const atom = &decl.link.elf;
+    try atom.ensureInitialized(self);
+    const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+    if (!gop.found_existing) {
+        gop.value_ptr.* = null;
+    }
+
    var code_buffer = std.ArrayList(u8).init(self.base.allocator);
    defer code_buffer.deinit();
@@ -2633,16 +2628,19 @@ pub fn updateDeclExports(
    const tracy = trace(@src());
    defer tracy.end();

-    try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
    const decl = module.declPtr(decl_index);
-    if (decl.link.elf.local_sym_index == 0) return;
-    const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
+    const atom = &decl.link.elf;

-    const decl_ptr = self.decls.getPtr(decl_index).?;
-    if (decl_ptr.* == null) {
-        decl_ptr.* = try self.getDeclPhdrIndex(decl);
+    if (atom.getSymbolIndex() == null) return;
+
+    const decl_sym = atom.getSymbol(self);
+    try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
+
+    const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
+    if (!gop.found_existing) {
+        gop.value_ptr.* = try self.getDeclPhdrIndex(decl);
    }
-    const phdr_index = decl_ptr.*.?;
+    const phdr_index = gop.value_ptr.*.?;
    const shdr_index = self.phdr_shdr_table.get(phdr_index).?;

    for (exports) |exp| {
@@ -1,6 +1,8 @@
 const Atom = @This();

 const std = @import("std");
+const assert = std.debug.assert;
+const elf = std.elf;

 const Dwarf = @import("../Dwarf.zig");
 const Elf = @import("../Elf.zig");
@@ -12,8 +14,10 @@ const Elf = @import("../Elf.zig");
 /// If this field is 0, it means the codegen size = 0 and there is no symbol or
 /// offset table entry.
 local_sym_index: u32,
+
+/// This field is undefined for symbols with size = 0.
 offset_table_index: u32,

 /// Points to the previous and next neighbors, based on the `text_offset`.
 /// This can be used to find, for example, the capacity of this `TextBlock`.
 prev: ?*Atom,
@@ -29,13 +33,49 @@ pub const empty = Atom{
    .dbg_info_atom = undefined,
 };

+pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
+    if (self.getSymbolIndex() != null) return; // Already initialized
+    self.local_sym_index = try elf_file.allocateLocalSymbol();
+    self.offset_table_index = try elf_file.allocateGotOffset();
+    try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
+}
+
+pub fn getSymbolIndex(self: Atom) ?u32 {
+    if (self.local_sym_index == 0) return null;
+    return self.local_sym_index;
+}
+
+pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
+    const sym_index = self.getSymbolIndex().?;
+    return elf_file.local_symbols.items[sym_index];
+}
+
+pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
+    const sym_index = self.getSymbolIndex().?;
+    return &elf_file.local_symbols.items[sym_index];
+}
+
+pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
+    const sym = self.getSymbol(elf_file);
+    return elf_file.getString(sym.st_name);
+}
+
+pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
+    assert(self.getSymbolIndex() != null);
+    const target = elf_file.base.options.target;
+    const ptr_bits = target.cpu.arch.ptrBitWidth();
+    const ptr_bytes: u64 = @divExact(ptr_bits, 8);
+    const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
+    return got.p_vaddr + self.offset_table_index * ptr_bytes;
+}
+
 /// Returns how much room there is to grow in virtual address space.
 /// File offset relocation happens transparently, so it is not included in
 /// this calculation.
-pub fn capacity(self: Atom, elf_file: Elf) u64 {
-    const self_sym = elf_file.local_symbols.items[self.local_sym_index];
+pub fn capacity(self: Atom, elf_file: *Elf) u64 {
+    const self_sym = self.getSymbol(elf_file);
    if (self.next) |next| {
-        const next_sym = elf_file.local_symbols.items[next.local_sym_index];
+        const next_sym = next.getSymbol(elf_file);
        return next_sym.st_value - self_sym.st_value;
    } else {
        // We are the last block. The capacity is limited only by virtual address space.
@@ -43,11 +83,11 @@ pub fn capacity(self: Atom, elf_file: Elf) u64 {
    }
 }

-pub fn freeListEligible(self: Atom, elf_file: Elf) bool {
+pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
    // No need to keep a free list node for the last block.
    const next = self.next orelse return false;
-    const self_sym = elf_file.local_symbols.items[self.local_sym_index];
-    const next_sym = elf_file.local_symbols.items[next.local_sym_index];
+    const self_sym = self.getSymbol(elf_file);
+    const next_sym = next.getSymbol(elf_file);
    const cap = next_sym.st_value - self_sym.st_value;
    const ideal_cap = Elf.padToIdeal(self_sym.st_size);
    if (cap <= ideal_cap) return false;
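For reference, the arithmetic that getOffsetTableAddress centralizes is the same GOT-slot computation the backends previously inlined at every call site; a hypothetical standalone version follows (the function name and the values in main are illustrative, not part of the commit):

const std = @import("std");

// Each atom owns one pointer-sized GOT slot; its address is the GOT program
// header's virtual address plus offset_table_index pointer-widths.
fn gotSlotAddress(got_vaddr: u64, offset_table_index: u32, ptr_bit_width: u16) u64 {
    const ptr_bytes: u64 = @divExact(@as(u64, ptr_bit_width), 8);
    return got_vaddr + offset_table_index * ptr_bytes;
}

pub fn main() void {
    // e.g. a GOT mapped at 0x200000 on a 64-bit target, slot 3 -> 0x200018:
    std.debug.print("0x{x}\n", .{gotSlotAddress(0x200000, 3, 64)});
}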
@@ -2472,14 +2472,15 @@ pub fn updateDeclExports(

    const decl = module.declPtr(decl_index);
    const atom = &decl.link.macho;
-    try atom.ensureInitialized(self);
+
+    if (atom.getSymbolIndex() == null) return;

    const gop = try self.decls.getOrPut(gpa, decl_index);
    if (!gop.found_existing) {
-        gop.value_ptr.* = null;
+        gop.value_ptr.* = self.getDeclOutputSection(decl);
    }

-    const decl_sym = decl.link.macho.getSymbol(self);
+    const decl_sym = atom.getSymbol(self);

    for (exports) |exp| {
        const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});