Merge pull request #14472 from ziglang/alloc-decls

self-hosted: remove allocateDeclIndexes from the linker API
commit 5a67ae506a
Jakub Konka, 2023-01-28 22:33:56 +01:00, committed by GitHub
18 changed files with 701 additions and 790 deletions
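In short: the frontend previously had to register every Decl with the linker before queueing codegen work; after this commit the queue item alone suffices, and each backend initializes a Decl's atom the first time it sees it. A minimal before/after sketch, condensed from the hunks below (`ensureInitialized` is the helper this commit introduces):

// Before (frontend, e.g. Module.semaDecl):
try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });

// After: no registration call; a backend lazily initializes the atom
// on first contact, e.g. at the top of Coff.updateDecl:
try decl.link.coff.ensureInitialized(self); // no-op once initialized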


@@ -4585,7 +4585,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel.
try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = func });
if (type_changed and mod.emit_h != null) {
try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl_index });
@@ -4697,7 +4696,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
// codegen backend wants full access to the Decl Type.
try sema.resolveTypeFully(decl.ty);
try mod.comp.bin_file.allocateDeclIndexes(decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
if (type_changed and mod.emit_h != null) {
@@ -5315,23 +5313,6 @@ pub fn deleteUnusedDecl(mod: *Module, decl_index: Decl.Index) void {
const decl = mod.declPtr(decl_index);
log.debug("deleteUnusedDecl {d} ({s})", .{ decl_index, decl.name });
// TODO: remove `allocateDeclIndexes` and change the API so that the linker backends
// are required to notice the first time `updateDecl` happens and keep track
// of it themselves. However, they can rely on getting a `freeDecl` call if any
// `updateDecl` or `updateFunc` calls happen. This will allow us to avoid any call
// into the linker backend here, since the linker backend will never have been told
// about the Decl in the first place.
// Until then, we did call `allocateDeclIndexes` on this anonymous Decl and so we
// must call `freeDecl` in the linker backend now.
switch (mod.comp.bin_file.tag) {
.c => {}, // this linker backend has already migrated to the new API
else => if (decl.has_tv) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl_index);
}
},
}
assert(!mod.declIsRoot(decl_index));
assert(decl.src_namespace.anon_decls.swapRemove(decl_index));
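The TODO above states the target contract: a backend notices the first `updateDecl`/`updateFunc` for a Decl and tracks it itself, and `freeDecl` is only called for Decls that received at least one update. A hedged sketch of the backend side, modeled on the COFF hunks later in this diff:

pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
    const decl = self.base.options.module.?.declPtr(decl_index);
    // A Decl is present in `decls` only if an update ever happened, so
    // table membership replaces "was allocateDeclIndexes ever called?".
    if (self.decls.fetchRemove(decl_index)) |kv| {
        if (kv.value) |_| {
            self.freeAtom(&decl.link.coff);
            self.freeUnnamedConsts(decl_index);
        }
    }
}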
@@ -5816,7 +5797,6 @@ pub fn initNewAnonDecl(
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.isFnOrHasRuntimeBits()) {
try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl_index });
}
}


@@ -7510,7 +7510,6 @@ fn resolveGenericInstantiationType(
// Queue up a `codegen_func` work item for the new Fn. The `comptime_args` field
// will be populated, ensuring it will have `analyzeBody` called with the ZIR
// parameters mapped appropriately.
try mod.comp.bin_file.allocateDeclIndexes(new_decl_index);
try mod.comp.work_queue.writeItem(.{ .codegen_func = new_func });
return new_func;
}


@@ -3999,8 +3999,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index,
.coff => owner_decl.link.coff.sym_index,
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -4302,90 +4302,66 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// on linking.
const mod = self.bin_file.options.module.?;
if (self.air.value(callee)) |func_value| {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.macho.sym_index,
.sym_index = fn_owner_decl.link.macho.getSymbolIndex().?,
},
});
// blr x30
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.coff.getSymbolIndex().?,
},
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(func.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = fn_owner_decl.link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
} else unreachable;
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
_ = try self.addInst(.{
.tag = .call_extern,
.data = .{
.relocation = .{
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
.sym_index = sym_index,
},
},
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.coff.sym_index,
},
});
// blr x30
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.u64), .x30, .{
.linker_load = .{
@@ -4393,35 +4369,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
// blr x30
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr });
_ = try self.addInst(.{
.tag = .blr,
.data = .{ .reg = .x30 },
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else unreachable;
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
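With removed and added lines interleaved above, the new shape of the aarch64 `airCall` is hard to see: the per-format dispatch moves inside the `.function` branch, so each backend can initialize the callee's atom before use. A condensed sketch (call sequences elided into comments):

if (func_value.castTag(.function)) |func_payload| {
    const fn_owner_decl = mod.declPtr(func_payload.data.owner_decl);
    if (self.bin_file.cast(link.File.Elf)) |elf_file| {
        try fn_owner_decl.link.elf.ensureInitialized(elf_file);
        // load the GOT address into x30, then blr x30
    } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
        try fn_owner_decl.link.macho.ensureInitialized(macho_file);
        // linker_load .got with getSymbolIndex().?, then blr x30
    } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
        try fn_owner_decl.link.coff.ensureInitialized(coff_file);
        // linker_load .got with getSymbolIndex().?, then blr x30
    } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
        try p9.seeDecl(func_payload.data.owner_decl);
        // compute the GOT slot address, then blr x30
    } else unreachable;
}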
@@ -5537,8 +5494,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index,
.coff => owner_decl.link.coff.sym_index,
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5651,8 +5608,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index,
.coff => owner_decl.link.coff.sym_index,
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -5845,8 +5802,8 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
const mod = self.bin_file.options.module.?;
const owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = switch (self.bin_file.tag) {
.macho => owner_decl.link.macho.sym_index,
.coff => owner_decl.link.coff.sym_index,
.macho => owner_decl.link.macho.getSymbolIndex().?,
.coff => owner_decl.link.coff.getSymbolIndex().?,
else => unreachable, // unsupported target format
};
_ = try self.addInst(.{
@@ -6165,24 +6122,19 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// Because MachO is PIE-always-on, we defer memory address resolution until
// the linker has enough info to perform relocations.
assert(decl.link.macho.sym_index != 0);
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try decl.link.macho.ensureInitialized(macho_file);
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.macho.sym_index,
.sym_index = decl.link.macho.getSymbolIndex().?,
} };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
// Because COFF is PIE-always-on, we defer memory address resolution until
// the linker has enough info to perform relocations.
assert(decl.link.coff.sym_index != 0);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.coff.sym_index,
.sym_index = decl.link.coff.getSymbolIndex().?,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);


@@ -4253,59 +4253,57 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
switch (self.bin_file.tag) {
.elf => {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .lr, mcv);
}
// TODO: add Instruction.supportedOn
// function for ARM
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
_ = try self.addInst(.{
.tag = .blx,
.data = .{ .reg = .lr },
return self.fail("TODO implement call on {s} for {s}", .{
@tagName(self.bin_file.tag),
@tagName(self.target.cpu.arch),
});
} else {
return self.fail("TODO fix blx emulation for ARM <v5", .{});
// _ = try self.addInst(.{
// .tag = .mov,
// .data = .{ .rr_op = .{
// .rd = .lr,
// .rn = .r0,
// .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
// } },
// });
// _ = try self.addInst(.{
// .tag = .bx,
// .data = .{ .reg = .lr },
// });
}
},
.macho => unreachable, // unsupported architecture for MachO
.coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}),
.plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
else => unreachable,
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .lr, mcv);
}
// TODO: add Instruction.supportedOn
// function for ARM
if (Target.arm.featureSetHas(self.target.cpu.features, .has_v5t)) {
_ = try self.addInst(.{
.tag = .blx,
.data = .{ .reg = .lr },
});
} else {
return self.fail("TODO fix blx emulation for ARM <v5", .{});
// _ = try self.addInst(.{
// .tag = .mov,
// .data = .{ .rr_op = .{
// .rd = .lr,
// .rn = .r0,
// .op = Instruction.Operand.reg(.pc, Instruction.Operand.Shift.none),
// } },
// });
// _ = try self.addInst(.{
// .tag = .bx,
// .data = .{ .reg = .lr },
// });
}
const result: MCValue = result: {
@@ -6086,9 +6084,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |_| {


@@ -1722,14 +1722,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
@@ -2557,9 +2553,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const decl = mod.declPtr(decl_index);
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.


@@ -1216,12 +1216,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.tag == link.File.Elf.base_tag) {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const mod = self.bin_file.options.module.?;
break :blk @intCast(u32, got.p_vaddr + mod.declPtr(func.owner_decl).link.elf.offset_table_index * ptr_bytes);
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
break :blk @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr });
@@ -4193,9 +4192,6 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
// TODO this feels clunky. Perhaps we should check for it in `genTypedValue`?
if (tv.ty.zigTypeTag() == .Pointer) blk: {
if (tv.ty.castPtrToFn()) |_| break :blk;
@@ -4209,9 +4205,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
mod.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}


@@ -2120,22 +2120,28 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const module = func.bin_file.base.options.module.?;
if (func_val.castTag(.function)) |function| {
break :blk module.declPtr(function.data.owner_decl);
const decl = module.declPtr(function.data.owner_decl);
try decl.link.wasm.ensureInitialized(func.bin_file);
break :blk decl;
} else if (func_val.castTag(.extern_fn)) |extern_fn| {
const ext_decl = module.declPtr(extern_fn.data.owner_decl);
const ext_info = ext_decl.ty.fnInfo();
var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
defer func_type.deinit(func.gpa);
const atom = &ext_decl.link.wasm;
try atom.ensureInitialized(func.bin_file);
ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
try func.bin_file.addOrUpdateImport(
mem.sliceTo(ext_decl.name, 0),
ext_decl.link.wasm.sym_index,
atom.getSymbolIndex().?,
ext_decl.getExternFn().?.lib_name,
ext_decl.fn_link.wasm.type_index,
);
break :blk ext_decl;
} else if (func_val.castTag(.decl_ref)) |decl_ref| {
break :blk module.declPtr(decl_ref.data);
const decl = module.declPtr(decl_ref.data);
try decl.link.wasm.ensureInitialized(func.bin_file);
break :blk decl;
}
return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
};
@@ -2752,6 +2758,7 @@ fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Ind
}
module.markDeclAlive(decl);
try decl.link.wasm.ensureInitialized(func.bin_file);
const target_sym_index = decl.link.wasm.sym_index;
if (decl.ty.zigTypeTag() == .Fn) {


@@ -2671,9 +2671,9 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
fn_owner_decl.link.macho.sym_index
fn_owner_decl.link.macho.getSymbolIndex().?
else
fn_owner_decl.link.coff.sym_index;
fn_owner_decl.link.coff.getSymbolIndex().?;
const flags: u2 = switch (load_struct.type) {
.got => 0b00,
.direct => 0b01,
@@ -3992,49 +3992,26 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
// Due to incremental compilation, how function calls are generated depends
// on linking.
const mod = self.bin_file.options.module.?;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
};
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
try fn_owner_decl.link.elf.ensureInitialized(elf_file);
const got_addr = @intCast(u32, fn_owner_decl.link.elf.getOffsetTableAddress(elf_file));
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @truncate(u32, got_addr) },
.data = .{ .imm = got_addr },
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try fn_owner_decl.link.coff.ensureInitialized(coff_file);
const sym_index = fn_owner_decl.link.coff.getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
.sym_index = fn_owner_decl.link.coff.sym_index,
.sym_index = sym_index,
},
});
_ = try self.addInst(.{
@@ -4045,15 +4022,47 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}),
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try fn_owner_decl.link.macho.ensureInitialized(macho_file);
const sym_index = fn_owner_decl.link.macho.getSymbolIndex().?;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
},
});
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(func.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = fn_owner_decl.link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else unreachable;
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
@@ -4069,108 +4078,37 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}),
.data = undefined,
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const sym_index = fn_owner_decl.link.macho.sym_index;
try self.genSetReg(Type.initTag(.usize), .rax, .{
.linker_load = .{
.type = .got,
.sym_index = sym_index,
},
});
// callq *%rax
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
const decl_name = mod.declPtr(extern_fn.owner_decl).name;
if (extern_fn.lib_name) |lib_name| {
log.debug("TODO enforce that '{s}' is expected in '{s}' library", .{
decl_name,
lib_name,
});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
_ = try self.addInst(.{
.tag = .call_extern,
.ops = undefined,
.data = .{
.relocation = .{
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index,
.atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.getSymbolIndex().?,
.sym_index = sym_index,
},
},
});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
return self.fail("TODO implement calling extern functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
try p9.seeDecl(func_payload.data.owner_decl);
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = p9.bases.data;
const got_index = mod.declPtr(func_payload.data.owner_decl).link.plan9.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .imm = @intCast(u32, fn_got_addr) },
});
} else return self.fail("TODO implement calling extern fn on plan9", .{});
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else unreachable;
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
if (info.stack_byte_count > 0) {
// Readjust the stack
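The x86_64 `airCall` above gets the same restructuring: format dispatch moves inside the `.function`/`.extern_fn` branches, so the indirect-call and extern-call paths are no longer duplicated per format. A condensed sketch of the new control flow (bodies elided into comments):

if (self.air.value(callee)) |func_value| {
    if (func_value.castTag(.function)) |func_payload| {
        const fn_owner_decl = mod.declPtr(func_payload.data.owner_decl);
        if (self.bin_file.cast(link.File.Elf)) |elf_file| {
            try fn_owner_decl.link.elf.ensureInitialized(elf_file);
            // call through the GOT address
        } else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
            try fn_owner_decl.link.coff.ensureInitialized(coff_file);
            // linker_load .got into rax, then call rax
        } else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
            try fn_owner_decl.link.macho.ensureInitialized(macho_file);
            // linker_load .got into rax, then call rax
        } else if (self.bin_file.cast(link.File.Plan9)) |p9| {
            try p9.seeDecl(func_payload.data.owner_decl);
            // call through the Plan 9 GOT slot
        } else unreachable;
    } else if (func_value.castTag(.extern_fn)) |_| {
        // COFF and MachO resolve a global symbol; other formats are TODO
    } else {
        return self.fail("TODO implement calling bitcasted functions", .{});
    }
} else {
    // indirect call: load the callee pointer into rax, then call rax
}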
@@ -6781,20 +6719,19 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
module.markDeclAlive(decl);
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
assert(decl.link.macho.sym_index != 0);
try decl.link.elf.ensureInitialized(elf_file);
return MCValue{ .memory = decl.link.elf.getOffsetTableAddress(elf_file) };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try decl.link.macho.ensureInitialized(macho_file);
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.macho.sym_index,
.sym_index = decl.link.macho.getSymbolIndex().?,
} };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
assert(decl.link.coff.sym_index != 0);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
try decl.link.coff.ensureInitialized(coff_file);
return MCValue{ .linker_load = .{
.type = .got,
.sym_index = decl.link.coff.sym_index,
.sym_index = decl.link.coff.getSymbolIndex().?,
} };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);


@@ -533,8 +533,7 @@ pub const File = struct {
}
}
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
/// May be called before or after updateDeclExports for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmtDebug() });
@@ -557,8 +556,7 @@ pub const File = struct {
}
}
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
/// May be called before or after updateDeclExports for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{
@@ -602,28 +600,6 @@ pub const File = struct {
}
}
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
/// TODO we're transitioning to deleting this function and instead having
/// each linker backend notice the first time updateDecl or updateFunc is called, or
/// a callee referenced from AIR.
pub fn allocateDeclIndexes(base: *File, decl_index: Module.Decl.Index) error{OutOfMemory}!void {
const decl = base.options.module.?.declPtr(decl_index);
log.debug("allocateDeclIndexes {*} ({s})", .{ decl, decl.name });
if (build_options.only_c) {
assert(base.tag == .c);
return;
}
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl_index),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl_index),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl_index),
.wasm => return @fieldParentPtr(Wasm, "base", base).allocateDeclIndexes(decl_index),
.plan9 => return @fieldParentPtr(Plan9, "base", base).allocateDeclIndexes(decl_index),
.c, .spirv, .nvptx => {},
}
}
pub fn releaseLock(self: *File) void {
if (self.lock) |*lock| {
lock.release();
@@ -874,8 +850,7 @@ pub const File = struct {
AnalysisFail,
};
/// May be called before or after updateDecl, but must be called after
/// allocateDeclIndexes for any given Decl.
/// May be called before or after updateDecl for any given Decl.
pub fn updateDeclExports(
base: *File,
module: *Module,
@@ -911,6 +886,8 @@ pub const File = struct {
/// The linker is passed information about the containing atom, `parent_atom_index`, and the offset within its
/// memory buffer, `offset`, so that it can make a note of potential relocation sites, should the
/// `Decl`'s address not be resolved yet, or should the containing atom get moved in virtual memory.
/// May be called before or after updateFunc/updateDecl; therefore it is up to the linker to allocate
/// the block/atom.
pub fn getDeclVAddr(base: *File, decl_index: Module.Decl.Index, reloc_info: RelocInfo) !u64 {
if (build_options.only_c) unreachable;
switch (base.tag) {
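A hedged caller-side sketch of that contract (the exact `RelocInfo` field set is assumed here, not shown in this diff): the request may arrive before any `updateDecl`, so the backend must be prepared to allocate the atom on demand, as the ELF and COFF implementations below do via `ensureInitialized`.

// bin_file is assumed to be a *link.File owned by the compilation.
const vaddr = try bin_file.getDeclVAddr(decl_index, .{
    .parent_atom_index = parent_sym_index, // symbol index of the requesting atom
    .offset = offset_in_parent, // where the resolved address will be written
});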


@@ -480,16 +480,6 @@ fn growSectionVM(self: *Coff, sect_id: u32, needed_size: u32) !void {
header.virtual_size = increased_size;
}
pub fn allocateDeclIndexes(self: *Coff, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const decl = self.base.options.module.?.declPtr(decl_index);
if (decl.link.coff.sym_index != 0) return;
decl.link.coff.sym_index = try self.allocateSymbol();
const gpa = self.base.allocator;
try self.atom_by_index_table.putNoClobber(gpa, decl.link.coff.sym_index, &decl.link.coff);
try self.decls.putNoClobber(gpa, decl_index, null);
}
fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u32 {
const tracy = trace(@src());
defer tracy.end();
@@ -615,7 +605,7 @@ fn allocateAtom(self: *Coff, atom: *Atom, new_atom_size: u32, alignment: u32) !u
return vaddr;
}
fn allocateSymbol(self: *Coff) !u32 {
pub fn allocateSymbol(self: *Coff) !u32 {
const gpa = self.base.allocator;
try self.locals.ensureUnusedCapacity(gpa, 1);
@@ -716,12 +706,11 @@ fn createGotAtom(self: *Coff, target: SymbolWithLoc) !*Atom {
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
atom.sym_index = try self.allocateSymbol();
try atom.ensureInitialized(self);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.got_section_index.? + 1);
@@ -754,12 +743,11 @@ fn createImportAtom(self: *Coff) !*Atom {
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
atom.sym_index = try self.allocateSymbol();
try atom.ensureInitialized(self);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const sym = atom.getSymbolPtr(self);
sym.section_number = @intToEnum(coff.SectionNumber, self.idata_section_index.? + 1);
@@ -790,7 +778,11 @@ fn writeAtom(self: *Coff, atom: *Atom, code: []const u8) !void {
const sym = atom.getSymbol(self);
const section = self.sections.get(@enumToInt(sym.section_number) - 1);
const file_offset = section.header.pointer_to_raw_data + sym.value - section.header.virtual_address;
log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{ atom.getName(self), file_offset, file_offset + code.len });
log.debug("writing atom for symbol {s} at file offset 0x{x} to 0x{x}", .{
atom.getName(self),
file_offset,
file_offset + code.len,
});
try self.base.file.?.pwriteAll(code, file_offset);
try self.resolveRelocs(atom);
}
@@ -848,6 +840,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
// Remove any relocs and base relocs associated with this Atom
self.freeRelocationsForAtom(atom);
const gpa = self.base.allocator;
const sym = atom.getSymbol(self);
const sect_id = @enumToInt(sym.section_number) - 1;
const free_list = &self.sections.items(.free_list)[sect_id];
@@ -885,7 +878,7 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
if (!already_have_free_list_node and prev.freeListEligible(self)) {
// The free list is heuristics; it doesn't have to be perfect, so we can
// ignore the OOM here.
free_list.append(self.base.allocator, prev) catch {};
free_list.append(gpa, prev) catch {};
}
} else {
atom.prev = null;
@@ -896,6 +889,28 @@ fn freeAtom(self: *Coff, atom: *Atom) void {
} else {
atom.next = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
const sym_index = atom.getSymbolIndex().?;
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
atom.sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@@ -912,8 +927,15 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(&decl.link.coff);
const atom = &decl.link.coff;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(&decl.link.coff);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
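Disentangled, the new-state code at the top of `updateFunc` reads as follows; the `getOrPut` takes over the registration that `allocateDeclIndexes` used to perform, and stale state is cleared only on re-updates:

const atom = &decl.link.coff;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
    // Re-update: drop state tied to the previous codegen run.
    self.freeUnnamedConsts(decl_index);
    self.freeRelocationsForAtom(&decl.link.coff);
} else {
    gop.value_ptr.* = null; // output section decided later
}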
@@ -960,9 +982,9 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
try self.managed_atoms.append(gpa, atom);
atom.sym_index = try self.allocateSymbol();
const sym = atom.getSymbolPtr(self);
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
@@ -971,14 +993,11 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
};
defer gpa.free(sym_name);
try self.setSymbolName(sym, sym_name);
sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
try self.setSymbolName(atom.getSymbolPtr(self), sym_name);
atom.getSymbolPtr(self).section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
.parent_atom_index = atom.sym_index,
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -993,17 +1012,17 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
atom.alignment = required_alignment;
atom.size = @intCast(u32, code.len);
sym.value = try self.allocateAtom(atom, atom.size, atom.alignment);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom, atom.size, atom.alignment);
errdefer self.freeAtom(atom);
try unnamed_consts.append(gpa, atom);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, sym.value });
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, atom.getSymbol(self).value });
log.debug(" (required alignment 0x{x})", .{required_alignment});
try self.writeAtom(atom, code);
return atom.sym_index;
return atom.getSymbolIndex().?;
}
pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -1028,7 +1047,14 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
}
}
self.freeRelocationsForAtom(&decl.link.coff);
const atom = &decl.link.coff;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeRelocationsForAtom(atom);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@@ -1038,7 +1064,7 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
.parent_atom_index = decl.link.coff.sym_index,
.parent_atom_index = decl.link.coff.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@@ -1099,7 +1125,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
const code_len = @intCast(u32, code.len);
const atom = &decl.link.coff;
assert(atom.sym_index != 0); // Caller forgot to allocateDeclIndexes()
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
@@ -1116,7 +1142,7 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
if (vaddr != sym.value) {
sym.value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_atom = self.getGotAtomForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
try self.writePtrWidthAtom(got_atom);
@@ -1137,10 +1163,10 @@ fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []const u8,
atom.size = code_len;
sym.value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
const got_target = SymbolWithLoc{ .sym_index = atom.getSymbolIndex().?, .file = null };
const got_index = try self.allocateGotEntry(got_target);
const got_atom = try self.createGotAtom(got_target);
self.got_entries.items[got_index].sym_index = got_atom.sym_index;
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(got_atom);
}
@@ -1160,11 +1186,6 @@ fn freeUnnamedConsts(self: *Coff, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeAtom(atom);
self.locals_free_list.append(gpa, atom.sym_index) catch {};
self.locals.items[atom.sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(atom.sym_index);
log.debug(" adding local symbol index {d} to free list", .{atom.sym_index});
atom.sym_index = 0;
}
unnamed_consts.clearAndFree(gpa);
}
@@ -1179,35 +1200,11 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
const kv = self.decls.fetchRemove(decl_index);
if (kv.?.value) |_| {
self.freeAtom(&decl.link.coff);
self.freeUnnamedConsts(decl_index);
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
const gpa = self.base.allocator;
const sym_index = decl.link.coff.sym_index;
if (sym_index != 0) {
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
if (self.decls.fetchRemove(decl_index)) |kv| {
if (kv.value) |_| {
self.freeAtom(&decl.link.coff);
self.freeUnnamedConsts(decl_index);
}
self.locals.items[sym_index].section_number = .UNDEFINED;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
decl.link.coff.sym_index = 0;
}
}
@@ -1261,7 +1258,14 @@ pub fn updateDeclExports(
const decl = module.declPtr(decl_index);
const atom = &decl.link.coff;
if (atom.sym_index == 0) return;
if (atom.getSymbolIndex() == null) return;
const gop = try self.decls.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = self.getDeclOutputSection(decl);
}
const decl_sym = atom.getSymbol(self);
for (exports) |exp| {
@@ -1416,7 +1420,7 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
const import_index = try self.allocateImportEntry(global);
const import_atom = try self.createImportAtom();
self.imports.items[import_index].sym_index = import_atom.sym_index;
self.imports.items[import_index].sym_index = import_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(import_atom);
}
@@ -1460,10 +1464,12 @@ pub fn getDeclVAddr(
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
assert(decl.link.coff.sym_index != 0);
try decl.link.coff.ensureInitialized(self);
const sym_index = decl.link.coff.getSymbolIndex().?;
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
const target = SymbolWithLoc{ .sym_index = decl.link.coff.sym_index, .file = null };
const target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
try atom.addRelocation(self, .{
.type = .direct,
.target = target,


@@ -39,30 +39,45 @@ pub const empty = Atom{
.next = null,
};
pub fn ensureInitialized(self: *Atom, coff_file: *Coff) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.sym_index = try coff_file.allocateSymbol();
try coff_file.atom_by_index_table.putNoClobber(coff_file.base.allocator, self.sym_index, self);
}
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
}
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbol(.{
.sym_index = self.sym_index,
.sym_index = sym_index,
.file = self.file,
});
}
/// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolPtr(.{
.sym_index = self.sym_index,
.sym_index = sym_index,
.file = self.file,
});
}
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
return .{ .sym_index = self.sym_index, .file = self.file };
const sym_index = self.getSymbolIndex().?;
return .{ .sym_index = sym_index, .file = self.file };
}
/// Returns the name of this atom.
pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
const sym_index = self.getSymbolIndex().?;
return coff_file.getSymbolName(.{
.sym_index = self.sym_index,
.sym_index = sym_index,
.file = self.file,
});
}
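The helpers above encode a zero-sentinel, lazy-init idiom: `sym_index == 0` means "no symbol yet", `getSymbolIndex` surfaces that as an optional, and `ensureInitialized` is idempotent. A short usage sketch (assumed caller code, not part of this diff):

const atom = try gpa.create(Atom);
atom.* = Atom.empty; // sym_index == 0
assert(atom.getSymbolIndex() == null); // no symbol allocated yet
try atom.ensureInitialized(coff_file); // allocates a symbol, registers the atom
try atom.ensureInitialized(coff_file); // second call is a no-op
const sym_index = atom.getSymbolIndex().?; // non-null from here on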


@@ -10,6 +10,7 @@ const fs = std.fs;
const elf = std.elf;
const log = std.log.scoped(.link);
const Atom = @import("Elf/Atom.zig");
const Module = @import("../Module.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
@@ -31,6 +32,8 @@ const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
pub const TextBlock = Atom;
const default_entry_addr = 0x8000000;
pub const base_tag: File.Tag = .elf;
@@ -188,62 +191,10 @@ const ideal_factor = 3;
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
const minimum_text_block_size = 64;
const min_text_capacity = padToIdeal(minimum_text_block_size);
pub const min_text_capacity = padToIdeal(minimum_text_block_size);
pub const PtrWidth = enum { p32, p64 };
pub const TextBlock = struct {
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
/// the symbol references, and adding that to the file offset of the section.
/// If this field is 0, it means the codegen size = 0 and there is no symbol or
/// offset table entry.
local_sym_index: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*TextBlock,
next: ?*TextBlock,
dbg_info_atom: Dwarf.Atom,
pub const empty = TextBlock{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
.dbg_info_atom = undefined,
};
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
fn capacity(self: TextBlock, elf_file: Elf) u64 {
const self_sym = elf_file.local_symbols.items[self.local_sym_index];
if (self.next) |next| {
const next_sym = elf_file.local_symbols.items[next.local_sym_index];
return next_sym.st_value - self_sym.st_value;
} else {
// We are the last block. The capacity is limited only by virtual address space.
return std.math.maxInt(u32) - self_sym.st_value;
}
}
fn freeListEligible(self: TextBlock, elf_file: Elf) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const self_sym = elf_file.local_symbols.items[self.local_sym_index];
const next_sym = elf_file.local_symbols.items[next.local_sym_index];
const cap = next_sym.st_value - self_sym.st_value;
const ideal_cap = padToIdeal(self_sym.st_size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= min_text_capacity;
}
};
pub const Export = struct {
sym_index: ?u32 = null,
};
@@ -393,9 +344,10 @@ pub fn getDeclVAddr(self: *Elf, decl_index: Module.Decl.Index, reloc_info: File.
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
assert(decl.link.elf.local_sym_index != 0);
const target = decl.link.elf.local_sym_index;
try decl.link.elf.ensureInitialized(self);
const target = decl.link.elf.getSymbolIndex().?;
const vaddr = self.local_symbols.items[target].st_value;
const atom = self.atom_by_index_table.get(reloc_info.parent_atom_index).?;
const gop = try self.relocs.getOrPut(self.base.allocator, atom);
@@ -496,7 +448,7 @@ fn makeString(self: *Elf, bytes: []const u8) !u32 {
return @intCast(u32, result);
}
fn getString(self: Elf, str_off: u32) []const u8 {
pub fn getString(self: Elf, str_off: u32) []const u8 {
assert(str_off < self.shstrtab.items.len);
return mem.sliceTo(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off), 0);
}
@@ -941,7 +893,7 @@ fn growAllocSection(self: *Elf, shdr_index: u16, phdr_index: u16, needed_size: u
// Must move the entire section.
const new_offset = self.findFreeSpace(needed_size, self.page_size);
const existing_size = if (self.atoms.get(phdr_index)) |last| blk: {
const sym = self.local_symbols.items[last.local_sym_index];
const sym = last.getSymbol(self);
break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
} else if (shdr_index == self.got_section_index.?) blk: {
break :blk shdr.sh_size;
@@ -1079,7 +1031,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
while (it.next()) |entry| {
const atom = entry.key_ptr.*;
const relocs = entry.value_ptr.*;
const source_sym = self.local_symbols.items[atom.local_sym_index];
const source_sym = atom.getSymbol(self);
const source_shdr = self.sections.items[source_sym.st_shndx];
log.debug("relocating '{s}'", .{self.getString(source_sym.st_name)});
@@ -2082,11 +2034,13 @@ fn writeElfHeader(self: *Elf) !void {
}
fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
const local_sym = self.local_symbols.items[text_block.local_sym_index];
const local_sym = text_block.getSymbol(self);
const name_str_index = local_sym.st_name;
const name = self.getString(name_str_index);
log.debug("freeTextBlock {*} ({s})", .{ text_block, name });
self.freeRelocationsForTextBlock(text_block);
const free_list = self.atom_free_lists.getPtr(phdr_index).?;
var already_have_free_list_node = false;
{
@@ -2118,7 +2072,7 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
if (text_block.prev) |prev| {
prev.next = text_block.next;
if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
if (!already_have_free_list_node and prev.freeListEligible(self)) {
// The free list is heuristics; it doesn't have to be perfect, so we can
// ignore the OOM here.
free_list.append(self.base.allocator, prev) catch {};
@@ -2133,6 +2087,15 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock, phdr_index: u16) void {
text_block.next = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
const local_sym_index = text_block.getSymbolIndex().?;
self.local_symbol_free_list.append(self.base.allocator, local_sym_index) catch {};
self.local_symbols.items[local_sym_index].st_info = 0;
_ = self.atom_by_index_table.remove(local_sym_index);
text_block.local_sym_index = 0;
self.offset_table_free_list.append(self.base.allocator, text_block.offset_table_index) catch {};
if (self.dwarf) |*dw| {
dw.freeAtom(&text_block.dbg_info_atom);
}
@@ -2146,9 +2109,9 @@ fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, phdr
}
fn growTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64, phdr_index: u16) !u64 {
const sym = self.local_symbols.items[text_block.local_sym_index];
const sym = text_block.getSymbol(self);
const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
const need_realloc = !align_ok or new_block_size > text_block.capacity(self);
if (!need_realloc) return sym.st_value;
return self.allocateTextBlock(text_block, new_block_size, alignment, phdr_index);
}
@@ -2176,8 +2139,8 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
const big_block = free_list.items[i];
// We now have a pointer to a live text block that has too much capacity.
// Is it enough that we could fit this new text block?
const sym = self.local_symbols.items[big_block.local_sym_index];
const capacity = big_block.capacity(self.*);
const sym = big_block.getSymbol(self);
const capacity = big_block.capacity(self);
const ideal_capacity = padToIdeal(capacity);
const ideal_capacity_end_vaddr = std.math.add(u64, sym.st_value, ideal_capacity) catch ideal_capacity;
const capacity_end_vaddr = sym.st_value + capacity;
@@ -2187,7 +2150,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
// Additional bookkeeping here to notice if this free list node
// should be deleted because the block that it points to has grown to take up
// more of the extra capacity.
if (!big_block.freeListEligible(self.*)) {
if (!big_block.freeListEligible(self)) {
_ = free_list.swapRemove(i);
} else {
i += 1;
@ -2207,7 +2170,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
}
break :blk new_start_vaddr;
} else if (self.atoms.get(phdr_index)) |last| {
const sym = self.local_symbols.items[last.local_sym_index];
const sym = last.getSymbol(self);
const ideal_capacity = padToIdeal(sym.st_size);
const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
@ -2262,7 +2225,7 @@ fn allocateTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64, al
return vaddr;
}
fn allocateLocalSymbol(self: *Elf) !u32 {
pub fn allocateLocalSymbol(self: *Elf) !u32 {
try self.local_symbols.ensureUnusedCapacity(self.base.allocator, 1);
const index = blk: {
@ -2289,40 +2252,35 @@ fn allocateLocalSymbol(self: *Elf) !u32 {
return index;
}
pub fn allocateDeclIndexes(self: *Elf, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.link.elf.local_sym_index != 0) return;
pub fn allocateGotOffset(self: *Elf) !u32 {
try self.offset_table.ensureUnusedCapacity(self.base.allocator, 1);
try self.decls.putNoClobber(self.base.allocator, decl_index, null);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
const index = blk: {
if (self.offset_table_free_list.popOrNull()) |index| {
log.debug(" (reusing GOT offset at index {d})", .{index});
break :blk index;
} else {
log.debug(" (allocating GOT offset at index {d})", .{self.offset_table.items.len});
const index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
self.offset_table_count_dirty = true;
break :blk index;
}
};
log.debug("allocating symbol indexes for {s}", .{decl_name});
decl.link.elf.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.elf.local_sym_index, &decl.link.elf);
self.offset_table.items[index] = 0;
return index;
}
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.elf.offset_table_index = i;
} else {
decl.link.elf.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
self.offset_table_count_dirty = true;
}
self.offset_table.items[decl.link.elf.offset_table_index] = 0;
fn freeRelocationsForTextBlock(self: *Elf, text_block: *TextBlock) void {
var removed_relocs = self.relocs.fetchRemove(text_block);
if (removed_relocs) |*relocs| relocs.value.deinit(self.base.allocator);
}
fn freeUnnamedConsts(self: *Elf, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeTextBlock(atom, self.phdr_load_ro_index.?);
self.local_symbol_free_list.append(self.base.allocator, atom.local_sym_index) catch {};
self.local_symbols.items[atom.local_sym_index].st_info = 0;
_ = self.atom_by_index_table.remove(atom.local_sym_index);
}
unnamed_consts.clearAndFree(self.base.allocator);
}
@ -2335,20 +2293,13 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const kv = self.decls.fetchRemove(decl_index);
if (kv.?.value) |index| {
self.freeTextBlock(&decl.link.elf, index);
self.freeUnnamedConsts(decl_index);
}
log.debug("freeDecl {*}", .{decl});
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
if (decl.link.elf.local_sym_index != 0) {
self.local_symbol_free_list.append(self.base.allocator, decl.link.elf.local_sym_index) catch {};
self.local_symbols.items[decl.link.elf.local_sym_index].st_info = 0;
_ = self.atom_by_index_table.remove(decl.link.elf.local_sym_index);
decl.link.elf.local_sym_index = 0;
self.offset_table_free_list.append(self.base.allocator, decl.link.elf.offset_table_index) catch {};
if (self.decls.fetchRemove(decl_index)) |kv| {
if (kv.value) |index| {
self.freeTextBlock(&decl.link.elf, index);
self.freeUnnamedConsts(decl_index);
}
}
if (self.dwarf) |*dw| {
@ -2397,10 +2348,9 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
const phdr_index = decl_ptr.*.?;
const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index];
const local_sym = decl.link.elf.getSymbolPtr(self);
if (local_sym.st_size != 0) {
const capacity = decl.link.elf.capacity(self.*);
const capacity = decl.link.elf.capacity(self);
const need_realloc = code.len > capacity or
!mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
if (need_realloc) {
@ -2422,7 +2372,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
local_sym.st_other = 0;
local_sym.st_shndx = shdr_index;
// TODO this write could be avoided if no fields of the symbol were changed.
try self.writeSymbol(decl.link.elf.local_sym_index);
try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
} else {
const name_str_index = try self.makeString(decl_name);
const vaddr = try self.allocateTextBlock(&decl.link.elf, code.len, required_alignment, phdr_index);
@ -2439,7 +2389,7 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
};
self.offset_table.items[decl.link.elf.offset_table_index] = vaddr;
try self.writeSymbol(decl.link.elf.local_sym_index);
try self.writeSymbol(decl.link.elf.getSymbolIndex().?);
try self.writeOffsetTableEntry(decl.link.elf.offset_table_index);
}
@ -2461,12 +2411,20 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
const tracy = trace(@src());
defer tracy.end();
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
const atom = &decl.link.elf;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForTextBlock(atom);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
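The getOrPut sequence above is the heart of the new API: with allocateDeclIndexes gone, the backend notices a decl the first time updateFunc/updateDecl is called, and an existing entry means there is stale state (relocations, unnamed consts) to free first. The same shape recurs in updateDecl below and in the MachO and Wasm backends. A standalone sketch of the discover-on-first-update pattern, using a plain hash map in place of the real decls table (names assumed for illustration):

const std = @import("std");

test "notice a decl on its first update" {
    const gpa = std.testing.allocator;
    // Maps a decl index to its lazily chosen output section, like `decls`.
    var decls = std.AutoHashMapUnmanaged(u32, ?u16){};
    defer decls.deinit(gpa);

    var stale_frees: u32 = 0;
    var i: u32 = 0;
    while (i < 2) : (i += 1) {
        const gop = try decls.getOrPut(gpa, 42);
        if (gop.found_existing) {
            stale_frees += 1; // later updates free stale relocations first
        } else {
            gop.value_ptr.* = null; // first sighting: no section chosen yet
        }
    }
    try std.testing.expectEqual(@as(u32, 1), stale_frees);
}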
@ -2527,6 +2485,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
assert(!self.unnamed_const_atoms.contains(decl_index));
const atom = &decl.link.elf;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeRelocationsForTextBlock(atom);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -2542,14 +2509,14 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
}, &code_buffer, .{
.dwarf = ds,
}, .{
.parent_atom_index = decl.link.elf.local_sym_index,
.parent_atom_index = decl.link.elf.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
.parent_atom_index = decl.link.elf.local_sym_index,
.parent_atom_index = decl.link.elf.getSymbolIndex().?,
});
const code = switch (res) {
@ -2593,6 +2560,8 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const atom = try self.base.allocator.create(TextBlock);
errdefer self.base.allocator.destroy(atom);
atom.* = TextBlock.empty;
// TODO for unnamed consts we don't need GOT offset/entry allocated
try atom.ensureInitialized(self);
try self.managed_atoms.append(self.base.allocator, atom);
const name_str_index = blk: {
@ -2607,14 +2576,10 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
};
const name = self.getString(name_str_index);
log.debug("allocating symbol indexes for {s}", .{name});
atom.local_sym_index = try self.allocateLocalSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, atom.local_sym_index, atom);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = atom.local_sym_index,
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@ -2634,7 +2599,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
const local_sym = &self.local_symbols.items[atom.local_sym_index];
const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
.st_name = name_str_index,
.st_info = (elf.STB_LOCAL << 4) | elf.STT_OBJECT,
@ -2644,14 +2609,14 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
.st_size = code.len,
};
try self.writeSymbol(atom.local_sym_index);
try self.writeSymbol(atom.getSymbolIndex().?);
try unnamed_consts.append(self.base.allocator, atom);
const section_offset = local_sym.st_value - self.program_headers.items[phdr_index].p_vaddr;
const file_offset = self.sections.items[shdr_index].sh_offset + section_offset;
try self.base.file.?.pwriteAll(code, file_offset);
return atom.local_sym_index;
return atom.getSymbolIndex().?;
}
pub fn updateDeclExports(
@ -2670,16 +2635,19 @@ pub fn updateDeclExports(
const tracy = trace(@src());
defer tracy.end();
try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
const decl = module.declPtr(decl_index);
if (decl.link.elf.local_sym_index == 0) return;
const decl_sym = self.local_symbols.items[decl.link.elf.local_sym_index];
const atom = &decl.link.elf;
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = try self.getDeclPhdrIndex(decl);
if (atom.getSymbolIndex() == null) return;
const decl_sym = atom.getSymbol(self);
try self.global_symbols.ensureUnusedCapacity(self.base.allocator, exports.len);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = try self.getDeclPhdrIndex(decl);
}
const phdr_index = decl_ptr.*.?;
const phdr_index = gop.value_ptr.*.?;
const shdr_index = self.phdr_shdr_table.get(phdr_index).?;
for (exports) |exp| {
@ -3040,7 +3008,7 @@ fn getLDMOption(target: std.Target) ?[]const u8 {
}
}
fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
return actual_size +| (actual_size / ideal_factor);
}
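padToIdeal becomes pub so the new Atom.zig can share the growth heuristic; note that +| is a saturating add, so oversized inputs clamp at the type's maximum instead of overflowing. A sketch with an assumed ideal_factor (the real constant lives in Elf.zig and may differ):

const std = @import("std");

// Same shape as the linker's helper; this `ideal_factor` is an assumed
// value for illustration, not necessarily the constant Elf.zig uses.
const ideal_factor = 4;

fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
    return actual_size +| (actual_size / ideal_factor);
}

test "padToIdeal grows by 1/ideal_factor and saturates" {
    try std.testing.expectEqual(@as(u64, 1280), padToIdeal(@as(u64, 1024)));
    // +| clamps at the type's maximum instead of overflowing:
    try std.testing.expectEqual(@as(u8, 255), padToIdeal(@as(u8, 250)));
}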

src/link/Elf/Atom.zig Normal file
View File

@ -0,0 +1,96 @@
const Atom = @This();
const std = @import("std");
const assert = std.debug.assert;
const elf = std.elf;
const Dwarf = @import("../Dwarf.zig");
const Elf = @import("../Elf.zig");
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
/// the symbol references, and adding that to the file offset of the section.
/// If this field is 0, it means the codegen size = 0 and there is no symbol or
/// offset table entry.
local_sym_index: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*Atom,
next: ?*Atom,
dbg_info_atom: Dwarf.Atom,
pub const empty = Atom{
.local_sym_index = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
.dbg_info_atom = undefined,
};
pub fn ensureInitialized(self: *Atom, elf_file: *Elf) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.local_sym_index = try elf_file.allocateLocalSymbol();
self.offset_table_index = try elf_file.allocateGotOffset();
try elf_file.atom_by_index_table.putNoClobber(elf_file.base.allocator, self.local_sym_index, self);
}
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.local_sym_index == 0) return null;
return self.local_sym_index;
}
pub fn getSymbol(self: Atom, elf_file: *Elf) elf.Elf64_Sym {
const sym_index = self.getSymbolIndex().?;
return elf_file.local_symbols.items[sym_index];
}
pub fn getSymbolPtr(self: Atom, elf_file: *Elf) *elf.Elf64_Sym {
const sym_index = self.getSymbolIndex().?;
return &elf_file.local_symbols.items[sym_index];
}
pub fn getName(self: Atom, elf_file: *Elf) []const u8 {
const sym = self.getSymbol(elf_file);
return elf_file.getString(sym.st_name);
}
pub fn getOffsetTableAddress(self: Atom, elf_file: *Elf) u64 {
assert(self.getSymbolIndex() != null);
const target = elf_file.base.options.target;
const ptr_bits = target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got = elf_file.program_headers.items[elf_file.phdr_got_index.?];
return got.p_vaddr + self.offset_table_index * ptr_bytes;
}
/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, elf_file: *Elf) u64 {
const self_sym = self.getSymbol(elf_file);
if (self.next) |next| {
const next_sym = next.getSymbol(elf_file);
return next_sym.st_value - self_sym.st_value;
} else {
// We are the last block. The capacity is limited only by virtual address space.
return std.math.maxInt(u32) - self_sym.st_value;
}
}
pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const self_sym = self.getSymbol(elf_file);
const next_sym = next.getSymbol(elf_file);
const cap = next_sym.st_value - self_sym.st_value;
const ideal_cap = Elf.padToIdeal(self_sym.st_size);
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= Elf.min_text_capacity;
}
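The new Atom.zig gathers the patterns the rest of the diff relies on: index 0 as a "never initialized" sentinel behind an optional accessor, GOT slot addresses computed by plain array arithmetic off the GOT segment base, and free-list eligibility derived from the gap to the next atom. A self-contained sketch of all three, with assumed numbers and illustrative names (not the linker's real constants):

const std = @import("std");

const SketchAtom = struct {
    sym_index: u32 = 0, // 0 doubles as the "never initialized" sentinel

    // Idempotent: every update path may call this; only the first call allocates.
    fn ensureInitialized(self: *SketchAtom, next_index: *u32) void {
        if (self.getSymbolIndex() != null) return;
        self.sym_index = next_index.*;
        next_index.* += 1;
    }

    fn getSymbolIndex(self: SketchAtom) ?u32 {
        if (self.sym_index == 0) return null;
        return self.sym_index;
    }
};

test "sentinel index, lazy init, optional accessor" {
    var next_index: u32 = 1; // index 0 stays reserved as the sentinel
    var atom = SketchAtom{};
    try std.testing.expect(atom.getSymbolIndex() == null);
    atom.ensureInitialized(&next_index);
    atom.ensureInitialized(&next_index); // no-op the second time
    try std.testing.expectEqual(@as(?u32, 1), atom.getSymbolIndex());
}

test "GOT slot address arithmetic" {
    const ptr_bytes: u64 = @divExact(64, 8); // 64-bit target => 8-byte slots
    const got_p_vaddr: u64 = 0x200000; // assumed GOT segment base address
    const offset_table_index: u64 = 3;
    try std.testing.expectEqual(@as(u64, 0x200018), got_p_vaddr + offset_table_index * ptr_bytes);
}

test "free-list eligibility from the gap to the next atom" {
    const self_st_value: u64 = 0x1000; // this atom's address (assumed)
    const next_st_value: u64 = 0x2000; // successor's address (assumed)
    const ideal_cap: u64 = 0x180; // stand-in for padToIdeal(st_size)
    const min_text_capacity: u64 = 0x100; // assumed threshold
    const cap = next_st_value - self_st_value; // capacity is the gap: 0x1000
    const surplus = cap - ideal_cap;
    try std.testing.expect(surplus >= min_text_capacity); // eligible for a free-list node
}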

View File

@ -1056,19 +1056,14 @@ pub fn allocateSpecialSymbols(self: *MachO) !void {
pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom {
const gpa = self.base.allocator;
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
break :blk atom;
};
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
errdefer gpa.destroy(atom);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const sym = atom.getSymbolPtr(self);
sym.n_type = macho.N_SECT;
@ -1109,15 +1104,11 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
const gpa = self.base.allocator;
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
break :blk atom;
};
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
@ -1126,7 +1117,6 @@ pub fn createDyldPrivateAtom(self: *MachO) !void {
self.dyld_private_atom = atom;
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
log.debug("allocated dyld_private atom at 0x{x}", .{sym.n_value});
@ -1144,18 +1134,14 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
.aarch64 => 6 * @sizeOf(u32),
else => unreachable,
};
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
break :blk atom;
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
errdefer gpa.destroy(atom);
@ -1163,7 +1149,7 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
sym.n_type = macho.N_SECT;
sym.n_sect = self.stub_helper_section_index.? + 1;
const dyld_private_sym_index = self.dyld_private_atom.?.sym_index;
const dyld_private_sym_index = self.dyld_private_atom.?.getSymbolIndex().?;
const code = try gpa.alloc(u8, size);
defer gpa.free(code);
@ -1258,7 +1244,6 @@ pub fn createStubHelperPreambleAtom(self: *MachO) !void {
self.stub_helper_preamble_atom = atom;
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
log.debug("allocated stub preamble atom at 0x{x}", .{sym.n_value});
@ -1273,18 +1258,14 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable,
};
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
break :blk atom;
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable,
};
errdefer gpa.destroy(atom);
@ -1306,7 +1287,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
try atom.addRelocation(self, .{
.type = @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
.target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null },
.target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
.offset = 6,
.addend = 0,
.pcrel = true,
@ -1329,7 +1310,7 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
try atom.addRelocation(self, .{
.type = @enumToInt(macho.reloc_type_arm64.ARM64_RELOC_BRANCH26),
.target = .{ .sym_index = self.stub_helper_preamble_atom.?.sym_index, .file = null },
.target = .{ .sym_index = self.stub_helper_preamble_atom.?.getSymbolIndex().?, .file = null },
.offset = 4,
.addend = 0,
.pcrel = true,
@ -1340,7 +1321,6 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
}
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
log.debug("allocated stub helper atom at 0x{x}", .{sym.n_value});
@ -1351,15 +1331,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom {
pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWithLoc) !*Atom {
const gpa = self.base.allocator;
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
break :blk atom;
};
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = @sizeOf(u64);
atom.alignment = @alignOf(u64);
errdefer gpa.destroy(atom);
const sym = atom.getSymbolPtr(self);
@ -1385,7 +1361,6 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi
});
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
sym.n_value = try self.allocateAtom(atom, atom.size, @alignOf(u64));
log.debug("allocated lazy pointer atom at 0x{x} ({s})", .{ sym.n_value, self.getSymbolName(target) });
@ -1402,19 +1377,15 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
.aarch64 => 3 * @sizeOf(u32),
else => unreachable, // unhandled architecture type
};
const sym_index = try self.allocateSymbol();
const atom = blk: {
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
atom.sym_index = sym_index;
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable, // unhandled architecture type
const atom = try gpa.create(Atom);
atom.* = Atom.empty;
try atom.ensureInitialized(self);
atom.size = size;
atom.alignment = switch (arch) {
.x86_64 => 1,
.aarch64 => @alignOf(u32),
else => unreachable, // unhandled architecture type
};
break :blk atom;
};
errdefer gpa.destroy(atom);
@ -1476,7 +1447,6 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom {
}
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, sym_index, atom);
sym.n_value = try self.allocateAtom(atom, size, atom.alignment);
log.debug("allocated stub atom at 0x{x}", .{sym.n_value});
@ -1617,9 +1587,9 @@ pub fn resolveSymbolsInDylibs(self: *MachO) !void {
const stub_index = try self.allocateStubEntry(global);
const stub_helper_atom = try self.createStubHelperAtom();
const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.sym_index, global);
const stub_atom = try self.createStubAtom(laptr_atom.sym_index);
self.stubs.items[stub_index].sym_index = stub_atom.sym_index;
const laptr_atom = try self.createLazyPointerAtom(stub_helper_atom.getSymbolIndex().?, global);
const stub_atom = try self.createStubAtom(laptr_atom.getSymbolIndex().?);
self.stubs.items[stub_index].sym_index = stub_atom.getSymbolIndex().?;
self.markRelocsDirtyByTarget(global);
}
@ -1717,7 +1687,7 @@ pub fn resolveDyldStubBinder(self: *MachO) !void {
// Add dyld_stub_binder as the final GOT entry.
const got_index = try self.allocateGotEntry(global);
const got_atom = try self.createGotAtom(global);
self.got_entries.items[got_index].sym_index = got_atom.sym_index;
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(got_atom);
}
@ -1811,6 +1781,8 @@ pub fn deinit(self: *MachO) void {
fn freeAtom(self: *MachO, atom: *Atom) void {
log.debug("freeAtom {*}", .{atom});
const gpa = self.base.allocator;
// Remove any relocs and base relocs associated with this Atom
self.freeRelocationsForAtom(atom);
@ -1850,7 +1822,7 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
if (!already_have_free_list_node and prev.freeListEligible(self)) {
// The free list is a heuristic; it doesn't have to be perfect, so we can ignore
// the OOM here.
free_list.append(self.base.allocator, prev) catch {};
free_list.append(gpa, prev) catch {};
}
} else {
atom.prev = null;
@ -1862,6 +1834,33 @@ fn freeAtom(self: *MachO, atom: *Atom) void {
atom.next = null;
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
const sym_index = atom.getSymbolIndex().?;
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
if (self.d_sym) |*d_sym| {
d_sym.swapRemoveRelocs(sym_index);
}
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
}
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
atom.sym_index = 0;
if (self.d_sym) |*d_sym| {
d_sym.dwarf.freeAtom(&atom.dbg_info_atom);
}
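Freeing a MachO atom now also tombstones any GOT entry keyed by the atom's SymbolWithLoc: the dense index goes on a free list, the entry is zeroed in place, and the key is dropped from the lookup table. A self-contained sketch of that paired array-plus-table bookkeeping, with simplified types for illustration only:

const std = @import("std");

const SymbolWithLoc = struct { sym_index: u32, file: ?u32 };

test "tombstone a GOT entry keyed by SymbolWithLoc" {
    const gpa = std.testing.allocator;
    var got_entries = std.ArrayListUnmanaged(SymbolWithLoc){}; // dense entries (simplified)
    defer got_entries.deinit(gpa);
    var got_entries_table = std.AutoHashMapUnmanaged(SymbolWithLoc, u32){}; // target -> dense index
    defer got_entries_table.deinit(gpa);
    var got_entries_free_list = std.ArrayListUnmanaged(u32){};
    defer got_entries_free_list.deinit(gpa);

    const target = SymbolWithLoc{ .sym_index = 7, .file = null };
    try got_entries.append(gpa, target);
    try got_entries_table.put(gpa, target, 0);

    // Freeing: recycle the dense index, zero the entry in place, drop the key.
    if (got_entries_table.get(target)) |got_index| {
        got_entries_free_list.append(gpa, got_index) catch {};
        got_entries.items[got_index] = .{ .sym_index = 0, .file = null };
        _ = got_entries_table.remove(target);
    }
    try std.testing.expect(got_entries_table.get(target) == null);
}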
@ -1883,7 +1882,7 @@ fn growAtom(self: *MachO, atom: *Atom, new_atom_size: u64, alignment: u64) !u64
return self.allocateAtom(atom, new_atom_size, alignment);
}
fn allocateSymbol(self: *MachO) !u32 {
pub fn allocateSymbol(self: *MachO) !u32 {
try self.locals.ensureUnusedCapacity(self.base.allocator, 1);
const index = blk: {
@ -1975,16 +1974,6 @@ pub fn allocateStubEntry(self: *MachO, target: SymbolWithLoc) !u32 {
return index;
}
pub fn allocateDeclIndexes(self: *MachO, decl_index: Module.Decl.Index) !void {
if (self.llvm_object) |_| return;
const decl = self.base.options.module.?.declPtr(decl_index);
if (decl.link.macho.sym_index != 0) return;
decl.link.macho.sym_index = try self.allocateSymbol();
try self.atom_by_index_table.putNoClobber(self.base.allocator, decl.link.macho.sym_index, &decl.link.macho);
try self.decls.putNoClobber(self.base.allocator, decl_index, null);
}
pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
@ -1997,8 +1986,15 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(&decl.link.macho);
const atom = &decl.link.macho;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeUnnamedConsts(decl_index);
self.freeRelocationsForAtom(atom);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -2072,14 +2068,11 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
const atom = try gpa.create(Atom);
errdefer gpa.destroy(atom);
atom.* = Atom.empty;
atom.sym_index = try self.allocateSymbol();
try atom.ensureInitialized(self);
try self.managed_atoms.append(gpa, atom);
try self.atom_by_index_table.putNoClobber(gpa, atom.sym_index, atom);
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
.parent_atom_index = atom.sym_index,
.parent_atom_index = atom.getSymbolIndex().?,
});
const code = switch (res) {
.ok => code_buffer.items,
@ -2111,7 +2104,7 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
try self.writeAtom(atom, code);
return atom.sym_index;
return atom.getSymbolIndex().?;
}
pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
@ -2136,7 +2129,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}
}
self.freeRelocationsForAtom(&decl.link.macho);
const atom = &decl.link.macho;
try atom.ensureInitialized(self);
const gop = try self.decls.getOrPut(self.base.allocator, decl_index);
if (gop.found_existing) {
self.freeRelocationsForAtom(atom);
} else {
gop.value_ptr.* = null;
}
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -2155,14 +2155,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
}, &code_buffer, .{
.dwarf = ds,
}, .{
.parent_atom_index = decl.link.macho.sym_index,
.parent_atom_index = decl.link.macho.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
.parent_atom_index = decl.link.macho.sym_index,
.parent_atom_index = decl.link.macho.getSymbolIndex().?,
});
const code = switch (res) {
@ -2337,12 +2337,12 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
const decl = mod.declPtr(decl_index);
const required_alignment = decl.getAlignment(self.base.options.target);
assert(decl.link.macho.sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const sym_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(sym_name);
const atom = &decl.link.macho;
const sym_index = atom.getSymbolIndex().?; // Atom was not initialized
const decl_ptr = self.decls.getPtr(decl_index).?;
if (decl_ptr.* == null) {
decl_ptr.* = self.getDeclOutputSection(decl);
@ -2368,7 +2368,7 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
if (vaddr != sym.n_value) {
sym.n_value = vaddr;
log.debug(" (updating GOT entry)", .{});
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_atom = self.getGotAtomForSymbol(got_target).?;
self.markRelocsDirtyByTarget(got_target);
try self.writePtrWidthAtom(got_atom);
@ -2399,10 +2399,10 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []const u8)
atom.size = code_len;
sym.n_value = vaddr;
const got_target = SymbolWithLoc{ .sym_index = atom.sym_index, .file = null };
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const got_index = try self.allocateGotEntry(got_target);
const got_atom = try self.createGotAtom(got_target);
self.got_entries.items[got_index].sym_index = got_atom.sym_index;
self.got_entries.items[got_index].sym_index = got_atom.getSymbolIndex().?;
try self.writePtrWidthAtom(got_atom);
}
@ -2438,8 +2438,16 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
if (decl.link.macho.sym_index == 0) return;
const decl_sym = decl.link.macho.getSymbol(self);
const atom = &decl.link.macho;
if (atom.getSymbolIndex() == null) return;
const gop = try self.decls.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = self.getDeclOutputSection(decl);
}
const decl_sym = atom.getSymbol(self);
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
@ -2573,11 +2581,6 @@ fn freeUnnamedConsts(self: *MachO, decl_index: Module.Decl.Index) void {
const unnamed_consts = self.unnamed_const_atoms.getPtr(decl_index) orelse return;
for (unnamed_consts.items) |atom| {
self.freeAtom(atom);
self.locals_free_list.append(gpa, atom.sym_index) catch {};
self.locals.items[atom.sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(atom.sym_index);
log.debug(" adding local symbol index {d} to free list", .{atom.sym_index});
atom.sym_index = 0;
}
unnamed_consts.clearAndFree(gpa);
}
@ -2591,39 +2594,11 @@ pub fn freeDecl(self: *MachO, decl_index: Module.Decl.Index) void {
log.debug("freeDecl {*}", .{decl});
const kv = self.decls.fetchSwapRemove(decl_index);
if (kv.?.value) |_| {
self.freeAtom(&decl.link.macho);
self.freeUnnamedConsts(decl_index);
}
// Appending to free lists is allowed to fail because the free lists are heuristics-based anyway.
const gpa = self.base.allocator;
const sym_index = decl.link.macho.sym_index;
if (sym_index != 0) {
self.locals_free_list.append(gpa, sym_index) catch {};
// Try freeing GOT atom if this decl had one
const got_target = SymbolWithLoc{ .sym_index = sym_index, .file = null };
if (self.got_entries_table.get(got_target)) |got_index| {
self.got_entries_free_list.append(gpa, @intCast(u32, got_index)) catch {};
self.got_entries.items[got_index] = .{
.target = .{ .sym_index = 0, .file = null },
.sym_index = 0,
};
_ = self.got_entries_table.remove(got_target);
if (self.d_sym) |*d_sym| {
d_sym.swapRemoveRelocs(sym_index);
}
log.debug(" adding GOT index {d} to free list (target local@{d})", .{ got_index, sym_index });
if (self.decls.fetchSwapRemove(decl_index)) |kv| {
if (kv.value) |_| {
self.freeAtom(&decl.link.macho);
self.freeUnnamedConsts(decl_index);
}
self.locals.items[sym_index].n_type = 0;
_ = self.atom_by_index_table.remove(sym_index);
log.debug(" adding local symbol index {d} to free list", .{sym_index});
decl.link.macho.sym_index = 0;
}
if (self.d_sym) |*d_sym| {
@ -2636,7 +2611,9 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
const decl = mod.declPtr(decl_index);
assert(self.llvm_object == null);
assert(decl.link.macho.sym_index != 0);
try decl.link.macho.ensureInitialized(self);
const sym_index = decl.link.macho.getSymbolIndex().?;
const atom = self.getAtomForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?;
try atom.addRelocation(self, .{
@ -2645,7 +2622,7 @@ pub fn getDeclVAddr(self: *MachO, decl_index: Module.Decl.Index, reloc_info: Fil
.x86_64 => @enumToInt(macho.reloc_type_x86_64.X86_64_RELOC_UNSIGNED),
else => unreachable,
},
.target = .{ .sym_index = decl.link.macho.sym_index, .file = null },
.target = .{ .sym_index = sym_index, .file = null },
.offset = @intCast(u32, reloc_info.offset),
.addend = reloc_info.addend,
.pcrel = false,
@ -3179,7 +3156,7 @@ fn collectRebaseData(self: *MachO, rebase: *Rebase) !void {
const slice = self.sections.slice();
for (self.rebases.keys()) |atom, i| {
log.debug(" ATOM(%{d}, '{s}')", .{ atom.sym_index, atom.getName(self) });
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
const segment_index = slice.items(.segment_index)[sym.n_sect - 1];
@ -3208,7 +3185,7 @@ fn collectBindData(self: *MachO, bind: anytype, raw_bindings: anytype) !void {
const slice = self.sections.slice();
for (raw_bindings.keys()) |atom, i| {
log.debug(" ATOM(%{d}, '{s}')", .{ atom.sym_index, atom.getName(self) });
log.debug(" ATOM(%{?d}, '{s}')", .{ atom.getSymbolIndex(), atom.getName(self) });
const sym = atom.getSymbol(self);
const segment_index = slice.items(.segment_index)[sym.n_sect - 1];
@ -4277,8 +4254,8 @@ pub fn logAtoms(self: *MachO) void {
pub fn logAtom(self: *MachO, atom: *const Atom) void {
const sym = atom.getSymbol(self);
const sym_name = atom.getName(self);
log.debug(" ATOM(%{d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
atom.sym_index,
log.debug(" ATOM(%{?d}, '{s}') @ {x} (sizeof({x}), alignof({x})) in object({?d}) in sect({d})", .{
atom.getSymbolIndex(),
sym_name,
sym.n_value,
atom.size,

View File

@ -64,6 +64,17 @@ pub const empty = Atom{
.dbg_info_atom = undefined,
};
pub fn ensureInitialized(self: *Atom, macho_file: *MachO) !void {
if (self.getSymbolIndex() != null) return; // Already initialized
self.sym_index = try macho_file.allocateSymbol();
try macho_file.atom_by_index_table.putNoClobber(macho_file.base.allocator, self.sym_index, self);
}
pub fn getSymbolIndex(self: Atom) ?u32 {
if (self.sym_index == 0) return null;
return self.sym_index;
}
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, macho_file: *MachO) macho.nlist_64 {
return self.getSymbolPtr(macho_file).*;
@ -71,20 +82,23 @@ pub fn getSymbol(self: Atom, macho_file: *MachO) macho.nlist_64 {
/// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, macho_file: *MachO) *macho.nlist_64 {
const sym_index = self.getSymbolIndex().?;
return macho_file.getSymbolPtr(.{
.sym_index = self.sym_index,
.sym_index = sym_index,
.file = self.file,
});
}
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
return .{ .sym_index = self.sym_index, .file = self.file };
const sym_index = self.getSymbolIndex().?;
return .{ .sym_index = sym_index, .file = self.file };
}
/// Returns the name of this atom.
pub fn getName(self: Atom, macho_file: *MachO) []const u8 {
const sym_index = self.getSymbolIndex().?;
return macho_file.getSymbolName(.{
.sym_index = self.sym_index,
.sym_index = sym_index,
.file = self.file,
});
}
@ -144,7 +158,7 @@ pub fn addRelocations(
pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding rebase at offset 0x{x} in %{d})", .{ offset, self.sym_index });
log.debug(" (adding rebase at offset 0x{x} in %{?d})", .{ offset, self.getSymbolIndex() });
const gop = try macho_file.rebases.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
@ -154,10 +168,10 @@ pub fn addRebase(self: *Atom, macho_file: *MachO, offset: u32) !void {
pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{d})", .{
log.debug(" (adding binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
self.getSymbolIndex(),
});
const gop = try macho_file.bindings.getOrPut(gpa, self);
if (!gop.found_existing) {
@ -168,10 +182,10 @@ pub fn addBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
pub fn addLazyBinding(self: *Atom, macho_file: *MachO, binding: Binding) !void {
const gpa = macho_file.base.allocator;
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{d})", .{
log.debug(" (adding lazy binding to symbol {s} at offset 0x{x} in %{?d})", .{
macho_file.getSymbolName(binding.target),
binding.offset,
self.sym_index,
self.getSymbolIndex(),
});
const gop = try macho_file.lazy_bindings.getOrPut(gpa, self);
if (!gop.found_existing) {

View File

@ -424,7 +424,7 @@ fn updateFinish(self: *Plan9, decl: *Module.Decl) !void {
// write the internal linker metadata
decl.link.plan9.type = sym_t;
// write the symbol
// we already have the got index because that got allocated in allocateDeclIndexes
// we already have the got index
const sym: aout.Sym = .{
.value = undefined, // the actual value gets filled in during flushModule
.type = decl.link.plan9.type,
@ -737,7 +737,7 @@ fn addDeclExports(
pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// TODO audit the lifetimes of decls table entries. It's possible to get
// allocateDeclIndexes and then freeDecl without any updateDecl in between.
// freeDecl without any updateDecl in between.
// However that is planned to change, see the TODO comment in Module.zig
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
@ -959,11 +959,6 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
}
}
/// this will be removed, moved to updateFinish
pub fn allocateDeclIndexes(self: *Plan9, decl_index: Module.Decl.Index) !void {
_ = self;
_ = decl_index;
}
/// Must be called only after a successful call to `updateDecl`.
pub fn updateDeclLineNumber(self: *Plan9, mod: *Module, decl: *const Module.Decl) !void {
_ = self;

View File

@ -986,31 +986,23 @@ pub fn deinit(wasm: *Wasm) void {
}
}
pub fn allocateDeclIndexes(wasm: *Wasm, decl_index: Module.Decl.Index) !void {
if (wasm.llvm_object) |_| return;
const decl = wasm.base.options.module.?.declPtr(decl_index);
if (decl.link.wasm.sym_index != 0) return;
/// Allocates a new symbol and returns its index.
/// Will re-use slots when a symbol was freed at an earlier stage.
pub fn allocateSymbol(wasm: *Wasm) !u32 {
try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
try wasm.decls.putNoClobber(wasm.base.allocator, decl_index, {});
const atom = &decl.link.wasm;
var symbol: Symbol = .{
.name = undefined, // will be set after updateDecl
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
.tag = undefined, // will be set after updateDecl
.index = undefined, // will be set after updateDecl
};
if (wasm.symbols_free_list.popOrNull()) |index| {
atom.sym_index = index;
wasm.symbols.items[index] = symbol;
} else {
atom.sym_index = @intCast(u32, wasm.symbols.items.len);
wasm.symbols.appendAssumeCapacity(symbol);
return index;
}
try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom);
const index = @intCast(u32, wasm.symbols.items.len);
wasm.symbols.appendAssumeCapacity(symbol);
return index;
}
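Note the ordering in the new allocateSymbol: the single fallible step (ensureUnusedCapacity) runs before any state is touched, so the free-list pop and the append below it cannot fail halfway through and leave the tables torn. A standalone sketch of that reserve-then-mutate pattern, with stand-in containers rather than Wasm's real tables:

const std = @import("std");

test "reserve capacity before mutating allocator state" {
    const gpa = std.testing.allocator;
    var symbols = std.ArrayListUnmanaged(u32){};
    defer symbols.deinit(gpa);
    var symbols_free_list = std.ArrayListUnmanaged(u32){};
    defer symbols_free_list.deinit(gpa);

    // The single fallible step comes first ...
    try symbols.ensureUnusedCapacity(gpa, 1);
    // ... so the slot reuse / append below is infallible.
    const index = symbols_free_list.popOrNull() orelse blk: {
        const i = @intCast(u32, symbols.items.len);
        symbols.appendAssumeCapacity(0); // placeholder symbol, filled in later
        break :blk i;
    };
    try std.testing.expectEqual(@as(u32, 0), index);
}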
pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
@ -1026,9 +1018,12 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();
const atom = &decl.link.wasm;
try atom.ensureInitialized(wasm);
const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
if (gop.found_existing) {
atom.clear();
} else gop.value_ptr.* = {};
var decl_state: ?Dwarf.DeclState = if (wasm.dwarf) |*dwarf| try dwarf.initDeclState(mod, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
@ -1083,16 +1078,19 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
defer tracy.end();
const decl = mod.declPtr(decl_index);
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();
if (decl.val.castTag(.function)) |_| {
return;
} else if (decl.val.castTag(.extern_fn)) |_| {
return;
}
const atom = &decl.link.wasm;
try atom.ensureInitialized(wasm);
const gop = try wasm.decls.getOrPut(wasm.base.allocator, decl_index);
if (gop.found_existing) {
atom.clear();
} else gop.value_ptr.* = {};
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
@ -1148,8 +1146,8 @@ fn finishUpdateDecl(wasm: *Wasm, decl: *Module.Decl, code: []const u8) !void {
try atom.code.appendSlice(wasm.base.allocator, code);
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
if (code.len == 0) return;
atom.size = @intCast(u32, code.len);
if (code.len == 0) return;
atom.alignment = decl.ty.abiAlignment(wasm.base.options.target);
}
@ -1211,28 +1209,19 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
defer wasm.base.allocator.free(name);
var symbol: Symbol = .{
.name = try wasm.string_table.put(wasm.base.allocator, name),
.flags = 0,
.tag = .data,
.index = undefined,
};
symbol.setFlag(.WASM_SYM_BINDING_LOCAL);
const atom = try decl.link.wasm.locals.addOne(wasm.base.allocator);
atom.* = Atom.empty;
try atom.ensureInitialized(wasm);
atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
try wasm.symbols.ensureUnusedCapacity(wasm.base.allocator, 1);
wasm.symbols.items[atom.sym_index] = .{
.name = try wasm.string_table.put(wasm.base.allocator, name),
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
.tag = .data,
.index = undefined,
};
if (wasm.symbols_free_list.popOrNull()) |index| {
atom.sym_index = index;
wasm.symbols.items[index] = symbol;
} else {
atom.sym_index = @intCast(u32, wasm.symbols.items.len);
wasm.symbols.appendAssumeCapacity(symbol);
}
try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, atom.symbolLoc(), {});
try wasm.symbol_atom.putNoClobber(wasm.base.allocator, atom.symbolLoc(), atom);
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
@ -1304,8 +1293,8 @@ pub fn getDeclVAddr(
) !u64 {
const mod = wasm.base.options.module.?;
const decl = mod.declPtr(decl_index);
try decl.link.wasm.ensureInitialized(wasm);
const target_symbol_index = decl.link.wasm.sym_index;
assert(target_symbol_index != 0);
assert(reloc_info.parent_atom_index != 0);
const atom = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
@ -1363,6 +1352,7 @@ pub fn updateDeclExports(
}
const decl = mod.declPtr(decl_index);
if (decl.link.wasm.getSymbolIndex() == null) return; // uninitialized
for (exports) |exp| {
if (exp.options.section) |section| {

View File

@ -95,6 +95,17 @@ pub fn symbolLoc(atom: Atom) Wasm.SymbolLoc {
return .{ .file = atom.file, .index = atom.sym_index };
}
pub fn ensureInitialized(atom: *Atom, wasm_bin: *Wasm) !void {
if (atom.getSymbolIndex() != null) return; // already initialized
atom.sym_index = try wasm_bin.allocateSymbol();
try wasm_bin.symbol_atom.putNoClobber(wasm_bin.base.allocator, atom.symbolLoc(), atom);
}
pub fn getSymbolIndex(atom: Atom) ?u32 {
if (atom.sym_index == 0) return null;
return atom.sym_index;
}
/// Returns the virtual address of the `Atom`. This is the address starting
/// from the first entry within a section.
pub fn getVA(atom: Atom, wasm: *const Wasm, symbol: *const Symbol) u32 {