compiler: tlv pointers are not comptime-known

Pointers to thread-local variables do not have their addresses known
until runtime, so it is nonsensical for them to be comptime-known. There
was logic in the compiler which was essentially attempting to treat them
as not being comptime-known despite the pointer being an interned value.
This was a bit of a mess; the check was frequent enough to actually show
up in compiler profiles, and it was very awkward for backends to deal
with, because they had to grapple with the fact that a "constant" they
were lowering might actually require runtime operations.

So, instead, do not consider these pointers to be comptime-known in
*any* way. Never intern such a pointer; instead, when the address of a
threadlocal is taken, emit an AIR instruction which computes the pointer
at runtime. This avoids lots of special handling for TLVs across
basically all codegen backends; of all somewhat-functional backends, the
only one which wasn't improved by this change was the LLVM backend,
because LLVM pretends this complexity around threadlocals doesn't exist.

This change simplifies Sema and codegen, avoids a potential source of
bugs, and potentially improves Sema performance very slightly by
avoiding a non-trivial check on a hot path.
This commit is contained in:
mlugg 2025-05-26 22:59:01 +01:00 committed by Matthew Lugg
parent 3ed9155f10
commit 92c63126e8
21 changed files with 242 additions and 290 deletions

View File

@ -1530,6 +1530,12 @@ fn handleSegfaultWindows(info: *windows.EXCEPTION_POINTERS) callconv(.winapi) c_
}
fn handleSegfaultWindowsExtra(info: *windows.EXCEPTION_POINTERS, msg: u8, label: ?[]const u8) noreturn {
// For backends that cannot handle the language features used by this segfault handler, we have a simpler one,
switch (builtin.zig_backend) {
.stage2_x86_64 => if (builtin.target.ofmt == .coff) @trap(),
else => {},
}
comptime assert(windows.CONTEXT != void);
nosuspend switch (panic_stage) {
0 => {

View File

@ -849,6 +849,17 @@ pub const Inst = struct {
/// Uses the `vector_store_elem` field.
vector_store_elem,
/// Compute a pointer to a threadlocal or dllimport `Nav`, meaning one of:
///
/// * `threadlocal var`
/// * `extern threadlocal var` (or corresponding `@extern`)
/// * `@extern` with `.is_dll_import = true`
///
/// Such pointers are runtime values, so cannot be represented with an InternPool index.
///
/// Uses the `ty_nav` field.
tlv_dllimport_ptr,
/// Implements @cVaArg builtin.
/// Uses the `ty_op` field.
c_va_arg,
@ -1150,6 +1161,10 @@ pub const Inst = struct {
// Index into a different array.
payload: u32,
},
ty_nav: struct {
ty: InternPool.Index,
nav: InternPool.Nav.Index,
},
inferred_alloc_comptime: InferredAllocComptime,
inferred_alloc: InferredAlloc,
@ -1604,6 +1619,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
return Type.fromInterned(ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type);
},
.tlv_dllimport_ptr => return .fromInterned(datas[@intFromEnum(inst)].ty_nav.ty),
.work_item_id,
.work_group_size,
.work_group_id,
@ -1876,6 +1893,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
.err_return_trace,
.addrspace_cast,
.save_err_return_trace_index,
.tlv_dllimport_ptr,
.work_item_id,
.work_group_size,
.work_group_id,

View File

@ -311,6 +311,10 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
if (!checkRef(bin.rhs, zcu)) return false;
},
.tlv_dllimport_ptr => {
if (!checkType(.fromInterned(data.ty_nav.ty), zcu)) return false;
},
.select,
.mul_add,
=> {

View File

@ -12036,30 +12036,6 @@ pub fn isVariable(ip: *const InternPool, val: Index) bool {
return val.unwrap(ip).getTag(ip) == .variable;
}
/// Walks a chain of interned pointer values starting at `val`, stepping from
/// each derived pointer (error-union/optional payload, element, field, slice)
/// to the pointer it was derived from, until the base address is reached.
/// Returns the `Nav` backing that base address, or `.none` if the base is not
/// a `Nav`-based pointer.
pub fn getBackingNav(ip: *const InternPool, val: Index) Nav.Index.Optional {
var base = val;
while (true) {
const unwrapped_base = base.unwrap(ip);
const base_item = unwrapped_base.getItem(ip);
switch (base_item.tag) {
// Base case: the pointer addresses a `Nav` directly; read the `nav`
// field straight out of the item's extra data.
.ptr_nav => return @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[
base_item.data + std.meta.fieldIndex(PtrNav, "nav").?
]),
// Derived pointers: continue the walk from their `base` pointer. The
// `inline` capture lets `tag.Payload()` select the right extra layout
// per tag at comptime.
inline .ptr_eu_payload,
.ptr_opt_payload,
.ptr_elem,
.ptr_field,
=> |tag| base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[
base_item.data + std.meta.fieldIndex(tag.Payload(), "base").?
]),
// A slice value: continue from its underlying many-item pointer.
.ptr_slice => base = @enumFromInt(unwrapped_base.getExtra(ip).view().items(.@"0")[
base_item.data + std.meta.fieldIndex(PtrSlice, "ptr").?
]),
// Any other tag means the base address is not a `Nav` (e.g. an
// integer or comptime-allocated address) -- no backing nav.
else => return .none,
}
}
}
pub fn getBackingAddrTag(ip: *const InternPool, val: Index) ?Key.Ptr.BaseAddr.Tag {
var base = val;
while (true) {

View File

@ -334,6 +334,7 @@ pub fn categorizeOperand(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
.tlv_dllimport_ptr,
.c_va_start,
.work_item_id,
.work_group_size,
@ -960,6 +961,7 @@ fn analyzeInst(
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
.tlv_dllimport_ptr,
.c_va_start,
.work_item_id,
.work_group_size,

View File

@ -62,6 +62,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.wasm_memory_size,
.err_return_trace,
.save_err_return_trace_index,
.tlv_dllimport_ptr,
.c_va_start,
.work_item_id,
.work_group_size,

View File

@ -2223,10 +2223,7 @@ fn resolveValue(sema: *Sema, inst: Air.Inst.Ref) CompileError!?Value {
if (inst.toInterned()) |ip_index| {
const val: Value = .fromInterned(ip_index);
assert(val.getVariable(zcu) == null);
if (val.isPtrRuntimeValue(zcu)) return null;
return val;
} else {
// Runtime-known value.
@ -2295,31 +2292,12 @@ pub fn resolveFinalDeclValue(
air_ref: Air.Inst.Ref,
) CompileError!Value {
const zcu = sema.pt.zcu;
const val = try sema.resolveValue(air_ref) orelse {
const is_runtime_ptr = rt_ptr: {
const ip_index = air_ref.toInterned() orelse break :rt_ptr false;
const val: Value = .fromInterned(ip_index);
break :rt_ptr val.isPtrRuntimeValue(zcu);
};
switch (sema.failWithNeededComptime(block, src, .{ .simple = .container_var_init })) {
error.AnalysisFail => |e| {
if (sema.err != null and is_runtime_ptr) {
try sema.errNote(src, sema.err.?, "threadlocal and dll imported variables have runtime-known addresses", .{});
}
return e;
},
else => |e| return e,
}
};
const val = try sema.resolveConstValue(block, src, air_ref, .{ .simple = .container_var_init });
if (val.canMutateComptimeVarState(zcu)) {
const ip = &zcu.intern_pool;
const nav = ip.getNav(sema.owner.unwrap().nav_val);
return sema.failWithContainsReferenceToComptimeVar(block, src, nav.name, "global variable", val);
}
return val;
}
@ -26506,6 +26484,7 @@ fn zirBuiltinExtern(
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
const ty_src = block.builtinCallArgSrc(extra.node, 0);
const options_src = block.builtinCallArgSrc(extra.node, 1);
@ -26560,17 +26539,15 @@ fn zirBuiltinExtern(
},
.owner_nav = undefined, // ignored by `getExtern`
});
const extern_nav = ip.indexToKey(extern_val).@"extern".owner_nav;
return Air.internedToRef((try pt.getCoerced(Value.fromInterned(try pt.intern(.{ .ptr = .{
.ty = switch (ip.indexToKey(ty.toIntern())) {
.ptr_type => ty.toIntern(),
.opt_type => |child_type| child_type,
else => unreachable,
},
.base_addr = .{ .nav = extern_nav },
.byte_offset = 0,
} })), ty)).toIntern());
const uncasted_ptr = try sema.analyzeNavRef(block, src, ip.indexToKey(extern_val).@"extern".owner_nav);
// We want to cast to `ty`, but that isn't necessarily an allowed coercion.
if (try sema.resolveValue(uncasted_ptr)) |uncasted_ptr_val| {
const casted_ptr_val = try pt.getCoerced(uncasted_ptr_val, ty);
return Air.internedToRef(casted_ptr_val.toIntern());
} else {
return block.addBitCast(ty, uncasted_ptr);
}
}
fn zirWorkItem(
@ -32037,7 +32014,20 @@ fn analyzeNavRefInner(sema: *Sema, block: *Block, src: LazySrcLoc, orig_nav_inde
break :nav orig_nav_index;
};
const ty, const alignment, const @"addrspace", const is_const = switch (ip.getNav(nav_index).status) {
const nav_status = ip.getNav(nav_index).status;
const is_tlv_or_dllimport = switch (nav_status) {
.unresolved => unreachable,
// dllimports go straight to `fully_resolved`; the only option is threadlocal
.type_resolved => |r| r.is_threadlocal,
.fully_resolved => |r| switch (ip.indexToKey(r.val)) {
.@"extern" => |e| e.is_threadlocal or e.is_dll_import,
.variable => |v| v.is_threadlocal,
else => false,
},
};
const ty, const alignment, const @"addrspace", const is_const = switch (nav_status) {
.unresolved => unreachable,
.type_resolved => |r| .{ r.type, r.alignment, r.@"addrspace", r.is_const },
.fully_resolved => |r| .{ ip.typeOf(r.val), r.alignment, r.@"addrspace", zcu.navValIsConst(r.val) },
@ -32050,9 +32040,22 @@ fn analyzeNavRefInner(sema: *Sema, block: *Block, src: LazySrcLoc, orig_nav_inde
.address_space = @"addrspace",
},
});
if (is_tlv_or_dllimport) {
// This pointer is runtime-known; we need to emit an AIR instruction to create it.
return block.addInst(.{
.tag = .tlv_dllimport_ptr,
.data = .{ .ty_nav = .{
.ty = ptr_ty.toIntern(),
.nav = nav_index,
} },
});
}
if (is_ref) {
try sema.maybeQueueFuncBodyAnalysis(block, src, nav_index);
}
return Air.internedToRef((try pt.intern(.{ .ptr = .{
.ty = ptr_ty.toIntern(),
.base_addr = .{ .nav = nav_index },

View File

@ -1325,21 +1325,6 @@ pub fn isLazySize(val: Value, zcu: *Zcu) bool {
};
}
/// Reports whether this (pointer) value's address is only known at runtime,
/// i.e. it is ultimately backed by a threadlocal or dll-imported `Nav`.
/// Returns `false` for values with no backing `Nav`.
pub fn isPtrRuntimeValue(val: Value, zcu: *Zcu) bool {
const ip = &zcu.intern_pool;
// No backing nav means the address cannot be runtime-only.
const backing_nav = ip.getBackingNav(val.toIntern()).unwrap() orelse return false;
switch (ip.getNav(backing_nav).status) {
.unresolved => unreachable,
// Only the threadlocal flag is available before full resolution.
.type_resolved => |r| return r.is_threadlocal,
.fully_resolved => |r| switch (ip.indexToKey(r.val)) {
.@"extern" => |e| return e.is_threadlocal or e.is_dll_import,
.variable => |v| return v.is_threadlocal,
else => return false,
},
}
}
// Asserts that the provided start/end are in-bounds.
pub fn sliceArray(
val: Value,

View File

@ -876,6 +876,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
.tlv_dllimport_ptr => return self.fail("TODO implement tlv_dllimport_ptr", .{}),
.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
@ -6168,7 +6169,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.memory => |addr| .{ .memory = addr },
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
.load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
.load_symbol, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.load_symbol, .lea_symbol, .lea_direct => unreachable, // TODO
},
.fail => |msg| return self.failMsg(msg),
};

View File

@ -865,6 +865,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return self.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return self.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return self.fail("TODO implement vector_store_elem", .{}),
.tlv_dllimport_ptr => return self.fail("TODO implement tlv_dllimport_ptr", .{}),
.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
@ -6135,7 +6136,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = @truncate(imm) },
.memory => |addr| .{ .memory = addr },
},

View File

@ -162,12 +162,8 @@ const MCValue = union(enum) {
immediate: u64,
/// The value doesn't exist in memory yet.
load_symbol: SymbolOffset,
/// A TLV value.
load_tlv: u32,
/// The address of the memory location not-yet-allocated by the linker.
lea_symbol: SymbolOffset,
/// The address of a TLV value.
lea_tlv: u32,
/// The value is in a target-specific register.
register: Register,
/// The value is split across two registers
@ -224,7 +220,6 @@ const MCValue = union(enum) {
.lea_frame,
.undef,
.lea_symbol,
.lea_tlv,
.air_ref,
.reserved_frame,
=> false,
@ -233,7 +228,6 @@ const MCValue = union(enum) {
.register_pair,
.register_offset,
.load_symbol,
.load_tlv,
.indirect,
=> true,
@ -254,12 +248,10 @@ const MCValue = union(enum) {
.undef,
.air_ref,
.lea_symbol,
.lea_tlv,
.reserved_frame,
=> unreachable, // not in memory
.load_symbol => |sym_off| .{ .lea_symbol = sym_off },
.load_tlv => |sym| .{ .lea_tlv = sym },
.memory => |addr| .{ .immediate = addr },
.load_frame => |off| .{ .lea_frame = off },
.indirect => |reg_off| switch (reg_off.off) {
@ -281,7 +273,6 @@ const MCValue = union(enum) {
.register_pair,
.load_frame,
.load_symbol,
.load_tlv,
.reserved_frame,
=> unreachable, // not a pointer
@ -290,7 +281,6 @@ const MCValue = union(enum) {
.register_offset => |reg_off| .{ .indirect = reg_off },
.lea_frame => |off| .{ .load_frame = off },
.lea_symbol => |sym_off| .{ .load_symbol = sym_off },
.lea_tlv => |sym| .{ .load_tlv = sym },
};
}
@ -308,8 +298,6 @@ const MCValue = union(enum) {
.indirect,
.load_symbol,
.lea_symbol,
.lea_tlv,
.load_tlv,
=> switch (off) {
0 => mcv,
else => unreachable,
@ -367,8 +355,6 @@ const InstTracking = struct {
.memory,
.load_frame,
.lea_frame,
.load_tlv,
.lea_tlv,
.load_symbol,
.lea_symbol,
=> result,
@ -424,8 +410,6 @@ const InstTracking = struct {
.lea_frame,
.load_symbol,
.lea_symbol,
.load_tlv,
.lea_tlv,
=> inst_tracking.long,
.dead,
.register,
@ -454,8 +438,6 @@ const InstTracking = struct {
.lea_frame,
.load_symbol,
.lea_symbol,
.load_tlv,
.lea_tlv,
=> assert(std.meta.eql(inst_tracking.long, target.long)),
.load_frame,
.reserved_frame,
@ -1665,6 +1647,8 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.wrap_errunion_payload => try func.airWrapErrUnionPayload(inst),
.wrap_errunion_err => try func.airWrapErrUnionErr(inst),
.tlv_dllimport_ptr => try func.airTlvDllimportPtr(inst),
.add_optimized,
.sub_optimized,
.mul_optimized,
@ -3620,6 +3604,50 @@ fn airWrapErrUnionErr(func: *Func, inst: Air.Inst.Index) !void {
return func.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
/// Lowers the `tlv_dllimport_ptr` AIR instruction: materializes, at runtime,
/// the address of a threadlocal (or dll-imported) `Nav` into the result
/// location. Only the ELF output format is handled; anything else fails with
/// a TODO error.
fn airTlvDllimportPtr(func: *Func, inst: Air.Inst.Index) !void {
const zcu = func.pt.zcu;
const ip = &zcu.intern_pool;
const ty_nav = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
const ptr_ty: Type = .fromInterned(ty_nav.ty);
const nav = ip.getNav(ty_nav.nav);
// Resolve the linker symbol the relocation will target.
const tlv_sym_index = if (func.bin_file.cast(.elf)) |elf_file| sym: {
const zo = elf_file.zigObjectPtr().?;
if (nav.getExtern(ip)) |e| {
// Extern nav: reference a global symbol and mark that its address is
// taken indirectly.
const sym = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip));
zo.symbol(sym).flags.is_extern_ptr = true;
break :sym sym;
}
// Locally-defined nav: use (or lazily create) its own symbol.
break :sym try zo.getOrCreateMetadataForNav(zcu, ty_nav.nav);
} else return func.fail("TODO tlv_dllimport_ptr on {}", .{func.bin_file.tag});
const dest_mcv = try func.allocRegOrMem(ptr_ty, inst, true);
if (dest_mcv.isRegister()) {
// Destination is a register: load the TLV address straight into it via
// the pseudo-instruction, which the emitter expands with a relocation.
_ = try func.addInst(.{
.tag = .pseudo_load_tlv,
.data = .{ .reloc = .{
.register = dest_mcv.getReg().?,
.atom_index = try func.owner.getSymbolIndex(func),
.sym_index = tlv_sym_index,
} },
});
} else {
// Destination is in memory: materialize the address in a scratch
// register first, then copy it into the destination.
const tmp_reg, const tmp_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(tmp_lock);
_ = try func.addInst(.{
.tag = .pseudo_load_tlv,
.data = .{ .reloc = .{
.register = tmp_reg,
.atom_index = try func.owner.getSymbolIndex(func),
.sym_index = tlv_sym_index,
} },
});
try func.genCopy(ptr_ty, dest_mcv, .{ .register = tmp_reg });
}
return func.finishAir(inst, dest_mcv, .{ .none, .none, .none });
}
fn airTry(func: *Func, inst: Air.Inst.Index) !void {
const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
const extra = func.air.extraData(Air.Try, pl_op.payload);
@ -4494,14 +4522,12 @@ fn load(func: *Func, dst_mcv: MCValue, ptr_mcv: MCValue, ptr_ty: Type) InnerErro
.register_offset,
.lea_frame,
.lea_symbol,
.lea_tlv,
=> try func.genCopy(dst_ty, dst_mcv, ptr_mcv.deref()),
.memory,
.indirect,
.load_symbol,
.load_frame,
.load_tlv,
=> {
const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv);
const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg);
@ -4548,14 +4574,12 @@ fn store(func: *Func, ptr_mcv: MCValue, src_mcv: MCValue, ptr_ty: Type) !void {
.register_offset,
.lea_symbol,
.lea_frame,
.lea_tlv,
=> try func.genCopy(src_ty, ptr_mcv.deref(), src_mcv),
.memory,
.indirect,
.load_symbol,
.load_frame,
.load_tlv,
=> {
const addr_reg = try func.copyToTmpRegister(ptr_ty, ptr_mcv);
const addr_lock = func.register_manager.lockRegAssumeUnused(addr_reg);
@ -6544,7 +6568,7 @@ fn genCopy(func: *Func, ty: Type, dst_mcv: MCValue, src_mcv: MCValue) !void {
ty,
src_mcv,
),
.load_symbol, .load_tlv => {
.load_symbol => {
const addr_reg, const addr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_lock);
@ -7072,25 +7096,6 @@ fn genSetReg(func: *Func, ty: Type, reg: Register, src_mcv: MCValue) InnerError!
try func.genSetReg(ty, addr_reg, src_mcv.address());
try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } });
},
.lea_tlv => |sym| {
const atom_index = try func.owner.getSymbolIndex(func);
_ = try func.addInst(.{
.tag = .pseudo_load_tlv,
.data = .{ .reloc = .{
.register = reg,
.atom_index = atom_index,
.sym_index = sym,
} },
});
},
.load_tlv => {
const addr_reg, const addr_lock = try func.allocReg(.int);
defer func.register_manager.unlockReg(addr_lock);
try func.genSetReg(ty, addr_reg, src_mcv.address());
try func.genSetReg(ty, reg, .{ .indirect = .{ .reg = addr_reg } });
},
.air_ref => |ref| try func.genSetReg(ty, reg, try func.resolveInst(ref)),
else => return func.fail("TODO: genSetReg {s}", .{@tagName(src_mcv)}),
}
@ -7256,7 +7261,6 @@ fn genSetMem(
return func.genSetMem(base, disp, ty, .{ .register = reg });
},
.air_ref => |src_ref| try func.genSetMem(base, disp, ty, try func.resolveInst(src_ref)),
else => return func.fail("TODO: genSetMem {s}", .{@tagName(src_mcv)}),
}
}
@ -8190,7 +8194,6 @@ fn genTypedValue(func: *Func, val: Value) InnerError!MCValue {
.undef => unreachable,
.lea_symbol => |sym_index| .{ .lea_symbol = .{ .sym = sym_index } },
.load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
.load_got, .load_direct, .lea_direct => {

View File

@ -719,6 +719,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
.tlv_dllimport_ptr => @panic("TODO implement tlv_dllimport_ptr"),
.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return self.fail("TODO implement c_va_copy", .{}),
@ -4088,7 +4089,7 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
.load_got, .load_symbol, .load_direct, .load_tlv, .lea_symbol, .lea_direct => unreachable, // TODO
.load_got, .load_symbol, .load_direct, .lea_symbol, .lea_direct => unreachable, // TODO
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
},

View File

@ -2050,6 +2050,8 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.error_set_has_value => cg.airErrorSetHasValue(inst),
.frame_addr => cg.airFrameAddress(inst),
.tlv_dllimport_ptr => cg.airTlvDllimportPtr(inst),
.assembly,
.is_err_ptr,
.is_non_err_ptr,
@ -7551,6 +7553,19 @@ fn airFrameAddress(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return cg.finishAir(inst, .stack, &.{});
}
/// Lowers the `tlv_dllimport_ptr` AIR instruction for wasm. In single-threaded
/// builds a "threadlocal" is just an ordinary global, so a plain nav reference
/// suffices; true thread-local storage is not implemented yet.
fn airTlvDllimportPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const ty_nav = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
const file_mod = cg.pt.zcu.navFileScope(cg.owner_nav).mod.?;
if (!file_mod.single_threaded) {
return cg.fail("TODO: thread-local variables", .{});
}
// Single-threaded: the address is simply the nav's own address.
const result: WValue = .{ .nav_ref = .{
.nav_index = ty_nav.nav,
.offset = 0,
} };
return cg.finishAir(inst, result, &.{});
}
fn typeOf(cg: *CodeGen, inst: Air.Inst.Ref) Type {
const zcu = cg.pt.zcu;
return cg.air.typeOf(inst, &zcu.intern_pool);

View File

@ -204,12 +204,6 @@ pub const MCValue = union(enum) {
/// The value is a pointer to a value referenced indirectly via GOT.
/// Payload is a symbol index.
lea_got: u32,
/// The value is a threadlocal variable.
/// Payload is a symbol index.
load_tlv: u32,
/// The value is a pointer to a threadlocal variable.
/// Payload is a symbol index.
lea_tlv: u32,
/// The value stored at an offset from a frame index
/// Payload is a frame address.
load_frame: bits.FrameAddr,
@ -238,7 +232,6 @@ pub const MCValue = union(enum) {
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -252,7 +245,6 @@ pub const MCValue = union(enum) {
.load_symbol,
.load_got,
.load_direct,
.load_tlv,
.indirect,
=> true,
.load_frame => |frame_addr| !frame_addr.index.isNamed(),
@ -355,7 +347,6 @@ pub const MCValue = union(enum) {
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -368,7 +359,6 @@ pub const MCValue = union(enum) {
},
.load_direct => |sym_index| .{ .lea_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.load_frame => |frame_addr| .{ .lea_frame = frame_addr },
.load_symbol => |sym_off| .{ .lea_symbol = sym_off },
};
@ -390,7 +380,6 @@ pub const MCValue = union(enum) {
.indirect,
.load_direct,
.load_got,
.load_tlv,
.load_frame,
.load_symbol,
.elementwise_regs_then_frame,
@ -402,7 +391,6 @@ pub const MCValue = union(enum) {
.register_offset => |reg_off| .{ .indirect = reg_off },
.lea_direct => |sym_index| .{ .load_direct = sym_index },
.lea_got => |sym_index| .{ .load_got = sym_index },
.lea_tlv => |sym_index| .{ .load_tlv = sym_index },
.lea_frame => |frame_addr| .{ .load_frame = frame_addr },
.lea_symbol => |sym_index| .{ .load_symbol = sym_index },
};
@ -430,8 +418,6 @@ pub const MCValue = union(enum) {
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.load_symbol,
.lea_symbol,
@ -469,8 +455,6 @@ pub const MCValue = union(enum) {
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -555,8 +539,6 @@ pub const MCValue = union(enum) {
.lea_direct => |pl| try writer.print("direct:{d}", .{pl}),
.load_got => |pl| try writer.print("[got:{d}]", .{pl}),
.lea_got => |pl| try writer.print("got:{d}", .{pl}),
.load_tlv => |pl| try writer.print("[tlv:{d}]", .{pl}),
.lea_tlv => |pl| try writer.print("tlv:{d}", .{pl}),
.load_frame => |pl| try writer.print("[{} + 0x{x}]", .{ pl.index, pl.off }),
.elementwise_regs_then_frame => |pl| try writer.print("elementwise:{d}:[{} + 0x{x}]", .{
pl.regs, pl.frame_index, pl.frame_off,
@ -585,8 +567,6 @@ const InstTracking = struct {
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.lea_frame,
.load_symbol,
@ -688,8 +668,6 @@ const InstTracking = struct {
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
.load_symbol,
.lea_symbol,
@ -120945,6 +120923,47 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
};
for (ops) |op| try op.die(cg);
},
.tlv_dllimport_ptr => switch (cg.bin_file.tag) {
.elf, .macho => {
const ty_nav = air_datas[@intFromEnum(inst)].ty_nav;
const nav = ip.getNav(ty_nav.nav);
const tlv_sym_index = sym: {
if (cg.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
if (nav.getExtern(ip)) |e| {
const sym = try elf_file.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip));
zo.symbol(sym).flags.is_extern_ptr = true;
break :sym sym;
}
break :sym try zo.getOrCreateMetadataForNav(zcu, ty_nav.nav);
}
if (cg.bin_file.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
if (nav.getExtern(ip)) |e| {
const sym = try macho_file.getGlobalSymbol(nav.name.toSlice(ip), e.lib_name.toSlice(ip));
zo.symbols.items[sym].flags.is_extern_ptr = true;
break :sym sym;
}
break :sym try zo.getOrCreateMetadataForNav(macho_file, ty_nav.nav);
}
unreachable;
};
if (cg.mod.pic) {
try cg.spillRegisters(&.{ .rdi, .rax });
} else {
try cg.spillRegisters(&.{.rax});
}
var slot = try cg.tempInit(.usize, .{ .lea_symbol = .{
.sym_index = tlv_sym_index,
} });
while (try slot.toRegClass(true, .general_purpose, cg)) {}
try slot.finish(inst, &.{}, &.{}, cg);
},
else => return cg.fail("TODO implement tlv/dllimport on {}", .{cg.bin_file.tag}),
},
.c_va_arg => try cg.airVaArg(inst),
.c_va_copy => try cg.airVaCopy(inst),
.c_va_end => try cg.airVaEnd(inst),
@ -124664,7 +124683,7 @@ fn airArrayElemVal(self: *CodeGen, inst: Air.Inst.Index) !void {
}.to64(),
),
},
.memory, .load_symbol, .load_direct, .load_got, .load_tlv => switch (index_mcv) {
.memory, .load_symbol, .load_direct, .load_got => switch (index_mcv) {
.immediate => |index_imm| try self.asmMemoryImmediate(
.{ ._, .bt },
.{
@ -124729,9 +124748,8 @@ fn airArrayElemVal(self: *CodeGen, inst: Air.Inst.Index) !void {
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
=> try self.genSetReg(addr_reg, .usize, array_mcv.address(), .{}),
.lea_symbol, .lea_direct, .lea_tlv => unreachable,
.lea_symbol, .lea_direct => unreachable,
else => return self.fail("TODO airArrayElemVal_val for {s} of {}", .{
@tagName(array_mcv), array_ty.fmt(pt),
}),
@ -126611,7 +126629,6 @@ fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerE
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> try self.genCopy(dst_ty, dst_mcv, ptr_mcv.deref(), .{}),
.memory,
@ -126619,7 +126636,6 @@ fn load(self: *CodeGen, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) InnerE
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
.load_frame,
=> {
const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
@ -126831,7 +126847,6 @@ fn store(
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> try self.genCopy(src_ty, ptr_mcv.deref(), src_mcv, opts),
.memory,
@ -126839,7 +126854,6 @@ fn store(
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
.load_frame,
=> {
const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv);
@ -127309,7 +127323,6 @@ fn genUnOpMir(self: *CodeGen, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv:
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -127317,7 +127330,7 @@ fn genUnOpMir(self: *CodeGen, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv:
=> unreachable, // unmodifiable destination
.register => |dst_reg| try self.asmRegister(mir_tag, registerAlias(dst_reg, abi_size)),
.register_pair, .register_triple, .register_quadruple => unreachable, // unimplemented
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => {
.memory, .load_symbol, .load_got, .load_direct => {
const addr_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_reg_lock);
@ -128922,8 +128935,6 @@ fn genBinOp(
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
=> true,
.memory => |addr| std.math.cast(i32, @as(i64, @bitCast(addr))) == null,
@ -128983,8 +128994,6 @@ fn genBinOp(
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -130167,7 +130176,6 @@ fn genBinOpMir(
.register_mask,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
@ -130265,8 +130273,6 @@ fn genBinOpMir(
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.lea_frame,
=> {
@ -130304,7 +130310,6 @@ fn genBinOpMir(
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> {
assert(off == 0);
@ -130320,7 +130325,6 @@ fn genBinOpMir(
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
=> {
const ptr_ty = try pt.singleConstPtrType(ty);
const addr_reg = try self.copyToTmpRegister(ptr_ty, src_mcv.address());
@ -130340,13 +130344,13 @@ fn genBinOpMir(
}
}
},
.memory, .indirect, .load_symbol, .load_got, .load_direct, .load_tlv, .load_frame => {
.memory, .indirect, .load_symbol, .load_got, .load_direct, .load_frame => {
const OpInfo = ?struct { addr_reg: Register, addr_lock: RegisterLock };
const limb_abi_size: u32 = @min(abi_size, 8);
const dst_info: OpInfo = switch (dst_mcv) {
else => unreachable,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => dst: {
.memory, .load_symbol, .load_got, .load_direct => dst: {
const dst_addr_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const dst_addr_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg);
@ -130384,17 +130388,16 @@ fn genBinOpMir(
.indirect,
.lea_direct,
.lea_got,
.lea_tlv,
.load_frame,
.lea_frame,
.lea_symbol,
=> null,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => src: {
.memory, .load_symbol, .load_got, .load_direct => src: {
switch (resolved_src_mcv) {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr))) != null and
std.math.cast(i32, @as(i64, @bitCast(addr)) + abi_size - limb_abi_size) != null)
break :src null,
.load_symbol, .load_got, .load_direct, .load_tlv => {},
.load_symbol, .load_got, .load_direct => {},
else => unreachable,
}
@ -130437,7 +130440,6 @@ fn genBinOpMir(
.load_symbol,
.load_got,
.load_direct,
.load_tlv,
=> .{
.base = .{ .reg = dst_info.?.addr_reg },
.mod = .{ .rm = .{
@ -130533,8 +130535,6 @@ fn genBinOpMir(
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.lea_frame,
=> {
@ -130549,7 +130549,6 @@ fn genBinOpMir(
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> switch (limb_i) {
0 => resolved_src_mcv,
@ -130601,7 +130600,6 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -130666,8 +130664,6 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
=> {
const src_reg = try self.copyToTmpRegister(dst_ty, resolved_src_mcv);
@ -130723,7 +130719,7 @@ fn genIntMulComplexOpMir(self: *CodeGen, dst_ty: Type, dst_mcv: MCValue, src_mcv
}
},
.register_pair, .register_triple, .register_quadruple => unreachable, // unimplemented
.memory, .indirect, .load_symbol, .load_direct, .load_got, .load_tlv, .load_frame => {
.memory, .indirect, .load_symbol, .load_direct, .load_got, .load_frame => {
const tmp_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
const tmp_mcv = MCValue{ .register = tmp_reg };
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
@ -130899,7 +130895,7 @@ fn genLocalDebugInfo(
.disp = sym_off.off,
} },
}),
.lea_direct, .lea_got, .lea_tlv => |sym_index| try self.asmAirMemory(.dbg_local, inst, .{
.lea_direct, .lea_got => |sym_index| try self.asmAirMemory(.dbg_local, inst, .{
.base = .{ .reloc = sym_index },
.mod = .{ .rm = .{ .size = .qword } },
}),
@ -131548,7 +131544,6 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
.indirect,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
@ -131556,7 +131551,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
.air_ref,
=> unreachable,
.register, .register_pair, .register_triple, .register_quadruple, .load_frame => null,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => dst: {
.memory, .load_symbol, .load_got, .load_direct => dst: {
switch (resolved_dst_mcv) {
.memory => |addr| if (std.math.cast(
i32,
@ -131565,7 +131560,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
i32,
@as(i64, @bitCast(addr)) + abi_size - 8,
) != null) break :dst null,
.load_symbol, .load_got, .load_direct, .load_tlv => {},
.load_symbol, .load_got, .load_direct => {},
else => unreachable,
}
@ -131605,14 +131600,13 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
.lea_symbol,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
.air_ref,
=> unreachable,
.register_pair, .register_triple, .register_quadruple, .load_frame => null,
.memory, .load_symbol, .load_got, .load_direct, .load_tlv => src: {
.memory, .load_symbol, .load_got, .load_direct => src: {
switch (resolved_src_mcv) {
.memory => |addr| if (std.math.cast(
i32,
@ -131621,7 +131615,7 @@ fn airCmp(self: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) !v
i32,
@as(i64, @bitCast(addr)) + abi_size - 8,
) != null) break :src null,
.load_symbol, .load_got, .load_direct, .load_tlv => {},
.load_symbol, .load_got, .load_direct => {},
else => unreachable,
}
@ -132011,7 +132005,6 @@ fn isNull(self: *CodeGen, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue)
.register_mask,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_symbol,
.elementwise_regs_then_frame,
.reserved_frame,
@ -132063,7 +132056,6 @@ fn isNull(self: *CodeGen, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue)
.load_symbol,
.load_got,
.load_direct,
.load_tlv,
=> {
const addr_reg = (try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
@ -133105,7 +133097,7 @@ fn airAsm(self: *CodeGen, inst: Air.Inst.Index) !void {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |_|
break :arg input_mcv,
.indirect, .load_frame => break :arg input_mcv,
.load_symbol, .load_direct, .load_got, .load_tlv => {},
.load_symbol, .load_direct, .load_got => {},
else => {
const temp_mcv = try self.allocTempRegOrMem(ty, false);
try self.genCopy(ty, temp_mcv, input_mcv, .{});
@ -134075,7 +134067,6 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
.register_mask,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
.lea_symbol,
.elementwise_regs_then_frame,
@ -134159,7 +134150,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
}
return;
},
.load_symbol, .load_direct, .load_got, .load_tlv => {
.load_symbol, .load_direct, .load_got => {
const src_addr_reg =
(try self.register_manager.allocReg(null, abi.RegisterClass.gp)).to64();
const src_addr_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg);
@ -134192,7 +134183,7 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
.undef => if (opts.safety and part_i > 0) .{ .register = dst_regs[0] } else .undef,
dst_tag => |src_regs| .{ .register = src_regs[part_i] },
.memory, .indirect, .load_frame => src_mcv.address().offset(part_disp).deref(),
.load_symbol, .load_direct, .load_got, .load_tlv => .{ .indirect = .{
.load_symbol, .load_direct, .load_got => .{ .indirect = .{
.reg = src_info.?.addr_reg,
.off = part_disp,
} },
@ -134213,11 +134204,11 @@ fn genCopy(self: *CodeGen, ty: Type, dst_mcv: MCValue, src_mcv: MCValue, opts: C
src_mcv,
opts,
),
.memory, .load_symbol, .load_direct, .load_got, .load_tlv => {
.memory, .load_symbol, .load_direct, .load_got => {
switch (dst_mcv) {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
return self.genSetMem(.{ .reg = .ds }, small_addr, ty, src_mcv, opts),
.load_symbol, .load_direct, .load_got, .load_tlv => {},
.load_symbol, .load_direct, .load_got => {},
else => unreachable,
}
@ -134639,7 +134630,7 @@ fn genSetReg(
if (src_reg_mask.info.inverted) try self.asmRegister(.{ ._, .not }, registerAlias(bits_reg, abi_size));
try self.genSetReg(dst_reg, ty, .{ .register = bits_reg }, .{});
},
.memory, .load_symbol, .load_direct, .load_got, .load_tlv => {
.memory, .load_symbol, .load_direct, .load_got => {
switch (src_mcv) {
.memory => |addr| if (std.math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
return (try self.moveStrategy(
@ -134683,7 +134674,7 @@ fn genSetReg(
.segment, .mmx, .ip, .cr, .dr => unreachable,
.x87, .sse => {},
},
.load_got, .load_tlv => {},
.load_got => {},
else => unreachable,
}
@ -134734,7 +134725,6 @@ fn genSetReg(
.payload = try self.addExtra(bits.SymbolOffset{ .sym_index = sym_index }),
} },
}),
.lea_tlv => unreachable, // TODO: remove this
.air_ref => |src_ref| try self.genSetReg(dst_reg, ty, try self.resolveInst(src_ref), opts),
}
}
@ -134952,8 +134942,6 @@ fn genSetMem(
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.load_frame,
.lea_frame,
.load_symbol,
@ -138121,11 +138109,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
else => elem_mcv,
};
const elem_lock = switch (mat_elem_mcv) {
const elem_lock = switch (elem_mcv) {
.register => |reg| self.register_manager.lockReg(reg),
.immediate => |imm| lock: {
if (imm == 0) continue;
@ -138137,7 +138121,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
const elem_extra_bits = self.regExtraBits(elem_ty);
{
const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
const temp_reg = try self.copyToTmpRegister(elem_ty, elem_mcv);
const temp_alias = registerAlias(temp_reg, elem_abi_size);
const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
defer self.register_manager.unlockReg(temp_lock);
@ -138160,7 +138144,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
);
}
if (elem_bit_off > elem_extra_bits) {
const temp_reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv);
const temp_reg = try self.copyToTmpRegister(elem_ty, elem_mcv);
const temp_alias = registerAlias(temp_reg, elem_abi_size);
const temp_lock = self.register_manager.lockRegAssumeUnused(temp_reg);
defer self.register_manager.unlockReg(temp_lock);
@ -138192,11 +138176,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
const elem_ty = result_ty.fieldType(elem_i, zcu);
const elem_off: i32 = @intCast(result_ty.structFieldOffset(elem_i, zcu));
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
else => elem_mcv,
};
try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, mat_elem_mcv, .{});
try self.genSetMem(.{ .frame = frame_index }, elem_off, elem_ty, elem_mcv, .{});
}
break :result .{ .load_frame = .{ .index = frame_index } };
},
@ -138239,16 +138219,12 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
for (elements, 0..) |elem, elem_i| {
const elem_mcv = try self.resolveInst(elem);
const mat_elem_mcv = switch (elem_mcv) {
.load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index },
else => elem_mcv,
};
const elem_off: i32 = @intCast(elem_size * elem_i);
try self.genSetMem(
.{ .frame = frame_index },
elem_off,
elem_ty,
mat_elem_mcv,
elem_mcv,
.{},
);
}
@ -138744,32 +138720,7 @@ fn resolveInst(self: *CodeGen, ref: Air.Inst.Ref) InnerError!MCValue {
const mcv: MCValue = if (ref.toIndex()) |inst| mcv: {
break :mcv self.inst_tracking.getPtr(inst).?.short;
} else mcv: {
const const_mcv = try self.genTypedValue(.fromInterned(ref.toInterned().?));
switch (const_mcv) {
.lea_tlv => |tlv_sym| switch (self.bin_file.tag) {
.elf, .macho => {
if (self.mod.pic) {
try self.spillRegisters(&.{ .rdi, .rax });
} else {
try self.spillRegisters(&.{.rax});
}
const frame_index = try self.allocFrameIndex(.init(.{
.size = 8,
.alignment = .@"8",
}));
try self.genSetMem(
.{ .frame = frame_index },
0,
.usize,
.{ .lea_symbol = .{ .sym_index = tlv_sym } },
.{},
);
break :mcv .{ .load_frame = .{ .index = frame_index } };
},
else => break :mcv const_mcv,
},
else => break :mcv const_mcv,
}
break :mcv try self.genTypedValue(.fromInterned(ref.toInterned().?));
};
switch (mcv) {
@ -138819,7 +138770,6 @@ fn genResult(self: *CodeGen, res: codegen.GenResult) InnerError!MCValue {
.load_direct => |sym_index| .{ .load_direct = sym_index },
.lea_direct => |sym_index| .{ .lea_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
},
.fail => |msg| return self.failMsg(msg),
};
@ -139686,8 +139636,6 @@ const Temp = struct {
.lea_direct,
.load_got,
.lea_got,
.load_tlv,
.lea_tlv,
.lea_frame,
.elementwise_regs_then_frame,
.reserved_frame,
@ -140133,7 +140081,6 @@ const Temp = struct {
.register_offset,
.lea_direct,
.lea_got,
.lea_tlv,
.lea_frame,
=> return false,
.memory,
@ -140141,7 +140088,6 @@ const Temp = struct {
.load_symbol,
.load_direct,
.load_got,
.load_tlv,
.load_frame,
=> return temp.toRegClass(true, .general_purpose, cg),
.lea_symbol => |sym_off| {
@ -145048,34 +144994,7 @@ fn tempFromOperand(cg: *CodeGen, op_ref: Air.Inst.Ref, op_dies: bool) InnerError
if (op_ref.toIndex()) |op_inst| return .{ .index = op_inst };
const val = op_ref.toInterned().?;
return cg.tempInit(.fromInterned(ip.typeOf(val)), init: {
const const_mcv = try cg.genTypedValue(.fromInterned(val));
switch (const_mcv) {
.lea_tlv => |tlv_sym| switch (cg.bin_file.tag) {
.elf, .macho => {
if (cg.mod.pic) {
try cg.spillRegisters(&.{ .rdi, .rax });
} else {
try cg.spillRegisters(&.{.rax});
}
const frame_index = try cg.allocFrameIndex(.init(.{
.size = 8,
.alignment = .@"8",
}));
try cg.genSetMem(
.{ .frame = frame_index },
0,
.usize,
.{ .lea_symbol = .{ .sym_index = tlv_sym } },
.{},
);
break :init .{ .load_frame = .{ .index = frame_index } };
},
else => break :init const_mcv,
},
else => break :init const_mcv,
}
});
return cg.tempInit(.fromInterned(ip.typeOf(val)), try cg.genTypedValue(.fromInterned(val)));
}
fn tempsFromOperandsInner(

View File

@ -818,10 +818,6 @@ pub const GenResult = union(enum) {
/// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets
/// such as ARM, the immediate will never exceed 32-bits.
immediate: u64,
/// Threadlocal variable with address deferred until the linker allocates
/// everything in virtual memory.
/// Payload is a symbol index.
load_tlv: u32,
/// Decl with address deferred until the linker allocates everything in virtual memory.
/// Payload is a symbol index.
load_direct: u32,
@ -883,13 +879,13 @@ fn genNavRef(
}
const nav = ip.getNav(nav_index);
assert(!nav.isThreadlocal(ip));
const is_extern, const lib_name, const is_threadlocal = if (nav.getExtern(ip)) |e|
.{ true, e.lib_name, e.is_threadlocal }
const is_extern, const lib_name = if (nav.getExtern(ip)) |e|
.{ true, e.lib_name }
else
.{ false, .none, nav.isThreadlocal(ip) };
.{ false, .none };
const single_threaded = zcu.navFileScope(nav_index).mod.?.single_threaded;
const name = nav.name;
if (lf.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
@ -899,9 +895,6 @@ fn genNavRef(
return .{ .mcv = .{ .lea_symbol = sym_index } };
}
const sym_index = try zo.getOrCreateMetadataForNav(zcu, nav_index);
if (!single_threaded and is_threadlocal) {
return .{ .mcv = .{ .load_tlv = sym_index } };
}
return .{ .mcv = .{ .lea_symbol = sym_index } };
} else if (lf.cast(.macho)) |macho_file| {
const zo = macho_file.getZigObject().?;
@ -912,9 +905,6 @@ fn genNavRef(
}
const sym_index = try zo.getOrCreateMetadataForNav(macho_file, nav_index);
const sym = zo.symbols.items[sym_index];
if (!single_threaded and is_threadlocal) {
return .{ .mcv = .{ .load_tlv = sym.nlist_idx } };
}
return .{ .mcv = .{ .lea_symbol = sym.nlist_idx } };
} else if (lf.cast(.coff)) |coff_file| {
if (is_extern) {

View File

@ -3453,6 +3453,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
.error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
.vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),
.tlv_dllimport_ptr => try airTlvDllimportPtr(f, inst),
.c_va_start => try airCVaStart(f, inst),
.c_va_arg => try airCVaArg(f, inst),
.c_va_end => try airCVaEnd(f, inst),
@ -7617,6 +7619,17 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
fn airTlvDllimportPtr(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_nav = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
const writer = f.object.writer();
const local = try f.allocLocal(inst, .fromInterned(ty_nav.ty));
try f.writeCValue(writer, local, .Other);
try writer.writeAll(" = ");
try f.object.dg.renderNav(writer, ty_nav.nav, .Other);
try writer.writeAll(";\n");
return local;
}
fn airCVaStart(f: *Function, inst: Air.Inst.Index) !CValue {
const pt = f.object.dg.pt;
const zcu = pt.zcu;

View File

@ -5015,6 +5015,8 @@ pub const FuncGen = struct {
.vector_store_elem => try self.airVectorStoreElem(inst),
.tlv_dllimport_ptr => try self.airTlvDllimportPtr(inst),
.inferred_alloc, .inferred_alloc_comptime => unreachable,
.dbg_stmt => try self.airDbgStmt(inst),
@ -8112,6 +8114,13 @@ pub const FuncGen = struct {
return .none;
}
fn airTlvDllimportPtr(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const ty_nav = fg.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
const llvm_ptr_const = try o.lowerNavRefValue(ty_nav.nav);
return llvm_ptr_const.toValue();
}
fn airMin(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const zcu = o.pt.zcu;

View File

@ -320,6 +320,7 @@ const Writer = struct {
.reduce, .reduce_optimized => try w.writeReduce(s, inst),
.cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
.vector_store_elem => try w.writeVectorStoreElem(s, inst),
.tlv_dllimport_ptr => try w.writeTlvDllimportPtr(s, inst),
.work_item_id,
.work_group_size,
@ -552,6 +553,13 @@ const Writer = struct {
try w.writeOperand(s, inst, 2, extra.rhs);
}
fn writeTlvDllimportPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ip = &w.pt.zcu.intern_pool;
const ty_nav = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;
try w.writeType(s, .fromInterned(ty_nav.ty));
try s.print(", '{}'", .{ip.getNav(ty_nav.nav).fqn.fmt(ip)});
}
fn writeAtomicLoad(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const atomic_load = w.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;

View File

@ -7,6 +7,8 @@ test "thread local variable" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) {
// Fails due to register hazards.
@ -26,6 +28,7 @@ test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
const s = "Hello world";
@memcpy(buffer[0..s.len], s);
@ -40,6 +43,7 @@ test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
_ = nrfx_uart_rx(&g_uart0);
}

View File

@ -6,9 +6,6 @@ pub export fn entry() void {
}
// error
// backend=stage2
// target=native
//
// :2:36: error: unable to resolve comptime value
// :2:36: note: initializer of container-level variable must be comptime-known
// :2:36: note: threadlocal and dll imported variables have runtime-known addresses

View File

@ -7,12 +7,8 @@ pub export fn entry2() void {
_ = foo_dll;
}
// error
// backend=stage2
// target=native
//
// :1:16: error: unable to resolve comptime value
// :1:16: note: initializer of container-level variable must be comptime-known
// :1:16: note: threadlocal and dll imported variables have runtime-known addresses
// :2:17: error: unable to resolve comptime value
// :2:17: note: initializer of container-level variable must be comptime-known
// :2:17: note: threadlocal and dll imported variables have runtime-known addresses