From 179117c114ca44d977f794961762e110c1955911 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 15 Apr 2023 00:57:17 +0200 Subject: [PATCH 1/3] x86_64: split MCValue.tlv_reloc into .load_tlv and .lea_tlv `.load_tlv` signifies we want to load the value of a TLV `.lea_tlv` signifies we want to load effective address of a TLV --- src/arch/aarch64/CodeGen.zig | 2 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 173 ++++++++++++++++++++++++----------- src/codegen.zig | 8 +- 6 files changed, 126 insertions(+), 63 deletions(-) diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index acaddedce7..fcf60bd551 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -6171,7 +6171,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .linker_load => |ll| .{ .linker_load = ll }, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, - .tlv_reloc => unreachable, // TODO + .load_tlv => unreachable, // TODO }, .fail => |msg| { self.err_msg = msg; diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 69a71a2c7e..7e5c92d457 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -6114,7 +6114,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .tlv_reloc, .linker_load => unreachable, // TODO + .load_tlv, .linker_load => unreachable, // TODO .immediate => |imm| .{ .immediate = @truncate(u32, imm) }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 49c6ff183c..108eaba05a 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -2572,7 +2572,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .tlv_reloc, .linker_load => unreachable, // TODO + .load_tlv, .linker_load => unreachable, // TODO .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 52f8ba085f..f96444580c 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3931,7 +3931,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .tlv_reloc, .linker_load => unreachable, // TODO + .load_tlv, .linker_load => unreachable, // TODO .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index cbf8a48d0f..e930e60c96 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -132,10 +132,12 @@ pub const MCValue = union(enum) { memory: u64, /// The value is in memory but requires a linker relocation fixup. linker_load: codegen.LinkerLoad, - /// Pointer to a threadlocal variable. - /// The address resolution will be deferred until the linker allocates everything in virtual memory. + /// The value is a threadlocal variable. /// Payload is a symbol index. - tlv_reloc: u32, + load_tlv: u32, + /// The value is a pointer to threadlocal variable. + /// Payload is a symbol index. + lea_tlv: u32, /// The value is one of the stack variables. /// If the type is a pointer, it means the pointer address is in the stack at this offset. 
stack_offset: i32, @@ -150,7 +152,8 @@ pub const MCValue = union(enum) { .stack_offset, .ptr_stack_offset, .linker_load, - .tlv_reloc, + .load_tlv, + .lea_tlv, => true, else => false, }; @@ -2923,7 +2926,8 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { })); }, .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .tlv_reloc => try self.genSetReg(array_ty, addr_reg, array), + .lea_tlv => unreachable, + .load_tlv => |sym_index| try self.genSetReg(array_ty, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -3650,7 +3654,8 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}), } }, - .memory, .tlv_reloc => { + .load_tlv => |sym_index| try self.load(dst_mcv, .{ .lea_tlv = sym_index }, ptr_ty), + .memory, .lea_tlv => { const reg = try self.copyToTmpRegister(ptr_ty, ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, @@ -3806,7 +3811,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type -@intCast(i32, overflow_bit_offset), ); }, - .memory, .linker_load => if (abi_size <= 8) { + .memory, .load_tlv, .linker_load => if (abi_size <= 8) { const tmp_reg = try self.copyToTmpRegister(value_ty, value); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -3819,6 +3824,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type switch (value) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -3855,7 +3861,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .{ .immediate = abi_size }, .{}, ), - .ptr_stack_offset, .tlv_reloc => { + .ptr_stack_offset, .lea_tlv => { const tmp_reg = try self.copyToTmpRegister(value_ty, value); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -3864,7 +3870,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type }, } }, - .memory, .linker_load => { + .memory, .load_tlv, .linker_load => { const value_lock: ?RegisterLock = switch (value) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -3881,6 +3887,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type // Load the pointer, which is stored in memory try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{ .base = addr_reg })); }, + .load_tlv => |sym_index| try self.genSetReg(ptr_ty, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -3900,8 +3907,8 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type const new_ptr = MCValue{ .register = addr_reg }; try self.store(new_ptr, value, ptr_ty, value_ty); }, - .tlv_reloc => { - const addr_reg = try 
self.copyToTmpRegister(Type.usize, ptr); + .lea_tlv => { + const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); @@ -3953,14 +3960,17 @@ fn fieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, index: u32 const dst_mcv: MCValue = result: { switch (mcv) { - .stack_offset, .tlv_reloc => { + .stack_offset, .lea_tlv, .load_tlv => { const offset_reg = try self.copyToTmpRegister(ptr_ty, .{ .immediate = field_offset, }); const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); defer self.register_manager.unlockReg(offset_reg_lock); - const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, mcv); + const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, switch (mcv) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => mcv, + }); try self.genBinOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg }); break :result dst_mcv; }, @@ -4235,14 +4245,15 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue })); }, .ptr_stack_offset => unreachable, - .tlv_reloc => unreachable, - .memory, .linker_load => { + .lea_tlv => unreachable, + .memory, .load_tlv, .linker_load => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); switch (dst_mcv) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4756,7 +4767,10 @@ fn genBinOp( } } const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, maybe_inst, true); - try self.setRegOrMem(lhs_ty, dst_mcv, lhs); + try self.setRegOrMem(lhs_ty, dst_mcv, switch (lhs) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => lhs, + }); break :dst dst_mcv; }; const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) { @@ -4897,7 +4911,7 @@ fn genBinOp( .eflags, .register_overflow, .ptr_stack_offset, - .tlv_reloc, + .lea_tlv, => unreachable, .register => |src_reg| try self.asmCmovccRegisterRegister( registerAlias(tmp_reg, cmov_abi_size), @@ -4912,13 +4926,14 @@ fn genBinOp( }), cc, ), - .memory, .linker_load => { + .memory, .load_tlv, .linker_load => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); switch (mat_src_mcv) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -4982,13 +4997,6 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .undef => unreachable, .dead, .unreach => unreachable, .register_overflow => unreachable, - .ptr_stack_offset, .tlv_reloc => { - const dst_reg_lock = self.register_manager.lockReg(dst_reg); - defer if (dst_reg_lock) |lock| 
self.register_manager.unlockReg(lock); - - const reg = try self.copyToTmpRegister(ty, src_mcv); - return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg }); - }, .register => |src_reg| switch (ty.zigTypeTag()) { .Float => { if (intrinsicsAllowed(self.target.*, ty)) { @@ -5037,7 +5045,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s )), else => unreachable, }, - .memory, .linker_load, .eflags => { + .ptr_stack_offset, .memory, .lea_tlv, .load_tlv, .linker_load, .eflags => { assert(abi_size <= 8); const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -5052,19 +5060,20 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s ), } }, - .memory, .linker_load, .stack_offset => { + .memory, .load_tlv, .linker_load, .stack_offset => { const dst: ?struct { addr_reg: Register, addr_lock: RegisterLock, } = switch (dst_mcv) { else => unreachable, - .memory, .linker_load => dst: { + .memory, .load_tlv, .linker_load => dst: { const dst_addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const dst_addr_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg); errdefer self.register_manager.unlockReg(dst_addr_lock); switch (dst_mcv) { .memory => |addr| try self.genSetReg(Type.usize, dst_addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, dst_addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -5099,7 +5108,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s addr_lock: RegisterLock, } = switch (src_mcv) { else => null, - .memory, .linker_load => src: { + .memory, .load_tlv, .linker_load => src: { const src_limb_reg = try self.register_manager.allocReg(null, gp); const src_limb_lock = self.register_manager.lockRegAssumeUnused(src_limb_reg); errdefer self.register_manager.unlockReg(src_limb_lock); @@ -5110,6 +5119,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s switch (src_mcv) { .memory => |addr| try self.genSetReg(Type.usize, src_addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, src_addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -5233,7 +5243,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s else => unreachable, } }, - .memory, .linker_load, .tlv_reloc => { + .memory, .linker_load, .lea_tlv, .load_tlv => { try self.asmRegisterMemory( .mov, registerAlias(src.?.limb_reg, limb_abi_size), @@ -5272,7 +5282,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s } }, .ptr_stack_offset => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, } } @@ -5286,7 +5296,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .dead, .unreach, .immediate => unreachable, .eflags => unreachable, .ptr_stack_offset => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, .register_overflow => unreachable, .register => |dst_reg| { const dst_alias = registerAlias(dst_reg, abi_size); @@ -5298,7 +5308,7 @@ fn 
genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, .register_overflow => unreachable, .register => |src_reg| try self.asmRegisterRegister( .imul, @@ -5325,7 +5335,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }), ); }, - .memory => { + .memory, .load_tlv => { return self.fail("TODO implement x86 multiply source memory", .{}); }, .linker_load => { @@ -5342,7 +5352,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .undef => return self.genSetStack(dst_ty, off, .undef, .{}), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, .register_overflow => unreachable, .register => |src_reg| { // copy dst to a register @@ -5365,7 +5375,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{}); }, - .memory, .stack_offset => { + .memory, .load_tlv, .stack_offset => { return self.fail("TODO implement x86 multiply source memory", .{}); }, .linker_load => { @@ -5376,7 +5386,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M }, } }, - .memory => { + .memory, .load_tlv => { return self.fail("TODO implement x86 multiply destination memory", .{}); }, .linker_load => { @@ -5565,7 +5575,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, + .load_tlv => unreachable, } } @@ -5604,7 +5615,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, - .tlv_reloc => unreachable, + .lea_tlv => unreachable, + .load_tlv => unreachable, } } @@ -6126,7 +6138,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register_overflow, .ptr_stack_offset, .eflags, - .tlv_reloc, + .lea_tlv, => unreachable, .register => |opt_reg| { @@ -6147,13 +6159,14 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC return .{ .eflags = .nc }; }, - .memory, .linker_load => { + .memory, .load_tlv, .linker_load => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); switch (opt_mcv) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -7099,7 +7112,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE else => return self.fail("TODO implement inputs on stack for {} with abi size > 8", .{mcv}), } }, - .memory, .linker_load => { + .memory, .load_tlv, .linker_load => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return 
self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); @@ -7111,6 +7124,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE switch (mcv) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -7170,7 +7184,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE }, } }, - .ptr_stack_offset, .tlv_reloc => { + .ptr_stack_offset, .lea_tlv => { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); }, @@ -7328,7 +7342,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl }, } }, - .memory, .linker_load => if (abi_size <= 8) { + .memory, .load_tlv, .linker_load => if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts); } else { @@ -7338,6 +7352,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl switch (mcv) { .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), + .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -7375,7 +7390,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl .{ .immediate = abi_size }, .{}, ), - .ptr_stack_offset, .tlv_reloc => { + .ptr_stack_offset, .lea_tlv => { const tmp_reg = try self.copyToTmpRegister(ty, mcv); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -7484,7 +7499,10 @@ fn genInlineMemcpy( .got, .direct => try self.asmMovLinker(.rdi, atom_index, load_struct), } }, - .tlv_reloc => try self.genSetReg(Type.usize, .rdi, dst_ptr), + .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => dst_ptr, + }), .stack_offset, .ptr_stack_offset => |off| { try self.asmRegisterMemory(switch (dst_ptr) { .stack_offset => .mov, @@ -7527,7 +7545,10 @@ fn genInlineMemcpy( .got, .direct => try self.asmMovLinker(.rsi, atom_index, load_struct), } }, - .tlv_reloc => try self.genSetReg(Type.usize, .rsi, src_ptr), + .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rsi, switch (src_ptr) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => src_ptr, + }), .stack_offset, .ptr_stack_offset => |off| { try self.asmRegisterMemory(switch (src_ptr) { .stack_offset => .mov, @@ -7593,7 +7614,10 @@ fn genInlineMemset( .got, .direct => try self.asmMovLinker(.rdi, atom_index, load_struct), } }, - .tlv_reloc => try self.genSetReg(Type.usize, .rdi, dst_ptr), + .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => dst_ptr, + }), .stack_offset, .ptr_stack_offset => |off| { try self.asmRegisterMemory(switch (dst_ptr) { .stack_offset => .mov, @@ -7769,7 +7793,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } 
}, }, - .tlv_reloc => |sym_index| { + .lea_tlv => |sym_index| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); break :blk macho_file.getAtom(atom).getSymbolIndex().?; @@ -7793,6 +7817,33 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void try self.genSetReg(Type.usize, reg, .{ .register = .rax }); } else return self.fail("TODO emit ptr to TLV sequence on {s}", .{@tagName(self.bin_file.tag)}); }, + .load_tlv => |sym_index| { + const base_reg = switch (ty.zigTypeTag()) { + .Float => (try self.register_manager.allocReg(null, gp)).to64(), + else => reg.to64(), + }; + try self.genSetReg(Type.usize, base_reg, .{ .lea_tlv = sym_index }); + switch (ty.zigTypeTag()) { + .Float => if (intrinsicsAllowed(self.target.*, ty)) { + return self.asmRegisterMemory( + switch (ty.tag()) { + .f32 => .movss, + .f64 => .movsd, + else => return self.fail("TODO genSetReg from memory for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, + reg.to128(), + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base_reg }), + ); + } else return self.fail("TODO genSetReg from memory for float with no intrinsics", .{}), + else => try self.asmRegisterMemory( + .mov, + registerAlias(reg, abi_size), + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base_reg }), + ), + } + }, .linker_load => |load_struct| { const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); @@ -8586,7 +8637,11 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size); const elem_bit_off = elem_off % elem_abi_bits; const elem_mcv = try self.resolveInst(elem); - const elem_lock = switch (elem_mcv) { + const mat_elem_mcv = switch (elem_mcv) { + .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, + else => elem_mcv, + }; + const elem_lock = switch (mat_elem_mcv) { .register => |reg| self.register_manager.lockReg(reg), .immediate => |imm| lock: { if (imm == 0) continue; @@ -8596,7 +8651,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { }; defer if (elem_lock) |lock| self.register_manager.unlockReg(lock); const elem_reg = registerAlias( - try self.copyToTmpRegister(elem_ty, elem_mcv), + try self.copyToTmpRegister(elem_ty, mat_elem_mcv), elem_abi_size, ); const elem_extra_bits = self.regExtraBits(elem_ty); @@ -8616,7 +8671,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { .{ .register = elem_reg }, ); if (elem_bit_off > elem_extra_bits) { - const reg = try self.copyToTmpRegister(elem_ty, elem_mcv); + const reg = try self.copyToTmpRegister(elem_ty, mat_elem_mcv); if (elem_extra_bits > 0) { try self.truncateRegister(elem_ty, registerAlias(reg, elem_abi_size)); } @@ -8641,7 +8696,11 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { const elem_ty = result_ty.structFieldType(elem_i); const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*)); const elem_mcv = try self.resolveInst(elem); - try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{}); + const mat_elem_mcv = switch (elem_mcv) { + .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, + else => elem_mcv, + }; + try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{}); } break :res .{ .stack_offset = stack_offset }; }, @@ -8652,8 +8711,12 @@ fn 
airAggregateInit(self: *Self, inst: Air.Inst.Index) !void { for (elements, 0..) |elem, elem_i| { const elem_mcv = try self.resolveInst(elem); + const mat_elem_mcv = switch (elem_mcv) { + .load_tlv => |sym_index| MCValue{ .lea_tlv = sym_index }, + else => elem_mcv, + }; const elem_off = @intCast(i32, elem_size * elem_i); - try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{}); + try self.genSetStack(elem_ty, stack_offset - elem_off, mat_elem_mcv, .{}); } break :res MCValue{ .stack_offset = stack_offset }; }, @@ -8779,7 +8842,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .linker_load => |ll| .{ .linker_load = ll }, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, - .tlv_reloc => |sym_index| .{ .tlv_reloc = sym_index }, + .load_tlv => |sym_index| .{ .load_tlv = sym_index }, }, .fail => |msg| { self.err_msg = msg; diff --git a/src/codegen.zig b/src/codegen.zig index 6939b750fd..15f4440788 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -932,10 +932,10 @@ pub const GenResult = union(enum) { /// such as ARM, the immediate will never exceed 32-bits. immediate: u64, linker_load: LinkerLoad, - /// Pointer to a threadlocal variable. - /// The address resolution will be deferred until the linker allocates everything in virtual memory. + /// Threadlocal variable with address deferred until the linker allocates + /// everything in virtual memory. /// Payload is a symbol index. - tlv_reloc: u32, + load_tlv: u32, /// Direct by-address reference to memory location. memory: u64, }; @@ -1005,7 +1005,7 @@ fn genDeclRef( const atom_index = try macho_file.getOrCreateAtomForDecl(decl_index); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; if (is_threadlocal) { - return GenResult.mcv(.{ .tlv_reloc = sym_index }); + return GenResult.mcv(.{ .load_tlv = sym_index }); } return GenResult.mcv(.{ .linker_load = .{ .type = .got, From b82130709d121222f793c082dbbe6c29e7f2ec41 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 15 Apr 2023 11:10:20 +0200 Subject: [PATCH 2/3] x86_64: cleanup different memory load types Split `MCValue.linker_load` into `.load_got`, `.load_direct`, and `.lea_direct`. 
--- src/arch/aarch64/CodeGen.zig | 3 +- src/arch/arm/CodeGen.zig | 2 +- src/arch/riscv64/CodeGen.zig | 2 +- src/arch/sparc64/CodeGen.zig | 2 +- src/arch/x86_64/CodeGen.zig | 677 ++++++++++++++--------------------- src/codegen.zig | 28 +- 6 files changed, 288 insertions(+), 426 deletions(-) diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index fcf60bd551..e08386dfcb 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -6168,9 +6168,10 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .linker_load => |ll| .{ .linker_load = ll }, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, + .load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } }, + .load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, .load_tlv => unreachable, // TODO }, .fail => |msg| { diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 7e5c92d457..1acf5a5164 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -6114,7 +6114,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .load_tlv, .linker_load => unreachable, // TODO + .load_got, .load_direct, .load_tlv => unreachable, // TODO .immediate => |imm| .{ .immediate = @truncate(u32, imm) }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 108eaba05a..16d9548da7 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -2572,7 +2572,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .load_tlv, .linker_load => unreachable, // TODO + .load_got, .load_direct, .load_tlv => unreachable, // TODO .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index f96444580c..5405b212a2 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -3931,7 +3931,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .load_tlv, .linker_load => unreachable, // TODO + .load_got, .load_direct, .load_tlv => unreachable, // TODO .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, }, diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e930e60c96..476b4aae47 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -130,8 +130,15 @@ pub const MCValue = union(enum) { /// The value is in memory at a hard-coded address. /// If the type is a pointer, it means the pointer address is at this memory location. memory: u64, - /// The value is in memory but requires a linker relocation fixup. - linker_load: codegen.LinkerLoad, + /// The value is in memory. + /// Payload is a symbol index. + load_direct: u32, + /// The value is a pointer to value in memory. + /// Payload is a symbol index. + lea_direct: u32, + /// The value is in memory referenced indirectly via GOT. + /// Payload is a symbol index. + load_got: u32, /// The value is a threadlocal variable. /// Payload is a symbol index. 
load_tlv: u32, @@ -151,7 +158,9 @@ pub const MCValue = union(enum) { .memory, .stack_offset, .ptr_stack_offset, - .linker_load, + .load_direct, + .lea_direct, + .load_got, .load_tlv, .lea_tlv, => true, @@ -739,40 +748,6 @@ fn asmMemoryRegisterImmediate( }); } -fn asmMovLinker(self: *Self, reg: Register, atom_index: u32, linker_load: codegen.LinkerLoad) !void { - const ops: Mir.Inst.Ops = switch (linker_load.type) { - .got => .got_reloc, - .direct => .direct_reloc, - .import => .import_reloc, - }; - _ = try self.addInst(.{ - .tag = .mov_linker, - .ops = ops, - .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(reg), - .atom_index = atom_index, - .sym_index = linker_load.sym_index, - }) }, - }); -} - -fn asmLeaLinker(self: *Self, reg: Register, atom_index: u32, linker_load: codegen.LinkerLoad) !void { - const ops: Mir.Inst.Ops = switch (linker_load.type) { - .got => .got_reloc, - .direct => .direct_reloc, - .import => .import_reloc, - }; - _ = try self.addInst(.{ - .tag = .lea_linker, - .ops = ops, - .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ - .reg = @enumToInt(reg), - .atom_index = atom_index, - .sym_index = linker_load.sym_index, - }) }, - }); -} - fn gen(self: *Self) InnerError!void { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { @@ -2925,24 +2900,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { .disp = -off, })); }, - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .lea_tlv => unreachable, - .load_tlv => |sym_index| try self.genSetReg(array_ty, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + .load_got => try self.genSetReg(array_ty, addr_reg, array), + .memory, .load_direct, .load_tlv => try self.genSetReg(Type.usize, addr_reg, switch (array) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => unreachable, + }), + .lea_direct, .lea_tlv => unreachable, else => return self.fail("TODO implement array_elem_val when array is {}", .{array}), } @@ -3654,31 +3619,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}), } }, - .load_tlv => |sym_index| try self.load(dst_mcv, .{ .lea_tlv = sym_index }, ptr_ty), - .memory, .lea_tlv => { + .load_direct, .load_tlv => |sym_index| try self.load(dst_mcv, switch (ptr) { + .load_direct => .{ .lea_direct = sym_index }, + .load_tlv => .{ .lea_tlv = sym_index }, + else => unreachable, + }, ptr_ty), + .memory, .load_got, .lea_direct, .lea_tlv => { const reg = try self.copyToTmpRegister(ptr_ty, ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, - .linker_load => |load_struct| { - const addr_reg = (try 
self.register_manager.allocReg(null, gp)).to64(); - const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_reg_lock); - - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got, .direct => try self.asmMovLinker(addr_reg, atom_index, load_struct), - } - - try self.load(dst_mcv, .{ .register = addr_reg }, ptr_ty); - }, } } @@ -3811,7 +3760,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type -@intCast(i32, overflow_bit_offset), ); }, - .memory, .load_tlv, .linker_load => if (abi_size <= 8) { + .memory, .load_tlv, .load_direct => if (abi_size <= 8) { const tmp_reg = try self.copyToTmpRegister(value_ty, value); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -3822,26 +3771,12 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - switch (value) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + try self.genSetReg(Type.usize, addr_reg, switch (value) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => unreachable, - } - + }); try self.genInlineMemcpy( ptr, .{ .register = addr_reg }, @@ -3861,7 +3796,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .{ .immediate = abi_size }, .{}, ), - .ptr_stack_offset, .lea_tlv => { + .ptr_stack_offset, .load_got, .lea_direct, .lea_tlv => { const tmp_reg = try self.copyToTmpRegister(value_ty, value); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -3870,7 +3805,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type }, } }, - .memory, .load_tlv, .linker_load => { + .memory, .load_direct, .load_tlv => { const value_lock: ?RegisterLock = switch (value) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -3887,27 +3822,15 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type // Load the pointer, which is 
stored in memory try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{ .base = addr_reg })); }, + .load_direct => |sym_index| try self.genSetReg(ptr_ty, addr_reg, .{ .lea_direct = sym_index }), .load_tlv => |sym_index| try self.genSetReg(ptr_ty, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - switch (load_struct.type) { - .import => unreachable, - .got, .direct => try self.asmMovLinker(addr_reg, atom_index, load_struct), - } - }, else => unreachable, } const new_ptr = MCValue{ .register = addr_reg }; try self.store(new_ptr, value, ptr_ty, value_ty); }, - .lea_tlv => { + .load_got, .lea_direct, .lea_tlv => { const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); @@ -4245,32 +4168,18 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValue })); }, .ptr_stack_offset => unreachable, - .lea_tlv => unreachable, - .memory, .load_tlv, .linker_load => { + .load_got, .lea_direct, .lea_tlv => unreachable, + .memory, .load_direct, .load_tlv => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); - switch (dst_mcv) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + try self.genSetReg(Type.usize, addr_reg, switch (dst_mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => unreachable, - } + }); try self.asmMemory( mir_tag, Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = addr_reg }), @@ -4768,6 +4677,7 @@ fn genBinOp( } const dst_mcv = try self.allocRegOrMemAdvanced(lhs_ty, maybe_inst, true); try self.setRegOrMem(lhs_ty, dst_mcv, switch (lhs) { + .load_direct => |sym_index| .{ .lea_direct = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => lhs, }); @@ -4911,6 +4821,8 @@ fn genBinOp( .eflags, .register_overflow, .ptr_stack_offset, + .load_got, + .lea_direct, .lea_tlv, => unreachable, .register => |src_reg| try self.asmCmovccRegisterRegister( @@ -4926,32 +4838,17 @@ fn genBinOp( }), 
cc, ), - .memory, .load_tlv, .linker_load => { + .memory, .load_direct, .load_tlv => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); - switch (mat_src_mcv) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + try self.genSetReg(Type.usize, addr_reg, switch (mat_src_mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => unreachable, - } - + }); try self.asmCmovccRegisterMemory( registerAlias(tmp_reg, cmov_abi_size), Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), .{ .base = addr_reg }), @@ -5045,7 +4942,15 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s )), else => unreachable, }, - .ptr_stack_offset, .memory, .lea_tlv, .load_tlv, .linker_load, .eflags => { + .ptr_stack_offset, + .memory, + .load_got, + .lea_direct, + .load_direct, + .lea_tlv, + .load_tlv, + .eflags, + => { assert(abi_size <= 8); const dst_reg_lock = self.register_manager.lockReg(dst_reg); defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock); @@ -5060,37 +4965,23 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s ), } }, - .memory, .load_tlv, .linker_load, .stack_offset => { + .memory, .load_got, .load_direct, .load_tlv, .stack_offset => { const dst: ?struct { addr_reg: Register, addr_lock: RegisterLock, } = switch (dst_mcv) { else => unreachable, - .memory, .load_tlv, .linker_load => dst: { + .memory, .load_got, .load_direct, .load_tlv => dst: { const dst_addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const dst_addr_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg); errdefer self.register_manager.unlockReg(dst_addr_lock); - switch (dst_mcv) { - .memory => |addr| try self.genSetReg(Type.usize, dst_addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, dst_addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(dst_addr_reg, atom_index, load_struct), - .direct => 
try self.asmLeaLinker(dst_addr_reg, atom_index, load_struct), - } - }, - else => unreachable, - } + try self.genSetReg(Type.usize, dst_addr_reg, switch (dst_mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => dst_mcv, + }); break :dst .{ .addr_reg = dst_addr_reg, @@ -5108,7 +4999,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s addr_lock: RegisterLock, } = switch (src_mcv) { else => null, - .memory, .load_tlv, .linker_load => src: { + .memory, .load_got, .load_direct, .load_tlv => src: { const src_limb_reg = try self.register_manager.allocReg(null, gp); const src_limb_lock = self.register_manager.lockRegAssumeUnused(src_limb_reg); errdefer self.register_manager.unlockReg(src_limb_lock); @@ -5117,26 +5008,12 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s const src_addr_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg); errdefer self.register_manager.unlockReg(src_addr_lock); - switch (src_mcv) { - .memory => |addr| try self.genSetReg(Type.usize, src_addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, src_addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(src_addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(src_addr_reg, atom_index, load_struct), - } - }, - else => unreachable, - } + try self.genSetReg(Type.usize, src_addr_reg, switch (src_mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => src_mcv, + }); break :src .{ .addr_reg = src_addr_reg, @@ -5179,7 +5056,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s .base = .rbp, .disp = off - dst_off, }, - .memory, .linker_load => .{ .base = dst.?.addr_reg, .disp = off }, + .memory, + .load_got, + .load_direct, + .load_tlv, + => .{ .base = dst.?.addr_reg, .disp = off }, }, ); switch (src_mcv) { @@ -5243,7 +5124,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s else => unreachable, } }, - .memory, .linker_load, .lea_tlv, .load_tlv => { + .memory, + .load_got, + .load_direct, + .load_tlv, + => { try self.asmRegisterMemory( .mov, registerAlias(src.?.limb_reg, limb_abi_size), @@ -5258,11 +5143,19 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s registerAlias(src.?.limb_reg, limb_abi_size), ); }, - .stack_offset, .ptr_stack_offset, .eflags => { + .stack_offset, + .ptr_stack_offset, + .eflags, + .lea_direct, + .lea_tlv, + => { const src_limb_reg = try self.copyToTmpRegister(limb_ty, switch (src_mcv) { .stack_offset => |src_off| .{ .stack_offset = src_off - off }, .ptr_stack_offset, .eflags, + .load_got, + .lea_direct, + .lea_tlv, => off: { assert(off == 0); break :off src_mcv; @@ -5283,6 +5176,7 @@ fn 
genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s }, .ptr_stack_offset => unreachable, .lea_tlv => unreachable, + .lea_direct => unreachable, } } @@ -5296,6 +5190,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .dead, .unreach, .immediate => unreachable, .eflags => unreachable, .ptr_stack_offset => unreachable, + .lea_direct => unreachable, .lea_tlv => unreachable, .register_overflow => unreachable, .register => |dst_reg| { @@ -5308,6 +5203,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, + .lea_direct => unreachable, .lea_tlv => unreachable, .register_overflow => unreachable, .register => |src_reg| try self.asmRegisterRegister( @@ -5335,12 +5231,13 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }), ); }, - .memory, .load_tlv => { + .memory, + .load_got, + .load_direct, + .load_tlv, + => { return self.fail("TODO implement x86 multiply source memory", .{}); }, - .linker_load => { - return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); - }, .eflags => { return self.fail("TODO implement x86 multiply source eflags", .{}); }, @@ -5352,6 +5249,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .undef => return self.genSetStack(dst_ty, off, .undef, .{}), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, + .lea_direct => unreachable, .lea_tlv => unreachable, .register_overflow => unreachable, .register => |src_reg| { @@ -5375,23 +5273,26 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{}); }, - .memory, .load_tlv, .stack_offset => { + .memory, + .load_got, + .load_direct, + .load_tlv, + .stack_offset, + => { return self.fail("TODO implement x86 multiply source memory", .{}); }, - .linker_load => { - return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); - }, .eflags => { return self.fail("TODO implement x86 multiply source eflags", .{}); }, } }, - .memory, .load_tlv => { + .memory, + .load_got, + .load_direct, + .load_tlv, + => { return self.fail("TODO implement x86 multiply destination memory", .{}); }, - .linker_load => { - return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{}); - }, } } @@ -5471,7 +5372,8 @@ fn genVarDbgInfo( .offset = -off, } }, .memory => |address| .{ .memory = address }, - .linker_load => |linker_load| .{ .linker_load = linker_load }, + .load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } }, + .load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, .immediate => |x| .{ .immediate = x }, .undef => .undef, .none => .none, @@ -5572,9 +5474,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .unreach => unreachable, .dead => unreachable, .memory => unreachable, - .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, + .load_got => unreachable, + .lea_direct => unreachable, + .load_direct => unreachable, .lea_tlv => unreachable, .load_tlv => unreachable, } @@ -5612,9 +5516,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier 
.unreach => unreachable, .dead => unreachable, .memory => unreachable, - .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, + .load_got => unreachable, + .lea_direct => unreachable, + .load_direct => unreachable, .lea_tlv => unreachable, .load_tlv => unreachable, } @@ -5639,21 +5545,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier .base = .ds, .disp = @intCast(i32, got_addr), })); - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl); - const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, .rax, .{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + } else if (self.bin_file.cast(link.File.Coff)) |_| { + const sym_index = try self.getSymbolIndexForDecl(func.owner_decl); + try self.genSetReg(Type.usize, .rax, .{ .load_got = sym_index }); try self.asmRegister(.call, .rax); - } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { - const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl); - const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, .rax, .{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + } else if (self.bin_file.cast(link.File.MachO)) |_| { + const sym_index = try self.getSymbolIndexForDecl(func.owner_decl); + try self.genSetReg(Type.usize, .rax, .{ .load_got = sym_index }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { const decl_block_index = try p9.seeDecl(func.owner_decl); @@ -5673,18 +5571,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier const decl_name = mem.sliceTo(mod.declPtr(extern_fn.owner_decl).name, 0); const lib_name = mem.sliceTo(extern_fn.lib_name, 0); if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); const sym_index = try coff_file.getGlobalSymbol(decl_name, lib_name); - try self.genSetReg(Type.usize, .rax, .{ - .linker_load = .{ - .type = .import, + _ = try self.addInst(.{ + .tag = .mov_linker, + .ops = .import_reloc, + .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ + .reg = @enumToInt(Register.rax), + .atom_index = atom_index, .sym_index = sym_index, - }, + }) }, }); try self.asmRegister(.call, .rax); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const sym_index = try macho_file.getGlobalSymbol(decl_name, lib_name); - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - const atom_index = macho_file.getAtom(atom).getSymbolIndex().?; + const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); _ = try self.addInst(.{ .tag = .call_extern, .ops = undefined, @@ -5883,20 +5784,14 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void { 4, // dword alignment ); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, addr_reg, .{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom_index = try macho_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, 4, // dword alignment ); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, addr_reg, 
.{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index }); } else { return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); } @@ -6138,6 +6033,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC .register_overflow, .ptr_stack_offset, .eflags, + .lea_direct, .lea_tlv, => unreachable, @@ -6159,31 +6055,21 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC return .{ .eflags = .nc }; }, - .memory, .load_tlv, .linker_load => { + .memory, + .load_got, + .load_direct, + .load_tlv, + => { const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_reg_lock); - switch (opt_mcv) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, - else => unreachable, - } + try self.genSetReg(Type.usize, addr_reg, switch (opt_mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => opt_mcv, + }); const some_abi_size = @intCast(u32, some_info.ty.abiSize(self.target.*)); try self.asmMemoryImmediate(.cmp, Memory.sib( @@ -7112,7 +6998,10 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE else => return self.fail("TODO implement inputs on stack for {} with abi size > 8", .{mcv}), } }, - .memory, .load_tlv, .linker_load => { + .memory, + .load_direct, + .load_tlv, + => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); @@ -7122,27 +7011,12 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - switch (mcv) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got 
=> try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + try self.genSetReg(Type.usize, addr_reg, switch (mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => unreachable, - } - + }); try self.genInlineMemcpy( .{ .ptr_stack_offset = stack_offset }, .{ .register = addr_reg }, @@ -7184,7 +7058,11 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE }, } }, - .ptr_stack_offset, .lea_tlv => { + .ptr_stack_offset, + .load_got, + .lea_direct, + .lea_tlv, + => { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); }, @@ -7342,7 +7220,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl }, } }, - .memory, .load_tlv, .linker_load => if (abi_size <= 8) { + .memory, + .load_direct, + .load_tlv, + => if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts); } else { @@ -7350,27 +7231,12 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg); defer self.register_manager.unlockReg(addr_lock); - switch (mcv) { - .memory => |addr| try self.genSetReg(Type.usize, addr_reg, .{ .immediate = addr }), - .load_tlv => |sym_index| try self.genSetReg(Type.usize, addr_reg, .{ .lea_tlv = sym_index }), - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got => try self.asmMovLinker(addr_reg, atom_index, load_struct), - .direct => try self.asmLeaLinker(addr_reg, atom_index, load_struct), - } - }, + try self.genSetReg(Type.usize, addr_reg, switch (mcv) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => unreachable, - } - + }); try self.genInlineMemcpy( .{ .ptr_stack_offset = stack_offset }, .{ .register = addr_reg }, @@ -7390,7 +7256,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl .{ .immediate = abi_size }, .{}, ), - .ptr_stack_offset, .lea_tlv => { + .ptr_stack_offset, + .load_got, + .lea_direct, + .lea_tlv, + => { const tmp_reg = try self.copyToTmpRegister(ty, mcv); const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); defer self.register_manager.unlockReg(tmp_lock); @@ -7485,21 +7355,13 @@ fn genInlineMemcpy( // Load the pointer, which is stored in memory try self.asmRegisterMemory(.mov, .rdi, Memory.sib(.qword, .{ .base = .rdi })); }, - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - 
const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got, .direct => try self.asmMovLinker(.rdi, atom_index, load_struct), - } - }, - .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_got, + .lea_direct, + .load_direct, + .lea_tlv, + .load_tlv, + => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_direct => |sym_index| .{ .lea_direct = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => dst_ptr, }), @@ -7531,21 +7393,13 @@ fn genInlineMemcpy( // Load the pointer, which is stored in memory try self.asmRegisterMemory(.mov, .rsi, Memory.sib(.qword, .{ .base = .rsi })); }, - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got, .direct => try self.asmMovLinker(.rsi, atom_index, load_struct), - } - }, - .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rsi, switch (src_ptr) { + .load_got, + .lea_direct, + .load_direct, + .lea_tlv, + .load_tlv, + => try self.genSetReg(Type.usize, .rsi, switch (src_ptr) { + .load_direct => |sym_index| .{ .lea_direct = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => src_ptr, }), @@ -7600,21 +7454,13 @@ fn genInlineMemset( // Load the pointer, which is stored in memory try self.asmRegisterMemory(.mov, .rdi, Memory.sib(.qword, .{ .base = .rdi })); }, - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (load_struct.type) { - .import => unreachable, - .got, .direct => try self.asmMovLinker(.rdi, atom_index, load_struct), - } - }, - .lea_tlv, .load_tlv => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_got, + .lea_direct, + .load_direct, + .lea_tlv, + .load_tlv, + => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_direct => |sym_index| .{ .lea_direct = sym_index }, .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, else => dst_ptr, }), @@ -7793,15 +7639,64 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } }, }, - .lea_tlv => |sym_index| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; + .load_got, .lea_direct => |sym_index| { + const atom_index = try 
self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + _ = try self.addInst(.{ + .tag = switch (mcv) { + .load_got => .mov_linker, + .lea_direct => .lea_linker, + else => unreachable, + }, + .ops = switch (mcv) { + .load_got => .got_reloc, + .lea_direct => .direct_reloc, + else => unreachable, + }, + .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ + .reg = @enumToInt(reg), + .atom_index = atom_index, + .sym_index = sym_index, + }) }, + }); + }, + .load_direct => |sym_index| { + switch (ty.zigTypeTag()) { + .Float => { + const addr_reg = (try self.register_manager.allocReg(null, gp)).to64(); + try self.genSetReg(Type.usize, addr_reg, .{ .lea_direct = sym_index }); + if (intrinsicsAllowed(self.target.*, ty)) { + return self.asmRegisterMemory( + switch (ty.tag()) { + .f32 => .movss, + .f64 => .movsd, + else => return self.fail("TODO genSetReg from memory for {}", .{ + ty.fmt(self.bin_file.options.module.?), + }), + }, + reg.to128(), + Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = addr_reg }), + ); + } + + return self.fail("TODO genSetReg from memory for float with no intrinsics", .{}); + }, + else => { + const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); + _ = try self.addInst(.{ + .tag = .mov_linker, + .ops = .direct_reloc, + .data = .{ .payload = try self.addExtra(Mir.LeaRegisterReloc{ + .reg = @enumToInt(registerAlias(reg, abi_size)), + .atom_index = atom_index, + .sym_index = sym_index, + }) }, + }); + }, + } + }, + .lea_tlv => |sym_index| { + const atom_index = try self.getSymbolIndexForDecl(self.mod_fn.owner_decl); if (self.bin_file.cast(link.File.MachO)) |_| { _ = try self.addInst(.{ .tag = .mov_linker, @@ -7844,39 +7739,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void ), } }, - .linker_load => |load_struct| { - const atom_index = if (self.bin_file.cast(link.File.MachO)) |macho_file| blk: { - const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk macho_file.getAtom(atom).getSymbolIndex().?; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| blk: { - const atom = try coff_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl); - break :blk coff_file.getAtom(atom).getSymbolIndex().?; - } else unreachable; - - switch (ty.zigTypeTag()) { - .Float => { - const base_reg = (try self.register_manager.allocReg(null, gp)).to64(); - try self.asmLeaLinker(base_reg, atom_index, load_struct); - - if (intrinsicsAllowed(self.target.*, ty)) { - return self.asmRegisterMemory( - switch (ty.tag()) { - .f32 => .movss, - .f64 => .movsd, - else => return self.fail("TODO genSetReg from memory for {}", .{ - ty.fmt(self.bin_file.options.module.?), - }), - }, - reg.to128(), - Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base_reg.to64() }), - ); - } - - return self.fail("TODO genSetReg from memory for float with no intrinsics", .{}); - }, - else => try self.asmMovLinker(registerAlias(reg, abi_size), atom_index, load_struct), - } - }, .stack_offset => |off| { switch (ty.zigTypeTag()) { .Int => switch (ty.intInfo(self.target.*).signedness) { @@ -8515,20 +8377,14 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void { 4, // dword alignment ); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, addr_reg, .{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index }); } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { const atom_index = 
try macho_file.getOrCreateAtomForLazySymbol( .{ .kind = .const_data, .ty = Type.anyerror }, 4, // dword alignment ); const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?; - try self.genSetReg(Type.usize, addr_reg, .{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + try self.genSetReg(Type.usize, addr_reg, .{ .load_got = sym_index }); } else { return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)}); } @@ -8839,9 +8695,10 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue { .mcv => |mcv| switch (mcv) { .none => .none, .undef => .undef, - .linker_load => |ll| .{ .linker_load = ll }, .immediate => |imm| .{ .immediate = imm }, .memory => |addr| .{ .memory = addr }, + .load_direct => |sym_index| .{ .load_direct = sym_index }, + .load_got => |sym_index| .{ .load_got = sym_index }, .load_tlv => |sym_index| .{ .load_tlv = sym_index }, }, .fail => |msg| { @@ -9140,3 +8997,13 @@ fn intrinsicsAllowed(target: Target, ty: Type) bool { fn hasAvxSupport(target: Target) bool { return Target.x86.featureSetHasAny(target.cpu.features, .{ .avx, .avx2 }); } + +fn getSymbolIndexForDecl(self: *Self, decl_index: Module.Decl.Index) !u32 { + if (self.bin_file.cast(link.File.MachO)) |macho_file| { + const atom = try macho_file.getOrCreateAtomForDecl(decl_index); + return macho_file.getAtom(atom).getSymbolIndex().?; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const atom = try coff_file.getOrCreateAtomForDecl(decl_index); + return coff_file.getAtom(atom).getSymbolIndex().?; + } else unreachable; +} diff --git a/src/codegen.zig b/src/codegen.zig index 15f4440788..6d6238ceda 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -931,11 +931,17 @@ pub const GenResult = union(enum) { /// The bit-width of the immediate may be smaller than `u64`. For example, on 32-bit targets /// such as ARM, the immediate will never exceed 32-bits. immediate: u64, - linker_load: LinkerLoad, /// Threadlocal variable with address deferred until the linker allocates /// everything in virtual memory. /// Payload is a symbol index. load_tlv: u32, + /// Decl with address deferred until the linker allocates everything in virtual memory. + /// Payload is a symbol index. + load_direct: u32, + /// Decl referenced via GOT with address deferred until the linker allocates + /// everything in virtual memory. + /// Payload is a symbol index. + load_got: u32, /// Direct by-address reference to memory location. 
memory: u64, }; @@ -1007,17 +1013,11 @@ fn genDeclRef( if (is_threadlocal) { return GenResult.mcv(.{ .load_tlv = sym_index }); } - return GenResult.mcv(.{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + return GenResult.mcv(.{ .load_got = sym_index }); } else if (bin_file.cast(link.File.Coff)) |coff_file| { const atom_index = try coff_file.getOrCreateAtomForDecl(decl_index); const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?; - return GenResult.mcv(.{ .linker_load = .{ - .type = .got, - .sym_index = sym_index, - } }); + return GenResult.mcv(.{ .load_got = sym_index }); } else if (bin_file.cast(link.File.Plan9)) |p9| { const decl_block_index = try p9.seeDecl(decl_index); const decl_block = p9.getDeclBlock(decl_block_index); @@ -1044,15 +1044,9 @@ fn genUnnamedConst( if (bin_file.cast(link.File.Elf)) |elf_file| { return GenResult.mcv(.{ .memory = elf_file.getSymbol(local_sym_index).st_value }); } else if (bin_file.cast(link.File.MachO)) |_| { - return GenResult.mcv(.{ .linker_load = .{ - .type = .direct, - .sym_index = local_sym_index, - } }); + return GenResult.mcv(.{ .load_direct = local_sym_index }); } else if (bin_file.cast(link.File.Coff)) |_| { - return GenResult.mcv(.{ .linker_load = .{ - .type = .direct, - .sym_index = local_sym_index, - } }); + return GenResult.mcv(.{ .load_direct = local_sym_index }); } else if (bin_file.cast(link.File.Plan9)) |p9| { const ptr_bits = target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); From ecc52d859f7f1e400c8bfc08aea971f4596649cf Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 15 Apr 2023 14:57:38 +0200 Subject: [PATCH 3/3] x86_64: fix loading/storing pointers from linker deferred memory locations --- src/arch/x86_64/CodeGen.zig | 116 +++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 47 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 476b4aae47..20c945e451 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3619,11 +3619,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}), } }, - .load_direct, .load_tlv => |sym_index| try self.load(dst_mcv, switch (ptr) { - .load_direct => .{ .lea_direct = sym_index }, - .load_tlv => .{ .lea_tlv = sym_index }, - else => unreachable, - }, ptr_ty), + .load_direct => |sym_index| { + const addr_reg = try self.copyToTmpRegister(Type.usize, .{ .lea_direct = sym_index }); + const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); + defer self.register_manager.unlockReg(addr_reg_lock); + // Load the pointer, which is stored in memory + try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{ .base = addr_reg })); + try self.load(dst_mcv, .{ .register = addr_reg }, ptr_ty); + }, + .load_tlv => |sym_index| try self.load(dst_mcv, .{ .lea_tlv = sym_index }, ptr_ty), .memory, .load_got, .lea_direct, .lea_tlv => { const reg = try self.copyToTmpRegister(ptr_ty, ptr); try self.load(dst_mcv, .{ .register = reg }, ptr_ty); @@ -3817,15 +3821,15 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type defer self.register_manager.unlockReg(addr_reg_lock); switch (ptr) { - .memory => |addr| { - try self.genSetReg(ptr_ty, addr_reg, .{ .immediate = addr }); - // Load the pointer, which is stored in memory - try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{ .base = addr_reg })); - }, + .memory => 
|addr| try self.genSetReg(ptr_ty, addr_reg, .{ .immediate = addr }),
                 .load_direct => |sym_index| try self.genSetReg(ptr_ty, addr_reg, .{ .lea_direct = sym_index }),
                 .load_tlv => |sym_index| try self.genSetReg(ptr_ty, addr_reg, .{ .lea_tlv = sym_index }),
                 else => unreachable,
             }
+            if (ptr != .load_tlv) {
+                // Load the pointer, which is stored in memory
+                try self.asmRegisterMemory(.mov, addr_reg, Memory.sib(.qword, .{ .base = addr_reg }));
+            }
 
             const new_ptr = MCValue{ .register = addr_reg };
             try self.store(new_ptr, value, ptr_ty, value_ty);
@@ -7223,6 +7227,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
         .memory,
         .load_direct,
         .load_tlv,
+        .lea_direct,
+        .lea_tlv,
         => if (abi_size <= 8) {
             const reg = try self.copyToTmpRegister(ty, mcv);
             return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts);
         } else {
@@ -7235,7 +7241,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
                 .memory => |addr| .{ .immediate = addr },
                 .load_direct => |sym_index| .{ .lea_direct = sym_index },
                 .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
-                else => unreachable,
+                else => mcv,
             });
             try self.genInlineMemcpy(
                 .{ .ptr_stack_offset = stack_offset },
                 .{ .register = addr_reg },
@@ -7258,8 +7264,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
             ),
         .ptr_stack_offset,
         .load_got,
-        .lea_direct,
-        .lea_tlv,
         => {
             const tmp_reg = try self.copyToTmpRegister(ty, mcv);
             const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_lock);
@@ -7350,21 +7354,27 @@ fn genInlineMemcpy(
     try self.spillRegisters(&.{ .rdi, .rsi, .rcx });
 
     switch (dst_ptr) {
-        .memory => |addr| {
-            try self.genSetReg(Type.usize, .rdi, .{ .immediate = addr });
-            // Load the pointer, which is stored in memory
-            try self.asmRegisterMemory(.mov, .rdi, Memory.sib(.qword, .{ .base = .rdi }));
+        .lea_tlv,
+        .load_tlv,
+        => {
+            try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) {
+                .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
+                else => dst_ptr,
+            });
         },
+        .memory,
         .load_got,
         .lea_direct,
         .load_direct,
-        .lea_tlv,
-        .load_tlv,
-        => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) {
-            .load_direct => |sym_index| .{ .lea_direct = sym_index },
-            .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
-            else => dst_ptr,
-        }),
+        => {
+            try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) {
+                .memory => |addr| .{ .immediate = addr },
+                .load_direct => |sym_index| .{ .lea_direct = sym_index },
+                else => dst_ptr,
+            });
+            // Load the pointer, which is stored in memory
+            try self.asmRegisterMemory(.mov, .rdi, Memory.sib(.qword, .{ .base = .rdi }));
+        },
         .stack_offset, .ptr_stack_offset => |off| {
             try self.asmRegisterMemory(switch (dst_ptr) {
                 .stack_offset => .mov,
@@ -7388,21 +7398,27 @@ ...
     }
 
     switch (src_ptr) {
-        .memory => |addr| {
-            try self.genSetReg(Type.usize, .rsi, .{ .immediate = addr });
-            // Load the pointer, which is stored in memory
-            try self.asmRegisterMemory(.mov, .rsi, Memory.sib(.qword, .{ .base = .rsi }));
+        .lea_tlv,
+        .load_tlv,
+        => {
+            try self.genSetReg(Type.usize, .rsi, switch (src_ptr) {
+                .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
+                else => src_ptr,
+            });
         },
+        .memory,
         .load_got,
+        .lea_direct,
+        .load_direct,
-        .lea_tlv,
-        .load_tlv,
-        => try self.genSetReg(Type.usize, .rsi, switch (src_ptr) {
-            .load_direct => |sym_index| .{ .lea_direct = sym_index },
-            .load_tlv => |sym_index| .{ .lea_tlv = sym_index },
-            else => src_ptr,
-        }),
+        => {
+            try self.genSetReg(Type.usize, .rsi, switch (src_ptr) {
+                .memory => |addr| .{ .immediate = 
addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + else => src_ptr, + }); + // Load the pointer, which is stored in memory + try self.asmRegisterMemory(.mov, .rsi, Memory.sib(.qword, .{ .base = .rsi })); + }, .stack_offset, .ptr_stack_offset => |off| { try self.asmRegisterMemory(switch (src_ptr) { .stack_offset => .mov, @@ -7449,21 +7465,27 @@ fn genInlineMemset( try self.spillRegisters(&.{ .rdi, .al, .rcx }); switch (dst_ptr) { - .memory => |addr| { - try self.genSetReg(Type.usize, .rdi, .{ .immediate = addr }); + .lea_tlv, + .load_tlv, + => { + try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, + else => dst_ptr, + }); + }, + .load_got => try self.genSetReg(Type.usize, .rdi, dst_ptr), + .memory, + .lea_direct, + .load_direct, + => { + try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { + .memory => |addr| .{ .immediate = addr }, + .load_direct => |sym_index| .{ .lea_direct = sym_index }, + else => dst_ptr, + }); // Load the pointer, which is stored in memory try self.asmRegisterMemory(.mov, .rdi, Memory.sib(.qword, .{ .base = .rdi })); }, - .load_got, - .lea_direct, - .load_direct, - .lea_tlv, - .load_tlv, - => try self.genSetReg(Type.usize, .rdi, switch (dst_ptr) { - .load_direct => |sym_index| .{ .lea_direct = sym_index }, - .load_tlv => |sym_index| .{ .lea_tlv = sym_index }, - else => dst_ptr, - }), .stack_offset, .ptr_stack_offset => |off| { try self.asmRegisterMemory(switch (dst_ptr) { .stack_offset => .mov,