diff --git a/src/Air.zig b/src/Air.zig
index 3b1dfc644a..4379d5dde1 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1889,7 +1889,10 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         },
         .load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
         .slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
-        .atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
+        .atomic_load => switch (data.atomic_load.order) {
+            .unordered, .monotonic => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
+            else => true, // Stronger memory orderings have inter-thread side effects.
+        },
     };
 }
 
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index d4b96ae3f3..7ed8352b5d 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -7738,7 +7738,10 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     const bit_size = elem_ty.bitSize(zcu);
     if (bit_size > 64) return func.fail("TODO: airAtomicLoad > 64 bits", .{});
 
-    const result_mcv = try func.allocRegOrMem(elem_ty, inst, true);
+    const result_mcv: MCValue = if (func.liveness.isUnused(inst))
+        .{ .register = .zero }
+    else
+        try func.allocRegOrMem(elem_ty, inst, true);
     assert(result_mcv == .register); // should be less than 8 bytes
 
     if (order == .seq_cst) {
@@ -7754,11 +7757,10 @@ fn airAtomicLoad(func: *Func, inst: Air.Inst.Index) !void {
     try func.load(result_mcv, ptr_mcv, ptr_ty);
 
     switch (order) {
-        // Don't guarnetee other memory operations to be ordered after the load.
-        .unordered => {},
-        .monotonic => {},
-        // Make sure all previous reads happen before any reading or writing accurs.
-        .seq_cst, .acquire => {
+        // Don't guarantee other memory operations to be ordered after the load.
+        .unordered, .monotonic => {},
+        // Make sure all previous reads happen before any reading or writing occurs.
+        .acquire, .seq_cst => {
             _ = try func.addInst(.{
                 .tag = .fence,
                 .data = .{ .fence = .{
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 3ba38ccd7b..f0478d7e23 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -106219,23 +106219,29 @@ fn airAtomicRmw(self: *CodeGen, inst: Air.Inst.Index) !void {
 
 fn airAtomicLoad(self: *CodeGen, inst: Air.Inst.Index) !void {
     const atomic_load = self.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
 
+    const result: MCValue = result: {
+        const ptr_ty = self.typeOf(atomic_load.ptr);
+        const ptr_mcv = try self.resolveInst(atomic_load.ptr);
+        const ptr_lock = switch (ptr_mcv) {
+            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+            else => null,
+        };
+        defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
-    const ptr_ty = self.typeOf(atomic_load.ptr);
-    const ptr_mcv = try self.resolveInst(atomic_load.ptr);
-    const ptr_lock = switch (ptr_mcv) {
-        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-        else => null,
-    };
-    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
+        const unused = self.liveness.isUnused(inst);
-    const dst_mcv =
-        if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
+        const dst_mcv: MCValue = if (unused)
+            .{ .register = try self.register_manager.allocReg(null, self.regSetForType(ptr_ty.childType(self.pt.zcu))) }
+        else if (self.reuseOperand(inst, atomic_load.ptr, 0, ptr_mcv))
             ptr_mcv
         else
             try self.allocRegOrMem(inst, true);
 
-    try self.load(dst_mcv, ptr_ty, ptr_mcv);
-    return self.finishAir(inst, dst_mcv, .{ atomic_load.ptr, .none, .none });
+        try self.load(dst_mcv, ptr_ty, ptr_mcv);
+
+        break :result if (unused) .unreach else dst_mcv;
+    };
+    return self.finishAir(inst, result, .{ atomic_load.ptr, .none, .none });
 }
 
 fn airAtomicStore(self: *CodeGen, inst: Air.Inst.Index, order: std.builtin.AtomicOrder) !void {
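
Note: the sketch below is editorial context, not part of the patch; the names ready, payload, producer, consumer, and syncOnly are invented for illustration. It shows why Air.mustLower must treat .acquire/.seq_cst atomic loads as having side effects even when Liveness marks the result unused: such a load participates in inter-thread synchronization regardless of whether its value is consumed, so only .unordered/.monotonic loads through non-volatile pointers may safely be skipped.

    const std = @import("std");

    var ready: bool = false;
    var payload: u32 = 0;

    fn producer() void {
        payload = 42; // plain store, published by the .release below
        @atomicStore(bool, &ready, true, .release);
    }

    fn consumer() void {
        // An .acquire load that observes `true` synchronizes with the
        // producer's .release store...
        while (!@atomicLoad(bool, &ready, .acquire)) {}
        // ...so this plain read is guaranteed to see 42.
        std.debug.assert(payload == 42);
    }

    fn syncOnly() void {
        // Here the loaded value is discarded, so the corresponding AIR
        // .atomic_load is unused per Liveness -- yet it still carries an
        // acquire edge and must be lowered. This is the case the patch keeps.
        _ = @atomicLoad(bool, &ready, .acquire);
    }

Consistent with that, the backends keep emitting the load but route the dead result somewhere harmless: riscv64 targets the hardwired zero register (writes to x0 are discarded, while the load and any fence still execute), and x86_64 loads into a freshly allocated scratch register and reports the instruction's result as .unreach.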