diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 6f6137e030..b071f8ac59 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -578,7 +578,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             .breakpoint => try self.airBreakpoint(),
             .ret_addr => @panic("TODO try self.airRetAddr(inst)"),
             .frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
-            .fence => @panic("TODO try self.airFence()"),
+            .fence => try self.airFence(inst),
             .cond_br => try self.airCondBr(inst),
             .dbg_stmt => try self.airDbgStmt(inst),
             .fptrunc => @panic("TODO try self.airFptrunc(inst)"),
@@ -1442,6 +1442,29 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }

+fn airFence(self: *Self, inst: Air.Inst.Index) !void {
+    // TODO: weaken this as needed; currently this implements the strongest membar form.
+    const fence = self.air.instructions.items(.data)[inst].fence;
+    _ = fence;
+
+    // membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad
+    _ = try self.addInst(.{
+        .tag = .membar,
+        .data = .{
+            .membar_mask = .{
+                .mmask = .{
+                    .store_store = true,
+                    .store_load = true,
+                    .load_store = true,
+                    .load_load = true,
+                },
+            },
+        },
+    });
+
+    return self.finishAir(inst, .dead, .{ .none, .none, .none });
+}
+
 fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
     const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig
index 18abea63ed..e1546fbc28 100644
--- a/src/arch/sparc64/Emit.zig
+++ b/src/arch/sparc64/Emit.zig
@@ -98,6 +98,8 @@ pub fn emitMir(
             .xor => try emit.mirArithmetic3Op(inst),
             .xnor => try emit.mirArithmetic3Op(inst),

+            .membar => try emit.mirMembar(inst),
+
             .movcc => try emit.mirConditionalMove(inst),
             .movr => @panic("TODO implement sparc64 movr"),

@@ -342,6 +344,17 @@ fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }

+fn mirMembar(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const mask = emit.mir.instructions.items(.data)[inst].membar_mask;
+    assert(tag == .membar);
+
+    try emit.writeInstruction(Instruction.membar(
+        mask.cmask,
+        mask.mmask,
+    ));
+}
+
 fn mirNop(emit: *Emit) !void {
     try emit.writeInstruction(Instruction.nop());
 }
diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig
index b46ecd9c79..25b4a4ce2c 100644
--- a/src/arch/sparc64/Mir.zig
+++ b/src/arch/sparc64/Mir.zig
@@ -78,6 +78,10 @@ pub const Inst = struct {
         xor,
         xnor,

+        /// A.32 Memory Barrier
+        /// This uses the membar_mask field.
+        membar,
+
         /// A.35 Move Integer Register on Condition (MOVcc)
         /// This uses the conditional_move_int field.
         movcc,
@@ -236,6 +240,13 @@ pub const Inst = struct {
             inst: Index,
         },

+        /// Membar mask; controls the barrier behavior.
+        /// Used by e.g. membar.
+        membar_mask: struct {
+            mmask: Instruction.MemOrderingConstraint = .{},
+            cmask: Instruction.MemCompletionConstraint = .{},
+        },
+
         /// Conditional move, checking the integer status code
         /// if is_imm true then it uses the imm field of rs2_or_imm,
         /// otherwise it uses rs2 field.
diff --git a/src/arch/sparc64/bits.zig b/src/arch/sparc64/bits.zig
index 29ef9da1d2..5c856ea756 100644
--- a/src/arch/sparc64/bits.zig
+++ b/src/arch/sparc64/bits.zig
@@ -1261,6 +1261,10 @@ pub const Instruction = union(enum) {
         };
     }

+    pub fn membar(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction {
+        return format3h(cmask, mmask);
+    }
+
     pub fn movcc(comptime s2: type, cond: Condition, ccr: CCR, rs2: s2, rd: Register) Instruction {
         return switch (s2) {
             Register => format4c(0b10_1100, cond, ccr, rs2, rd),
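
The TODO in airFence leaves the ordering-aware weakening for later: every AIR fence currently becomes a membar with all four ordering bits set. Below is a sketch of what that weakening might look like, offered as a reviewer's note rather than part of the patch. The helper name membarMask and the ordering-to-mask mapping are assumptions; the code relies on the file's existing std import and on the Instruction.MemOrderingConstraint type that the patch references in bits.zig.

    // Hypothetical helper for the airFence TODO: derive the smallest mmask
    // that still honors the requested atomic ordering, instead of always
    // emitting the strongest barrier. The mapping is an assumption.
    fn membarMask(order: std.builtin.AtomicOrder) Instruction.MemOrderingConstraint {
        return switch (order) {
            // Acquire: earlier loads complete before later loads and stores.
            .Acquire => .{ .load_load = true, .load_store = true },
            // Release: earlier loads and stores complete before later stores.
            .Release => .{ .load_store = true, .store_store = true },
            .AcqRel => .{ .load_load = true, .load_store = true, .store_store = true },
            // SeqCst additionally orders earlier stores before later loads.
            .SeqCst => .{
                .store_store = true,
                .store_load = true,
                .load_store = true,
                .load_load = true,
            },
            // Weaker orderings need no barrier instruction here.
            else => .{},
        };
    }

airFence would then pass membarMask(fence) as the mmask instead of the hardcoded all-true struct.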