stage2: sparc64: Implement airFence + SPARCv9 membar

Koakuma 2022-06-16 01:32:02 +07:00
parent 18d61d691c
commit 36bfe4b7ef
4 changed files with 52 additions and 1 deletion

@@ -578,7 +578,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
            .breakpoint => try self.airBreakpoint(),
            .ret_addr => @panic("TODO try self.airRetAddr(inst)"),
            .frame_addr => @panic("TODO try self.airFrameAddress(inst)"),
-           .fence => @panic("TODO try self.airFence()"),
+           .fence => try self.airFence(inst),
            .cond_br => try self.airCondBr(inst),
            .dbg_stmt => try self.airDbgStmt(inst),
            .fptrunc => @panic("TODO try self.airFptrunc(inst)"),
@@ -1442,6 +1442,29 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}

fn airFence(self: *Self, inst: Air.Inst.Index) !void {
    // TODO: weaken this as needed; currently this implements the strongest membar form.
    const fence = self.air.instructions.items(.data)[inst].fence;
    _ = fence;

    // membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad
    _ = try self.addInst(.{
        .tag = .membar,
        .data = .{
            .membar_mask = .{
                .mmask = .{
                    .store_store = true,
                    .store_load = true,
                    .load_store = true,
                    .load_load = true,
                },
            },
        },
    });

    return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
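
A note on the TODO above: the fence payload read here is a std.builtin.AtomicOrder, so a follow-up could derive the mask from the ordering instead of always emitting the strongest barrier. Below is a minimal sketch of one possible mapping, assuming the file's existing imports (std and bits.Instruction); the helper name mmaskForOrder and the exact mapping are illustrative, not part of this commit:

fn mmaskForOrder(order: std.builtin.AtomicOrder) Instruction.MemOrderingConstraint {
    // Hypothetical mapping from AIR fence ordering to SPARCv9 mmask bits,
    // following the usual acquire/release decomposition.
    return switch (order) {
        .Acquire => .{ .load_load = true, .load_store = true },
        .Release => .{ .load_store = true, .store_store = true },
        .AcqRel => .{ .load_load = true, .load_store = true, .store_store = true },
        .SeqCst => .{ .load_load = true, .load_store = true, .store_load = true, .store_store = true },
        // Unordered/Monotonic fences need no membar.
        else => .{},
    };
}
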
fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {

@@ -98,6 +98,8 @@ pub fn emitMir(
            .xor => try emit.mirArithmetic3Op(inst),
            .xnor => try emit.mirArithmetic3Op(inst),

            .membar => try emit.mirMembar(inst),

            .movcc => try emit.mirConditionalMove(inst),
            .movr => @panic("TODO implement sparc64 movr"),
@@ -342,6 +344,17 @@ fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void {
    }
}

fn mirMembar(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const mask = emit.mir.instructions.items(.data)[inst].membar_mask;
    assert(tag == .membar);

    try emit.writeInstruction(Instruction.membar(
        mask.cmask,
        mask.mmask,
    ));
}
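
As a usage note grounded in this diff: mirMembar forwards the MIR mask pair straight to the encoder, so the instruction emitted for airFence corresponds to the assembly membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad. Calling the encoder directly with the same masks (all names as in this diff) would look like:

// Emits the same full barrier that airFence generates, bypassing MIR.
try emit.writeInstruction(Instruction.membar(
    .{}, // cmask: no completion constraints requested
    .{ .load_load = true, .store_load = true, .load_store = true, .store_store = true },
));
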
fn mirNop(emit: *Emit) !void {
    try emit.writeInstruction(Instruction.nop());
}

@@ -78,6 +78,10 @@ pub const Inst = struct {
        xor,
        xnor,

        /// A.32 Memory Barrier
        /// This uses the membar_mask field.
        membar,

        /// A.35 Move Integer Register on Condition (MOVcc)
        /// This uses the conditional_move_int field.
        movcc,
@@ -236,6 +240,13 @@ pub const Inst = struct {
            inst: Index,
        },

        /// Membar mask; controls the barrier behavior.
        /// Used by e.g. membar.
        membar_mask: struct {
            mmask: Instruction.MemOrderingConstraint = .{},
            cmask: Instruction.MemCompletionConstraint = .{},
        },

        /// Conditional move, checking the integer status code.
        /// If is_imm is true then it uses the imm field of rs2_or_imm,
        /// otherwise it uses the rs2 field.
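
For context, the two masks mirror SPARC V9 section A.32: mmask selects ordering constraints (#LoadLoad, #StoreLoad, #LoadStore, #StoreStore) and cmask selects completion constraints (#Lookaside, #MemIssue, #Sync). A sketch of how the constraint types in bits.zig are presumably laid out, with field order following the architectural bit positions (the actual definitions may differ):

pub const MemOrderingConstraint = packed struct {
    load_load: bool = false, // mmask bit 0: #LoadLoad
    store_load: bool = false, // mmask bit 1: #StoreLoad
    load_store: bool = false, // mmask bit 2: #LoadStore
    store_store: bool = false, // mmask bit 3: #StoreStore
};

pub const MemCompletionConstraint = packed struct {
    lookaside: bool = false, // cmask bit 0: #Lookaside
    memissue: bool = false, // cmask bit 1: #MemIssue
    sync: bool = false, // cmask bit 2: #Sync
};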

@@ -1261,6 +1261,10 @@ pub const Instruction = union(enum) {
        };
    }

    pub fn membar(cmask: MemCompletionConstraint, mmask: MemOrderingConstraint) Instruction {
        return format3h(cmask, mmask);
    }
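
A quick encoding sanity check: with all four mmask bits set and an empty cmask, this should assemble to the word 0x8143E00F (op = 2, rd = 0, op3 = 0b10_1000, rs1 = 15, i = 1, cmask = 0b000, mmask = 0b1111), the canonical full-barrier membar. A hedged test sketch, assuming Instruction exposes a toU32 accessor like the other self-hosted backends:

test "membar encodes the full barrier" {
    const insn = Instruction.membar(
        .{}, // cmask: none
        .{ .load_load = true, .store_load = true, .load_store = true, .store_store = true },
    );
    try std.testing.expectEqual(@as(u32, 0x8143E00F), insn.toU32());
}
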
    pub fn movcc(comptime s2: type, cond: Condition, ccr: CCR, rs2: s2, rd: Register) Instruction {
        return switch (s2) {
            Register => format4c(0b10_1100, cond, ccr, rs2, rd),