Merge pull request #11658 from koachan/sparc64-codegen

stage2: sparc64: Make basic test harness run
This commit is contained in:
Jakub Konka 2022-05-17 09:20:02 +02:00 committed by GitHub
commit 3fde14035b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 1909 additions and 62 deletions

File diff suppressed because it is too large Load Diff

View File

@ -8,6 +8,7 @@ const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const ErrorMsg = Module.ErrorMsg;
const Liveness = @import("../../Liveness.zig");
const log = std.log.scoped(.sparcv9_emit);
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const DW = std.dwarf;
const leb128 = std.leb;
@ -31,16 +32,44 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{},
/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{},
/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
/// Errors that can occur while lowering Mir into machine code.
/// `EmitFail` is produced by `fail` after recording an `ErrorMsg`.
const InnerError = error{
    OutOfMemory,
    EmitFail,
};
/// The concrete machine lowering chosen for a branch Mir instruction.
const BranchType = enum {
    bpcc,
    bpr,

    /// Returns the initial (optimistic, smallest) lowering for the
    /// given branch tag; `lowerBranches` may widen it later.
    fn default(tag: Mir.Inst.Tag) BranchType {
        switch (tag) {
            .bpcc => return .bpcc,
            .bpr => return .bpr,
            else => unreachable,
        }
    }
};
pub fn emitMir(
emit: *Emit,
) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
// Convert absolute addresses into offsets and
// find smallest lowerings for branch instructions
try emit.lowerBranches();
// Emit machine code
for (mir_tags) |tag, index| {
const inst = @intCast(u32, index);
@ -51,7 +80,8 @@ pub fn emitMir(
.add => try emit.mirArithmetic3Op(inst),
.bpcc => @panic("TODO implement sparc64 bpcc"),
.bpr => try emit.mirConditionalBranch(inst),
.bpcc => try emit.mirConditionalBranch(inst),
.call => @panic("TODO implement sparc64 call"),
@ -64,6 +94,8 @@ pub fn emitMir(
.@"or" => try emit.mirArithmetic3Op(inst),
.mulx => try emit.mirArithmetic3Op(inst),
.nop => try emit.mirNop(),
.@"return" => try emit.mirArithmetic2Op(inst),
@ -73,7 +105,12 @@ pub fn emitMir(
.sethi => try emit.mirSethi(inst),
.sll => @panic("TODO implement sparc64 sll"),
.srl => @panic("TODO implement sparc64 srl"),
.sra => @panic("TODO implement sparc64 sra"),
.sllx => @panic("TODO implement sparc64 sllx"),
.srlx => @panic("TODO implement sparc64 srlx"),
.srax => @panic("TODO implement sparc64 srax"),
.stb => try emit.mirArithmetic3Op(inst),
.sth => try emit.mirArithmetic3Op(inst),
@ -81,6 +118,7 @@ pub fn emitMir(
.stx => try emit.mirArithmetic3Op(inst),
.sub => try emit.mirArithmetic3Op(inst),
.subcc => try emit.mirArithmetic3Op(inst),
.tcc => try emit.mirTrap(inst),
}
@ -88,6 +126,14 @@ pub fn emitMir(
}
/// Releases all branch-lowering bookkeeping owned by this Emit.
pub fn deinit(emit: *Emit) void {
    const gpa = emit.bin_file.allocator;

    // Each forward-origin entry owns a list; free those before
    // dropping the map that holds them.
    var origin_lists = emit.branch_forward_origins.valueIterator();
    while (origin_lists.next()) |list| list.deinit(gpa);

    emit.branch_types.deinit(gpa);
    emit.branch_forward_origins.deinit(gpa);
    emit.code_offset_mapping.deinit(gpa);
    emit.* = undefined;
}
@ -161,6 +207,7 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void {
.lduw => try emit.writeInstruction(Instruction.lduw(i13, rs1, imm, rd)),
.ldx => try emit.writeInstruction(Instruction.ldx(i13, rs1, imm, rd)),
.@"or" => try emit.writeInstruction(Instruction.@"or"(i13, rs1, imm, rd)),
.mulx => try emit.writeInstruction(Instruction.mulx(i13, rs1, imm, rd)),
.save => try emit.writeInstruction(Instruction.save(i13, rs1, imm, rd)),
.restore => try emit.writeInstruction(Instruction.restore(i13, rs1, imm, rd)),
.stb => try emit.writeInstruction(Instruction.stb(i13, rs1, imm, rd)),
@ -168,6 +215,7 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void {
.stw => try emit.writeInstruction(Instruction.stw(i13, rs1, imm, rd)),
.stx => try emit.writeInstruction(Instruction.stx(i13, rs1, imm, rd)),
.sub => try emit.writeInstruction(Instruction.sub(i13, rs1, imm, rd)),
.subcc => try emit.writeInstruction(Instruction.subcc(i13, rs1, imm, rd)),
else => unreachable,
}
} else {
@ -180,6 +228,7 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void {
.lduw => try emit.writeInstruction(Instruction.lduw(Register, rs1, rs2, rd)),
.ldx => try emit.writeInstruction(Instruction.ldx(Register, rs1, rs2, rd)),
.@"or" => try emit.writeInstruction(Instruction.@"or"(Register, rs1, rs2, rd)),
.mulx => try emit.writeInstruction(Instruction.mulx(Register, rs1, rs2, rd)),
.save => try emit.writeInstruction(Instruction.save(Register, rs1, rs2, rd)),
.restore => try emit.writeInstruction(Instruction.restore(Register, rs1, rs2, rd)),
.stb => try emit.writeInstruction(Instruction.stb(Register, rs1, rs2, rd)),
@ -187,11 +236,56 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void {
.stw => try emit.writeInstruction(Instruction.stw(Register, rs1, rs2, rd)),
.stx => try emit.writeInstruction(Instruction.stx(Register, rs1, rs2, rd)),
.sub => try emit.writeInstruction(Instruction.sub(Register, rs1, rs2, rd)),
.subcc => try emit.writeInstruction(Instruction.subcc(Register, rs1, rs2, rd)),
else => unreachable,
}
}
}
/// Emits a conditional branch (BPcc or BPr) using the branch type
/// previously decided by `lowerBranches` and the target offset
/// recorded in `code_offset_mapping`.
fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst];
    const branch_type = emit.branch_types.get(inst).?;

    switch (branch_type) {
        .bpcc => switch (tag) {
            .bpcc => {
                const info = data.branch_predict_int;
                // Displacement is target offset minus the current
                // emit position (the branch instruction's own offset).
                const offset = @intCast(i64, emit.code_offset_mapping.get(info.inst).?) - @intCast(i64, emit.code.items.len);
                log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });

                try emit.writeInstruction(Instruction.bpcc(
                    info.cond,
                    info.annul,
                    info.pt,
                    info.ccr,
                    @intCast(i21, offset),
                ));
            },
            else => unreachable,
        },
        .bpr => switch (tag) {
            .bpr => {
                const info = data.branch_predict_reg;
                const offset = @intCast(i64, emit.code_offset_mapping.get(info.inst).?) - @intCast(i64, emit.code.items.len);
                log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });

                try emit.writeInstruction(Instruction.bpr(
                    info.cond,
                    info.annul,
                    info.pt,
                    info.rs1,
                    @intCast(i18, offset),
                ));
            },
            else => unreachable,
        },
    }
}
/// Emits a single `nop` instruction.
fn mirNop(emit: *Emit) !void {
    return emit.writeInstruction(Instruction.nop());
}
@ -232,6 +326,16 @@ fn mirTrap(emit: *Emit, inst: Mir.Inst.Index) !void {
// Common helper functions
/// Returns the Mir index of the instruction a branch jumps to.
/// Asserts that `inst` is one of the branch tags handled here.
fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst];

    return switch (tag) {
        .bpcc => data.branch_predict_int.inst,
        .bpr => data.branch_predict_reg.inst,
        else => unreachable,
    };
}
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
const delta_line = @intCast(i32, line) - @intCast(i32, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
@ -264,6 +368,164 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
return error.EmitFail;
}
/// Returns the number of machine-code bytes the given Mir instruction
/// will occupy once emitted.
fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
    const tag = emit.mir.instructions.items(.tag)[inst];

    // Debug-info pseudo instructions produce no machine code; every
    // other Mir instruction currently lowers to exactly one 4-byte
    // machine instruction.
    return switch (tag) {
        .dbg_line, .dbg_epilogue_begin, .dbg_prologue_end => 0,
        else => 4,
    };
}
/// Whether the given Mir tag is a branch that needs offset lowering.
/// Keep in sync with `branchTarget` and `mirConditionalBranch`.
fn isBranch(tag: Mir.Inst.Tag) bool {
    return tag == .bpcc or tag == .bpr;
}
/// Decides the final lowering of every branch instruction and the code
/// offset of every branch target.
///
/// A first pass records all branches (`branch_types`), all forward
/// branches per target (`branch_forward_origins`), and placeholder
/// offsets (`code_offset_mapping`). Subsequent passes recompute
/// offsets until a fixed point: a widened branch type can change
/// instruction sizes and thus shift all later offsets, so passes
/// repeat until no branch type changes.
fn lowerBranches(emit: *Emit) !void {
    const mir_tags = emit.mir.instructions.items(.tag);
    const allocator = emit.bin_file.allocator;

    // First pass: Note down all branches and their target
    // instructions, i.e. populate branch_types,
    // branch_forward_origins, and code_offset_mapping
    //
    // TODO optimization opportunity: do this in codegen while
    // generating MIR
    for (mir_tags) |tag, index| {
        const inst = @intCast(u32, index);
        if (isBranch(tag)) {
            const target_inst = emit.branchTarget(inst);

            // Remember this branch instruction, starting with its
            // smallest (default) lowering.
            try emit.branch_types.put(allocator, inst, BranchType.default(tag));

            // Forward branches require some extra bookkeeping: we only
            // know their offset once we arrive at the target
            // instruction, so we must be able to reach the branch
            // instruction from the target in order to update its
            // branch type there.
            if (target_inst > inst) {
                // Remember the branch instruction's own offset too,
                // since a forward displacement is measured from it.
                try emit.code_offset_mapping.put(allocator, inst, 0);

                if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
                    try origin_list.append(allocator, inst);
                } else {
                    var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
                    try origin_list.append(allocator, inst);
                    try emit.branch_forward_origins.put(allocator, target_inst, origin_list);
                }
            }

            // Remember the target instruction index so that we
            // update the real code offset in all future passes.
            //
            // putNoClobber may not be used here: multiple branches may
            // share the same target instruction, so this put may
            // legitimately overwrite an existing entry.
            try emit.code_offset_mapping.put(allocator, target_inst, 0);
        }
    }

    // Further passes: until all branches are lowered, iterate
    // through all instructions and calculate new offsets and
    // potentially new branch types.
    var all_branches_lowered = false;
    while (!all_branches_lowered) {
        all_branches_lowered = true;
        var current_code_offset: usize = 0;

        for (mir_tags) |tag, index| {
            const inst = @intCast(u32, index);

            // If this instruction is contained in the code offset
            // mapping (i.e. it is a branch target or a forward
            // branch), update its recorded code offset.
            if (emit.code_offset_mapping.getPtr(inst)) |offset| {
                offset.* = current_code_offset;
            }

            // If this instruction is a backward branch, calculate the
            // offset; a larger-than-expected distance may force a
            // wider branch type and thus another pass.
            if (isBranch(tag)) {
                const target_inst = emit.branchTarget(inst);
                if (target_inst < inst) {
                    const target_offset = emit.code_offset_mapping.get(target_inst).?;
                    const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
                    const branch_type = emit.branch_types.getPtr(inst).?;
                    const optimal_branch_type = try emit.optimalBranchType(tag, offset);
                    if (branch_type.* != optimal_branch_type) {
                        branch_type.* = optimal_branch_type;
                        all_branches_lowered = false;
                    }

                    log.debug("lowerBranches: branch {} has offset {}", .{ inst, offset });
                }
            }

            // If this instruction is the target of one or more
            // forward branches, calculate their offsets, which may
            // likewise force wider branch types.
            if (emit.branch_forward_origins.get(inst)) |origin_list| {
                for (origin_list.items) |forward_branch_inst| {
                    const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                    const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
                    const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
                    const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
                    const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                    if (branch_type.* != optimal_branch_type) {
                        branch_type.* = optimal_branch_type;
                        all_branches_lowered = false;
                    }

                    log.debug("lowerBranches: branch {} has offset {}", .{ forward_branch_inst, offset });
                }
            }

            // Advance past this instruction (0 bytes for debug-info
            // pseudo instructions, 4 bytes otherwise).
            current_code_offset += emit.instructionSize(inst);
        }
    }
}
/// Returns the smallest branch lowering whose displacement field can
/// encode `offset`, a byte distance from the branch instruction to its
/// target. Fails with a TODO message when the distance exceeds the
/// reach of the corresponding instruction format.
fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
    // Branch displacements count in 4-byte instructions, so the raw
    // byte offset must be word-aligned.
    assert(offset & 0b11 == 0);

    switch (tag) {
        // TODO use the following strategy to implement long branches:
        // - Negate the conditional and target of the original instruction;
        // - In the space immediately after the branch, load
        //   the address of the original target, preferably in
        //   a PC-relative way, into %o7; and
        // - jmpl %o7 + %g0, %g0
        .bpcc => {
            // BPcc byte displacements must fit in i21 (+-1 MiB).
            if (std.math.cast(i21, offset)) |_| {
                return BranchType.bpcc;
            } else |_| {
                return emit.fail("TODO support BPcc branches larger than +-1 MiB", .{});
            }
        },
        .bpr => {
            // BPr byte displacements must fit in i18 (+-128 KiB).
            if (std.math.cast(i18, offset)) |_| {
                return BranchType.bpr;
            } else |_| {
                return emit.fail("TODO support BPr branches larger than +-128 KiB", .{});
            }
        },
        else => unreachable,
    }
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
// SPARCv9 instructions are always arranged in BE regardless of the
// endianness mode the CPU is running in (Section 3.1 of the ISA specification).

View File

@ -43,8 +43,12 @@ pub const Inst = struct {
// TODO add other operations.
add,
/// A.3 Branch on Integer Register with Prediction (BPr)
/// This uses the branch_predict_reg field.
bpr,
/// A.7 Branch on Integer Condition Codes with Prediction (BPcc)
/// This uses the branch_predict field.
/// This uses the branch_predict_int field.
bpcc,
/// A.8 Call and Link
@ -70,6 +74,11 @@ pub const Inst = struct {
// TODO add other operations.
@"or",
/// A.37 Multiply and Divide (64-bit)
/// This uses the arithmetic_3op field.
// TODO add other operations.
mulx,
/// A.40 No Operation
/// This uses the nop field.
nop,
@ -89,8 +98,12 @@ pub const Inst = struct {
/// A.49 Shift
/// This uses the shift field.
// TODO add other operations.
sll,
srl,
sra,
sllx,
srlx,
srax,
/// A.54 Store Integer
/// This uses the arithmetic_3op field.
@ -106,10 +119,15 @@ pub const Inst = struct {
/// This uses the arithmetic_3op field.
// TODO add other operations.
sub,
subcc,
/// A.61 Trap on Integer Condition Codes (Tcc)
/// This uses the trap field.
tcc,
// TODO add synthetic instructions
// TODO add cmp synthetic instruction to avoid wasting a register when
// comparing with subcc
};
/// The position of an MIR instruction within the `Mir` instructions array.
@ -164,13 +182,23 @@ pub const Inst = struct {
link: Register = .o7,
},
/// Branch with prediction.
/// Branch with prediction, checking the integer status code
/// Used by e.g. bpcc
branch_predict: struct {
branch_predict_int: struct {
annul: bool = false,
pt: bool = true,
ccr: Instruction.CCR,
cond: Instruction.Condition,
cond: Instruction.ICondition,
inst: Index,
},
/// Branch with prediction, comparing a register's content with zero
/// Used by e.g. bpr
branch_predict_reg: struct {
annul: bool = false,
pt: bool = true,
cond: Instruction.RCondition,
rs1: Register,
inst: Index,
},
@ -191,7 +219,7 @@ pub const Inst = struct {
/// if is_imm true then it uses the imm field of rs2_or_imm,
/// otherwise it uses rs2 field.
///
/// Used by e.g. add, sub
/// Used by e.g. sllx
shift: struct {
is_imm: bool,
width: Instruction.ShiftWidth,
@ -210,7 +238,7 @@ pub const Inst = struct {
/// Used by e.g. tcc
trap: struct {
is_imm: bool = true,
cond: Instruction.Condition,
cond: Instruction.ICondition,
ccr: Instruction.CCR = .icc,
rs1: Register = .g0,
rs2_or_imm: union {

View File

@ -512,10 +512,172 @@ pub const Instruction = union(enum) {
lookaside: bool = false,
};
// TODO: Need to define an enum for `cond` values
// This is kinda challenging since the cond values have different meanings
// depending on whether it's operating on integer or FP CCR.
pub const Condition = u4;
// In SPARCv9, FP and integer comparison operations
// are encoded differently.
/// Floating-point branch conditions.
/// In SPARCv9, FP and integer comparison operations are encoded
/// differently; the declaration order below is the 4-bit `cond`
/// field encoding and must not be reordered.
pub const FCondition = enum(u4) {
    /// Branch Never
    nv,
    /// Branch on Not Equal
    ne,
    /// Branch on Less or Greater
    lg,
    /// Branch on Unordered or Less
    ul,
    /// Branch on Less
    lt,
    /// Branch on Unordered or Greater
    ug,
    /// Branch on Greater
    gt,
    /// Branch on Unordered
    un,
    /// Branch Always
    al,
    /// Branch on Equal
    eq,
    /// Branch on Unordered or Equal
    ue,
    /// Branch on Greater or Equal
    ge,
    /// Branch on Unordered or Greater or Equal
    uge,
    /// Branch on Less or Equal
    le,
    /// Branch on Unordered or Less or Equal
    ule,
    /// Branch on Ordered
    ord,

    /// Maps a std.math.CompareOperator to the FP condition that is
    /// true iff the comparison holds.
    pub fn fromCompareOperator(op: std.math.CompareOperator) FCondition {
        return switch (op) {
            .eq => .eq,
            .neq => .ne,
            .lt => .lt,
            .lte => .le,
            .gt => .gt,
            .gte => .ge,
        };
    }

    /// Returns the logical complement of `cond`.
    /// `al` and `nv` have no complement among conditional branches
    /// and must not be passed here.
    pub fn negate(cond: FCondition) FCondition {
        return switch (cond) {
            .al, .nv => unreachable,
            .eq => .ne,
            .ne => .eq,
            .lt => .uge,
            .uge => .lt,
            .le => .ug,
            .ug => .le,
            .ge => .ul,
            .ul => .ge,
            .gt => .ule,
            .ule => .gt,
            .lg => .ue,
            .ue => .lg,
            .un => .ord,
            .ord => .un,
        };
    }
};
/// Integer condition-code branch conditions.
/// The declaration order below is the 4-bit `cond` field encoding
/// and must not be reordered.
pub const ICondition = enum(u4) {
    /// Branch Never
    nv,
    /// Branch on Equal
    eq,
    /// Branch on Less or Equal
    le,
    /// Branch on Less
    lt,
    /// Branch on Less or Equal Unsigned
    leu,
    /// Branch on Carry Set (Less than, Unsigned)
    cs,
    /// Branch on Negative
    neg,
    /// Branch on Overflow Set
    vs,
    /// Branch Always
    al,
    /// Branch on Not Equal
    ne,
    /// Branch on Greater
    gt,
    /// Branch on Greater or Equal
    ge,
    /// Branch on Greater Unsigned
    gu,
    /// Branch on Carry Clear (Greater Than or Equal, Unsigned)
    cc,
    /// Branch on Positive
    pos,
    /// Branch on Overflow Clear
    vc,

    /// Converts a std.math.CompareOperator into a condition flag,
    /// i.e. returns the condition that is true iff the result of the
    /// comparison is true. Assumes signed comparison.
    pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) ICondition {
        return switch (op) {
            .gte => .ge,
            .gt => .gt,
            .neq => .ne,
            .lt => .lt,
            .lte => .le,
            .eq => .eq,
        };
    }

    /// Converts a std.math.CompareOperator into a condition flag,
    /// i.e. returns the condition that is true iff the result of the
    /// comparison is true. Assumes unsigned comparison.
    pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) ICondition {
        return switch (op) {
            .gte => .cc,
            .gt => .gu,
            .neq => .ne,
            .lt => .cs,
            // Fix: unsigned <= must use the unsigned condition `leu`
            // (carry set or zero), not the signed `le`; this mirrors
            // `.lt => .cs` and `.gt => .gu` above.
            .lte => .leu,
            .eq => .eq,
        };
    }

    /// Returns the condition which is true iff the given condition is
    /// false. `al` and `nv` have no conditional complement and must
    /// not be passed here.
    pub fn negate(cond: ICondition) ICondition {
        return switch (cond) {
            .eq => .ne,
            .ne => .eq,
            .cs => .cc,
            .cc => .cs,
            .neg => .pos,
            .pos => .neg,
            .vs => .vc,
            .vc => .vs,
            .gu => .leu,
            .leu => .gu,
            .ge => .lt,
            .lt => .ge,
            .gt => .le,
            .le => .gt,
            .al => unreachable,
            .nv => unreachable,
        };
    }
};
/// A 4-bit branch condition; whether the bits mean an integer or an
/// FP condition depends on the instruction consuming them.
/// `encoded` exposes the raw bits for instruction encoding.
pub const Condition = packed union {
    fcond: FCondition,
    icond: ICondition,
    encoded: u4,
};
pub fn toU32(self: Instruction) u32 {
// TODO: Remove this once packed structs work.
@ -593,7 +755,7 @@ pub const Instruction = union(enum) {
return Instruction{
.format_2b = .{
.a = @boolToInt(annul),
.cond = cond,
.cond = cond.encoded,
.op2 = op2,
.disp22 = udisp_truncated,
},
@ -614,7 +776,7 @@ pub const Instruction = union(enum) {
return Instruction{
.format_2c = .{
.a = @boolToInt(annul),
.cond = cond,
.cond = cond.encoded,
.op2 = op2,
.cc1 = ccr_cc1,
.cc0 = ccr_cc0,
@ -895,7 +1057,7 @@ pub const Instruction = union(enum) {
.rd = rd.enc(),
.op3 = op3,
.cc2 = ccr_cc2,
.cond = cond,
.cond = cond.encoded,
.cc1 = ccr_cc1,
.cc0 = ccr_cc0,
.rs2 = rs2.enc(),
@ -912,7 +1074,7 @@ pub const Instruction = union(enum) {
.rd = rd.enc(),
.op3 = op3,
.cc2 = ccr_cc2,
.cond = cond,
.cond = cond.encoded,
.cc1 = ccr_cc1,
.cc0 = ccr_cc0,
.simm11 = @bitCast(u11, imm),
@ -960,7 +1122,7 @@ pub const Instruction = union(enum) {
.format_4g = .{
.rd = rd.enc(),
.op3 = op3,
.cond = cond,
.cond = cond.encoded,
.opf_cc = opf_cc,
.opf_low = opf_low,
.rs2 = rs2.enc(),
@ -979,6 +1141,14 @@ pub const Instruction = union(enum) {
};
}
/// A.7 Branch on Integer Condition Codes with Prediction (BPcc).
/// `disp` is a byte displacement and must fit in 21 bits.
pub fn bpcc(cond: ICondition, annul: bool, pt: bool, ccr: CCR, disp: i21) Instruction {
    return format2c(0b001, .{ .icond = cond }, annul, pt, ccr, disp);
}
/// A.3 Branch on Integer Register with Prediction (BPr).
/// Compares `rs1` against zero; `disp` is a byte displacement and
/// must fit in 18 bits.
pub fn bpr(cond: RCondition, annul: bool, pt: bool, rs1: Register, disp: i18) Instruction {
    return format2d(0b011, cond, annul, pt, rs1, disp);
}
pub fn jmpl(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction {
return switch (s2) {
Register => format3a(0b10, 0b11_1000, rs1, rs2, rd),
@ -1027,6 +1197,14 @@ pub const Instruction = union(enum) {
};
}
/// A.37 Multiply (64-bit): MULX.
/// `s2` selects the second-operand form: `Register` or a 13-bit
/// signed immediate (`i13`).
pub fn mulx(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction {
    return switch (s2) {
        Register => format3a(0b10, 0b00_1001, rs1, rs2, rd),
        i13 => format3b(0b10, 0b00_1001, rs1, rs2, rd),
        else => unreachable,
    };
}
/// A.40 No Operation, encoded as `sethi 0, %g0`.
pub fn nop() Instruction {
    return sethi(0, .g0);
}
@ -1099,11 +1277,19 @@ pub const Instruction = union(enum) {
};
}
pub fn trap(comptime s2: type, cond: Condition, ccr: CCR, rs1: Register, rs2: s2) Instruction {
/// A.56-style subtract that also sets the integer condition codes
/// (SUBcc). `s2` selects the second-operand form: `Register` or a
/// 13-bit signed immediate (`i13`).
pub fn subcc(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction {
    return switch (s2) {
        Register => format3a(0b10, 0b01_0100, rs1, rs2, rd),
        i13 => format3b(0b10, 0b01_0100, rs1, rs2, rd),
        else => unreachable,
    };
}
/// A.61 Trap on Integer Condition Codes (Tcc).
/// `s2` selects the second-operand form: `Register` or a 7-bit
/// immediate (`u7`).
///
/// Note: this span previously contained both the pre-rename switch
/// arms (passing `cond` directly) and the post-rename ones; the
/// duplicate `Register`/`u7` arms would not compile and the stale
/// pair has been removed.
pub fn trap(comptime s2: type, cond: ICondition, ccr: CCR, rs1: Register, rs2: s2) Instruction {
    // Tcc instructions abuse the rd field to store the conditionals,
    // hence the ICondition -> Register reinterpretation below.
    return switch (s2) {
        Register => format4a(0b11_1010, ccr, rs1, rs2, @intToEnum(Register, @enumToInt(cond))),
        u7 => format4e(0b11_1010, ccr, rs1, @intToEnum(Register, @enumToInt(cond)), rs2),
        else => unreachable,
    };
}
@ -1128,11 +1314,11 @@ test "Serialize formats" {
.expected = 0b00_00000_100_0000000000000000000000,
},
.{
.inst = Instruction.format2b(6, 3, true, -4),
.inst = Instruction.format2b(6, .{ .icond = .lt }, true, -4),
.expected = 0b00_1_0011_110_1111111111111111111111,
},
.{
.inst = Instruction.format2c(3, 0, false, true, .xcc, 8),
.inst = Instruction.format2c(3, .{ .icond = .nv }, false, true, .xcc, 8),
.expected = 0b00_0_0000_011_1_0_1_0000000000000000010,
},
.{
@ -1224,11 +1410,11 @@ test "Serialize formats" {
.expected = 0b10_10010_001000_00000_1_1_0_11111111111,
},
.{
.inst = Instruction.format4c(8, 0, .xcc, .g0, .o1),
.inst = Instruction.format4c(8, .{ .icond = .nv }, .xcc, .g0, .o1),
.expected = 0b10_01001_001000_1_0000_0_1_0_000000_00000,
},
.{
.inst = Instruction.format4d(8, 0, .xcc, 0, .l2),
.inst = Instruction.format4d(8, .{ .icond = .nv }, .xcc, 0, .l2),
.expected = 0b10_10010_001000_1_0000_1_1_0_00000000000,
},
.{
@ -1240,7 +1426,7 @@ test "Serialize formats" {
.expected = 0b10_10010_001000_00000_0_001_00100_01001,
},
.{
.inst = Instruction.format4g(8, 4, 2, 0, .o1, .l2),
.inst = Instruction.format4g(8, 4, 2, .{ .icond = .nv }, .o1, .l2),
.expected = 0b10_10010_001000_0_0000_010_000100_01001,
},
};