x64: move from compare_flags_* mcv to eflags with condition codes enum

Jakub Konka 2022-05-25 17:06:47 +02:00
parent 71e2a56e3e
commit 5fb7070642
4 changed files with 505 additions and 295 deletions
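In short: two MCValue variants that encoded (signedness × operator) become one variant holding the exact EFLAGS condition code. A trimmed-down sketch of the idea — hypothetical mirror types for illustration only; the real MCValue union in the first file below has many more variants, and the real Condition enum (last file) has 30 codes:

const std = @import("std");

const Condition = enum { e, ne, l, b }; // trimmed

const McvBefore = union(enum) {
    // signedness was baked into the tag, the operator into the payload
    compare_flags_unsigned: std.math.CompareOperator,
    compare_flags_signed: std.math.CompareOperator,
};

const McvAfter = union(enum) {
    // one tag; the payload already names the exact EFLAGS condition
    eflags: Condition,
};

test "one variant now encodes both signedness and operator" {
    const signed_lt: McvAfter = .{ .eflags = .l }; // was .compare_flags_signed = .lt
    const unsigned_lt: McvAfter = .{ .eflags = .b }; // was .compare_flags_unsigned = .lt
    try std.testing.expect(signed_lt.eflags != unsigned_lt.eflags);
}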

src/arch/x86_64/CodeGen.zig

@ -37,6 +37,7 @@ const caller_preserved_regs = abi.caller_preserved_regs;
const c_abi_int_param_regs = abi.c_abi_int_param_regs;
const c_abi_int_return_regs = abi.c_abi_int_return_regs;
const Condition = bits.Condition;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
const Register = bits.Register;
@ -65,7 +66,7 @@ arg_index: u32,
src_loc: Module.SrcLoc,
stack_align: u32,
compare_flags_inst: ?Air.Inst.Index = null,
eflags_inst: ?Air.Inst.Index = null,
/// MIR Instructions
mir_instructions: std.MultiArrayList(Mir.Inst) = .{},
@ -149,12 +150,8 @@ pub const MCValue = union(enum) {
stack_offset: i32,
/// The value is a pointer to one of the stack variables (payload is stack offset).
ptr_stack_offset: i32,
/// The value is in the compare flags assuming an unsigned operation,
/// with this operator applied on top of it.
compare_flags_unsigned: math.CompareOperator,
/// The value is in the compare flags assuming a signed operation,
/// with this operator applied on top of it.
compare_flags_signed: math.CompareOperator,
/// The value resides in the EFLAGS register.
eflags: Condition,
fn isMemory(mcv: MCValue) bool {
return switch (mcv) {
@ -183,8 +180,7 @@ pub const MCValue = union(enum) {
.immediate,
.memory,
.compare_flags_unsigned,
.compare_flags_signed,
.eflags,
.ptr_stack_offset,
.undef,
.register_overflow_unsigned,
@ -780,10 +776,10 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
},
.register_overflow_signed, .register_overflow_unsigned => |reg| {
self.register_manager.freeReg(reg.to64());
self.compare_flags_inst = null;
self.eflags_inst = null;
},
.compare_flags_signed, .compare_flags_unsigned => {
self.compare_flags_inst = null;
.eflags => {
self.eflags_inst = null;
},
else => {}, // TODO process stack allocation death
}
@ -929,16 +925,14 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv, .{});
}
pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
if (self.compare_flags_inst) |inst_to_save| {
pub fn spillEflagsIfOccupied(self: *Self) !void {
if (self.eflags_inst) |inst_to_save| {
const mcv = self.getResolvedInstValue(inst_to_save);
const new_mcv = switch (mcv) {
.register_overflow_signed,
.register_overflow_unsigned,
=> try self.allocRegOrMem(inst_to_save, false),
.compare_flags_signed,
.compare_flags_unsigned,
=> try self.allocRegOrMem(inst_to_save, true),
.eflags => try self.allocRegOrMem(inst_to_save, true),
else => unreachable,
};
@ -948,7 +942,7 @@ pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
self.compare_flags_inst = null;
self.eflags_inst = null;
// TODO consolidate with register manager and spillInstruction
// this call should really belong in the register manager!
@ -1123,31 +1117,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
switch (operand) {
.dead => unreachable,
.unreach => unreachable,
.compare_flags_unsigned => |op| {
const r = MCValue{
.compare_flags_unsigned = switch (op) {
.gte => .lt,
.gt => .lte,
.neq => .eq,
.lt => .gte,
.lte => .gt,
.eq => .neq,
},
};
break :result r;
},
.compare_flags_signed => |op| {
const r = MCValue{
.compare_flags_signed = switch (op) {
.gte => .lt,
.gt => .lte,
.neq => .eq,
.lt => .gte,
.lte => .gt,
.eq => .neq,
},
};
break :result r;
.eflags => |cc| {
break :result MCValue{ .eflags = cc.negate() };
},
else => {},
}
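The payoff is visible right in this airNot hunk: the old code flipped the CompareOperator by hand, once per signedness; with the enum it is a single cc.negate() call. A self-contained sketch of that helper on a subset of the codes (the real negate() in bits.zig, further down, covers all of them):

const std = @import("std");

const Cond = enum {
    e,
    ne,
    l,
    nl,
    b,
    nb,

    // subset of bits.zig's Condition.negate()
    fn negate(cc: Cond) Cond {
        return switch (cc) {
            .e => .ne,
            .ne => .e,
            .l => .nl,
            .nl => .l,
            .b => .nb,
            .nb => .b,
        };
    }
};

test "negation is an involution, so airNot round-trips" {
    for ([_]Cond{ .e, .ne, .l, .nl, .b, .nb }) |cc| {
        try std.testing.expectEqual(cc, cc.negate().negate());
    }
}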
@ -1213,13 +1184,17 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
try self.genBinOpMir(.cmp, ty, .{ .register = lhs_reg }, rhs_mcv);
const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ty, rhs_mcv);
const cc: Condition = switch (signedness) {
.unsigned => .b,
.signed => .l,
};
_ = try self.addInst(.{
.tag = if (signedness == .signed) .cond_mov_lt else .cond_mov_below,
.tag = .cond_mov,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_mcv.register,
.reg2 = lhs_reg,
}),
.data = undefined,
.data = .{ .cc = cc },
});
break :result dst_mcv;
@ -1341,7 +1316,7 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO implement add/sub/shl with overflow for Ints larger than 64bits", .{});
}
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
if (tag == .shl_with_overflow) {
try self.spillRegisters(1, .{.rcx});
@ -1362,7 +1337,7 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const int_info = ty.intInfo(self.target.*);
if (math.isPowerOfTwo(int_info.bits) and int_info.bits >= 8) {
self.compare_flags_inst = inst;
self.eflags_inst = inst;
const result: MCValue = switch (int_info.signedness) {
.signed => .{ .register_overflow_signed = partial.register },
@ -1371,7 +1346,7 @@ fn airAddSubShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
break :result result;
}
self.compare_flags_inst = null;
self.eflags_inst = null;
const tuple_ty = self.air.typeOfIndex(inst);
const tuple_size = @intCast(u32, tuple_ty.abiSize(self.target.*));
@ -1413,17 +1388,16 @@ fn genSetStackTruncatedOverflowCompare(
};
const overflow_reg = temp_regs[0];
const flags: u2 = switch (int_info.signedness) {
.signed => 0b00,
.unsigned => 0b10,
const cc: Condition = switch (int_info.signedness) {
.signed => .o,
.unsigned => .c,
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = overflow_reg.to8(),
.flags = flags,
}),
.data = undefined,
.data = .{ .cc = cc },
});
const scratch_reg = temp_regs[1];
@ -1438,9 +1412,9 @@ fn genSetStackTruncatedOverflowCompare(
const eq_reg = temp_regs[2];
_ = try self.addInst(.{
.tag = .cond_set_byte_eq_ne,
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
.data = undefined,
.data = .{ .cc = .ne },
});
try self.genBinOpMir(
@ -1477,8 +1451,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
const int_info = ty.intInfo(self.target.*);
if (math.isPowerOfTwo(int_info.bits) and int_info.bits >= 8) {
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
try self.spillRegisters(2, .{ .rax, .rdx });
@ -1492,8 +1466,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
};
}
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = null;
try self.spillEflagsIfOccupied();
self.eflags_inst = null;
const dst_reg: Register = dst_reg: {
switch (int_info.signedness) {
@ -1686,12 +1660,12 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
.data = undefined,
});
_ = try self.addInst(.{
.tag = .cond_mov_eq,
.tag = .cond_mov,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = divisor.to64(),
.reg2 = .rdx,
}),
.data = undefined,
.data = .{ .cc = .e },
});
try self.genBinOpMir(.add, Type.isize, .{ .register = divisor }, .{ .register = .rax });
return MCValue{ .register = divisor };
@ -2511,8 +2485,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.undef => unreachable,
.unreach => unreachable,
.dead => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
.immediate => |imm| {
@ -2532,8 +2505,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
switch (dst_mcv) {
.dead => unreachable,
.undef => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.register => |dst_reg| {
// mov dst_reg, [reg]
_ = try self.addInst(.{
@ -2637,8 +2609,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.undef => unreachable,
.unreach => unreachable,
.dead => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
.immediate => |imm| {
@ -2660,8 +2631,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.undef => unreachable,
.dead => unreachable,
.unreach => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.immediate => |imm| {
switch (abi_size) {
1, 2, 4 => {
@ -3041,18 +3011,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(reg_lock);
const dst_reg = try self.register_manager.allocReg(inst, gp);
const flags: u2 = switch (mcv) {
.register_overflow_unsigned => 0b10,
.register_overflow_signed => 0b00,
const cc: Condition = switch (mcv) {
.register_overflow_unsigned => .c,
.register_overflow_signed => .o,
else => unreachable,
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = dst_reg.to8(),
.flags = flags,
}),
.data = undefined,
.data = .{ .cc = cc },
});
break :result MCValue{ .register = dst_reg.to8() };
},
@ -3479,8 +3448,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.none => unreachable,
.undef => unreachable,
.dead, .unreach, .immediate => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
.register => |dst_reg| {
@ -3559,8 +3527,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.memory,
.got_load,
.direct_load,
.compare_flags_signed,
.compare_flags_unsigned,
.eflags,
=> {
assert(abi_size <= 8);
const dst_reg_lock = self.register_manager.lockReg(dst_reg);
@ -3649,11 +3616,8 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.got_load, .direct_load => {
return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{});
},
.compare_flags_signed => {
return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{});
.eflags => {
return self.fail("TODO implement x86 ADD/SUB/CMP source eflags", .{});
},
}
},
@ -3674,8 +3638,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.none => unreachable,
.undef => unreachable,
.dead, .unreach, .immediate => unreachable,
.compare_flags_unsigned => unreachable,
.compare_flags_signed => unreachable,
.eflags => unreachable,
.ptr_stack_offset => unreachable,
.register_overflow_unsigned => unreachable,
.register_overflow_signed => unreachable,
@ -3734,11 +3697,8 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.got_load, .direct_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
},
.compare_flags_signed => {
return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
.eflags => {
return self.fail("TODO implement x86 multiply source eflags", .{});
},
}
},
@ -3782,11 +3742,8 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.got_load, .direct_load => {
return self.fail("TODO implement x86 multiply source symbol at index in linker", .{});
},
.compare_flags_unsigned => {
return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{});
},
.compare_flags_signed => {
return self.fail("TODO implement x86 multiply source compare flag (signed)", .{});
.eflags => {
return self.fail("TODO implement x86 multiply source eflags", .{});
},
}
},
@ -3905,7 +3862,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
var info = try self.resolveCallingConventionValues(fn_ty);
defer info.deinit(self);
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
for (caller_preserved_regs) |reg| {
try self.register_manager.getReg(reg, null);
@ -3957,8 +3914,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.memory => unreachable,
.got_load => unreachable,
.direct_load => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
.eflags => unreachable,
.register_overflow_signed => unreachable,
.register_overflow_unsigned => unreachable,
}
@ -4220,8 +4176,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
break :blk ty.intInfo(self.target.*).signedness;
};
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
const result: MCValue = result: {
// There are 2 operands, destination and source.
@ -4265,9 +4221,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
try self.genBinOpMir(.cmp, ty, dst_mcv, src_mcv);
break :result switch (signedness) {
.signed => MCValue{ .compare_flags_signed = op },
.unsigned => MCValue{ .compare_flags_unsigned = op },
.signed => MCValue{ .eflags = Condition.fromCompareOperatorSigned(op) },
.unsigned => MCValue{ .eflags = Condition.fromCompareOperatorUnsigned(op) },
};
};
@ -4440,47 +4397,39 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
const abi_size = ty.abiSize(self.target.*);
switch (mcv) {
.compare_flags_unsigned,
.compare_flags_signed,
=> |cmp_op| {
// Here we map the opposites since the jump is to the false branch.
const flags: u2 = switch (cmp_op) {
.gte => 0b10,
.gt => 0b11,
.neq => 0b01,
.lt => 0b00,
.lte => 0b01,
.eq => 0b00,
};
const tag: Mir.Inst.Tag = if (cmp_op == .neq or cmp_op == .eq)
.cond_jmp_eq_ne
else if (mcv == .compare_flags_unsigned)
Mir.Inst.Tag.cond_jmp_above_below
else
Mir.Inst.Tag.cond_jmp_greater_less;
.eflags => |cc| {
return self.addInst(.{
.tag = tag,
.ops = Mir.Inst.Ops.encode(.{ .flags = flags }),
.data = .{ .inst = undefined },
.tag = .cond_jmp,
.ops = Mir.Inst.Ops.encode(.{}),
.data = .{
.inst_cc = .{
.inst = undefined,
// Here we map the opposites since the jump is to the false branch.
.cc = cc.negate(),
},
},
});
},
.register => |reg| {
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
_ = try self.addInst(.{
.tag = .@"test",
.ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
.data = .{ .imm = 1 },
});
return self.addInst(.{
.tag = .cond_jmp_eq_ne,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
.tag = .cond_jmp,
.ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst_cc = .{
.inst = undefined,
.cc = .e,
} },
});
},
.immediate,
.stack_offset,
=> {
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genCondBrMir(ty, .{ .register = reg });
@ -4516,7 +4465,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
// Capture the state of register and stack allocation state so that we can revert to it.
const parent_next_stack_offset = self.next_stack_offset;
const parent_free_registers = self.register_manager.free_registers;
const parent_compare_flags_inst = self.compare_flags_inst;
const parent_eflags_inst = self.eflags_inst;
var parent_stack = try self.stack.clone(self.gpa);
defer parent_stack.deinit(self.gpa);
const parent_registers = self.register_manager.registers;
@ -4538,7 +4487,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
defer saved_then_branch.deinit(self.gpa);
self.register_manager.registers = parent_registers;
self.compare_flags_inst = parent_compare_flags_inst;
self.eflags_inst = parent_eflags_inst;
self.stack.deinit(self.gpa);
self.stack = parent_stack;
@ -4640,8 +4589,8 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
}
fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
const cmp_ty: Type = if (!ty.isPtrLikeOptional()) blk: {
var buf: Type.Payload.ElemType = undefined;
@ -4651,13 +4600,13 @@ fn isNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValu
try self.genBinOpMir(.cmp, cmp_ty, operand, MCValue{ .immediate = 0 });
return MCValue{ .compare_flags_unsigned = .eq };
return MCValue{ .eflags = .e };
}
fn isNonNull(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
const is_null_res = try self.isNull(inst, ty, operand);
assert(is_null_res.compare_flags_unsigned == .eq);
return MCValue{ .compare_flags_unsigned = .neq };
assert(is_null_res.eflags == .e);
return MCValue{ .eflags = is_null_res.eflags.negate() };
}
fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
@ -4667,8 +4616,8 @@ fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue
return MCValue{ .immediate = 0 }; // always false
}
try self.spillCompareFlagsIfOccupied();
self.compare_flags_inst = inst;
try self.spillEflagsIfOccupied();
self.eflags_inst = inst;
const err_off = errUnionErrorOffset(ty.errorUnionPayload(), self.target.*);
switch (operand) {
@ -4691,15 +4640,15 @@ fn isErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue
else => return self.fail("TODO implement isErr for {}", .{operand}),
}
return MCValue{ .compare_flags_unsigned = .gt };
return MCValue{ .eflags = .a };
}
fn isNonErr(self: *Self, inst: Air.Inst.Index, ty: Type, operand: MCValue) !MCValue {
const is_err_res = try self.isErr(inst, ty, operand);
switch (is_err_res) {
.compare_flags_unsigned => |op| {
assert(op == .gt);
return MCValue{ .compare_flags_unsigned = .lte };
.eflags => |cc| {
assert(cc == .a);
return MCValue{ .eflags = cc.negate() };
},
.immediate => |imm| {
assert(imm == 0);
@ -4914,10 +4863,9 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
.none => unreachable,
.undef => unreachable,
.dead, .unreach => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
.eflags => unreachable,
.register => |cond_reg| {
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
const cond_reg_lock = self.register_manager.lockReg(cond_reg);
defer if (cond_reg_lock) |lock| self.register_manager.unlockReg(lock);
@ -4965,13 +4913,16 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
.data = undefined,
});
return self.addInst(.{
.tag = .cond_jmp_eq_ne,
.tag = .cond_jmp,
.ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst = undefined },
.data = .{ .inst_cc = .{
.inst = undefined,
.cc = .ne,
} },
});
},
.stack_offset => {
try self.spillCompareFlagsIfOccupied();
try self.spillEflagsIfOccupied();
if (abi_size <= 8) {
const reg = try self.copyToTmpRegister(ty, condition);
@ -5030,7 +4981,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
// Capture the state of register and stack allocation state so that we can revert to it.
const parent_next_stack_offset = self.next_stack_offset;
const parent_free_registers = self.register_manager.free_registers;
const parent_compare_flags_inst = self.compare_flags_inst;
const parent_eflags_inst = self.eflags_inst;
var parent_stack = try self.stack.clone(self.gpa);
defer parent_stack.deinit(self.gpa);
const parent_registers = self.register_manager.registers;
@ -5052,7 +5003,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
defer saved_case_branch.deinit(self.gpa);
self.register_manager.registers = parent_registers;
self.compare_flags_inst = parent_compare_flags_inst;
self.eflags_inst = parent_eflags_inst;
self.stack.deinit(self.gpa);
self.stack = parent_stack;
parent_stack = .{};
@ -5092,7 +5043,15 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
self.mir_instructions.items(.data)[reloc].inst = next_inst;
switch (self.mir_instructions.items(.tag)[reloc]) {
.cond_jmp => {
self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
},
.jmp => {
self.mir_instructions.items(.data)[reloc].inst = next_inst;
},
else => unreachable,
}
}
fn airBr(self: *Self, inst: Air.Inst.Index) !void {
@ -5111,7 +5070,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
block_data.mcv = switch (operand_mcv) {
.none, .dead, .unreach => unreachable,
.register, .stack_offset, .memory => operand_mcv,
.compare_flags_signed, .compare_flags_unsigned, .immediate => blk: {
.eflags, .immediate => blk: {
const new_mcv = try self.allocRegOrMem(block, true);
try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv);
break :blk new_mcv;
@ -5335,9 +5294,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
.register_overflow_unsigned,
.register_overflow_signed,
=> return self.fail("TODO genSetStackArg for register with overflow bit", .{}),
.compare_flags_unsigned,
.compare_flags_signed,
=> {
.eflags => {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStackArg(ty, stack_offset, .{ .register = reg });
},
@ -5484,18 +5441,17 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
const overflow_bit_ty = ty.structFieldType(1);
const overflow_bit_offset = ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
const flags: u2 = switch (mcv) {
.register_overflow_unsigned => 0b10,
.register_overflow_signed => 0b00,
const cc: Condition = switch (mcv) {
.register_overflow_unsigned => .c,
.register_overflow_signed => .o,
else => unreachable,
};
_ = try self.addInst(.{
.tag = .cond_set_byte_overflow,
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = tmp_reg.to8(),
.flags = flags,
}),
.data = undefined,
.data = .{ .cc = cc },
});
return self.genSetStack(
@ -5505,9 +5461,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
.{},
);
},
.compare_flags_unsigned,
.compare_flags_signed,
=> {
.eflags => {
const reg = try self.copyToTmpRegister(ty, mcv);
return self.genSetStack(ty, stack_offset, .{ .register = reg }, opts);
},
@ -5832,9 +5786,12 @@ fn genInlineMemcpy(
// je end
const loop_reloc = try self.addInst(.{
.tag = .cond_jmp_eq_ne,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
.tag = .cond_jmp,
.ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst_cc = .{
.inst = undefined,
.cc = .e,
} },
});
// mov tmp, [addr + rcx]
@ -5950,9 +5907,12 @@ fn genInlineMemset(
// je end
const loop_reloc = try self.addInst(.{
.tag = .cond_jmp_eq_ne,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
.data = .{ .inst = undefined },
.tag = .cond_jmp,
.ops = Mir.Inst.Ops.encode(.{}),
.data = .{ .inst_cc = .{
.inst = undefined,
.cc = .e,
} },
});
switch (value) {
@ -6025,31 +5985,13 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
else => unreachable,
}
},
.compare_flags_unsigned,
.compare_flags_signed,
=> |op| {
const tag: Mir.Inst.Tag = switch (op) {
.gte, .gt, .lt, .lte => if (mcv == .compare_flags_unsigned)
Mir.Inst.Tag.cond_set_byte_above_below
else
Mir.Inst.Tag.cond_set_byte_greater_less,
.eq, .neq => .cond_set_byte_eq_ne,
};
const flags: u2 = switch (op) {
.gte => 0b00,
.gt => 0b01,
.lt => 0b10,
.lte => 0b11,
.eq => 0b01,
.neq => 0b00,
};
.eflags => |cc| {
_ = try self.addInst(.{
.tag = tag,
.tag = .cond_set_byte,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = reg.to8(),
.flags = flags,
}),
.data = undefined,
.data = .{ .cc = cc },
});
},
.immediate => |x| {

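One detail worth calling out from the hunks above: overflow results map .register_overflow_unsigned to .c and .register_overflow_signed to .o, because on x86 an unsigned wrap raises the carry flag (hence setc) while a signed wrap raises the overflow flag (hence seto). A sketch of the two distinct events, using the @addWithOverflow signature from the Zig of this commit's era (mid-2022; later versions return a tuple instead):

const std = @import("std");

test "unsigned wrap (CF / setc) vs signed wrap (OF / seto)" {
    // unsigned: 255 + 1 wraps to 0 -> hardware sets CF, hence `setc`
    var u: u8 = 255;
    try std.testing.expect(@addWithOverflow(u8, u, 1, &u));

    // signed: 127 + 1 wraps to -128 -> hardware sets OF, hence `seto`
    var i: i8 = 127;
    try std.testing.expect(@addWithOverflow(i8, i, 1, &i));
}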
src/arch/x86_64/Emit.zig

@ -158,20 +158,9 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.jmp => try emit.mirJmpCall(.jmp_near, inst),
.call => try emit.mirJmpCall(.call_near, inst),
.cond_jmp_greater_less,
.cond_jmp_above_below,
.cond_jmp_eq_ne,
=> try emit.mirCondJmp(tag, inst),
.cond_set_byte_greater_less,
.cond_set_byte_above_below,
.cond_set_byte_eq_ne,
.cond_set_byte_overflow,
=> try emit.mirCondSetByte(tag, inst),
.cond_mov_eq => try emit.mirCondMov(.cmove, inst),
.cond_mov_lt => try emit.mirCondMov(.cmovl, inst),
.cond_mov_below => try emit.mirCondMov(.cmovb, inst),
.cond_jmp => try emit.mirCondJmp(inst),
.cond_set_byte => try emit.mirCondSetByte(inst),
.cond_mov => try emit.mirCondMov(inst),
.ret => try emit.mirRet(inst),
@ -356,70 +345,130 @@ fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
}
}
fn mirCondJmp(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const target = emit.mir.instructions.items(.data)[inst].inst;
const tag = switch (mir_tag) {
.cond_jmp_greater_less => switch (ops.flags) {
0b00 => Tag.jge,
0b01 => Tag.jg,
0b10 => Tag.jl,
0b11 => Tag.jle,
},
.cond_jmp_above_below => switch (ops.flags) {
0b00 => Tag.jae,
0b01 => Tag.ja,
0b10 => Tag.jb,
0b11 => Tag.jbe,
},
.cond_jmp_eq_ne => switch (@truncate(u1, ops.flags)) {
0b0 => Tag.jne,
0b1 => Tag.je,
},
else => unreachable,
fn mirCondJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const mir_tag = emit.mir.instructions.items(.tag)[inst];
assert(mir_tag == .cond_jmp);
const inst_cc = emit.mir.instructions.items(.data)[inst].inst_cc;
const tag: Tag = switch (inst_cc.cc) {
.a => .ja,
.ae => .jae,
.b => .jb,
.be => .jbe,
.c => .jc,
.e => .je,
.g => .jg,
.ge => .jge,
.l => .jl,
.le => .jle,
.na => .jna,
.nae => .jnae,
.nb => .jnb,
.nbe => .jnbe,
.nc => .jnc,
.ne => .jne,
.ng => .jng,
.nge => .jnge,
.nl => .jnl,
.nle => .jnle,
.no => .jno,
.np => .jnp,
.ns => .jns,
.nz => .jnz,
.o => .jo,
.p => .jp,
.pe => .jpe,
.po => .jpo,
.s => .js,
.z => .jz,
};
const source = emit.code.items.len;
try lowerToDEnc(tag, 0, emit.code);
try emit.relocs.append(emit.bin_file.allocator, .{
.source = source,
.target = target,
.target = inst_cc.inst,
.offset = emit.code.items.len - 4,
.length = 6,
});
}
fn mirCondSetByte(emit: *Emit, mir_tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
fn mirCondSetByte(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const mir_tag = emit.mir.instructions.items(.tag)[inst];
assert(mir_tag == .cond_set_byte);
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const tag = switch (mir_tag) {
.cond_set_byte_greater_less => switch (ops.flags) {
0b00 => Tag.setge,
0b01 => Tag.setg,
0b10 => Tag.setl,
0b11 => Tag.setle,
},
.cond_set_byte_above_below => switch (ops.flags) {
0b00 => Tag.setae,
0b01 => Tag.seta,
0b10 => Tag.setb,
0b11 => Tag.setbe,
},
.cond_set_byte_eq_ne => switch (@truncate(u1, ops.flags)) {
0b0 => Tag.setne,
0b1 => Tag.sete,
},
.cond_set_byte_overflow => switch (ops.flags) {
0b00 => Tag.seto,
0b01 => Tag.setno,
0b10 => Tag.setc,
0b11 => Tag.setnc,
},
else => unreachable,
const cc = emit.mir.instructions.items(.data)[inst].cc;
const tag: Tag = switch (cc) {
.a => .seta,
.ae => .setae,
.b => .setb,
.be => .setbe,
.c => .setc,
.e => .sete,
.g => .setg,
.ge => .setge,
.l => .setl,
.le => .setle,
.na => .setna,
.nae => .setnae,
.nb => .setnb,
.nbe => .setnbe,
.nc => .setnc,
.ne => .setne,
.ng => .setng,
.nge => .setnge,
.nl => .setnl,
.nle => .setnle,
.no => .setno,
.np => .setnp,
.ns => .setns,
.nz => .setnz,
.o => .seto,
.p => .setp,
.pe => .setpe,
.po => .setpo,
.s => .sets,
.z => .setz,
};
return lowerToMEnc(tag, RegisterOrMemory.reg(ops.reg1.to8()), emit.code);
}
fn mirCondMov(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
fn mirCondMov(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const mir_tag = emit.mir.instructions.items(.tag)[inst];
assert(mir_tag == .cond_mov);
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const cc = emit.mir.instructions.items(.data)[inst].cc;
const tag: Tag = switch (cc) {
.a => .cmova,
.ae => .cmovae,
.b => .cmovb,
.be => .cmovbe,
.c => .cmovc,
.e => .cmove,
.g => .cmovg,
.ge => .cmovge,
.l => .cmovl,
.le => .cmovle,
.na => .cmovna,
.nae => .cmovnae,
.nb => .cmovnb,
.nbe => .cmovnbe,
.nc => .cmovnc,
.ne => .cmovne,
.ng => .cmovng,
.nge => .cmovnge,
.nl => .cmovnl,
.nle => .cmovnle,
.no => .cmovno,
.np => .cmovnp,
.ns => .cmovns,
.nz => .cmovnz,
.o => .cmovo,
.p => .cmovp,
.pe => .cmovpe,
.po => .cmovpo,
.s => .cmovs,
.z => .cmovz,
};
if (ops.flags == 0b00) {
return lowerToRmEnc(tag, ops.reg1, RegisterOrMemory.reg(ops.reg2), emit.code);
}
@ -1277,7 +1326,7 @@ const Tag = enum {
setp,
setpe,
setnp,
setop,
setpo,
setl,
setnge,
setnl,
@ -1286,6 +1335,36 @@ const Tag = enum {
setng,
setnle,
setg,
cmovo,
cmovno,
cmovb,
cmovc,
cmovnae,
cmovnb,
cmovnc,
cmovae,
cmove,
cmovz,
cmovne,
cmovnz,
cmovbe,
cmovna,
cmova,
cmovnbe,
cmovs,
cmovns,
cmovp,
cmovpe,
cmovnp,
cmovpo,
cmovl,
cmovnge,
cmovnl,
cmovge,
cmovle,
cmovng,
cmovnle,
cmovg,
shl,
sal,
shr,
@ -1294,12 +1373,6 @@ const Tag = enum {
cwd,
cdq,
cqo,
cmove,
cmovz,
cmovl,
cmovng,
cmovb,
cmovnae,
movsd,
movss,
addsd,
@ -1372,7 +1445,7 @@ const Tag = enum {
.setp,
.setpe,
.setnp,
.setop,
.setpo,
.setl,
.setnge,
.setnl,
@ -1492,77 +1565,110 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.d => return switch (tag) {
.jmp_near => OpCode.init(&.{0xe9}),
.call_near => OpCode.init(&.{0xe8}),
.jo => if (is_one_byte) OpCode.init(&.{0x70}) else OpCode.init(&.{0x0f,0x80}),
.jno => if (is_one_byte) OpCode.init(&.{0x71}) else OpCode.init(&.{0x0f,0x81}),
.jb,
.jc,
.jnae => if (is_one_byte) OpCode.init(&.{0x72}) else OpCode.init(&.{0x0f,0x82}),
.jnb,
.jnc,
.jae => if (is_one_byte) OpCode.init(&.{0x73}) else OpCode.init(&.{0x0f,0x83}),
.je,
.jz => if (is_one_byte) OpCode.init(&.{0x74}) else OpCode.init(&.{0x0f,0x84}),
.jne,
.jnz => if (is_one_byte) OpCode.init(&.{0x75}) else OpCode.init(&.{0x0f,0x85}),
.jna,
.jbe => if (is_one_byte) OpCode.init(&.{0x76}) else OpCode.init(&.{0x0f,0x86}),
.jnbe,
.ja => if (is_one_byte) OpCode.init(&.{0x77}) else OpCode.init(&.{0x0f,0x87}),
.js => if (is_one_byte) OpCode.init(&.{0x78}) else OpCode.init(&.{0x0f,0x88}),
.jns => if (is_one_byte) OpCode.init(&.{0x79}) else OpCode.init(&.{0x0f,0x89}),
.jpe,
.jp => if (is_one_byte) OpCode.init(&.{0x7a}) else OpCode.init(&.{0x0f,0x8a}),
.jpo,
.jnp => if (is_one_byte) OpCode.init(&.{0x7b}) else OpCode.init(&.{0x0f,0x8b}),
.jnge,
.jl => if (is_one_byte) OpCode.init(&.{0x7c}) else OpCode.init(&.{0x0f,0x8c}),
.jge,
.jnl => if (is_one_byte) OpCode.init(&.{0x7d}) else OpCode.init(&.{0x0f,0x8d}),
.jle,
.jng => if (is_one_byte) OpCode.init(&.{0x7e}) else OpCode.init(&.{0x0f,0x8e}),
.jg,
.jnle => if (is_one_byte) OpCode.init(&.{0x7f}) else OpCode.init(&.{0x0f,0x8f}),
else => unreachable,
},
.m => return switch (tag) {
.jmp_near,
.call_near,
.push => OpCode.init(&.{0xff}),
.pop => OpCode.init(&.{0x8f}),
.seto => OpCode.init(&.{0x0f,0x90}),
.setno => OpCode.init(&.{0x0f,0x91}),
.setb,
.setc,
.setnae => OpCode.init(&.{0x0f,0x92}),
.setnb,
.setnc,
.setae => OpCode.init(&.{0x0f,0x93}),
.sete,
.setz => OpCode.init(&.{0x0f,0x94}),
.setne,
.setnz => OpCode.init(&.{0x0f,0x95}),
.setbe,
.setna => OpCode.init(&.{0x0f,0x96}),
.seta,
.setnbe => OpCode.init(&.{0x0f,0x97}),
.sets => OpCode.init(&.{0x0f,0x98}),
.setns => OpCode.init(&.{0x0f,0x99}),
.setp,
.setpe => OpCode.init(&.{0x0f,0x9a}),
.setnp,
.setop => OpCode.init(&.{0x0f,0x9b}),
.setpo => OpCode.init(&.{0x0f,0x9b}),
.setl,
.setnge => OpCode.init(&.{0x0f,0x9c}),
.setnl,
.setge => OpCode.init(&.{0x0f,0x9d}),
.setle,
.setng => OpCode.init(&.{0x0f,0x9e}),
.setnle,
.setg => OpCode.init(&.{0x0f,0x9f}),
.idiv,
.div,
.imul,
.mul => if (is_one_byte) OpCode.init(&.{0xf6}) else OpCode.init(&.{0xf7}),
.fisttp16 => OpCode.init(&.{0xdf}),
.fisttp32 => OpCode.init(&.{0xdb}),
.fisttp64 => OpCode.init(&.{0xdd}),
@ -1640,12 +1746,52 @@ inline fn getOpCode(tag: Tag, enc: Encoding, is_one_byte: bool) OpCode {
.movzx => if (is_one_byte) OpCode.init(&.{0x0f,0xb6}) else OpCode.init(&.{0x0f,0xb7}),
.lea => if (is_one_byte) OpCode.init(&.{0x8c}) else OpCode.init(&.{0x8d}),
.imul => OpCode.init(&.{0x0f,0xaf}),
.cmove,
.cmovz => OpCode.init(&.{0x0f,0x44}),
.cmova,
.cmovnbe, => OpCode.init(&.{0x0f,0x47}),
.cmovae,
.cmovnb, => OpCode.init(&.{0x0f,0x43}),
.cmovb,
.cmovc,
.cmovnae => OpCode.init(&.{0x0f,0x42}),
.cmovbe,
.cmovna, => OpCode.init(&.{0x0f,0x46}),
.cmove,
.cmovz, => OpCode.init(&.{0x0f,0x44}),
.cmovg,
.cmovnle, => OpCode.init(&.{0x0f,0x4f}),
.cmovge,
.cmovnl, => OpCode.init(&.{0x0f,0x4d}),
.cmovl,
.cmovng => OpCode.init(&.{0x0f,0x4c}),
.cmovnge, => OpCode.init(&.{0x0f,0x4c}),
.cmovle,
.cmovng, => OpCode.init(&.{0x0f,0x4e}),
.cmovne,
.cmovnz, => OpCode.init(&.{0x0f,0x45}),
.cmovno => OpCode.init(&.{0x0f,0x41}),
.cmovnp,
.cmovpo, => OpCode.init(&.{0x0f,0x4b}),
.cmovns => OpCode.init(&.{0x0f,0x49}),
.cmovo => OpCode.init(&.{0x0f,0x40}),
.cmovp,
.cmovpe, => OpCode.init(&.{0x0f,0x4a}),
.cmovs => OpCode.init(&.{0x0f,0x48}),
.movsd => OpCode.init(&.{0xf2,0x0f,0x10}),
.movss => OpCode.init(&.{0xf3,0x0f,0x10}),
.addsd => OpCode.init(&.{0xf2,0x0f,0x58}),
@ -1735,7 +1881,7 @@ inline fn getModRmExt(tag: Tag) u3 {
.setp,
.setpe,
.setnp,
.setop,
.setpo,
.setl,
.setnge,
.setnl,

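The long mnemonic tables above are exhaustive but mechanical: Jcc, SETcc and CMOVcc all reuse one 4-bit hardware condition encoding ("tttn"), added to a per-family base opcode (0x0f 0x80 / 0x90 / 0x40). The commit keeps explicit switches — the Condition enum is ordered alphabetically, not by hardware encoding — but the regularity is easy to check. A hypothetical sketch (not how Emit.zig does it) with a few of the 16 encodings:

const std = @import("std");

const tttn_e: u8 = 0x4; // equal / zero
const tttn_b: u8 = 0x2; // below / carry
const tttn_a: u8 = 0x7; // above (not below or equal)

fn jcc(cc: u8) [2]u8 {
    return .{ 0x0f, 0x80 + cc }; // near Jcc
}
fn setcc(cc: u8) [2]u8 {
    return .{ 0x0f, 0x90 + cc };
}
fn cmovcc(cc: u8) [2]u8 {
    return .{ 0x0f, 0x40 + cc };
}

test "je / sete / cmove share one condition encoding" {
    const je = jcc(tttn_e);
    const sete = setcc(tttn_e);
    const cmove = cmovcc(tttn_e);
    // matches the opcode tables in getOpCode above
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x0f, 0x84 }, &je);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x0f, 0x94 }, &sete);
    try std.testing.expectEqualSlices(u8, &[_]u8{ 0x0f, 0x44 }, &cmove);
}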
src/arch/x86_64/Mir.zig

@ -275,42 +275,25 @@ pub const Inst = struct {
call,
/// ops flags:
/// 0b00 gte
/// 0b01 gt
/// 0b10 lt
/// 0b11 lte
cond_jmp_greater_less,
cond_set_byte_greater_less,
/// unused
/// Notes:
/// * uses `inst_cc` in Data.
cond_jmp,
/// ops flags:
/// 0b00 above or equal
/// 0b01 above
/// 0b10 below
/// 0b11 below or equal
cond_jmp_above_below,
cond_set_byte_above_below,
/// ops flags:
/// 0bX0 ne
/// 0bX1 eq
cond_jmp_eq_ne,
cond_set_byte_eq_ne,
/// 0b00 reg1
/// Notes:
/// * uses condition code (CC) stored as part of data
cond_set_byte,
/// ops flags:
/// 0b00 reg1, reg2,
/// 0b01 reg1, word ptr [reg2 + imm]
/// 0b10 reg1, dword ptr [reg2 + imm]
/// 0b11 reg1, qword ptr [reg2 + imm]
cond_mov_eq,
cond_mov_lt,
cond_mov_below,
/// ops flags:
/// 0b00 reg1 if OF = 1
/// 0b01 reg1 if OF = 0
/// 0b10 reg1 if CF = 1
/// 0b11 reg1 if CF = 0
cond_set_byte_overflow,
/// Notes:
/// * uses condition code (CC) stored as part of data
cond_mov,
/// ops flags: form:
/// 0b00 reg1
@ -451,6 +434,16 @@ pub const Inst = struct {
inst: Index,
/// A 32-bit immediate value.
imm: u32,
/// A condition code for use with EFLAGS register.
cc: bits.Condition,
/// Another instruction with condition code.
/// Used by `cond_jmp`.
inst_cc: struct {
/// Another instruction.
inst: Index,
/// A condition code for use with EFLAGS register.
cc: bits.Condition,
},
/// An extern function.
extern_fn: struct {
/// Index of the containing atom.

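The two new Data members are what let a single cond_jmp / cond_set_byte / cond_mov tag replace the whole family of flag-encoded tags: the condition travels in the payload instead of in ops.flags. A minimal mock of the shape — hypothetical Index/Data mirrors of the diff above; the real Mir.Inst carries more members and is stored in a MultiArrayList:

const std = @import("std");

const Index = u32;
const Condition = enum { e, ne }; // trimmed

const Data = union {
    inst: Index,
    imm: u32,
    cc: Condition,
    inst_cc: struct {
        inst: Index,
        cc: Condition,
    },
};

test "cond_jmp carries its condition next to the (later-patched) target" {
    // emitted with the target still unknown, exactly like genCondBrMir:
    var data = Data{ .inst_cc = .{ .inst = undefined, .cc = .ne } };
    // ...and performReloc fills the target in afterwards:
    data.inst_cc.inst = 42;
    try std.testing.expectEqual(@as(Index, 42), data.inst_cc.inst);
}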
src/arch/x86_64/bits.zig

@ -6,6 +6,135 @@ const ArrayList = std.ArrayList;
const Allocator = std.mem.Allocator;
const DW = std.dwarf;
/// EFLAGS condition codes
pub const Condition = enum(u5) {
/// above
a,
/// above or equal
ae,
/// below
b,
/// below or equal
be,
/// carry
c,
/// equal
e,
/// greater
g,
/// greater or equal
ge,
/// less
l,
/// less or equal
le,
/// not above
na,
/// not above or equal
nae,
/// not below
nb,
/// not below or equal
nbe,
/// not carry
nc,
/// not equal
ne,
/// not greater
ng,
/// not greater or equal
nge,
/// not less
nl,
/// not less or equal
nle,
/// not overflow
no,
/// not parity
np,
/// not sign
ns,
/// not zero
nz,
/// overflow
o,
/// parity
p,
/// parity even
pe,
/// parity odd
po,
/// sign
s,
/// zero
z,
/// Converts a std.math.CompareOperator into a condition flag,
/// i.e. returns the condition that is true iff the result of the
/// comparison is true. Assumes signed comparison
pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition {
return switch (op) {
.gte => .ge,
.gt => .g,
.neq => .ne,
.lt => .l,
.lte => .le,
.eq => .e,
};
}
/// Converts a std.math.CompareOperator into a condition flag,
/// i.e. returns the condition that is true iff the result of the
/// comparison is true. Assumes unsigned comparison
pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition {
return switch (op) {
.gte => .ae,
.gt => .a,
.neq => .ne,
.lt => .b,
.lte => .be,
.eq => .e,
};
}
/// Returns the condition which is true iff the given condition is
/// false (if such a condition exists)
pub fn negate(cond: Condition) Condition {
return switch (cond) {
.a => .na,
.ae => .nae,
.b => .nb,
.be => .nbe,
.c => .nc,
.e => .ne,
.g => .ng,
.ge => .nge,
.l => .nl,
.le => .nle,
.na => .a,
.nae => .ae,
.nb => .b,
.nbe => .be,
.nc => .c,
.ne => .e,
.ng => .g,
.nge => .ge,
.nl => .l,
.nle => .le,
.no => .o,
.np => .p,
.ns => .s,
.nz => .z,
.o => .no,
.p => .np,
.pe => unreachable,
.po => unreachable,
.s => .ns,
.z => .nz,
};
}
};
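Usage of the two converters, for completeness: the same AIR compare operator turns into different condition codes depending on signedness, which is exactly what airCmp does above. A trimmed, self-contained copy of the conversion logic on a reduced enum:

const std = @import("std");
const CompareOperator = std.math.CompareOperator;

const Cond = enum {
    a,
    ae,
    b,
    be,
    e,
    g,
    ge,
    l,
    le,
    ne,

    fn fromSigned(op: CompareOperator) Cond {
        return switch (op) {
            .gte => .ge,
            .gt => .g,
            .neq => .ne,
            .lt => .l,
            .lte => .le,
            .eq => .e,
        };
    }

    fn fromUnsigned(op: CompareOperator) Cond {
        return switch (op) {
            .gte => .ae,
            .gt => .a,
            .neq => .ne,
            .lt => .b,
            .lte => .be,
            .eq => .e,
        };
    }
};

test "signedness picks the condition code for the same operator" {
    // signed `<` tests SF/OF, unsigned `<` tests CF
    try std.testing.expectEqual(Cond.l, Cond.fromSigned(.lt));
    try std.testing.expectEqual(Cond.b, Cond.fromUnsigned(.lt));
}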
// zig fmt: off
/// Definitions of all of the general purpose x64 registers. The order is semantically meaningful.