From 0c8ce9ed9d916599e9ec518ecced941bf0b37eff Mon Sep 17 00:00:00 2001
From: Koakuma
Date: Mon, 16 May 2022 23:00:49 +0700
Subject: [PATCH] stage2: sparc64: Implement airCmp

---
 src/arch/sparc64/CodeGen.zig | 229 ++++++++++++++++++++++++++++++++++-
 src/arch/sparc64/Mir.zig     |   4 +
 2 files changed, 227 insertions(+), 6 deletions(-)

diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index d09e65c3bf..b9aa20efdf 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -521,12 +521,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
             .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
 
-            .cmp_lt => @panic("TODO try self.airCmp(inst, .lt)"),
-            .cmp_lte => @panic("TODO try self.airCmp(inst, .lte)"),
-            .cmp_eq => @panic("TODO try self.airCmp(inst, .eq)"),
-            .cmp_gte => @panic("TODO try self.airCmp(inst, .gte)"),
-            .cmp_gt => @panic("TODO try self.airCmp(inst, .gt)"),
-            .cmp_neq => @panic("TODO try self.airCmp(inst, .neq)"),
+            .cmp_lt => try self.airCmp(inst, .lt),
+            .cmp_lte => try self.airCmp(inst, .lte),
+            .cmp_eq => try self.airCmp(inst, .eq),
+            .cmp_gte => try self.airCmp(inst, .gte),
+            .cmp_gt => try self.airCmp(inst, .gt),
+            .cmp_neq => try self.airCmp(inst, .neq),
 
             .cmp_vector => @panic("TODO try self.airCmpVector(inst)"),
             .cmp_lt_errors_len => @panic("TODO try self.airCmpLtErrorsLen(inst)"),
 
@@ -996,6 +996,54 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
     @panic("TODO handle return value with BigTomb");
 }
 
+fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
+    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+        const lhs = try self.resolveInst(bin_op.lhs);
+        const rhs = try self.resolveInst(bin_op.rhs);
+        const lhs_ty = self.air.typeOf(bin_op.lhs);
+
+        var int_buffer: Type.Payload.Bits = undefined;
+        const int_ty = switch (lhs_ty.zigTypeTag()) {
+            .Vector => unreachable, // Vector comparisons are handled by cmp_vector.
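+            // Every other comparable type is mapped onto an integer
+            // type below, so that a single integer comparison path
+            // can handle all of them.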
+            .Enum => lhs_ty.intTagType(&int_buffer),
+            .Int => lhs_ty,
+            .Bool => Type.initTag(.u1),
+            .Pointer => Type.usize,
+            .ErrorSet => Type.initTag(.u16),
+            .Optional => blk: {
+                var opt_buffer: Type.Payload.ElemType = undefined;
+                const payload_ty = lhs_ty.optionalChild(&opt_buffer);
+                if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
+                    break :blk Type.initTag(.u1);
+                } else if (lhs_ty.isPtrLikeOptional()) {
+                    break :blk Type.usize;
+                } else {
+                    return self.fail("TODO SPARCv9 cmp non-pointer optionals", .{});
+                }
+            },
+            .Float => return self.fail("TODO SPARCv9 cmp floats", .{}),
+            else => unreachable,
+        };
+
+        const int_info = int_ty.intInfo(self.target.*);
+        if (int_info.bits <= 64) {
+            // Spill the old compare flags contents (if any) *before*
+            // emitting the comparison, since the subcc it lowers to
+            // clobbers the flags.
+            try self.spillCompareFlagsIfOccupied();
+            self.compare_flags_inst = inst;
+
+            _ = try self.binOp(.cmp_eq, inst, lhs, rhs, int_ty, int_ty);
+
+            break :result switch (int_info.signedness) {
+                .signed => MCValue{ .compare_flags_signed = op },
+                .unsigned => MCValue{ .compare_flags_unsigned = op },
+            };
+        } else {
+            return self.fail("TODO SPARCv9 cmp for ints > 64 bits", .{});
+        }
+    };
+    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+}
+
 fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
     const pl_op = self.air.instructions.items(.data)[inst].pl_op;
     const cond = try self.resolveInst(pl_op.operand);
@@ -1427,6 +1475,149 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     return MCValue{ .stack_offset = stack_offset };
 }
 
+/// For all your binary operation needs, this function will generate
+/// the corresponding Mir instruction(s). Returns the location of the
+/// result.
+///
+/// If the binary operation itself happens to be an Air instruction,
+/// pass the corresponding index in the inst parameter. That helps
+/// this function do stuff like reusing operands.
+///
+/// This function does not do any lowering to Mir itself, but instead
+/// looks at the lhs and rhs and determines which kind of lowering
+/// would be most suitable and then delegates the lowering to other
+/// functions.
+fn binOp(
+    self: *Self,
+    tag: Air.Inst.Tag,
+    maybe_inst: ?Air.Inst.Index,
+    lhs: MCValue,
+    rhs: MCValue,
+    lhs_ty: Type,
+    rhs_ty: Type,
+) InnerError!MCValue {
+    const mod = self.bin_file.options.module.?;
+    switch (tag) {
+        .cmp_eq => {
+            switch (lhs_ty.zigTypeTag()) {
+                .Float => return self.fail("TODO binary operations on floats", .{}),
+                .Vector => return self.fail("TODO binary operations on vectors", .{}),
+                .Int => {
+                    assert(lhs_ty.eql(rhs_ty, mod));
+                    const int_info = lhs_ty.intInfo(self.target.*);
+                    if (int_info.bits <= 64) {
+                        // TODO optimize for small (i13) values by putting them inside immediates
+
+                        const mir_tag: Mir.Inst.Tag = switch (tag) {
+                            .cmp_eq => .subcc,
+                            else => unreachable,
+                        };
+
+                        return try self.binOpRegister(mir_tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
+                    } else {
+                        return self.fail("TODO binary operations on int with bits > 64", .{});
+                    }
+                },
+                else => unreachable,
+            }
+        },
+
+        else => return self.fail("TODO implement {} binOp for SPARCv9", .{tag}),
+    }
+}
+
+/// Don't call this function directly. Use binOp instead.
+///
+/// Calling this function signals an intention to generate a Mir
+/// instruction of the form
+///
+///     op dest, lhs, rhs
+///
+/// Asserts that generating an instruction of that form is possible.
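+///
+/// Operands that are not yet in registers are materialized into
+/// freshly allocated registers with genSetReg. Operand registers are
+/// frozen while in use, so allocating the destination register can
+/// never clobber them; when maybe_inst is given, the destination may
+/// instead reuse the register of a dying operand.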
+fn binOpRegister(
+    self: *Self,
+    mir_tag: Mir.Inst.Tag,
+    maybe_inst: ?Air.Inst.Index,
+    lhs: MCValue,
+    rhs: MCValue,
+    lhs_ty: Type,
+    rhs_ty: Type,
+) !MCValue {
+    const lhs_is_register = lhs == .register;
+    const rhs_is_register = rhs == .register;
+
+    if (lhs_is_register) self.register_manager.freezeRegs(&.{lhs.register});
+    if (rhs_is_register) self.register_manager.freezeRegs(&.{rhs.register});
+
+    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+
+    const lhs_reg = if (lhs_is_register) lhs.register else blk: {
+        const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            break :inst Air.refToIndex(bin_op.lhs).?;
+        } else null;
+
+        const reg = try self.register_manager.allocReg(track_inst);
+        self.register_manager.freezeRegs(&.{reg});
+
+        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+        break :blk reg;
+    };
+    defer self.register_manager.unfreezeRegs(&.{lhs_reg});
+
+    const rhs_reg = if (rhs_is_register) rhs.register else blk: {
+        const track_inst: ?Air.Inst.Index = if (maybe_inst) |inst| inst: {
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+            break :inst Air.refToIndex(bin_op.rhs).?;
+        } else null;
+
+        const reg = try self.register_manager.allocReg(track_inst);
+        self.register_manager.freezeRegs(&.{reg});
+
+        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+        break :blk reg;
+    };
+    defer self.register_manager.unfreezeRegs(&.{rhs_reg});
+
+    const dest_reg = switch (mir_tag) {
+        else => if (maybe_inst) |inst| blk: {
+            const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+
+            if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) {
+                break :blk lhs_reg;
+            } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
+                break :blk rhs_reg;
+            } else {
+                break :blk try self.register_manager.allocReg(inst);
+            }
+        } else blk: {
+            break :blk try self.register_manager.allocReg(null);
+        },
+    };
+
+    if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
+    if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
+
+    const mir_data: Mir.Inst.Data = switch (mir_tag) {
+        .subcc => .{ .arithmetic_3op = .{
+            .is_imm = false,
+            .rd = dest_reg,
+            .rs1 = lhs_reg,
+            .rs2_or_imm = .{ .rs2 = rhs_reg },
+        } },
+        else => unreachable,
+    };
+
+    _ = try self.addInst(.{
+        .tag = mir_tag,
+        .data = mir_data,
+    });
+
+    return MCValue{ .register = dest_reg };
+}
+
 fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
     const block_data = self.blocks.getPtr(block).?;
 
@@ -2146,6 +2337,9 @@ fn processDeath(self: *Self, inst: Air.Inst.Index) void {
         .register => |reg| {
             self.register_manager.freeReg(reg);
         },
+        .compare_flags_signed, .compare_flags_unsigned => {
+            self.compare_flags_inst = null;
+        },
         else => {}, // TODO process stack allocation death
     }
 }
@@ -2338,6 +2532,29 @@ fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
     }
 }
 
+/// Spill the value of the instruction that currently occupies the
+/// compare flags, if any, into a register or stack slot.
+fn spillCompareFlagsIfOccupied(self: *Self) !void {
+    if (self.compare_flags_inst) |inst_to_save| {
+        const mcv = self.getResolvedInstValue(inst_to_save);
+        switch (mcv) {
+            .compare_flags_signed,
+            .compare_flags_unsigned,
+            => {},
+            else => unreachable, // mcv doesn't occupy the compare flags
+        }
+
+        const new_mcv = try self.allocRegOrMem(inst_to_save, true);
+        try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
+        log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
+
+        const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+        try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
+
+        self.compare_flags_inst = null;
+    }
+}
+
 pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void {
     const stack_mcv = try self.allocRegOrMem(inst, false);
     log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv });
diff --git a/src/arch/sparc64/Mir.zig b/src/arch/sparc64/Mir.zig
index d9309f1524..683b09fa06 100644
--- a/src/arch/sparc64/Mir.zig
+++ b/src/arch/sparc64/Mir.zig
@@ -115,6 +115,10 @@ pub const Inst = struct {
         /// A.61 Trap on Integer Condition Codes (Tcc)
         /// This uses the trap field.
         tcc,
+
+        // TODO add synthetic instructions
+        // TODO add a cmp synthetic instruction to avoid wasting a
+        // register when comparing with subcc
     };
 
     /// The position of an MIR instruction within the `Mir` instructions array.
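
Note: the cmp synthetic instruction mentioned in the TODO above is left
for a follow-up patch. As a rough sketch only (not part of this patch,
and assuming the Register enum in bits.zig names the hardwired zero
register .g0), cmp could be lowered as a subcc that discards its integer
result into %g0, so only the condition codes are produced and no
destination register is consumed, mirroring the mir_data construction in
binOpRegister:

    // Hypothetical lowering of the SPARCv9 `cmp rs1, rs2` synthetic,
    // which is defined as `subcc rs1, rs2, %g0`.
    const mir_data: Mir.Inst.Data = .{ .arithmetic_3op = .{
        .is_imm = false,
        .rd = .g0, // Writes to %g0 are discarded; only icc/xcc are updated.
        .rs1 = lhs_reg,
        .rs2_or_imm = .{ .rs2 = rhs_reg },
    } };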