mirror of https://github.com/ziglang/zig.git
synced 2026-01-30 19:23:37 +00:00

regalloc: make register class bitmask non-optional

This commit is contained in:
parent f766b25f82
commit e95dfac03e
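The change in one sketch: the register manager's allocation entry points used to take an options struct whose `selector_mask` field was optional, with `.{}` meaning "pick from all tracked registers"; they now take a required `RegisterBitSet` register class, so every backend must define its classes and every call site must name one. The following is an illustrative, self-contained Zig sketch of that shape change, with stand-in types; the real `AllocOpts` and `orelse initFull()` fallback appear in the register_manager.zig hunks at the end of this diff.

```zig
const std = @import("std");

// Stand-in for the real tracked-register bitset; illustrative only.
const RegisterBitSet = std.StaticBitSet(8);

// Before this commit: the class was an optional field, so `.{}` compiled
// and silently meant "any tracked register".
const AllocOpts = struct {
    selector_mask: ?RegisterBitSet = null,
};
fn allocRegOld(opts: AllocOpts) void {
    const class = opts.selector_mask orelse RegisterBitSet.initFull();
    _ = class;
}

// After: the class is a required parameter; every call site must name one.
fn allocRegNew(register_class: RegisterBitSet) void {
    _ = register_class;
}

test "call-site shapes" {
    allocRegOld(.{}); // old: legal, class left implicit
    allocRegNew(RegisterBitSet.initFull()); // new: class always explicit
}
```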
@@ -21,9 +21,6 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
-const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
-const RegisterLock = RegisterManager.RegisterLock;
 
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
 const FnResult = @import("../../codegen.zig").FnResult;
@@ -31,11 +28,14 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
 
 const bits = @import("bits.zig");
 const abi = @import("abi.zig");
+const RegisterManager = abi.RegisterManager;
+const RegisterLock = RegisterManager.RegisterLock;
 const Register = bits.Register;
 const Instruction = bits.Instruction;
 const callee_preserved_regs = abi.callee_preserved_regs;
 const c_abi_int_param_regs = abi.c_abi_int_param_regs;
 const c_abi_int_return_regs = abi.c_abi_int_return_regs;
+const gp = abi.RegisterClass.gp;
 
 const InnerError = error{
     OutOfMemory,
@@ -888,7 +888,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
         if (abi_size <= 8) {
-            if (self.register_manager.tryAllocReg(inst, .{})) |reg| {
+            if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
                 return MCValue{ .register = registerAlias(reg, abi_size) };
             }
         }
@@ -951,7 +951,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const raw_reg = try self.register_manager.allocReg(null, .{});
+    const raw_reg = try self.register_manager.allocReg(null, gp);
     const reg = registerAlias(raw_reg, ty.abiSize(self.target.*));
     try self.genSetReg(ty, reg, mcv);
     return reg;
@@ -961,7 +961,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// `reg_owner` is the instruction that gets associated with the register in the register table.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
-    const raw_reg = try self.register_manager.allocReg(reg_owner, .{});
+    const raw_reg = try self.register_manager.allocReg(reg_owner, gp);
     const ty = self.air.typeOfIndex(reg_owner);
     const reg = registerAlias(raw_reg, ty.abiSize(self.target.*));
     try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
@@ -1074,11 +1074,11 @@ fn trunc(
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
             break :blk registerAlias(operand_reg, dest_ty.abiSize(self.target.*));
         } else {
-            const raw_reg = try self.register_manager.allocReg(inst, .{});
+            const raw_reg = try self.register_manager.allocReg(inst, gp);
             break :blk registerAlias(raw_reg, dest_ty.abiSize(self.target.*));
         }
     } else blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         break :blk registerAlias(raw_reg, dest_ty.abiSize(self.target.*));
     };
 
@@ -1160,7 +1160,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             break :blk op_reg;
         }
 
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         break :blk raw_reg.to32();
     };
 
@@ -1193,7 +1193,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             break :blk op_reg;
         }
 
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         break :blk registerAlias(raw_reg, operand_ty.abiSize(self.target.*));
     };
 
@@ -1293,7 +1293,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.lhs).?;
     } else null;
 
-    const raw_reg = try self.register_manager.allocReg(track_inst, .{});
+    const raw_reg = try self.register_manager.allocReg(track_inst, gp);
     const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@@ -1308,7 +1308,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.rhs).?;
     } else null;
 
-    const raw_reg = try self.register_manager.allocReg(track_inst, .{});
+    const raw_reg = try self.register_manager.allocReg(track_inst, gp);
     const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
 
    if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@@ -1326,11 +1326,11 @@ fn binOpRegister(
         } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
             break :blk rhs_reg;
         } else {
-            const raw_reg = try self.register_manager.allocReg(md.inst, .{});
+            const raw_reg = try self.register_manager.allocReg(md.inst, gp);
             break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
         }
     } else blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
     },
 };
@@ -1431,7 +1431,7 @@ fn binOpImmediate(
     ).?;
     } else null;
 
-    const raw_reg = try self.register_manager.allocReg(track_inst, .{});
+    const raw_reg = try self.register_manager.allocReg(track_inst, gp);
     const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
@@ -1452,11 +1452,11 @@ fn binOpImmediate(
     )) {
         break :blk lhs_reg;
     } else {
-        const raw_reg = try self.register_manager.allocReg(md.inst, .{});
+        const raw_reg = try self.register_manager.allocReg(md.inst, gp);
         break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
     }
     } else blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         break :blk registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
     },
 };
@@ -1872,7 +1872,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
     defer self.register_manager.unlockReg(dest_reg_lock);
 
-    const raw_truncated_reg = try self.register_manager.allocReg(null, .{});
+    const raw_truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg = registerAlias(raw_truncated_reg, lhs_ty.abiSize(self.target.*));
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
@@ -1983,7 +1983,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
     defer self.register_manager.unlockReg(dest_reg_lock);
 
-    const truncated_reg = try self.register_manager.allocReg(null, .{});
+    const truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
 
@@ -2048,7 +2048,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
     const lhs_reg = if (lhs_is_register) lhs.register else blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
         break :blk reg;
     };
@@ -2056,7 +2056,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
     const rhs_reg = if (rhs_is_register) rhs.register else blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         const reg = registerAlias(raw_reg, rhs_ty.abiAlignment(self.target.*));
         break :blk reg;
     };
@@ -2067,7 +2067,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
 
     const dest_reg = blk: {
-        const raw_reg = try self.register_manager.allocReg(null, .{});
+        const raw_reg = try self.register_manager.allocReg(null, gp);
         const reg = registerAlias(raw_reg, lhs_ty.abiSize(self.target.*));
         break :blk reg;
     };
@@ -2086,7 +2086,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     } },
     });
 
-    const dest_high_reg = try self.register_manager.allocReg(null, .{});
+    const dest_high_reg = try self.register_manager.allocReg(null, gp);
     const dest_high_reg_lock = self.register_manager.lockRegAssumeUnused(dest_high_reg);
     defer self.register_manager.unlockReg(dest_high_reg_lock);
 
@@ -2136,7 +2136,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
         }
     },
     .unsigned => {
-        const dest_high_reg = try self.register_manager.allocReg(null, .{});
+        const dest_high_reg = try self.register_manager.allocReg(null, gp);
         const dest_high_reg_lock = self.register_manager.lockRegAssumeUnused(dest_high_reg);
         defer self.register_manager.unlockReg(dest_high_reg_lock);
 
@@ -2192,7 +2192,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     },
     }
 
-    const truncated_reg = try self.register_manager.allocReg(null, .{});
+    const truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
 
@@ -2663,7 +2663,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     },
     .stack_offset => |off| {
         if (elem_size <= 8) {
-            const raw_tmp_reg = try self.register_manager.allocReg(null, .{});
+            const raw_tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg = registerAlias(raw_tmp_reg, elem_size);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
@@ -2672,7 +2672,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
             try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
         } else {
             // TODO optimize the register allocation
-            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, .{});
+            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
             const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
             defer for (regs_locks) |reg| {
                 self.register_manager.unlockReg(reg);
@@ -2887,7 +2887,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
     },
     else => {
         if (abi_size <= 8) {
-            const raw_tmp_reg = try self.register_manager.allocReg(null, .{});
+            const raw_tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg = registerAlias(raw_tmp_reg, abi_size);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
@@ -3002,7 +3002,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     // TODO return special MCValue condition flags
     // get overflow bit: set register to C flag
     // resp. V flag
-    const raw_dest_reg = try self.register_manager.allocReg(null, .{});
+    const raw_dest_reg = try self.register_manager.allocReg(null, gp);
     const dest_reg = raw_dest_reg.to32();
 
     // C flag: cset reg, cs
@@ -4065,7 +4065,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
     const overflow_bit_ty = ty.structFieldType(1);
     const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
-    const raw_cond_reg = try self.register_manager.allocReg(null, .{});
+    const raw_cond_reg = try self.register_manager.allocReg(null, gp);
     const cond_reg = registerAlias(
         raw_cond_reg,
         @intCast(u32, overflow_bit_ty.abiSize(self.target.*)),
@@ -4113,7 +4113,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
 
     // TODO call extern memcpy
-    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, .{});
+    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
     const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs);
     defer for (regs_locks) |reg| {
         self.register_manager.unlockReg(reg);
 
@@ -1,6 +1,7 @@
 const builtin = @import("builtin");
 const bits = @import("bits.zig");
 const Register = bits.Register;
+const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 
 const callee_preserved_regs_impl = if (builtin.os.tag.isDarwin()) struct {
     pub const callee_preserved_regs = [_]Register{
@@ -18,3 +19,19 @@ pub const callee_preserved_regs = callee_preserved_regs_impl.callee_preserved_re
 
 pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
 pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
+
+const allocatable_registers = callee_preserved_regs;
+pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
+
+// Register classes
+const RegisterBitSet = RegisterManager.RegisterBitSet;
+pub const RegisterClass = struct {
+    pub const gp: RegisterBitSet = blk: {
+        var set = RegisterBitSet.initEmpty();
+        set.setRangeValue(.{
+            .start = 0,
+            .end = callee_preserved_regs.len,
+        }, true);
+        break :blk set;
+    };
+};
 
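Each `RegisterClass.gp` added above is just a comptime-built static bitset over the backend's tracked register list. A minimal standalone sketch of the same construction using `std.bit_set` (the register names here are placeholders, not the compiler's tables):

```zig
const std = @import("std");

// Placeholder for a backend's tracked (allocatable) register list.
const tracked_registers = [_][]const u8{ "x19", "x20", "x21", "x22" };

const RegisterBitSet = std.StaticBitSet(tracked_registers.len);

// Build a class covering every tracked register, the same pattern as the
// `RegisterClass.gp` blocks in the abi.zig hunks above.
const gp: RegisterBitSet = blk: {
    var set = RegisterBitSet.initEmpty();
    set.setRangeValue(.{ .start = 0, .end = tracked_registers.len }, true);
    break :blk set;
};

test "gp selects all tracked registers" {
    try std.testing.expectEqual(tracked_registers.len, gp.count());
}
```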
@@ -21,9 +21,6 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
-const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const RegisterManager = RegisterManagerFn(Self, Register, &allocatable_registers);
-const RegisterLock = RegisterManager.RegisterLock;
 
 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -31,14 +28,16 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
 
 const bits = @import("bits.zig");
 const abi = @import("abi.zig");
+const RegisterManager = abi.RegisterManager;
+const RegisterLock = RegisterManager.RegisterLock;
 const Register = bits.Register;
 const Instruction = bits.Instruction;
 const Condition = bits.Condition;
 const callee_preserved_regs = abi.callee_preserved_regs;
 const caller_preserved_regs = abi.caller_preserved_regs;
-const allocatable_registers = abi.allocatable_registers;
 const c_abi_int_param_regs = abi.c_abi_int_param_regs;
 const c_abi_int_return_regs = abi.c_abi_int_return_regs;
+const gp = abi.RegisterClass.gp;
 
 const InnerError = error{
     OutOfMemory,
@@ -874,7 +873,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     const ptr_bits = self.target.cpu.arch.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     if (abi_size <= ptr_bytes) {
-        if (self.register_manager.tryAllocReg(inst, .{})) |reg| {
+        if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
             return MCValue{ .register = reg };
         }
     }
@@ -939,7 +938,7 @@ fn spillCompareFlagsIfOccupied(self: *Self) !void {
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const reg = try self.register_manager.allocReg(null, .{});
+    const reg = try self.register_manager.allocReg(null, gp);
     try self.genSetReg(ty, reg, mcv);
     return reg;
 }
@@ -948,7 +947,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// `reg_owner` is the instruction that gets associated with the register in the register table.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
-    const reg = try self.register_manager.allocReg(reg_owner, .{});
+    const reg = try self.register_manager.allocReg(reg_owner, gp);
     try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }
@@ -1065,9 +1064,9 @@ fn trunc(
         if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) {
             break :blk operand_reg;
         } else {
-            break :blk try self.register_manager.allocReg(inst, .{});
+            break :blk try self.register_manager.allocReg(inst, gp);
         }
-    } else try self.register_manager.allocReg(null, .{});
+    } else try self.register_manager.allocReg(null, gp);
 
     switch (info_b.bits) {
         32 => {
@@ -1153,7 +1152,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             break :blk op_reg;
         }
 
-        break :blk try self.register_manager.allocReg(null, .{});
+        break :blk try self.register_manager.allocReg(null, gp);
     };
 
     _ = try self.addInst(.{
@@ -1183,7 +1182,7 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void {
             break :blk op_reg;
         }
 
-        break :blk try self.register_manager.allocReg(null, .{});
+        break :blk try self.register_manager.allocReg(null, gp);
     };
 
     _ = try self.addInst(.{
@@ -1254,9 +1253,9 @@ fn minMax(
         } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
             break :blk rhs_reg;
         } else {
-            break :blk try self.register_manager.allocReg(inst, .{});
+            break :blk try self.register_manager.allocReg(inst, gp);
         }
-    } else try self.register_manager.allocReg(null, .{});
+    } else try self.register_manager.allocReg(null, gp);
 
     // lhs == reg should have been checked by airMinMax
     //
@@ -1438,7 +1437,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
     defer self.register_manager.unlockReg(dest_reg_lock);
 
-    const truncated_reg = try self.register_manager.allocReg(null, .{});
+    const truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
 
@@ -1543,7 +1542,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg);
     defer self.register_manager.unlockReg(dest_reg_lock);
 
-    const truncated_reg = try self.register_manager.allocReg(null, .{});
+    const truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
 
@@ -1582,18 +1581,18 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const lhs_reg = if (lhs_is_register)
         lhs.register
     else
-        try self.register_manager.allocReg(null, .{});
+        try self.register_manager.allocReg(null, gp);
     const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
     defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
     const rhs_reg = if (rhs_is_register)
         rhs.register
     else
-        try self.register_manager.allocReg(null, .{});
+        try self.register_manager.allocReg(null, gp);
     const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
     defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
 
-    const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, .{});
+    const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
     const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs);
     defer for (dest_regs_locks) |reg| {
         self.register_manager.unlockReg(reg);
@@ -1604,7 +1603,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
     if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
 
-    const truncated_reg = try self.register_manager.allocReg(null, .{});
+    const truncated_reg = try self.register_manager.allocReg(null, gp);
     const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg);
     defer self.register_manager.unlockReg(truncated_reg_lock);
 
@@ -2026,7 +2025,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg);
     defer self.register_manager.unlockReg(base_reg_lock);
 
-    const dst_reg = try self.register_manager.allocReg(inst, .{});
+    const dst_reg = try self.register_manager.allocReg(inst, gp);
     const dst_mcv = MCValue{ .register = dst_reg };
     const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
     defer self.register_manager.unlockReg(dst_reg_lock);
@@ -2234,7 +2233,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     },
     .stack_offset => |off| {
         if (elem_size <= 4) {
-            const tmp_reg = try self.register_manager.allocReg(null, .{});
+            const tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
 
@@ -2242,7 +2241,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
             try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
         } else {
             // TODO optimize the register allocation
-            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, .{});
+            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
             const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
             defer for (regs_locks) |reg_locked| {
                 self.register_manager.unlockReg(reg_locked);
@@ -2271,7 +2270,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     .stack_offset,
     .stack_argument_offset,
     => {
-        const reg = try self.register_manager.allocReg(null, .{});
+        const reg = try self.register_manager.allocReg(null, gp);
         const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
         defer self.register_manager.unlockReg(reg_lock);
 
@@ -2338,14 +2337,14 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
     },
     else => {
         if (elem_size <= 4) {
-            const tmp_reg = try self.register_manager.allocReg(null, .{});
+            const tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
 
             try self.genSetReg(value_ty, tmp_reg, value);
             try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
         } else {
-            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, .{});
+            const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
             const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
             defer for (regs_locks) |reg| {
                 self.register_manager.unlockReg(reg);
@@ -2487,7 +2486,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     1 => {
         // get overflow bit: set register to C flag
         // resp. V flag
-        const dest_reg = try self.register_manager.allocReg(null, .{});
+        const dest_reg = try self.register_manager.allocReg(null, gp);
 
         // mov reg, #0
         _ = try self.addInst(.{
@@ -2567,7 +2566,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.lhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -2581,7 +2580,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.rhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -2598,9 +2597,9 @@ fn binOpRegister(
         } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
             break :blk rhs_reg;
         } else {
-            break :blk try self.register_manager.allocReg(md.inst, .{});
+            break :blk try self.register_manager.allocReg(md.inst, gp);
         }
-    } else try self.register_manager.allocReg(null, .{}),
+    } else try self.register_manager.allocReg(null, gp),
     };
 
     if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
@@ -2684,7 +2683,7 @@ fn binOpImmediate(
     ).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -2704,9 +2703,9 @@ fn binOpImmediate(
     )) {
         break :blk lhs_reg;
     } else {
-        break :blk try self.register_manager.allocReg(md.inst, .{});
+        break :blk try self.register_manager.allocReg(md.inst, gp);
     }
-    } else try self.register_manager.allocReg(null, .{}),
+    } else try self.register_manager.allocReg(null, gp),
     };
 
     if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
@@ -4363,7 +4362,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
 
     const overflow_bit_ty = ty.structFieldType(1);
     const overflow_bit_offset = @intCast(u32, ty.structFieldOffset(1, self.target.*));
-    const cond_reg = try self.register_manager.allocReg(null, .{});
+    const cond_reg = try self.register_manager.allocReg(null, gp);
 
     // C flag: movcs reg, #1
     // V flag: movvs reg, #1
@@ -4408,7 +4407,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
 
     // TODO call extern memcpy
-    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, .{});
+    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
     const src_reg = regs[0];
     const dst_reg = regs[1];
     const len_reg = regs[2];
@@ -4782,7 +4781,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I
     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
 
     // TODO call extern memcpy
-    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, .{});
+    const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
     const src_reg = regs[0];
     const dst_reg = regs[1];
     const len_reg = regs[2];
 
@@ -1,9 +1,25 @@
 const bits = @import("bits.zig");
 const Register = bits.Register;
+const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 
 pub const callee_preserved_regs = [_]Register{ .r4, .r5, .r6, .r7, .r8, .r10 };
 pub const caller_preserved_regs = [_]Register{ .r0, .r1, .r2, .r3 };
-pub const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs;
 
 pub const c_abi_int_param_regs = [_]Register{ .r0, .r1, .r2, .r3 };
 pub const c_abi_int_return_regs = [_]Register{ .r0, .r1 };
+
+const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs;
+pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
+
+// Register classes
+const RegisterBitSet = RegisterManager.RegisterBitSet;
+pub const RegisterClass = struct {
+    pub const gp: RegisterBitSet = blk: {
+        var set = RegisterBitSet.initEmpty();
+        set.setRangeValue(.{
+            .start = 0,
+            .end = caller_preserved_regs.len + callee_preserved_regs.len,
+        }, true);
+        break :blk set;
+    };
+};
 
@@ -21,9 +21,6 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const log = std.log.scoped(.codegen);
 const build_options = @import("build_options");
-const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const RegisterManager = RegisterManagerFn(Self, Register, &callee_preserved_regs);
-const RegisterLock = RegisterManager.RegisterLock;
 
 const FnResult = @import("../../codegen.zig").FnResult;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
@@ -32,8 +29,11 @@ const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
 const bits = @import("bits.zig");
 const abi = @import("abi.zig");
 const Register = bits.Register;
+const RegisterManager = abi.RegisterManager;
+const RegisterLock = RegisterManager.RegisterLock;
 const Instruction = abi.Instruction;
 const callee_preserved_regs = abi.callee_preserved_regs;
+const gp = abi.RegisterClass.gp;
 
 const InnerError = error{
     OutOfMemory,
@@ -803,7 +803,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     const ptr_bits = self.target.cpu.arch.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     if (abi_size <= ptr_bytes) {
-        if (self.register_manager.tryAllocReg(inst, .{})) |reg| {
+        if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
            return MCValue{ .register = reg };
         }
     }
@@ -826,7 +826,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const reg = try self.register_manager.allocReg(null, .{});
+    const reg = try self.register_manager.allocReg(null, gp);
     try self.genSetReg(ty, reg, mcv);
     return reg;
 }
@@ -835,7 +835,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// `reg_owner` is the instruction that gets associated with the register in the register table.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
-    const reg = try self.register_manager.allocReg(reg_owner, .{});
+    const reg = try self.register_manager.allocReg(reg_owner, gp);
     try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
     return MCValue{ .register = reg };
 }
@@ -958,7 +958,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(bin_op.lhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -973,7 +973,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(bin_op.rhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -990,9 +990,9 @@ fn binOpRegister(
         } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) {
             break :blk rhs_reg;
         } else {
-            break :blk try self.register_manager.allocReg(inst, .{});
+            break :blk try self.register_manager.allocReg(inst, gp);
         }
-    } else try self.register_manager.allocReg(null, .{});
+    } else try self.register_manager.allocReg(null, gp);
 
     if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs);
     if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs);
@@ -1482,7 +1482,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     .memory,
     .stack_offset,
     => {
-        const reg = try self.register_manager.allocReg(null, .{});
+        const reg = try self.register_manager.allocReg(null, gp);
         const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
         defer self.register_manager.unlockReg(reg_lock);
 
@@ -1,6 +1,23 @@
 const bits = @import("bits.zig");
 const Register = bits.Register;
+const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 
 pub const callee_preserved_regs = [_]Register{
     .s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
 };
+
+const allocatable_registers = callee_preserved_regs;
+pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
+
+// Register classes
+const RegisterBitSet = RegisterManager.RegisterBitSet;
+pub const RegisterClass = struct {
+    pub const gp: RegisterBitSet = blk: {
+        var set = RegisterBitSet.initEmpty();
+        set.setRangeValue(.{
+            .start = 0,
+            .end = callee_preserved_regs.len,
+        }, true);
+        break :blk set;
+    };
+};
 
@@ -21,9 +21,6 @@ const Type = @import("../../type.zig").Type;
 const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError;
 const FnResult = @import("../../codegen.zig").FnResult;
 const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
-const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
-const RegisterManager = RegisterManagerFn(Self, Register, &abi.allocatable_regs);
-const RegisterLock = RegisterManager.RegisterLock;
 
 const build_options = @import("build_options");
 
@@ -31,7 +28,10 @@ const bits = @import("bits.zig");
 const abi = @import("abi.zig");
 const Instruction = bits.Instruction;
 const ShiftWidth = Instruction.ShiftWidth;
+const RegisterManager = abi.RegisterManager;
+const RegisterLock = RegisterManager.RegisterLock;
 const Register = bits.Register;
+const gp = abi.RegisterClass.gp;
 
 const Self = @This();
 
@@ -1613,7 +1613,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     if (reg_ok) {
         // Make sure the type can fit in a register before we try to allocate one.
         if (abi_size <= 8) {
-            if (self.register_manager.tryAllocReg(inst, .{})) |reg| {
+            if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
                 return MCValue{ .register = reg };
             }
         }
@@ -1854,7 +1854,7 @@ fn binOpImmediate(
     ).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
 
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
@@ -1873,10 +1873,10 @@ fn binOpImmediate(
     )) {
         break :blk lhs_reg;
     } else {
-        break :blk try self.register_manager.allocReg(md.inst, .{});
+        break :blk try self.register_manager.allocReg(md.inst, gp);
     }
     } else blk: {
-        break :blk try self.register_manager.allocReg(null, .{});
+        break :blk try self.register_manager.allocReg(null, gp);
     },
     };
 
@@ -1953,7 +1953,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.lhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
     break :blk reg;
@@ -1966,7 +1966,7 @@ fn binOpRegister(
         break :inst Air.refToIndex(md.rhs).?;
     } else null;
 
-    const reg = try self.register_manager.allocReg(track_inst, .{});
+    const reg = try self.register_manager.allocReg(track_inst, gp);
     if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
 
     break :blk reg;
@@ -1981,10 +1981,10 @@ fn binOpRegister(
         } else if (rhs_is_register and self.reuseOperand(md.inst, md.rhs, 1, rhs)) {
             break :blk rhs_reg;
         } else {
-            break :blk try self.register_manager.allocReg(md.inst, .{});
+            break :blk try self.register_manager.allocReg(md.inst, gp);
         }
     } else blk: {
-        break :blk try self.register_manager.allocReg(null, .{});
+        break :blk try self.register_manager.allocReg(null, gp);
     },
     };
 
@@ -2077,7 +2077,7 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const reg = try self.register_manager.allocReg(null, .{});
+    const reg = try self.register_manager.allocReg(null, gp);
     try self.genSetReg(ty, reg, mcv);
     return reg;
 }
@@ -2364,7 +2364,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
     });
     } else {
         // Need to allocate a temporary register to load 64-bit immediates.
-        const tmp_reg = try self.register_manager.allocReg(null, .{});
+        const tmp_reg = try self.register_manager.allocReg(null, gp);
 
         try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) });
         try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) });
@@ -2478,7 +2478,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
     };
     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
 
-    const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, .{});
+    const regs = try self.register_manager.allocRegs(4, .{ null, null, null, null }, gp);
     const regs_locks = self.register_manager.lockRegsAssumeUnused(4, regs);
     defer for (regs_locks) |reg| {
         self.register_manager.unlockReg(reg);
@@ -2717,14 +2717,14 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     },
     .stack_offset => |off| {
         if (elem_size <= 8) {
-            const tmp_reg = try self.register_manager.allocReg(null, .{});
+            const tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
 
             try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
             try self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg });
         } else {
-            const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, .{});
+            const regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp);
             const regs_locks = self.register_manager.lockRegsAssumeUnused(3, regs);
             defer for (regs_locks) |reg| {
                 self.register_manager.unlockReg(reg);
 
@@ -1,5 +1,6 @@
 const bits = @import("bits.zig");
 const Register = bits.Register;
+const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
 
 // SPARCv9 stack constants.
 // See: Registers and the Stack Frame, page 3P-8, SCD 2.4.1.
@@ -21,7 +22,7 @@ pub const stack_save_area = 176;
 pub const caller_preserved_regs = [_]Register{ .o0, .o1, .o2, .o3, .o4, .o5, .g1, .g4, .g5 };
 
 // Try to allocate i, l, o, then g sets of registers, in order of priority.
-pub const allocatable_regs = [_]Register{
+const allocatable_regs = [_]Register{
     // zig fmt: off
     .@"i0", .@"i1", .@"i2", .@"i3", .@"i4", .@"i5",
     .l0, .l1, .l2, .l3, .l4, .l5, .l6, .l7,
@@ -35,3 +36,18 @@ pub const c_abi_int_param_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2"
 
 pub const c_abi_int_return_regs_caller_view = [_]Register{ .o0, .o1, .o2, .o3 };
 pub const c_abi_int_return_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2", .@"i3" };
+
+pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_regs);
+
+// Register classes
+const RegisterBitSet = RegisterManager.RegisterBitSet;
+pub const RegisterClass = struct {
+    pub const gp: RegisterBitSet = blk: {
+        var set = RegisterBitSet.initEmpty();
+        set.setRangeValue(.{
+            .start = 0,
+            .end = allocatable_regs.len,
+        }, true);
+        break :blk set;
+    };
+};
 
@@ -884,9 +884,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     if (self.intrinsicsAllowed(elem_ty)) {
         const ptr_bytes: u64 = 32;
         if (abi_size <= ptr_bytes) {
-            if (self.register_manager.tryAllocReg(inst, .{
-                .selector_mask = sse,
-            })) |reg| {
+            if (self.register_manager.tryAllocReg(inst, sse)) |reg| {
                 return MCValue{ .register = registerAlias(reg, abi_size) };
             }
         }
@@ -899,9 +897,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
     const ptr_bits = self.target.cpu.arch.ptrBitWidth();
     const ptr_bytes: u64 = @divExact(ptr_bits, 8);
     if (abi_size <= ptr_bytes) {
-        if (self.register_manager.tryAllocReg(inst, .{
-            .selector_mask = gp,
-        })) |reg| {
+        if (self.register_manager.tryAllocReg(inst, gp)) |reg| {
             return MCValue{ .register = registerAlias(reg, abi_size) };
         }
     }
@@ -972,16 +968,14 @@ pub fn spillRegisters(self: *Self, comptime count: comptime_int, registers: [cou
 /// allocated. A second call to `copyToTmpRegister` may return the same register.
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
-    const mask: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
+    const reg_class: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
         .Float => blk: {
            if (self.intrinsicsAllowed(ty)) break :blk sse;
             return self.fail("TODO copy {} to register", .{ty.fmtDebug()});
         },
         else => gp,
     };
-    const reg: Register = try self.register_manager.allocReg(null, .{
-        .selector_mask = mask,
-    });
+    const reg: Register = try self.register_manager.allocReg(null, reg_class);
     try self.genSetReg(ty, reg, mcv);
     return reg;
 }
@@ -991,16 +985,14 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
 /// This can have a side effect of spilling instructions to the stack to free up a register.
 /// WARNING make sure that the allocated register matches the returned MCValue from an instruction!
 fn copyToRegisterWithInstTracking(self: *Self, reg_owner: Air.Inst.Index, ty: Type, mcv: MCValue) !MCValue {
-    const mask: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
+    const reg_class: RegisterManager.RegisterBitSet = switch (ty.zigTypeTag()) {
         .Float => blk: {
             if (self.intrinsicsAllowed(ty)) break :blk sse;
             return self.fail("TODO copy {} to register", .{ty.fmtDebug()});
         },
         else => gp,
     };
-    const reg: Register = try self.register_manager.allocReg(reg_owner, .{
-        .selector_mask = mask,
-    });
+    const reg: Register = try self.register_manager.allocReg(reg_owner, reg_class);
     try self.genSetReg(ty, reg, mcv);
     return MCValue{ .register = reg };
 }
@@ -1056,9 +1048,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
     };
     defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const reg = try self.register_manager.allocReg(inst, .{
-        .selector_mask = gp,
-    });
+    const reg = try self.register_manager.allocReg(inst, gp);
     try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
     try self.genSetReg(operand_ty, reg, operand);
     break :blk MCValue{ .register = reg };
@@ -1413,9 +1403,7 @@ fn genSetStackTruncatedOverflowCompare(
     .unsigned => ty,
     };
 
-    const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, .{
-        .selector_mask = gp,
-    });
+    const temp_regs = try self.register_manager.allocRegs(3, .{ null, null, null }, gp);
     const temp_regs_locks = self.register_manager.lockRegsAssumeUnused(3, temp_regs);
     defer for (temp_regs_locks) |rreg| {
         self.register_manager.unlockReg(rreg);
@@ -2077,9 +2065,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
     const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
     defer self.register_manager.unlockReg(offset_reg_lock);
 
-    const addr_reg = try self.register_manager.allocReg(null, .{
-        .selector_mask = gp,
-    });
+    const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (slice_mcv) {
         .stack_offset => |off| {
             // mov reg, [rbp - 8]
@@ -2158,9 +2144,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
     defer self.register_manager.unlockReg(offset_reg_lock);
 
-    const addr_reg = try self.register_manager.allocReg(null, .{
-        .selector_mask = gp,
-    });
+    const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (array) {
         .register => {
             const off = @intCast(i32, try self.allocMem(
@@ -2527,7 +2511,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
     },
     .stack_offset => |off| {
         if (abi_size <= 8) {
-            const tmp_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+            const tmp_reg = try self.register_manager.allocReg(null, gp);
             try self.load(.{ .register = tmp_reg }, ptr, ptr_ty);
             return self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg }, .{});
         }
@@ -2728,7 +2712,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
     };
     defer if (value_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const addr_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+    const addr_reg = try self.register_manager.allocReg(null, gp);
     const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
     defer self.register_manager.unlockReg(addr_reg_lock);
 
@@ -2800,7 +2784,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
     .memory,
     => {
         if (abi_size <= 8) {
-            const tmp_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+            const tmp_reg = try self.register_manager.allocReg(null, gp);
             const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
             defer self.register_manager.unlockReg(tmp_reg_lock);
 
@@ -2918,7 +2902,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
     if (can_reuse_operand) {
         break :blk reg;
     } else {
-        const result_reg = try self.register_manager.allocReg(inst, .{ .selector_mask = gp });
+        const result_reg = try self.register_manager.allocReg(inst, gp);
         try self.genSetReg(ptr_ty, result_reg, mcv);
         break :blk result_reg;
     }
@@ -3019,7 +3003,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
     defer self.register_manager.unlockReg(reg_lock);
 
-    const dst_reg = try self.register_manager.allocReg(inst, .{ .selector_mask = gp });
+    const dst_reg = try self.register_manager.allocReg(inst, gp);
     const flags: u2 = switch (mcv) {
         .register_overflow_unsigned => 0b10,
         .register_overflow_signed => 0b00,
@@ -5428,7 +5412,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
 
     const overflow_bit_ty = ty.structFieldType(1);
     const overflow_bit_offset = ty.structFieldOffset(1, self.target.*);
-    const tmp_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+    const tmp_reg = try self.register_manager.allocReg(null, gp);
     const flags: u2 = switch (mcv) {
         .register_overflow_unsigned => 0b10,
         .register_overflow_signed => 0b00,
@@ -5656,7 +5640,7 @@ fn genInlineMemcpy(
     null;
     defer if (dsbase_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_addr_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+    const dst_addr_reg = try self.register_manager.allocReg(null, gp);
     switch (dst_ptr) {
         .memory,
         .got_load,
@@ -5691,7 +5675,7 @@ fn genInlineMemcpy(
     const dst_addr_reg_lock = self.register_manager.lockRegAssumeUnused(dst_addr_reg);
     defer self.register_manager.unlockReg(dst_addr_reg_lock);
 
-    const src_addr_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+    const src_addr_reg = try self.register_manager.allocReg(null, gp);
     switch (src_ptr) {
         .memory,
         .got_load,
@@ -5726,9 +5710,7 @@ fn genInlineMemcpy(
     const src_addr_reg_lock = self.register_manager.lockRegAssumeUnused(src_addr_reg);
     defer self.register_manager.unlockReg(src_addr_reg_lock);
 
-    const regs = try self.register_manager.allocRegs(2, .{ null, null }, .{
-        .selector_mask = gp,
-    });
+    const regs = try self.register_manager.allocRegs(2, .{ null, null }, gp);
     const count_reg = regs[0].to64();
     const tmp_reg = regs[1].to8();
 
@@ -5828,7 +5810,7 @@ fn genInlineMemset(
     const rax_lock = self.register_manager.lockRegAssumeUnused(.rax);
     defer self.register_manager.unlockReg(rax_lock);
 
-    const addr_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+    const addr_reg = try self.register_manager.allocReg(null, gp);
     switch (dst_ptr) {
         .memory,
         .got_load,
@@ -6087,7 +6069,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
     => {
         switch (ty.zigTypeTag()) {
             .Float => {
-                const base_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+                const base_reg = try self.register_manager.allocReg(null, gp);
                 try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
 
                 if (self.intrinsicsAllowed(ty)) {
@@ -6130,7 +6112,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
     },
     .memory => |x| switch (ty.zigTypeTag()) {
         .Float => {
-            const base_reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+            const base_reg = try self.register_manager.allocReg(null, gp);
             try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
 
             if (self.intrinsicsAllowed(ty)) {
@@ -6461,7 +6443,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
     const src: MCValue = blk: {
         switch (src_ptr) {
             .got_load, .direct_load, .memory => {
-                const reg = try self.register_manager.allocReg(null, .{ .selector_mask = gp });
+                const reg = try self.register_manager.allocReg(null, gp);
                 try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
                 _ = try self.addInst(.{
                     .tag = .mov,
 
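The x86_64 backend above is the one place where more than one class exists (`gp` and `sse`), so its copy helpers pick the class from the value's type, as in `copyToTmpRegister`. A standalone sketch of that dispatch, with stand-in names rather than the backend's real bitsets:

```zig
const std = @import("std");

// Stand-ins for the backend's gp/sse register-class bitsets.
const RegisterClass = enum { gp, sse };

// Mirror of the switch in copyToTmpRegister: floats go to SSE
// registers, everything else to general-purpose registers.
fn classForType(comptime T: type) RegisterClass {
    return switch (@typeInfo(T)) {
        .Float => .sse,
        else => .gp,
    };
}

test "floats select the sse class" {
    try std.testing.expectEqual(RegisterClass.sse, classForType(f64));
    try std.testing.expectEqual(RegisterClass.gp, classForType(u32));
}
```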
@ -57,6 +57,11 @@ pub fn RegisterManager(
|
||||
return @fieldParentPtr(Function, "register_manager", self);
|
||||
}
|
||||
|
||||
fn excludeRegister(reg: Register, register_class: RegisterBitSet) bool {
|
||||
const index = indexOfRegIntoTracked(reg) orelse return true;
|
||||
return !register_class.isSet(index);
|
||||
}
|
||||
|
||||
fn markRegAllocated(self: *Self, reg: Register) void {
|
||||
const index = indexOfRegIntoTracked(reg) orelse return;
|
||||
self.allocated_registers.set(index);
|
||||
@ -167,10 +172,6 @@ pub fn RegisterManager(
|
||||
return self.locked_registers.count() > 0;
|
||||
}
|
||||
|
||||
const AllocOpts = struct {
|
||||
selector_mask: ?RegisterBitSet = null,
|
||||
};
|
||||
|
||||
/// Allocates a specified number of registers, optionally
|
||||
/// tracking them. Returns `null` if not enough registers are
|
||||
/// free.
|
||||
@@ -178,14 +179,12 @@ pub fn RegisterManager(
             self: *Self,
             comptime count: comptime_int,
             insts: [count]?Air.Inst.Index,
-            opts: AllocOpts,
+            register_class: RegisterBitSet,
         ) ?[count]Register {
             comptime assert(count > 0 and count <= tracked_registers.len);
 
-            const available_registers = opts.selector_mask orelse RegisterBitSet.initFull();
-
             var free_and_not_locked_registers = self.free_registers;
-            free_and_not_locked_registers.setIntersection(available_registers);
+            free_and_not_locked_registers.setIntersection(register_class);
 
             var unlocked_registers = self.locked_registers;
             unlocked_registers.toggleAll();
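With the mask non-optional, the candidate set becomes a plain three-way intersection: free ∩ class ∩ not-locked, with no "all registers" default. A compilable sketch of that computation in isolation (the 8-register width is a hypothetical stand-in for tracked_registers.len):

    const std = @import("std");

    const RegisterBitSet = std.StaticBitSet(8); // hypothetical tracked-register count

    // Candidate registers = free ∩ class ∩ complement(locked).
    fn candidateSet(free: RegisterBitSet, locked: RegisterBitSet, class: RegisterBitSet) RegisterBitSet {
        var result = free;
        result.setIntersection(class); // the class mask is now always applied
        var unlocked = locked;
        unlocked.toggleAll(); // complement of the locked set
        result.setIntersection(unlocked);
        return result;
    }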
@@ -198,6 +197,7 @@ pub fn RegisterManager(
             var i: usize = 0;
             for (tracked_registers) |reg| {
                 if (i >= count) break;
+                if (excludeRegister(reg, register_class)) continue;
                 if (self.isRegLocked(reg)) continue;
                 if (!self.isRegFree(reg)) continue;
 
@@ -223,8 +223,8 @@ pub fn RegisterManager(
         /// Allocates a register and optionally tracks it with a
         /// corresponding instruction. Returns `null` if all registers
         /// are allocated.
-        pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, opts: AllocOpts) ?Register {
-            return if (tryAllocRegs(self, 1, .{inst}, opts)) |regs| regs[0] else null;
+        pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, register_class: RegisterBitSet) ?Register {
+            return if (tryAllocRegs(self, 1, .{inst}, register_class)) |regs| regs[0] else null;
         }
 
         /// Allocates a specified number of registers, optionally
@@ -234,18 +234,16 @@ pub fn RegisterManager(
             self: *Self,
             comptime count: comptime_int,
             insts: [count]?Air.Inst.Index,
-            opts: AllocOpts,
+            register_class: RegisterBitSet,
         ) AllocateRegistersError![count]Register {
             comptime assert(count > 0 and count <= tracked_registers.len);
 
-            const available_registers = opts.selector_mask orelse RegisterBitSet.initFull();
-
             var locked_registers = self.locked_registers;
-            locked_registers.setIntersection(available_registers);
+            locked_registers.setIntersection(register_class);
 
-            if (count > available_registers.count() - locked_registers.count()) return error.OutOfRegisters;
+            if (count > register_class.count() - locked_registers.count()) return error.OutOfRegisters;
 
-            const result = self.tryAllocRegs(count, insts, opts) orelse blk: {
+            const result = self.tryAllocRegs(count, insts, register_class) orelse blk: {
                 // We'll take over the first count registers. Spill
                 // the instructions that were previously there to a
                 // stack allocation.
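The early-out now counts capacity inside the class: at most register_class.count() registers can ever be produced, minus those members of the class that are currently locked; locked registers outside the class are irrelevant. A standalone sketch of that check (same hypothetical 8-register width as above):

    const std = @import("std");

    const RegisterBitSet = std.StaticBitSet(8); // hypothetical tracked-register count

    // The spill path can only succeed when the class still holds enough
    // unlocked registers.
    fn enoughInClass(count: usize, class: RegisterBitSet, locked: RegisterBitSet) bool {
        var locked_in_class = locked;
        locked_in_class.setIntersection(class);
        return count <= class.count() - locked_in_class.count();
    }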
@@ -253,6 +251,7 @@ pub fn RegisterManager(
             var i: usize = 0;
             for (tracked_registers) |reg| {
                 if (i >= count) break;
+                if (excludeRegister(reg, register_class)) continue;
                 if (self.isRegLocked(reg)) continue;
 
                 regs[i] = reg;
@@ -288,8 +287,12 @@ pub fn RegisterManager(
 
         /// Allocates a register and optionally tracks it with a
         /// corresponding instruction.
-        pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, opts: AllocOpts) AllocateRegistersError!Register {
-            return (try self.allocRegs(1, .{inst}, opts))[0];
+        pub fn allocReg(
+            self: *Self,
+            inst: ?Air.Inst.Index,
+            register_class: RegisterBitSet,
+        ) AllocateRegistersError!Register {
+            return (try self.allocRegs(1, .{inst}, register_class))[0];
         }
 
         /// Spills the register if it is currently allocated. If a
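allocReg forwards to allocRegs and so shares its error set; a caller that can fall back when a class is exhausted would switch on the error. A hedged sketch only: manager, inst, reg_class, and spillToStack are stand-ins for whatever the calling backend provides, not names from this patch.

    // Allocate from the class, falling back to memory when the class has
    // no allocatable registers left; other errors are forwarded unchanged.
    const reg = manager.allocReg(inst, reg_class) catch |err| switch (err) {
        error.OutOfRegisters => return try spillToStack(inst), // hypothetical fallback
        else => |e| return e,
    };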
@@ -374,11 +377,15 @@ const MockRegister2 = enum(u2) {
 fn MockFunction(comptime Register: type) type {
     return struct {
         allocator: Allocator,
-        register_manager: RegisterManager(Self, Register, &Register.allocatable_registers) = .{},
+        register_manager: RegisterManagerT = .{},
         spilled: std.ArrayListUnmanaged(Register) = .{},
 
         const Self = @This();
 
+        const RegisterManagerT = RegisterManager(Self, Register, &Register.allocatable_registers);
+
+        pub const reg_class: RegisterManagerT.RegisterBitSet = RegisterManagerT.RegisterBitSet.initFull();
+
         pub fn deinit(self: *Self) void {
             self.spilled.deinit(self.allocator);
         }
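The mocks use initFull(), so every tracked register is allocatable and the tests below exercise the new parameter without changing behavior. A narrower class is built the same way; a purely hypothetical sketch that admits only even-numbered registers:

    const std = @import("std");

    fn evenRegistersOnly(comptime n: usize) std.StaticBitSet(n) {
        var class = std.StaticBitSet(n).initEmpty();
        var i: usize = 0;
        while (i < n) : (i += 2) class.set(i); // r0, r2, r4, ...
        return class;
    }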
@@ -423,10 +430,20 @@ test "tryAllocReg: no spilling" {
     defer function.deinit();
 
     const mock_instruction: Air.Inst.Index = 1;
+    const reg_class = MockFunction1.reg_class;
 
-    try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(mock_instruction, .{}));
-    try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(mock_instruction, .{}));
-    try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(mock_instruction, .{}));
+    try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(
+        mock_instruction,
+        reg_class,
+    ));
+    try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(
+        mock_instruction,
+        reg_class,
+    ));
+    try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(
+        mock_instruction,
+        reg_class,
+    ));
 
     try expect(function.register_manager.isRegAllocated(.r2));
     try expect(function.register_manager.isRegAllocated(.r3));
@@ -451,17 +468,30 @@ test "allocReg: spilling" {
     defer function.deinit();
 
     const mock_instruction: Air.Inst.Index = 1;
+    const reg_class = MockFunction1.reg_class;
 
-    try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, .{}));
-    try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, .{}));
+    try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(
+        mock_instruction,
+        reg_class,
+    ));
+    try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(
+        mock_instruction,
+        reg_class,
+    ));
 
     // Spill a register
-    try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, .{}));
+    try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(
+        mock_instruction,
+        reg_class,
+    ));
     try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
 
     // No spilling necessary
     function.register_manager.freeReg(.r3);
-    try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, .{}));
+    try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(
+        mock_instruction,
+        reg_class,
+    ));
     try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items);
 
     // Locked registers
@@ -470,7 +500,10 @@ test "allocReg: spilling" {
         const lock = function.register_manager.lockReg(.r2);
         defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
-        try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, .{}));
+        try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(
+            mock_instruction,
+            reg_class,
+        ));
     }
     try expect(!function.register_manager.lockedRegsExist());
 }
@@ -483,7 +516,13 @@ test "tryAllocRegs" {
     };
     defer function.deinit();
 
-    try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, .{}).?);
+    const reg_class = MockFunction2.reg_class;
+
+    try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, function.register_manager.tryAllocRegs(
+        3,
+        .{ null, null, null },
+        reg_class,
+    ).?);
 
     try expect(function.register_manager.isRegAllocated(.r0));
     try expect(function.register_manager.isRegAllocated(.r1));
@@ -498,7 +537,11 @@ test "tryAllocRegs" {
         const lock = function.register_manager.lockReg(.r1);
         defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
-        try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(3, .{ null, null, null }, .{}).?);
+        try expectEqual([_]MockRegister2{ .r0, .r2, .r3 }, function.register_manager.tryAllocRegs(
+            3,
+            .{ null, null, null },
+            reg_class,
+        ).?);
     }
     try expect(!function.register_manager.lockedRegsExist());
 
@@ -518,6 +561,8 @@ test "allocRegs: normal usage" {
     };
     defer function.deinit();
 
+    const reg_class = MockFunction2.reg_class;
+
     {
         const result_reg: MockRegister2 = .r1;
 
@@ -537,7 +582,7 @@ test "allocRegs: normal usage" {
         const lock = function.register_manager.lockReg(result_reg);
         defer if (lock) |reg| function.register_manager.unlockReg(reg);
 
-        const regs = try function.register_manager.allocRegs(2, .{ null, null }, .{});
+        const regs = try function.register_manager.allocRegs(2, .{ null, null }, reg_class);
         try function.genAdd(result_reg, regs[0], regs[1]);
     }
 }
@@ -552,6 +597,8 @@ test "allocRegs: selectively reducing register pressure" {
     };
     defer function.deinit();
 
+    const reg_class = MockFunction2.reg_class;
+
     {
         const result_reg: MockRegister2 = .r1;
 
@@ -559,12 +606,12 @@ test "allocRegs: selectively reducing register pressure" {
 
         // Here, we don't defer unlock because we manually unlock
         // after genAdd
-        const regs = try function.register_manager.allocRegs(2, .{ null, null }, .{});
+        const regs = try function.register_manager.allocRegs(2, .{ null, null }, reg_class);
 
         try function.genAdd(result_reg, regs[0], regs[1]);
         function.register_manager.unlockReg(lock.?);
 
-        const extra_summand_reg = try function.register_manager.allocReg(null, .{});
+        const extra_summand_reg = try function.register_manager.allocReg(null, reg_class);
         try function.genAdd(result_reg, result_reg, extra_summand_reg);
     }
 }