stage2 register manager: Use an array instead of a hashmap for tracking
allocated registers

commit 3a55cda14d
parent 37b05742ff
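
In short: the per-function `registers` hashmap becomes a fixed-size array with one optional-instruction slot per callee-preserved register, indexed by `reg.allocIndex()`. Since the array needs no allocator, the `ensureCapacity` calls before every allocation and the `deinit` at function end disappear throughout the diff. A minimal sketch of the idea, assuming a hypothetical two-register target (`Inst`, `Register`, and this `freeReg` are illustrative stand-ins for `ir.Inst` and the real `RegisterManager` methods shown below):

const std = @import("std");

const Inst = struct {}; // hypothetical stand-in for ir.Inst

const Register = enum {
    r2,
    r3,

    // Maps a register to its slot in the tracking array;
    // null would mean "not tracked by the register manager".
    fn allocIndex(reg: Register) ?usize {
        return switch (reg) {
            .r2 => 0,
            .r3 => 1,
        };
    }
};

const callee_preserved_regs = [_]Register{ .r2, .r3 };

// Before: registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{},
// After: a statically sized array, so tracking can never fail or leak.
var registers: [callee_preserved_regs.len]?*Inst = [_]?*Inst{null} ** callee_preserved_regs.len;

fn freeReg(reg: Register) void {
    const index = reg.allocIndex() orelse return; // ignore untracked registers
    registers[index] = null; // was: _ = registers.remove(reg);
}

test "array-based register tracking" {
    var inst: Inst = .{};
    registers[Register.r2.allocIndex().?] = &inst;
    freeReg(.r2);
    std.testing.expect(registers[0] == null);
}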
@@ -449,7 +449,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .rbrace_src = src_data.rbrace_src,
                 .source = src_data.source,
             };
-            defer function.register_manager.deinit(bin_file.allocator);
             defer function.stack.deinit(bin_file.allocator);
             defer function.exitlude_jump_relocs.deinit(bin_file.allocator);

@@ -779,8 +778,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     branch.inst_table.putAssumeCapacity(inst, .dead);
                     switch (prev_value) {
                         .register => |reg| {
-                            const canon_reg = toCanonicalReg(reg);
-                            self.register_manager.freeReg(canon_reg);
+                            // TODO separate architectures with registers from
+                            // stack-based architectures (spu_2)
+                            if (callee_preserved_regs.len > 0) {
+                                const canon_reg = toCanonicalReg(reg);
+                                self.register_manager.freeReg(canon_reg);
+                            }
                         },
                         else => {}, // TODO process stack allocation death
                     }
@@ -920,9 +923,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const ptr_bits = arch.ptrBitWidth();
             const ptr_bytes: u64 = @divExact(ptr_bits, 8);
             if (abi_size <= ptr_bytes) {
-                try self.register_manager.registers.ensureCapacity(self.gpa, self.register_manager.registers.count() + 1);
-                if (self.register_manager.tryAllocReg(inst)) |reg| {
-                    return MCValue{ .register = registerAlias(reg, abi_size) };
+                // TODO separate architectures with registers from
+                // stack-based architectures (spu_2)
+                if (callee_preserved_regs.len > 0) {
+                    if (self.register_manager.tryAllocReg(inst)) |reg| {
+                        return MCValue{ .register = registerAlias(reg, abi_size) };
+                    }
                 }
             }
         }
@@ -952,8 +958,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
         /// `reg_owner` is the instruction that gets associated with the register in the register table.
         /// This can have a side effect of spilling instructions to the stack to free up a register.
         fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue {
-            try self.register_manager.registers.ensureCapacity(self.gpa, @intCast(u32, self.register_manager.registers.count() + 1));
-
             const reg = try self.register_manager.allocReg(reg_owner);
             try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv);
             return MCValue{ .register = reg };
@@ -1240,10 +1244,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .register => |reg| {
                     // If it's in the registers table, need to associate the register with the
                     // new instruction.
-                    if (self.register_manager.registers.getEntry(toCanonicalReg(reg))) |entry| {
-                        entry.value = inst;
+                    // TODO separate architectures with registers from
+                    // stack-based architectures (spu_2)
+                    if (callee_preserved_regs.len > 0) {
+                        if (reg.allocIndex()) |index| {
+                            if (!self.register_manager.isRegFree(reg)) {
+                                self.register_manager.registers[index] = inst;
+                            }
+                        }
+                        log.debug("reusing {} => {*}", .{ reg, inst });
                     }
-                    log.debug("reusing {} => {*}", .{ reg, inst });
                 },
                 .stack_offset => |off| {
                     log.debug("reusing stack offset {} => {*}", .{ off, inst });
@@ -1738,6 +1748,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const arg_index = self.arg_index;
             self.arg_index += 1;

+            // TODO separate architectures with registers from
+            // stack-based architectures (spu_2)
             if (callee_preserved_regs.len == 0) {
                 return self.fail(inst.base.src, "TODO implement Register enum for {}", .{self.target.cpu.arch});
             }
@@ -1769,7 +1781,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {

             switch (mcv) {
                 .register => |reg| {
-                    try self.register_manager.registers.ensureCapacity(self.gpa, self.register_manager.registers.count() + 1);
                     self.register_manager.getRegAssumeFree(toCanonicalReg(reg), &inst.base);
                 },
                 else => {},
@@ -2075,7 +2086,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 switch (mc_arg) {
                     .none => continue,
                     .register => |reg| {
-                        try self.register_manager.getRegWithoutTracking(reg);
+                        // TODO prevent this macho if block to be generated for all archs
+                        switch (arch) {
+                            .x86_64, .aarch64 => try self.register_manager.getRegWithoutTracking(reg),
+                            else => unreachable,
+                        }
                         try self.genSetReg(arg.src, arg.ty, reg, arg_mcv);
                     },
                     .stack_offset => {
@@ -2397,8 +2412,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             const parent_free_registers = self.register_manager.free_registers;
             var parent_stack = try self.stack.clone(self.gpa);
             defer parent_stack.deinit(self.gpa);
-            var parent_registers = try self.register_manager.registers.clone(self.gpa);
-            defer parent_registers.deinit(self.gpa);
+            const parent_registers = self.register_manager.registers;

             try self.branch_stack.append(.{});

@@ -2414,9 +2428,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             var saved_then_branch = self.branch_stack.pop();
             defer saved_then_branch.deinit(self.gpa);

-            self.register_manager.registers.deinit(self.gpa);
-            self.register_manager.registers = parent_registers;
-            parent_registers = .{};
+            self.register_manager.registers = parent_registers;

             self.stack.deinit(self.gpa);
             self.stack = parent_stack;

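A side effect visible in the two hunks above: a fixed-size array is a value type in Zig, so saving and restoring the register table around a branch becomes plain assignment instead of a fallible `clone` paired with a `deinit`. A small illustrative test of that property (the `Inst` placeholder is hypothetical):

const std = @import("std");

const Inst = struct {}; // hypothetical stand-in for ir.Inst

test "array register tables copy by value" {
    var inst: Inst = .{};
    var registers: [4]?*Inst = [_]?*Inst{null} ** 4;

    const parent_registers = registers; // value copy; no allocator, cannot fail
    registers[0] = &inst; // branch-local mutation leaves the saved copy intact
    registers = parent_registers; // restore the parent branch's state

    std.testing.expect(registers[0] == null);
}
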
@@ -16,7 +16,7 @@ pub fn RegisterManager(
 ) type {
     return struct {
-        /// The key must be canonical register.
-        registers: std.AutoHashMapUnmanaged(Register, *ir.Inst) = .{},
+        registers: [callee_preserved_regs.len]?*ir.Inst = [_]?*ir.Inst{null} ** callee_preserved_regs.len,
         free_registers: FreeRegInt = math.maxInt(FreeRegInt),
         /// Tracks all registers allocated in the course of this function
         allocated_registers: FreeRegInt = 0,
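The new array complements the existing `free_registers` bitmask: the array records which instruction owns each register, while one bit per register answers whether it is free. A sketch of the bit manipulation behind `markRegUsed`/`markRegFree`, assuming four callee-preserved registers (the `shift` value stands in for what `reg.allocIndex()` would return):

const std = @import("std");
const math = std.math;

const FreeRegInt = std.meta.Int(.unsigned, 4); // one bit per callee-preserved register

test "free_registers bitmask" {
    var free_registers: FreeRegInt = math.maxInt(FreeRegInt); // all registers start free

    const shift: u2 = 1; // would come from reg.allocIndex()
    free_registers &= ~(@as(FreeRegInt, 1) << shift); // markRegUsed
    std.testing.expect(free_registers & @as(FreeRegInt, 1) << shift == 0);

    free_registers |= @as(FreeRegInt, 1) << shift; // markRegFree
    std.testing.expect(free_registers & @as(FreeRegInt, 1) << shift != 0);
}
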
@@ -31,14 +31,6 @@ pub fn RegisterManager(
             return @fieldParentPtr(Function, "register_manager", self);
         }

-        pub fn deinit(self: *Self, allocator: *Allocator) void {
-            self.registers.deinit(allocator);
-        }
-
-        fn isTracked(reg: Register) bool {
-            return reg.allocIndex() != null;
-        }
-
         fn markRegUsed(self: *Self, reg: Register) void {
             if (FreeRegInt == u0) return;
             const index = reg.allocIndex() orelse return;
@@ -73,13 +65,13 @@ pub fn RegisterManager(
             return self.allocated_registers & @as(FreeRegInt, 1) << shift != 0;
         }

-        /// Before calling, must ensureCapacity + count on self.registers.
         /// Returns `null` if all registers are allocated.
         pub fn tryAllocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ?[count]Register {
             if (self.tryAllocRegsWithoutTracking(count)) |regs| {
                 for (regs) |reg, i| {
+                    const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
+                    self.registers[index] = insts[i];
                     self.markRegUsed(reg);
-                    self.registers.putAssumeCapacityNoClobber(reg, insts[i]);
                 }

                 return regs;
@@ -88,13 +80,11 @@ pub fn RegisterManager(
             }
         }

-        /// Before calling, must ensureCapacity + 1 on self.registers.
         /// Returns `null` if all registers are allocated.
         pub fn tryAllocReg(self: *Self, inst: *ir.Inst) ?Register {
             return if (tryAllocRegs(self, 1, .{inst})) |regs| regs[0] else null;
         }

-        /// Before calling, must ensureCapacity + count on self.registers.
         pub fn allocRegs(self: *Self, comptime count: comptime_int, insts: [count]*ir.Inst) ![count]Register {
             comptime assert(count > 0 and count <= callee_preserved_regs.len);

@@ -106,24 +96,22 @@ pub fn RegisterManager(
                 std.mem.copy(Register, &regs, callee_preserved_regs[0..count]);

                 for (regs) |reg, i| {
+                    const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
                     if (self.isRegFree(reg)) {
                         self.markRegUsed(reg);
-                        self.registers.putAssumeCapacityNoClobber(reg, insts[i]);
                     } else {
-                        const regs_entry = self.registers.getEntry(reg).?;
-                        const spilled_inst = regs_entry.value;
-                        regs_entry.value = insts[i];
+                        const spilled_inst = self.registers[index].?;
                         try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
                     }
+                    self.registers[index] = insts[i];
                 }

                 break :blk regs;
             };
         }

-        /// Before calling, must ensureCapacity + 1 on self.registers.
         pub fn allocReg(self: *Self, inst: *ir.Inst) !Register {
-            return (try allocRegs(self, 1, .{inst}))[0];
+            return (try self.allocRegs(1, .{inst}))[0];
         }

         /// Does not track the registers.
@@ -150,37 +138,48 @@ pub fn RegisterManager(
         /// Does not track the register.
         /// Returns `null` if all registers are allocated.
         pub fn tryAllocRegWithoutTracking(self: *Self) ?Register {
-            return if (tryAllocRegsWithoutTracking(self, 1)) |regs| regs[0] else null;
+            return if (self.tryAllocRegsWithoutTracking(1)) |regs| regs[0] else null;
         }

+        /// Does not track the registers
+        pub fn allocRegsWithoutTracking(self: *Self, comptime count: comptime_int) ![count]Register {
+            return self.tryAllocRegsWithoutTracking(count) orelse blk: {
+                // We'll take over the first count registers. Spill
+                // the instructions that were previously there to a
+                // stack allocations.
+                var regs: [count]Register = undefined;
+                std.mem.copy(Register, &regs, callee_preserved_regs[0..count]);
+
+                for (regs) |reg, i| {
+                    const index = reg.allocIndex().?; // allocIndex() on a callee-preserved reg should never return null
+                    if (!self.isRegFree(reg)) {
+                        const spilled_inst = self.registers[index].?;
+                        try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
+                        self.registers[index] = null;
+                        self.markRegFree(reg);
+                    }
+                }
+
+                break :blk regs;
+            };
+        }
+
         /// Does not track the register.
         pub fn allocRegWithoutTracking(self: *Self) !Register {
-            return self.tryAllocRegWithoutTracking() orelse b: {
-                // We'll take over the first register. Move the instruction that was previously
-                // there to a stack allocation.
-                const reg = callee_preserved_regs[0];
-                const regs_entry = self.registers.remove(reg).?;
-                const spilled_inst = regs_entry.value;
-                try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
-                self.markRegFree(reg);
-
-                break :b reg;
-            };
+            return (try self.allocRegsWithoutTracking(1))[0];
         }

         /// Allocates the specified register with the specified
         /// instruction. Spills the register if it is currently
         /// allocated.
-        /// Before calling, must ensureCapacity + 1 on self.registers.
         pub fn getReg(self: *Self, reg: Register, inst: *ir.Inst) !void {
-            if (!isTracked(reg)) return;
+            const index = reg.allocIndex() orelse return;

             if (!self.isRegFree(reg)) {
                 // Move the instruction that was previously there to a
                 // stack allocation.
-                const regs_entry = self.registers.getEntry(reg).?;
-                const spilled_inst = regs_entry.value;
-                regs_entry.value = inst;
+                const spilled_inst = self.registers[index].?;
+                self.registers[index] = inst;
                 try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
             } else {
                 self.getRegAssumeFree(reg, inst);
@@ -190,34 +189,33 @@ pub fn RegisterManager(
         /// Spills the register if it is currently allocated.
         /// Does not track the register.
         pub fn getRegWithoutTracking(self: *Self, reg: Register) !void {
-            if (!isTracked(reg)) return;
+            const index = reg.allocIndex() orelse return;

             if (!self.isRegFree(reg)) {
                 // Move the instruction that was previously there to a
                 // stack allocation.
-                const regs_entry = self.registers.remove(reg).?;
-                const spilled_inst = regs_entry.value;
+                const spilled_inst = self.registers[index].?;
                 try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst);
                 self.markRegFree(reg);
             }
         }

         /// Allocates the specified register with the specified
-        /// instruction. Assumes that the register is free and no
+        /// instruction. Asserts that the register is free and no
         /// spilling is necessary.
-        /// Before calling, must ensureCapacity + 1 on self.registers.
         pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) void {
-            if (!isTracked(reg)) return;
+            const index = reg.allocIndex() orelse return;

-            self.registers.putAssumeCapacityNoClobber(reg, inst);
+            assert(self.registers[index] == null);
+            self.registers[index] = inst;
             self.markRegUsed(reg);
         }

         /// Marks the specified register as free
         pub fn freeReg(self: *Self, reg: Register) void {
-            if (!isTracked(reg)) return;
+            const index = reg.allocIndex() orelse return;

-            _ = self.registers.remove(reg);
+            self.registers[index] = null;
             self.markRegFree(reg);
         }
     };
@@ -247,7 +245,6 @@ const MockFunction = struct {
     const Self = @This();

     pub fn deinit(self: *Self) void {
-        self.register_manager.deinit(self.allocator);
         self.spilled.deinit(self.allocator);
     }

@@ -273,7 +270,6 @@ test "tryAllocReg: no spilling" {
     std.testing.expect(!function.register_manager.isRegAllocated(.r2));
     std.testing.expect(!function.register_manager.isRegAllocated(.r3));

-    try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
     std.testing.expectEqual(@as(?MockRegister, .r2), function.register_manager.tryAllocReg(&mock_instruction));
     std.testing.expectEqual(@as(?MockRegister, .r3), function.register_manager.tryAllocReg(&mock_instruction));
     std.testing.expectEqual(@as(?MockRegister, null), function.register_manager.tryAllocReg(&mock_instruction));
@@ -305,7 +301,6 @@ test "allocReg: spilling" {
     std.testing.expect(!function.register_manager.isRegAllocated(.r2));
     std.testing.expect(!function.register_manager.isRegAllocated(.r3));

-    try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
     std.testing.expectEqual(@as(?MockRegister, .r2), try function.register_manager.allocReg(&mock_instruction));
     std.testing.expectEqual(@as(?MockRegister, .r3), try function.register_manager.allocReg(&mock_instruction));

@@ -336,14 +331,12 @@ test "getReg" {
     std.testing.expect(!function.register_manager.isRegAllocated(.r2));
     std.testing.expect(!function.register_manager.isRegAllocated(.r3));

-    try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
     try function.register_manager.getReg(.r3, &mock_instruction);

     std.testing.expect(!function.register_manager.isRegAllocated(.r2));
     std.testing.expect(function.register_manager.isRegAllocated(.r3));

     // Spill r3
-    try function.register_manager.registers.ensureCapacity(allocator, function.register_manager.registers.count() + 2);
     try function.register_manager.getReg(.r3, &mock_instruction);

     std.testing.expect(!function.register_manager.isRegAllocated(.r2));