From d5ee45117722c1685f4b739686d74ccc7cf5b8d9 Mon Sep 17 00:00:00 2001
From: joachimschmidt557
Date: Fri, 3 Jun 2022 15:52:03 +0200
Subject: [PATCH] stage2 ARM: introduce support for basic switch expressions

---
 src/arch/arm/CodeGen.zig | 76 ++++++++++++++++++++++++----------------
 test/behavior/switch.zig |  6 ----
 2 files changed, 45 insertions(+), 37 deletions(-)

diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index b6b364ddc4..a0f9c12c34 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -953,15 +953,6 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
     return reg;
 }
 
-/// Allocates a new register and copies `mcv` into it.
-/// `reg_owner` is the instruction that gets associated with the register in the register table.
-/// This can have a side effect of spilling instructions to the stack to free up a register.
-fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
-    const reg = try self.register_manager.allocReg(reg_owner, gp);
-    try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
-    return MCValue{ .register = reg };
-}
-
 fn airAlloc(self: *Self, inst: Air.Inst.Index) !void {
     const stack_offset = try self.allocMemPtr(inst);
     return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
 }
@@ -2185,6 +2176,9 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
         .stack_offset => |off| {
             log.debug("%{d} => stack offset {d} (reused)", .{ inst, off });
         },
+        .cpsr_flags => {
+            log.debug("%{d} => cpsr_flags (reused)", .{inst});
+        },
         else => return false,
     }
 
@@ -2487,7 +2481,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
             else => unreachable,
         };
 
-        if (self.liveness.operandDies(inst, 0)) {
+        if (self.reuseOperand(inst, operand, 0, field)) {
             break :result field;
         } else {
             // Copy to new register
@@ -2511,6 +2505,41 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
+/// Allocates a new register. If `track_inst` is non-null, additionally
+/// tracks this register and the corresponding inst and removes all
+/// previous tracking. Does not do the actual moving (that is handled
+/// by genSetReg).
+fn prepareNewRegForMoving(
+    self: *Self,
+    track_inst: ?Air.Inst.Index,
+    register_class: RegisterManager.RegisterBitSet,
+    mcv: MCValue,
+) !Register {
+    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
+    const reg = try self.register_manager.allocReg(track_inst, register_class);
+
+    if (track_inst) |inst| {
+        // Overwrite the MCValue associated with this inst
+        branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
+
+        // If the previous MCValue occupied some space we track, we
+        // need to make sure it is marked as free now.
+        switch (mcv) {
+            .cpsr_flags => {
+                assert(self.cpsr_flags_inst.? == inst);
+                self.cpsr_flags_inst = null;
+            },
+            .register => |prev_reg| {
+                assert(!self.register_manager.isRegFree(prev_reg));
+                self.register_manager.freeReg(prev_reg);
+            },
+            else => {},
+        }
+    }
+
+    return reg;
+}
+
 /// Don't call this function directly. Use binOp instead.
 ///
 /// Calling this function signals an intention to generate a Mir
@@ -2537,18 +2566,12 @@ fn binOpRegister(
         null;
     defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
-    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-
     const lhs_reg = if (lhs_is_register) lhs.register else blk: {
         const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
            break :inst Air.refToIndex(md.lhs).?;
        } else null;
 
-        const reg = try self.register_manager.allocReg(track_inst, gp);
-
-        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
-
-        break :blk reg;
+        break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
     };
     const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
     defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
@@ -2558,11 +2581,7 @@
            break :inst Air.refToIndex(md.rhs).?;
        } else null;
 
-        const reg = try self.register_manager.allocReg(track_inst, gp);
-
-        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
-
-        break :blk reg;
+        break :blk try self.prepareNewRegForMoving(track_inst, gp, rhs);
     };
     const new_rhs_lock = self.register_manager.lockReg(rhs_reg);
     defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg);
@@ -2652,8 +2671,6 @@ fn binOpImmediate(
         null;
     defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
 
-    const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
-
     const lhs_reg = if (lhs_is_register) lhs.register else blk: {
         const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: {
            break :inst Air.refToIndex(
            ).?;
        } else null;
 
-        const reg = try self.register_manager.allocReg(track_inst, gp);
-
-        if (track_inst) |inst| branch.inst_table.putAssumeCapacity(inst, .{ .register = reg });
-
-        break :blk reg;
+        break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs);
     };
     const new_lhs_lock = self.register_manager.lockReg(lhs_reg);
     defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg);
@@ -3444,7 +3457,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
                 if (RegisterManager.indexOfRegIntoTracked(reg) == null) {
                     // Save function return value into a tracked register
                     log.debug("airCall: copying {} as it is not tracked", .{reg});
-                    break :result try self.copyToNewRegister(inst, info.return_value);
+                    const new_reg = try self.copyToTmpRegister(fn_ty.fnReturnType(), info.return_value);
+                    break :result MCValue{ .register = new_reg };
                 }
             },
             else => {},
@@ -4124,7 +4138,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     var case_i: u32 = 0;
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
         const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @bitCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
+        const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
         assert(items.len > 0);
         const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig
index 2d10ad0a13..c44f8fe223 100644
--- a/test/behavior/switch.zig
+++ b/test/behavior/switch.zig
@@ -5,7 +5,6 @@ const expectError = std.testing.expectError;
 const expectEqual = std.testing.expectEqual;
 
 test "switch with numbers" {
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     try testSwitchWithNumbers(13);
@@ -21,7 +20,6 @@ fn testSwitchWithNumbers(x: u32) !void {
 }
 
 test "switch with all ranges" {
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     try expect(testSwitchWithAllRanges(50, 3) == 1);
@@ -176,7 +174,6 @@ test "undefined.u0" {
 }
 
 test "switch with disjoint range" {
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     var q: u8 = 0;
@@ -397,7 +394,6 @@ fn switchWithUnreachable(x: i32) i32 {
 }
 
 test "capture value of switch with all unreachable prongs" {
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     const x = return_a_number() catch |err| switch (err) {
@@ -412,7 +408,6 @@ fn return_a_number() anyerror!i32 {
 
 test "switch on integer with else capturing expr" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     const S = struct {
@@ -658,7 +653,6 @@ test "switch capture copies its payload" {
 }
 
 test "capture of integer forwards the switch condition directly" {
-    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
 
     const S = struct {
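
Note: the shapes this patch enables on the stage2 ARM backend are the basic integer switches exercised by the behavior tests un-skipped above. A minimal self-contained sketch of that shape follows; the function and test names here are illustrative and not taken from the patch.

const std = @import("std");
const expect = std.testing.expect;

// A basic integer switch with scalar items, a multi-item prong, and an
// else prong: the kind of switch expression covered by the re-enabled
// behavior tests.
fn classify(x: u32) u32 {
    return switch (x) {
        1, 2, 3 => 0,
        13 => 1,
        else => 2,
    };
}

test "basic integer switch" {
    try expect(classify(13) == 1);
    try expect(classify(2) == 0);
    try expect(classify(100) == 2);
}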